The previous article analyzed how the buddy allocator frees pages; this one looks at how it allocates them.
As with the free path, there is no single function that is unambiguously "the" allocation entry point, so, as usual, the analysis starts a little higher up the call chain. The alloc_pages() macro is taken as the starting point:
【file:/include/linux/gfp.h】
#define alloc_pages(gfp_mask, order) \
        alloc_pages_node(numa_node_id(), gfp_mask, order)
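For orientation, this is roughly how a driver or subsystem would call the macro. The snippet below is an illustrative sketch written for this article, not code from the kernel tree; the helper names grab_order2_block()/release_order2_block() are made up, while alloc_pages(), page_address(), virt_to_page() and __free_pages() are the real kernel APIs:

#include <linux/gfp.h>
#include <linux/mm.h>

/* Illustrative sketch (hypothetical helpers): grab and release four
 * physically contiguous pages, i.e. an order-2 block. */
static void *grab_order2_block(void)
{
    struct page *pages = alloc_pages(GFP_KERNEL, 2);

    if (!pages)
        return NULL;

    /* GFP_KERNEL pages come from lowmem, so page_address() is valid. */
    return page_address(pages);
}

static void release_order2_block(void *addr)
{
    if (addr)
        __free_pages(virt_to_page(addr), 2);    /* give the block back to the buddy system */
}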
And the implementation of alloc_pages_node():
【file:/include/linux/gfp.h】
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                        unsigned int order)
{
    /* Unknown node is current node */
    if (nid < 0)
        nid = numa_node_id();

    return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
If no NUMA node is specified for the allocation, the current node is used by default. The function then calls __alloc_pages() to do the actual allocation, with node_zonelist() supplying that node's zonelist (its list of memory zones). Next, the implementation of __alloc_pages():
【file:/include/linux/gfp.h】
static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
        struct zonelist *zonelist)
{
    return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}
It is simply a wrapper around __alloc_pages_nodemask(). The implementation of __alloc_pages_nodemask():
【file:/mm/page_alloc.c】
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
            struct zonelist *zonelist, nodemask_t *nodemask)
{
    enum zone_type high_zoneidx = gfp_zone(gfp_mask);
    struct zone *preferred_zone;
    struct page *page = NULL;
    int migratetype = allocflags_to_migratetype(gfp_mask);
    unsigned int cpuset_mems_cookie;
    int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR;
    struct mem_cgroup *memcg = NULL;

    gfp_mask &= gfp_allowed_mask;

    lockdep_trace_alloc(gfp_mask);

    might_sleep_if(gfp_mask & __GFP_WAIT);

    if (should_fail_alloc_page(gfp_mask, order))
        return NULL;

    /*
     * Check the zones suitable for the gfp_mask contain at least one
     * valid zone. It's possible to have an empty zonelist as a result
     * of GFP_THISNODE and a memoryless node
     */
    if (unlikely(!zonelist->_zonerefs->zone))
        return NULL;

    /*
     * Will only have any effect when __GFP_KMEMCG is set. This is
     * verified in the (always inline) callee
     */
    if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
        return NULL;

retry_cpuset:
    cpuset_mems_cookie = get_mems_allowed();

    /* The preferred zone is used for statistics later */
    first_zones_zonelist(zonelist, high_zoneidx,
                nodemask ? : &cpuset_current_mems_allowed,
                &preferred_zone);
    if (!preferred_zone)
        goto out;

#ifdef CONFIG_CMA
    if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
        alloc_flags |= ALLOC_CMA;
#endif
retry:
    /* First allocation attempt */
    page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
            zonelist, high_zoneidx, alloc_flags,
            preferred_zone, migratetype);
    if (unlikely(!page)) {
        /*
         * The first pass makes sure allocations are spread
         * fairly within the local node. However, the local
         * node might have free pages left after the fairness
         * batches are exhausted, and remote zones haven't
         * even been considered yet. Try once more without
         * fairness, and include remote zones now, before
         * entering the slowpath and waking kswapd: prefer
         * spilling to a remote zone over swapping locally.
         */
        if (alloc_flags & ALLOC_FAIR) {
            reset_alloc_batches(zonelist, high_zoneidx,
                        preferred_zone);
            alloc_flags &= ~ALLOC_FAIR;
            goto retry;
        }
        /*
         * Runtime PM, block IO and its error handling path
         * can deadlock because I/O on the device might not
         * complete.
         */
        gfp_mask = memalloc_noio_flags(gfp_mask);
        page = __alloc_pages_slowpath(gfp_mask, order,
                zonelist, high_zoneidx, nodemask,
                preferred_zone, migratetype);
    }

    trace_mm_page_alloc(page, order, gfp_mask, migratetype);

out:
    /*
     * When updating a task's mems_allowed, it is possible to race with
     * parallel threads in such a way that an allocation can fail while
     * the mask is being updated. If a page allocation is about to fail,
     * check if the cpuset changed during allocation and if so, retry.
     */
    if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
        goto retry_cpuset;

    memcg_kmem_commit_charge(page, memcg, order);

    return page;
}
This is the heart of the zoned buddy allocator; after all the detours, we have finally arrived.
A few notes on this function. lockdep_trace_alloc() only does anything when both CONFIG_TRACE_IRQFLAGS and CONFIG_PROVE_LOCKING are defined; otherwise it is an empty function. If the gfp_mask passed in carries __GFP_WAIT, meaning the allocation is allowed to sleep, might_sleep_if() checks whether the task should sleep and be rescheduled. With CONFIG_FAIL_PAGE_ALLOC unset, should_fail_alloc_page() always returns false. The test if (unlikely(!zonelist->_zonerefs->zone)) checks whether the zonelist being allocated from is empty. memcg_kmem_newpage_charge() and memcg_kmem_commit_charge() are related to control groups (cgroups). get_mems_allowed() wraps read_seqcount_begin() to take the sequence number for read access to the seqcount-protected cpuset data, so that a concurrent update of the allowed-memory mask does not silently turn into a spurious allocation failure; its counterpart is put_mems_allowed(). first_zones_zonelist() finds, according to nodemask, a suitable zone preferred_zone whose index is not greater than high_zoneidx. Finally, allocflags_to_migratetype() converts the GFP flags into the appropriate migrate type.
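The get_mems_allowed()/put_mems_allowed() pair is built on the kernel's generic seqcount read-side pattern; the fragment below illustrates that pattern with the plain seqcount API rather than the mm-specific wrappers. It is a minimal sketch: cfg_seq, cfg_value, read_cfg_value() and write_cfg_value() are hypothetical names, while seqcount_init(), read_seqcount_begin(), read_seqcount_retry(), write_seqcount_begin() and write_seqcount_end() are real kernel primitives. In __alloc_pages_nodemask() the retry check is simply deferred: put_mems_allowed() performs the equivalent of read_seqcount_retry() at the end, and the allocation is retried only if it also failed.

#include <linux/seqlock.h>

static seqcount_t cfg_seq;      /* protects cfg_value (hypothetical example data) */
static int cfg_value;

static void cfg_init(void)
{
    seqcount_init(&cfg_seq);
}

/* Reader: re-read if a writer updated the data while we were reading. */
static int read_cfg_value(void)
{
    unsigned int seq;
    int val;

    do {
        seq = read_seqcount_begin(&cfg_seq);        /* snapshot the sequence counter */
        val = cfg_value;                            /* read the protected data */
    } while (read_seqcount_retry(&cfg_seq, seq));   /* nonzero => a write raced, retry */

    return val;
}

/* Writer: bump the counter around the update (callers serialize writers). */
static void write_cfg_value(int val)
{
    write_seqcount_begin(&cfg_seq);
    cfg_value = val;
    write_seqcount_end(&cfg_seq);
}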
The two key functions through which __alloc_pages_nodemask() actually obtains pages are get_page_from_freelist() and __alloc_pages_slowpath(). get_page_from_freelist() is tried first; only if it fails is __alloc_pages_slowpath() called. The slow path is allowed to wait and to trigger memory reclaim. Since __alloc_pages_slowpath() involves other memory-management mechanisms, it is not analyzed in depth here.
So the last thing to examine at this level is the implementation of get_page_from_freelist():
【file:/mm/page_alloc.c】
/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order,
        struct zonelist *zonelist, int high_zoneidx, int alloc_flags,
        struct zone *preferred_zone, int migratetype)
{
    struct zoneref *z;
    struct page *page = NULL;
    int classzone_idx;
    struct zone *zone;
    nodemask_t *allowednodes = NULL;    /* zonelist_cache approximation */
    int zlc_active = 0;                 /* set if using zonelist_cache */
    int did_zlc_setup = 0;              /* just call zlc_setup() one time */

    classzone_idx = zone_idx(preferred_zone);
zonelist_scan:
    /*
     * Scan zonelist, looking for a zone with enough free.
     * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c.
     */
    for_each_zone_zonelist_nodemask(zone, z, zonelist,
                        high_zoneidx, nodemask) {
        unsigned long mark;

        if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
            !zlc_zone_worth_trying(zonelist, z, allowednodes))
                continue;
        if ((alloc_flags & ALLOC_CPUSET) &&
            !cpuset_zone_allowed_softwall(zone, gfp_mask))
                continue;
        BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
        if (unlikely(alloc_flags & ALLOC_NO_WATERMARKS))
            goto try_this_zone;
        /*
         * Distribute pages in proportion to the individual
         * zone size to ensure fair page aging. The zone a
         * page was allocated in should have no effect on the
         * time the page has in memory before being reclaimed.
         */
        if (alloc_flags & ALLOC_FAIR) {
            if (!zone_local(preferred_zone, zone))
                continue;
            if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
                continue;
        }
        /*
         * When allocating a page cache page for writing, we
         * want to get it from a zone that is within its dirty
         * limit, such that no single zone holds more than its
         * proportional share of globally allowed dirty pages.
         * The dirty limits take into account the zone's
         * lowmem reserves and high watermark so that kswapd
         * should be able to balance it without having to
         * write pages from its LRU list.
         *
         * This may look like it could increase pressure on
         * lower zones by failing allocations in higher zones
         * before they are full. But the pages that do spill
         * over are limited as the lower zones are protected
         * by this very same mechanism. It should not become
         * a practical burden to them.
         *
         * XXX: For now, allow allocations to potentially
         * exceed the per-zone dirty limit in the slowpath
         * (ALLOC_WMARK_LOW unset) before going into reclaim,
         * which is important when on a NUMA setup the allowed
         * zones are together not big enough to reach the
         * global limit. The proper fix for these situations
         * will require awareness of zones in the
         * dirty-throttling and the flusher threads.
         */
        if ((alloc_flags & ALLOC_WMARK_LOW) &&
            (gfp_mask & __GFP_WRITE) && !zone_dirty_ok(zone))
            goto this_zone_full;

        mark = zone->watermark[alloc_flags & ALLOC_WMARK_MASK];
        if (!zone_watermark_ok(zone, order, mark,
                       classzone_idx, alloc_flags)) {
            int ret;

            if (IS_ENABLED(CONFIG_NUMA) &&
                    !did_zlc_setup && nr_online_nodes > 1) {
                /*
                 * we do zlc_setup if there are multiple nodes
                 * and before considering the first zone allowed
                 * by the cpuset.
                 */
                allowednodes = zlc_setup(zonelist, alloc_flags);
                zlc_active = 1;
                did_zlc_setup = 1;
            }

            if (zone_reclaim_mode == 0 ||
                !zone_allows_reclaim(preferred_zone, zone))
                goto this_zone_full;

            /*
             * As we may have just activated ZLC, check if the first
             * eligible zone has failed zone_reclaim recently.
             */
            if (IS_ENABLED(CONFIG_NUMA) && zlc_active &&
                !zlc_zone_worth_trying(zonelist, z, allowednodes))
                continue;

            ret = zone_reclaim(zone, gfp_mask, order);
            switch (ret) {
            case ZONE_RECLAIM_NOSCAN:
                /* did not scan */
                continue;
            case ZONE_RECLAIM_FULL:
                /* scanned but unreclaimable */
                continue;
            default:
                /* did we reclaim enough */
                if (zone_watermark_ok(zone, order, mark,
                        classzone_idx, alloc_flags))
                    goto try_this_zone;

                /*
                 * Failed to reclaim enough to meet watermark.
                 * Only mark the zone full if checking the min
                 * watermark or if we failed to reclaim just
                 * 1<<order pages or else the page allocator
                 * fastpath will prematurely mark zones full
                 * when the watermark is between the low and
                 * min watermarks.
                 */
                if (((alloc_flags & ALLOC_WMARK_MASK) == ALLOC_WMARK_MIN) ||
                    ret == ZONE_RECLAIM_SOME)
                    goto this_zone_full;

                continue;
            }
        }

try_this_zone:
        page = buffered_rmqueue(preferred_zone, zone, order,
                        gfp_mask, migratetype);
        if (page)
            break;
this_zone_full:
        if (IS_ENABLED(CONFIG_NUMA))
            zlc_mark_zone_full(zonelist, z);
    }

    if (unlikely(IS_ENABLED(CONFIG_NUMA) && page == NULL && zlc_active)) {
        /* Disable zlc cache for second zonelist scan */
        zlc_active = 0;
        goto zonelist_scan;
    }

    if (page)
        /*
         * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was
         * necessary to allocate the page. The expectation is
         * that the caller is taking steps that will free more
         * memory. The caller should avoid the page being used
         * for !PFMEMALLOC purposes.
         */
        page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS);

    return page;
}
This function walks the zonelist and tries to allocate from each zone in turn. for_each_zone_zonelist_nodemask() iterates over the zonelist; before attempting an allocation from a zone it checks whether the zone has allocatable memory, whether the current CPU (cpuset) is allowed, according to alloc_flags, to allocate from that zone, and whether the zone passes the watermark check, i.e. whether it still has enough free memory. These checks will be analyzed in detail later; for now the focus stays on the buddy algorithm itself.
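Still, the watermark check is worth a quick illustration. The sketch below captures the core idea behind zone_watermark_ok() only: the function name watermark_ok_sketch() is made up, and the real kernel code additionally adjusts the mark for ALLOC_HIGH/ALLOC_HARDER, subtracts CMA pages and adds the per-zone lowmem reserve. The key point is that the zone must not only have enough free pages overall; for each order below the requested one it must also keep a (halving) share of free memory in blocks of at least that size, so a higher-order request cannot be satisfied from a zone whose free memory is all fragmented into tiny blocks.

/* Simplified sketch of the idea behind zone_watermark_ok() (hypothetical
 * helper, not the kernel function): ignores ALLOC_HIGH/ALLOC_HARDER,
 * CMA pages and lowmem reserves. */
static bool watermark_ok_sketch(struct zone *z, unsigned int order,
                                unsigned long mark)
{
    long free = zone_page_state(z, NR_FREE_PAGES);
    unsigned int o;

    /* Pretend the requested block is already gone. */
    free -= (1 << order) - 1;

    if (free <= (long)mark)
        return false;

    /* For each lower order, discount blocks of that size and halve the
     * requirement: enough larger blocks must remain for this request. */
    for (o = 0; o < order; o++) {
        free -= z->free_area[o].nr_free << o;
        mark >>= 1;
        if (free <= (long)mark)
            return false;
    }
    return true;
}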
It is not hard to see that the function that actually hands out pages is buffered_rmqueue(); its implementation:
【file:/mm/page_alloc.c】
/*
 * Really, prep_compound_page() should be called from __rmqueue_bulk(). But
 * we cheat by calling it from here, in the order > 0 path. Saves a branch
 * or two.
 */
static inline
struct page *buffered_rmqueue(struct zone *preferred_zone,
            struct zone *zone, int order, gfp_t gfp_flags,
            int migratetype)
{
    unsigned long flags;
    struct page *page;
    int cold = !!(gfp_flags & __GFP_COLD);

again:
    if (likely(order == 0)) {
        struct per_cpu_pages *pcp;
        struct list_head *list;

        local_irq_save(flags);
        pcp = &this_cpu_ptr(zone->pageset)->pcp;
        list = &pcp->lists[migratetype];
        if (list_empty(list)) {
            pcp->count += rmqueue_bulk(zone, 0,
                    pcp->batch, list,
                    migratetype, cold);
            if (unlikely(list_empty(list)))
                goto failed;
        }

        if (cold)
            page = list_entry(list->prev, struct page, lru);
        else
            page = list_entry(list->next, struct page, lru);

        list_del(&page->lru);
        pcp->count--;
    } else {
        if (unlikely(gfp_flags & __GFP_NOFAIL)) {
            /*
             * __GFP_NOFAIL is not to be used in new code.
             *
             * All __GFP_NOFAIL callers should be fixed so that they
             * properly detect and handle allocation failures.
             *
             * We most definitely don't want callers attempting to
             * allocate greater than order-1 page units with
             * __GFP_NOFAIL.
             */
            WARN_ON_ONCE(order > 1);
        }
        spin_lock_irqsave(&zone->lock, flags);
        page = __rmqueue(zone, order, migratetype);
        spin_unlock(&zone->lock);
        if (!page)
            goto failed;
        __mod_zone_freepage_state(zone, -(1 << order),
                      get_pageblock_migratetype(page));
    }

    __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));

    __count_zone_vm_events(PGALLOC, zone, 1 << order);
    zone_statistics(preferred_zone, zone, gfp_flags);
    local_irq_restore(flags);

    VM_BUG_ON_PAGE(bad_range(zone, page), page);
    if (prep_new_page(page, order, gfp_flags))
        goto again;
    return page;

failed:
    local_irq_restore(flags);
    return NULL;
}
The if (likely(order == 0)) branch handles order-0 requests, i.e. a single page: it first tries to take a page from the per-CPU hot/cold page lists, and if the relevant list is empty it calls rmqueue_bulk() to refill the list from the buddy system before taking a page from it. Requests for more than one page (order > 0) go straight to the buddy system through __rmqueue().
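For reference, the per-CPU cache that the order-0 path draws from is described by struct per_cpu_pages; in kernels of this generation it looks roughly as follows (reproduced from memory of include/linux/mmzone.h, so treat it as a sketch rather than an exact copy):

struct per_cpu_pages {
    int count;      /* number of pages in the list */
    int high;       /* high watermark, emptying needed */
    int batch;      /* chunk size for buddy add/remove */

    /* Lists of pages, one per migrate type stored on the pcp-lists */
    struct list_head lists[MIGRATE_PCPTYPES];
};

Freed pages that are still cache-warm ("hot") sit at the head of the list and cold pages at the tail, which is exactly what the list->next versus list->prev selection in buffered_rmqueue() above implements for allocations with and without __GFP_COLD.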
The implementation of __rmqueue():
【file:/mm/page_alloc.c】
/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
                        int migratetype)
{
    struct page *page;

retry_reserve:
    page = __rmqueue_smallest(zone, order, migratetype);

    if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
        page = __rmqueue_fallback(zone, order, migratetype);

        /*
         * Use MIGRATE_RESERVE rather than fail an allocation. goto
         * is used because __rmqueue_smallest is an inline function
         * and we want just one call site
         */
        if (!page) {
            migratetype = MIGRATE_RESERVE;
            goto retry_reserve;
        }
    }

    trace_mm_page_alloc_zone_locked(page, order, migratetype);
    return page;
}
This function relies on two key helpers: __rmqueue_smallest() and __rmqueue_fallback().
First, __rmqueue_smallest():
【file:/mm/page_alloc.c】
/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
                        int migratetype)
{
    unsigned int current_order;
    struct free_area *area;
    struct page *page;

    /* Find a page of the appropriate size in the preferred list */
    for (current_order = order; current_order < MAX_ORDER; ++current_order) {
        area = &(zone->free_area[current_order]);
        if (list_empty(&area->free_list[migratetype]))
            continue;

        page = list_entry(area->free_list[migratetype].next,
                            struct page, lru);
        list_del(&page->lru);
        rmv_page_order(page);
        area->nr_free--;
        expand(zone, page, order, current_order, area, migratetype);
        return page;
    }

    return NULL;
}
This function implements the core of the allocation algorithm. The for() loop starts at the requested order of the buddy free lists: if the free list for that order is non-empty, a free block is taken from it with list_del() and the request is satisfied directly. If the list is empty, the search moves up to the next higher order, and so on; if even the highest order is empty, the allocation fails. Once a non-empty order is found, the free block is removed from its list with list_del() and then halved repeatedly by expand(): each split hangs the upper half back on the free list one order lower, until the block has been cut down to exactly the requested order. The resulting block of pages is returned, and the allocation is complete.
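expand() itself is short; a simplified rendition is shown below. The names match the kernel's, but the body is a sketch with the debug guard-page and sanity-check code stripped out, so it should be read as an illustration of the splitting logic rather than as the exact source:

/* Simplified sketch of expand(): split a block of order 'high' down to
 * order 'low', hanging each upper half back on the next lower free list.
 * The real kernel function also contains debug/guard-page handling. */
static inline void expand(struct zone *zone, struct page *page,
                          int low, int high, struct free_area *area,
                          int migratetype)
{
    unsigned long size = 1 << high;

    while (high > low) {
        area--;             /* free_area one order lower */
        high--;
        size >>= 1;
        /* The upper half [page + size, ...) becomes a free buddy block. */
        list_add(&page[size].lru, &area->free_list[migratetype]);
        area->nr_free++;
        set_page_order(&page[size], high);
    }
}

For example, an order-2 request served from an order-5 block leaves the split-off order-4, order-3 and order-2 halves on their respective free lists and returns the remaining order-2 block to the caller.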
As for __rmqueue_fallback():
【file:/mm/page_alloc.c】
/* Remove an element from the buddy allocator from the fallback list */
static inline struct page *
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
{
    struct free_area *area;
    int current_order;
    struct page *page;
    int migratetype, new_type, i;

    /* Find the largest possible block of pages in the other list */
    for (current_order = MAX_ORDER-1; current_order >= order;
                        --current_order) {
        for (i = 0;; i++) {
            migratetype = fallbacks[start_migratetype][i];

            /* MIGRATE_RESERVE handled later if necessary */
            if (migratetype == MIGRATE_RESERVE)
                break;

            area = &(zone->free_area[current_order]);
            if (list_empty(&area->free_list[migratetype]))
                continue;

            page = list_entry(area->free_list[migratetype].next,
                    struct page, lru);
            area->nr_free--;

            new_type = try_to_steal_freepages(zone, page,
                              start_migratetype,
                              migratetype);

            /* Remove the page from the freelists */
            list_del(&page->lru);
            rmv_page_order(page);

            expand(zone, page, order, current_order, area,
                   new_type);

            trace_mm_page_alloc_extfrag(page, order, current_order,
                start_migratetype, migratetype, new_type);

            return page;
        }
    }

    return NULL;
}
Its job is to steal memory from other migrate types. Unlike the normal buddy search, when falling back to a foreign migrate type it searches from the highest order downwards: carving the request out of one large block causes less fragmentation than nibbling many small blocks away from another type. If every fallback fails, pages are finally taken from the MIGRATE_RESERVE list. This part is not examined in depth here and will be analyzed in detail later.
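The fallback order indexed by fallbacks[start_migratetype][i] comes from a static table in mm/page_alloc.c. Reproduced from memory for kernels of this generation (so treat the exact entries as an approximation; the MIGRATE_MOVABLE row gains a MIGRATE_CMA entry when CONFIG_CMA is enabled), it looks roughly like this:

/* Approximate fallback table consulted by __rmqueue_fallback(); each row
 * lists, in order, the migrate types a given type may steal from. */
static int fallbacks[MIGRATE_TYPES][4] = {
    [MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
    [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
    [MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
    [MIGRATE_RESERVE]     = { MIGRATE_RESERVE },   /* never used as a fallback source */
};

The MIGRATE_RESERVE sentinel is what terminates the inner for (i = 0;; i++) loop in __rmqueue_fallback() above.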
That wraps it up: the allocation side of the buddy allocator has now been covered, at least for the time being.