Chinaunix首页 | 论坛 | 博客
  • 博客访问: 294448
  • 博文数量: 44
  • 博客积分: 10
  • 博客等级: 民兵
  • 技术积分: 1354
  • 用 户 组: 普通用户
  • 注册时间: 2012-04-08 15:38
个人简介

人生像是在跑马拉松,能够完赛的都是不断地坚持向前迈进;人生就是像在跑马拉松,不断调整步伐,把握好分分秒秒;人生还是像在跑马拉松,能力决定了能跑短程、半程还是全程。人生其实就是一场马拉松,坚持不懈,珍惜时间。

文章分类

分类: LINUX

2015-01-01 12:43:42

    前面构建内存管理框架,已经将内存管理node节点设置完毕,接下来将是管理区和页面管理的构建。此处代码实现主要在于setup_arch()下的一处钩子:x86_init.paging.pagetable_init()。据前面分析可知x86_init结构体内该钩子实际上挂接的是native_pagetable_init()函数。

    native_pagetable_init():

  1. 【file:/arch/x86/mm/init_32.c】
  2. void __init native_pagetable_init(void)
  3. {
  4.     unsigned long pfn, va;
  5.     pgd_t *pgd, *base = swapper_pg_dir;
  6.     pud_t *pud;
  7.     pmd_t *pmd;
  8.     pte_t *pte;
  9.  
  10.     /*
  11.      * Remove any mappings which extend past the end of physical
  12.      * memory from the boot time page table.
  13.      * In virtual address space, we should have at least two pages
  14.      * from VMALLOC_END to pkmap or fixmap according to VMALLOC_END
  15.      * definition. And max_low_pfn is set to VMALLOC_END physical
  16.      * address. If initial memory mapping is doing right job, we
  17.      * should have pte used near max_low_pfn or one pmd is not present.
  18.      */
  19.     for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
  20.         va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
  21.         pgd = base + pgd_index(va);
  22.         if (!pgd_present(*pgd))
  23.             break;
  24.  
  25.         pud = pud_offset(pgd, va);
  26.         pmd = pmd_offset(pud, va);
  27.         if (!pmd_present(*pmd))
  28.             break;
  29.  
  30.         /* should not be large page here */
  31.         if (pmd_large(*pmd)) {
  32.             pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
  33.                 pfn, pmd, __pa(pmd));
  34.             BUG_ON(1);
  35.         }
  36.  
  37.         pte = pte_offset_kernel(pmd, va);
  38.         if (!pte_present(*pte))
  39.             break;
  40.  
  41.         printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
  42.                 pfn, pmd, __pa(pmd), pte, __pa(pte));
  43.         pte_clear(NULL, va, pte);
  44.     }
  45.     paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
  46.     paging_init();
  47. }

 该函数的for循环主要是用于检测max_low_pfn直接映射空间后面的物理内存是否存在系统启动引导时创建的页表,如果存在,则使用pte_clear()将其清除。

接下来的paravirt_alloc_pmd()主要用于半虚拟化(paravirtualization)支持,即通过钩子函数的方式替换x86环境中多种多样的指令实现。

再往下的paging_init()

  1. 【file:/arch/x86/mm/init_32.c】
  2. /*
  3.  * paging_init() sets up the page tables - note that the first 8MB are
  4.  * already mapped by head.S.
  5.  *
  6.  * This routines also unmaps the page at virtual kernel address 0, so
  7.  * that we can trap those pesky NULL-reference errors in the kernel.
  8.  */
  9. void __init paging_init(void)
  10. {
  11.     pagetable_init();
  12.  
  13.     __flush_tlb_all();
  14.  
  15.     kmap_init();
  16.  
  17.     /*
  18.      * NOTE: at this point the bootmem allocator is fully available.
  19.      */
  20.     olpc_dt_build_devicetree();
  21.     sparse_memory_present_with_active_regions(MAX_NUMNODES);
  22.     sparse_init();
  23.     zone_sizes_init();
  24. }

paging_init()主要都是函数调用,现在逐一分析各个函数功能,先看pagetable_init()

  1. 【file:/arch/x86/mm/init_32.c】
  2. static void __init pagetable_init(void)
  3. {
  4.     pgd_t *pgd_base = swapper_pg_dir;
  5.  
  6.     permanent_kmaps_init(pgd_base);
  7. }

这里再次看到页全局目录swapper_pg_dir变量,它作为参数接着调用permanent_kmaps_init()

  1. 【file:/arch/x86/mm/init_32.c】
  2. static void __init permanent_kmaps_init(pgd_t *pgd_base)
  3. {
  4.     unsigned long vaddr;
  5.     pgd_t *pgd;
  6.     pud_t *pud;
  7.     pmd_t *pmd;
  8.     pte_t *pte;
  9.  
  10.     vaddr = PKMAP_BASE;
  11.     page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
  12.  
  13.     pgd = swapper_pg_dir + pgd_index(vaddr);
  14.     pud = pud_offset(pgd, vaddr);
  15.     pmd = pmd_offset(pud, vaddr);
  16.     pte = pte_offset_kernel(pmd, vaddr);
  17.     pkmap_page_table = pte;
  18. }

这里可以看到前面分析过的建立页表函数page_table_range_init(),此处建立页表的范围为PKMAP_BASE到PKMAP_BASE + PAGE_SIZE*LAST_PKMAP,这是KMAP区(永久映射区)的范围。也就是说,当前是在建立永久映射区的页表;建好页表后,将页表地址赋给永久映射区页表变量pkmap_page_table。

完了接着看paging_init()里面调用的下一个函数kmap_init()

  1. 【file:/arch/x86/mm/init_32.c】
  2. static void __init kmap_init(void)
  3. {
  4.     unsigned long kmap_vstart;
  5.  
  6.     /*
  7.      * Cache the first kmap pte:
  8.      */
  9.     kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
  10.     kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
  11.  
  12.     kmap_prot = PAGE_KERNEL;
  13. }

其中kmap_get_fixmap_pte()

  1. 【file:/arch/x86/mm/init_32.c】
  2. static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
  3. {
  4.     return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
  5.             vaddr), vaddr), vaddr);
  6. }

     可以很容易看到,kmap_init()主要是获取临时映射区间的起始页表项并将其赋给临时映射页表变量kmap_pte,同时将页表属性kmap_prot置为PAGE_KERNEL。

paging_init()中,由于没有开启CONFIG_OLPC配置,故olpc_dt_build_devicetree()为空函数,暂不分析。同样,前面提及的sparse_memory_present_with_active_regions()和sparse_init()也暂不分析。

最后看一下zone_sizes_init()

  1. 【file:/arch/x86/mm/init.c】
  2. void __init zone_sizes_init(void)
  3. {
  4.     unsigned long max_zone_pfns[MAX_NR_ZONES];
  5.  
  6.     memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
  7.  
  8. #ifdef CONFIG_ZONE_DMA
  9.     max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
  10. #endif
  11. #ifdef CONFIG_ZONE_DMA32
  12.     max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
  13. #endif
  14.     max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
  15. #ifdef CONFIG_HIGHMEM
  16.     max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
  17. #endif
  18.  
  19.     free_area_init_nodes(max_zone_pfns);
  20. }

通过max_zone_pfns获取各个管理区的最大页面数,并作为参数调用free_area_init_nodes(),其中free_area_init_nodes()函数实现:

  1. 【file:/mm/page_alloc.c】
  2. /**
  3.  * free_area_init_nodes - Initialise all pg_data_t and zone data
  4.  * @max_zone_pfn: an array of max PFNs for each zone
  5.  *
  6.  * This will call free_area_init_node() for each active node in the system.
  7.  * Using the page ranges provided by add_active_range(), the size of each
  8.  * zone in each node and their holes is calculated. If the maximum PFN
  9.  * between two adjacent zones match, it is assumed that the zone is empty.
  10.  * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
  11.  * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
  12.  * starts where the previous one ended. For example, ZONE_DMA32 starts
  13.  * at arch_max_dma_pfn.
  14.  */
  15. void __init free_area_init_nodes(unsigned long *max_zone_pfn)
  16. {
  17.     unsigned long start_pfn, end_pfn;
  18.     int i, nid;
  19.  
  20.     /* Record where the zone boundaries are */
  21.     memset(arch_zone_lowest_possible_pfn, 0,
  22.                 sizeof(arch_zone_lowest_possible_pfn));
  23.     memset(arch_zone_highest_possible_pfn, 0,
  24.                 sizeof(arch_zone_highest_possible_pfn));
  25.     arch_zone_lowest_possible_pfn[0] = find_min_pfn_with_active_regions();
  26.     arch_zone_highest_possible_pfn[0] = max_zone_pfn[0];
  27.     for (i = 1; i < MAX_NR_ZONES; i++) {
  28.         if (i == ZONE_MOVABLE)
  29.             continue;
  30.         arch_zone_lowest_possible_pfn[i] =
  31.             arch_zone_highest_possible_pfn[i-1];
  32.         arch_zone_highest_possible_pfn[i] =
  33.             max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);
  34.     }
  35.     arch_zone_lowest_possible_pfn[ZONE_MOVABLE] = 0;
  36.     arch_zone_highest_possible_pfn[ZONE_MOVABLE] = 0;
  37.  
  38.     /* Find the PFNs that ZONE_MOVABLE begins at in each node */
  39.     memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
  40.     find_zone_movable_pfns_for_nodes();
  41.  
  42.     /* Print out the zone ranges */
  43.     printk("Zone ranges:\n");
  44.     for (i = 0; i < MAX_NR_ZONES; i++) {
  45.         if (i == ZONE_MOVABLE)
  46.             continue;
  47.         printk(KERN_CONT " %-8s ", zone_names[i]);
  48.         if (arch_zone_lowest_possible_pfn[i] ==
  49.                 arch_zone_highest_possible_pfn[i])
  50.             printk(KERN_CONT "empty\n");
  51.         else
  52.             printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
  53.                 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
  54.                 (arch_zone_highest_possible_pfn[i]
  55.                     << PAGE_SHIFT) - 1);
  56.     }
  57.  
  58.     /* Print out the PFNs ZONE_MOVABLE begins at in each node */
  59.     printk("Movable zone start for each node\n");
  60.     for (i = 0; i < MAX_NUMNODES; i++) {
  61.         if (zone_movable_pfn[i])
  62.             printk(" Node %d: %#010lx\n", i,
  63.                    zone_movable_pfn[i] << PAGE_SHIFT);
  64.     }
  65.  
  66.     /* Print out the early node map */
  67.     printk("Early memory node ranges\n");
  68.     for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
  69.         printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
  70.                start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
  71.  
  72.     /* Initialise every node */
  73.     mminit_verify_pageflags_layout();
  74.     setup_nr_node_ids();
  75.     for_each_online_node(nid) {
  76.         pg_data_t *pgdat = NODE_DATA(nid);
  77.         free_area_init_node(nid, NULL,
  78.                 find_min_pfn_for_node(nid), NULL);
  79.  
  80.         /* Any memory on that node */
  81.         if (pgdat->node_present_pages)
  82.             node_set_state(nid, N_MEMORY);
  83.         check_for_memory(pgdat, nid);
  84.     }
  85. }

该函数中,arch_zone_lowest_possible_pfn用于存储各内存管理区可使用的最小内存页框号,而arch_zone_highest_possible_pfn则是用来存储各内存管理区可使用的最大内存页框号。于是find_min_pfn_with_active_regions()函数主要是实现用于获取最小内存页框号,而获取最大内存页框号则是紧随的for循环:

    for (i = 1; i < MAX_NR_ZONES; i++) {

        if (i == ZONE_MOVABLE)

            continue;

        arch_zone_lowest_possible_pfn[i] =

            arch_zone_highest_possible_pfn[i-1];

        arch_zone_highest_possible_pfn[i] =

            max(max_zone_pfn[i], arch_zone_lowest_possible_pfn[i]);

    }

该循环里面除了确定各内存管理区最大内存页框号,同时也确定了各管理区的最小内存页框号,实际上就是确定各个管理区的上下边界。此外,还有一个全局数组zone_movable_pfn,用于记录各个node节点的Movable管理区的起始页框号,而查找该页框号的相应函数为find_zone_movable_pfns_for_nodes()

具体实现:

  1. 【file:/mm/page_alloc.c】
  2. /*
  3.  * Find the PFN the Movable zone begins in each node. Kernel memory
  4.  * is spread evenly between nodes as long as the nodes have enough
  5.  * memory. When they don't, some nodes will have more kernelcore than
  6.  * others
  7.  */
  8. static void __init find_zone_movable_pfns_for_nodes(void)
  9. {
  10.     int i, nid;
  11.     unsigned long usable_startpfn;
  12.     unsigned long kernelcore_node, kernelcore_remaining;
  13.     /* save the state before borrow the nodemask */
  14.     nodemask_t saved_node_state = node_states[N_MEMORY];
  15.     unsigned long totalpages = early_calculate_totalpages();
  16.     int usable_nodes = nodes_weight(node_states[N_MEMORY]);
  17.     struct memblock_type *type = &memblock.memory;
  18.  
  19.     /* Need to find movable_zone earlier when movable_node is specified. */
  20.     find_usable_zone_for_movable();
  21.  
  22.     /*
  23.      * If movable_node is specified, ignore kernelcore and movablecore
  24.      * options.
  25.      */
  26.     if (movable_node_is_enabled()) {
  27.         for (i = 0; i < type->cnt; i++) {
  28.             if (!memblock_is_hotpluggable(&type->regions[i]))
  29.                 continue;
  30.  
  31.             nid = type->regions[i].nid;
  32.  
  33.             usable_startpfn = PFN_DOWN(type->regions[i].base);
  34.             zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
  35.                 min(usable_startpfn, zone_movable_pfn[nid]) :
  36.                 usable_startpfn;
  37.         }
  38.  
  39.         goto out2;
  40.     }
  41.  
  42.     /*
  43.      * If movablecore=nn[KMG] was specified, calculate what size of
  44.      * kernelcore that corresponds so that memory usable for
  45.      * any allocation type is evenly spread. If both kernelcore
  46.      * and movablecore are specified, then the value of kernelcore
  47.      * will be used for required_kernelcore if it's greater than
  48.      * what movablecore would have allowed.
  49.      */
  50.     if (required_movablecore) {
  51.         unsigned long corepages;
  52.  
  53.         /*
  54.          * Round-up so that ZONE_MOVABLE is at least as large as what
  55.          * was requested by the user
  56.          */
  57.         required_movablecore =
  58.             roundup(required_movablecore, MAX_ORDER_NR_PAGES);
  59.         corepages = totalpages - required_movablecore;
  60.  
  61.         required_kernelcore = max(required_kernelcore, corepages);
  62.     }
  63.  
  64.     /* If kernelcore was not specified, there is no ZONE_MOVABLE */
  65.     if (!required_kernelcore)
  66.         goto out;
  67.  
  68.     /* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
  69.     usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];
  70.  
  71. restart:
  72.     /* Spread kernelcore memory as evenly as possible throughout nodes */
  73.     kernelcore_node = required_kernelcore / usable_nodes;
  74.     for_each_node_state(nid, N_MEMORY) {
  75.         unsigned long start_pfn, end_pfn;
  76.  
  77.         /*
  78.          * Recalculate kernelcore_node if the division per node
  79.          * now exceeds what is necessary to satisfy the requested
  80.          * amount of memory for the kernel
  81.          */
  82.         if (required_kernelcore < kernelcore_node)
  83.             kernelcore_node = required_kernelcore / usable_nodes;
  84.  
  85.         /*
  86.          * As the map is walked, we track how much memory is usable
  87.          * by the kernel using kernelcore_remaining. When it is
  88.          * 0, the rest of the node is usable by ZONE_MOVABLE
  89.          */
  90.         kernelcore_remaining = kernelcore_node;
  91.  
  92.         /* Go through each range of PFNs within this node */
  93.         for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
  94.             unsigned long size_pages;
  95.  
  96.             start_pfn = max(start_pfn, zone_movable_pfn[nid]);
  97.             if (start_pfn >= end_pfn)
  98.                 continue;
  99.  
  100.             /* Account for what is only usable for kernelcore */
  101.             if (start_pfn < usable_startpfn) {
  102.                 unsigned long kernel_pages;
  103.                 kernel_pages = min(end_pfn, usable_startpfn)
  104.                                 - start_pfn;
  105.  
  106.                 kernelcore_remaining -= min(kernel_pages,
  107.                             kernelcore_remaining);
  108.                 required_kernelcore -= min(kernel_pages,
  109.                             required_kernelcore);
  110.  
  111.                 /* Continue if range is now fully accounted */
  112.                 if (end_pfn <= usable_startpfn) {
  113.  
  114.                     /*
  115.                      * Push zone_movable_pfn to the end so
  116.                      * that if we have to rebalance
  117.                      * kernelcore across nodes, we will
  118.                      * not double account here
  119.                      */
  120.                     zone_movable_pfn[nid] = end_pfn;
  121.                     continue;
  122.                 }
  123.                 start_pfn = usable_startpfn;
  124.             }
  125.  
  126.             /*
  127.              * The usable PFN range for ZONE_MOVABLE is from
  128.              * start_pfn->end_pfn. Calculate size_pages as the
  129.              * number of pages used as kernelcore
  130.              */
  131.             size_pages = end_pfn - start_pfn;
  132.             if (size_pages > kernelcore_remaining)
  133.                 size_pages = kernelcore_remaining;
  134.             zone_movable_pfn[nid] = start_pfn + size_pages;
  135.  
  136.             /*
  137.              * Some kernelcore has been met, update counts and
  138.              * break if the kernelcore for this node has been
  139.              * satisfied
  140.              */
  141.             required_kernelcore -= min(required_kernelcore,
  142.                                 size_pages);
  143.             kernelcore_remaining -= size_pages;
  144.             if (!kernelcore_remaining)
  145.                 break;
  146.         }
  147.     }
  148.  
  149.     /*
  150.      * If there is still required_kernelcore, we do another pass with one
  151.      * less node in the count. This will push zone_movable_pfn[nid] further
  152.      * along on the nodes that still have memory until kernelcore is
  153.      * satisfied
  154.      */
  155.     usable_nodes--;
  156.     if (usable_nodes && required_kernelcore > usable_nodes)
  157.         goto restart;
  158.  
  159. out2:
  160.     /* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
  161.     for (nid = 0; nid < MAX_NUMNODES; nid++)
  162.         zone_movable_pfn[nid] =
  163.             roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);
  164.  
  165. out:
  166.     /* restore the node_state */
  167.     node_states[N_MEMORY] = saved_node_state;
  168. }

该函数中early_calculate_totalpages()主要用于统计系统页面总数,而nodes_weight()则是将当前系统的节点数统计返回,其入参node_states[N_MEMORY]的定义在page_alloc.c中:

nodemask_t node_states[NR_NODE_STATES] __read_mostly = {

    [N_POSSIBLE] = NODE_MASK_ALL,

    [N_ONLINE] = { { [0] = 1UL } },

#ifndef CONFIG_NUMA

    [N_NORMAL_MEMORY] = { { [0] = 1UL } },

#ifdef CONFIG_HIGHMEM

    [N_HIGH_MEMORY] = { { [0] = 1UL } },

#endif

#ifdef CONFIG_MOVABLE_NODE

    [N_MEMORY] = { { [0] = 1UL } },

#endif

    [N_CPU] = { { [0] = 1UL } },

#endif  /* NUMA */

};

EXPORT_SYMBOL(node_states);

接着往下的find_usable_zone_for_movable()

  1. 【file:/mm/page_alloc.c】
  2. /*
  3.  * This finds a zone that can be used for ZONE_MOVABLE pages. The
  4.  * assumption is made that zones within a node are ordered in monotonic
  5.  * increasing memory addresses so that the "highest" populated zone is used
  6.  */
  7. static void __init find_usable_zone_for_movable(void)
  8. {
  9.     int zone_index;
  10.     for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
  11.         if (zone_index == ZONE_MOVABLE)
  12.             continue;
  13.  
  14.         if (arch_zone_highest_possible_pfn[zone_index] >
  15.                 arch_zone_lowest_possible_pfn[zone_index])
  16.             break;
  17.     }
  18.  
  19.     VM_BUG_ON(zone_index == -1);
  20.     movable_zone = zone_index;
  21. }

其主要实现查找一个可用于ZONE_MOVABLE页面的内存管理区,该区低于ZONE_MOVABLE且页面数不为0。通常最高内存管理区被找到,然后管理区索引记录在全局变量movable_zone中。

接下来的if分支:

    if (movable_node_is_enabled()) {

        for (i = 0; i < type->cnt; i++) {

            if (!memblock_is_hotpluggable(&type->regions[i]))

                continue;

            nid = type->regions[i].nid;

            usable_startpfn = PFN_DOWN(type->regions[i].base);

            zone_movable_pfn[nid] = zone_movable_pfn[nid] ?

                min(usable_startpfn, zone_movable_pfn[nid]) :

                usable_startpfn;

        }

        goto out2;

    }

该分支主要是在movable_node已经设置的情况下,忽略kernelcore和movablecore的设置,遍历各可热插拔的内存区域,取其起始页框号usable_startpfn作为对应node节点上Movable管理区的起始页框号,记录到zone_movable_pfn中。

再往下的if分支:

    if (required_movablecore) {

        unsigned long corepages;

        required_movablecore =

            roundup(required_movablecore, MAX_ORDER_NR_PAGES);

        corepages = totalpages - required_movablecore;

 

        required_kernelcore = max(required_kernelcore, corepages);

    }

该分支是当movablecore设置时,尽可能满足movablecore的设置情况下计算kernelcore的剩余空间大小,但如果kernelcore也设置时,则优先满足kernelcore的设置。

接着的:

    if (!required_kernelcore)

        goto out;

如果至此kernelcore仍未设置,则表示movable管理区其实是不存在的。

最后在restart的标签内的代码,其主要实现的是将kernelcore的内存平均分配到各个node上面。其中局部变量kernelcore_node表示各个nodes平均分摊到的内存页面数,usable_startpfn表示movable管理区的最低内存页框号,主要通过遍历node_states[N_MEMORY]中标志可用的node节点并遍历节点内的各个内存块信息,将均摊的内存页面数分到各个node当中,如果无法均摊时,通过判断:

    if (usable_nodes && required_kernelcore > usable_nodes)

        goto restart;

重新再次平均分摊,基于优先满足kernelcore的设置前提,直至无法满足条件为止。

而在out2的标签内的代码则是用于将movable管理区的起始地址做MAX_ORDER_NR_PAGES对齐操作。

末尾out的标签则仅是恢复node_states[]而已。

find_zone_movable_pfns_for_nodes()函数虽然分析了这么多,但个人实验环境中由于required_movablecore和required_kernelcore均为0,故仅分析到此。

 

下面回到free_area_init_nodes()函数中。跟随在find_zone_movable_pfns_for_nodes()后面的是一段日志打印代码,分别打印各管理区的范围信息,在个人实验环境上可通过dmesg命令查看相应输出。

再往下的mminit_verify_pageflags_layout()函数主要用于内存初始化调测使用的,由于未开启CONFIG_DEBUG_MEMORY_INIT配置项,此函数为空。而setup_nr_node_ids()是用于设置内存节点总数的,此处如果最大节点数MAX_NUMNODES不超过1,则是空函数。

free_area_init_nodes()函数末了还有一个遍历各个节点做初始化的操作,暂且留待后面再分析。

阅读(2851) | 评论(0) | 转发(2) |
给主人留下些什么吧!~~