2017-05-14 00:07:56

As the preceding analysis showed, initialization of the non-contiguous page management is fairly simple, so let's now look at the actual allocation implementation.

The entry point for a vmalloc memory request is vmalloc():

【file:/mm/vmalloc.c】
/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
    return __vmalloc_node_flags(size, NUMA_NO_NODE,
                    GFP_KERNEL | __GFP_HIGHMEM);
}

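Before diving into the call chain, a minimal usage sketch may help (my own illustration, not from the kernel tree; the module and symbol names are made up). It allocates, zeroes, and later frees a 1 MiB virtually contiguous buffer:

#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

static void *demo_buf;   /* virtually contiguous, physically scattered */

static int __init vmalloc_demo_init(void)
{
    demo_buf = vmalloc(1 << 20);        /* request 1 MiB */
    if (!demo_buf)
        return -ENOMEM;
    memset(demo_buf, 0, 1 << 20);       /* safe: the range is contiguous in KVA */
    return 0;
}

static void __exit vmalloc_demo_exit(void)
{
    vfree(demo_buf);                    /* unmap and free the underlying pages */
}

module_init(vmalloc_demo_init);
module_exit(vmalloc_demo_exit);
MODULE_LICENSE("GPL");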

    This function is a thin wrapper around __vmalloc_node_flags(). That helper exists mainly to let the caller specify the NUMA node from which the non-contiguous pages should be allocated.

【file:/mm/vmalloc.c】
static inline void *__vmalloc_node_flags(unsigned long size,
                    int node, gfp_t flags)
{
    return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
                    node, __builtin_return_address(0));
}

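For comparison, the node-aware public API of the same kernel era passes a real node id through the same helper. This is quoted from memory and should be treated as a sketch:

/* vmalloc_node - allocate memory on a specific NUMA node (same file) */
void *vmalloc_node(unsigned long size, int node)
{
    return __vmalloc_node_flags(size, node,
                    GFP_KERNEL | __GFP_HIGHMEM);
}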

    __vmalloc_node_flags() requests virtually contiguous memory from the given node, and is itself merely a wrapper around __vmalloc_node():

【file:/mm/vmalloc.c】
/**
 * __vmalloc_node - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
static void *__vmalloc_node(unsigned long size, unsigned long align,
                gfp_t gfp_mask, pgprot_t prot,
                int node, const void *caller)
{
    return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
                gfp_mask, prot, node, caller);
}


    This finally brings us to __vmalloc_node_range(), which is the real implementation of vmalloc. Let's analyze it in detail:

【file:/mm/vmalloc.c】
/**
 * __vmalloc_node_range - allocate virtually contiguous memory
 * @size: allocation size
 * @align: desired alignment
 * @start: vm area range start
 * @end: vm area range end
 * @gfp_mask: flags for the page level allocator
 * @prot: protection mask for the allocated pages
 * @node: node to use for allocation or NUMA_NO_NODE
 * @caller: caller's return address
 *
 * Allocate enough pages to cover @size from the page level
 * allocator with @gfp_mask flags. Map them into contiguous
 * kernel virtual space, using a pagetable protection of @prot.
 */
void *__vmalloc_node_range(unsigned long size, unsigned long align,
            unsigned long start, unsigned long end, gfp_t gfp_mask,
            pgprot_t prot, int node, const void *caller)
{
    struct vm_struct *area;
    void *addr;
    unsigned long real_size = size;

    size = PAGE_ALIGN(size);
    if (!size || (size >> PAGE_SHIFT) > totalram_pages)
        goto fail;

    area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
                  start, end, node, gfp_mask, caller);
    if (!area)
        goto fail;

    addr = __vmalloc_area_node(area, gfp_mask, prot, node);
    if (!addr)
        return NULL;

    /*
     * In this function, newly allocated vm_struct has VM_UNINITIALIZED
     * flag. It means that vm_struct is not fully initialized.
     * Now, it is fully initialized, so remove this flag here.
     */
    clear_vm_uninitialized_flag(area);

    /*
     * A ref_count = 2 is needed because vm_struct allocated in
     * __get_vm_area_node() contains a reference to the virtual address of
     * the vmalloc'ed block.
     */
    kmemleak_alloc(addr, real_size, 2, gfp_mask);

    return addr;

fail:
    warn_alloc_failed(gfp_mask, 0,
              "vmalloc: allocation failure: %lu bytes\n",
              real_size);
    return NULL;
}


This function first page-aligns the requested size and fails if the result is zero or exceeds the total number of RAM pages. It then calls __get_vm_area_node() to reserve a matching range of kernel virtual address space, returning the management structure vm_struct; __vmalloc_area_node() then allocates the physical pages and maps them according to that vm_struct; clear_vm_uninitialized_flag() marks the area as fully initialized; and finally kmemleak_alloc() registers the allocation with the memory-leak detector.
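To make the alignment step concrete, here is roughly how PAGE_ALIGN behaves. This is a simplified sketch of the real macros, assuming 4 KiB pages; the kernel's own definitions live in the page-related headers and differ slightly in form:

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1UL << PAGE_SHIFT)                    /* 4096 */
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* PAGE_ALIGN(1)    == 4096
 * PAGE_ALIGN(4096) == 4096
 * PAGE_ALIGN(4097) == 8192
 * PAGE_ALIGN(0)    == 0   -> caught by the !size check above
 */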

Diving into __get_vm_area_node():

【file:/mm/vmalloc.c】
static struct vm_struct *__get_vm_area_node(unsigned long size,
        unsigned long align, unsigned long flags, unsigned long start,
        unsigned long end, int node, gfp_t gfp_mask, const void *caller)
{
    struct vmap_area *va;
    struct vm_struct *area;

    BUG_ON(in_interrupt());
    if (flags & VM_IOREMAP)
        align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);

    size = PAGE_ALIGN(size);
    if (unlikely(!size))
        return NULL;

    area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
    if (unlikely(!area))
        return NULL;

    /*
     * We always allocate a guard page.
     */
    size += PAGE_SIZE;

    va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
    if (IS_ERR(va)) {
        kfree(area);
        return NULL;
    }

    setup_vmalloc_vm(area, va, flags, caller);

    return area;
}


    If the VM_IOREMAP flag is set, the alignment is adjusted for architecture-specific I/O remapping. The size is then rounded up with PAGE_ALIGN, and NULL is returned if the aligned size is zero. Next, kzalloc_node() allocates the vm_struct management structure. One extra page is added to the request as a guard page, after which alloc_vmap_area() reserves an unmapped region within the specified virtual address range; note that this only carves out kernel virtual address space, as no physical pages are involved yet. Finally setup_vmalloc_vm() ties the vm_struct and vmap_area together, so the allocated virtual address range can be handed back to the caller.
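For reference, the two management structures involved look roughly like this in kernels of this era (abridged from include/linux/vmalloc.h and mm/vmalloc.c; the exact field set varies slightly between versions):

struct vm_struct {
    struct vm_struct    *next;
    void                *addr;       /* start of the virtual range */
    unsigned long       size;        /* size including the guard page */
    unsigned long       flags;       /* VM_ALLOC, VM_IOREMAP, ... */
    struct page         **pages;     /* the backing physical pages */
    unsigned int        nr_pages;
    phys_addr_t         phys_addr;   /* used by ioremap */
    const void          *caller;
};

struct vmap_area {
    unsigned long       va_start;
    unsigned long       va_end;
    unsigned long       flags;
    struct rb_node      rb_node;     /* node in vmap_area_root */
    struct list_head    list;        /* entry in address-sorted vmap_area_list */
    struct vm_struct    *vm;
    /* ... lazy-purge bookkeeping omitted ... */
};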

alloc_vmap_area() is the concrete implementation of the virtual address range allocation.

【file:/mm/vmalloc.c】
/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
                unsigned long align,
                unsigned long vstart, unsigned long vend,
                int node, gfp_t gfp_mask)
{
    struct vmap_area *va;
    struct rb_node *n;
    unsigned long addr;
    int purged = 0;
    struct vmap_area *first;

    BUG_ON(!size);
    BUG_ON(size & ~PAGE_MASK);
    BUG_ON(!is_power_of_2(align));

    va = kmalloc_node(sizeof(struct vmap_area),
            gfp_mask & GFP_RECLAIM_MASK, node);
    if (unlikely(!va))
        return ERR_PTR(-ENOMEM);

    /*
     * Only scan the relevant parts containing pointers to other objects
     * to avoid false negatives.
     */
    kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);

retry:
    spin_lock(&vmap_area_lock);
    /*
     * Invalidate cache if we have more permissive parameters.
     * cached_hole_size notes the largest hole noticed _below_
     * the vmap_area cached in free_vmap_cache: if size fits
     * into that hole, we want to scan from vstart to reuse
     * the hole instead of allocating above free_vmap_cache.
     * Note that __free_vmap_area may update free_vmap_cache
     * without updating cached_hole_size or cached_align.
     */
    if (!free_vmap_cache ||
            size < cached_hole_size ||
            vstart < cached_vstart ||
            align < cached_align) {
nocache:
        cached_hole_size = 0;
        free_vmap_cache = NULL;
    }
    /* record if we encounter less permissive parameters */
    cached_vstart = vstart;
    cached_align = align;

    /* find starting point for our search */
    if (free_vmap_cache) {
        first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
        addr = ALIGN(first->va_end, align);
        if (addr < vstart)
            goto nocache;
        if (addr + size < addr)
            goto overflow;

    } else {
        addr = ALIGN(vstart, align);
        if (addr + size < addr)
            goto overflow;

        n = vmap_area_root.rb_node;
        first = NULL;

        while (n) {
            struct vmap_area *tmp;
            tmp = rb_entry(n, struct vmap_area, rb_node);
            if (tmp->va_end >= addr) {
                first = tmp;
                if (tmp->va_start <= addr)
                    break;
                n = n->rb_left;
            } else
                n = n->rb_right;
        }

        if (!first)
            goto found;
    }

    /* from the starting point, walk areas until a suitable hole is found */
    while (addr + size > first->va_start && addr + size <= vend) {
        if (addr + cached_hole_size < first->va_start)
            cached_hole_size = first->va_start - addr;
        addr = ALIGN(first->va_end, align);
        if (addr + size < addr)
            goto overflow;

        if (list_is_last(&first->list, &vmap_area_list))
            goto found;

        first = list_entry(first->list.next,
                struct vmap_area, list);
    }

found:
    if (addr + size > vend)
        goto overflow;

    va->va_start = addr;
    va->va_end = addr + size;
    va->flags = 0;
    __insert_vmap_area(va);
    free_vmap_cache = &va->rb_node;
    spin_unlock(&vmap_area_lock);

    BUG_ON(va->va_start & (align-1));
    BUG_ON(va->va_start < vstart);
    BUG_ON(va->va_end > vend);

    return va;

overflow:
    spin_unlock(&vmap_area_lock);
    if (!purged) {
        purge_vmap_area_lazy();
        purged = 1;
        goto retry;
    }
    if (printk_ratelimit())
        printk(KERN_WARNING
            "vmap allocation for size %lu failed: "
            "use vmalloc=<size> to increase size.\n", size);
    kfree(va);
    return ERR_PTR(-EBUSY);
}


It first allocates a vmap_area with kmalloc_node(), keeping only the GFP_RECLAIM_MASK bits of the gfp flags, and then calls kmemleak_scan_area() to register the structure as a scan area for the leak detector. After taking vmap_area_lock, the first conditional decides whether the search cache can be used: if free_vmap_cache is NULL (as on the very first vmalloc allocation), or if size is smaller than cached_hole_size (the largest hole seen below the cached area, meaning a reusable hole may exist further down), or if vstart or align is more permissive than the recorded cached_vstart/cached_align, then the cache is invalidated and the search restarts from the beginning of the vmalloc range. The code then records the current vstart and align in cached_vstart and cached_align.
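The invalidation rule can be summarized as follows (a restatement of the code above, not kernel source):

/*
 * The cache is only a valid shortcut when the new request is no more
 * permissive than the one that populated it:
 *
 *   size   < cached_hole_size -> a hole below the cached area might fit now
 *   vstart < cached_vstart    -> caller accepts lower addresses than cached
 *   align  < cached_align     -> a smaller alignment may fit earlier holes
 *
 * Any of these forces free_vmap_cache = NULL, restarting the scan at vstart.
 */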

Next the code picks a starting point for the search. free_vmap_cache records the most recently freed or most recently inserted vmap_area, and exists to speed up the search. If it is non-NULL, the candidate address derived from it is validated: if it falls below vstart the cache is abandoned (goto nocache), and if the address arithmetic overflows the code jumps straight to the overflow path. If the cache cannot be used, an overflow check is done first and then the vmap_area_root red-black tree is walked to find the first allocated area whose range ends at or above vstart. If no such area exists, first is NULL, meaning the address space starting at vstart has never been used and can be allocated directly. Otherwise the while loop walks the address-sorted vmap_area_list starting from first, looking for a hole between consecutive areas large enough for the request. The walk ends either when such a hole is found or when the end of the list is reached; barring overflow, both cases mean a usable range has been located.
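A short worked trace may help (hypothetical addresses; assume free_vmap_cache is NULL and two areas already exist):

/*
 * Existing areas (address-sorted):
 *   A = [0xf0000000, 0xf0003000)      B = [0xf0008000, 0xf000a000)
 * Request: size = 0x4000, align = 0x1000, vstart = 0xf0000000.
 *
 * 1. addr = ALIGN(vstart) = 0xf0000000; the rb-tree walk finds A
 *    (the first area with va_end >= addr), so first = A.
 * 2. Loop: addr + size = 0xf0004000 > A->va_start, so A overlaps;
 *    advance addr = ALIGN(A->va_end) = 0xf0003000 and step to B.
 * 3. Loop test: addr + size = 0xf0007000 is not > B->va_start
 *    (0xf0008000), so the hole between A and B fits -> "found".
 * Result: va = [0xf0003000, 0xf0007000).
 */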

Once a suitable virtual address range is found, it is recorded in the vmap_area structure, which __insert_vmap_area() then inserts into both the vmap_area_root red-black tree and the vmap_area_list linked list.

At this point the virtual address range has been allocated.

    To really understand how these virtual address ranges are organized, however, we need to step into __insert_vmap_area().

【file:/mm/vmalloc.c】
static void __insert_vmap_area(struct vmap_area *va)
{
    struct rb_node **p = &vmap_area_root.rb_node;
    struct rb_node *parent = NULL;
    struct rb_node *tmp;

    while (*p) {
        struct vmap_area *tmp_va;

        parent = *p;
        tmp_va = rb_entry(parent, struct vmap_area, rb_node);
        if (va->va_start < tmp_va->va_end)
            p = &(*p)->rb_left;
        else if (va->va_end > tmp_va->va_start)
            p = &(*p)->rb_right;
        else
            BUG();
    }

    rb_link_node(&va->rb_node, parent, p);
    rb_insert_color(&va->rb_node, &vmap_area_root);

    /* address-sort this list */
    tmp = rb_prev(&va->rb_node);
    if (tmp) {
        struct vmap_area *prev;
        prev = rb_entry(tmp, struct vmap_area, rb_node);
        list_add_rcu(&va->list, &prev->list);
    } else
        list_add_rcu(&va->list, &vmap_area_list);
}


In this function, the while loop descends the vmap_area_root red-black tree to find the insertion point; rb_link_node() links the new node in and rb_insert_color() rebalances the tree. Finally, using the in-order predecessor obtained via rb_prev(), the new area is spliced into vmap_area_list. From this logic we can see that allocated virtual address ranges are managed in a red-black tree ordered by address, while the linked list chains all the ranges together in ascending address order.
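A consequence of this ordering is that all mapped ranges can be visited in ascending address order by walking the list alone. A minimal fragment sketch (my illustration, not from the post):

/* Walk every vmap_area in ascending address order (RCU-protected list). */
struct vmap_area *va;

rcu_read_lock();
list_for_each_entry_rcu(va, &vmap_area_list, list)
    pr_info("vmap area [%#lx - %#lx)\n", va->va_start, va->va_end);
rcu_read_unlock();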

[Figure: an example of vmap_areas organized in the vmap_area_root red-black tree (image not preserved).]

[Figure: the corresponding view of the same areas chained on the address-sorted vmap_area_list (image not preserved).]

    Returning to the __vmalloc_node_range() implementation, let's look at __vmalloc_area_node().

【file:/mm/vmalloc.c】
static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                 pgprot_t prot, int node)
{
    const int order = 0;
    struct page **pages;
    unsigned int nr_pages, array_size, i;
    gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

    nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
    array_size = (nr_pages * sizeof(struct page *));

    area->nr_pages = nr_pages;
    /* Please note that the recursion is strictly bounded. */
    if (array_size > PAGE_SIZE) {
        pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
                PAGE_KERNEL, node, area->caller);
        area->flags |= VM_VPAGES;
    } else {
        pages = kmalloc_node(array_size, nested_gfp, node);
    }
    area->pages = pages;
    if (!area->pages) {
        remove_vm_area(area->addr);
        kfree(area);
        return NULL;
    }

    for (i = 0; i < area->nr_pages; i++) {
        struct page *page;
        gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;

        if (node == NUMA_NO_NODE)
            page = alloc_page(tmp_mask);
        else
            page = alloc_pages_node(node, tmp_mask, order);

        if (unlikely(!page)) {
            /* Successfully allocated i pages, free them in __vunmap() */
            area->nr_pages = i;
            goto fail;
        }
        area->pages[i] = page;
    }

    if (map_vm_area(area, prot, &pages))
        goto fail;
    return area->addr;

fail:
    warn_alloc_failed(gfp_mask, order,
              "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
              (area->nr_pages*PAGE_SIZE), area->size);
    vfree(area->addr);
    return NULL;
}


This function first computes nr_pages, the number of pages needed, and array_size, the size of the array holding that many page pointers. If the array itself needs more than one page, it is allocated recursively via __vmalloc_node() and the area is flagged VM_VPAGES; otherwise kmalloc_node() is used. If allocating the pointer array fails, the whole request fails and the previously reserved virtual address range is released. The for loop then allocates the physical pages one at a time with alloc_page()/alloc_pages_node(); once all pages are in hand, map_vm_area() establishes the page-table mappings into the reserved virtual range.
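To see which branch the pointer-array allocation takes, here is a worked example (assuming 4 KiB pages and 8-byte pointers, i.e. a 64-bit kernel):

/*
 * vmalloc(1 MiB): nr_pages = 256,  array_size = 256 * 8  = 2 KiB
 *                 -> fits in one page, kmalloc_node() is used.
 * vmalloc(4 MiB): nr_pages = 1024, array_size = 1024 * 8 = 8 KiB
 *                 -> larger than a page, so the pages[] array itself is
 *                    allocated with __vmalloc_node() and VM_VPAGES is set.
 */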

This concludes the analysis of how vmalloc allocates virtually contiguous but physically non-contiguous memory.
