
Category: LINUX

2016-12-21 17:42:58

The previous post analyzed how a cache is created. Now that the cache exists, let's trace how a slab object is allocated from it.
The two main allocation interfaces are kmalloc (which allocates from the general-purpose caches) and kmem_cache_alloc (which allocates from a dedicated cache).
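As a quick reminder of what the two interfaces look like from the caller's side, here is a minimal usage sketch (not taken from the post; names such as foo_cachep and alloc_examples are made up for illustration):

#include <linux/slab.h>

struct foo {
    int id;
    char name[32];
};

static struct kmem_cache *foo_cachep;    /* hypothetical dedicated cache */

static int alloc_examples(void)
{
    void *buf;
    struct foo *f;

    /* general cache: the request is rounded up to the nearest size class */
    buf = kmalloc(100, GFP_KERNEL);
    if (!buf)
        return -ENOMEM;

    /* dedicated cache: every object is exactly sizeof(struct foo) bytes */
    foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0, 0, NULL);
    if (!foo_cachep) {
        kfree(buf);
        return -ENOMEM;
    }
    f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);

    if (f)
        kmem_cache_free(foo_cachep, f);
    kmem_cache_destroy(foo_cachep);
    kfree(buf);
    return 0;
}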

Let's start with kmalloc.


static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
    struct kmem_cache *cachep;
    void *ret;

    /* __builtin_constant_p is a GCC built-in that returns 1 if its
     * argument is a compile-time constant */
    if (__builtin_constant_p(size)) {
        int i = 0;

        if (!size)
            return ZERO_SIZE_PTR;

#define CACHE(x) \
        if (size <= x) \
            goto found; \
        else \
            i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
        return NULL;
found:
#ifdef CONFIG_ZONE_DMA
        if (flags & GFP_DMA)
            cachep = malloc_sizes[i].cs_dmacachep;
        else
#endif
            cachep = malloc_sizes[i].cs_cachep;

        ret = kmem_cache_alloc_notrace(cachep, flags);

        trace_kmalloc(_THIS_IP_, ret,
                      size, slab_buffer_size(cachep), flags);

        return ret;
    }
    /* Non-constant size: take the normal allocation path */
    return __kmalloc(size, flags);
}
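The CACHE(x)/#include trick above expands kmalloc_sizes.h into a chain of size checks, so for a compile-time-constant size the compiler can resolve the size class (and hence i, the index into malloc_sizes[]) at build time. Roughly, and assuming a typical kmalloc_sizes.h (the actual size classes depend on the architecture and configuration), the preprocessed code looks like this sketch:

/* hypothetical expansion: each CACHE(x) entry becomes one test */
if (size <= 32)   goto found; else i++;
if (size <= 64)   goto found; else i++;
if (size <= 96)   goto found; else i++;
if (size <= 128)  goto found; else i++;
if (size <= 192)  goto found; else i++;
if (size <= 256)  goto found; else i++;
/* ... continues up to the largest supported size class ... */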


void *__kmalloc(size_t size, gfp_t flags)
{
    return __do_kmalloc(size, flags, NULL);
}


/**
 * __do_kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @caller: function caller for debug tracking of the caller
 */
static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
                                          void *caller)
{
    struct kmem_cache *cachep;
    void *ret;

    /* If you want to save a few bytes .text space: replace
     * __ with kmem_.
     * Then kmalloc uses the uninlined functions instead of the inline
     * functions.
     */
    /* Look up the general cache matching this size */
    cachep = __find_general_cachep(size, flags);
    /* For a zero-size request cachep is ZERO_SIZE_PTR (or NULL) and is
     * returned as-is */
    if (unlikely(ZERO_OR_NULL_PTR(cachep)))
        return cachep;
    /* The actual allocation happens here */
    ret = __cache_alloc(cachep, flags, caller);

    trace_kmalloc((unsigned long) caller, ret,
                  size, cachep->buffer_size, flags);

    return ret;
}
__cache_alloc ultimately calls __do_cache_alloc, which on a non-NUMA configuration simply forwards to ____cache_alloc:


static __always_inline void *
__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
    return ____cache_alloc(cachep, flags);
}


static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
    void *objp;
    struct array_cache *ac;

    check_irq_off();
    /* cachep->array[smp_processor_id()]: the array_cache of the current CPU */
    ac = cpu_cache_get(cachep);
    /* avail is the number of free objects currently held in the local cache */
    if (likely(ac->avail)) {
        /* Hit: a free object is available, bump the hit counter */
        STATS_INC_ALLOCHIT(cachep);
        /* Mark the local cache as recently used */
        ac->touched = 1;
        /* Pop a free object off the back; when avail drops to 0 the
         * local cache is empty */
        objp = ac->entry[--ac->avail];
    } else {
        /* Miss: the local cache has no free objects, bump the miss counter */
        STATS_INC_ALLOCMISS(cachep);
        /* Refill the local cache with free objects taken from the slab lists */
        objp = cache_alloc_refill(cachep, flags);
    }
    /*
     * To avoid a false negative, if an object that is in one of the
     * per-CPU caches is leaked, we need to make sure kmemleak doesn't
     * treat the array pointers as a reference to the object.
     */
    /* Clear the entry slot of the object just handed out */
    kmemleak_erase(&ac->entry[ac->avail]);
    return objp;
}
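The per-CPU cache manipulated above is a struct array_cache. In kernels of this era it looks roughly like the sketch below; entry[] is used as a LIFO stack, so allocation pops entry[--avail] and free pushes entry[avail++], keeping cache-hot objects close at hand:

struct array_cache {
    unsigned int avail;        /* number of free objects currently cached */
    unsigned int limit;        /* maximum number of cached objects */
    unsigned int batchcount;   /* objects moved per refill/drain */
    unsigned int touched;      /* set when the cache was used recently */
    spinlock_t lock;
    void *entry[];             /* object pointers follow the header */
};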


static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
    int batchcount;
    struct kmem_list3 *l3;
    struct array_cache *ac;
    int node;

retry:
    check_irq_off();
    /* Current NUMA node */
    node = numa_node_id();
    /* Local (per-CPU) cache */
    ac = cpu_cache_get(cachep);
    /* Number of objects to refill in one batch */
    batchcount = ac->batchcount;
    /* If the local cache has not been used recently, cap the refill at
     * BATCHREFILL_LIMIT objects */
    if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
        /*
         * If there was little recent activity on this cache, then
         * perform only a partial refill. Otherwise we could generate
         * refill bouncing.
         */
        batchcount = BATCHREFILL_LIMIT;
    }
    /* The kmem_list3 (the three slab lists) of this memory node */
    l3 = cachep->nodelists[node];

    BUG_ON(ac->avail > 0 || !l3);
    spin_lock(&l3->list_lock);

    /* See if we can refill from the shared array */
    /* The shared array cache is shared by all CPUs of this node; try to
     * transfer a batch of objects from it first */
    if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
        goto alloc_done;
    /* Shared cache absent or empty: take objects from the slab lists */
    while (batchcount > 0) {
        struct list_head *entry;
        struct slab *slabp;
        /* Get slab alloc is to come from. */
        /* Try the partially used slabs first */
        entry = l3->slabs_partial.next;
        /* Is the partial list empty? */
        if (entry == &l3->slabs_partial) {
            /* Note that the free list was just touched */
            l3->free_touched = 1;
            entry = l3->slabs_free.next;
            /* Free list empty too: a new slab must be grown */
            if (entry == &l3->slabs_free)
                goto must_grow;
        }
        /* Got a slab from one of the lists */
        slabp = list_entry(entry, struct slab, list);
        check_slabp(cachep, slabp);
        check_spinlock_acquired(cachep);

        /*
         * The slab was either on partial or free list so
         * there must be at least one object available for
         * allocation.
         */
        BUG_ON(slabp->inuse >= cachep->num);
        /* Take objects while this slab still has free ones and the batch
         * is not yet full */
        while (slabp->inuse < cachep->num && batchcount--) {
            STATS_INC_ALLOCED(cachep);
            STATS_INC_ACTIVE(cachep);
            STATS_SET_HIGH(cachep);
            /* Pull a free object out of the slab and push its (virtual)
             * address onto the local cache */
            ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
                                                  node);
        }
        check_slabp(cachep, slabp);

        /* move slabp to correct slabp list: */
        /* Remove the slab from its current list */
        list_del(&slabp->list);
        if (slabp->free == BUFCTL_END)
            /* No free objects left in this slab: move it to the full list */
            list_add(&slabp->list, &l3->slabs_full);
        else
            /* Still has free objects: move it to the partial list */
            list_add(&slabp->list, &l3->slabs_partial);
    }

must_grow:
    /* avail objects were moved from the slab lists into the local cache;
     * update the node's free-object count accordingly */
    l3->free_objects -= ac->avail;
alloc_done:
    spin_unlock(&l3->list_lock);
    /* Nothing available on the slab lists either: grow a new slab */
    if (unlikely(!ac->avail)) {
        int x;
        /* Allocate a fresh (empty) slab */
        x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

        /* cache_grow can reenable interrupts, then ac could change. */
        /* Because cache_grow may re-enable interrupts, the local cache may
         * have changed in the meantime; re-read ac */
        ac = cpu_cache_get(cachep);
        /* Growing a new slab failed and nothing was freed meanwhile */
        if (!x && ac->avail == 0)    /* no objects in sight? abort */
            return NULL;
        /* The slab was grown but the local cache is still empty: retry the refill */
        if (!ac->avail)        /* objects refilled by interrupt? */
            goto retry;
    }
    /* Mark the local cache as recently used */
    ac->touched = 1;
    /* Return the address of a free object */
    return ac->entry[--ac->avail];
}
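For orientation, the kmem_list3 that cache_alloc_refill walks is the per-node bookkeeping structure. Trimmed to the fields used above, it looks roughly like this sketch (2.6.x-era layout; other fields omitted):

struct kmem_list3 {
    struct list_head slabs_partial;  /* slabs with both used and free objects */
    struct list_head slabs_full;     /* slabs with no free objects */
    struct list_head slabs_free;     /* slabs with only free objects */
    unsigned long free_objects;      /* total free objects on this node */
    spinlock_t list_lock;
    struct array_cache *shared;      /* array cache shared by the node's CPUs */
    /* ... alien caches, reap timing, etc. omitted ... */
};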
Now let's look at a couple of the helper functions used above.


/*
 * Transfer objects in one arraycache to another.
 * Locking must be handled by the caller.
 *
 * Return the number of entries transferred.
 */
/* Move objects from the shared array cache (shared by all CPUs of the same
 * NUMA node) into the per-CPU local cache */
static int transfer_objects(struct array_cache *to,
        struct array_cache *from, unsigned int max)
{
    /* Figure out how many entries to transfer */
    int nr = min(min(from->avail, max), to->limit - to->avail);

    if (!nr)
        return 0;
    /* Copy the pointers and update both caches */
    memcpy(to->entry + to->avail, from->entry + from->avail - nr,
           sizeof(void *) * nr);

    from->avail -= nr;
    to->avail += nr;
    to->touched = 1;
    return nr;
}
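As a worked example (numbers made up): if the shared cache holds 30 free objects (from->avail = 30), the requested batch is 16 and the empty local cache has a limit of 120, then nr = min(min(30, 16), 120 - 0) = 16, so the 16 pointers at the tail of the shared array are copied into the local cache, from->avail drops to 14 and to->avail becomes 16.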


static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
                int nodeid)
{
    /* free is the index of the first free object in this slab */
    /* index_to_obj: slab->s_mem + cache->buffer_size * idx; s_mem is the
     * address of the first object in the slab, buffer_size the size of
     * each object */
    void *objp = index_to_obj(cachep, slabp, slabp->free);
    kmem_bufctl_t next;
    /* One more object is now in use in this slab */
    slabp->inuse++;
    /* Index of the next free object */
    /* slab_bufctl: (kmem_bufctl_t *) (slabp + 1) */
    next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
    slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
    WARN_ON(slabp->nodeid != nodeid);
#endif
    /* Advance free to the next free object */
    slabp->free = next;

    return objp;
}
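For reference, the two helpers mentioned in the comments are defined along these lines in mm/slab.c of this kernel generation (shown as a sketch): the bufctl index array sits immediately after struct slab, and objects are packed contiguously starting at s_mem.

static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
    /* the free-object index array lives right after the slab descriptor */
    return (kmem_bufctl_t *) (slabp + 1);
}

static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
                                 unsigned int idx)
{
    /* object idx starts at s_mem + idx * buffer_size */
    return slab->s_mem + cache->buffer_size * idx;
}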