Category: LINUX

2015-12-04 09:34:22

kmalloc code path

Call chain: kmalloc() -> __kmalloc() -> __do_kmalloc() -> __cache_alloc() -> __do_cache_alloc() -> ____cache_alloc() -> cache_alloc_refill() -> cache_grow() -> kmem_getpages() -> alloc_pages_exact_node() -> __alloc_pages()
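
For context, a minimal caller looks like the sketch below (a hypothetical example, not part of the kernel source quoted in this post). A compile-time-constant size lets the inlined kmalloc() in slab_def.h resolve the cache index at compile time; a runtime size falls through to __kmalloc().

#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical caller: constant size -> inline size-class lookup in
 * kmalloc(); a variable size would go straight to __kmalloc(). */
static int example_alloc(void)
{
 char *buf = kmalloc(128, GFP_KERNEL); /* may sleep, may return NULL */

 if (!buf)
  return -ENOMEM;

 /* ... use buf ... */

 kfree(buf);
 return 0;
}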

 

include/linux/slab_def.h

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
 struct kmem_cache *cachep;
 void *ret;

 if (__builtin_constant_p(size)) {
  int i = 0;

  if (!size)
   return ZERO_SIZE_PTR;

#define CACHE(x) \
  if (size <= x) \
   goto found; \
  else \
   i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
  return NULL;
found:
#ifdef CONFIG_ZONE_DMA
  if (flags & GFP_DMA)
   cachep = malloc_sizes[i].cs_dmacachep;
  else
#endif
   cachep = malloc_sizes[i].cs_cachep;

  ret = kmem_cache_alloc_trace(size, cachep, flags);

  return ret;
 }
 return __kmalloc(size, flags);
}
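
The #include <linux/kmalloc_sizes.h> line expands the CACHE(x) macro once per general cache size, so the compiler effectively emits a chain of "if (size <= N) goto found;" tests while counting the index i. A standalone sketch of that lookup is below; the size table is illustrative only (the real list comes from kmalloc_sizes.h and depends on the kernel configuration), and the names are mine, not the kernel's.

#include <stddef.h>

/* Illustrative size classes; the real table is generated from kmalloc_sizes.h */
static const size_t cache_sizes[] = { 32, 64, 128, 256, 512, 1024, 2048, 4096 };

/* Return the index of the smallest general cache that fits 'size',
 * i.e. the value of 'i' that the CACHE(x) expansion computes, or -1
 * if the request is larger than every general cache. */
static int size_to_index(size_t size)
{
 int i;

 for (i = 0; i < (int)(sizeof(cache_sizes) / sizeof(cache_sizes[0])); i++)
  if (size <= cache_sizes[i])
   return i; /* the "goto found" case */
 return -1;
}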

 

mm/slab.c

void *__kmalloc(size_t size, gfp_t flags)
{
 return __do_kmalloc(size, flags, NULL);
}

 

 

static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
       void *caller)
{
 struct kmem_cache *cachep;
 void *ret;

 /* If you want to save a few bytes .text space: replace
  * __ with kmem_.
  * Then kmalloc uses the uninlined functions instead of the inline
  * functions.
  */
 cachep = __find_general_cachep(size, flags);
 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
  return cachep;
 ret = __cache_alloc(cachep, flags, caller);

 trace_kmalloc((unsigned long) caller, ret,
        size, cachep->buffer_size, flags);

 return ret;
}

 

static __always_inline void *
__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
{
 unsigned long save_flags;
 void *objp;

 flags &= gfp_allowed_mask;

 lockdep_trace_alloc(flags);

 if (slab_should_failslab(cachep, flags))
  return NULL;

 cache_alloc_debugcheck_before(cachep, flags);
 local_irq_save(save_flags);
 objp = __do_cache_alloc(cachep, flags);
 local_irq_restore(save_flags);
 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
 kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags,
     flags);
 prefetchw(objp);

 if (likely(objp))
  kmemcheck_slab_alloc(cachep, flags, objp, obj_size(cachep));

 if (unlikely((flags & __GFP_ZERO) && objp))
  memset(objp, 0, obj_size(cachep));

 return objp;
}
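
The final memset() on __GFP_ZERO is also what backs kzalloc(): in include/linux/slab.h, kzalloc() is essentially just kmalloc() with __GFP_ZERO OR-ed in, roughly as follows (paraphrased, shown here for reference).

static inline void *kzalloc(size_t size, gfp_t flags)
{
 return kmalloc(size, flags | __GFP_ZERO);
}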

 

static __always_inline void *
__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
{
 void *objp;

 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
  objp = alternate_node_alloc(cache, flags);
  if (objp)
   goto out;
 }
 objp = ____cache_alloc(cache, flags);

 /*
  * We may just have run out of memory on the local node.
  * ____cache_alloc_node() knows how to locate memory on other nodes
  */
 if (!objp)
  objp = ____cache_alloc_node(cache, flags, numa_mem_id());

  out:
 return objp;

}

static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
{
 void *objp;
 struct array_cache *ac;

 check_irq_off();

 ac = cpu_cache_get(cachep);
 if (likely(ac->avail)) {
  STATS_INC_ALLOCHIT(cachep);
  ac->touched = 1;
  objp = ac->entry[--ac->avail];
 } else {
  STATS_INC_ALLOCMISS(cachep);
  objp = cache_alloc_refill(cachep, flags);
  /*
   * the 'ac' may be updated by cache_alloc_refill(),
   * and kmemleak_erase() requires its correct value.
   */
  ac = cpu_cache_get(cachep);
 }
 /*
  * To avoid a false negative, if an object that is in one of the
  * per-CPU caches is leaked, we need to make sure kmemleak doesn't
  * treat the array pointers as a reference to the object.
  */
 if (objp)
  kmemleak_erase(&ac->entry[ac->avail]);
 return objp;
}
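
The fast path above is a per-CPU LIFO stack of object pointers: a hit pops the most recently freed (and therefore cache-hot) object, a miss falls back to cache_alloc_refill(). A standalone sketch of the idea follows; the field names loosely follow struct array_cache, but the types and functions are mine, not kernel code.

#include <stddef.h>

struct toy_array_cache {
 unsigned int avail; /* number of cached object pointers */
 unsigned int limit; /* capacity of entry[] */
 void *entry[64];    /* fixed capacity, hypothetical for the sketch */
};

/* Allocation fast path: pop the most recently pushed object. */
static void *toy_alloc(struct toy_array_cache *ac)
{
 if (ac->avail)
  return ac->entry[--ac->avail];
 return NULL; /* miss: the real code calls cache_alloc_refill() */
}

/* Free fast path: push the object back for cache-hot reuse. */
static int toy_free(struct toy_array_cache *ac, void *obj)
{
 if (ac->avail < ac->limit) {
  ac->entry[ac->avail++] = obj;
  return 1;
 }
 return 0; /* full: the real code flushes a batch back to the slab lists */
}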

 

static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
{
 int batchcount;
 struct kmem_list3 *l3;
 struct array_cache *ac;
 int node;

retry:
 check_irq_off();
 node = numa_mem_id();
 ac = cpu_cache_get(cachep);
 batchcount = ac->batchcount;
 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
  /*
   * If there was little recent activity on this cache, then
   * perform only a partial refill.  Otherwise we could generate
   * refill bouncing.
   */
  batchcount = BATCHREFILL_LIMIT;
 }
 l3 = cachep->nodelists[node];

 BUG_ON(ac->avail > 0 || !l3);
 spin_lock(&l3->list_lock);

 /* See if we can refill from the shared array */
 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
  l3->shared->touched = 1;
  goto alloc_done;
 }

 while (batchcount > 0) {
  struct list_head *entry;
  struct slab *slabp;
  /* Get slab alloc is to come from. */
  entry = l3->slabs_partial.next;
  if (entry == &l3->slabs_partial) {
   l3->free_touched = 1;
   entry = l3->slabs_free.next;
   if (entry == &l3->slabs_free)
    goto must_grow;
  }

  slabp = list_entry(entry, struct slab, list);
  check_slabp(cachep, slabp);
  check_spinlock_acquired(cachep);

  /*
   * The slab was either on partial or free list so
   * there must be at least one object available for
   * allocation.
   */
  BUG_ON(slabp->inuse >= cachep->num);

  while (slabp->inuse < cachep->num && batchcount--) {
   STATS_INC_ALLOCED(cachep);
   STATS_INC_ACTIVE(cachep);
   STATS_SET_HIGH(cachep);

   ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
           node);
  }
  check_slabp(cachep, slabp);

  /* move slabp to correct slabp list: */
  list_del(&slabp->list);
  if (slabp->free == BUFCTL_END)
   list_add(&slabp->list, &l3->slabs_full);
  else
   list_add(&slabp->list, &l3->slabs_partial);
 }

must_grow:
 l3->free_objects -= ac->avail;
alloc_done:
 spin_unlock(&l3->list_lock);

 if (unlikely(!ac->avail)) {
  int x;
  x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);

  /* cache_grow can reenable interrupts, then ac could change. */
  ac = cpu_cache_get(cachep);
  if (!x && ac->avail == 0) /* no objects in sight? abort */
   return NULL;

  if (!ac->avail)  /* objects refilled by interrupt? */
   goto retry;
 }
 ac->touched = 1;
 return ac->entry[--ac->avail];
}
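
cache_alloc_refill() pulls up to batchcount objects out of the node's slab lists (partial slabs first, then free slabs) into the per-CPU array, and grows the cache via cache_grow() if both lists are empty. The per-node state it manipulates is struct kmem_list3; below is a trimmed-down illustration of the fields used above, not the full kernel definition.

#include <linux/list.h>
#include <linux/spinlock.h>

/* Illustrative subset of struct kmem_list3 (mm/slab.c of this era). */
struct toy_kmem_list3 {
 struct list_head slabs_partial; /* slabs with some objects allocated */
 struct list_head slabs_full;    /* slabs with every object allocated */
 struct list_head slabs_free;    /* slabs with no objects allocated */
 unsigned long free_objects;     /* free objects on partial + free slabs */
 spinlock_t list_lock;           /* protects the three lists */
};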

 

static int cache_grow(struct kmem_cache *cachep,
  gfp_t flags, int nodeid, void *objp)
{
 struct slab *slabp;
 size_t offset;
 gfp_t local_flags;
 struct kmem_list3 *l3;

 /*
  * Be lazy and only check for valid flags here,  keeping it out of the
  * critical path in kmem_cache_alloc().
  */
 BUG_ON(flags & GFP_SLAB_BUG_MASK);
 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);

 /* Take the l3 list lock to change the colour_next on this node */
 check_irq_off();
 l3 = cachep->nodelists[nodeid];
 spin_lock(&l3->list_lock);

 /* Get colour for the slab, and cal the next value. */
 offset = l3->colour_next;
 l3->colour_next++;
 if (l3->colour_next >= cachep->colour)
  l3->colour_next = 0;
 spin_unlock(&l3->list_lock);

 offset *= cachep->colour_off;

 if (local_flags & __GFP_WAIT)
  local_irq_enable();

 /*
  * The test for missing atomic flag is performed here, rather than
  * the more obvious place, simply to reduce the critical path length
  * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
  * will eventually be caught here (where it matters).
  */
 kmem_flagcheck(cachep, flags);

 /*
  * Get mem for the objs.  Attempt to allocate a physical page from
  * 'nodeid'.
  */
 if (!objp)
  objp = kmem_getpages(cachep, local_flags, nodeid);
 if (!objp)
  goto failed;

 /* Get slab management. */
 slabp = alloc_slabmgmt(cachep, objp, offset,
   local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
 if (!slabp)
  goto opps1;

 slab_map_pages(cachep, slabp, objp);

 cache_init_objs(cachep, slabp);

 if (local_flags & __GFP_WAIT)
  local_irq_disable();
 check_irq_off();
 spin_lock(&l3->list_lock);

 /* Make slab active. */
 list_add_tail(&slabp->list, &(l3->slabs_free));
 STATS_INC_GROWN(cachep);
 l3->free_objects += cachep->num;
 spin_unlock(&l3->list_lock);
 return 1;
opps1:
 kmem_freepages(cachep, objp);
failed:
 if (local_flags & __GFP_WAIT)
  local_irq_disable();
 return 0;
}
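
The offset computed from colour_next and colour_off is the slab "colour": each newly grown slab places its first object at a slightly different byte offset so that objects from different slabs do not all land on the same cache lines. A standalone sketch of that cycling (mirroring the logic in cache_grow(), but not kernel code):

/* Return the byte offset for the next slab and advance the colour. */
static unsigned int next_colour_offset(unsigned int *colour_next,
           unsigned int colour,     /* number of colours */
           unsigned int colour_off) /* step, typically the cache line size */
{
 unsigned int offset = *colour_next;

 (*colour_next)++;
 if (*colour_next >= colour)
  *colour_next = 0; /* wrap around */

 return offset * colour_off;
}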

 

static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
{
 struct page *page;
 int nr_pages;
 int i;

#ifndef CONFIG_MMU
 /*
  * Nommu uses slab's for process anonymous memory allocations, and thus
  * requires __GFP_COMP to properly refcount higher order allocations
  */
 flags |= __GFP_COMP;
#endif

 flags |= cachep->gfpflags;
 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
  flags |= __GFP_RECLAIMABLE;

 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
 if (!page)
  return NULL;

 nr_pages = (1 << cachep->gfporder);
 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
  add_zone_page_state(page_zone(page),
   NR_SLAB_RECLAIMABLE, nr_pages);
 else
  add_zone_page_state(page_zone(page),
   NR_SLAB_UNRECLAIMABLE, nr_pages);
 for (i = 0; i < nr_pages; i++)
  __SetPageSlab(page + i);

 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
  kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);

  if (cachep->ctor)
   kmemcheck_mark_uninitialized_pages(page, nr_pages);
  else
   kmemcheck_mark_unallocated_pages(page, nr_pages);
 }

 return page_address(page);
}

 

 

static inline struct page *alloc_pages_exact_node(int nid, gfp_t gfp_mask,
      unsigned int order)
{
 VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);

 return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
