Category: LINUX

2010-10-20 16:52:02

kmem_cache_create() must be invoked to create a new slab cache. The mainline implementation (mm/slab.c of the 2.6.3x era) is reproduced and walked through below.
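Before reading the implementation, here is a minimal usage sketch. struct my_record, my_cache and my_record_ctor are made-up names for illustration; the kmem_cache_create() signature is the one quoted below.

#include <linux/module.h>
#include <linux/slab.h>

/* hypothetical object type, for illustration only */
struct my_record {
    int id;
    char payload[48];
};

static struct kmem_cache *my_cache;

/* the constructor runs once per object when the cache populates a new page */
static void my_record_ctor(void *obj)
{
    struct my_record *r = obj;

    r->id = -1;
}

static int __init my_init(void)
{
    struct my_record *r;

    my_cache = kmem_cache_create("my_record", sizeof(struct my_record),
                                 0 /* default alignment */,
                                 SLAB_HWCACHE_ALIGN, my_record_ctor);
    if (!my_cache)
        return -ENOMEM;

    r = kmem_cache_alloc(my_cache, GFP_KERNEL);    /* take one object */
    if (r)
        kmem_cache_free(my_cache, r);              /* and return it */
    return 0;
}

static void __exit my_exit(void)
{
    /* a module must destroy its caches before unloading -- see the
     * "lost its name" warning in the listing below */
    kmem_cache_destroy(my_cache);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

With that picture in mind, the kernel-doc comment and the implementation itself follow.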

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a pointer to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
 * therefore applications must manage it themselves.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */

struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *))
{
    size_t left_over, slab_size, ralign;
    struct kmem_cache *cachep = NULL, *pc;
    gfp_t gfp;

    /*
     * Sanity checks... these are all serious usage bugs.
     */

    if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
     size > KMALLOC_MAX_SIZE) {
        printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
                name);
        BUG();
    }

    /*
     * We use cache_chain_mutex to ensure a consistent view of
     * cpu_online_mask as well. Please see cpuup_callback
     */

    if (slab_is_available()) {
        get_online_cpus();
        mutex_lock(&cache_chain_mutex);
    }

    list_for_each_entry(pc, &cache_chain, next) {
        char tmp;
        int res;

        /*
         * This happens when the module gets unloaded and doesn't
         * destroy its slab cache and no-one else reuses the vmalloc
         * area of the module. Print a warning.
         */

        res = probe_kernel_address(pc->name, tmp);
        if (res) {
            printk(KERN_ERR
             "SLAB: cache with size %d has lost its name\n",
             pc->buffer_size);
            continue;
        }

        if (!strcmp(pc->name, name)) {
            printk(KERN_ERR
             "kmem_cache_create: duplicate cache %s\n", name);
            dump_stack();
            goto oops;
        }
    }

#if DEBUG
    WARN_ON(strchr(name, ' '));    /* It confuses parsers */
#if FORCED_DEBUG
    /*
     * Enable redzoning and last user accounting, except for caches with
     * large objects, if the increased size would increase the object size
     * above the next power of two: caches with object sizes just above a
     * power of two have a significant amount of internal fragmentation.
     */

    if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
                        2 * sizeof(unsigned long long)))
        flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
    if (!(flags & SLAB_DESTROY_BY_RCU))
        flags |= SLAB_POISON;
#endif
    if (flags & SLAB_DESTROY_BY_RCU)
        BUG_ON(flags & SLAB_POISON);
#endif
    /*
     * Always check flags; a caller might expect debug support which
     * isn't available.
     */

    BUG_ON(flags & ~CREATE_MASK);

    /*
     * Check that size is in terms of words. This is needed to avoid
     * unaligned accesses for some archs when redzoning is used, and makes
     * sure any on-slab bufctl's are also correctly aligned.
     */

    if (size & (BYTES_PER_WORD - 1)) {
        size += (BYTES_PER_WORD - 1);
        size &= ~(BYTES_PER_WORD - 1);
    }

    /* calculate the final buffer alignment: */

    /* 1) arch recommendation: can be overridden for debug */
    if (flags & SLAB_HWCACHE_ALIGN) {
        /*
         * Default alignment: as specified by the arch code. Except if
         * an object is really small, then squeeze multiple objects into
         * one cacheline.
         */

        ralign = cache_line_size();
        while (size <= ralign / 2)
            ralign /= 2;
    } else {
        ralign = BYTES_PER_WORD;
    }
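    /* e.g. with 64-byte cache lines, a 20-byte object halves ralign
     * 64 -> 32 (since 20 <= 32) and stops there, so two objects share
     * one line instead of each wasting 44 bytes. */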

    /*
     * Redzoning and user store require word alignment or possibly larger.
     * Note this will be overridden by architecture or caller mandated
     * alignment if either is greater than BYTES_PER_WORD.
     */

    if (flags & SLAB_STORE_USER)
        ralign = BYTES_PER_WORD;

    if (flags & SLAB_RED_ZONE) {
        ralign = REDZONE_ALIGN;
        /* If redzoning, ensure that the second redzone is suitably
         * aligned, by adjusting the object size accordingly. */

        size += REDZONE_ALIGN - 1;
        size &= ~(REDZONE_ALIGN - 1);
    }
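    /* REDZONE_ALIGN is at least __alignof__(unsigned long long); the
     * rounding keeps the trailing red zone naturally aligned (e.g. a
     * 20-byte object grows to 24 with 8-byte red-zone alignment). */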

    /* 2) arch mandated alignment */
    if (ralign < ARCH_SLAB_MINALIGN) {
        ralign = ARCH_SLAB_MINALIGN;
    }
    /* 3) caller mandated alignment */
    if (ralign < align) {
        ralign = align;
    }
    /* disable debug if not aligning with REDZONE_ALIGN */
    if (ralign & (__alignof__(unsigned long long) - 1))
        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
    /*
     * 4) Store it.
     */

    align = ralign;

    if (slab_is_available())
        gfp = GFP_KERNEL;
    else
        gfp = GFP_NOWAIT;
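    /* GFP_NOWAIT: before the allocator is fully up we must not sleep. */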

    /* Get cache's description obj. */
    cachep = kmem_cache_zalloc(&cache_cache, gfp);
    if (!cachep)
        goto oops;

#if DEBUG
    cachep->obj_size = size;

    /*
     * Both debugging options require word-alignment which is calculated
     * into align above.
     */

    if (flags & SLAB_RED_ZONE) {
        /* add space for red zone words */
        cachep->obj_offset += align;
        size += align + sizeof(unsigned long long);
    }
    if (flags & SLAB_STORE_USER) {
        /* user store requires one word storage behind the end of
         * the real object. But if the second red zone needs to be
         * aligned to 64 bits, we must allow that much space.
         */

        if (flags & SLAB_RED_ZONE)
            size += REDZONE_ALIGN;
        else
            size += BYTES_PER_WORD;
    }
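    /* With both debug options on, each object is laid out roughly as
     *   | red zone | payload | red zone | last-user address |
     * obj_offset skips the leading red zone; the two size increases
     * above pay for the trailing red zone and the user word. */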
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
    if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
     && cachep->obj_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
        cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
        size = PAGE_SIZE;
    }
#endif
#endif

    /*
     * Determine if the slab management is 'on' or 'off' slab.
     * (bootstrapping cannot cope with offslab caches so don't do
     * it too early on. Always use on-slab management when
     * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
     */

    if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
     !(flags & SLAB_NOLEAKTRACE))
        /*
         * Size is large, assume best to place the slab management obj
         * off-slab (should allow better packing of objs).
         */

        flags |= CFLGS_OFF_SLAB;
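    /* With 4 KiB pages the threshold is 512 bytes: from there on, the
     * struct slab and bufctl array live in a separate allocation. */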

    size = ALIGN(size, align);

    left_over = calculate_slab_order(cachep, size, align, flags);
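    /* calculate_slab_order() picks a page order for the slab, fills in
     * cachep->num (objects per slab) and returns how many bytes of each
     * slab are left unused; the leftover later feeds the colouring code. */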

    if (!cachep->num) {
        printk(KERN_ERR
         "kmem_cache_create: couldn't create cache %s.\n", name);
        kmem_cache_free(&cache_cache, cachep);
        cachep = NULL;
        goto oops;
    }
    slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
             + sizeof(struct slab), align);
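    /* The management structure is a struct slab header plus one
     * kmem_bufctl_t index per object (the embedded free list), e.g. for
     * num == 32 and a 4-byte kmem_bufctl_t this is
     * ALIGN(sizeof(struct slab) + 128, align). */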

    /*
     * If the slab has been placed off-slab, and we have enough space then
     * move it on-slab. This is at the expense of any extra colouring.
     */

    if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
        flags &= ~CFLGS_OFF_SLAB;
        left_over -= slab_size;
    }

    if (flags & CFLGS_OFF_SLAB) {
        /* really off slab. No need for manual alignment */
        slab_size =
         cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);

#ifdef CONFIG_PAGE_POISONING
        /* If we're going to use the generic kernel_map_pages()
         * poisoning, then it's going to smash the contents of
         * the redzone and userword anyhow, so switch them off.
         */
        if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
            flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
    }

    cachep->colour_off = cache_line_size();
    /* Offset must be a multiple of the alignment. */
    if (cachep->colour_off < align)
        cachep->colour_off = align;
    cachep->colour = left_over / cachep->colour_off;
    cachep->slab_size = slab_size;
    cachep->flags = flags;
    cachep->gfpflags = 0;
    if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
        cachep->gfpflags |= GFP_DMA;
    cachep->buffer_size = size;
    cachep->reciprocal_buffer_size = reciprocal_value(size);

    if (flags & CFLGS_OFF_SLAB) {
        cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
        /*
         * This is a possibility for one of the malloc_sizes caches.
         * But since we go off slab only for object size greater than
         * PAGE_SIZE/8, and malloc_sizes[INDEX_L3+1].cs_size == PAGE_SIZE,
         * this should not happen at all.
         * But leave a BUG_ON for some lucky dude.
         */
        BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
    }
    cachep->ctor = ctor;
    cachep->name = name;

    if (setup_cpu_cache(cachep, gfp)) {
        __kmem_cache_destroy(cachep);
        cachep = NULL;
        goto oops;
    }

    /* cache setup completed, link it into the list */
    list_add(&cachep->next, &cache_chain);
oops:
    if (!cachep && (flags & SLAB_PANIC))
        panic("kmem_cache_create(): failed to create slab `%s'\n", name);
    if (slab_is_available()) {
        mutex_unlock(&cache_chain_mutex);
        put_online_cpus();
    }
    return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);

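The tail of the function is bookkeeping: the leftover bytes from calculate_slab_order() feed the cache-colouring fields, setup_cpu_cache() allocates the per-CPU array caches, and the finished descriptor is linked into cache_chain before the mutex is dropped. On failure NULL is returned (after freeing any partially set up descriptor), unless the caller passed SLAB_PANIC, in which case the kernel panics outright.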