
Category: LINUX

2016-12-21 17:41:55

The kernel provides the kmem_cache_create function for creating a cache, so let's start directly from this API.

The function is fairly long, so we'll go through it line by line.
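
Before diving into the internals, here is a minimal usage sketch of the API (the struct foo cache, its fields, and the module hooks are hypothetical examples, not taken from the kernel source):

#include <linux/module.h>
#include <linux/slab.h>

/* a hypothetical object type to be carved out of the cache */
struct foo {
    int id;
    char buf[48];
};

static struct kmem_cache *foo_cachep;

static int __init foo_init(void)
{
    /* the name string must stay valid until kmem_cache_destroy() */
    foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
                                   0,                  /* default alignment */
                                   SLAB_HWCACHE_ALIGN, /* cache-line align */
                                   NULL);              /* no constructor */
    if (!foo_cachep)
        return -ENOMEM;
    return 0;
}

static void __exit foo_exit(void)
{
    /* a module must destroy its caches before it is unloaded */
    kmem_cache_destroy(foo_cachep);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Objects are then taken from and returned to the cache with kmem_cache_alloc(foo_cachep, GFP_KERNEL) and kmem_cache_free(foo_cachep, p).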


/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * @name must be valid until the cache is destroyed. This implies that
 * the module calling this has to destroy the cache before getting unloaded.
 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
 * therefore applications must manage it themselves.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 */
/* After a successful create the cache holds no slabs or objects; a new
 * slab is created only when an object is first allocated */
struct kmem_cache *
kmem_cache_create (const char *name, size_t size, size_t align,
    unsigned long flags, void (*ctor)(void *))
{
    size_t left_over, slab_size, ralign;
    struct kmem_cache *cachep = NULL, *pc;
    gfp_t gfp;

    /*
     * Sanity checks... these are all serious usage bugs.
     */
    /* Bail out if no name was given, we are in interrupt context, the
     * object size is smaller than a machine word, or the object size
     * exceeds KMALLOC_MAX_SIZE */
    if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
        size > KMALLOC_MAX_SIZE) {
        printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
                name);
        BUG();
    }

    /*
     * We use cache_chain_mutex to ensure a consistent view of
     * cpu_online_mask as well. Please see cpuup_callback
     */
    /* Check whether the slab allocator is up (g_cpucache_up >= EARLY, see
     * the earlier initialization analysis). During kernel boot only one
     * CPU is running the initialization, so no locking is needed then */
    if (slab_is_available()) {
        get_online_cpus();
        mutex_lock(&cache_chain_mutex);
    }
    /* Every created cache is linked on the cache_chain list; walk it to
     * check for name clashes */
    list_for_each_entry(pc, &cache_chain, next) {
        char tmp;
        int res;

        /*
         * This happens when the module gets unloaded and doesn't
         * destroy its slab cache and no-one else reuses the vmalloc
         * area of the module. Print a warning.
         */
        /* Probe whether this cache's name pointer is still valid; warn
         * and skip it if not */
        res = probe_kernel_address(pc->name, tmp);
        if (res) {
            printk(KERN_ERR
                   "SLAB: cache with size %d has lost its name\n",
                   pc->buffer_size);
            continue;
        }
        /* Reject a cache whose name duplicates an existing one */
        if (!strcmp(pc->name, name)) {
            printk(KERN_ERR
                   "kmem_cache_create: duplicate cache %s\n", name);
            dump_stack();
            goto oops;
        }
    }

#if DEBUG    // debug only, skip
    WARN_ON(strchr(name, ' '));    /* It confuses parsers */
#if FORCED_DEBUG // debug only, skip
    /*
     * Enable redzoning and last user accounting, except for caches with
     * large objects, if the increased size would increase the object size
     * above the next power of two: caches with object sizes just above a
     * power of two have a significant amount of internal fragmentation.
     */
    if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
                        2 * sizeof(unsigned long long)))
        flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
    if (!(flags & SLAB_DESTROY_BY_RCU))
        flags |= SLAB_POISON;
#endif
    if (flags & SLAB_DESTROY_BY_RCU)
        BUG_ON(flags & SLAB_POISON);
#endif
    /*
     * Always checks flags, a caller might be expecting debug support which
     * isn't available.
     */
    BUG_ON(flags & ~CREATE_MASK);

    /*
     * Check that size is in terms of words. This is needed to avoid
     * unaligned accesses for some archs when redzoning is used, and makes
     * sure any on-slab bufctl's are also correctly aligned.
     */
    /* Round size up to a multiple of BYTES_PER_WORD */
    if (size & (BYTES_PER_WORD - 1)) {
        size += (BYTES_PER_WORD - 1);
        size &= ~(BYTES_PER_WORD - 1);
    }

    /* calculate the final buffer alignment: */

    /* 1) arch recommendation: can be overridden for debug */
    /* Align to the hardware cache line (cache_line_size()); the actual
     * alignment unit is scaled down according to the object size */
    if (flags & SLAB_HWCACHE_ALIGN) {
        /*
         * Default alignment: as specified by the arch code. Except if
         * an object is really small, then squeeze multiple objects into
         * one cacheline.
         */
        ralign = cache_line_size();
        while (size <= ralign / 2)
            ralign /= 2;
    } else {
        ralign = BYTES_PER_WORD;
    }

    /*
     * Redzoning and user store require word alignment or possibly larger.
     * Note this will be overridden by architecture or caller mandated
     * alignment if either is greater than BYTES_PER_WORD.
     */
    if (flags & SLAB_STORE_USER)
        ralign = BYTES_PER_WORD;

    if (flags & SLAB_RED_ZONE) {
        ralign = REDZONE_ALIGN;
        /* If redzoning, ensure that the second redzone is suitably
         * aligned, by adjusting the object size accordingly. */
        size += REDZONE_ALIGN - 1;
        size &= ~(REDZONE_ALIGN - 1);
    }

    /* 2) arch mandated alignment */
    if (ralign < ARCH_SLAB_MINALIGN) {
        ralign = ARCH_SLAB_MINALIGN;
    }
    /* 3) caller mandated alignment */
    if (ralign < align) {
        ralign = align;
    }
    /* disable debug if necessary */
    if (ralign > __alignof__(unsigned long long))
        flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
    /*
     * 4) Store it.
     */
    /* Store the final alignment */
    align = ralign;

    /* If the slab allocator is available, GFP_KERNEL may be used, which
     * allows sleeping during the allocation */
    if (slab_is_available())
        gfp = GFP_KERNEL;
    else
        /* GFP_NOWAIT is used before slab initialization completes: it
         * must not block and allocates from low memory only */
        gfp = GFP_NOWAIT;

    /* Get cache's description obj. */
    /* Allocate and zero-initialize a struct kmem_cache descriptor; the
     * objects of cache_cache are themselves struct kmem_cache */
    cachep = kmem_cache_zalloc(&cache_cache, gfp);
    if (!cachep)
        goto oops;

#if DEBUG
    cachep->obj_size = size;

    /*
     * Both debugging options require word-alignment which is calculated
     * into align above.
     */
    if (flags & SLAB_RED_ZONE) {
        /* add space for red zone words */
        cachep->obj_offset += sizeof(unsigned long long);
        size += 2 * sizeof(unsigned long long);
    }
    if (flags & SLAB_STORE_USER) {
        /* user store requires one word storage behind the end of
         * the real object. But if the second red zone needs to be
         * aligned to 64 bits, we must allow that much space.
         */
        if (flags & SLAB_RED_ZONE)
            size += REDZONE_ALIGN;
        else
            size += BYTES_PER_WORD;
    }
#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
    if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
        && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
        cachep->obj_offset += PAGE_SIZE - size;
        size = PAGE_SIZE;
    }
#endif
#endif

    /*
     * Determine if the slab management is 'on' or 'off' slab.
     * (bootstrapping cannot cope with offslab caches so don't do
     * it too early on. Always use on-slab management when
     * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
     */
    /* Decide whether the slab management structure is kept on-slab or
     * off-slab: objects of at least PAGE_SIZE/8 (512 bytes with 4K pages)
     * go off-slab; the bootstrap phase always uses on-slab management */
    if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
        !(flags & SLAB_NOLEAKTRACE))
        /*
         * Size is large, assume best to place the slab management obj
         * off-slab (should allow better packing of objs).
         */
        flags |= CFLGS_OFF_SLAB;
    /* Round size up to the alignment computed above */
    size = ALIGN(size, align);
    /* Compute the leftover (fragment) space in the slab */
    left_over = calculate_slab_order(cachep, size, align, flags);
    /* num is the number of objects each slab of this cache may hold;
     * it should never be 0 */
    if (!cachep->num) {
        printk(KERN_ERR
               "kmem_cache_create: couldn't create cache %s.\n", name);
        kmem_cache_free(&cache_cache, cachep);
        cachep = NULL;
        goto oops;
    }
    /* Size of the slab management structure: struct slab plus the
     * kmem_bufctl_t array */
    slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
                      + sizeof(struct slab), align);

    /*
     * If the slab has been placed off-slab, and we have enough space then
     * move it on-slab. This is at the expense of any extra colouring.
     */
    /* If management is off-slab but the leftover space is large enough to
     * hold the management structure, move it back on-slab */
    if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
        /* Clear the off-slab flag; management is now on-slab */
        flags &= ~CFLGS_OFF_SLAB;
        /* The leftover shrinks by the management structure's size */
        left_over -= slab_size;
    }

    /* If management stays off-slab, recompute slab_size without the
     * alignment padding */
    if (flags & CFLGS_OFF_SLAB) {
        /* really off slab. No need for manual alignment */
        slab_size =
            cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);

#ifdef CONFIG_PAGE_POISONING
        /* If we're going to use the generic kernel_map_pages()
         * poisoning, then it's going to smash the contents of
         * the redzone and userword anyhow, so switch them off.
         */
        if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
            flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
#endif
    }
    /* The colouring unit is one cache line */
    cachep->colour_off = cache_line_size();
    /* Offset must be a multiple of the alignment. */
    if (cachep->colour_off < align)
        cachep->colour_off = align;
    /* Number of colour offsets that fit in the leftover space */
    cachep->colour = left_over / cachep->colour_off;
    /* Record the management structure size */
    cachep->slab_size = slab_size;
    cachep->flags = flags;
    cachep->gfpflags = 0;
    /* If the kernel has a DMA zone and the caller asked for DMA, tag the
     * cache's gfp flags accordingly */
    if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
        cachep->gfpflags |= GFP_DMA;
    /* Record the per-object size */
    cachep->buffer_size = size;
    /* Used later to compute an object's index within its slab */
    cachep->reciprocal_buffer_size = reciprocal_value(size);

    if (flags & CFLGS_OFF_SLAB) {
        /* For off-slab management, the management structure is allocated
         * from a general (kmalloc) cache large enough for slab_size; find
         * that kmem_cache and record it in cachep->slabp_cache (it stays
         * NULL for on-slab management) */
        cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
        /*
         * This is a possibility for one of the malloc_sizes caches.
         * But since we go off slab only for object size greater than
         * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
         * this should not happen at all.
         * But leave a BUG_ON for some lucky dude.
         */
        BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
    }
    /* Set the constructor */
    cachep->ctor = ctor;
    /* Record the cache name */
    cachep->name = name;
    /* Set up the per-CPU local caches */
    if (setup_cpu_cache(cachep, gfp)) {
        __kmem_cache_destroy(cachep);
        cachep = NULL;
        goto oops;
    }

    /* cache setup completed, link it into the list */
    /* Creation is complete: add the cache to the global cache_chain */
    list_add(&cachep->next, &cache_chain);
oops:
    if (!cachep && (flags & SLAB_PANIC))
        panic("kmem_cache_create(): failed to create slab `%s'\n",
              name);
    /* Outside the bootstrap phase, drop the mutex taken earlier and
     * release the CPU-hotplug reference count */
    if (slab_is_available()) {
        mutex_unlock(&cache_chain_mutex);
        put_online_cpus();
    }
    return cachep;
}
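
To make the alignment logic above concrete, here is a small standalone sketch (userspace C, assuming 8-byte words and a 64-byte cache line; the numbers are illustrative, not from any particular machine):

#include <stdio.h>

#define BYTES_PER_WORD  sizeof(void *)  /* 8 on a 64-bit machine */
#define CACHE_LINE      64              /* assumed cache_line_size() */

int main(void)
{
    size_t size = 20;   /* requested object size */
    size_t ralign;

    /* round size up to a word multiple, as kmem_cache_create does */
    size = (size + BYTES_PER_WORD - 1) & ~(BYTES_PER_WORD - 1); /* -> 24 */

    /* SLAB_HWCACHE_ALIGN: halve the alignment while two objects still
     * fit into it, packing small objects within one cache line */
    ralign = CACHE_LINE;
    while (size <= ralign / 2)
        ralign /= 2;    /* 64 -> 32 for a 24-byte object */

    printf("size=%zu align=%zu\n", size, ralign); /* size=24 align=32 */
    return 0;
}

So a 20-byte object is first padded to 24 bytes, and with SLAB_HWCACHE_ALIGN it ends up aligned to 32 bytes: after the final ALIGN(size, align) two objects share one 64-byte line instead of each monopolizing a line.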
The function below calculates how many pages a slab consists of, and how many objects each slab holds.


/**
 * calculate_slab_order - calculate size (page order) of slabs
 * @cachep: pointer to the cache that is being created
 * @size: size of objects to be created in this cache.
 * @align: required alignment for the objects.
 * @flags: slab allocation flags
 *
 * Also calculates the number of objects per slab.
 *
 * This could be made much more intelligent. For now, try to avoid using
 * high order pages for slabs. When the gfp() functions are more friendly
 * towards high-order requests, this should be changed.
 */
static size_t calculate_slab_order(struct kmem_cache *cachep,
            size_t size, size_t align, unsigned long flags)
{
    unsigned long offslab_limit;
    size_t left_over = 0;
    int gfporder;

    for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
        unsigned int num;
        size_t remainder;
        /* Compute the number of objects the slab can hold and the space
         * it wastes */
        cache_estimate(gfporder, size, align, flags, &remainder, &num);
        /* num == 0 means this page order cannot fit even one object;
         * move on to a larger order */
        if (!num)
            continue;
        /* An excerpt from a reader's commentary (http://blog.csdn.net/bullbat/article/details/7192845):
         * Creating an off-slab slab means allocating its management structure
         * (struct slab plus the kmem_bufctl_t array), and that allocation goes
         * through the normal object-allocation path:
         *   kmem_cache_alloc -> __cache_alloc -> __do_cache_alloc -> ____cache_alloc ->
         *       cache_alloc_refill -> cache_grow -> alloc_slabmgmt -> kmem_cache_alloc_node -> kmem_cache_alloc
         * A loop is therefore possible, and the key is alloc_slabmgmt: the loop
         * forms when the management structure is itself off-slab. That happens
         * only when a slab holds so many objects that the kmem_bufctl_t array,
         * and with it the whole management structure, grows large, so the
         * number of kmem_bufctl_t entries must be capped. The estimate below is
         * rough: since objects of this size are off-slab, assume the management
         * structure is also of size 'size' and derive the kmem_bufctl_t count
         * it could hold; an array that large would certainly force off-slab
         * management. It is rough because staying below the limit does not
         * guarantee on-slab management. No harm results, though: the
         * slab_break_gfp_order threshold (usually 1, i.e. at most two pages
         * per slab) caps the pages per slab, and off-slab caches only hold
         * large objects (> 512 bytes), so a slab never holds many of them and
         * the kmem_bufctl_t array stays small; a rough check suffices.
         */
        if (flags & CFLGS_OFF_SLAB) {
            /*
             * Max number of objs-per-slab for caches which
             * use off-slab slabs. Needed to avoid a possible
             * looping condition in cache_grow().
             */
            offslab_limit = size - sizeof(struct slab);
            offslab_limit /= sizeof(kmem_bufctl_t);
            /* Once the object count exceeds this limit, stop looping */
            if (num > offslab_limit)
                break;
        }

        /* Found something acceptable - save it away */
        /* Number of objects per slab */
        cachep->num = num;
        /* Pages per slab (2^gfporder); see how cache_estimate computes it */
        cachep->gfporder = gfporder;
        /* Fragment space in the slab, also computed by cache_estimate */
        left_over = remainder;

        /*
         * A VFS-reclaimable slab tends to have most allocations
         * as GFP_NOFS and we really don't want to have to be allocating
         * higher-order pages when we are unable to shrink dcache.
         */
        /* This flag marks the slab's pages as reclaimable: they count as
         * available memory and can be released via kmem_freepages, so stop
         * at the smallest workable order */
        if (flags & SLAB_RECLAIM_ACCOUNT)
            break;

        /*
         * Large number of objects is good, but very large slabs are
         * currently bad for the gfp()s.
         */
        /* Once the order reaches the slab page-count ceiling, stop looping
         * and use the current gfporder */
        if (gfporder >= slab_break_gfp_order)
            break;

        /*
         * Acceptable internal fragmentation?
         */
        /* Check the page utilization: stop once the waste is at most 1/8
         * of the slab */
        if (left_over * 8 <= (PAGE_SIZE << gfporder))
            break;
    }
    /* Fragment size introduced by the chosen slab layout */
    return left_over;
}
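
A rough worked example of the loop (a userspace sketch: it ignores the management structure and colouring that cache_estimate also accounts for, and assumes 4 KiB pages with an illustrative 1500-byte aligned object):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long size = 1500;  /* aligned object size, assumed */
    int gfporder;

    for (gfporder = 0; gfporder <= 2; gfporder++) {
        unsigned long slab_bytes = PAGE_SIZE << gfporder;
        unsigned long num = slab_bytes / size;            /* objects that fit */
        unsigned long left_over = slab_bytes - num * size;

        /* the acceptance test from the code: waste at most 1/8 of the slab */
        printf("order %d: num=%lu left_over=%lu ok=%d\n",
               gfporder, num, left_over,
               left_over * 8 <= slab_bytes);
        if (num && left_over * 8 <= slab_bytes)
            break;
    }
    return 0;
}

At order 0 only 2 objects fit and 1096 bytes are wasted (1096 * 8 > 4096), so the loop tries order 1: 5 objects fit, 692 bytes are left over, and 692 * 8 = 5536 <= 8192, so gfporder 1 is accepted.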
Finally, let's analyze separately how the per-CPU local cache is set up, in the function below.


static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
{
    /* enable_cpucache was analyzed in an earlier post. FULL is set in
       kmem_cache_init_late; by then the general caches are fully
       initialized, so each CPU's local cache can be configured directly */
    if (g_cpucache_up == FULL)
        return enable_cpucache(cachep, gfp);
    /* g_cpucache_up tracks how far general-cache initialization has
       progressed: NONE / PARTIAL_AC / PARTIAL_L3 / EARLY / FULL */
    /* chicken and egg problem: delay the per-cpu array allocation until the general caches are up.
       static enum {
            NONE,            // system bootstrap phase
            PARTIAL_AC,      // the cache backing struct array_cache exists
            PARTIAL_L3,      // the cache backing struct kmem_list3 exists
            EARLY,           // kmem_cache_init has completed
            FULL             // kmem_cache_init_late done, head arrays resized
       } g_cpucache_up;
    */
    if (g_cpucache_up == NONE) {
        /*
         * Note: the first kmem_cache_create must create the cache
         * that's used by kmalloc(24), otherwise the creation of
         * further caches will BUG().
         */
        /* Taken when creating the struct array_cache cache during
           bootstrap; the general caches do not exist yet, so only the
           static cache can be used */
        cachep->array[smp_processor_id()] = &initarray_generic.cache;

        /*
         * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
         * the first cache, then we need to set up all its list3s,
         * otherwise the creation of further caches will BUG().
         */
        /* The kmem_list3 cache has not been created either; use the
           static globals */
        set_up_list3s(cachep, SIZE_AC);
        /* Advance the progress marker */
        if (INDEX_AC == INDEX_L3)
            g_cpucache_up = PARTIAL_L3;
        else
            g_cpucache_up = PARTIAL_AC;
    } else {
        /* The general caches exist; allocate with kmalloc */
        cachep->array[smp_processor_id()] =
            kmalloc(sizeof(struct arraycache_init), gfp);

        if (g_cpucache_up == PARTIAL_AC) {
            /* The kmem_list3 cache is not ready yet; keep using the
               static global three-list structures */
            set_up_list3s(cachep, SIZE_L3);
            /* Only the creation of the kmem_list3 cache itself takes this
               path; after set_up_list3s, advance the progress marker */
            g_cpucache_up = PARTIAL_L3;
        } else {
            int node;
            for_each_online_node(node) {
                /* Allocate directly with kmalloc_node */
                cachep->nodelists[node] =
                    kmalloc_node(sizeof(struct kmem_list3),
                                 gfp, node);
                BUG_ON(!cachep->nodelists[node]);
                /* Initialize the three lists */
                kmem_list3_init(cachep->nodelists[node]);
            }
        }
    }
    cachep->nodelists[numa_node_id()]->next_reap =
            jiffies + REAPTIMEOUT_LIST3 +
            ((unsigned long)cachep) % REAPTIMEOUT_LIST3;

    cpu_cache_get(cachep)->avail = 0;
    cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
    cpu_cache_get(cachep)->batchcount = 1;
    cpu_cache_get(cachep)->touched = 0;
    cachep->batchcount = 1;
    cachep->limit = BOOT_CPUCACHE_ENTRIES;
    return 0;
}
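
To see why this per-CPU array matters, here is a simplified sketch of how it is consumed on the allocation fast path (condensed from the ____cache_alloc logic of this kernel generation; not a verbatim copy):

/* Fast path: pop an object from this CPU's array_cache, lock-free */
static inline void *fastpath_alloc(struct kmem_cache *cachep)
{
    struct array_cache *ac = cpu_cache_get(cachep); /* cachep->array[cpu] */

    if (likely(ac->avail)) {
        ac->touched = 1;                /* mark the array recently used */
        return ac->entry[--ac->avail];  /* pop the most recently freed object */
    }
    /* array empty: the real code refills it from the slab three-lists
     * via cache_alloc_refill() */
    return NULL;
}

The limit and batchcount fields initialized above govern this array: limit caps how many free objects it may hold, and batchcount is how many objects are moved in or out at a time when the array is refilled or drained.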
