/*
 * The test data below was collected on linux 2.6.16.36; this version does not
 * yet support SLUB. cache_cache is the cache that manages the descriptors of
 * all other caches.
 */
/*
 * Initialisation. Called after the page allocator has been initialised and
 * before smp_init().
 */
void __init kmem_cache_init(void)
{
	size_t left_over;
	struct cache_sizes *sizes;
	struct cache_names *names;
	int i;
	int order;
	int node;
	for (i = 0; i < NUM_INIT_LISTS; i++) {	/* NUM_INIT_LISTS is 3 here */
		kmem_list3_init(&initkmem_list3[i]);
		if (i < MAX_NUMNODES)
			cache_cache.nodelists[i] = NULL;	/* clear cache_cache's per-node list pointers */
	}
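For context, kmem_list3_init() only resets the three per-node slab lists and their bookkeeping; a sketch based on mm/slab.c of the same era (field names assumed from that tree):

static void kmem_list3_init(struct kmem_list3 *parent)
{
	INIT_LIST_HEAD(&parent->slabs_full);	/* slabs with no free objects */
	INIT_LIST_HEAD(&parent->slabs_partial);	/* slabs partially in use */
	INIT_LIST_HEAD(&parent->slabs_free);	/* completely free slabs */
	parent->shared = NULL;
	parent->alien = NULL;
	parent->colour_next = 0;
	spin_lock_init(&parent->list_lock);
	parent->free_objects = 0;
	parent->free_touched = 0;
}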
	/*
	 * Fragmentation resistance on low memory - only use bigger
	 * page orders on machines with more than 32MB of memory.
	 * (On the test machine the value of num_physpages is 32752.)
	 */
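The check this comment refers to is not shown in the excerpt; in the slab.c of this era it is roughly the following (a sketch). With num_physpages == 32752 (about 128MB of 4KB pages) the condition holds and the higher slab page order is allowed:

	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
		slab_break_gfp_order = BREAK_GFP_ORDER_HI;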
	/*
	 * Bootstrap is tricky, because several objects are allocated
	 * from caches that do not exist yet:
	 * 1) initialize the cache_cache cache: it contains the struct
	 *    kmem_cache structures of all caches, except cache_cache itself:
	 *    cache_cache is statically allocated.
	 *    Initially an __init data area is used for the head array and the
	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
	 *    array at the end of the bootstrap.
	 * 2) Create the first kmalloc cache.  The struct kmem_cache for the
	 *    new cache is allocated normally.  An __init data area is used
	 *    for the head array.
	 * 3) Create the remaining kmalloc caches, with minimally sized
	 *    head arrays.
	 * 4) Replace the __init data head arrays for cache_cache and the first
	 *    kmalloc cache with kmalloc allocated arrays.
	 * 5) Replace the __init data for kmem_list3 for cache_cache and
	 *    the other cache's with kmalloc allocated memory.
	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
	 */
	node = numa_node_id();
	/* 1) create the cache_cache */
	/* Create cache_cache and initialise cache_chain, the list that links
	 * all caches together. */
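The body of step 1, and the setup of the sizes/names cursors used by the loop below, is elided in this excerpt; in the 2.6.2x tree it looks roughly like this sketch (not the verbatim source):

	/* Link the statically allocated cache_cache into the cache chain and
	 * point its per-CPU array and per-node lists at the __init bootstrap data. */
	INIT_LIST_HEAD(&cache_chain);
	list_add(&cache_cache.next, &cache_chain);
	cache_cache.colour_off = cache_line_size();
	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE];

	/* Work out how many objects fit in one slab of cache_cache. */
	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
					cache_line_size());
	for (order = 0; order < MAX_ORDER; order++) {
		cache_estimate(order, cache_cache.buffer_size,
			       cache_line_size(), 0, &left_over,
			       &cache_cache.num);
		if (cache_cache.num)
			break;
	}
	BUG_ON(!cache_cache.num);
	cache_cache.gfporder = order;
	cache_cache.colour = left_over / cache_cache.colour_off;

	/* 2+3) create the kmalloc caches, starting with the ones that back
	 * struct array_cache and struct kmem_list3, since later allocations
	 * depend on them. */
	sizes = malloc_sizes;
	names = cache_names;
	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
					sizes[INDEX_AC].cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC, NULL);
	if (INDEX_AC != INDEX_L3)
		sizes[INDEX_L3].cs_cachep = kmem_cache_create(
					names[INDEX_L3].name,
					sizes[INDEX_L3].cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC, NULL);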
	/* This loop creates all of the general-purpose (kmalloc) caches, one
	 * per size class in malloc_sizes[]. */
	while (sizes->cs_size != ULONG_MAX) {
		/*
		 * For performance, all the general caches are L1 aligned.
		 * This should be particularly beneficial on SMP boxes, as it
		 * eliminates "false sharing".
		 * Note for systems short on memory removing the alignment will
		 * allow tighter packing of the smaller caches.
		 */
		if (!sizes->cs_cachep) {
			sizes->cs_cachep = kmem_cache_create(names->name,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
					NULL);
		}
#ifdef CONFIG_ZONE_DMA
		/* Create a matching cache for DMA allocations. */
		sizes->cs_dmacachep = kmem_cache_create(
					names->name_dma,
					sizes->cs_size,
					ARCH_KMALLOC_MINALIGN,
					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
						SLAB_PANIC,
					NULL);
#endif
		sizes++;
		names++;
	}
	/* 4) Replace the bootstrap head arrays */
	{
		struct array_cache *ptr;

		/* Allocate the kmalloc'd replacement for cache_cache's
		 * bootstrap head array. */
		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
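The loop above stops at a sentinel entry; the table it walks is declared roughly like this (field names taken from the slab headers of this era, shown here as an assumption):

/* Each entry pairs a size class with the cache(s) serving it; malloc_sizes[]
 * ends with an entry whose cs_size is ULONG_MAX, which terminates the loop. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};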
		local_irq_disable();
		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
		/* Copy cache_cache's bootstrap array_cache into ptr. */
		memcpy(ptr, cpu_cache_get(&cache_cache),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);

		/* Switch cache_cache over to the kmalloc'd array, then allocate
		 * the replacement array for the first kmalloc cache. */
		cache_cache.array[smp_processor_id()] = ptr;
		local_irq_enable();

		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
		local_irq_disable();
		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep) !=
		       &initarray_generic.cache);
		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
		       sizeof(struct arraycache_init));
		/*
		 * Do not assume that spinlocks can be initialized via memcpy:
		 */
		spin_lock_init(&ptr->lock);
		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
		    ptr;
		local_irq_enable();
	}
	/* 5) Replace the bootstrap kmem_list3's */
	/* Replace the kmem_list3 structures used during bootstrap. */
	{
		int nid;
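The body of step 5 is elided here; in this kernel series it moves every node off the static initkmem_list3[] entries onto kmalloc'd kmem_list3 structures via init_list(). Roughly (a sketch; the CACHE_CACHE/SIZE_AC/SIZE_L3 index macros are assumed from mm/slab.c):

		/* Move cache_cache's per-node lists off the __init data first. */
		init_list(&cache_cache, &initkmem_list3[CACHE_CACHE], node);

		for_each_online_node(nid) {
			init_list(malloc_sizes[INDEX_AC].cs_cachep,
				  &initkmem_list3[SIZE_AC + nid], nid);

			if (INDEX_AC != INDEX_L3)
				init_list(malloc_sizes[INDEX_L3].cs_cachep,
					  &initkmem_list3[SIZE_L3 + nid], nid);
		}
	}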
	/* 6) resize the head arrays to their final sizes */
	{
		struct kmem_cache *cachep;

		mutex_lock(&cache_chain_mutex);
		list_for_each_entry(cachep, &cache_chain, next)
			if (enable_cpucache(cachep))
				BUG();
		mutex_unlock(&cache_chain_mutex);
	}
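enable_cpucache() chooses the final per-CPU array size from the object size; the heuristic in this era's slab.c looks roughly like the following (a sketch, not verbatim):

	/* Larger objects get smaller per-CPU arrays: this trades memory
	 * overhead against allocation throughput. */
	if (cachep->buffer_size > 131072)
		limit = 1;
	else if (cachep->buffer_size > PAGE_SIZE)
		limit = 8;
	else if (cachep->buffer_size > 1024)
		limit = 24;
	else if (cachep->buffer_size > 256)
		limit = 54;
	else
		limit = 120;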
	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();
	/* Done! */
	g_cpucache_up = FULL;
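g_cpucache_up records how far the bootstrap has progressed, so that early allocations can take the right fallback path; in this source tree it is declared roughly as:

/* Bootstrap progress: NONE -> PARTIAL_AC (array-cache cache usable) ->
 * PARTIAL_L3 (kmem_list3 cache usable) -> FULL (everything kmalloc'd). */
static enum {
	NONE,
	PARTIAL_AC,
	PARTIAL_L3,
	FULL
} g_cpucache_up;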
	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	register_cpu_notifier(&cpucache_notifier);
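The notifier block itself just points at the CPU-hotplug callback; roughly, in the same file (a sketch):

static struct notifier_block __cpuinitdata cpucache_notifier = {
	&cpuup_callback, NULL, 0
};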
	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}
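For reference, the module-init step referred to above starts one reap timer per CPU; it looks roughly like this in the same tree (a sketch):

static int __init cpucache_init(void)
{
	int cpu;

	/* Register the per-CPU timers that return unused slab pages
	 * to the page allocator. */
	for_each_online_cpu(cpu)
		start_cpu_timer(cpu);
	return 0;
}
__initcall(cpucache_init);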