After mm_init->kmem_cache_init, start_kernel also calls kmem_cache_init_late. This function picks up where kmem_cache_init left off and finishes the slab setup, as follows:
void __init kmem_cache_init_late(void)
{
	struct kmem_cache *cachep;

	/* 6) resize the head arrays to their final sizes */
	/* kmem_cache_init stopped at step 5); continue from 6) and walk every cache on cache_chain */
	mutex_lock(&cache_chain_mutex);
	list_for_each_entry(cachep, &cache_chain, next)
		/* during early init the local caches had a fixed size; recompute them from the object size */
		if (enable_cpucache(cachep, GFP_NOWAIT))
			BUG();
	mutex_unlock(&cache_chain_mutex);

	/* At this point the general caches are fully set up */
	g_cpucache_up = FULL;

	/* Annotate slab for lockdep -- annotate the malloc caches */
	init_lock_keys();

	/*
	 * Register a cpu startup notifier callback that initializes
	 * cpu_cache_get for all new cpus
	 */
	/* Register a CPU-up callback so the local caches get configured when a CPU comes online */
	register_cpu_notifier(&cpucache_notifier);

#ifdef CONFIG_NUMA
	/*
	 * Register a memory hotplug callback that initializes and frees
	 * nodelists.
	 */
	hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
#endif

	/*
	 * The reap timers are started later, with a module init call: That part
	 * of the kernel is not yet operational.
	 */
}
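For reference, g_cpucache_up is the bootstrap state machine slab uses to track how far initialization has progressed. In kernels of this vintage (around 2.6.32) it is declared in mm/slab.c roughly as below; treat the exact member list and comments as an approximation and verify against your own tree:

/* Bootstrap state machine in mm/slab.c (approximate) */
static enum {
	NONE,		/* nothing usable yet */
	PARTIAL_AC,	/* the cache backing struct array_cache exists */
	PARTIAL_L3,	/* the cache backing struct kmem_list3 exists */
	EARLY,		/* kmem_cache_init() has finished */
	FULL		/* kmem_cache_init_late() has finished */
} g_cpucache_up;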
/* Called with cache_chain_mutex held always */
/* Initialize the local (per-CPU) caches */
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
{
	int err;
	int limit, shared;

	/*
	 * The head array serves three purposes:
	 * - create a LIFO ordering, i.e. return objects that are cache-warm
	 * - reduce the number of spinlock operations.
	 * - reduce the number of linked list operations on the slab and
	 *   bufctl chains: array operations are cheaper.
	 * The numbers are guessed, we should auto-tune as described by
	 * Bonwick.
	 */
	/* Pick the number of objects kept in the local cache based on the object size */
	if (cachep->buffer_size > 131072)
		limit = 1;
	else if (cachep->buffer_size > PAGE_SIZE)
		limit = 8;
	else if (cachep->buffer_size > 1024)
		limit = 24;
	else if (cachep->buffer_size > 256)
		limit = 54;
	else
		limit = 120;

	/*
	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
	 * allocation behaviour: Most allocs on one cpu, most free operations
	 * on another cpu. For these cases, an efficient object passing between
	 * cpus is necessary. This is provided by a shared array. The array
	 * replaces Bonwick's magazine layer.
	 * On uniprocessor, it's functionally equivalent (but less efficient)
	 * to a larger limit. Thus disabled by default.
	 */
	shared = 0;
	/* On SMP, set the number of objects kept in the per-node shared local cache */
	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
		shared = 8;

#if DEBUG
	/*
	 * With debugging enabled, large batchcount lead to excessively long
	 * periods with disabled local interrupts. Limit the batchcount
	 */
	if (limit > 32)
		limit = 32;
#endif
	/* Configure the local caches */
	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
	if (err)
		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
		       cachep->name, -err);
	return err;
}
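To make the tuning concrete, here is a small standalone userspace sketch that reproduces the size table above and the batchcount = (limit + 1) / 2 passed to do_tune_cpucache. PAGE_SIZE is assumed to be 4096 here, and pick_limit is a hypothetical helper name, not a kernel function:

/* Userspace sketch: how enable_cpucache would size the per-CPU local cache */
#include <stdio.h>

#define PAGE_SIZE 4096	/* assumed page size */

static int pick_limit(unsigned long buffer_size)	/* hypothetical helper */
{
	if (buffer_size > 131072)
		return 1;
	else if (buffer_size > PAGE_SIZE)
		return 8;
	else if (buffer_size > 1024)
		return 24;
	else if (buffer_size > 256)
		return 54;
	else
		return 120;
}

int main(void)
{
	unsigned long sizes[] = { 32, 192, 512, 2048, 8192, 262144 };
	int i;

	for (i = 0; i < 6; i++) {
		int limit = pick_limit(sizes[i]);
		printf("object size %7lu: limit=%3d batchcount=%3d\n",
		       sizes[i], limit, (limit + 1) / 2);
	}
	return 0;
}

So a 512-byte object, for example, gets a 54-entry local cache and moves 27 objects per refill or flush, while objects larger than 128KB keep at most one object per CPU.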
/* Always called with the cache_chain_mutex held */
/* Set up the local caches, the per-node shared cache and the three slab lists */
static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
			    int batchcount, int shared, gfp_t gfp)
{
	struct ccupdate_struct *new;
	int i;

	new = kzalloc(sizeof(*new), gfp);
	if (!new)
		return -ENOMEM;

	for_each_online_cpu(i) {
		/* Allocate a new struct array_cache for each online CPU */
		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
					       batchcount, gfp);
		if (!new->new[i]) {
			for (i--; i >= 0; i--)
				kfree(new->new[i]);
			kfree(new);
			return -ENOMEM;
		}
	}
	new->cachep = cachep;

	on_each_cpu(do_ccupdate_local, (void *)new, 1);

	check_irq_on();
	cachep->batchcount = batchcount;
	cachep->limit = limit;
	cachep->shared = shared;

	/* Release the old local caches */
	for_each_online_cpu(i) {
		struct array_cache *ccold = new->new[i];
		if (!ccold)
			continue;
		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
		/* Return the objects still held in the old local cache */
		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
		/* Free the old struct array_cache itself */
		kfree(ccold);
	}
	kfree(new);
	/* Initialize the per-node shared cache and the three slab lists */
	return alloc_kmemlist(cachep, gfp);
}
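alloc_arraycache above allocates the per-CPU struct array_cache, i.e. what the comments call the local cache. For orientation, its definition in mm/slab.c of this era looks roughly like the following; the field list is from memory, so verify it against your source tree:

struct array_cache {
	unsigned int avail;		/* number of usable object pointers in entry[] */
	unsigned int limit;		/* capacity, the "limit" tuned above */
	unsigned int batchcount;	/* objects moved per refill/flush */
	unsigned int touched;		/* set on allocation, consulted by the reaper */
	spinlock_t lock;
	void *entry[];			/* the cached object pointers, used LIFO */
};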
/* Swap in the new struct array_cache on the executing CPU */
static void do_ccupdate_local(void *info)
{
	struct ccupdate_struct *new = info;
	struct array_cache *old;

	check_irq_off();
	old = cpu_cache_get(new->cachep);
	/* Point the cache's per-CPU slot at the new struct array_cache */
	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
	/* Stash the old struct array_cache so the caller can free it */
	new->new[smp_processor_id()] = old;
}
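The info argument is the struct ccupdate_struct filled in by do_tune_cpucache; in this kernel version it is roughly the following (again, an approximation to check against your tree):

struct ccupdate_struct {
	struct kmem_cache *cachep;
	struct array_cache *new[NR_CPUS];	/* in: new array_caches, out: the old ones */
};

Because on_each_cpu runs do_ccupdate_local on every online CPU with interrupts off, each CPU exchanges only its own array slot; afterwards new->new[] holds the old per-CPU caches, which do_tune_cpucache then drains with free_block and releases with kfree.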
/*
 * This initializes kmem_list3 or resizes various caches for all nodes.
 */
/* Set up the per-node shared cache and the three slab lists */
static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
{
	int node;
	struct kmem_list3 *l3;
	struct array_cache *new_shared;
	struct array_cache **new_alien = NULL;

	for_each_online_node(node) {

		if (use_alien_caches) {
			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
			if (!new_alien)
				goto fail;
		}

		new_shared = NULL;
		/* Allocate the per-node shared local cache */
		if (cachep->shared) {
			new_shared = alloc_arraycache(node,
				cachep->shared*cachep->batchcount,
					0xbaadf00d, gfp);
			if (!new_shared) {
				free_alien_cache(new_alien);
				goto fail;
			}
		}
		/* Fetch the node's existing three slab lists, if any */
		l3 = cachep->nodelists[node];
		if (l3) {
			struct array_cache *shared = l3->shared;

			spin_lock_irq(&l3->list_lock);
			/* Return the objects held in the old shared local cache to the slabs */
			if (shared)
				free_block(cachep, shared->entry,
						shared->avail, node);
			/* Switch over to the new shared local cache */
			l3->shared = new_shared;
			if (!l3->alien) {
				l3->alien = new_alien;
				new_alien = NULL;
			}
			/* Recompute the upper bound on free objects kept on this node */
			l3->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
			spin_unlock_irq(&l3->list_lock);
			kfree(shared);
			free_alien_cache(new_alien);
			continue;
		}
		/* No kmem_list3 on this node yet: allocate the three slab lists */
		l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
		if (!l3) {
			free_alien_cache(new_alien);
			kfree(new_shared);
			goto fail;
		}
		/* Initialize it */
		kmem_list3_init(l3);
		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
		l3->shared = new_shared;
		l3->alien = new_alien;
		l3->free_limit = (1 + nr_cpus_node(node)) *
					cachep->batchcount + cachep->num;
		cachep->nodelists[node] = l3;
	}
	return 0;

fail:
	if (!cachep->next.next) {
		/* Cache is not active yet. Roll back what we did */
		node--;
		while (node >= 0) {
			if (cachep->nodelists[node]) {
				l3 = cachep->nodelists[node];

				kfree(l3->shared);
				free_alien_cache(l3->alien);
				kfree(l3);
				cachep->nodelists[node] = NULL;
			}
			node--;
		}
	}
	return -ENOMEM;
}
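alloc_kmemlist is ultimately populating one struct kmem_list3 per node, the "three slab lists" referred to throughout. Its layout in mm/slab.c of this era is approximately the following (a sketch from memory, not an authoritative listing):

struct kmem_list3 {
	struct list_head slabs_partial;	/* partially used slabs */
	struct list_head slabs_full;	/* slabs with no free objects */
	struct list_head slabs_free;	/* completely free slabs */
	unsigned long free_objects;
	unsigned int free_limit;	/* cap on free objects kept on this node */
	unsigned int colour_next;	/* per-node cache colouring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* per-node shared cache */
	struct array_cache **alien;	/* caches for objects belonging to other nodes */
	unsigned long next_reap;
	int free_touched;
};

The free_limit formula above, (1 + nr_cpus_node(node)) * batchcount + num, roughly leaves room for every CPU on the node plus the shared cache to flush one batch each, plus one slab's worth of objects (cachep->num), before excess free slabs become eligible for reclaim.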