(dtor && !ctor)) {
printk(KERN_ERR "%s: Early error in slab %s\n",
__FUNCTION__, name);
BUG();
}
if (flags & SLAB_DESTROY_BY_RCU)
BUG_ON(dtor);
//flag参数的有效性检查
if (flags & ~CREATE_MASK)
BUG();
//align参数的调整。如无特别要求,align设为零,flag设为SLAB_HWCACHE_ALIGN。按照处理器缓//存对齐
if (align) {
flags &= ~(SLAB_RED_ZONE|SLAB_STORE_USER);
} else {
if (flags & SLAB_HWCACHE_ALIGN) {
//cache_line_size取得处理平台的cache line.前面已经分析过
align = cache_line_size();
//如果对象太小,为了提高利用率,取cache line半数对齐
while (size <= align/2)
align /= 2;
} else {
align = BYTES_PER_WORD;
}
}
//从cache_cache中分得一个缓存描述符 kmem_cache_alloc函数在后面讲述
cachep = (kmem_cache_t *) kmem_cache_alloc(&cache_cache, SLAB_KERNEL);
if (!cachep)
goto opps;
//初始化
memset(cachep, 0, sizeof(kmem_cache_t));
//把大小按照BYTES_PER_WORD 对齐。BYTES_PER_WORD也即处理器的地址单元,在i32 为32
if (size & (BYTES_PER_WORD-1)) {
size += (BYTES_PER_WORD-1);
size &= ~(BYTES_PER_WORD-1);
}
//如果size 大于1/8 个页面。就把slab放到缓存区的外面
if (size >= (PAGE_SIZE>>3))
flags |= CFLGS_OFF_SLAB;
//使size按照align对齐
size = ALIGN(size, align);
if ((flags & SLAB_RECLAIM_ACCOUNT) && size <= PAGE_SIZE) {
cachep->gfporder = 0;
cache_estimate(cachep->gfporder, size, align, flags,
&left_over, &cachep->num);
} else {
//在这里,为cache中每个slab的大小以及slab中的对象个数取得一个平衡点
do {
unsigned int break_flag = 0;
cal_wastage:
//cache_estimate:指定slab的大小后,返回slab中的对像个数
//以及剩余空间数
cache_estimate(cachep->gfporder, size, align, flags,
&left_over, &cachep->num);
if (break_flag)
break;
if (cachep->gfporder >= MAX_GFP_ORDER)
break;
if (!cachep->num)
goto next;
if (flags & CFLGS_OFF_SLAB &&
cachep->num > offslab_limit) {
/* This num of objs will cause problems. */
cachep->gfporder--;
break_flag++;
goto cal_wastage;
}
/*
* Large num of objs is good, but v. large slabs are
* currently bad for the gfp()s.
*/
if (cachep->gfporder >= slab_break_gfp_order)
break;
if ((left_over*8) <= (PAGE_SIZE<<cachep->gfporder))
break; /* Acceptable internal fragmentation. */
next:
cachep->gfporder++;
} while (1);
}
if (!cachep->num) {
//出现意外,打印出常现的oops错误
printk("kmem_cache_create: couldn't create cache %s.\n", name);
kmem_cache_free(&cache_cache, cachep);
cachep = NULL;
goto opps;
}
//使slab大小按照align对齐
slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t)
+ sizeof(struct slab), align);
if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
//如果剩余空间足间大,就把slab描述符放到缓存区里面
flags &= ~CFLGS_OFF_SLAB;
left_over -= slab_size;
}
if (flags & CFLGS_OFF_SLAB) {
//如果slab描述符依然只能放到缓存区外面。则取slab_size大小的实际值
//也就是说不需要与alin 对齐了
slab_size = cachep->num*sizeof(kmem_bufctl_t)+sizeof(struct slab);
}
//着色偏移量,至少为一个cache_size.若align值是自己指定的,且超出了一个cache size.这样//值就会取设定的align
cachep->colour_off = cache_line_size();
if (cachep->colour_off < align)
cachep->colour_off = align;
//颜色的总数,为剩余的空间数/着色偏移量
//从这里我们可以看到,如果偏移量太少,着色机制是没有任何意义的
//这是值得提醒的是colour_next没有被特别赋值,即为默认值0
cachep->colour = left_over/cachep->colour_off;
//各种成员的初始化
cachep->slab_size = slab_size;
cachep->flags = flags;
cachep->gfpflags = 0;
if (flags & SLAB_CACHE_DMA)
cachep->gfpflags |= GFP_DMA;
spin_lock_init(&cachep->spinlock);
cachep->objsize = size;
/* NUMA */
INIT_LIST_HEAD(&cachep->lists.slabs_full);
INIT_LIST_HEAD(&cachep->lists.slabs_partial);
INIT_LIST_HEAD(&cachep->lists.slabs_free);
//如果slab描述符是放在缓存区外面的。那就为slab描述符指定一个分配缓存
if (flags & CFLGS_OFF_SLAB)
cachep->slabp_cache = kmem_find_general_cachep(slab_size,0);
cachep->ctor = ctor;
cachep->dtor = dtor;
cachep->name = name;
/* Don't let CPUs to come and go */
lock_cpu_hotplug();
//g_cpucache_up:判断普通缓存是否就绪的标志
//NONE是初始值 PARTIAL:是一个中间的状态,即普通缓存正在初始化
//FULL:普通缓存已经初始化完成
if (g_cpucache_up == FULL) {
enable_cpucache(cachep);
} else {
if (g_cpucache_up == NONE) {
/* Note: the first kmem_cache_create must create
* the cache that's used by kmalloc(24), otherwise
* the creation of further caches will BUG().
*/
cachep->array[smp_processor_id()] =
&initarray_generic.cache;
g_cpucache_up = PARTIAL;
} else {
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init),
GFP_KERNEL);
}
BUG_ON(!ac_data(cachep));
ac_data(cachep)->avail = 0;
ac_data(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
ac_data(cachep)->batchcount = 1;
ac_data(cachep)->touched = 0;
cachep->batchcount = 1;
cachep->limit = BOOT_CPUCACHE_ENTRIES;
cachep->free_limit = (1+num_online_cpus())*cachep->batchcount
+ cachep->num;
}
cachep->lists.next_reap = jiffies + REAPTIMEOUT_LIST3 +
((unsigned long)cachep)%REAPTIMEOUT_LIST3;
//查看是否有相同名字的cache
down(&cache_chain_sem);
{
struct list_head *p;
mm_segment_t old_fs;
old_fs = get_fs();
set_fs(KERNEL_DS);
list_for_each(p, &cache_chain) {
kmem_cache_t *pc = list_entry(p, kmem_cache_t, next);
char tmp;
/*
* This happens when the module gets unloaded and
* doesn't destroy its slab cache and noone else reuses
* the vmalloc area of the module. Print a warning.
*/
#ifdef CONFIG_X86_UACCESS_INDIRECT
if (__direct_get_user(tmp,pc->name)) {
#else
if (__get_user(tmp,pc->name)) {
#endif
printk("SLAB: cache with size %d has lost its "
"name\n", pc->objsize);
continue;
}
if (!strcmp(pc->name,name)) {
printk("kmem_cache_create: duplicate "
"cache %s\n",name);
up(&cache_chain_sem);
unlock_cpu_hotplug();
BUG();
}
}
set_fs(old_fs);
}
//将cache挂至cache_chain链
list_add(&cachep->next, &cache_chain);
up(&cache_chain_sem);
unlock_cpu_hotplug();
opps:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
return cachep;
}
首先我们遇到的问题是第一个鸡与鸡蛋的问题:新建cache描述符是从cache_cache中分配cache描述符,那cache_cache是从何而来呢?cache_cache是静态定义的一个数据结构,只要静态初始化它的成员就可以了。另一个鸡与鸡蛋的问题就是cache中array数组的初始化问题。例如:
cachep->array[smp_processor_id()] =
kmalloc(sizeof(struct arraycache_init),
GFP_KERNEL);
也就是说从普通缓存中分得空间,那普通缓存区中的array如何取得空间呢?这也是一个静态定义的数组:initarray_generic.cache。我们以后再详细分析内存各子系统的初始化过程。详情请关注本站更新。
另外,我们也接触到了着色部份的代码。如下所示:
cachep->colour_off = cache_line_size();
if (cachep->colour_off < align)
cachep->colour_off = align;
cachep->colour = left_over/cachep->colour_off;
着色的原理在前面已经分析过了。Colour_off:每一个slab中偏移值。以colour:颜色的总数,即最大的偏移位置,它的大小为剩余大小/偏移值,colour_next初始化为零。
举例说明:
Colour_off = 32 colour = 2; colour_next = 0
第一个slab偏移colour_next* Colour_off = 0*32 = 0 然后colour_next加1。即为1
第二个slab偏移colour_next* Colour_off = 1*32 = 32然后colour_next加1。即为2
第三个slab偏移colour_next* Colour_off = 2*32 = 64然后colour_next加1。即为3,由于colour为2。所以,colour_next = 0;
第四个slab偏移colour_next* Colour_off = 0*32 = 0
……
另外:要注意的是slab大小计算的时候:
slab_size = ALIGN(cachep->num*sizeof(kmem_bufctl_t) + sizeof(struct slab), align);
虽然在struct slab里没有定义kmem_bufctl_t.但在为slab申请空间的时候申请了num个kmem_bufctl_t的多余空间,也就是说kmem_bufctl_t数组紧放在slab描述符之后
此外,array被初始化了arraycache_init大小。
struct arraycache_init {
struct array_cache cache;
void * entries[BOOT_CPUCACHE_ENTRIES];
};
为什么要这样做?我们在后面再给出分析
六:kmem_cache_alloc的实现分析:
我们在上面可以看到,创建一个cache描述符的时候,并没有为之分配slab数据。现在我们来看一下怎么从cache中申请对象
/*
 * kmem_cache_alloc - allocate one object from @cachep.
 * @cachep: the cache to allocate from.
 * @flags:  allocation flags (e.g. whether sleeping is allowed).
 *
 * Thin exported wrapper: all real work happens in __cache_alloc().
 */
void * kmem_cache_alloc (kmem_cache_t *cachep, int flags)
{
	void *obj = __cache_alloc(cachep, flags);
	return obj;
}
实际上会调用__cache_alloc
如下:
/*
 * __cache_alloc - core object-allocation path.
 *
 * Fast path: pop an object off the per-CPU array cache (AC) with
 * interrupts disabled.  Slow path: the AC is empty and must be refilled
 * from the cache's list3 structures (or, ultimately, the buddy system)
 * by cache_alloc_refill().
 */
static inline void * __cache_alloc (kmem_cache_t *cachep, int flags)
{
	unsigned long irqstate;
	void *obj;
	struct array_cache *cpu_cache;

	/* Debug hook; may sleep later if __GFP_WAIT is set in @flags. */
	cache_alloc_debugcheck_before(cachep, flags);

	local_irq_save(irqstate);
	/* The array cache belonging to the executing CPU. */
	cpu_cache = ac_data(cachep);
	if (likely(cpu_cache->avail)) {
		/* AC has cached objects: hand out the most recent one. */
		STATS_INC_ALLOCHIT(cachep);
		cpu_cache->touched = 1;	/* every allocation marks the AC as active */
		obj = ac_entry(cpu_cache)[--cpu_cache->avail];
	} else {
		/* AC is empty: pull a batch of objects in from the l3 lists. */
		STATS_INC_ALLOCMISS(cachep);
		obj = cache_alloc_refill(cachep, flags);
	}
	local_irq_restore(irqstate);

	obj = cache_alloc_debugcheck_after(cachep, flags, obj, __builtin_return_address(0));
	return obj;
}
首先,会从AC中分配对象,如果AC中无可用对象,那就从l3链表中分配对象了,首先它会从share链表中取对象,然后再从未满、全空链表中取对象,如果都没有空闲对象的话,只能从伙伴系统中分配内存了.接着看下面的代码:
/*
 * cache_alloc_refill - refill the per-CPU array cache and return one object.
 *
 * Called with interrupts off when the AC is empty.  Tries, in order:
 * the l3 shared array, partially-used slabs, completely free slabs, and
 * finally growing the cache from the buddy system via cache_grow().
 * Returns an object pointer, or NULL if memory is exhausted.
 *
 * Fix vs. original listing: the bare (non-comment) text line before the
 * "batchcount > shared_array->avail" test was a stray sentence that would
 * not compile; it is now a proper comment.  The comment on the
 * slabs_partial re-insertion branch was also corrected (it previously
 * claimed the slab had *no* free objects).
 */
static void* cache_alloc_refill(kmem_cache_t* cachep, int flags)
{
	int batchcount;
	struct kmem_list3 *l3;
	struct array_cache *ac;

	check_irq_off();
	ac = ac_data(cachep);
retry:
	/* batchcount: how many objects to move into the AC in one refill. */
	batchcount = ac->batchcount;
	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
		/* Cache has been idle; do only a partial refill. */
		batchcount = BATCHREFILL_LIMIT;
	}
	/* The list3 this cache feeds from. */
	l3 = list3_data(cachep);

	/* We must only be called with an empty AC. */
	BUG_ON(ac->avail > 0);
	spin_lock(&cachep->spinlock);
	/* First try to take objects from the shared array. */
	if (l3->shared) {
		struct array_cache *shared_array = l3->shared;
		if (shared_array->avail) {
			/* If shared holds fewer than batchcount, move them all. */
			if (batchcount > shared_array->avail)
				batchcount = shared_array->avail;
			shared_array->avail -= batchcount;
			ac->avail = batchcount;
			/* Copy the object pointers from shared into the AC. */
			memcpy(ac_entry(ac), &ac_entry(shared_array)[shared_array->avail],
					sizeof(void*)*batchcount);
			shared_array->touched = 1;
			/* The AC now has objects, so allocation can proceed. */
			goto alloc_done;
		}
	}
	/* Reaching here means the shared array had no objects. */
	while (batchcount > 0) {
		/* Take from partial slabs first, then from fully free slabs. */
		struct list_head *entry;
		struct slab *slabp;
		/* Get slab alloc is to come from. */
		entry = l3->slabs_partial.next;
		/* Is slabs_partial empty? */
		if (entry == &l3->slabs_partial) {
			l3->free_touched = 1;
			/* Is slabs_free empty as well? */
			entry = l3->slabs_free.next;
			if (entry == &l3->slabs_free)
				/* All lists empty: grow from the buddy system. */
				goto must_grow;
		}
		/* Fetch the slab descriptor from the list node. */
		slabp = list_entry(entry, struct slab, list);
		check_slabp(cachep, slabp);
		check_spinlock_acquired(cachep);
		/* Stop when the slab is exhausted or the batch is satisfied. */
		while (slabp->inuse < cachep->num && batchcount--) {
			/* Carve one object out of this slab. */
			kmem_bufctl_t next;
			STATS_INC_ALLOCED(cachep);
			STATS_INC_ACTIVE(cachep);
			STATS_SET_HIGH(cachep);
			/* Pointer to the free object at index slabp->free. */
			ac_entry(ac)[ac->avail++] = slabp->s_mem + slabp->free*cachep->objsize;
			/* Update the in-use count. */
			slabp->inuse++;
			next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
			slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
#endif
			/* Advance free to the next free-object index. */
			slabp->free = next;
		}
		check_slabp(cachep, slabp);
		/* move slabp to correct slabp list: */
		list_del(&slabp->list);
		if (slabp->free == BUFCTL_END)
			/* Slab has no free objects left: onto the full list. */
			list_add(&slabp->list, &l3->slabs_full);
		else
			/* Slab still has free objects: onto the partial list. */
			list_add(&slabp->list, &l3->slabs_partial);
	}
must_grow:
	/*
	 * Account for the objects moved into the AC.  (If all three lists
	 * were empty, ac->avail is still 0 and this is a no-op.)
	 */
	l3->free_objects -= ac->avail;
alloc_done:
	spin_unlock(&cachep->spinlock);

	if (unlikely(!ac->avail)) {
		int x;
		x = cache_grow(cachep, flags);
		/* cache_grow can reenable interrupts, then ac could change. */
		ac = ac_data(cachep);
		/* Grow failed and nothing appeared meanwhile: give up. */
		if (!x && ac->avail == 0)	/* no objects in sight? abort */
			return NULL;
		/* Grow succeeded: retry harvesting from the lists. */
		if (!ac->avail)		/* objects refilled by interrupt? */
			goto retry;
	}
	ac->touched = 1;
	return ac_entry(ac)[--ac->avail];
}
这段代码涉及到slab_bufctl(),等我们看完分配,释放的全过程后。再来详细分析它涉及到的各项操作,cache_grow()用来做slab分配器与slab的交互。它的代码如下示:
//cache_grow: add one new slab to @cachep.  Computes the next colour
//offset, allocates pages from the buddy system, places/allocates the
//slab descriptor, constructs every object, and queues the slab on the
//slabs_free list.  Returns 1 on success, 0 on failure.
//NOTE(review): irq/spinlock ordering here is exact — do not reorder.
static int cache_grow (kmem_cache_t * cachep, int flags)
{
struct slab *slabp;
void *objp;
size_t offset;
int local_flags;
unsigned long ctor_flags;
//Reject flag bits that make no sense for a grow operation.
if (flags & ~(SLAB_DMA|SLAB_LEVEL_MASK|SLAB_NO_GROW))
BUG();
//Caller asked that this cache never grow.
if (flags & SLAB_NO_GROW)
return 0;
ctor_flags = SLAB_CTOR_CONSTRUCTOR;
local_flags = (flags & SLAB_LEVEL_MASK);
if (!(local_flags & __GFP_WAIT))
/*
 * Not allowed to sleep. Need to tell a constructor about
 * this - it might need to know...
 */
ctor_flags |= SLAB_CTOR_ATOMIC;
/* About to mess with non-constant members - lock. */
check_irq_off();
spin_lock(&cachep->spinlock);
//Pick the colour index for the new slab (colouring analysed earlier).
offset = cachep->colour_next;
cachep->colour_next++;
//Wrap the colour counter back to 0 once it reaches the maximum.
if (cachep->colour_next >= cachep->colour)
cachep->colour_next = 0;
//Convert the colour index into a byte offset.
offset *= cachep->colour_off;
spin_unlock(&cachep->spinlock);
if (local_flags & __GFP_WAIT)
local_irq_enable();
kmem_flagcheck(cachep, flags);
//Ask the buddy system for the pages backing this slab (-1 = local node).
if (!(objp = kmem_getpages(cachep, flags, -1)))
goto failed;
//Place the slab descriptor: either inside the slab pages or, for
//off-slab caches, allocated from a separate cache.
if (!(slabp = alloc_slabmgmt(cachep, objp, offset, local_flags)))
goto opps1;
set_slab_attr(cachep, slabp, objp);
//Run the constructor / debug initialisation on every object.
cache_init_objs(cachep, slabp, ctor_flags);
if (local_flags & __GFP_WAIT)
local_irq_disable();
check_irq_off();
spin_lock(&cachep->spinlock);
//Queue the freshly built slab on the slabs_free list.
list_add_tail(&slabp->list, &(list3_data(cachep)->slabs_free));
STATS_INC_GROWN(cachep);
//All of the new slab's objects are free: update the counter.
list3_data(cachep)->free_objects += cachep->num;
spin_unlock(&cachep->spinlock);
return 1;
opps1:
//Descriptor allocation failed: give the pages back to the buddy system.
kmem_freepages(cachep, objp);
failed:
if (local_flags & __GFP_WAIT)
local_irq_disable();
return 0;
}
我们看到了cache_grow的概貌,接着分析它里面调用的子函数。
kmem_getpages()用于slab分配器向伙伴系统分配内存,代码如下:
/*
 * kmem_getpages - obtain pages from the buddy allocator for a slab.
 * @cachep: cache the pages belong to (supplies gfporder and gfp flags).
 * @flags:  caller's allocation flags; cachep->gfpflags are OR-ed in.
 * @nodeid: NUMA node to allocate from, or -1 for the current node.
 *
 * Returns the virtual address of the allocation, or NULL on failure.
 * Every page is flagged PG_slab so the rest of the kernel can tell it
 * is owned by the slab allocator.
 */
static void *kmem_getpages(kmem_cache_t *cachep, int flags, int nodeid)
{
	struct page *pg;
	void *addr;
	int nr_pages;

	flags |= cachep->gfpflags;
	if (likely(nodeid == -1)) {
		/* Allocate from the current CPU's node. */
		addr = (void*)__get_free_pages(flags, cachep->gfporder);
		if (!addr)
			return NULL;
		/* Translate the virtual address to its page descriptor. */
		pg = virt_to_page(addr);
	} else {
		/* Allocate from the explicitly requested node. */
		pg = alloc_pages_node(nodeid, flags, cachep->gfporder);
		if (!pg)
			return NULL;
		addr = page_address(pg);
	}
	/* The allocation spans 2^gfporder pages. */
	nr_pages = 1 << cachep->gfporder;
	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
		atomic_add(nr_pages, &slab_reclaim_pages);
	/* Bump the per-cpu nr_slab page-state counter. */
	add_page_state(nr_slab, nr_pages);
	/* Mark each page as in use by the slab allocator. */
	for (; nr_pages > 0; nr_pages--, pg++)
		SetPageSlab(pg);
	return addr;
}
alloc_slabmgmt()是一个slab描述符分配器接口,代码如下:
/*
 * alloc_slabmgmt - obtain the struct slab descriptor for a new slab.
 * @cachep:      the cache being grown.
 * @objp:        base address of the slab's pages.
 * @colour_off:  colouring offset for this slab.
 * @local_flags: gfp flags for an off-slab descriptor allocation.
 *
 * Off-slab caches get their descriptor from a dedicated general cache;
 * on-slab caches place it inside the slab pages just past the colour
 * offset, with the objects (s_mem) following the descriptor.
 * Returns the descriptor, or NULL on allocation failure.
 */
static struct slab* alloc_slabmgmt (kmem_cache_t *cachep,
			void *objp, int colour_off, int local_flags)
{
	struct slab *mgmt;

	if (OFF_SLAB(cachep)) {
		/* Descriptor lives outside the slab: allocate it separately. */
		mgmt = kmem_cache_alloc(cachep->slabp_cache, local_flags);
		if (!mgmt)
			return NULL;
	} else {
		/* Descriptor lives in the slab itself, after the colouring. */
		mgmt = objp+colour_off;
		/* Objects start after the descriptor + bufctl area. */
		colour_off += cachep->slab_size;
	}
	mgmt->inuse = 0;
	mgmt->colouroff = colour_off;
	mgmt->s_mem = objp+colour_off;
	return mgmt;
}
cache_init_objs()初始化分配得到的每一个对象,代码如下:
/*
 * cache_init_objs - initialise every object of a freshly grown slab.
 *
 * Runs the cache's constructor (or the DEBUG poisoning/red-zone setup)
 * on each object, then threads the bufctl free list: entry i points at
 * i+1, the last entry is BUFCTL_END, and slabp->free starts at 0.
 */
static void cache_init_objs (kmem_cache_t * cachep,
			struct slab * slabp, unsigned long ctor_flags)
{
	int idx;

	for (idx = 0; idx < cachep->num; idx++) {
		/* Address of object number idx within the slab. */
		void *obj = slabp->s_mem + cachep->objsize*idx;
#if DEBUG
		/* need to poison the objs? */
		if (cachep->flags & SLAB_POISON)
			poison_obj(cachep, obj, POISON_FREE);
		if (cachep->flags & SLAB_STORE_USER)
			*dbg_userword(cachep, obj) = NULL;
		if (cachep->flags & SLAB_RED_ZONE) {
			*dbg_redzone1(cachep, obj) = RED_INACTIVE;
			*dbg_redzone2(cachep, obj) = RED_INACTIVE;
		}
		/*
		 * Constructors are not allowed to allocate memory from
		 * the same cache which they are a constructor for.
		 * Otherwise, deadlock. They must also be threaded.
		 */
		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
			cachep->ctor(obj+obj_dbghead(cachep), cachep, ctor_flags);
		if (cachep->flags & SLAB_RED_ZONE) {
			if (*dbg_redzone2(cachep, obj) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
						" end of an object");
			if (*dbg_redzone1(cachep, obj) != RED_INACTIVE)
				slab_error(cachep, "constructor overwrote the"
						" start of an object");
		}
		if ((cachep->objsize % PAGE_SIZE) == 0 &&
				OFF_SLAB(cachep) &&
				cachep->flags & SLAB_POISON)
			kernel_map_pages(virt_to_page(obj),
					cachep->objsize/PAGE_SIZE, 0);
#else
		/* Run the constructor on the object, if the cache has one. */
		if (cachep->ctor)
			cachep->ctor(obj, cachep, ctor_flags);
#endif
		/* Free-list link: object idx's successor is idx+1. */
		slab_bufctl(slabp)[idx] = idx+1;
	}
	/* Terminate the free list at the last object. */
	slab_bufctl(slabp)[idx-1] = BUFCTL_END;
	slabp->free = 0;
}
同样,slab_bufctl的分析,等讲完释放对像的时候再继续
到此,我们已经看完到分配对象的全过程,接着来看怎么释放一个对象