Category: LINUX
2011-03-03 16:44:06
This post looks at releasing an object back into the slab system. The call chain is kmem_cache_free -> __cache_free; kmem_cache_free does little more than set up the right context (sketched below), so we go straight to __cache_free.
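The important detail about the wrapper is that it disables local interrupts before calling __cache_free, which is why __cache_free can assert check_irq_off(). A simplified sketch (debug hooks and tracing omitted; details vary by kernel version):

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
    unsigned long flags;

    /* __cache_free must run with local interrupts disabled */
    local_irq_save(flags);
    __cache_free(cachep, objp);
    local_irq_restore(flags);
}

Now __cache_free itself: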
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
    /* Get this CPU's local cache (array_cache) */
    struct array_cache *ac = cpu_cache_get(cachep);

    check_irq_off();
    kmemleak_free_recursive(objp, cachep->flags);
    objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));

    kmemcheck_slab_free(cachep, objp, obj_size(cachep));

    /*
     * Skip calling cache_free_alien() when the platform is not numa.
     * This will avoid cache misses that happen while accessing slabp (which
     * is per page memory reference) to get nodeid. Instead use a global
     * variable to skip the call, which is mostly likely to be present in
     * the cache.
     */
    /* NUMA case: the object may belong to a remote node */
    if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
        return;

    if (likely(ac->avail < ac->limit)) {
        /* The local cache is below its limit: just push the object
         * back onto the entry[] array. */
        STATS_INC_FREEHIT(cachep);
        ac->entry[ac->avail++] = objp;
        return;
    } else {
        /* The local cache has reached its limit: flush a batch of
         * objects back to the slab lists first, then cache this one. */
        STATS_INC_FREEMISS(cachep);
        cache_flusharray(cachep, ac);
        ac->entry[ac->avail++] = objp;
    }
}
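The per-CPU local cache that __cache_free pushes onto is a struct array_cache. For reference, its layout in this kernel generation is roughly the following (simplified sketch, field comments mine):

struct array_cache {
    unsigned int avail;      /* number of object pointers currently cached in entry[] */
    unsigned int limit;      /* capacity of entry[]; flushing kicks in at this point */
    unsigned int batchcount; /* how many objects to move per refill or flush */
    unsigned int touched;    /* set on allocation, checked by the cache reaper */
    spinlock_t lock;
    void *entry[];           /* the cached object pointers themselves */
};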
cache_flusharray
When the local cache holds too many objects, a batch of them has to be released back to the slab three lists (the per-node lists of full, partially used and free slabs).
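The "three lists" live in the per-node struct kmem_list3, roughly the following in this era (simplified sketch; some housekeeping fields omitted):

struct kmem_list3 {
    struct list_head slabs_partial; /* slabs with both allocated and free objects */
    struct list_head slabs_full;    /* slabs with no free objects */
    struct list_head slabs_free;    /* slabs consisting only of free objects */
    unsigned long free_objects;     /* total free objects on this node */
    unsigned int free_limit;        /* above this, completely free slabs are reclaimed */
    spinlock_t list_lock;
    struct array_cache *shared;     /* per-node shared local cache */
    struct array_cache **alien;     /* objects that belong to other NUMA nodes */
};

With that in mind, the function: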
static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
    int batchcount;
    struct kmem_list3 *l3;
    int node = numa_mem_id();

    /* How many objects to release per flush */
    batchcount = ac->batchcount;
#if DEBUG
    BUG_ON(!batchcount || batchcount > ac->avail);
#endif
    check_irq_off();
    /* Get this cache's per-node slab three lists */
    l3 = cachep->nodelists[node];
    spin_lock(&l3->list_lock);
    if (l3->shared) {
        /* A shared local cache exists: release the objects into it */
        struct array_cache *shared_array = l3->shared;
        /* How many free slots are left in the shared local cache */
        int max = shared_array->limit - shared_array->avail;
        if (max) {
            /* If there are fewer free slots than objects to release,
             * only release as many as fit. */
            if (batchcount > max)
                batchcount = max;
            /* Copy the first batchcount entries of the local cache
             * (the oldest, least recently used objects) into the
             * shared local cache. */
            memcpy(&(shared_array->entry[shared_array->avail]),
                   ac->entry, sizeof(void *) * batchcount);
            /* Bump the shared local cache's available count */
            shared_array->avail += batchcount;
            goto free_done;
        }
    }
    /* No shared local cache (or no room in it): release the objects
     * back to the slab three lists. */
    free_block(cachep, ac->entry, batchcount, node);
free_done:
    ……
    spin_unlock(&l3->list_lock);
    /* Shrink the local cache's available count */
    ac->avail -= batchcount;
    /* The first batchcount slots of the local cache are now free:
     * slide the remaining pointers forward by batchcount positions. */
    memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}
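Note the net effect on the local cache: the batchcount entries at the front of entry[] (the oldest ones) are handed back, and the survivors slide forward, so the flush path behaves FIFO-like. A tiny userspace sketch of just this bookkeeping (hypothetical values, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
    void *entry[8];
    int avail = 6, batchcount = 4, i;

    /* fake object pointers standing in for cached objects */
    for (i = 0; i < avail; i++)
        entry[i] = (void *)(long)(i + 1);

    /* entry[0..batchcount-1] would go to the shared cache or free_block();
     * here we simply drop them and compact the array */
    avail -= batchcount;
    memmove(entry, &entry[batchcount], sizeof(void *) * avail);

    for (i = 0; i < avail; i++)
        printf("entry[%d] = %p\n", i, entry[i]); /* 0x5 and 0x6 remain */
    return 0;
}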
free_block
Releases a given number of objects back to the slab three lists.
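free_block manipulates struct slab descriptors directly. For reference, the descriptor in this era looks roughly like this (simplified sketch):

struct slab {
    struct list_head list;   /* linkage into slabs_full/slabs_partial/slabs_free */
    unsigned long colouroff; /* colouring offset of the first object */
    void *s_mem;             /* address of the first object in the slab */
    unsigned int inuse;      /* number of objects currently allocated */
    kmem_bufctl_t free;      /* index of the first free object */
    unsigned short nodeid;   /* NUMA node this slab's memory belongs to */
};

The function: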
static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
               int node)
{
    int i;
    struct kmem_list3 *l3;

    /* Release the objects to the slab three lists one by one */
    for (i = 0; i < nr_objects; i++) {
        void *objp = objpp[i];
        struct slab *slabp;

        /* Map the virtual address to its page, then the page to its slab */
        slabp = virt_to_slab(objp);
        /* Get the per-node slab three lists */
        l3 = cachep->nodelists[node];
        /* First unlink the object's slab from whichever list it is on */
        list_del(&slabp->list);
        check_spinlock_acquired_node(cachep, node);
        check_slabp(cachep, slabp);
        /* Return the object to its slab */
        slab_put_obj(cachep, slabp, objp, node);
        STATS_DEC_ACTIVE(cachep);
        /* One more free object on this node */
        l3->free_objects++;
        check_slabp(cachep, slabp);

        /* fixup slab chains */
        if (slabp->inuse == 0) {
            /* Every object in this slab is now free */
            if (l3->free_objects > l3->free_limit) {
                /* The node holds more free objects than its limit:
                 * reclaim the whole slab back to the page allocator,
                 * and subtract its objects from the free count. */
                l3->free_objects -= cachep->num;
                /* No need to drop any previously held
                 * lock here, even if we have a off-slab slab
                 * descriptor it is guaranteed to come from
                 * a different cache, refer to comments before
                 * alloc_slabmgmt.
                 */
                /* Destroy the struct slab descriptor and free its pages */
                slab_destroy(cachep, slabp);
            } else {
                /* Put the slab on the free-slab list */
                list_add(&slabp->list, &l3->slabs_free);
            }
        } else {
            /* Unconditionally move a slab to the end of the
             * partial list on free - maximum time for the
             * other objects to be freed, too.
             */
            /* Put the slab at the tail of the partially-used list */
            list_add_tail(&slabp->list, &l3->slabs_partial);
        }
    }
}
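The virt_to_slab() step works because, when a slab's pages are set up, the page descriptor's lru pointers are reused to point back at the owning cache and slab descriptor. A rough sketch of the helpers involved (from memory; details vary by kernel version):

static inline struct slab *page_get_slab(struct page *page)
{
    BUG_ON(!PageSlab(page));
    /* page->lru.prev was pointed at the struct slab when the slab was set up */
    return (struct slab *)page->lru.prev;
}

static inline struct slab *virt_to_slab(const void *obj)
{
    struct page *page = virt_to_head_page(obj);
    return page_get_slab(page);
}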
slab_put_obj
static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
                void *objp, int nodeid)
{
    /* Index of the object in the kmem_bufctl_t array */
    unsigned int objnr = obj_to_index(cachep, slabp, objp);

#if DEBUG
    /* Verify that the slab belongs to the intended node */
    WARN_ON(slabp->nodeid != nodeid);

    if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
        printk(KERN_ERR "slab: double free detected in cache "
               "'%s', objp %p\n", cachep->name, objp);
        BUG();
    }
#endif
    /* Point this slot at what used to be the slab's first free object */
    slab_bufctl(slabp)[objnr] = slabp->free;
    /* The freed object becomes the new first free object */
    slabp->free = objnr;
    /* One fewer object in use */
    slabp->inuse--;
}
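slab_bufctl() used above simply returns the kmem_bufctl_t array that sits immediately after the slab descriptor; together with slabp->free it forms an index-linked free list (a stack of free object indices). Roughly:

static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
{
    /* the bufctl array is laid out right after struct slab */
    return (kmem_bufctl_t *) (slabp + 1);
}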
obj_to_index
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
                    const struct slab *slab, void *obj)
{
    /* Byte offset of the object from the slab's first object */
    u32 offset = (obj - slab->s_mem);

    /* Turn the offset into an index into the kmem_bufctl_t array by
     * dividing by the object size (via a reciprocal multiply). */
    return reciprocal_divide(offset, cache->reciprocal_buffer_size);
}
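Note that reciprocal_buffer_size is not the object size itself but a fixed-point reciprocal of it, precomputed when the cache is created, so the division turns into a multiply and a shift. A rough sketch of the helper (from memory):

static inline u32 reciprocal_divide(u32 a, u32 r)
{
    /* r is approximately 2^32 / buffer_size, so this yields
     * a / buffer_size without a divide instruction */
    return (u32)(((u64)a * r) >> 32);
}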