Chinaunix首页 | 论坛 | 博客
  • 博客访问: 429220
  • 博文数量: 123
  • 博客积分: 2686
  • 博客等级: 少校
  • 技术积分: 1349
  • 用 户 组: 普通用户
  • 注册时间: 2009-12-23 22:11
文章分类
文章存档

2012年(3)

2011年(10)

2010年(100)

2009年(10)

我的朋友

分类: LINUX

2010-10-19 19:50:48

 

/*
 * Reserve a chunk of kernel virtual address space in [start, end) and
 * return a newly allocated vm_struct describing it, or NULL on failure.
 *
 * @size:     requested size in bytes (rounded up to a whole page)
 * @align:    required alignment (overridden below for VM_IOREMAP)
 * @flags:    VM_* flags stored into the resulting vm_struct
 * @start:    lowest acceptable virtual address
 * @end:      highest acceptable virtual address
 * @node:     NUMA node the bookkeeping structures are allocated from
 * @gfp_mask: allocation flags for the bookkeeping allocations
 * @caller:   caller address, recorded in the vm_struct
 */
static struct vm_struct *__get_vm_area_node(unsigned long size,
        unsigned long align, unsigned long flags, unsigned long start,
        unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
    /*
     * BUG FIX: 'va' was declared 'static', making one pointer shared by
     * every concurrent caller — a data race that can leak or corrupt the
     * vmap_area bookkeeping.  It must be an ordinary automatic variable,
     * as in the upstream kernel.
     */
    struct vmap_area *va;    /* see Comment 1 */
    struct vm_struct *area;

    /* The allocations below may sleep: never legal in interrupt context. */
    BUG_ON(in_interrupt());

    if (flags & VM_IOREMAP) {
        /* For ioremap, force the alignment up to the size's order. */
        int bit = fls(size);

        if (bit > IOREMAP_MAX_ORDER)
            bit = IOREMAP_MAX_ORDER;
        else if (bit < PAGE_SHIFT)
            bit = PAGE_SHIFT;

        align = 1ul << bit;
    }

    size = PAGE_ALIGN(size);
    if (unlikely(!size))
        return NULL;

    /* see Comment 2 */
    area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
    if (unlikely(!area))
        return NULL;

    /*
     * We always allocate a guard page.
     */
    size += PAGE_SIZE;

    /* see Comment 3 */
    va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
    if (IS_ERR(va)) {
        /* No address space available: release the vm_struct and fail. */
        kfree(area);
        return NULL;
    }

    /* see Comment 4 */
    insert_vmalloc_vm(area, va, flags, caller);
    return area;
}



Comment 1:

/* Bookkeeping record for one reserved range of kernel virtual addresses. */
struct vmap_area {

    unsigned long va_start;        /* first address of the range */
    unsigned long va_end;        /* one past the last address */
    unsigned long flags;        /* e.g. VM_VM_AREA once attached to a vm_struct */
    struct rb_node rb_node;        /* address sorted rbtree */
    struct list_head list;        /* address sorted list */
    struct list_head purge_list;    /* "lazy purge" list */
    void *private;            /* back-pointer to owning vm_struct (set in insert_vmalloc_vm) */
    struct rcu_head rcu_head;    /* presumably for RCU-deferred freeing — freeing path not shown here */
};

Comment 2:

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
    /* Same as kmalloc_node(), but with zero-initialization requested. */
    gfp_t zeroing_flags = flags | __GFP_ZERO;

    return kmalloc_node(size, zeroing_flags, node);
}



Comment 3:

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 */

/*
 * Allocate a region of KVA of the specified size and alignment, within the
 * vstart and vend.
 *
 * Returns the new vmap_area on success, or ERR_PTR(-ENOMEM)/ERR_PTR(-EBUSY)
 * on failure.  On the first address-space exhaustion it purges lazily freed
 * areas and retries once before giving up.
 */
static struct vmap_area *alloc_vmap_area(unsigned long size,
                unsigned long align,
                unsigned long vstart, unsigned long vend,
                int node, gfp_t gfp_mask)
{
    struct vmap_area *va;
    struct rb_node *n;
    unsigned long addr;
    int purged = 0;    /* set after the one allowed purge-and-retry */

    BUG_ON(!size);
    BUG_ON(size & ~PAGE_MASK);    /* size must be page aligned */

    va = kmalloc_node(sizeof(struct vmap_area),
            gfp_mask & GFP_RECLAIM_MASK, node);
    if (unlikely(!va))
        return ERR_PTR(-ENOMEM);

retry:
    /* Candidate address starts at the bottom of the allowed window. */
    addr = ALIGN(vstart, align);

    spin_lock(&vmap_area_lock);
    if (addr + size - 1 < addr)
        goto overflow;    /* address arithmetic wrapped around */

    /* XXX: could have a last_hole cache */
    n = vmap_area_root.rb_node;
    if (n) {
        /* Find the lowest existing area that could overlap [addr, addr+size). */
        struct vmap_area *first = NULL;

        do {
            struct vmap_area *tmp;
            tmp = rb_entry(n, struct vmap_area, rb_node);
            if (tmp->va_end >= addr) {
                if (!first && tmp->va_start < addr + size)
                    first = tmp;
                n = n->rb_left;
            } else {
                first = tmp;
                n = n->rb_right;
            }
        } while (n);

        if (!first)
            goto found;    /* tree has no area at or above addr: hole is free */

        if (first->va_end < addr) {
            /* 'first' ends below addr; the next area is the real neighbour. */
            n = rb_next(&first->rb_node);
            if (n)
                first = rb_entry(n, struct vmap_area, rb_node);
            else
                goto found;
        }

        /* Walk successive areas until a gap big enough for 'size' appears. */
        while (addr + size > first->va_start && addr + size <= vend) {
            addr = ALIGN(first->va_end + PAGE_SIZE, align);
            if (addr + size - 1 < addr)
                goto overflow;

            n = rb_next(&first->rb_node);
            if (n)
                first = rb_entry(n, struct vmap_area, rb_node);
            else
                goto found;
        }
    }
found:
    if (addr + size > vend) {
overflow:
        /* Out of space: purge lazily-freed areas once, then fail. */
        spin_unlock(&vmap_area_lock);
        if (!purged) {
            purge_vmap_area_lazy();
            purged = 1;
            goto retry;
        }
        if (printk_ratelimit())
            printk(KERN_WARNING
                "vmap allocation for size %lu failed: "
                "use vmalloc= to increase size.\n", size);
        kfree(va);
        return ERR_PTR(-EBUSY);
    }

    BUG_ON(addr & (align-1));    /* result must honour the requested alignment */

    va->va_start = addr;
    va->va_end = addr + size;
    va->flags = 0;
    __insert_vmap_area(va);    /* publish into the rbtree/list under the lock */
    spin_unlock(&vmap_area_lock);

    return va;
}



Comment 4:

/*
 * Fill in @vm from the reserved @va, link the two together, and insert
 * @vm into the global vmlist, which is kept sorted by ascending address.
 */
static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
             unsigned long flags, void *caller)
{
    struct vm_struct *entry, **link;

    /* Describe the reserved range in the vm_struct. */
    vm->addr = (void *)va->va_start;
    vm->size = va->va_end - va->va_start;
    vm->flags = flags;
    vm->caller = caller;

    /* Back-link the vmap_area to its vm_struct and mark it in use. */
    va->private = vm;
    va->flags |= VM_VM_AREA;

    /* Find the first list entry at or above vm->addr and splice in front. */
    write_lock(&vmlist_lock);
    link = &vmlist;
    while ((entry = *link) != NULL && entry->addr < vm->addr)
        link = &entry->next;
    vm->next = *link;
    *link = vm;
    write_unlock(&vmlist_lock);
}


阅读(1277) | 评论(1) | 转发(0) |
给主人留下些什么吧!~~

chinaunix网友2010-10-20 10:46:51

很好的, 收藏了 推荐一个博客,提供很多免费软件编程电子书下载: http://free-ebooks.appspot.com