Category: LINUX

2010-10-19 21:02:51

In the kernel, allocation of a virtually contiguous but physically non-contiguous memory area is initiated by vmalloc().
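As a quick illustration before diving into the source, here is a minimal usage sketch in the style of a module init/exit pair (the names example_buf/example_init/example_exit are made up for illustration):

#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/errno.h>

#define EXAMPLE_BUF_SIZE (2 * 1024 * 1024)    /* 2 MiB: awkward for kmalloc() */

static void *example_buf;

static int example_init(void)
{
    /* virtually contiguous; the backing pages may be scattered in RAM */
    example_buf = vmalloc(EXAMPLE_BUF_SIZE);
    if (!example_buf)
        return -ENOMEM;

    memset(example_buf, 0, EXAMPLE_BUF_SIZE);
    return 0;
}

static void example_exit(void)
{
    vfree(example_buf);    /* unmaps the range and frees each page */
}

The call chain examined below is vmalloc() -> __vmalloc_node() -> __get_vm_area_node() / __vmalloc_area_node().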

/**
 *    vmalloc - allocate virtually contiguous memory
 *    @size:        allocation size
 *    Allocate enough pages to cover @size from the page level
 *    allocator and map them into contiguous kernel virtual space.
 *
 *    For tight control over page level allocator and protection flags
 *    use __vmalloc() instead.
 */

void *vmalloc(unsigned long size)
{
    return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
                    -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(vmalloc);
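As the docstring above says, __vmalloc() is the variant to use when the GFP flags or page protections must differ from the defaults. A one-line sketch (whether PAGE_KERNEL_EXEC is available is architecture-dependent):

    /* e.g. memory that must be executable, as for loaded module text */
    void *text = __vmalloc(size, GFP_KERNEL, PAGE_KERNEL_EXEC);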



/**
 *    __vmalloc_node - allocate virtually contiguous memory
 *    @size:        allocation size
 *    @align:        desired alignment
 *    @gfp_mask:    flags for the page level allocator
 *    @prot:        protection mask for the allocated pages
 *    @node:        node to use for allocation or -1
 *    @caller:    caller's return address
 *
 *    Allocate enough pages to cover @size from the page level
 *    allocator with @gfp_mask flags. Map them into contiguous
 *    kernel virtual space, using a pagetable protection of @prot.
 */

static void *__vmalloc_node(unsigned long size, unsigned long align,
             gfp_t gfp_mask, pgprot_t prot,
             int node, void *caller)
{
    struct vm_struct *area;
    void *addr;
    unsigned long real_size = size;

    size = PAGE_ALIGN(size);
    if (!size || (size >> PAGE_SHIFT) > totalram_pages)
        return NULL;


    //see Comment 1
    area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
                 VMALLOC_END, node, gfp_mask, caller);

    if (!area)
        return NULL;
   

    //see Comment 2
    addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);

    /*
     * A ref_count = 3 is needed because the vm_struct and vmap_area
     * structures allocated in the __get_vm_area_node() function contain
     * references to the virtual address of the vmalloc'ed block.
     */

    kmemleak_alloc(addr, real_size, 3, gfp_mask);

    return addr;
}
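To make the size check concrete: with 4 KiB pages, a request for 10000 bytes is rounded up by PAGE_ALIGN() to 12288 bytes (three pages), while a request of 0 bytes, or one whose page count exceeds totalram_pages, fails immediately with NULL. Note that kmemleak is later told about real_size, the caller's original request, not the page-aligned size.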



Comment 1:
__get_vm_area_node() finds a free range of kernel virtual addresses between VMALLOC_START and VMALLOC_END and reserves it with a vmap_area/vm_struct pair.

Comment 2:
__vmalloc_area_node() allocates the individual physical pages one at a time (order-0 allocations), so they need not be physically contiguous.

Comment 3:
map_vm_area(), called at the end of __vmalloc_area_node(), wires those pages into the kernel page tables so that the region becomes virtually contiguous.
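The effect of Comments 2 and 3 can be observed directly: walking a vmalloc'ed buffer one page at a time typically shows consecutive virtual pages backed by scattered physical frames. A minimal sketch (the helper name show_vmalloc_layout is made up):

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kernel.h>

static void show_vmalloc_layout(void *addr, unsigned long size)
{
    unsigned long off;

    for (off = 0; off < size; off += PAGE_SIZE) {
        /* look up the physical page backing this virtual page */
        struct page *page = vmalloc_to_page(addr + off);

        printk(KERN_INFO "va %p -> pfn %lu\n",
               addr + off, page_to_pfn(page));
    }
}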




static struct vm_struct *__get_vm_area_node(unsigned long size,
        unsigned long align, unsigned long flags, unsigned long start,
        unsigned long end, int node, gfp_t gfp_mask, void *caller)
{
    struct vmap_area *va;
    struct vm_struct *area;

    BUG_ON(in_interrupt());
    if (flags & VM_IOREMAP) {
        int bit = fls(size);

        if (bit > IOREMAP_MAX_ORDER)
            bit = IOREMAP_MAX_ORDER;
        else if (bit < PAGE_SHIFT)
            bit = PAGE_SHIFT;

        align = 1ul << bit;
    }

    size = PAGE_ALIGN(size);
    if (unlikely(!size))
        return NULL;

    area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
    if (unlikely(!area))
        return NULL;

    /*
     * We always allocate a guard page: one extra, unmapped page at the
     * end of the area, so that an out-of-bounds access faults instead
     * of silently corrupting whatever is mapped next.
     */

    size += PAGE_SIZE;

    va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
    if (IS_ERR(va)) {
        kfree(area);
        return NULL;
    }

    insert_vmalloc_vm(area, va, flags, caller);
    return area;
}
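Two details worth working through. For the VM_IOREMAP path, the alignment is derived from the request size and clamped to the range [PAGE_SHIFT, IOREMAP_MAX_ORDER]; with 4 KiB pages (PAGE_SHIFT = 12) and the default IOREMAP_MAX_ORDER of (7 + PAGE_SHIFT) = 19, a 12 KiB request gives fls(0x3000) = 14 and thus a 16 KiB alignment, a 512-byte request is clamped up to a 4 KiB alignment, and a 4 MiB request (fls = 23) is clamped down to a 512 KiB alignment. Second, the guard page is reserved in the virtual range only; no physical page is ever allocated for it, and __vmalloc_area_node() subtracts it back out when computing nr_pages.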



static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                 pgprot_t prot, int node, void *caller)
{
    struct page **pages;
    unsigned int nr_pages, array_size, i;
    gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

    nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
    array_size = (nr_pages * sizeof(struct page *));

    area->nr_pages = nr_pages;
    /*
     * Please note that the recursion is strictly bounded: the nested
     * pages[] array is smaller than the original request by a factor of
     * roughly PAGE_SIZE / sizeof(struct page *), so one level of nesting
     * suffices for any realistic size.
     */
    if (array_size > PAGE_SIZE) {
        pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
                PAGE_KERNEL, node, caller);
        area->flags |= VM_VPAGES;
    } else {
        pages = kmalloc_node(array_size, nested_gfp, node);
    }
    area->pages = pages;
    area->caller = caller;
    if (!area->pages) {
        remove_vm_area(area->addr);
        kfree(area);
        return NULL;
    }

    for (i = 0; i < area->nr_pages; i++) {
        struct page *page;

        if (node < 0)
            page = alloc_page(gfp_mask);
        else
            page = alloc_pages_node(node, gfp_mask, 0);

        if (unlikely(!page)) {
            /* Successfully allocated i pages, free them in __vunmap() */
            area->nr_pages = i;
            goto fail;
        }
        area->pages[i] = page;
    }

    //see Comment 3
    if (map_vm_area(area, prot, &pages))
        goto fail;
    return area->addr;

fail:
    vfree(area->addr);
    return NULL;
}
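A worked example of the array sizing: a 1 MiB vmalloc on a 64-bit system with 4 KiB pages gives nr_pages = 256 (the guard page having been subtracted from area->size) and array_size = 256 * 8 = 2048 bytes, so pages[] fits in one page and comes from kmalloc_node(). Only beyond 512 pages of request (2 MiB) does array_size exceed PAGE_SIZE, in which case the array is itself vmalloc'ed and VM_VPAGES is set so that __vunmap() later frees it with vfree() instead of kfree().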



/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */

void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
             gfp_t gfp)
{
    pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

    if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
        create_object((unsigned long)ptr, size, min_count, gfp);
    else if (atomic_read(&kmemleak_early_log))
        log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
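Here min_count is the minimum number of pointers to the block that must be found for it not to be reported as a leak; the 3 passed from __vmalloc_node() covers the two references always held internally by the vm_struct and vmap_area structures plus the one the caller itself must keep. Driver code can use the same API family to silence false positives. A hypothetical sketch (the helper name alloc_hw_scratch is made up):

#include <linux/slab.h>
#include <linux/kmemleak.h>

static void *alloc_hw_scratch(size_t len)
{
    void *buf = kmalloc(len, GFP_KERNEL);

    /*
     * If the only reference to buf is handed to hardware (e.g. written
     * into a device register), kmemleak cannot see it and would report
     * a false leak, so annotate the block explicitly.
     */
    if (buf)
        kmemleak_not_leak(buf);
    return buf;
}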

