Category: LINUX

2011-02-12 09:52:46

    __get_free_page is a macro defined in include/linux/gfp.h:
>>>
#define __get_free_page(gfp_mask) \
                 __get_free_pages((gfp_mask),0)

__get_free_page--->__get_free_pages
>>>
mm/page_alloc.c
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
        struct page *page;

        /*   
         * __get_free_pages() returns a 32-bit address, which cannot represent
         * a highmem page
         */
        VM_BUG_ON((gfp_mask & __GFP_HIGHMEM) != 0);

        page = alloc_pages(gfp_mask, order);
        if (!page)
                return 0;
        return (unsigned long) page_address(page);
}
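
A usage note: the return value is a kernel virtual address, so callers must not pass __GFP_HIGHMEM (the VM_BUG_ON above enforces this), and the memory is released with free_pages() using the same order. A minimal sketch, with hypothetical function names:
>>>
#include <linux/gfp.h>

/* Hypothetical helper: allocate 2^order physically contiguous, zeroed
 * lowmem pages and return the kernel virtual address of the first one. */
static unsigned long grab_contig_buffer(unsigned int order)
{
        /* GFP_KERNEL may sleep; adding __GFP_HIGHMEM here would trip
         * the VM_BUG_ON in __get_free_pages() */
        return __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
}

static void drop_contig_buffer(unsigned long addr, unsigned int order)
{
        if (addr)
                free_pages(addr, order);  /* order must match the allocation */
}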

__get_free_pages--->alloc_pages
>>>
include/linux/gfp.h
#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{       
        return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask, 
                        struct vm_area_struct *vma, unsigned long addr);
#else   
#define alloc_pages(gfp_mask, order) \
                alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
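
Unlike __get_free_pages(), these struct-page interfaces may hand back highmem pages, because a struct page pointer can describe memory that has no permanent kernel mapping. A hedged sketch (the function name is hypothetical):
>>>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical: allocate a possibly-highmem page and zero it through a
 * temporary kmap() mapping; __get_free_page() could not return such a
 * page, since it has no fixed kernel virtual address. */
static struct page *grab_highmem_page(void)
{
        struct page *page = alloc_page(GFP_HIGHUSER);
        void *vaddr;

        if (!page)
                return NULL;

        vaddr = kmap(page);           /* map the page temporarily */
        memset(vaddr, 0, PAGE_SIZE);
        kunmap(page);
        return page;                  /* free with __free_page(page) */
}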

__get_free_pages--->alloc_pages--->alloc_pages_node (following the !CONFIG_NUMA path)
>>>
include/linux/gfp.h
static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
                                                unsigned int order)
{
        /* Unknown node is current node */
        if (nid < 0)
                nid = numa_node_id();

        return __alloc_pages(gfp_mask, order, node_zonelist(nid, gfp_mask));
}
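
A caller that wants memory from one particular node can invoke alloc_pages_node() directly; as the code above shows, a negative nid falls back to the current node. A minimal sketch (the function name is hypothetical):
>>>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical: allocate a single page from NUMA node nid. */
static struct page *grab_page_on_node(int nid)
{
        struct page *page = alloc_pages_node(nid, GFP_KERNEL, 0);

        if (page)
                pr_info("page came from node %d\n", page_to_nid(page));
        return page;  /* release with __free_page(page) */
}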

__get_free_pages--->alloc_pages--->alloc_pages_node--->__alloc_pages
>>>
include/linux/gfp.h
static inline struct page *
__alloc_pages(gfp_t gfp_mask, unsigned int order,
                struct zonelist *zonelist)
{
        return __alloc_pages_nodemask(gfp_mask, order, zonelist, NULL);
}

__get_free_pages--->alloc_pages--->alloc_pages_node--->__alloc_pages--->__alloc_pages_nodemask
>>>
mm/page_alloc.c
/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
                        struct zonelist *zonelist, nodemask_t *nodemask)
{
        enum zone_type high_zoneidx = gfp_zone(gfp_mask);
        struct zone *preferred_zone;
        struct page *page;
        int migratetype = allocflags_to_migratetype(gfp_mask);

        gfp_mask &= gfp_allowed_mask;

        lockdep_trace_alloc(gfp_mask);

        might_sleep_if(gfp_mask & __GFP_WAIT);

        if (should_fail_alloc_page(gfp_mask, order))
                return NULL;

        /*
         * Check the zones suitable for the gfp_mask contain at least one
         * valid zone. It's possible to have an empty zonelist as a result
         * of GFP_THISNODE and a memoryless node
         */
        if (unlikely(!zonelist->_zonerefs->zone))
                return NULL;

        /* The preferred zone is used for statistics later */
        first_zones_zonelist(zonelist, high_zoneidx, nodemask, &preferred_zone);
        if (!preferred_zone)
                return NULL;

        /* First allocation attempt */
        page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
                        zonelist, high_zoneidx, ALLOC_WMARK_LOW|ALLOC_CPUSET,
                        preferred_zone, migratetype);
        if (unlikely(!page))
                page = __alloc_pages_slowpath(gfp_mask, order,
                                zonelist, high_zoneidx, nodemask,
                                preferred_zone, migratetype);

        trace_mm_page_alloc(page, order, gfp_mask, migratetype);
        return page;
}
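
One line above is worth pausing on: might_sleep_if(gfp_mask & __GFP_WAIT) records that the allocator may block only when __GFP_WAIT is set. A hedged sketch of the resulting caller idiom (the context check is purely illustrative):
>>>
#include <linux/gfp.h>
#include <linux/hardirq.h>

/* Illustrative only: choose a gfp_mask by execution context.
 * GFP_KERNEL contains __GFP_WAIT and may sleep for reclaim;
 * GFP_ATOMIC does not, so it is safe in interrupt context but
 * fails more readily under memory pressure. */
static struct page *grab_page_anywhere(void)
{
        if (in_interrupt())
                return alloc_pages(GFP_ATOMIC, 0);  /* never sleeps */
        return alloc_pages(GFP_KERNEL, 0);          /* may block */
}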

gfp_zone() maps the zone-modifier bits of the gfp flags to the zone type the allocation should come from:
>>>
include/linux/gfp.h
static inline enum zone_type gfp_zone(gfp_t flags)
{       
        enum zone_type z;
        /*
          #define __GFP_DMA       ((__force gfp_t)0x01u)
          #define __GFP_HIGHMEM   ((__force gfp_t)0x02u)
          #define __GFP_DMA32     ((__force gfp_t)0x04u)
          #define __GFP_MOVABLE   ((__force gfp_t)0x08u)  /* Page is movable */
          #define GFP_ZONEMASK    (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
         */
        int bit = flags & GFP_ZONEMASK;

        z = (GFP_ZONE_TABLE >> (bit * ZONES_SHIFT)) &
                                         ((1 << ZONES_SHIFT) - 1);
        
        if (__builtin_constant_p(bit))
                MAYBE_BUILD_BUG_ON((GFP_ZONE_BAD >> bit) & 1);
        else {
#ifdef CONFIG_DEBUG_VM
                BUG_ON((GFP_ZONE_BAD >> bit) & 1);
#endif
        }
        return z;
}
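
To make the table lookup concrete, here is a standalone userspace model, assuming a configuration with all four zones (so ZONES_SHIFT == 2); the table layout mirrors the kernel's GFP_ZONE_TABLE, but the values here are illustrative:
>>>
#include <stdio.h>

enum zone_type { ZONE_DMA, ZONE_DMA32, ZONE_NORMAL, ZONE_HIGHMEM };

#define __GFP_DMA      0x01u
#define __GFP_HIGHMEM  0x02u
#define __GFP_DMA32    0x04u
#define GFP_ZONEMASK   0x0fu
#define ZONES_SHIFT    2

/* One zone id per ZONES_SHIFT-wide slot, indexed by the zone-modifier
 * bits; slot 0 (no modifier) holds ZONE_NORMAL. */
#define ZONE_TABLE \
        (((unsigned long)ZONE_NORMAL  << (0             * ZONES_SHIFT)) | \
         ((unsigned long)ZONE_DMA     << (__GFP_DMA     * ZONES_SHIFT)) | \
         ((unsigned long)ZONE_HIGHMEM << (__GFP_HIGHMEM * ZONES_SHIFT)) | \
         ((unsigned long)ZONE_DMA32   << (__GFP_DMA32   * ZONES_SHIFT)))

int main(void)
{
        unsigned int flags = __GFP_HIGHMEM;   /* e.g. part of GFP_HIGHUSER */
        int bit = flags & GFP_ZONEMASK;       /* bit == 2 */
        int z = (ZONE_TABLE >> (bit * ZONES_SHIFT)) & ((1 << ZONES_SHIFT) - 1);

        printf("zone index %d (ZONE_HIGHMEM is %d)\n", z, ZONE_HIGHMEM);
        return 0;
}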

__get_free_pages--->page_address--->lowmem_page_address
>>>
include/linux/mm.h
// here we can see that the return value is the kernel virtual address of the page
static __always_inline void *lowmem_page_address(struct page *page)
{
        return __va(page_to_pfn(page) << PAGE_SHIFT);
}

__get_free_pages--->page_address--->lowmem_page_address--->page_to_pfn
>>>
include/asm-generic/memory_model.h (the CONFIG_FLATMEM case)
#define page_to_pfn __page_to_pfn

#define __page_to_pfn(page)     ((unsigned long)((page) - mem_map) + \
                                 ARCH_PFN_OFFSET)

struct page *mem_map;   /* the global array of struct page, one entry per physical page frame */
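
To see the arithmetic end to end, a hedged kernel-context sketch (the demo function is hypothetical; the helpers are the real ones discussed above):
>>>
#include <linux/mm.h>

/* Hypothetical demo: round-trip a lowmem struct page through the
 * pfn/virtual-address helpers. */
static void page_addr_roundtrip(struct page *page)
{
        unsigned long pfn = page_to_pfn(page);  /* (page - mem_map) + ARCH_PFN_OFFSET */
        void *vaddr = page_address(page);       /* __va(pfn << PAGE_SHIFT) for lowmem */

        /* virt_to_page() inverts the mapping for lowmem addresses */
        BUG_ON(virt_to_page(vaddr) != page);
        printk(KERN_INFO "pfn=%lu vaddr=%p\n", pfn, vaddr);
}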