Chinaunix首页 | 论坛 | 博客
  • 博客访问: 206800
  • 博文数量: 33
  • 博客积分: 0
  • 博客等级: 民兵
  • 技术积分: 1277
  • 用 户 组: 普通用户
  • 注册时间: 2013-03-03 10:03
个人简介

现于杭州电子科技大学攻读硕士学位

文章分类

全部博文(33)

文章存档

2013年(33)

我的朋友

分类: LINUX

2013-09-21 21:41:45

/*****************************************************************************************************************************************/
/* arch-sa1100-memory.h */
/*
 * Page offset: 3GB
 */
#define PAGE_OFFSET (0xc0000000UL)   /* start of the kernel virtual address space */


/*
 * Physical DRAM offset is 0xc0000000 on the SA1100
 */
#define PHYS_OFFSET (0xc0000000UL)  /* physical start address of RAM */
/*****************************************************************************************************************************************/
/* asm-arm-cache.h */
#define        L1_CACHE_BYTES  32  /* L1 cache line size in bytes */
#define        L1_CACHE_ALIGN(x)       (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))  /* round x up to the next cache-line boundary */
/*****************************************************************************************************************************************/
/* asm-arm-memory.h */
/*
 *  linux/include/asm-arm/memory.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Note: this file should not be included by non-asm/.h files
 */
#ifndef __ASM_ARM_MEMORY_H
#define __ASM_ARM_MEMORY_H


#include
#include


/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT) /* 物理内存起始页帧号 */


/*
 * These are *only* valid on the kernel direct mapped RAM memory.
 */
/* Translate a kernel direct-mapped virtual address into a physical address. */
static inline unsigned long virt_to_phys(void *x)
{
	unsigned long vaddr = (unsigned long)x;

	return __virt_to_phys(vaddr);
}


/* Translate a physical RAM address back into a kernel direct-mapped virtual address. */
static inline void *phys_to_virt(unsigned long x)
{
	unsigned long paddr = (unsigned long)x;

	return (void *)__phys_to_virt(paddr);
}


#define __pa(x) __virt_to_phys((unsigned long)(x)) /* kernel virtual -> physical address.  On the SA1100 RAM
                                                      starts at 0xc0000000 and PAGE_OFFSET is also 0xc0000000,
                                                      so a direct-mapped kernel virtual address equals its
                                                      physical address */
#define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) /* physical -> kernel virtual address */


/*
 * Virtual <-> DMA view memory address translations
 * Again, these are *only* valid on the kernel direct mapped RAM
 * memory.  Use of these is *depreciated*.
 */
#define virt_to_bus(x) (__virt_to_bus((unsigned long)(x)))
#define bus_to_virt(x) ((void *)(__bus_to_virt((unsigned long)(x))))


/*
 * Conversion between a struct page and a physical address.
 *
 * Note: when converting an unknown physical address to a
 * struct page, the resulting pointer must be validated
 * using VALID_PAGE().  It must return an invalid struct page
 * for any physical address not corresponding to a system
 * RAM address.
 *
 *  page_to_pfn(page) convert a struct page * to a PFN number
 *  pfn_to_page(pfn) convert a _valid_ PFN number to struct page *
 *  pfn_valid(pfn) indicates whether a PFN number is valid
 *
 *  virt_to_page(k) convert a _valid_ virtual address to struct page *
 *  virt_addr_valid(k) indicates whether a virtual address is valid
 */
#ifndef CONFIG_DISCONTIGMEM


#define page_to_pfn(page) (((page) - mem_map) + PHYS_PFN_OFFSET)
#define pfn_to_page(pfn) ((mem_map + (pfn)) - PHYS_PFN_OFFSET)   /* struct page describing frame pfn -- effectively
                                                                    an index into the mem_map[] array, biased by the
                                                                    PFN of the first RAM page */
#define pfn_valid(pfn) ((pfn) >= PHYS_PFN_OFFSET && (pfn) < (PHYS_PFN_OFFSET + max_mapnr))


#define virt_to_page(kaddr) (pfn_to_page(__pa(kaddr) >> PAGE_SHIFT))
#define virt_addr_valid(kaddr) ((kaddr) >= PAGE_OFFSET && (kaddr) < (unsigned long)high_memory)


#define PHYS_TO_NID(addr) (0)


#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)


#else


/*
 * This is more complex.  We have a set of mem_map arrays spread
 * around in memory.
 */
/* PFN = index within the node's local mem_map plus the node's starting PFN. */
#define page_to_pfn(page) \
(((page) - page_zone(page)->zone_mem_map) \
 + (page_zone(page)->zone_start_paddr >> PAGE_SHIFT))


/* Locate the owning node's mem_map, then index by the node-local page number. */
#define pfn_to_page(pfn) \
(PFN_TO_MAPBASE(pfn) + LOCAL_MAP_NR((pfn) << PAGE_SHIFT))


/* A PFN is valid if its node exists and the PFN falls inside that node's RAM. */
#define pfn_valid(pfn) \
  ({ \
unsigned int node = PFN_TO_NID(pfn); \
struct pglist_data *nd = NODE_DATA(node); \
((node < NR_NODES) && \
((pfn - (nd->node_start_paddr >> PAGE_SHIFT)) < nd->node_size));\
   })


#define virt_to_page(kaddr) \
(ADDR_TO_MAPBASE(kaddr) + LOCAL_MAP_NR(kaddr))


#define virt_addr_valid(kaddr) (KVADDR_TO_NID(kaddr) < NR_NODES)


/*
 * Common discontigmem stuff.
 *  PHYS_TO_NID is used by the ARM kernel/setup.c
 */
#define PHYS_TO_NID(addr) PFN_TO_NID((addr) >> PAGE_SHIFT)


/*
 * 2.4 compatibility
 *
 * VALID_PAGE returns a non-zero value if given page pointer is valid.
 * This assumes all node's mem_maps are stored within the node they
 * refer to.  This is actually inherently buggy.
 */
#define VALID_PAGE(page) \
({ unsigned int node = KVADDR_TO_NID(page); \
   ((node < NR_NODES) && \
     ((unsigned)((page) - NODE_MEM_MAP(node)) < NODE_DATA(node)->node_size)); \
})


#endif


/*
 * For BIO.  "will die".  Kill me when bio_to_phys() and bvec_to_phys() die.
 */
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) /* physical address of the page's first byte */


/*
 * We should really eliminate virt_to_bus() here - it's depreciated.
 */
#define page_to_bus(page) (virt_to_bus(page_address(page))) /* DMA/bus-view address of the page */


#endif
/*****************************************************************************************************************************************/
/* asm-arm-pgtable.h */
/*
 *  linux/include/asm-arm/pgtable.h
 *
 *  Copyright (C) 2000-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H


/* NOTE(review): include targets lost in web extraction -- restore from
 * the original 2.4 include/asm-arm/pgtable.h. */
#include
#include
#include


/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT 20  /* same shift as the pgd: the mid-level (pmd) table is folded into
                         the pgd, so no address bits are used to index it */
#define PGDIR_SHIFT 20  /* bits [31:20] of a virtual address index the page global directory */


#define LIBRARY_TEXT_START 0x0c000000


#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);


#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */


#define PMD_SIZE (1UL << PMD_SHIFT)  /* each pmd entry maps 1MB of address space */
#define PMD_MASK (~(PMD_SIZE-1))       /* masks off the offset within a pmd entry's 1MB range */
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* each pgd entry maps 1MB of address space */
#define PGDIR_MASK (~(PGDIR_SIZE-1))    /* masks off the offset within a pgd entry's 1MB range */


#define FIRST_USER_PGD_NR 1  /* first pgd slot available to user mappings (slot 0 is skipped) */
#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)


/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
/* Private (copy-on-write) mappings, indexed by the xwr permission bits. */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY


/* Shared mappings, indexed the same way; writes hit a common page. */
#define __S000  PAGE_NONE
#define __S001  PAGE_READONLY
#define __S010  PAGE_SHARED
#define __S011  PAGE_SHARED
#define __S100  PAGE_READONLY
#define __S101  PAGE_READONLY
#define __S110  PAGE_SHARED
#define __S111  PAGE_SHARED


#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)


#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)  /* extract the physical page frame number from a pte */
#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))  /* build a pte from a frame number and protection bits */


#define pte_none(pte) (!pte_val(pte))  /* true if the pte is empty (no mapping) */
#define pte_clear(ptep) set_pte((ptep), __pte(0))  /* wipe the pte that ptep points to */


#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))  /* struct page of the physical page this pte maps */


#define pmd_none(pmd) (!pmd_val(pmd))  /* true if the pmd entry is empty */
#define pmd_present(pmd) (pmd_val(pmd))  /* true if the pmd entry is present */
#define pmd_clear(pmdp) set_pmd(pmdp, __pmd(0))  /* wipe the pmd entry that pmdp points to */


/*
 * Permanent address of a page. We never have highmem, so this is trivial.
 */
#define pages_to_mb(x) ((x) >> (20 - PAGE_SHIFT))  /* convert a page count into megabytes */


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
/* Combine a physical page address with protection bits to form a pte.
 * Note pte_val(pte) is used as an lvalue here (it expands to the struct
 * member), which is why this is a function rather than a macro. */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t pte;
pte_val(pte) = physpage | pgprot_val(pgprot);
return pte;
}


#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)  /* build a pte from a struct page and protection bits */


/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
#define pgd_clear(pgdp) do { } while (0)


#define page_pte_prot(page,prot) mk_pte(page, prot)   /* page + protection -> pte */
#define page_pte(page) mk_pte(page, __pgprot(0))  /* page -> pte with empty protection */


/* to find an entry in a page-table-directory */
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT) /* pgd index for addr (uses addr[31:20]) */
#define __pgd_offset(addr) pgd_index(addr) /* alias for pgd_index() */


#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr)) /* pgd entry covering addr in address space mm */


/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)


/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr) ((pmd_t *)(dir))  /* the pmd is folded into the pgd, so the pgd entry
                                                   IS the (single) pmd entry -- just cast the pointer */


/* Find an entry in the third-level page table.. */
#define __pte_offset(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) /* pte index for addr (uses addr[19:12]) */
#define pte_offset(dir, addr) ((pte_t *)pmd_page(*(dir)) + __pte_offset(addr)) /* pte entry for addr within the table that pmd entry dir points at */


/* NOTE(review): include target lost in web extraction -- restore from the
 * original tree (the processor-specific pgtable definitions). */
#include


/* Return a copy of pte with its protection bits replaced by newprot;
 * the page frame and the bits in _PAGE_CHG_MASK are preserved. */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);
return pte;
}


extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; /* kernel master page directory.  Placed at TEXTADDR - 0x4000,
                                              i.e. 16KB below the kernel text: the first-level table holds
                                              4096 descriptors of 4 bytes each = 16KB */


/* Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define SWP_TYPE(x) (((x).val >> 2) & 0x7f)  /* 7-bit swap area index, stored in bits [8:2] */
#define SWP_OFFSET(x) ((x).val >> 9)         /* page offset within the swap area, bits [31:9] */
#define SWP_ENTRY(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define swp_entry_to_pte(swp) ((pte_t) { (swp).val })


/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)


/* NOTE(review): include target lost in web extraction -- restore from the
 * original tree. */
#include


extern void pgtable_cache_init(void);


/*
 * remap a physical address `phys' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#define io_remap_page_range(from,phys,size,prot) \
remap_page_range(from,phys,size,prot)


#endif /* !__ASSEMBLY__ */


#endif /* _ASMARM_PGTABLE_H */
/*****************************************************************************************************************************************/
/* cache.h */
/*
 *  linux/include/asm-arm/proc-armv/cache.h
 *
 *  Copyright (C) 1999-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/* NOTE(review): include target lost in web extraction. */
#include


/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1


/*
 * Cache handling for 32-bit ARM processors.
 *
 * Note that on ARM, we have a more accurate specification than that
 * Linux's "flush".  We therefore do not use "flush" here, but instead
 * use:
 *
 * clean:      the act of pushing dirty cache entries out to memory.
 * invalidate: the act of discarding data held within the cache,
 *             whether it is dirty or not.
 */


/*
 * Generic I + D cache
 */
/* Flush the entire CPU cache; used when the kernel page tables change. */
#define flush_cache_all() \
do { \
cpu_cache_clean_invalidate_all(); \
} while (0)


/* This is always called for current->mm */
/* Flush every cache entry associated with address space _mm; afterwards
 * no cache line remains associated with mm.  Only acts when _mm is the
 * currently active address space. */
#define flush_cache_mm(_mm) \
do { \
if ((_mm) == current->active_mm) \
cpu_cache_clean_invalidate_all(); \
} while (0)


/* Flush the cache lines covering the address range [_start,_end) of
 * address space _mm (only when _mm is currently active). */
#define flush_cache_range(_mm,_start,_end) \
do { \
if ((_mm) == current->active_mm) \
cpu_cache_clean_invalidate_range((_start) & PAGE_MASK, \
PAGE_ALIGN(_end), 1); \
} while (0)
/* Flush the single page containing _vmaddr from the cache; the I-cache
 * is flushed too when the vma is executable (VM_EXEC). */
#define flush_cache_page(_vma,_vmaddr) \
do { \
if ((_vma)->vm_mm == current->active_mm) { \
unsigned long _addr = (_vmaddr) & PAGE_MASK; \
cpu_cache_clean_invalidate_range(_addr, \
_addr + PAGE_SIZE, \
((_vma)->vm_flags & VM_EXEC)); \
} \
} while (0)


/*
 * This flushes back any buffered write data.  We have to clean the entries
 * in the cache for this page.  This does not invalidate either I or D caches.
 *
 * Called from:
 * 1. mm/filemap.c:filemap_nopage
 * 2. mm/filemap.c:filemap_nopage
 *    [via do_no_page - ok]
 *
 * 3. mm/memory.c:break_cow
 *    [copy_cow_page doesn't do anything to the cache; insufficient cache
 *     handling.  Need to add flush_dcache_page() here]
 *
 * 4. mm/memory.c:do_swap_page
 *    [read_swap_cache_async doesn't do anything to the cache: insufficient
 *     cache handling.  Need to add flush_dcache_page() here]
 *
 * 5. mm/memory.c:do_anonymous_page
 *    [zero page, never written by kernel - ok]
 *
 * 6. mm/memory.c:do_no_page
 *    [we will be calling update_mmu_cache, which will catch on PG_dcache_dirty]
 *
 * 7. mm/shmem.c:shmem_nopage
 * 8. mm/shmem.c:shmem_nopage
 *    [via do_no_page - ok]
 *
 * 9. fs/exec.c:put_dirty_page
 *    [we call flush_dcache_page prior to this, which will flush out the
 *     kernel virtual addresses from the dcache - ok]
 */
/* Clean the cache lines of the page's kernel mapping back to RAM. */
static __inline__ void flush_page_to_ram(struct page *page)
{
cpu_flush_ram_page(page_address(page));
}


/*
 * D cache only
 */


#define invalidate_dcache_range(_s,_e) cpu_dcache_invalidate_range((_s),(_e))
#define clean_dcache_range(_s,_e) cpu_dcache_clean_range((_s),(_e))




#define flush_dcache_range(_s,_e) cpu_cache_clean_invalidate_range((_s),(_e),0)


/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page->mapping = NULL), or it has
 * userspace mappings (page->mapping->i_mmap or page->mapping->i_mmap_shared)
 * then we _must_ always clean + invalidate the dcache entries associated
 * with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define mapping_mapped(map) ((map)->i_mmap || (map)->i_mmap_shared)
/* Called when the kernel writes to (or copies from) a page-cache page,
 * since that page may be mapped by several processes.  Either defers the
 * flush by marking the page PG_dcache_dirty, or cleans+invalidates now. */
static inline void flush_dcache_page(struct page *page)
{
if (page->mapping && !mapping_mapped(page->mapping))
set_bit(PG_dcache_dirty, &page->flags);
else {
unsigned long virt = (unsigned long)page_address(page);
cpu_cache_clean_invalidate_range(virt, virt + PAGE_SIZE, 0);
}
}
/* Like flush_icache_range but for a user-space range; currently only
 * used by ptrace() when accessing another process's address space. */
#define flush_icache_user_range(vma,page,addr,len) \
flush_dcache_page(page)


#define clean_dcache_entry(_s) cpu_dcache_clean_entry((unsigned long)(_s))


/*
 * This function is misnamed IMHO.  There are three places where it
 * is called, each of which is preceded immediately by a call to
 * flush_page_to_ram:
 *
 *  1. kernel/ptrace.c:access_one_page
 *     called after we have written to the kernel view of a user page.
 *     The user page has been expunged from the cache by flush_cache_page.
 *     [we don't need to do anything here if we add a call to
 *      flush_dcache_page]
 *
 *  2. mm/memory.c:do_swap_page
 *     called after we have (possibly) written to the kernel view of a
 *     user page, which has previously been removed (ie, has been through
 *     the swap cache).
 *     [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 *  3. mm/memory.c:do_no_page
 *     [if the flush_page_to_ram() conditions are satisfied, then ok]
 *
 * Invalidating the icache at the kernels virtual page isn't really
 * going to do us much good, since we wouldn't have executed any
 * instructions there.
 */
/* Called when mapping a page that may contain code; a no-op here
 * for the reasons given above. */
#define flush_icache_page(vma,pg) do { } while (0)


/*
 * I cache coherency stuff.
 *
 * This *is not* just icache.  It is to make data written to memory
 * consistent such that instructions fetched from the region are what
 * we expect.
 *
 * This generally means that we have to clean out the Dcache and write
 * buffers, and maybe flush the Icache in the specified range.
 */
/* Called when the kernel stores into memory that may later be executed,
 * e.g. when loading a kernel module. */
#define flush_icache_range(_s,_e) \
do { \
cpu_icache_invalidate_range((_s), (_e)); \
} while (0)


/*
 * TLB flushing.
 *
 *  - flush_tlb_all() flushes all processes TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes TLB for specified page
 *  - flush_tlb_range(mm, start, end) flushes TLB for specified range of pages
 *
 * We drain the write buffer in here to ensure that the page tables in ram
 * are really up to date.  It is more efficient to do this here...
 */


/*
 * Notes:
 *  current->active_mm is the currently active memory description.
 *  current->mm == NULL iff we are lazy.
 */
/* Flush every TLB entry.  After it completes the page-table changes are
 * globally visible; needed after changing global kernel page tables,
 * e.g. after vfree(). */
#define flush_tlb_all() \
do { \
cpu_tlb_invalidate_all(); \
} while (0)


/*
 * Flush all user virtual address space translations described by `_mm'.
 *
 * Currently, this is always called for current->mm, which should be
 * the same as current->active_mm.  This is currently not be called for
 * the lazy TLB case.
 */
/* Flush all TLB entries belonging to the user context _mm; only called
 * when an operation affects the entire address space. */
#define flush_tlb_mm(_mm) \
do { \
if ((_mm) == current->active_mm) \
cpu_tlb_invalidate_all(); \
} while (0)


/*
 * Flush the specified range of user virtual address space translations.
 *
 * _mm may not be current->active_mm, but may not be NULL.
 */
/* Flush the TLB entries covering [_start,_end) in context _mm; used when
 * moving or updating a region. */
#define flush_tlb_range(_mm,_start,_end) \
do { \
if ((_mm) == current->active_mm) \
cpu_tlb_invalidate_range((_start), (_end)); \
} while (0)


/*
 * Flush the specified user virtual address space translation.
 */
/* Flush a single page from the TLB; most commonly used on page faults
 * and when paging out. */
#define flush_tlb_page(_vma,_page) \
do { \
if ((_vma)->vm_mm == current->active_mm) \
cpu_tlb_invalidate_page((_page), \
((_vma)->vm_flags & VM_EXEC)); \
} while (0)


/*
 * if PG_dcache_dirty is set for the page, we need to ensure that any
 * cache entries for the kernels virtual memory range are written
 * back to the page.
 */
extern void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte);


/*
 * Old ARM MEMC stuff.  This supports the reversed mapping handling that
 * we have on the older 26-bit machines.  We don't have a MEMC chip, so...
 */
#define memc_update_all() do { } while (0)
#define memc_update_mm(mm) do { } while (0)
#define memc_update_addr(mm,pte,log) do { } while (0)
#define memc_clear(mm,physaddr) do { } while (0)
/*****************************************************************************************************************************************/
/* domain.h */
/*
 *  linux/include/asm-arm/proc-armv/domain.h
 *
 *  Copyright (C) 1999 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_DOMAIN_H
#define __ASM_PROC_DOMAIN_H


/*
 * Domain numbers
 *
 *  DOMAIN_IO     - domain 2 includes all IO only
 *  DOMAIN_KERNEL - domain 1 includes all kernel memory only
 *  DOMAIN_USER   - domain 0 includes all user memory only
 */
/* Domain field values used in first-level descriptors; each selects one
 * of the 16 domains controlled by CP15 register c3. */
#define DOMAIN_USER 0  /* domain 0 */
#define DOMAIN_KERNEL 1  /* domain 1 */
#define DOMAIN_TABLE 1  /* domain 1 */
#define DOMAIN_IO 2         /* domain 2 */


/*
 * Domain types
 */
/* Per-domain access-type values held in CP15 c3 (2 bits per domain). */
#define DOMAIN_NOACCESS 0  /* no access: any access generates a domain fault */
#define DOMAIN_CLIENT 1         /* client: accesses are checked against the AP access-permission
                                   bits in the translation entries (together with the R and S
                                   bits of the CP15 c1 control register) */
#define DOMAIN_MANAGER 3  /* manager: accesses are NOT checked against the AP bits or the
                             R/S control bits, so neither privileged nor user mode accesses
                             fault on permissions */
/* Build the 2-bit field value `type' positioned for domain `dom'. */
#define domain_val(dom,type) ((type) << 2*(dom))


/* Write x into CP15 c3, the Domain Access Control Register. */
#define set_domain(x) \
do { \
__asm__ __volatile__( \
"mcr p15, 0, %0, c3, c0 @ set domain" \
 : : "r" (x)); \
} while (0)


/* Change domain `dom' of the current thread to access type `type',
 * then reload the Domain Access Control Register. */
#define modify_domain(dom,type) \
do { \
unsigned int domain = current->thread.domain; \
domain &= ~domain_val(dom, DOMAIN_MANAGER); \
domain |= domain_val(dom, type); \
current->thread.domain = domain; \
set_domain(current->thread.domain); \
} while (0)

#endif
/*****************************************************************************************************************************************/
/* mm.h */
typedef struct page { /* physical page frame descriptor; one per frame, all stored in the mem_map array */
struct list_head list; /* ->mapping has some page lists. */ /* a page can sit on several lists and this is
                                                               the link; the slab allocator keeps pointers
                                                               to the managing slab/cache here, and it also
                                                               links free page blocks */

struct address_space *mapping; /* The inode (or ...) we belong to. */ /* when a file or device is mapped, its
                                                                         inode has an associated address_space;
                                                                         if this page belongs to that file this
                                                                         points at it.  For an anonymous page
                                                                         with mapping set, it is the swap
                                                                         address space, swapper_space */

unsigned long index; /* Our offset within mapping. */ /* for a file mapping, the page's offset within the
                                                         file; for a page in the swap cache, the offset in
                                                         the swap address_space.  Also, when a block of pages
                                                         is freed for a specific process, the order of the
                                                         freed block (power of two of freed pages) is kept
                                                         here -- set in __free_pages_ok() */

struct page *next_hash; /* Next page sharing our hash bucket in
  the pagecache hash table. */ /* file-mapped pages are hashed by inode and offset;
                                  this links pages sharing the same hash bucket */

atomic_t count; /* Usage count, see below. */ /* reference count; the page is freed when it drops to 0,
                                                 and is raised whenever a process or the kernel uses
                                                 the page */

unsigned long flags; /* atomic flags, some possibly
  updated asynchronously */ /* page state bits (PG_*).  The most useful accessor is
                               SetPageUptodate(), which also calls the arch-specific
                               arch_set_page_uptodate() when that hook is defined */

struct list_head lru; /* Pageout list, eg. active_list;
  protected by pagemap_lru_lock !! */ /* for page replacement: pages that may be swapped out
                                         sit on either active_list or inactive_list, declared
                                         in page_alloc.c */

struct page **pprev_hash; /* Complement to *next_hash. */ /* back-pointer making the hash chain doubly linked */

struct buffer_head * buffers; /* Buffer maps us to a disk block. */ /* tracks the buffer_heads when the page has
                                                                       associated block-device buffers */


/*
* On machines where all RAM is mapped into kernel address space,
* we can simply calculate the virtual address. On machines with
* highmem some memory is mapped into kernel virtual memory
* dynamically, so we need a place to store that address.
* Note that this field could be 16 bits on x86 ... ;)
*
* Architectures with slow multiplication can define
* WANT_PAGE_VIRTUAL in asm/page.h
*/
#if defined(CONFIG_HIGHMEM) || defined(WANT_PAGE_VIRTUAL)
void *virtual; /* Kernel virtual address (NULL if
  not kmapped, ie. highmem) */ /* normally only ZONE_NORMAL pages are directly mapped by
                                  the kernel; when a ZONE_HIGHMEM page is kmapped, this
                                  holds its virtual address */
#endif /* CONFIG_HIGMEM || WANT_PAGE_VIRTUAL */
} mem_map_t;

#define PG_locked 0 /* Page is locked. Don't touch. */ /* set while the page is involved in disk I/O:
                                                          set when the I/O starts, cleared when it completes */


#define PG_error 1           /* set when a disk I/O error occurred on this page */


#define PG_referenced 2 /* set when the page is mapped and referenced through the mapping
                           hash table; used by the LRU code when moving pages between lists */


#define PG_uptodate 3  /* set once the page has been read from disk without error */


#define PG_dirty 4         /* the page needs flushing to disk: a write to the page is not
                              propagated immediately, and this bit keeps a dirty page from
                              being freed before it has been written out */
#define PG_unused 5 /* this bit is not used */


#define PG_lru 6  /* set while the page is on either active_list or inactive_list */


#define PG_active 7 /* set while the page is on the LRU active_list and cleared when it is
                       removed; marks whether the page is currently considered active */


#define PG_slab 8 /* marks pages in use by the slab allocator */


#define PG_skip 10 /* used by some architectures to skip parts of the address space
                      (dropped entirely in 2.6) */


#define PG_highmem 11  /* high-memory pages cannot be permanently mapped by the kernel;
                          they are flagged with this bit at mem_init() time */


#define PG_checked 12 /* kill me in 2.5.. */ /* used only by the Ext2 filesystem */


#define PG_arch_1 13 /* architecture-specific state bit.  Generic code guarantees it is
                        cleared when the page first enters the page cache, letting the
                        architecture defer D-cache flushing until a process maps the page */

#define PG_reserved 14 /* marks pages that must never be swapped out; set at boot by the
                          boot memory allocator, and later used to flag empty or
                          nonexistent pages */


#define PG_launder 15 /* written out by VM pressure.. */ /* only matters for page replacement: set when the VM
                                                            writes the page out via writepage(); a scan finding
                                                            this bit together with PG_locked must wait for the
                                                            I/O to complete */


#define PG_fs_1 16 /* Filesystem specific */ /* reserved for filesystem use; currently only NFS uses it,
                                                to track whether a page is in sync with the remote server */


#ifndef arch_set_page_uptodate
#define arch_set_page_uptodate(page)
#endif


/* Make it prettier to test the above... */
#define UnlockPage(page) unlock_page(page) /* unlock the page */
#define Page_Uptodate(page) test_bit(PG_uptodate, &(page)->flags)
#define SetPageUptodate(page) \
do { \
arch_set_page_uptodate(page); \
set_bit(PG_uptodate, &(page)->flags); \
} while (0)
#define ClearPageUptodate(page) clear_bit(PG_uptodate, &(page)->flags)
#define PageDirty(page) test_bit(PG_dirty, &(page)->flags)
#define SetPageDirty(page) set_bit(PG_dirty, &(page)->flags)
#define ClearPageDirty(page) clear_bit(PG_dirty, &(page)->flags)
#define PageLocked(page) test_bit(PG_locked, &(page)->flags)
#define LockPage(page) set_bit(PG_locked, &(page)->flags) /* lock the page against concurrent access */
#define TryLockPage(page) test_and_set_bit(PG_locked, &(page)->flags)
#define PageChecked(page) test_bit(PG_checked, &(page)->flags)
#define SetPageChecked(page) set_bit(PG_checked, &(page)->flags)
#define PageLaunder(page) test_bit(PG_launder, &(page)->flags)
#define SetPageLaunder(page) set_bit(PG_launder, &(page)->flags)
#define ClearPageLaunder(page) clear_bit(PG_launder, &(page)->flags)


/*
 * The zone field is never updated after free_area_init_core()
 * sets it, so none of the operations on it need to be atomic.
 */
#define NODE_SHIFT 4
#define ZONE_SHIFT (BITS_PER_LONG - 8) /* top bits of page->flags record which zone the page belongs to */
/*****************************************************************************************************************************************/
/* page.h */
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H


/* NOTE(review): include target lost in web extraction. */
#include


#define PAGE_SIZE       (1UL << PAGE_SHIFT) /* = 4096 */
#define PAGE_MASK       (~(PAGE_SIZE-1))  /* masks off the in-page offset bits of an address; ANDing
                                             with it yields the page-aligned base (often used to test
                                             whether an address is page aligned) */


#ifdef __KERNEL__
#ifndef __ASSEMBLY__


#define STRICT_MM_TYPECHECKS


#define clear_page(page) memzero((void *)(page), PAGE_SIZE)
extern void copy_page(void *to, void *from);


#define clear_user_page(page, vaddr) clear_page(page)
#define copy_user_page(to, from, vaddr) copy_page(to, from)


#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;             /* page table entry */
typedef struct { unsigned long pmd; } pmd_t;         /* page middle directory entry */
typedef struct { unsigned long pgd; } pgd_t;           /* page global directory entry */
typedef struct { unsigned long pgprot; } pgprot_t; /* page protection/attribute bits */


#define pte_val(x)      ((x).pte)  /* raw value of a pte */
#define pmd_val(x)      ((x).pmd) /* raw value of a pmd entry */
#define pgd_val(x)      ((x).pgd)   /* raw value of a pgd entry */
#define pgprot_val(x)   ((x).pgprot)  /* raw value of the protection bits */


/* Wrap a raw value in the corresponding typed entry. */
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )


#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;


#define pte_val(x)      (x)
#define pmd_val(x)      (x)
#define pgd_val(x)      (x)
#define pgprot_val(x)   (x)


#define __pte(x)        (x)
#define __pmd(x)        (x)
#define __pgd(x)        (x)
#define __pgprot(x)     (x)


#endif
#endif /* !__ASSEMBLY__ */


/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) /* round addr up to the next page boundary */


#ifndef __ASSEMBLY__


#ifdef CONFIG_DEBUG_BUGVERBOSE
extern void __bug(const char *file, int line, void *data);


/* give file/line information */
#define BUG() __bug(__FILE__, __LINE__, NULL)
#define PAGE_BUG(page) __bug(__FILE__, __LINE__, page)


#else


/* these just cause an oops (deliberate write through a NULL pointer) */
#define BUG() (*(int *)0 = 0)
#define PAGE_BUG(page) (*(int *)0 = 0)


#endif


/* Pure 2^n version of get_order */
/*
 * Return the allocation order for a request of 'size' bytes: the
 * smallest 'order' such that (PAGE_SIZE << order) >= size.  Note the
 * unsigned wrap-around of size - 1 when size == 0, inherited from the
 * classic implementation.
 */
static inline int get_order(unsigned long size)
{
	int order = 0;

	/* Whole pages needed, minus one. */
	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		size >>= 1;
		order++;
	}
	return order;
}


#endif /* !__ASSEMBLY__ */


/* NOTE(review): the two include targets below were lost in extraction -- restore them. */
#include
#include


/* Default vm_flags for a data mapping: read/write/exec granted now and permitted later. */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)


#endif /* __KERNEL__ */


#endif
/*****************************************************************************************************************************************/
/* pgalloc.h */
/*
 *  linux/include/asm-arm/proc-armv/pgalloc.h
 *
 *  Copyright (C) 2001 Russell King
 *
 * Page table allocation/freeing primitives for 32-bit ARM processors.
 */


/* unfortunately, this includes linux/mm.h and the rest of the universe. */
#include /* NOTE(review): include target lost in extraction -- restore */


/* Slab cache from which PTE tables are allocated and freed. */
extern kmem_cache_t *pte_cache;


/*
 * Allocate one PTE table from the slab cache.
 *
 * We keep the processor (hardware) copy of the PTE entries separate
 * from the Linux copy: the processor copy sits PTRS_PER_PTE words
 * below the Linux copy, so on success the returned pointer is
 * advanced past the hardware entries.
 */
static inline pte_t *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pte_t *table = kmem_cache_alloc(pte_cache, GFP_KERNEL);

	/* Point at the Linux copy; a failed allocation passes through unchanged. */
	return table ? table + PTRS_PER_PTE : table;
}


/*
 * Free one PTE table previously returned by pte_alloc_one().
 * A NULL pointer is tolerated and ignored.
 */
static inline void pte_free_slow(pte_t *pte)
{
	if (!pte)
		return;
	/* Step back to the start of the allocation (the hardware copy). */
	kmem_cache_free(pte_cache, pte - PTRS_PER_PTE);
}


/*
 * Populate the pmdp entry with a pointer to the pte.  This pmd is part
 * of the mm address space.
 *
 * If 'mm' is the init tasks mm, then we are doing a vmalloc, and we
 * need to set stuff up correctly for it.
 */
/* Kernel-owned address spaces get the kernel-domain table type; all others user. */
#define pmd_populate(mm,pmdp,pte) \
do { \
unsigned long __prot; \
if (mm == &init_mm) \
__prot = _PAGE_KERNEL_TABLE; \
else \
__prot = _PAGE_USER_TABLE; \
set_pmd(pmdp, __mk_pmd(pte, __prot)); \
} while (0)
/*****************************************************************************************************************************************/
/* proc-armv-page.h */
#ifndef __ASM_PROC_PAGE_H
#define __ASM_PROC_PAGE_H


/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12  /* one page = 2^PAGE_SHIFT = 4K, so the low 12 bits of a page base address are 0 */


#define EXEC_PAGESIZE   4096  /* page size in bytes */


#endif /* __ASM_PROC_PAGE_H */
/*****************************************************************************************************************************************/
/* proc-armv-pgtable.h */
/*
 *  linux/include/asm-arm/proc-armv/pgtable.h
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  12-Jan-1997 RMK Altered flushing routines to use function pointers
 * now possible to combine ARM6, ARM7 and StrongARM versions.
 *  17-Apr-1999 RMK Now pass an area size to clean_cache_area and
 * flush_icache_area.
 */
#ifndef __ASM_PROC_PGTABLE_H
#define __ASM_PROC_PGTABLE_H


/* NOTE(review): the two include targets below were lost in extraction -- restore them. */
#include
#include


/*
 * entries per page directory level: they are two-level, so
 * we don't really have any PMD directory.
 */
#define PTRS_PER_PTE 256  /* entries per PTE table: 256 * 4K = 1M of address space per table */
#define PTRS_PER_PMD 1      /* pmd level is folded: its single entry holds the PTE table base */
#define PTRS_PER_PGD 4096 /* entries in the page global directory, each holding one (folded) pmd */


/****************
* PMD functions *
****************/


/* PMD types (actually level 1 descriptor) */
/* Low two bits of a level-1 descriptor:
       01: coarse page table (Linux uses these for 4K small-page mappings)
       00: translation fault
       1x: section / fine-table types */
#define PMD_TYPE_MASK 0x0003 /* mask for the descriptor type bits */
#define PMD_TYPE_FAULT 0x0000 /* translation fault */
#define PMD_TYPE_TABLE 0x0001 /* coarse page table */
#define PMD_TYPE_SECT 0x0002 /* section mapping */
#define PMD_UPDATABLE 0x0010 /* bit 4 of the level-1 descriptor, implementation/user defined */


#define PMD_SECT_CACHEABLE 0x0008  /* section: cache enabled */
#define PMD_SECT_BUFFERABLE 0x0004  /* section: write buffer enabled */
#define PMD_SECT_AP_WRITE 0x0400         /* AP=01: supervisor read/write, no user-mode access */
#define PMD_SECT_AP_READ 0x0800         /* AP=10: user mode read-only */


#define PMD_DOMAIN(x) ((x) << 5)    /* place domain number x into bits 5-8 of the level-1 descriptor */


#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_USER)) /* level-1 entry for a user page table */
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_DOMAIN(DOMAIN_KERNEL)) /* level-1 entry for a kernel page table */


#define pmd_bad(pmd) (pmd_val(pmd) & 2)  /* nonzero (bad) when bit 1 is set, i.e. a section/reserved
                                            descriptor rather than a fault or coarse-table entry */
#define set_pmd(pmdp,pmd) cpu_set_pmd(pmdp,pmd)  /* store descriptor pmd into the entry pmdp points at */

/*
 * Build a level-1 (pmd) descriptor pointing at the given PTE table.
 *
 * 'ptep' addresses the Linux copy of the table; the hardware copy sits
 * PTRS_PER_PTE words below it, and it is the hardware copy's physical
 * address that must be loaded into the descriptor, OR'ed with 'prot'.
 */
static inline pmd_t __mk_pmd(pte_t *ptep, unsigned long prot)
{
	pmd_t pmd;
	unsigned long hw_table = (unsigned long)ptep - PTRS_PER_PTE * sizeof(void *);

	pmd_val(pmd) = __virt_to_phys(hw_table) | prot;
	return pmd;
}
/* Return the kernel virtual address of the Linux PTE table that 'pmd' maps. */
static inline unsigned long pmd_page(pmd_t pmd)
{
	unsigned long hw_table;

	/* Mask off the low type/protection bits to recover the hardware table base. */
	hw_table = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);

	/* The Linux copy lives PTRS_PER_PTE words above the hardware copy. */
	return __phys_to_virt(hw_table + PTRS_PER_PTE * sizeof(void *));
}


/****************
* PTE functions *
****************/


/* PTE types (actually level 2 descriptor) */
/* Low bits of a level-2 descriptor; the bottom two bits select the type:
    00: translation fault
    01: large page (64K)
    10: small page (4K)
    11: tiny page */
#define PTE_TYPE_MASK 0x0003   /* mask for the descriptor type bits */
#define PTE_TYPE_FAULT 0x0000   /* translation fault */
#define PTE_TYPE_LARGE 0x0001   /* large page */
#define PTE_TYPE_SMALL 0x0002   /* small page */
#define PTE_AP_READ 0x0aa0          /* all four ap subfields = 10: user read-only, supervisor read/write */
#define PTE_AP_WRITE 0x0550   /* all four ap subfields = 01: supervisor read/write, no user access */
#define PTE_CACHEABLE 0x0008   /* cache enabled */
#define PTE_BUFFERABLE 0x0004   /* write buffer enabled */


#define set_pte(ptep, pte) cpu_set_pte(ptep,pte) /* store page table entry pte at ptep */


/* We now keep two sets of ptes - the physical and the linux version.
 * This gives us many advantages, and allows us greater flexibility.
 *
 * The Linux pte's contain:
 *  bit   meaning
 *   0    page present
 *   1    young
 *   2    bufferable - matches physical pte
 *   3    cacheable - matches physical pte
 *   4    user
 *   5    write
 *   6    execute
 *   7    dirty
 *  8-11  unused
 *  12-31 virtual page address
 *
 * These are stored at the pte pointer; the physical PTE is at -1024bytes
 */
 /* Linux-version pte permission and attribute bits */
 /* page access attributes */
#define L_PTE_PRESENT (1 << 0)   /* page is present */
#define L_PTE_YOUNG (1 << 1)          /* page recently referenced ("young"); cleared by pte_mkold */
#define L_PTE_BUFFERABLE (1 << 2)       /* write buffer enabled - matches physical pte */
#define L_PTE_CACHEABLE (1 << 3) /* cache enabled - matches physical pte */
#define L_PTE_USER (1 << 4)    /* user-mode accessible */
#define L_PTE_WRITE (1 << 5)    /* writable */
#define L_PTE_EXEC (1 << 6)    /* executable */
#define L_PTE_DIRTY (1 << 7)    /* dirty (has been written to) */


/*
 * The following macros handle the cache and bufferable bits...
 */
/* NOTE(review): these expand unparenthesized; safe only while used inside __pgprot(...). */
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG   /* default pte: present and young */
#define _L_PTE_READ L_PTE_USER | L_PTE_CACHEABLE | L_PTE_BUFFERABLE  /* user-readable with cache and write buffer on */


#define PAGE_NONE       __pgprot(_L_PTE_DEFAULT)   /* no access */
#define PAGE_COPY       __pgprot(_L_PTE_DEFAULT | _L_PTE_READ) /* copy-on-write: readable, not writable */
#define PAGE_SHARED     __pgprot(_L_PTE_DEFAULT | _L_PTE_READ | L_PTE_WRITE) /* shared: readable and writable */
#define PAGE_READONLY   __pgprot(_L_PTE_DEFAULT | _L_PTE_READ)  /* read-only */
#define PAGE_KERNEL     __pgprot(_L_PTE_DEFAULT | L_PTE_CACHEABLE | L_PTE_BUFFERABLE | L_PTE_DIRTY | L_PTE_WRITE) /* kernel read/write, not user-accessible */


/* Bits preserved when a page's protection is changed. */
#define _PAGE_CHG_MASK (PAGE_MASK | L_PTE_DIRTY | L_PTE_YOUNG)




/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)  /* entry maps a page that is in memory (it may
                                                            still be non-writable, e.g. copy-on-write) */
#define pte_read(pte) (pte_val(pte) & L_PTE_USER)      /* readable (user-accessible) */
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)  /* writable */
#define pte_exec(pte) (pte_val(pte) & L_PTE_EXEC)     /* executable */
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)   /* dirty */
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)  /* recently referenced */


/* Generate a single-bit set/clear helper named pte_<fn>(). */
#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }


/*PTE_BIT_FUNC(rdprotect, &= ~L_PTE_USER);*/
/*PTE_BIT_FUNC(mkread,    |= L_PTE_USER);*/
PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(exprotect, &= ~L_PTE_EXEC);
PTE_BIT_FUNC(mkexec,    |= L_PTE_EXEC);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);


/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~(L_PTE_CACHEABLE | L_PTE_BUFFERABLE)) /* disable cache and write buffer */


#endif /* __ASM_PROC_PGTABLE_H */










阅读(3619) | 评论(0) | 转发(0) |
给主人留下些什么吧!~~