Category: LINUX

2013-09-21 21:03:14

/*****************************************************************************************************************************************/
/* head.S */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>


#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>


#if (PHYS_OFFSET & 0x001fffff)
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
#endif


#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET)    /* KERNEL_RAM_VADDR = 0xc0008000: virtual address of the kernel in RAM;
                                                           PAGE_OFFSET = 0xc0000000: start of the kernel virtual address space */

#define KERNEL_RAM_PADDR (PHYS_OFFSET + TEXT_OFFSET)    /* KERNEL_RAM_PADDR = 0x30008000: physical address of the kernel in RAM;
                                                           PHYS_OFFSET = 0x30000000: physical start of RAM;
                                                           TEXT_OFFSET = 0x00008000: offset of the kernel image within RAM */




/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif


.globl swapper_pg_dir /* swapper_pg_dir is the virtual base address of the initial (first-level/section) page
                         table for the whole 4GB virtual address space. It must be 16KB aligned. The position of
                         a virtual address's (first-level/section) descriptor inside swapper_pg_dir is fixed,
                         determined by the base address of the 1MB section the virtual address falls in. */
.equ swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000    /* swapper_pg_dir = 0xc0008000 - 16K = 0xc0004000 */


.macro pgtbl, rd
ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)   /* KERNEL_RAM_PADDR - 0x4000 = 0x30004000: physical address of the initial page table */
.endm


#ifdef CONFIG_XIP_KERNEL
#define KERNEL_START XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#define KERNEL_END _edata_loc
#else
#define KERNEL_START KERNEL_RAM_VADDR    /* 0xc0008000 */
#define KERNEL_END _end    /* end of the kernel image, defined in vmlinux.lds */
#endif


/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
.section ".text.head", "ax"
ENTRY(stext)                       /* kernel entry point, referenced in vmlinux.lds */
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode /* enter supervisor mode with IRQs and FIQs disabled */
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id   /* read the CPU ID from CP15 register c0 to check whether the kernel supports this CPU */

bl __lookup_processor_type  @ r5=procinfo r9=cpuid /* call __lookup_processor_type to determine whether the kernel supports
                                                      the current CPU; if it does, r5 = address of the proc_info_list
                                                      structure describing the processor, otherwise r5 = 0 */
movs r10, r5 @ invalid processor (r5=0)?
beq  __error_p @ yes, error 'p'          /* if the kernel does not support this processor, take the error path */
bl __lookup_machine_type @ r5=machinfo   /* is this board supported? After __lookup_machine_type, if the board is
                                            supported r5 = address of the struct machine_desc describing it,
                                            otherwise r5 = 0 */
movs r8, r5 @ invalid machine (r5=0)?
beq __error_a @ yes, error 'a'        /* if the board is not supported, take the error path */
bl __vet_atags
bl __create_page_tables    /* create the initial page tables */


/*
* The following calls CPU specific code in a position independent
* manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
* xxx_proc_info structure selected by __lookup_machine_type
* above.  On return, the CPU will be ready for the MMU to be
* turned on, and r0 will hold the CPU control register value.
*/
ldr r13, __switch_data   @ address to jump to after   /* __switch_data is defined in head-common.S; once the page tables are set
                                                         up and the MMU is enabled, execution continues there and then jumps to
                                                         the first C function, start_kernel, in init/main.c */
@ mmu has been enabled
adr lr, __enable_mmu @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
ENDPROC(stext)


#if defined(CONFIG_SMP)
ENTRY(secondary_startup)
/*
* Common entry point for secondary CPUs.
*
* Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
* the processor type - there is no need to check the machine type
* as it has already been validated by the primary processor.
*/
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type
movs r10, r5 @ invalid processor?
moveq r0, #'p' @ yes, error 'p'
beq __error


/*
* Use the page tables supplied from  __cpu_up.
*/
adr r4, __secondary_data
ldmia r4, {r5, r7, r13} @ address to jump to after
sub r4, r4, r5 @ mmu has been enabled
ldr r4, [r7, r4] @ get secondary_data.pgdir
adr lr, __enable_mmu @ return address
add pc, r10, #PROCINFO_INITFUNC @ initialise processor
@ (return control reg)
ENDPROC(secondary_startup)


/*
* r6  = &secondary_data
*/
ENTRY(__secondary_switched)
ldr sp, [r7, #4] @ get secondary_data.stack
mov fp, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)


.type __secondary_data, %object
__secondary_data:
.long .
.long secondary_data
.long __secondary_switched
#endif /* defined(CONFIG_SMP) */






/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 */
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
     domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
     domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
     domain_val(DOMAIN_IO, DOMAIN_CLIENT))
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
b __turn_mmu_on
ENDPROC(__enable_mmu)
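

/*
 * As a sanity check, the domain register value loaded above can be computed
 * by hand from domain_val(dom, type) = type << (2*dom), using the
 * #ifndef CONFIG_IO_36 domain numbers from domain.h (quoted later in this post):
 *
 *   domain_val(DOMAIN_USER,   DOMAIN_MANAGER) = 3 << (2*1) = 0x0c
 *   domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) = 3 << (2*0) = 0x03
 *   domain_val(DOMAIN_TABLE,  DOMAIN_MANAGER) = 3 << (2*0) = 0x03
 *   domain_val(DOMAIN_IO,     DOMAIN_CLIENT)  = 1 << (2*2) = 0x10
 *
 * so r5 = 0x0c | 0x03 | 0x03 | 0x10 = 0x1f.
 */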


/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 */
.align 5
__turn_mmu_on:
mov r0, r0
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
mov r3, r3
mov r3, r3
mov pc, r13
ENDPROC(__turn_mmu_on)




/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8  = machinfo     base address of struct machine_desc
 * r9  = cpuid        processor ID read from coprocessor CP15
 * r10 = procinfo     base address of struct proc_info_list
 *
 * Returns:
 *  r0, r3, r6, r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:     /* create the initial page tables: the minimal first-level (section) mappings needed to turn the MMU on */
       /* .macro pgtbl, rd
       ldr \rd, =(KERNEL_RAM_PADDR - 0x4000)
       .endm
       */
pgtbl r4 @ page table address   /* equivalent to ldr r4, =(KERNEL_RAM_PADDR - 0x4000) ===> r4 = 0x30004000 */


/*
* Clear the 16K level 1 swapper page table
*/
mov r0, r4   /* r0 = 0x30004000 */
mov r3, #0
add r6, r0, #0x4000    /* r6 = 0x30008000 */

/* zero the 16KB (4096 entries * 4 bytes) of memory from 0x30004000 to 0x30008000 */
1: str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
teq r0, r6
bne 1b


ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags   /* r7 = the __cpu_mm_mmu_flags member of struct proc_info_list,
                                                        i.e. 0xc1e = 1100 0001 1110 */


/*
* Create identity mapping for first MB of kernel to
* cater for the MMU enable.  This identity mapping
* will be removed by paging_init().  We use our current program
* counter to determine corresponding section base address.
*/
/* The section-mapping formula is: *(mmu_tlb_base + (virtualaddr >> 20)) = phyaddr_and_permission, where mmu_tlb_base
   is the page-table base address and phyaddr_and_permission is the physical section base address OR'd with the access
   permissions. */
mov r6, pc, lsr #20 @ start of kernel section   /* r6 = 0x300 (pgd_idx) */
orr r3, r7, r6, lsl #20 @ flags + kernel base              /* r3 = 0x30000000 + 0xc1e = 0x30000c1e, where r6 << 20 = 0x30000000 is the
                                                              section base address and 0xc1e encodes the access permissions
                                                              (read/write, write-back). Note that the virtual address mapped here
                                                              equals the physical address: an identity mapping. */
str r3, [r4, r6, lsl #2] @ identity mapping                /* [r4 + (r6 << 2)] = r3: the entry at 0x30004000 + 0x300*4 (each
                                                              descriptor is 4 bytes) is written with 0x30000c1e; in C terms,
                                                              ((unsigned int *)r4)[pgd_idx] = 0x30000c1e */


/*
* Now setup the pagetables for our kernel direct
* mapped region.
*/
add r0, r4,  #(KERNEL_START & 0xff000000) >> 18   /* KERNEL_START = 0xc0008000, so r0 = 0x30004000 + 0x3000 (the byte offset of entry index 0xc00) */
str r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
ldr r6, =(KERNEL_END - 1)
add r0, r0, #4
add r6, r4, r6, lsr #18
1: cmp r0, r6
add r3, r3, #1 << 20
strls r3, [r0], #4
bls 1b
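

/*
 * Roughly what the loop above does, written as C (an illustrative sketch,
 * not kernel source; mmu_flags stands for the 0xc1e value held in r7):
 *
 *   unsigned int *pgd  = (unsigned int *)0x30004000;
 *   unsigned int  idx  = KERNEL_START >> 20;             // 0xc00 for 0xc0008000
 *   unsigned int  desc = 0x30000000 | mmu_flags;         // 0x30000c1e
 *   unsigned int  last = (unsigned int)(KERNEL_END - 1) >> 20;
 *
 *   for (; idx <= last; idx++, desc += 1 << 20)
 *       pgd[idx] = desc;   // map virtual 0xc0000000 + n MB -> physical 0x30000000 + n MB
 *
 * i.e. every 1MB section covered by the kernel image gets one section
 * descriptor in the first-level table.
 */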


#ifdef CONFIG_XIP_KERNEL
/*
* Map some ram to cover our .data and .bss areas.
*/
orr r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
.if (KERNEL_RAM_PADDR & 0x00f00000)
orr r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
.endif
add r0, r4,  #(KERNEL_RAM_VADDR & 0xff000000) >> 18
str r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
ldr r6, =(_end - 1)
add r0, r0, #4
add r6, r4, r6, lsr #18
1: cmp r0, r6
add r3, r3, #1 << 20
strls r3, [r0], #4
bls 1b
#endif


/*
* Then map first 1MB of ram in case it contains our boot params.
*/
add r0, r4, #PAGE_OFFSET >> 18
orr r6, r7, #(PHYS_OFFSET & 0xff000000)
.if (PHYS_OFFSET & 0x00f00000)
orr r6, r6, #(PHYS_OFFSET & 0x00f00000)
.endif
str r6, [r0]


#ifdef CONFIG_DEBUG_LL
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
/*
* Map in IO space for serial debugging.
* This allows debug messages to be output
* via a serial console before paging_init.
*/
ldr r3, [r8, #MACHINFO_PGOFFIO]
add r0, r4, r3
rsb r3, r3, #0x4000 @ PTRS_PER_PGD*sizeof(long)
cmp r3, #0x0800 @ limit to 512MB
movhi r3, #0x0800
add r6, r0, r3
ldr r3, [r8, #MACHINFO_PHYSIO]
orr r3, r3, r7
1: str r3, [r0], #4
add r3, r3, #1 << 20
teq r0, r6
bne 1b
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
/*
* If we're using the NetWinder or CATS, we also need to map
* in the 16550-type serial port for the debug messages
*/
add r0, r4, #0xff000000 >> 18
orr r3, r7, #0x7c000000
str r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
/*
* Map in screen at 0x02000000 & SCREEN2_BASE
* Similar reasons here - for debug.  This is
* only for Acorn RiscPC architectures.
*/
add r0, r4, #0x02000000 >> 18
orr r3, r7, #0x02000000
str r3, [r0]
add r0, r4, #0xd8000000 >> 18
str r3, [r0]
#endif
#endif
mov pc, lr
ENDPROC(__create_page_tables)
.ltorg


#include "head-common.S"
/*****************************************************************************************************************************************/
/* head-common.S */
/*
 *  linux/arch/arm/kernel/head-common.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */


#define ATAG_CORE 0x54410001
#define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2)


.type __switch_data, %object
__switch_data:
.long __mmap_switched
.long __data_loc @ r4
.long _data @ r5
.long __bss_start @ r6
.long _end @ r7
.long processor_id @ r4
.long __machine_arch_type @ r5
.long __atags_pointer @ r6
.long cr_alignment @ r7
.long init_thread_union + THREAD_START_SP @ sp


/*
 * The following fragment of code is executed with the MMU on in MMU mode,
 * and uses absolute addresses; this is not position independent.
 *
 *  r0  = cp#15 control register
 *  r1  = machine ID
 *  r2  = atags pointer
 *  r9  = processor ID
 */
__mmap_switched:
adr r3, __switch_data + 4


ldmia r3!, {r4, r5, r6, r7}
cmp r4, r5 @ Copy data segment if needed
1: cmpne r5, r6
ldrne fp, [r4], #4
strne fp, [r5], #4
bne 1b


mov fp, #0 @ Clear BSS (and zero fp)
1: cmp r6, r7
strcc fp, [r6],#4
bcc 1b


ldmia r3, {r4, r5, r6, r7, sp}
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
str r2, [r6] @ Save atags pointer
bic r4, r0, #CR_A @ Clear 'A' bit
stmia r7, {r0, r4} @ Save control register values
b start_kernel                   /* 第一个C函数,启动内核 */
ENDPROC(__mmap_switched)
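

/*
 * In C terms, __mmap_switched does roughly the following before entering
 * start_kernel (a sketch; the symbols are those listed in __switch_data):
 *
 *   if (__data_loc != _data)                      // XIP kernel: copy .data to RAM
 *       memcpy(_data, __data_loc, __bss_start - _data);
 *   memset(__bss_start, 0, _end - __bss_start);   // zero .bss
 *   processor_id        = r9;                     // CPU ID read earlier
 *   __machine_arch_type = r1;                     // machine number from the bootloader
 *   __atags_pointer     = r2;                     // atags pointer from the bootloader
 *   cr_alignment        = r0;                     // control register value (and r0 & ~CR_A)
 *   start_kernel();
 */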


/*
 * Exception handling.  Something went wrong and we can't proceed.  We
 * ought to tell the user, but since we don't have any guarantee that
 * we're even running on the right architecture, we do virtually nothing.
 *
 * If CONFIG_DEBUG_LL is set we try to print out something about the error
 * and hope for the best (useful if bootloader fails to pass a proper
 * machine ID for example).
 */
__error_p:   /* error path taken when the CPU ID read from CP15 matches no ID the kernel knows, i.e. the kernel does not support this CPU */
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1       /* r0 points to the string "\nError: unrecognized/unsupported processor variant (0x" */
bl printascii      /* print the string */
mov r0, r9
bl printhex8
adr r0, str_p2
bl printascii
b __error
str_p1: .asciz "\nError: unrecognized/unsupported processor variant (0x"
str_p2: .asciz ").\n"
.align
#endif
ENDPROC(__error_p)


__error_a:      /* error path taken when the board (machine type) is not supported */
#ifdef CONFIG_DEBUG_LL
mov r4, r1 @ preserve machine ID
adr r0, str_a1
bl printascii
mov r0, r4
bl printhex8
adr r0, str_a2
bl printascii
adr r3, 3f
ldmia r3, {r4, r5, r6} @ get machine desc list
sub r4, r3, r4 @ get offset between virt&phys
add r5, r5, r4 @ convert virt addresses to
add r6, r6, r4 @ physical address space
1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
bl printhex8
mov r0, #'\t'
bl printch
ldr     r0, [r5, #MACHINFO_NAME] @ get machine name
add r0, r0, r4
bl printascii
mov r0, #'\n'
bl printch
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
adr r0, str_a3
bl printascii
b __error
ENDPROC(__error_a)


str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#endif


__error:
#ifdef CONFIG_ARCH_RPC
/*
 * Turn the screen red on a error - RiscPC only.
 */
mov r0, #0x02000000
mov r3, #0x11
orr r3, r3, r3, lsl #8
orr r3, r3, r3, lsl #16
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
#endif
1: mov r0, r0
b 1b
ENDPROC(__error)




/*
 * Read processor ID register (CP#15, CR0), and look up in the linker-built
 * supported processor list.  Note that we can't use the absolute addresses
 * for the __proc_info lists since we aren't running with the MMU on
 * (and therefore, we are not in the correct address space).  We have to
 * calculate the offset.
 *
 * r9 = cpuid
 * Returns:
 * r3, r4, r6 corrupted
 * r5 = proc_info pointer in physical address space
 * r9 = cpuid (preserved)
 */
__lookup_processor_type:   /* look up the CPU type: check whether the CPU ID matches an entry the kernel knows */


adr r3, 3f                         /* r3 = address of label 3 below, a physical address */

ldmda r3, {r5 - r7}       /* ldmda loads the words ending at the address in r3 into r5-r7 ("da" = decrement after:
                             the address decreases after each transfer). Afterwards r5 = __proc_info_begin,
                             r6 = __proc_info_end, r7 = . (the address of label 3); all of these are virtual addresses */

sub r3, r3, r7 @ get offset between virt&phys   /* compute the offset between virtual and physical addresses (r3 < 0 here) */

add r5, r5, r3 @ convert virt addresses to       /* r5 = physical address of __proc_info_begin */

add r6, r6, r3 @ physical address space            /* r6 = physical address of __proc_info_end */

1: ldmia r5, {r3, r4} @ value, mask                     /* defined in arch/arm/mm/proc-arm920.S:
                                                           r3 = cpu_val = 0x41009200, r4 = cpu_mask = 0xff00fff0 */

and r4, r4, r9 @ mask wanted bits                     /* r4 = cpu_mask & cpuid = 0xff00fff0 & 0x41129200 = 0x41009200 = cpu_val */
teq r3, r4
beq 2f             /* if the masked CPU ID equals cpu_val, a match was found: branch to label 2 */
add r5, r5, #PROC_INFO_SZ  @ sizeof(proc_info_list)   /* r5 points to the next proc_info_list structure */
cmp r5, r6                           /* have all proc_info_list structures been checked? */
blo 1b                                 /* no, keep searching */
mov r5, #0 @ unknown processor
2: mov pc, lr                            /* return; if no matching proc_info_list was found, r5 = 0 */
ENDPROC(__lookup_processor_type)
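

/*
 * The search above, rendered as C for readability (a sketch, not kernel
 * source; the *_phys pointers stand for the physically-addressed list):
 *
 *   struct proc_info_list *p;
 *   for (p = proc_info_begin_phys; p < proc_info_end_phys; p++)
 *       if ((r9 & p->cpu_mask) == p->cpu_val)   // r9 = CPU ID from CP15
 *           return p;                           // r5 = matching procinfo
 *   return NULL;                                // r5 = 0: unknown CPU
 *
 * For the ARM920T entry: 0x41129200 & 0xff00fff0 = 0x41009200 = cpu_val,
 * so the S3C2410/S3C2440 CPU ID matches.
 */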


/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_processor_type)    /* C-callable wrapper around the function above */
stmfd sp!, {r4 - r7, r9, lr}
mov r9, r0
bl __lookup_processor_type
mov r0, r5
ldmfd sp!, {r4 - r7, r9, pc}
ENDPROC(lookup_processor_type)


/*
 * Look in and arch/arm/kernel/arch.[ch] for
 * more information about the __proc_info and __arch_info structures.
 */
.long __proc_info_begin
.long __proc_info_end
3: .long .
.long __arch_info_begin
.long __arch_info_end


/*
 In the linker script:

  __proc_info_begin = .;
  *(.proc.info.init)
  __proc_info_end = .;

 Defined in include/asm-arm/procinfo.h:
  struct proc_info_list {
	unsigned int cpu_val;
	unsigned int cpu_mask;
	unsigned long __cpu_mm_mmu_flags; // used by head.S
	unsigned long __cpu_io_mmu_flags; // used by head.S
	unsigned long __cpu_flush; // used by head.S
	const char *arch_name;
	const char *elf_name;
	unsigned int elf_hwcap;
	const char *cpu_name;
	struct processor *proc;
	struct cpu_tlb_fns *tlb;
	struct cpu_user_fns *user;
	struct cpu_cache_fns *cache;
  };

 Defined in arch/arm/mm/proc-arm920.S:
  .section ".proc.info.init", #alloc, #execinstr

  .type __arm920_proc_info,#object
  __arm920_proc_info:
  .long 0x41009200    // cpu_val; the CPU ID of both the s3c2410 and the s3c2440 is 0x41129200
  .long 0xff00fff0    // cpu_mask
  ......

 Also in the linker script:

  __arch_info_begin = .;
  *(.arch.info.init)
  __arch_info_end = .;

 Defined in include/asm-arm/mach/arch.h:
  #define MACHINE_START(_type,_name) \
  static const struct machine_desc __mach_desc_##_type \
   __used \
   __attribute__((__section__(".arch.info.init"))) = { \
	.nr = MACH_TYPE_##_type, \
	.name = _name,

  #define MACHINE_END \
  };

 Used in arch/arm/mach-s3c2410/mach-smdk2410.c:
  MACHINE_START(SMDK2410, "SMDK2410") // @TODO: request a new identifier and switch to SMDK2410
  // Maintainer: Jonas Dietsche
	.phys_io = S3C2410_PA_UART,
	.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
	.boot_params = S3C2410_SDRAM_PA + 0x100,
	.map_io = smdk2410_map_io,
	.init_irq = s3c24xx_init_irq,
	.init_machine = smdk2410_init,
	.timer = &s3c24xx_timer,
  MACHINE_END

 Expanding the macro gives:

  static const struct machine_desc __mach_desc_SMDK2410
   __used
   __attribute__((__section__(".arch.info.init"))) = {    // force the entry into the .arch.info.init section
	.nr = MACH_TYPE_SMDK2410,     // MACH_TYPE_SMDK2410 is 193; u-boot defines the same value
	.name = "SMDK2410",
	.phys_io = S3C2410_PA_UART,
	.io_pg_offst = (((u32)S3C24XX_VA_UART) >> 18) & 0xfffc,
	.boot_params = S3C2410_SDRAM_PA + 0x100,
	.map_io = smdk2410_map_io,
	.init_irq = s3c24xx_init_irq,
	.init_machine = smdk2410_init,
	.timer = &s3c24xx_timer,
  };
 */




/*
 * Lookup machine architecture in the linker-build list of architectures.
 * Note that we can't use the absolute addresses for the __arch_info
 * lists since we aren't running with the MMU on (and therefore, we are
 * not in the correct address space).  We have to calculate the offset.
 *
 *  r1 = machine architecture number
 * Returns:
 *  r3, r4, r6 corrupted
 *  r5 = mach_info pointer in physical address space
 */
__lookup_machine_type:
adr r3, 3b                           /* r3 = physical address of label 3 above */
ldmia r3, {r4, r5, r6}      /* r4 = . (the virtual address of label 3), r5 = __arch_info_begin, r6 = __arch_info_end;
                               the latter two symbols are defined in the linker script */
sub r3, r3, r4 @ get offset between virt&phys   /* compute the offset between virtual and physical addresses */
add r5, r5, r3 @ convert virt addresses to       /* r5 = physical address of __arch_info_begin */
add r6, r6, r3 @ physical address space             /* r6 = physical address of __arch_info_end */
1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type             /* MACHINFO_TYPE = 0, so this fetches the machine_desc member .nr,
                                                                  e.g. MACH_TYPE_SMDK2410 (defined in mach-types.h) */
teq r3, r1 @ matches loader number?         /* r1 is the machine ID passed in by the bootloader (362 for an s3c2440 board) */
beq 2f @ found     /* if the bootloader's ID equals .nr, the board is supported: return */
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc   /* r5 points to the next machine_desc structure */
cmp r5, r6     /* have all machine_desc structures been checked? */
blo 1b          /* no, keep searching */
mov r5, #0   @ unknown machine    /* search finished with no matching machine_desc, so r5 = 0 */
2: mov pc, lr         /* return */
ENDPROC(__lookup_machine_type)
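

/*
 * The same search pattern in C (a sketch, not kernel source):
 *
 *   struct machine_desc *m;
 *   for (m = arch_info_begin_phys; m < arch_info_end_phys; m++)
 *       if (m->nr == r1)          // r1 = machine number from the bootloader
 *           return m;             // r5 = matching machine_desc
 *   return NULL;                  // r5 = 0: unknown machine
 */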


/*
 * This provides a C-API version of the above function.
 */
ENTRY(lookup_machine_type)
stmfd sp!, {r4 - r6, lr}
mov r1, r0
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}
ENDPROC(lookup_machine_type)


/* Determine validity of the r2 atags pointer.  The heuristic requires
 * that the pointer be aligned, in the first 16k of physical RAM and
 * that the ATAG_CORE marker is first and present.  Future revisions
 * of this function may be more lenient with the physical address and
 * may also be able to move the ATAGS block if necessary.
 *
 * r8  = machinfo
 *
 * Returns:
 *  r2 either valid atags pointer, or zero
 *  r5, r6 corrupted
 */
__vet_atags:
tst r2, #0x3 @ aligned?
bne 1f


ldr r5, [r2, #0] @ is first tag ATAG_CORE?
subs r5, r5, #ATAG_CORE_SIZE
bne 1f
ldr r5, [r2, #4]
ldr r6, =ATAG_CORE
cmp r5, r6
bne 1f


mov pc, lr @ atag pointer is ok


1: mov r2, #0
mov pc, lr
ENDPROC(__vet_atags)
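

/*
 * What __vet_atags verifies, in C (a sketch; the tag layout follows
 * include/asm-arm/setup.h):
 *
 *   int atags_ok(unsigned int *r2)
 *   {
 *       if ((unsigned long)r2 & 3)       // pointer must be word aligned
 *           return 0;
 *       if (r2[0] != ATAG_CORE_SIZE)     // first tag must have the ATAG_CORE size...
 *           return 0;
 *       if (r2[1] != ATAG_CORE)          // ...and the ATAG_CORE tag value
 *           return 0;
 *       return 1;                        // r2 is a plausible atags pointer
 *   }
 */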
/*****************************************************************************************************************************************/
/* proc-arm920.S */
/*
 *  linux/arch/arm/mm/proc-arm920.S: MMU functions for ARM920
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm920.
 *
 *  CONFIG_CPU_ARM920_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include
#include "proc-macros.S"


/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE 32


/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS 8


/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES 64


/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT 65536




.text
/*
 * cpu_arm920_proc_init()
 */
ENTRY(cpu_arm920_proc_init)
mov pc, lr


/*
 * cpu_arm920_proc_fin()
 */
ENTRY(cpu_arm920_proc_fin)
stmfd sp!, {lr}
mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
msr cpsr_c, ip
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
bl arm920_flush_kern_cache_all
#else
bl v4wt_flush_kern_cache_all
#endif
mrc p15, 0, r0, c1, c0, 0 @ ctrl register
bic r0, r0, #0x1000 @ ...i............
bic r0, r0, #0x000e @ ............wca.
mcr p15, 0, r0, c1, c0, 0 @ disable caches
ldmfd sp!, {pc}


/*
 * cpu_arm920_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
.align 5
ENTRY(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
mcr p15, 0, ip, c7, c10, 4 @ drain WB
#ifdef CONFIG_MMU
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mrc p15, 0, ip, c1, c0, 0 @ ctrl register
bic ip, ip, #0x000f @ ............wcam
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0


/*
 * cpu_arm920_do_idle()
 */
.align 5
ENTRY(cpu_arm920_do_idle)
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
mov pc, lr




#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH


/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm920_flush_user_cache_all)
/* FALLTHROUGH */


/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm920_flush_kern_cache_all)
mov r2, #VM_EXEC
mov ip, #0
__flush_whole_cache:
mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean+invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr


/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags for address space
 */
ENTRY(arm920_flush_user_cache_range)
mov ip, #0
sub r3, r1, r0 @ calculate total size
cmp r3, #CACHE_DLIMIT
bhs __flush_whole_cache


1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
tst r2, #VM_EXEC
mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
tst r2, #VM_EXEC
mcrne p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr


/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(arm920_coherent_kern_range)
/* FALLTHROUGH */


/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(arm920_coherent_user_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr


/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - page aligned address
 */
ENTRY(arm920_flush_kern_dcache_page)
add r1, r0, #PAGE_SZ
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr


/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm920_dma_inv_range)
tst r0, #CACHE_DLINESIZE - 1
bic r0, r0, #CACHE_DLINESIZE - 1
mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
tst r1, #CACHE_DLINESIZE - 1
mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
1: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr


/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm920_dma_clean_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr


/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(arm920_dma_flush_range)
bic r0, r0, #CACHE_DLINESIZE - 1
1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
add r0, r0, #CACHE_DLINESIZE
cmp r0, r1
blo 1b
mcr p15, 0, r0, c7, c10, 4 @ drain WB
mov pc, lr


ENTRY(arm920_cache_fns)
.long arm920_flush_kern_cache_all
.long arm920_flush_user_cache_all
.long arm920_flush_user_cache_range
.long arm920_coherent_kern_range
.long arm920_coherent_user_range
.long arm920_flush_kern_dcache_page
.long arm920_dma_inv_range
.long arm920_dma_clean_range
.long arm920_dma_flush_range


#endif




ENTRY(cpu_arm920_dcache_clean_area)
1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
add r0, r0, #CACHE_DLINESIZE
subs r1, r1, #CACHE_DLINESIZE
bhi 1b
mov pc, lr


/* =============================== PageTable ============================== */


/*
 * cpu_arm920_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
.align 5
ENTRY(cpu_arm920_switch_mm)
#ifdef CONFIG_MMU
mov ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
#else
@ && 'Clean & Invalidate whole DCache'
@ && Re-written to use Index Ops.
@ && Uses registers r1, r3 and ip


mov r1, #(CACHE_DSEGMENTS - 1) << 5 @ 8 segments
1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0
#endif
mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mcr p15, 0, r0, c2, c0, 0 @ load page table pointer
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
#endif
mov pc, lr


/*
 * cpu_arm920_set_pte(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
.align 5
ENTRY(cpu_arm920_set_pte_ext)
#ifdef CONFIG_MMU
armv3_set_pte_ext
mov r0, r0
mcr p15, 0, r0, c7, c10, 1 @ clean D entry
mcr p15, 0, r0, c7, c10, 4 @ drain WB
#endif
mov pc, lr


__INIT


.type __arm920_setup, #function
__arm920_setup:
mov r0, #0     
mcr p15, 0, r0, c7, c7 @ invalidate I,D caches on v4   /* invalidate the instruction and data caches */
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer on v4  /* drain the write buffer */
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7 @ invalidate I,D TLBs on v4      /* invalidate the entire TLB */
#endif
adr r5, arm920_crval
ldmia r5, {r5, r6}                                                        /* r5 = 0x00003f3f, r6 = 0x00003135 */
mrc p15, 0, r0, c1, c0 @ get control register v4         /* read register c1 (the control register) of coprocessor cp15 */
bic r0, r0, r5                                                                  /* clear bits [13:8] and [5:0] */
orr r0, r0, r6                                                                  /* set bits [13:12], [8], [5:4], [2] and [0] */
mov pc, lr
.size __arm920_setup, . - __arm920_setup


/*
*  R
* .RVI ZFRS BLDP WCAM
* ..11 0001 ..11 0101

*/
.type arm920_crval, #object
arm920_crval:
crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130   /* the crval macro expands to ".word \clear" and ".word \mmuset",
                                                                 so arm920_crval assembles to .word 0x00003f3f, .word 0x00003135 */


__INITDATA


/*
 * Purpose : Function pointers used to access above functions - all calls
 *     come through these
 */
.type arm920_processor_functions, #object
arm920_processor_functions:
.word v4t_early_abort
.word pabort_noifar
.word cpu_arm920_proc_init
.word cpu_arm920_proc_fin
.word cpu_arm920_reset
.word   cpu_arm920_do_idle
.word cpu_arm920_dcache_clean_area
.word cpu_arm920_switch_mm
.word cpu_arm920_set_pte_ext
.size arm920_processor_functions, . - arm920_processor_functions


.section ".rodata"


.type cpu_arch_name, #object
cpu_arch_name:
.asciz "armv4t"
.size cpu_arch_name, . - cpu_arch_name


.type cpu_elf_name, #object
cpu_elf_name:
.asciz "v4"
.size cpu_elf_name, . - cpu_elf_name


.type cpu_arm920_name, #object
cpu_arm920_name:
.asciz "ARM920T"
.size cpu_arm920_name, . - cpu_arm920_name


.align


.section ".proc.info.init", #alloc, #execinstr    /* 设置段的属性为.proc.info.init,在连接脚本中会将所有有此 
                                                                     段属性的段全部组织在一起*/


.type __arm920_proc_info,#object
__arm920_proc_info:   /* the proc_info_list structure for the ARM920T (s3c2410/s3c2440) */
.long 0x41009200     /* cpu_val */
.long 0xff00fff0     /* cpu_mask */
.long   PMD_TYPE_SECT | \
PMD_SECT_BUFFERABLE | \
PMD_SECT_CACHEABLE | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ         /* = 0xc1e = 1100 0001 1110 ===> memory access permissions: read/write, write-back cacheable */
.long   PMD_TYPE_SECT | \
PMD_BIT4 | \
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ         /* = 0xc12 = 1100 0001 0010 ===> memory access permissions: read/write, no D-cache, no write buffer */
b __arm920_setup
.long cpu_arch_name          /* = "armv4t" */
.long cpu_elf_name             /* = "v4" */
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB   /* = 7 */
.long cpu_arm920_name        /* = "ARM920T" */
.long arm920_processor_functions
.long v4wbi_tlb_fns
.long v4wb_user_fns
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
.long arm920_cache_fns
#else
.long v4wt_cache_fns
#endif
.size __arm920_proc_info, . - __arm920_proc_info



/*****************************************************************************************************************************************/
/* domain.h: access-domain (permission) definitions */
/*
 *  arch/arm/include/asm/domain.h
 *
 *  Copyright (C) 1999 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_PROC_DOMAIN_H
#define __ASM_PROC_DOMAIN_H


/*
 * Domain numbers
 *
 *  DOMAIN_IO     - domain 2 includes all IO only
 *  DOMAIN_USER   - domain 1 includes all user memory only
 *  DOMAIN_KERNEL - domain 0 includes all kernel memory only
 *
 * The domain numbering depends on whether we support 36 physical
 * address for I/O or not.  Addresses above the 32 bit boundary can
 * only be mapped using supersections and supersections can only
 * be set for domain 0.  We could just default to DOMAIN_IO as zero,
 * but there may be systems with supersection support and no 36-bit
 * addressing.  In such cases, we want to map system memory with
 * supersections to reduce TLB misses and footprint.
 *
 * 36-bit addressing and supersections are only available on
 * CPUs based on ARMv6+ or the Intel XSC3 core.
 */
#ifndef CONFIG_IO_36
#define DOMAIN_KERNEL 0
#define DOMAIN_TABLE 0
#define DOMAIN_USER 1
#define DOMAIN_IO 2
#else
#define DOMAIN_KERNEL 2   /* domain used for kernel space: storage domain 2, manager permission (internal high-speed SRAM,
                             internal mini-cache, RAM, and ROM/flash space) */
#define DOMAIN_TABLE 2   /* domain used for page tables: storage domain 2, manager permission */
#define DOMAIN_USER 1   /* domain used for user space: storage domain 1, client permission (low and high
                           interrupt-vector space) */
#define DOMAIN_IO 0          /* domain used for I/O space: storage domain 0, client permission (device space) */
#endif


/*
 * Domain types
 */
#define DOMAIN_NOACCESS 0  /* no access */
#define DOMAIN_CLIENT 1         /* client permission: accesses are checked against the R and S bits in the CP15 c1
                                   control register and the AP access-control bits in the page-table entry */
#define DOMAIN_MANAGER 3 /* manager permission: accesses ignore the R and S bits in the CP15 c1 control register
                            and the AP access-control bits in the first-level page-table entry */


#define domain_val(dom,type) ((type) << (2*(dom)))


#ifndef __ASSEMBLY__


#ifdef CONFIG_MMU
#define set_domain(x) \
do { \
__asm__ __volatile__( \
"mcr p15, 0, %0, c3, c0 @ set domain" \
 : : "r" (x)); \
isb(); \
} while (0)


#define modify_domain(dom,type) \
do { \
struct thread_info *thread = current_thread_info(); \
unsigned int domain = thread->cpu_domain; \
domain &= ~domain_val(dom, DOMAIN_MANAGER); \
thread->cpu_domain = domain | domain_val(dom, type); \
set_domain(thread->cpu_domain); \
} while (0)


#else
#define set_domain(x) do { } while (0)
#define modify_domain(dom,type) do { } while (0)
#endif


#endif
#endif /* !__ASSEMBLY__ */
/*****************************************************************************************************************************************/
/* page.h */
/*
 *  arch/arm/include/asm/page.h
 *
 *  Copyright (C) 1995-2003 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PAGE_H
#define _ASMARM_PAGE_H


/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12       /* Linux uses 4KB pages, so PAGE_SHIFT is 12: the number of bits of page-internal offset in a virtual address */
#define PAGE_SIZE (1UL << PAGE_SHIFT)  /* page size = 1 << 12 = 4KB (physical memory is divided into discrete units called pages;
                                          many of the kernel's memory operations work on single pages) */
#define PAGE_MASK (~(PAGE_SIZE-1))  /* page mask = 0xfffff000 */
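

/*
 * A quick illustration of how these three macros relate (plain C; the
 * address is arbitrary):
 *
 *   unsigned long addr = 0xc0008123;
 *   unsigned long pfn  = addr >> PAGE_SHIFT;   // 0xc0008: page frame number
 *   unsigned long off  = addr & ~PAGE_MASK;    // 0x123: offset within the page
 *   unsigned long base = addr & PAGE_MASK;     // 0xc0008000: page base address
 */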


#ifndef __ASSEMBLY__


#ifndef CONFIG_MMU


#include "page-nommu.h"


#else


#include <asm/glue.h>


/*
 * User Space Model
 * ================
 *
 * This section selects the correct set of functions for dealing with
 * page-based copying and clearing for user space for the particular
 * processor(s) we're building for.
 *
 * We have the following to choose from:
 *  v3 - ARMv3
 *  v4wt - ARMv4 with writethrough cache, without minicache
 *  v4wb - ARMv4 with writeback cache, without minicache
 *  v4_mc - ARMv4 with minicache
 *  xscale - Xscale
 *  xsc3 - XScalev3
 */
#undef _USER
#undef MULTI_USER


#ifdef CONFIG_CPU_COPY_V3
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER v3
#endif
#endif


#ifdef CONFIG_CPU_COPY_V4WT
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER v4wt
#endif
#endif


#ifdef CONFIG_CPU_COPY_V4WB
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER v4wb
#endif
#endif


#ifdef CONFIG_CPU_COPY_FEROCEON
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER feroceon
#endif
#endif


#ifdef CONFIG_CPU_COPY_FA
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER fa
#endif
#endif


#ifdef CONFIG_CPU_SA1100
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER v4_mc
#endif
#endif


#ifdef CONFIG_CPU_XSCALE
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER xscale_mc
#endif
#endif


#ifdef CONFIG_CPU_XSC3
#ifdef _USER
#define MULTI_USER 1
#else
#define _USER xsc3_mc
#endif
#endif


#ifdef CONFIG_CPU_COPY_V6
#define MULTI_USER 1
#endif


#if !defined(_USER) && !defined(MULTI_USER)
#error Unknown user operations model
#endif


struct page;


struct cpu_user_fns {
void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
unsigned long vaddr);
};


#ifdef MULTI_USER
extern struct cpu_user_fns cpu_user;


#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage
#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage


#else


#define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage)
#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage)


extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr);
#endif


#define clear_user_highpage(page,vaddr) \
__cpu_clear_user_highpage(page, vaddr)


#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#define copy_user_highpage(to,from,vaddr,vma) \
__cpu_copy_user_highpage(to, from, vaddr)


#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);


#undef STRICT_MM_TYPECHECKS


#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pte; } pte_t;  /* second-level page-table entry */
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;  /* first-level page-table entry; pgd_t is 8 bytes, so one pgd covers two
                                                    first-level descriptors and therefore two second-level page tables, each
                                                    of which exists in two copies: one defined by Linux and one by the hardware */
typedef struct { unsigned long pgprot; } pgprot_t;


#define pte_val(x)      ((x).pte)
#define pmd_val(x)      ((x).pmd)
#define pgd_val(x) ((x).pgd[0])
#define pgprot_val(x)   ((x).pgprot)


#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )


#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;


#define pte_val(x)      (x)
#define pmd_val(x)      (x)
#define pgd_val(x) ((x)[0])
#define pgprot_val(x)   (x)


#define __pte(x)        (x)
#define __pmd(x)        (x)
#define __pgprot(x)     (x)


#endif /* STRICT_MM_TYPECHECKS */


#endif /* CONFIG_MMU */


typedef struct page *pgtable_t;


#include <asm/memory.h>


#endif /* !__ASSEMBLY__ */


#define VM_DATA_DEFAULT_FLAGS \
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#include <asm-generic/page.h>

#endif
/*****************************************************************************************************************************************/
/* pgtable.h */
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H


#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>


#ifndef CONFIG_MMU


#include "pgtable-nommu.h"


#else


#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>


/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * Note that platforms may override VMALLOC_START, but they must provide
 * VMALLOC_END.  VMALLOC_END defines the (exclusive) limit of this space,
 * which may not overlap IO space.
 */
#ifndef VMALLOC_START
#define VMALLOC_OFFSET (8*1024*1024)  /* gap left between the end of the direct physical-memory mapping and the start of the vmalloc area */
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) /* start of the address range used by vmalloc */
#endif


/*
 * Hardware-wise, we have a two level page table structure, where the first
 * level has 4096 entries, and the second level has 256 entries.  Each entry
 * is one 32-bit word.  Most of the bits in the second level entry are used
 * by hardware, and there aren't any "accessed" and "dirty" bits.
 *
 * Linux on the other hand has a three level page table structure, which can
 * be wrapped to fit a two level page table structure easily - using the PGD
 * and PTE only.  However, Linux also expects one "PTE" table per page, and
 * at least a "dirty" bit.
 *
 * Therefore, we tweak the implementation slightly - we tell Linux that we
 * have 2048 entries in the first level, each of which is 8 bytes (iow, two
 * hardware pointers to the second level.)  The second level contains two
 * hardware PTE tables arranged contiguously, followed by Linux versions
 * which contain the state information Linux needs.  We, therefore, end up
 * with 512 entries in the "PTE" level.
 *
 * This leads to the page tables having the following layout:
 *
 *    pgd             pte
 * |        |
 * +--------+ +0
 * |        |-----> +------------+ +0
 * +- - - - + +4    |  h/w pt 0  |
 * |        |-----> +------------+ +1024
 * +--------+ +8    |  h/w pt 1  |
 * |        |       +------------+ +2048
 * +- - - - +       | Linux pt 0 |
 * |        |       +------------+ +3072
 * +--------+       | Linux pt 1 |
 * |        |       +------------+ +4096
 *
 * See L_PTE_xxx below for definitions of bits in the "Linux pt", and
 * PTE_xxx for definitions of bits appearing in the "h/w pt".
 *
 * PMD_xxx definitions refer to bits in the first level page table.
 *
 * The "dirty" bit is emulated by only granting hardware write permission
 * iff the page is marked "writable" and "dirty" in the Linux PTE.  This
 * means that a write to a clean page will cause a permission fault, and
 * the Linux MM layer will mark the page dirty via handle_pte_fault().
 * For the hardware to notice the permission change, the TLB entry must
 * be flushed, and ptep_set_access_flags() does that for us.
 *
 * The "accessed" or "young" bit is emulated by a similar method; we only
 * allow accesses to the page if the "young" bit is set.  Accesses to the
 * page will cause a fault, and handle_pte_fault() will set the young bit
 * for us as long as the page is marked present in the corresponding Linux
 * PTE entry.  Again, ptep_set_access_flags() will ensure that the TLB is
 * up to date.
 *
 * However, when the "young" bit is cleared, we deny access to the page
 * by clearing the hardware PTE.  Currently Linux does not flush the TLB
 * for us in this case, which means the TLB will retain the translation
 * until either the TLB entry is evicted under pressure, or a context
 * switch which changes the user space mapping occurs.
 */
#define PTRS_PER_PTE 512    /* number of entries in one page table (PTE = page table entry) */
#define PTRS_PER_PMD 1        /* number of entries in one page middle directory (PMD) */
#define PTRS_PER_PGD 2048  /* number of entries in the page global directory (PGD) */


/*
 * PMD_SHIFT determines the size of the area a second-level page table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#define PMD_SHIFT 21
#define PGDIR_SHIFT 21


#define LIBRARY_TEXT_START 0x0c000000


#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);


#define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte))
#define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd))
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
#endif /* !__ASSEMBLY__ */


#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))


/*
 * This is the lowest virtual address we can permit any user space
 * mapping to be mapped at.  This is particularly important for
 * non-high vector CPUs.
 */
#define FIRST_USER_ADDRESS PAGE_SIZE


#define FIRST_USER_PGD_NR 1
#define USER_PTRS_PER_PGD ((TASK_SIZE/PGDIR_SIZE) - FIRST_USER_PGD_NR)


/*
 * section address mask and size definitions.
 */
#define SECTION_SHIFT 20
#define SECTION_SIZE (1UL << SECTION_SHIFT)
#define SECTION_MASK (~(SECTION_SIZE-1))


/*
 * ARMv6 supersection address mask and size definitions.
 */
#define SUPERSECTION_SHIFT 24
#define SUPERSECTION_SIZE (1UL << SUPERSECTION_SHIFT)
#define SUPERSECTION_MASK (~(SUPERSECTION_SIZE-1))


/*
 * "Linux" PTE definitions.
 *
 * We keep two sets of PTEs - the hardware and the linux version.
 * This allows greater flexibility in the way we map the Linux bits
 * onto the hardware tables, and allows us to have YOUNG and DIRTY
 * bits.
 *
 * The PTE table pointer refers to the hardware entries; the "Linux"
 * entries are stored 1024 bytes below.
 */
#define L_PTE_PRESENT (1 << 0)
#define L_PTE_FILE (1 << 1) /* only when !PRESENT */
#define L_PTE_YOUNG (1 << 1)
#define L_PTE_BUFFERABLE (1 << 2) /* obsolete, matches PTE */
#define L_PTE_CACHEABLE (1 << 3) /* obsolete, matches PTE */
#define L_PTE_DIRTY (1 << 6)
#define L_PTE_WRITE (1 << 7)
#define L_PTE_USER (1 << 8)
#define L_PTE_EXEC (1 << 9)
#define L_PTE_SHARED (1 << 10) /* shared(v6), coherent(xsc3) */


/*
 * These are the memory types, defined to be compatible with
 * pre-ARMv6 CPUs cacheable and bufferable bits:   XXCB
 */
#define L_PTE_MT_UNCACHED (0x00 << 2) /* 0000 */
#define L_PTE_MT_BUFFERABLE (0x01 << 2) /* 0001 */
#define L_PTE_MT_WRITETHROUGH (0x02 << 2) /* 0010 */
#define L_PTE_MT_WRITEBACK (0x03 << 2) /* 0011 */
#define L_PTE_MT_MINICACHE (0x06 << 2) /* 0110 (sa1100, xscale) */
#define L_PTE_MT_WRITEALLOC (0x07 << 2) /* 0111 */
#define L_PTE_MT_DEV_SHARED (0x04 << 2) /* 0100 */
#define L_PTE_MT_DEV_NONSHARED (0x0c << 2) /* 1100 */
#define L_PTE_MT_DEV_WC (0x09 << 2) /* 1001 */
#define L_PTE_MT_DEV_CACHED (0x0b << 2) /* 1011 */
#define L_PTE_MT_MASK (0x0f << 2)


#ifndef __ASSEMBLY__


/*
 * The pgprot_* and protection_map entries will be fixed up in runtime
 * to include the cachable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG


extern pgprot_t pgprot_user;
extern pgprot_t pgprot_kernel;


#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))


#define PAGE_NONE pgprot_user
#define PAGE_SHARED _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE)
#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define PAGE_COPY _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_READONLY _MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_EXEC)
#define PAGE_KERNEL pgprot_kernel
#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_kernel, L_PTE_EXEC)


#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT)
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE)
#define __PAGE_SHARED_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_WRITE | L_PTE_EXEC)
#define __PAGE_COPY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)
#define __PAGE_READONLY __pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_READONLY_EXEC __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_EXEC)


#endif /* __ASSEMBLY__ */


/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC


#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC


#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr) (empty_zero_page)


#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)   /* extract the page frame number from a page-table entry */
#define pfn_pte(pfn,prot) (__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot)))  /* build a page-table entry from a page frame number and page attributes */


#define pte_none(pte) (!pte_val(pte))  /* is the page-table entry zero? */
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)  /* clear a page-table entry */
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))           /* extract the page frame number from the entry and locate the corresponding page frame */
#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))  /* locate, in the master kernel page table, the virtual address of the page-table entry for a kernel address */
#define pte_offset_map(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))   /* locate the page-table entry for a linear address in a process page table;
                                                                                   if page tables were kept in high memory this would also set up a temporary kernel mapping */
#define pte_offset_map_nested(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
#define pte_unmap(pte) do { } while (0)    /* would undo the temporary kernel mapping set up by pte_offset_map; a no-op here */
#define pte_unmap_nested(pte) do { } while (0)


#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)  /* write a given value into a page-table entry */


#define set_pte_at(mm,addr,ptep,pteval) do { \
set_pte_ext(ptep, pteval, (addr) >= TASK_SIZE ? 0 : PTE_EXT_NG); \
 } while (0)


/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT)  /* is the entry usable? Set while the page is in memory, even when it is
                                                            not writable; the typical use of the latter case is copy-on-write */
#define pte_write(pte) (pte_val(pte) & L_PTE_WRITE)  /* does the entry carry the writable flag? */
#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY)          /* is the entry dirty? */
#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG)  /* has the page been accessed recently? */
#define pte_special(pte) (0)


/*
 * The following only works if pte_present() is not true.
 */
#define pte_file(pte) (pte_val(pte) & L_PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 2)     /* when the entry maps a file page that is not resident in memory, extract the file page number from it */
#define pgoff_to_pte(x) __pte(((x) << 2) | L_PTE_FILE)  /* store a file page number into a page-table entry */


#define PTE_FILE_MAX_BITS 30


#define PTE_BIT_FUNC(fn,op) \
static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }


PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE);
PTE_BIT_FUNC(mkwrite,   |= L_PTE_WRITE);
PTE_BIT_FUNC(mkclean,   &= ~L_PTE_DIRTY);
PTE_BIT_FUNC(mkdirty,   |= L_PTE_DIRTY);
PTE_BIT_FUNC(mkold,     &= ~L_PTE_YOUNG);
PTE_BIT_FUNC(mkyoung,   |= L_PTE_YOUNG);
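

/*
 * For example, PTE_BIT_FUNC(wrprotect, &= ~L_PTE_WRITE) above expands to:
 *
 *   static inline pte_t pte_wrprotect(pte_t pte)
 *   {
 *       pte_val(pte) &= ~L_PTE_WRITE;
 *       return pte;
 *   }
 *
 * so each line generates one small helper that sets or clears a single
 * "Linux" PTE bit.
 */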


static inline pte_t pte_mkspecial(pte_t pte) { return pte; }


/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_UNCACHED)
#define pgprot_writecombine(prot) \
__pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_BUFFERABLE)


#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)  /* does the page-middle-directory entry point to an unusable page table? */


#define copy_pmd(pmdpd,pmdps) \
do { \
pmdpd[0] = pmdps[0]; \
pmdpd[1] = pmdps[1]; \
flush_pmd_entry(pmdpd); \
} while (0)


#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)


static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
unsigned long ptr;


ptr = pmd_val(pmd) & ~(PTRS_PER_PTE * sizeof(void *) - 1);
ptr += PTRS_PER_PTE * sizeof(void *);


return __va(ptr);
}
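

/*
 * The arithmetic above follows the layout diagram earlier in this file:
 * masking with ~(512 * 4 - 1) rounds the pmd value down to the 2KB-aligned
 * start of the pair of hardware PTE tables, and adding 512 * 4 = 2048 bytes
 * skips over them to the "Linux" PTE tables, whose kernel virtual address
 * __va() then returns.
 */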


#define pmd_page(pmd) virt_to_page(__va(pmd_val(pmd)))  /* get the struct page of the page table that the pmd entry points to */


/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot)  /* build a page-table entry from a page frame and page attributes */


/*
 * The "pgd_xxx()" functions here are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 */
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
#define pgd_clear(pgdp) do { } while (0)
#define set_pgd(pgd,pgdp) do { } while (0)


/* to find an entry in a page-table-directory */
#define pgd_index(addr) ((addr) >> PGDIR_SHIFT)   /* index of a virtual address within the page global directory */


#define pgd_offset(mm, addr) ((mm)->pgd+pgd_index(addr))  /* address of the page global directory entry for a user-space address of a process */


/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)  /* address of the page global directory entry for a kernel-space address */


/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr) ((pmd_t *)(dir))


/* Find an entry in the third-level page table.. */
#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))  /* index of a linear address's page-table entry within its page table */
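

/*
 * Putting the lookup macros together, a walk from a kernel virtual address
 * down to its pte looks like this (a sketch of how the macros compose):
 *
 *   pgd_t *pgd = pgd_offset_k(addr);            // entry in swapper_pg_dir
 *   pmd_t *pmd = pmd_offset(pgd, addr);         // trivial here: pmd == (pmd_t *)pgd
 *   pte_t *pte = pte_offset_kernel(pmd, addr);  // Linux PTE for addr
 *
 * With PGDIR_SHIFT = 21 and PTRS_PER_PTE = 512, bits [31:21] of addr select
 * the pgd entry and bits [20:12] select the pte within the 512-entry table.
 */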


static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const unsigned long mask = L_PTE_EXEC | L_PTE_WRITE | L_PTE_USER;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}


extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* the first-level page table lives at virtual address swapper_pg_dir, the virtual
                                               base address of the initial (first-level/section) page table for the 4GB virtual
                                               address space. It must be 16KB aligned and occupies 16KB. The position of a
                                               virtual address's (first-level/section) descriptor within swapper_pg_dir is
                                               fixed, determined by the base of the section the address falls in. (The hardware
                                               first-level table has 4096 descriptors, each mapping 1MB of physical space;
                                               PTRS_PER_PGD is 2048 because pgd_t is 8 bytes.) */


/* Encode and decode a swap entry.
 *
 * We support up to 32GB of swap on 4k machines
 */
#define __swp_type(x) (((x).val >> 2) & 0x7f)
#define __swp_offset(x) ((x).val >> 9)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp) ((pte_t) { (swp).val })
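

/*
 * Checking the "32GB of swap" claim: __swp_offset() keeps bits [31:9] of the
 * entry, a 23-bit offset, and each swap page is 4KB, so one swap area can
 * span 2^23 * 4KB = 32GB; __swp_type() provides 7 bits of swap-area index.
 */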


/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr) (1)


#include <asm-generic/pgtable.h>


/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA


/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
 /* To support mmap, a driver must build page tables for the address range being mapped.
    vma is the virtual memory area the kernel built from the user-space mapping request; pages in the given range are
                    mapped into this area.
    from is the starting user-space address.
    pfn is the page frame number that the virtual range should be mapped to, i.e. the physical address shifted right by
                    PAGE_SHIFT; in most cases the vm_pgoff field of the vma already holds the needed value.
    size is the size of the mapping.
    prot is the protection requested for the new pages; a driver can use vma->vm_page_prot. (To map memory obtained
               with kmalloc into user space, the pages usually have to be marked reserved first.) */
#define io_remap_pfn_range(vma,from,pfn,size,prot) \
remap_pfn_range(vma, from, pfn, size, prot)
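

/*
 * A minimal driver .mmap handler built on this interface (a sketch with a
 * hypothetical device name; error handling reduced to the essentials):
 *
 *   static int mydev_mmap(struct file *filp, struct vm_area_struct *vma)
 *   {
 *       unsigned long size = vma->vm_end - vma->vm_start;
 *
 *       if (io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
 *                              size, vma->vm_page_prot))
 *           return -EAGAIN;
 *       return 0;
 *   }
 */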


#define pgtable_cache_init() do { } while (0)


#endif /* !__ASSEMBLY__ */


#endif /* CONFIG_MMU */


#endif /* _ASMARM_PGTABLE_H */








