For the x86-64 (64-bit) architecture, Linux 2.6.35.4:
- arch/x86/boot/header.S: the first instruction executed in the kernel image, in real mode, is at label "_start"; control then falls through to "start_of_setup", which calls "main".
- arch/x86/boot/main.c: the "main" function is called from "start_of_setup"; after "main" performs a series of setup operations, it finally calls "go_to_protected_mode".
- arch/x86/boot/pm.c: "go_to_protected_mode" is defined here, it will call "protected_mode_jump".
- arch/x86/boot/pmjump.S: "protected_mode_jump" is in this file. After executing this function, the processor is in protected mode.
- arch/x86/boot/compressed/head_64.S: the entry point is "startup_32", call "decompress_kernel"
- arch/x86/boot/compressed/misc.c: executing "decompress_kernel".
- arch/x86/boot/compressed/head_64.S: jump to the decompressed kernel entry point "startup_64"
- arch/x86/kernel/head_64.S: start to execute at label "startup_64", then call "x86_64_start_kernel".
- arch/x86/kernel/head64.c: executes "x86_64_start_kernel", which calls "x86_64_start_reservations", which in turn calls "start_kernel".
- init/main.c: start executing "start_kernel".
In mm/init-mm.c:
The kernel statically defines struct mm_struct init_mm, which describes the kernel's own address space. Walking its page tables translates a kernel virtual address into the physical address backing it:
pgd_t *pgd = init_mm->pgd + pgd_index(addr);
pud_t *pud = pgd + pud_index(addr);
pmd_t *pmd = pud + pmd_index(addr);
pte_t *pte = pmd + pte_index(addr);
// physical_addr the physical address of pfn responding to addr.
long physical_addr = pte_val(*pte) & PAGE_SHIFT;
/*
 * The kernel's own address space (quoted from mm/init-mm.c,
 * linux-2.6.35).  Kernel threads and early boot code run with this mm.
 */
struct mm_struct init_mm = {
.mm_rb = RB_ROOT,                                               /* empty red-black tree of VMAs */
.pgd = swapper_pg_dir,                                          /* top-level page table; on x86-64 this is init_level4_pgt */
.mm_users = ATOMIC_INIT(2),                                     /* user-space reference count */
.mm_count = ATOMIC_INIT(1),                                     /* structure reference count; never drops to 0 */
.mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),              /* protects the VMA list/tree */
.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock), /* protects page-table updates */
.mmlist = LIST_HEAD_INIT(init_mm.mmlist),                       /* anchor of the global mm list */
.cpu_vm_mask = CPU_MASK_ALL,                                    /* may be active on every CPU */
};
|
In pgtable_64.h:
#define swapper_pg_dir init_level4_pgt
|
init_level4_pgt is a global symbol defined in head_64.S; it resides in the data segment of the kernel image.
/* Automate the creation of 1 to 1 mapping pmd entries */
/* Emits COUNT .quad PMD entries mapping physical START onward with 2MB pages. */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
.data
/*
 * This default setting generates an ident mapping at address 0x100000
 * and a mapping for the kernel that precisely maps virtual address
 * 0xffffffff80000000 to physical address 0x000000. (always using
 * 2Mbyte large pages provided by PAE mode)
 */
NEXT_PAGE(init_level4_pgt)
	/* PML4 entry 0: identity mapping of low memory via level3_ident_pgt */
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	/* zero-fill up to the slot covering PAGE_OFFSET (direct mapping) */
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	/* zero-fill up to the slot covering the kernel text mapping */
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
NEXT_PAGE(level3_ident_pgt)
	/* entry 0 points at level2_ident_pgt; remaining 511 entries empty */
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511,8,0
NEXT_PAGE(level3_kernel_pgt)
	/* leading entries empty up to the slot for the kernel mapping */
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	/* entry 506 points at the last-level table for the fixmap area */
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	/* fixmap PTEs are filled in at runtime */
	.fill	512,8,0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)

NEXT_PAGE(level2_spare_pgt)
	.fill	512, 8, 0
|
In function start_kernel(), the kernel calls
mm_init_owner(&init_mm, &init_task);
|
to make init_task the owner of init_mm.
In head_64.S:
	/* Setup early boot stage 4 level pagetables. */
	/* link-time physical address of init_level4_pgt (symbol minus the
	 * kernel virtual base __START_KERNEL_map) */
	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
	/* adjust by phys_base, the delta between the compile-time and the
	 * actual load physical address */
	addq	phys_base(%rip), %rax
	/* point CR3 at the top-level page table */
	movq	%rax, %cr3
|
The physical address of the kernel's top-level page table is loaded into the CR3 register.
阅读(1597) | 评论(0) | 转发(0) |