看FreeBSD内存管理的时候搜到这样的文章,但只有两个文档网站(doc88和it168)收留了这文档,下载还要积分,原文不见了,所以努力找了一篇,方便大家。
本文版权归chishanmingshen所有,欢迎转载。但转载请保持文档的完整性,注明原作者及原链接,严禁用于任何商业用途。作者:chishanmingshen 博客:chishanmingshen.blog.chinaunix.net======================================== BSD内存管理之amd64 by chishanmingshen 之前的引导和加载略去.elf64_exec(struct preloaded_file *fp) 第一次设置页表:2M*512=1G空间的映射.将所有空间都映射到1G,其实只使用第一个1G. 用于32bit模式处理. __exec((void *)VTOP(amd64_tramp), modulep, kernend); amd64_tramp: 设置cr3.打开分页机制.此时是32bit模式 跳到64bit模式.(之前的entry_hi/entry_lo即btext地址) ljmp $0x8, $VTOP(longmode) locore.S 建好一个bootstack 1. call hammer_time(modulep,physfree)(其中 会初始化中断,并建立0进程的pcb.将内核映像的名字拷贝到kernelname变量中. 调用getmemsize(kmdp, physfree)->pmap_bootstrap()->create_pagetable(). 最后返回进程0的栈) 2. call mi_startup(module init) 1.1 pmap_bootstrap(vm_paddr_t *firstaddr)/*入参其实就是kernend*/ create_pagetables(firstaddr)为内核映像建立页表 virtual_avail = (vm_offset_t) KERNBASE + *firstaddr;/*可见,之后都是用的这个虚拟地址来读内存.*/ (#define KERNBASE KVADDR(KPML4I, KPDPI, 0, 0)/(511,510,0,0),就是最后1G空间.) virtual_end = VM_MAX_KERNEL_ADDRESS;/*虚拟地址的最大值:0xfFFFFFFFFFFFFFFF*/ (#define VM_MAX_KERNEL_ADDRESS KVADDR(KPML4I, NPDPEPG-1, NKPDE-1, NPTEPG-1)/*511,510,511*/最后留了2M.) 
load_cr3(KPML4phys);从此进入KPML4phys时代 /*kernel_pmap这个是最重要的,没有之一.它记录PML4表基址的虚拟地址,从物理地址KPML4phys开始.*/ kernel_pmap->pm_pml4 = (pdp_entry_t *) (KERNBASE + KPML4phys); 1.2 static void create_pagetables(vm_paddr_t *firstaddr){ int i; /* Allocate pages */ KPTphys = allocpages(firstaddr, NKPT); KPML4phys = allocpages(firstaddr, 1); KPDPphys = allocpages(firstaddr, NKPML4E); KPDphys = allocpages(firstaddr, NKPDPE); ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT; if (ndmpdp < 4) /* Minimum 4GB of dirmap */ ndmpdp = 4; DMPDPphys = allocpages(firstaddr, NDMPML4E); DMPDphys = allocpages(firstaddr, ndmpdp); dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT; /* Fill in the underlying page table pages */ /* Read-only from zero to physfree */ /* XXX not fully used, underneath 2M pages */ for (i = 0; (i << PAGE_SHIFT) < *firstaddr; i++) {/*1->PT表*/ ((pt_entry_t *)KPTphys)[i] = i << PAGE_SHIFT; ((pt_entry_t *)KPTphys)[i] |= PG_RW | PG_V | PG_G | PG_U; } /* Now map the page tables at their location within PTmap */ for (i = 0; i < NKPT; i++) {/*2->PD表*/ ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT); ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_U; } /* Map from zero to end of allocations under 2M pages */ /* This replaces some of the KPTphys entries above */ for (i = 0; (i << PDRSHIFT) < *firstaddr; i++) {/*2->PD表,每项代表2M空间,直接覆盖上面2和1的赋值.*/ ((pd_entry_t *)KPDphys)[i] = i << PDRSHIFT; ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G | PG_U; } /* And connect up the PD to the PDP */ for (i = 0; i < NKPDPE; i++) {/*3->PDP表*/ ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys + (i << PAGE_SHIFT); ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U; } /*4->DMPD表*/ /* Now set up the direct map space using 2MB pages */ for (i = 0; i < NPDEPG * ndmpdp; i++) { ((pd_entry_t *)DMPDphys)[i] = (vm_paddr_t)i << PDRSHIFT; ((pd_entry_t *)DMPDphys)[i] |= PG_RW | PG_V | PG_PS | PG_G | PG_U; } /*5->DMPDP表*/ /* And the direct map space's PDP */ for (i = 0; i < ndmpdp; i++) { 
((pdp_entry_t *)DMPDPphys)[i] = DMPDphys + (i << PAGE_SHIFT); ((pdp_entry_t *)DMPDPphys)[i] |= PG_RW | PG_V | PG_U; } /*最后,要设置KPML4phys表的3个表项:递归表/直连表/内核表.*/ /* And recursively map PML4 to itself in order to get PTmap */ ((pdp_entry_t *)KPML4phys)[PML4PML4I] = KPML4phys;/*256 递归用*/ ((pdp_entry_t *)KPML4phys)[PML4PML4I] |= PG_RW | PG_V | PG_U; /* Connect the Direct Map slot up to the PML4 */ ((pdp_entry_t *)KPML4phys)[DMPML4I] = DMPDPphys;/*510,倒数第二个512G */ ((pdp_entry_t *)KPML4phys)[DMPML4I] |= PG_RW | PG_V | PG_U; /* Connect the KVA slot up to the PML4 */ ((pdp_entry_t *)KPML4phys)[KPML4I] = KPDPphys;/*511,最后一个512G */ ((pdp_entry_t *)KPML4phys)[KPML4I] |= PG_RW | PG_V | PG_U;} 2. void mi_startup(void)将所有注册的sysinit执行一遍,我们关注SI_SUB_VM模块. 2.1 SYSINIT(vm_mem, SI_SUB_VM, SI_ORDER_FIRST, vm_mem_init, NULL);vm_mem_init() vm_set_page_size设置页面大小为4k virtual_avail = vm_page_startup(virtual_avail);/*初始化各个物理页面,然后加入到freelist中*/ 遍历phys_avail[],得到段数:nblocks,总的空间大小:total. for (i = 0; i < PQ_COUNT; i++)/*初始化3个vm_page queue,各维护一个vm_page链表.*/ TAILQ_INIT(&vm_page_queues[i].pl); vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count; vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count; vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count; end = phys_avail[biggestone+1];end先赋为最大可用的内存地址处,以下为预留扣除. 扣去boot umaslab(大小为(boot_pages * UMA_SLAB_SIZE)) 得到new_end,将umaslab调pmap_map和uma_startup.(扣去的是由Dmap映射的, 所以不需要递增vaddr. #define PHYS_TO_DMAP(x) ((x) | DMAP_MIN_ADDRESS) #define DMAP_MIN_ADDRESS KVADDR(DMPML4I, 0, 0, 0)/*510, 倒数第二个512G.*/) 接着扣去dump. phys_avail[biggestone + 1] = new_end;最后一段内存更正为到new_end结束,扣除了vm_page[], 和slab,和dump. vm_page_array_size = page_range; vm_phys_init();/*初始化物理内存分配器*/ 对所有段调用vm_phys_create_seg(phys_avail[i], phys_avail[i + 1],VM_FREELIST_DEFAULT); 更新到vm_phys_segs[]中. 
vm_phys_free_queues[vm_nfreelists][VM_NFREEPOOL] /*static int vm_nfreelists = VM_FREELIST_DEFAULT + 1;*/ vaddr += PAGE_SIZE;/*vm_page_array[-1]置空映射,这样就造成一个保护空洞,在vm_page_array之前.*/ new_end = trunc_page(end - page_range * sizeof(struct vm_page)); mapped = pmap_map(&vaddr, new_end, end, VM_PROT_READ | VM_PROT_WRITE); vm_page_array = (vm_page_t) mapped; /*将预留的vm_page结构数组空间扣去. vm_page_array指向pmap_map()映射后的vm_page[]空间,共npages个页面. 计算可用物理页面总数为page_range个,从而计算出对应的vm_page结构数组需要的页数. npages = (total - (page_range * sizeof(struct vm_page)) - (end - new_end)) / PAGE_SIZE;*/ 遍历phys_avail[],对所有物理页调用vm_phys_add_page(pa). vm_phys_add_page(pa/*vm_paddr 物理地址*/):初始化一个物理页面,同时将它加到free list中. m = vm_phys_paddr_to_vm_page(vm_paddr_t pa):/*找到给定物理地址对应的vm_page*/ 遍历vm_phys_segs[],找到对应的vm_page结构指针,并返回该指针. return &(seg->first_page[atop(pa - seg->start)]); pmap_page_init(m); vm_phys_free_pages(m, 0);/*buddy算法,初始时都加到freelist中的单页队列,buddy里面会自动调整.*/ return (vaddr); /*最后将可以用的虚拟地址返回,其中vm_page[]的空间已经加进去了. 返回的virtual_avail,由外面使用,即普通物理页面空间*/
阅读(1975) | 评论(2) | 转发(0) |