Category: LINUX

2013-10-11 21:58:29

/***************************************************************************************************************/
/* highmem.h */
#ifndef _LINUX_HIGHMEM_H
#define _LINUX_HIGHMEM_H


#include <linux/config.h>
#include <asm/pgalloc.h>


#ifdef CONFIG_HIGHMEM


extern struct page *highmem_start_page; /* first page of high memory */


#include <asm/highmem.h>


/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);


extern struct buffer_head *create_bounce(int rw, struct buffer_head * bh_orig);


static inline char *bh_kmap(struct buffer_head *bh)
{
return kmap(bh->b_page) + bh_offset(bh);
}


static inline void bh_kunmap(struct buffer_head *bh)
{
kunmap(bh->b_page);
}


/*
 * remember to add offset! and never ever reenable interrupts between a
 * bh_kmap_irq and bh_kunmap_irq!!
 */
static inline char *bh_kmap_irq(struct buffer_head *bh, unsigned long *flags)
{
unsigned long addr;


__save_flags(*flags);


/*
* could be low
*/
if (!PageHighMem(bh->b_page))
return bh->b_data;


/*
* it's a highmem page
*/
__cli();
addr = (unsigned long) kmap_atomic(bh->b_page, KM_BH_IRQ);


if (addr & ~PAGE_MASK)
BUG();


return (char *) addr + bh_offset(bh);
}


static inline void bh_kunmap_irq(char *buffer, unsigned long *flags)
{
unsigned long ptr = (unsigned long) buffer & PAGE_MASK;


kunmap_atomic((void *) ptr, KM_BH_IRQ);
__restore_flags(*flags);
}
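/*
 * Usage sketch (my illustration, not part of highmem.h): bh_kmap_irq() and bh_kunmap_irq()
 * must be used as a pair with interrupts left disabled in between, and the returned pointer
 * already has bh_offset() applied:
 *
 *	unsigned long flags;
 *	char *data = bh_kmap_irq(bh, &flags);	// saves flags, disables irqs for a highmem page
 *	// ... access data[0 .. bh->b_size - 1] without re-enabling interrupts ...
 *	bh_kunmap_irq(data, &flags);		// unmaps and restores the saved flags
 */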


#else /* CONFIG_HIGHMEM */


static inline unsigned int nr_free_highpages(void) { return 0; }


/* maps a high-memory page into low memory (with CONFIG_HIGHMEM off, every page already has a direct kernel address) */
static inline void *kmap(struct page *page) { return page_address(page); }


#define kunmap(page) do { } while (0)


/* interrupt-context users may use this macro (atomic) */
#define kmap_atomic(page,idx) kmap(page)


#define kunmap_atomic(page,idx) kunmap(page)


#define bh_kmap(bh) ((bh)->b_data)
#define bh_kunmap(bh) do { } while (0)


/* non-blocking version of kmap */
#define kmap_nonblock(page)            kmap(page)


#define bh_kmap_irq(bh, flags) ((bh)->b_data)
#define bh_kunmap_irq(bh, flags) do { *(flags) = 0; } while (0)


#endif /* CONFIG_HIGHMEM */


/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *addr = kmap_atomic(page, KM_USER0);
clear_user_page(addr, vaddr);
kunmap_atomic(addr, KM_USER0);
}


static inline void clear_highpage(struct page *page)
{
clear_page(kmap(page));
kunmap(page);
}


/*
 * Same but also flushes aliased cache contents to RAM.
 */
static inline void memclear_highpage_flush(struct page *page, unsigned int offset, unsigned int size)
{
char *kaddr;


if (offset + size > PAGE_SIZE)
out_of_line_bug();
kaddr = kmap(page);
memset(kaddr + offset, 0, size);
flush_dcache_page(page);
flush_page_to_ram(page);
kunmap(page);
}


static inline void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr)
{
char *vfrom, *vto;


vfrom = kmap_atomic(from, KM_USER0);
vto = kmap_atomic(to, KM_USER1);
copy_user_page(vto, vfrom, vaddr);
kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto, KM_USER1);
}


static inline void copy_highpage(struct page *to, struct page *from)
{
char *vfrom, *vto;


vfrom = kmap_atomic(from, KM_USER0);
vto = kmap_atomic(to, KM_USER1);
copy_page(vto, vfrom);
kunmap_atomic(vfrom, KM_USER0);
kunmap_atomic(vto, KM_USER1);
}


#endif /* _LINUX_HIGHMEM_H */
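/*
 * Usage sketch (my illustration, not kernel source): how driver code typically uses
 * kmap()/kunmap() to touch a page that may live in high memory. The function name is
 * hypothetical; kmap() may sleep, so this must not run in interrupt context.
 */
static void example_zero_highpage(struct page *page)
{
	char *vaddr = kmap(page);	/* just page_address(page) when the page is not highmem */
	memset(vaddr, 0, PAGE_SIZE);	/* access the page through the temporary kernel mapping */
	kunmap(page);			/* release the mapping (a no-op without CONFIG_HIGHMEM) */
}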
/***************************************************************************************************************/
/* highmem.c */
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar
 */


#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/slab.h>


/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
static int pkmap_count[LAST_PKMAP]; /* tracks the state of each entry in the kmap window (LAST_PKMAP = PTRS_PER_PTE, i.e. 1024 on i386 without PAE) */
static unsigned int last_pkmap_nr; /* index where the previous scan of pkmap_count[] left off */
static spinlock_cacheline_t kmap_lock_cacheline = {SPIN_LOCK_UNLOCKED};
#define kmap_lock  kmap_lock_cacheline.lock


pte_t * pkmap_page_table; /* page table used by kmap; it maps the window at PKMAP_BASE and is set up at system initialization */


static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);


#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
#define LAST_PKMAP PTRS_PER_PTE
#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
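/*
 * Worked example of the window arithmetic above (assuming 4 KB pages, i.e. PAGE_SHIFT = 12):
 * slot nr maps to the virtual address PKMAP_BASE + nr * 4096, and PKMAP_NR() inverts that,
 * so PKMAP_NR(PKMAP_ADDR(nr)) == nr. The kmap window therefore covers LAST_PKMAP * PAGE_SIZE
 * bytes starting at PKMAP_BASE, with one pte per slot in pkmap_page_table.
 */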


/* called when last_pkmap_nr wraps back to 0: drops every entry whose count is 1 back to 0, then flushes the TLB */
static void flush_all_zero_pkmaps(void)
{
int i;


flush_cache_all();


for (i = 0; i < LAST_PKMAP; i++) {
struct page *page;


/*
* zero means we don't have anything to do,
* >1 means that it is still in use. Only
* a count of 1 means that it is free but
* needs to be unmapped
*/
if (pkmap_count[i] != 1)
continue;
pkmap_count[i] = 0;


/* sanity check */
if (pte_none(pkmap_page_table[i]))
BUG();


/*
* Don't need an atomic fetch-and-clear op here;
* no-one has the page mapped, and cannot get at
* its virtual address (and hence PTE) without first
* getting the kmap_lock (which is held here).
* So no dangers, even with speculative execution.
*/
page = pte_page(pkmap_page_table[i]);
pte_clear(&pkmap_page_table[i]);


page->virtual = NULL;
}
flush_tlb_all();
}


/* establish a mapping for the page in the kmap window */
static inline unsigned long map_new_virtual(struct page *page, int nonblocking)
{
unsigned long vaddr;
int count;


start:
count = LAST_PKMAP;
/* Find an empty entry */
for (;;) {
last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
if (!last_pkmap_nr) { /* last_pkmap_nr wrapped around to 0 */
flush_all_zero_pkmaps();
count = LAST_PKMAP;
}
if (!pkmap_count[last_pkmap_nr]) /* a free slot was found, leave the loop */
break; /* Found a usable entry */
if (--count)
continue;


if (nonblocking)
return 0;


/*
* Sleep for somebody else to unmap their entries
*/
{ /* no free slot was found, so sleep */
DECLARE_WAITQUEUE(wait, current);


current->state = TASK_UNINTERRUPTIBLE;
add_wait_queue(&pkmap_map_wait, &wait);
spin_unlock(&kmap_lock);
schedule();
remove_wait_queue(&pkmap_map_wait, &wait);
spin_lock(&kmap_lock);


/* Somebody else might have mapped it while we slept */
if (page->virtual) /* if the page got mapped while we slept, return its virtual address */
return (unsigned long) page->virtual;


/* Re-start */
goto start;
}
}
vaddr = PKMAP_ADDR(last_pkmap_nr);
set_pte(&(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); /* install the pte */


pkmap_count[last_pkmap_nr] = 1; /* set the entry to 1: mapped, but no users yet */
page->virtual = (void *) vaddr;


return vaddr;
}


/* map a high-memory page into the kernel's virtual address space */
void *kmap_high(struct page *page, int nonblocking)
{
unsigned long vaddr;


/*
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
*
* We cannot call this from interrupts, as it may block
*/
spin_lock(&kmap_lock);
vaddr = (unsigned long) page->virtual; /* this field is set once the page has been mapped */
if (!vaddr) { /* NULL means no mapping has been established yet */
vaddr = map_new_virtual(page, nonblocking); /* so create one */
if (!vaddr)
goto out;
}
pkmap_count[PKMAP_NR(vaddr)]++; /* increment by one */
if (pkmap_count[PKMAP_NR(vaddr)] < 2) 
BUG();
 out:
spin_unlock(&kmap_lock);
return (void*) vaddr;
}


void kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
int need_wakeup;


spin_lock(&kmap_lock);
vaddr = (unsigned long) page->virtual;
if (!vaddr)
BUG();
nr = PKMAP_NR(vaddr);


/*
* A count must never go down to zero
* without a TLB flush!
*/
need_wakeup = 0;
switch (--pkmap_count[nr]) { /* drop one reference when unmapping */
case 0:
BUG();
case 1: /* down to 1: no users remain, but a TLB flush is still needed before the slot can be reused */
/*
* Avoid an unnecessary wake_up() function call.
* The common case is pkmap_count[] == 1, but
* no waiters.
* The tasks queued in the wait-queue are guarded
* by both the lock in the wait-queue-head and by
* the kmap_lock.  As the kmap_lock is held here,
* no need for the wait-queue-head's lock.  Simply
* test if the queue is empty.
*/
need_wakeup = waitqueue_active(&pkmap_map_wait);
}
spin_unlock(&kmap_lock);


/* do wake-up, if needed, race-free outside of the spin lock */
if (need_wakeup)
wake_up(&pkmap_map_wait); /* wake up the waiters */
}
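/*
 * Sketch of the reference counting above (hypothetical caller, not kernel code): two nested
 * kmap() calls on the same highmem page share one pkmap slot.
 *
 *	char *a = kmap(page);	// first user: map_new_virtual() sets the slot to 1, kmap_high() bumps it to 2
 *	char *b = kmap(page);	// page->virtual is reused, count goes to 3 (and a == b)
 *	kunmap(page);		// count drops back to 2
 *	kunmap(page);		// count drops to 1: no users left, but the slot is only
 *				//   recycled after a later flush_all_zero_pkmaps() TLB flush
 */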


#define POOL_SIZE 32


/*
 * This lock gets no contention at all, normally.
 */
static spinlock_t emergency_lock = SPIN_LOCK_UNLOCKED;


int nr_emergency_pages;
static LIST_HEAD(emergency_pages);


int nr_emergency_bhs;
static LIST_HEAD(emergency_bhs);


/*
 * Simple bounce buffer support for highmem pages.
 * This will be moved to the block layer in 2.5.
 */
/* If the buffer is used to write data to the device, it holds the data read out of high memory
    when create_bounce() builds the bounce buffer; the callback bounce_end_io_write() completes
    the I/O once the device is ready to accept the data. */
static inline void copy_from_high_bh (struct buffer_head *to,
struct buffer_head *from)
{
struct page *p_from;
char *vfrom;


p_from = from->b_page;


vfrom = kmap_atomic(p_from, KM_USER0);
memcpy(to->b_data, vfrom + bh_offset(from), to->b_size);
kunmap_atomic(vfrom, KM_USER0);
}


/* If the buffer is used to read data from the device, nothing moves until the device is ready;
    the device's interrupt handler then invokes the callback bounce_end_io_read(), which calls
    this function to copy the data up into high memory. */
static inline void copy_to_high_bh_irq (struct buffer_head *to,
struct buffer_head *from)
{
struct page *p_to;
char *vto;
unsigned long flags;


p_to = to->b_page;
__save_flags(flags);
__cli();
vto = kmap_atomic(p_to, KM_BOUNCE_READ);
memcpy(vto + bh_offset(to), from->b_data, to->b_size);
kunmap_atomic(vto, KM_BOUNCE_READ);
__restore_flags(flags);
}


/* I/O completion callback */
static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
{
struct page *page;
struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
unsigned long flags;


bh_orig->b_end_io(bh_orig, uptodate);


page = bh->b_page;


spin_lock_irqsave(&emergency_lock, flags);
if (nr_emergency_pages >= POOL_SIZE) /* if the emergency pool already holds enough pages, free this one */
__free_page(page);
else {
/*
* We are abusing page->list to manage
* the highmem emergency pool:
*/
list_add(&page->list, &emergency_pages); /* otherwise put the page into the pool and bump the page count */
nr_emergency_pages++;
}

if (nr_emergency_bhs >= POOL_SIZE) { /* likewise, free the buffer_head if the pool already holds enough of them */
#ifdef HIGHMEM_DEBUG
/* Don't clobber the constructed slab cache */
init_waitqueue_head(&bh->b_wait);
#endif
kmem_cache_free(bh_cachep, bh);
} else {
/*
* Ditto in the bh case, here we abuse b_inode_buffers:
*/
list_add(&bh->b_inode_buffers, &emergency_bhs); /* otherwise put it back on the list and bump the count */
nr_emergency_bhs++;
}
spin_unlock_irqrestore(&emergency_lock, flags);
}


/* initialize the emergency pool used for bounce buffers */
static __init int init_emergency_pool(void)
{
struct sysinfo i;
        si_meminfo(&i);
        si_swapinfo(&i);
        
        if (!i.totalhigh)
        return 0;


spin_lock_irq(&emergency_lock);
while (nr_emergency_pages < POOL_SIZE) {
struct page * page = alloc_page(GFP_ATOMIC);
if (!page) {
printk("couldn't refill highmem emergency pages");
break;
}
list_add(&page->list, &emergency_pages); /* link the allocated page into the emergency_pages list */
nr_emergency_pages++; /* and count it */
}
while (nr_emergency_bhs < POOL_SIZE) {
struct buffer_head * bh = kmem_cache_alloc(bh_cachep, SLAB_ATOMIC);
if (!bh) {
printk("couldn't refill highmem emergency bhs");
break;
}
list_add(&bh->b_inode_buffers, &emergency_bhs);  /* link the allocated buffer_head into the emergency_bhs list */
nr_emergency_bhs++; /* and count it */
}
spin_unlock_irq(&emergency_lock);
printk("allocated %d pages and %d bhs reserved for the highmem bounces\n",
      nr_emergency_pages, nr_emergency_bhs);


return 0;
}


__initcall(init_emergency_pool);


/* completion callback for a write bounce buffer */
static void bounce_end_io_write (struct buffer_head *bh, int uptodate)
{
bounce_end_io(bh, uptodate);
}
/* completion callback for a read bounce buffer */
static void bounce_end_io_read (struct buffer_head *bh, int uptodate)
{
struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);


if (uptodate)
copy_to_high_bh_irq(bh_orig, bh); /* copy the data up into high memory */
bounce_end_io(bh, uptodate);
}
/* allocate a bounce-buffer page */
struct page *alloc_bounce_page (void)
{
struct list_head *tmp;
struct page *page;


page = alloc_page(GFP_NOHIGHIO);
if (page)
return page;
/*
* No luck. First, kick the VM so it doesn't idle around while
* we are using up our emergency rations.
*/
wakeup_bdflush();


repeat_alloc:
/*
* Try to allocate from the emergency pool.
*/
tmp = &emergency_pages;
spin_lock_irq(&emergency_lock);
if (!list_empty(tmp)) {
page = list_entry(tmp->next, struct page, list);
list_del(tmp->next);
nr_emergency_pages--;
}
spin_unlock_irq(&emergency_lock);
if (page)
return page;


/* we need to wait I/O completion */
run_task_queue(&tq_disk);


yield();
goto repeat_alloc;
}


/* allocate a buffer_head for a bounce buffer */
struct buffer_head *alloc_bounce_bh (void)
{
struct list_head *tmp;
struct buffer_head *bh;


bh = kmem_cache_alloc(bh_cachep, SLAB_NOHIGHIO);
if (bh)
return bh;
/*
* No luck. First, kick the VM so it doesn't idle around while
* we are using up our emergency rations.
*/
wakeup_bdflush();


repeat_alloc:
/*
* Try to allocate from the emergency pool.
*/
tmp = &emergency_bhs;
spin_lock_irq(&emergency_lock);
if (!list_empty(tmp)) {
bh = list_entry(tmp->next, struct buffer_head, b_inode_buffers);
list_del(tmp->next);
nr_emergency_bhs--;
}
spin_unlock_irq(&emergency_lock);
if (bh)
return bh;


/* we need to wait I/O completion */
run_task_queue(&tq_disk);


yield();
goto repeat_alloc;
}


/* Bounce buffers: used when a device cannot reach all of the memory the CPU can see. The bounce
    buffer lives in memory low enough for the device to copy data out of and write data into;
    the data is then copied between it and the corresponding page in high memory. */
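/* Sketch of the calling pattern (my illustration; the variable names are hypothetical): the block
    layer swaps a highmem buffer_head for its bounce copy before queueing it to the driver, and the
    bounce bh's b_end_io (bounce_end_io_read/write) later completes the original bh:

	struct buffer_head *use = create_bounce(rw, bh);	// returns bh unchanged if not highmem
	// ... submit 'use' to the low-level driver; on completion the data (for reads) is
	// copied up into bh->b_page and bh->b_end_io(bh, uptodate) is invoked ...
*/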


/* create a bounce buffer for a high-memory buffer_head */
struct buffer_head * create_bounce(int rw, struct buffer_head * bh_orig)
{
struct page *page;
struct buffer_head *bh;


if (!PageHighMem(bh_orig->b_page))
return bh_orig;


bh = alloc_bounce_bh(); /* allocate the bounce buffer_head */
/*
* This is wasteful for 1k buffers, but this is a stopgap measure
* and we are being ineffective anyway. This approach simplifies
* things immensely. On boxes with more than 4GB RAM this should
* not be an issue anyway.
*/
page = alloc_bounce_page(); /* allocate the bounce page */


set_bh_page(bh, page, 0);
       /* copy the relevant fields of the template bh_orig into the new buffer_head */
bh->b_next = NULL;
bh->b_blocknr = bh_orig->b_blocknr;
bh->b_size = bh_orig->b_size;
bh->b_list = -1;
bh->b_dev = bh_orig->b_dev;
bh->b_count = bh_orig->b_count;
bh->b_rdev = bh_orig->b_rdev;
bh->b_state = bh_orig->b_state;
#ifdef HIGHMEM_DEBUG
bh->b_flushtime = jiffies;
bh->b_next_free = NULL;
bh->b_prev_free = NULL;
/* bh->b_this_page */
bh->b_reqnext = NULL;
bh->b_pprev = NULL;
#endif
/* bh->b_page */
if (rw == WRITE) { /* write request */
bh->b_end_io = bounce_end_io_write; /* set the I/O completion callback */
copy_from_high_bh(bh, bh_orig);
} else
bh->b_end_io = bounce_end_io_read;
bh->b_private = (void *)bh_orig;
bh->b_rsector = bh_orig->b_rsector;
#ifdef HIGHMEM_DEBUG
memset(&bh->b_wait, -1, sizeof(bh->b_wait));
#endif


return bh;
}



