Chinaunix首页 | 论坛 | 博客
  • 博客访问: 201281
  • 博文数量: 213
  • 博客积分: 1685
  • 博客等级: 上尉
  • 技术积分: 1515
  • 用 户 组: 普通用户
  • 注册时间: 2012-03-20 19:22
文章分类

全部博文(213)

文章存档

2016年(7)

2014年(63)

2013年(87)

2012年(56)

我的朋友

分类: Android平台

2013-07-23 10:40:09

1、触发异常的线性地址处于用户空间的vma中:
    1.1 如果是stack消耗完了而触发的,则对进程的栈区进行扩展,并分配物理页。
    1.2 其他,则直接检查访问权限,如果OK的话内核就给进程分配相应的物理页了

2、触发异常的线性地址不处于用户空间的vma中,则作为一次非法地址访问来处理,内核将调用__do_user_fault()终结进程

/*
 * Resolve a fault at @addr within @mm's address space.
 *
 * Caller must hold mm->mmap_sem for read (see do_page_fault()).
 *
 * Returns handle_mm_fault()'s VM_FAULT_* result when a usable vma is
 * found, or one of the ARM-local codes:
 *   VM_FAULT_BADMAP    - no vma covers @addr and it cannot be reached
 *                        by downward stack expansion
 *   VM_FAULT_BADACCESS - a vma covers @addr but the access type encoded
 *                        in @fsr is not permitted by the vma
 *
 * @tsk is not referenced in this function.
 */
static int __kprobes
__do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
                unsigned int flags, struct task_struct *tsk)
{
        struct vm_area_struct *vma;
        int fault;


        /* find_vma() returns the first vma with vm_end > addr, if any. */
        vma = find_vma(mm, addr);
        fault = VM_FAULT_BADMAP;        /* default: no usable mapping */
        if (unlikely(!vma))
                goto out;
        /* addr lies in the gap below vma: possibly expandable stack. */
        if (unlikely(vma->vm_start > addr))
                goto check_stack;


        /*
         * Ok, we have a good vm_area for this
         * memory access, so we can handle it.
         */
good_area:
        if (access_error(fsr, vma)) {
                fault = VM_FAULT_BADACCESS;
                goto out;
        }


        /* Fault is serviceable: fault in / allocate the page. */
        return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);


check_stack:
        /* Don't allow expansion below FIRST_USER_ADDRESS */
        if (vma->vm_flags & VM_GROWSDOWN &&
            addr >= FIRST_USER_ADDRESS && !expand_stack(vma, addr))
                goto good_area;
out:
        return fault;
}


/*
 * Top-level ARM page-fault handler.
 *
 * @addr: faulting virtual address
 * @fsr:  fault status register value (FSR_WRITE set means a write access)
 * @regs: trapped register state
 *
 * Always returns 0: the fault is either fixed up via __do_page_fault(),
 * delivered to the task as a signal via __do_user_fault(), or handed to
 * __do_kernel_fault() when there is no user context to blame.
 */
static int __kprobes
do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
        int write = fsr & FSR_WRITE;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                                (write ? FAULT_FLAG_WRITE : 0);


        /* A fault notifier (e.g. kprobes) may consume the fault entirely. */
        if (notify_page_fault(regs, fsr))
                return 0;


        tsk = current;
        mm  = tsk->mm;


        /* Enable interrupts if they were enabled in the parent context. */
        if (interrupts_enabled(regs))
                local_irq_enable();


        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (in_atomic() || !mm)
                goto no_context;


        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
         * we can bug out early if this is from code which shouldn't.
         */
        if (!down_read_trylock(&mm->mmap_sem)) {
                if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
                        goto no_context;
/*
 * The VM_FAULT_RETRY path jumps back here: mmap_sem was already dropped
 * inside the VM (__lock_page_or_retry), so it must be re-taken before
 * calling __do_page_fault() again.
 */
retry:
                down_read(&mm->mmap_sem);
        } else {
                /*
                 * The above down_read_trylock() might have succeeded in
                 * which case, we'll have missed the might_sleep() from
                 * down_read()
                 */
                might_sleep();
#ifdef CONFIG_DEBUG_VM
                if (!user_mode(regs) &&
                    !search_exception_tables(regs->ARM_pc))
                        goto no_context;
#endif
        }


        fault = __do_page_fault(mm, addr, fsr, flags, tsk);


        /* If we need to retry but a fatal signal is pending, handle the
         * signal first. We do not need to release the mmap_sem because
         * it would already be released in __lock_page_or_retry in
         * mm/filemap.c. */
 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return 0;


        /*
         * Major/minor page fault accounting is only done on the
         * initial attempt. If we go through a retry, it is extremely
         * likely that the page will be found in page cache at that point.
         */


        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        /*
         * FAULT_FLAG_ALLOW_RETRY still set means this is the first pass
         * (it is cleared below before any retry).
         */
        if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_MAJOR) {
                        tsk->maj_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
                                        regs, addr);
                } else {
                        tsk->min_flt++;
                        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
                                        regs, addr);
                }
                if (fault & VM_FAULT_RETRY) {
                        /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
                        * of starvation. */
                        flags &= ~FAULT_FLAG_ALLOW_RETRY;
                        goto retry;
                }
        }


        up_read(&mm->mmap_sem);


        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
         */
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;


        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
                 * userspace (which will retry the fault, or kill us if we
                 * got oom-killed)
                 */
                pagefault_out_of_memory();
                return 0;
        }


        /*
         * If we are in kernel mode at this point, we
         * have no context to handle this fault with.
         */
        if (!user_mode(regs))
                goto no_context;


        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
                 * successfully fix up this page fault.
                */
                sig = SIGBUS;
                code = BUS_ADRERR;
        } else {
                /*
                 * Something tried to access memory that
                 * isn't in our memory map..
                 */
                sig = SIGSEGV;
                code = fault == VM_FAULT_BADACCESS ?
                        SEGV_ACCERR : SEGV_MAPERR;
        }


        __do_user_fault(tsk, addr, fsr, sig, code, regs);
        return 0;


no_context:
        __do_kernel_fault(mm, addr, fsr, regs);
        return 0;
}
















/*
 * Something tried to access memory that isn't in our memory map..
 * User mode accesses just cause a SIGSEGV
 *
 * @tsk:  task to signal (signal is sig/code, with si_addr = @addr)
 * @addr: faulting virtual address, recorded in tsk->thread.address
 * @fsr:  fault status register value, recorded in tsk->thread.error_code
 */
static void
__do_user_fault(struct task_struct *tsk, unsigned long addr,
                unsigned int fsr, unsigned int sig, int code,
                struct pt_regs *regs)
{
        struct siginfo si;


#ifdef CONFIG_DEBUG_USER
        if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) ||
            ((user_debug & UDBG_BUS)  && (sig == SIGBUS))) {
                printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n",
                       tsk->comm, sig, addr, fsr);
                show_pte(tsk->mm, addr);
                show_regs(regs);
        }
#endif


        tsk->thread.address = addr;
        tsk->thread.error_code = fsr;
        /* 14 matches the x86 page-fault trap numbering convention. */
        tsk->thread.trap_no = 14;

        /*
         * Zero the whole siginfo before filling it in: struct siginfo
         * contains a union plus padding, and force_sig_info() copies the
         * entire structure out to userspace.  Setting only four fields
         * leaves the remaining bytes as uninitialized kernel stack, which
         * is an information leak (fixed upstream by clear_siginfo()).
         */
        memset(&si, 0, sizeof(si));
        si.si_signo = sig;
        si.si_errno = 0;
        si.si_code = code;
        si.si_addr = (void __user *)addr;
        force_sig_info(sig, &si, tsk);
}

阅读(1429) | 评论(0) | 转发(0) |
给主人留下些什么吧!~~