Category: LINUX

2009-01-06 17:25:14

 

/*
 * Copyright 2001-2002 Pavel Machek
 * Based on code
 * Copyright 2001 Patrick Mochel
 */

#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/apic.h>
#ifdef SUSPEND_C
#include <asm/processor.h>
#endif
/* image of the saved processor states */
struct saved_context {
    u32 eax, ebx, ecx, edx;
    u32 esp, ebp, esi, edi;
    u16 es, fs, gs, ss;
    u32 cr0, cr2, cr3, cr4;
    u16 gdt_pad;
    u16 gdt_limit;
    u32 gdt_base;
    u16 idt_pad;
    u16 idt_limit;
    u32 idt_base;
    u16 ldt;
    u16 tss;
    u32 tr;
    u32 safety;
    u32 return_address;
    u32 eflags;
} __attribute__((packed));
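
/*
 * Editor's sketch (not in the original source): the byte-wise copy loops
 * in do_swsusp_lowlevel() depend on sizeof(struct saved_context) being
 * exactly the packed size (92 bytes: 18 u32 plus 10 u16 fields). A
 * compile-time assertion such as the one below would catch the layout
 * silently drifting if __attribute__((packed)) were ever dropped.
 */
static inline void saved_context_layout_check(void)
{
    /* A negative array size forces a compile error on a size mismatch. */
    char assert_packed_size[(sizeof(struct saved_context) == 92) ? 1 : -1]
        __attribute__((unused));
    (void) assert_packed_size;
}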

struct saved_context saved_contexts[NR_CPUS];
struct saved_context saved_context;    /* temporary storage */

spinlock_t saved_context_lock __nosavedata = SPIN_LOCK_UNLOCKED;

#define loaddebug(thread,register) \
               __asm__("movl %0,%%db" #register \
                       : /* no output */ \
                       :"r" ((thread)->debugreg[register]))

 
/*
 * save_processor_context
 *
 * Save the state of the processor before we go to sleep.
 *
 * return_stack is the value of the stack pointer (%esp) as the caller sees it.
 * We could not find a good way to obtain it from here (we don't want to make
 * _too_ many assumptions about the layout of the stack this far down). Also,
 * the handy little __builtin_frame_pointer(level), where level > 0, is
 * blatantly buggy - it returns the value of the stack at the proper location,
 * not the location itself, as it should (as of gcc 2.91.66).
 *
 * Note that the context and timing of this function is pretty critical.
 * With a minimal amount of things going on in the caller and in here, gcc
 * does a good job of being just a dumb compiler. Watch the assembly output
 * if anything changes, though, and make sure everything is going in the right
 * place.
 */

static inline void save_processor_context(void)
{
    kernel_fpu_begin();

    /*
     * descriptor tables
     */

    asm volatile ("sgdt (%0)" : "=m" (saved_context.gdt_limit));
    asm volatile ("sidt (%0)" : "=m" (saved_context.idt_limit));
    asm volatile ("sldt (%0)" : "=m" (saved_context.ldt));
    asm volatile ("str (%0)" : "=m" (saved_context.tr));

    /*
     * save the general registers.
     * note that gcc has constructs to specify output of certain registers,
     * but they're not used here, because it assumes that you want to modify
     * those registers, so it tries to be smart and save them beforehand.
     * It's really not necessary, and kinda fishy (check the assembly output),
     * so it's avoided.
     */

    asm volatile ("movl %%esp, (%0)" : "=m" (saved_context.esp));
    asm volatile ("movl %%eax, (%0)" : "=m" (saved_context.eax));
    asm volatile ("movl %%ebx, (%0)" : "=m" (saved_context.ebx));
    asm volatile ("movl %%ecx, (%0)" : "=m" (saved_context.ecx));
    asm volatile ("movl %%edx, (%0)" : "=m" (saved_context.edx));
    asm volatile ("movl %%ebp, (%0)" : "=m" (saved_context.ebp));
    asm volatile ("movl %%esi, (%0)" : "=m" (saved_context.esi));
    asm volatile ("movl %%edi, (%0)" : "=m" (saved_context.edi));

    /*
     * segment registers
     */

    asm volatile ("movw %%es, %0" : "=r" (saved_context.es));
    asm volatile ("movw %%fs, %0" : "=r" (saved_context.fs));
    asm volatile ("movw %%gs, %0" : "=r" (saved_context.gs));
    asm volatile ("movw %%ss, %0" : "=r" (saved_context.ss));

    /*
     * control registers
     */

    asm volatile ("movl %%cr0, %0" : "=r" (saved_context.cr0));
    asm volatile ("movl %%cr2, %0" : "=r" (saved_context.cr2));
    asm volatile ("movl %%cr3, %0" : "=r" (saved_context.cr3));
    asm volatile ("movl %%cr4, %0" : "=r" (saved_context.cr4));

    /*
     * eflags
     */

    asm volatile ("pushfl ; popl (%0)" : "=m" (saved_context.eflags));
}

static void fix_processor_context(void)
{
    int nr = smp_processor_id();
    struct tss_struct * t = &init_tss[nr];

    set_tss_desc(nr,t);    /* This just modifies memory; it should not be necessary. But... it is necessary, because 386 hardware has the concept of a busy TSS or some similar stupidity. */
    gdt_table[__TSS(nr)].b &= 0xfffffdff;
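    /*
     * Editor's note: 0xfffffdff clears bit 9 of the descriptor's high
     * dword - the TSS "busy" bit (bit 41 of the full descriptor) - so
     * that the ltr below will not fault on an already-busy TSS.
     */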

    load_TR(nr);        /* This does ltr */

    load_LDT(current->active_mm);    /* This does lldt */

    /*
     * Now maybe reload the debug registers
     */

    if (current->thread.debugreg[7]){
                loaddebug(&current->thread, 0);
                loaddebug(&current->thread, 1);
                loaddebug(&current->thread, 2);
                loaddebug(&current->thread, 3);
                /* no 4 and 5 */
                loaddebug(&current->thread, 6);
                loaddebug(&current->thread, 7);
    }

}

static void do_fpu_end(void)
{
    /*
     * Restore FPU regs if necessary. Done out of line so that gcc does
     * not move the cr0 load to some stupid place.
     */
    kernel_fpu_end();
}
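
/*
 * Editor's note: this kernel_fpu_end() pairs with the kernel_fpu_begin()
 * at the top of save_processor_context(), so the begin/end bracket the
 * whole suspend-to-resume interval rather than a single function.
 */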

/*
 * restore_processor_context
 *
 * Restore the processor context as it was before we went to sleep
 * - descriptor tables
 * - control registers
 * - segment registers
 * - flags
 *
 * Note that it is critical that this function is declared inline.
 * It was separated out from restore_state to make that function
 * a little clearer, but it needs to be inlined because we won't have a
 * stack when we get here (so we can't push a return address).
 */

static inline void restore_processor_context(void)
{
    /*
     * first restore %ds, so we can access our data properly
     */

    asm volatile (".align 4");
    asm volatile ("movw %0, %%ds" :: "r" ((u16)__KERNEL_DS));


    /*
     * control registers
     */

    asm volatile ("movl %0, %%cr4" :: "r" (saved_context.cr4));
    asm volatile ("movl %0, %%cr3" :: "r" (saved_context.cr3));
    asm volatile ("movl %0, %%cr2" :: "r" (saved_context.cr2));
    asm volatile ("movl %0, %%cr0" :: "r" (saved_context.cr0));

    /*
     * segment registers
     */

    asm volatile ("movw %0, %%es" :: "r" (saved_context.es));
    asm volatile ("movw %0, %%fs" :: "r" (saved_context.fs));
    asm volatile ("movw %0, %%gs" :: "r" (saved_context.gs));
    asm volatile ("movw %0, %%ss" :: "r" (saved_context.ss));

    /*
     * the other general registers
     *
     * note that even though gcc has constructs to specify memory
     * input into certain registers, it will try to be too smart
     * and save them at the beginning of the function. This is esp.
     * bad since we don't have a stack set up when we enter, and we
     * want to preserve the values on exit. So, we set them manually.
     */

    asm volatile ("movl %0, %%esp" :: "m" (saved_context.esp));
    asm volatile ("movl %0, %%ebp" :: "m" (saved_context.ebp));
    asm volatile ("movl %0, %%eax" :: "m" (saved_context.eax));
    asm volatile ("movl %0, %%ebx" :: "m" (saved_context.ebx));
    asm volatile ("movl %0, %%ecx" :: "m" (saved_context.ecx));
    asm volatile ("movl %0, %%edx" :: "m" (saved_context.edx));
    asm volatile ("movl %0, %%esi" :: "m" (saved_context.esi));
    asm volatile ("movl %0, %%edi" :: "m" (saved_context.edi));

    /*
     * now restore the descriptor tables to their proper values;
     * ltr is done in fix_processor_context().
     */


    asm volatile ("lgdt (%0)" :: "m" (saved_context.gdt_limit));
    asm volatile ("lidt (%0)" :: "m" (saved_context.idt_limit));
    asm volatile ("lldt (%0)" :: "m" (saved_context.ldt));

    fix_processor_context();

    /*
     * the flags
     */

    asm volatile ("pushl %0 ; popfl" :: "m" (saved_context.eflags));

    do_fpu_end();
}

#ifdef SUSPEND_C
/* Local variables for do_swsusp2_lowlevel */
volatile static int loop __nosavedata = 0;
volatile static int state1 __nosavedata = 0;
volatile static int state2 __nosavedata = 0;
volatile static int state3 __nosavedata = 0;
volatile static struct range *origrange __nosavedata;
volatile static struct range *copyrange __nosavedata;
volatile static int origoffset __nosavedata;
volatile static int copyoffset __nosavedata;
volatile static unsigned long * origpage __nosavedata;
volatile static unsigned long * copypage __nosavedata;
#ifndef CONFIG_SMP
static unsigned long c_loops_per_jiffy_ref __nosavedata = 0;
static unsigned long cpu_khz_ref __nosavedata = 0;
#endif
extern atomic_t swsusp_cpu_counter __nosavedata;
extern inline void do_flush_tlb_all_local(void);

/*
 * APIC support: These routines save the APIC
 * configuration for the CPU on which they are
 * being executed
 */

extern void swsusp_apic_save_state(void);
extern void swsusp_apic_reload_state(void);

#ifdef CONFIG_SMP
/* ------------------------------------------------
 * BEGIN Irq affinity code, based on code from LKCD.
 *
 * IRQ affinity support:
 * Save and restore IRQ affinities, and set them
 * all to CPU 0.
 *
 * Section between dashes taken from LKCD code.
 * Perhaps we should be working toward a shared library
 * of such routines for kexec, lkcd, software suspend
 * and whatever other similar projects there are?
 */


extern irq_desc_t irq_desc[];
extern unsigned long irq_affinity[];
unsigned long saved_affinity[NR_IRQS];

/*
 * Routine to save the old irq affinities and change the affinity of all
 * irqs to the current CPU (the "dumping" CPU in the original LKCD code).
 */

static void set_irq_affinity(void)
{
    int i;
    int cpu = smp_processor_id();

    memcpy(saved_affinity, irq_affinity, NR_IRQS * sizeof(unsigned long));
    for (i = 0; i < NR_IRQS; i++) {
        if (irq_desc[i].handler == NULL)
            continue;
        irq_affinity[i] = 1UL << cpu;
        if (irq_desc[i].handler->set_affinity != NULL)
            irq_desc[i].handler->set_affinity(i, irq_affinity[i]);
    }
}
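
/*
 * Editor's note: each irq_affinity[] entry is a CPU bitmask, so the
 * "1UL << cpu" above routes every IRQ exclusively to the calling CPU;
 * running on CPU 0, each IRQ ends up with the mask 0x00000001.
 */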

/*
 * Restore old irq affinities.
 */

static void reset_irq_affinity(void)
{
    int i;

    memcpy(irq_affinity, saved_affinity, NR_IRQS * sizeof(unsigned long));
    for (i = 0; i < NR_IRQS; i++) {
        if (irq_desc[i].handler == NULL)
            continue;
        if (irq_desc[i].handler->set_affinity != NULL)
            irq_desc[i].handler->set_affinity(i, saved_affinity[i]);
    }
}

/*
 * END of IRQ affinity code, based on LKCD code.
 * -----------------------------------------------------------------
 */

#endif

/*
 * FIXME: This function should really be written in assembly. The actual
 * requirement is that it must not touch the stack, because %esp will be
 * wrong during resume, before restore_processor_context(). Check the
 * assembly if you modify this.
 *
 * SMP support:
 * All SMP processors enter this routine during suspend. The one through
 * which the suspend is initiated (which, for simplicity, is always CPU 0)
 * sends the others here using an IPI during do_swsusp2_suspend_1. They
 * remain here until after the atomic copy of the kernel is made, to ensure
 * that they don't mess with memory in the meantime (even just idling will
 * do that). Once the atomic copy is made, they are free to carry on idling.
 * Note that we must let them go, because if we're using compression, the
 * vfree calls in the compressors will result in IPIs being called and hanging
 * because the CPUs are still here.
 *
 * At resume time, we do a similar thing. CPU 0 sends the others in here using
 * an IPI. It then copies the original kernel back, restores its own processor
 * context and flushes local tlbs before freeing the others to do the same.
 * They can then go back to idling while CPU 0 reloads pageset 2, cleans up
 * and unfreezes the processes.
 *
 * (Remember that freezing and thawing processes also use IPIs, as may
 * decompressing the data. Again, therefore, we cannot leave the other
 * processors in here.)
 *
 * At the moment, we do nothing about APICs, even though the code is there.
 */

void do_swsusp_lowlevel(int resume)
{
    if (!resume) {
#ifdef CONFIG_SMP
        if (smp_processor_id() != cpu_logical_map(0)) {
            unsigned long flags;
            char * my_saved_context = (char *) &saved_contexts[smp_processor_id()];
            /*
             * Save context and go back to idling.
             * Note that we cannot leave the processor
             * here. It must be able to receive IPIs if
             * the LZF compression driver (e.g.) does a
             * vfree after compressing the kernel etc.
             */

            mb();
            barrier();
            spin_lock_irqsave(&saved_context_lock, flags);
            printnolog(SUSPEND_FREEZER, SUSPEND_MEDIUM, 0,
                "Processor %d saving context...", smp_processor_id());
            PRINTPREEMPTCOUNT("Before save_processor_context.");
            save_processor_context();
            for (loop = sizeof(struct saved_context); loop; loop--)
                *(my_saved_context + loop - 1) = *(((char *) &saved_context) + loop - 1);
            atomic_inc(&swsusp_cpu_counter);
            printnolog(SUSPEND_FREEZER, SUSPEND_MEDIUM, 0,
                "Processor %d context saved. CPU counter ->%d\n",
                smp_processor_id(), atomic_read(&swsusp_cpu_counter));
            spin_unlock_irqrestore(&saved_context_lock, flags);
            /* Now spin until the atomic copy of the kernel is made. */
            while (swsusp_state & FREEZE_SMP) {
                cpu_relax();
                smp_mb();
            }
            atomic_dec(&swsusp_cpu_counter);
            return;
        }

        /*
         * Save the irq affinities before we freeze the
         * other processors!
         */

        set_irq_affinity();
#endif /* In our DV's STB there is no SMP -- bob */


        do_swsusp2_suspend_1();
        PRINTPREEMPTCOUNT("Before save_processor_context.");
        save_processor_context();    /* We need to capture registers and memory at "same time" */
        PRINTPREEMPTCOUNT("After save_processor_context.");
        do_swsusp2_suspend_2();        /* If everything goes okay, this function does not return */
        return;
    }

    /* We want to run from swapper_pg_dir, since swapper_pg_dir is stored
     * at a constant place in memory.
     */


        __asm__( "movl %%ecx,%%cr3\n" ::"c"(__pa(swapper_pg_dir)));

/*
 * Final function for resuming: after copying the pages to their original
 * positions, it restores the register state.
 *
 * What about page tables? Writing data pages may toggle
 * accessed/dirty bits in our page tables. That should be no problem
 * with 4MB page tables. That's why we require have_pse.
 *
 * This loop destroys the stack from under itself, so it had better not
 * use any stack space itself. When this function is entered at
 * resume time, we move the stack to its _old_ place. This means that this
 * function must use no stack and no local variables in registers
 * until after calling restore_processor_context().
 *
 * Critical section here: no one should touch saved memory after
 * do_swsusp2_resume_1; copying works because nr_copy_pages,
 * pagedir_resume, loop and loop2 are nosavedata.
 */


#ifdef CONFIG_PREEMPT
    /*
     * Preempt disabled in kernel we're about to restore.
     * Make sure we match state now.
     */

    preempt_disable();
    PRINTPREEMPTCOUNT("Prior to copying old kernel back.");
#endif

#ifdef CONFIG_SMP
    if (smp_processor_id() != cpu_logical_map(0)) {
        unsigned long flags;
        char * my_saved_context = (char *) &saved_contexts[smp_processor_id()];
        /* Save context and hold other processors here */
        atomic_inc(&swsusp_cpu_counter);
        printnolog(SUSPEND_FREEZER, SUSPEND_MEDIUM, 0,
            "Processor %d waiting for restoration of old kernel. CPU counter -> %d.\n",
            smp_processor_id(), atomic_read(&swsusp_cpu_counter));
        smp_mb();
        while (swsusp_state & FREEZE_SMP) {
            cpu_relax();
            smp_mb();
        }
        spin_lock_irqsave(&saved_context_lock, flags);
        for (loop = sizeof(struct saved_context); loop; loop--)
            *(((char *) &saved_context) + loop - 1) = *(my_saved_context + loop - 1);
        restore_processor_context();
        do_flush_tlb_all_local();
        atomic_dec(&swsusp_cpu_counter);
        spin_unlock_irqrestore(&saved_context_lock, flags);
        return;
    }
#endif

    do_swsusp2_resume_1();

    state1 = swsusp_action;
    state2 = swsusp_debug_state;
    state3 = console_loglevel;

#ifdef CONFIG_SMP
    /* Send all IRQs to CPU 0. We will replace the saved affinities
     * with the suspend-time ones when we copy the original kernel
     * back into place.
     */

    set_irq_affinity();
#else
    c_loops_per_jiffy_ref = cpu_data->loops_per_jiffy;
    cpu_khz_ref = cpu_khz;
#endif
    
    origrange = pagedir_resume.origranges.first;
    copyrange = pagedir_resume.destranges.first;
    origoffset = origrange->minimum;
    copyoffset = copyrange->minimum;
    origpage = (unsigned long *) (page_address(mem_map + origoffset));
    copypage = (unsigned long *) (page_address(mem_map + copyoffset));

    while (origrange) {
        for (loop=0; loop < (PAGE_SIZE / sizeof(unsigned long)); loop++)
            *(origpage + loop) = *(copypage + loop);
        
        if (origoffset < origrange->maximum) {
            origoffset++;
            origpage += (PAGE_SIZE / sizeof(unsigned long));
        } else {
            origrange = origrange->next;
            if (origrange) {
                origoffset = origrange->minimum;
                origpage = (unsigned long *) (page_address(mem_map + origoffset));
            }
        }

        if (copyoffset < copyrange->maximum) {
            copyoffset++;
            copypage += (PAGE_SIZE / sizeof(unsigned long));
        } else {
            copyrange = copyrange->next;
            if (copyrange) {
                copyoffset = copyrange->minimum;
                copypage = (unsigned long *) (page_address(mem_map + copyoffset));
            }
        }
    }
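    /*
     * Editor's note: the loop above copies PAGE_SIZE / sizeof(unsigned
     * long) longs (1024 on i386) per page, and re-resolves the virtual
     * address via page_address() only when crossing into a new range -
     * i.e. it assumes consecutive mem_map entries within one range map
     * to virtually contiguous pages.
     */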
    
    restore_processor_context();
#ifdef CONFIG_SMP
    do_flush_tlb_all_local();
#else
    __flush_tlb_all();
#endif
    
    /* Get other CPUs to restore their contexts and flush their tlbs. */
    swsusp_state &= ~FREEZE_SMP;
    
    while (atomic_read(&swsusp_cpu_counter)) {
        cpu_relax();
        smp_mb();
    }

/*
 * Aha - we now run with our old stack, and with registers copied from
 * suspend time.
 */


#ifdef CONFIG_SMP
    /* put the irq affinity tables back */
    reset_irq_affinity();
#else
    cpu_data->loops_per_jiffy = c_loops_per_jiffy_ref;
    loops_per_jiffy = c_loops_per_jiffy_ref;
    cpu_khz = cpu_khz_ref;
#endif
    swsusp_action = state1;
    swsusp_debug_state = state2;
    console_loglevel = state3;

    do_swsusp2_resume_2();
}

/*
 * Function to put the other SMP processors into do_swsusp_lowlevel
 * during suspend or resume. They get their CPU data saved and
 * restored there.
 */


void smp_swsusp_lowlevel(void * info)
{
    unsigned long irq_lock_flags;
    spinlock_t irq_lock = SPIN_LOCK_UNLOCKED;

    smp_mb();
    barrier();
    spin_lock_irqsave(&irq_lock, irq_lock_flags);
    kernel_fpu_begin();
    do_swsusp_lowlevel(now_resuming);
    barrier();
    smp_mb();
    kernel_fpu_end();
    spin_unlock_irqrestore(&irq_lock, irq_lock_flags);
}
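
/*
 * Editor's sketch (assumption - not part of this file): CPU 0 would
 * typically push the other processors into smp_swsusp_lowlevel() with
 * the 2.4-era IPI broadcast API, e.g.
 *
 *     smp_call_function(smp_swsusp_lowlevel, NULL, 0, 0);
 *
 * passing wait = 0 because the CPUs synchronize through FREEZE_SMP and
 * swsusp_cpu_counter rather than the IPI completion handshake.
 */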
#endif
