Reading notes: Kernel Preemption
1st Part: The magic of preempt_count

Hardirq.h (d:\eric\linux\linux-2.6.26\linux-2.6.26\include\linux)    4636    2008-7-14

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count can be overridden per architecture, the default is:
 *
 * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
 * - ( bit 28 is the PREEMPT_ACTIVE flag. )
 *
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x0fff0000
 */
#define PREEMPT_BITS    8
#define SOFTIRQ_BITS    8

#ifndef HARDIRQ_BITS
#define HARDIRQ_BITS    12

#ifndef MAX_HARDIRQS_PER_CPU
#define MAX_HARDIRQS_PER_CPU NR_IRQS
#endif

/*
 * The hardirq mask has to be large enough to have space for potentially
 * all IRQ sources in the system nesting on a single CPU.
 */
#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
# error HARDIRQ_BITS is too low!
#endif
#endif

#define PREEMPT_SHIFT    0
#define SOFTIRQ_SHIFT    (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT    (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define __IRQ_MASK(x)    ((1UL << (x))-1)

#define PREEMPT_MASK    (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK    (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK    (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define PREEMPT_OFFSET    (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET    (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET    (1UL << HARDIRQ_SHIFT)

#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
#error PREEMPT_ACTIVE is too low!
#endif

#define hardirq_count()    (preempt_count() & HARDIRQ_MASK)
#define softirq_count()    (preempt_count() & SOFTIRQ_MASK)
#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 * Are we in a softirq context? Interrupt context?
 */
#define in_irq()        (hardirq_count())     //returns nonzero only while the kernel is executing a hardirq handler
#define in_softirq()        (softirq_count())     //returns nonzero while the kernel is in softirq (bottom-half) context
#define in_interrupt()        (irq_count())  //returns nonzero if the kernel is in interrupt context, i.e. executing a hardirq handler or a bottom-half handler; if it returns zero, the kernel is running in process context

#if defined(CONFIG_PREEMPT)
# define PREEMPT_INATOMIC_BASE kernel_locked()
# define PREEMPT_CHECK_OFFSET 1
#else
# define PREEMPT_INATOMIC_BASE 0
# define PREEMPT_CHECK_OFFSET 0
#endif
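
To make the bit layout concrete, here is a minimal user-space sketch (my own illustration, not kernel code) that decodes a few hypothetical preempt_count values using the same masks and shifts as above:

#include <stdio.h>

/* Same default layout as 2.6.26 hardirq.h (user-space sketch, illustration only). */
#define PREEMPT_SHIFT   0
#define SOFTIRQ_SHIFT   8
#define HARDIRQ_SHIFT   16

#define PREEMPT_MASK    0x000000ffUL
#define SOFTIRQ_MASK    0x0000ff00UL
#define HARDIRQ_MASK    0x0fff0000UL

static void decode(unsigned long count)
{
    printf("preempt_count = 0x%08lx\n", count);
    printf("  preempt disable depth: %lu\n", (count & PREEMPT_MASK) >> PREEMPT_SHIFT);
    printf("  softirq nesting:       %lu\n", (count & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT);
    printf("  hardirq nesting:       %lu\n", (count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);
    printf("  in_interrupt():        %s\n",
           (count & (HARDIRQ_MASK | SOFTIRQ_MASK)) ? "yes" : "no");
}

int main(void)
{
    decode(0x00000000);    /* process context, fully preemptible */
    decode(0x00000002);    /* process context, preempt_disable() nested twice */
    decode(0x00010001);    /* hypothetical: inside a hardirq handler that also disabled preemption */
    return 0;
}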


2nd Part: How does kernel preemption happen?

Preempt.h (d:\eric\linux\linux-2.6.26\linux-2.6.26\include\linux)    2671    2008-7-14
(The two macros below are from include/linux/preempt.h; preempt_schedule() and preempt_schedule_irq() further down are defined in kernel/sched.c.)

#define preempt_enable() \
do { \
    preempt_enable_no_resched(); \
    barrier(); \
    preempt_check_resched(); \
} while (0)


#define preempt_check_resched() \
do { \
    if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
        preempt_schedule(); \
} while (0)
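
As a reminder of how these macros are used in practice, below is a small sketch (my own example, not from the kernel source) of the canonical preempt_disable()/preempt_enable() pairing; the preemption point is the closing preempt_enable():

#include <linux/preempt.h>

/* Hypothetical example function: shows the pattern, not real kernel code. */
static void example_touch_per_cpu_data(void)
{
    preempt_disable();      /* preempt_count++: from here on we cannot be
                             * preempted or migrated to another CPU */

    /* ... access per-CPU data safely ... */

    preempt_enable();       /* preempt_count--; if it drops to 0 and
                             * TIF_NEED_RESCHED is set, preempt_check_resched()
                             * calls preempt_schedule() right here */
}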


#ifdef CONFIG_PREEMPT

/*
 * this is the entry point to schedule() from in-kernel preemption
 * off of preempt_enable. Kernel preemptions off return from interrupt
 * occur there and call schedule directly.
 */
asmlinkage void __sched preempt_schedule(void)
{
    struct thread_info *ti = current_thread_info();

    /*
     * If there is a non-zero preempt_count or interrupts are disabled,
     * we do not want to preempt the current task. Just return..
     */
    if (likely(ti->preempt_count || irqs_disabled()))
        return;

    do {
        add_preempt_count(PREEMPT_ACTIVE);
        schedule();
        sub_preempt_count(PREEMPT_ACTIVE);

        /*
         * Check again in case we missed a preemption opportunity
         * between schedule and now.
         */
        barrier();
    } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}
EXPORT_SYMBOL(preempt_schedule);

/*
 * this is the entry point to schedule() from kernel preemption
 * off of irq context.
 * Note, that this is called and return with irqs disabled. This will
 * protect us against recursive calling from irq.
 */
asmlinkage void __sched preempt_schedule_irq(void)
{
    struct thread_info *ti = current_thread_info();

    /* Catch callers which need to be fixed */
    BUG_ON(ti->preempt_count || !irqs_disabled());

    do {
        add_preempt_count(PREEMPT_ACTIVE);
        local_irq_enable();
        schedule();
        local_irq_disable();
        sub_preempt_count(PREEMPT_ACTIVE);

        /*
         * Check again in case we missed a preemption opportunity
         * between schedule and now.
         */
        barrier();
    } while (unlikely(test_thread_flag(TIF_NEED_RESCHED)));
}
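
preempt_schedule_irq() itself is invoked from the architecture's interrupt-return assembly (e.g. entry_32.S on x86), not from C code. A rough C-style rendering of the check that assembly performs before resuming an interrupted kernel-mode task might look like this (identifiers below are made up for illustration):

/* Illustrative sketch only; the real logic lives in arch entry assembly. */
static void irq_return_to_kernel_sketch(struct thread_info *ti)
{
    /* Resume the interrupted code unchanged unless it was preemptible ... */
    if (ti->preempt_count != 0)
        return;

    /* ... and a reschedule was requested while the handler ran. */
    if (!test_ti_thread_flag(ti, TIF_NEED_RESCHED))
        return;

    preempt_schedule_irq();    /* called with irqs still disabled */
}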


From the above code, we can draw the following points:
1. There are two kernel preemption entry points: preempt_schedule() and preempt_schedule_irq(). The former is invoked from preempt_enable(), while the latter is invoked from the architecture's return path after an interrupt or exception.

2. In preempt_enable(), the kernel first decrements preempt_count (via preempt_enable_no_resched()) and then checks the TIF_NEED_RESCHED flag. If the flag is set, preempt_schedule() is called, and it actually preempts the current task only when preempt_count is 0 and local interrupts are enabled.


Then where is the TIF_NEED_RESCHED flag set? This still needs to be verified.



