对于RT(实时)内核,需要将中断线程化。
[中断线程化理论部分]:
二、中断线程化
中断线程化是实现Linux实时性的一个重要步骤,在Linux标准内核中,中断是最高优先级的执行单元,不管内核当时处理什么,只要有中断事件, 系统将立即响应该事件并执行相应的中断处理代码,除非当时中断关闭(即使用local_irq_disable失效了IRQ)。因此,如果系统有严重的网络或I/O负载, 中断将非常频繁,实时任务将很难有机会运行,也就是说,毫无实时性可言。中断线程化之后,中断将作为内核线程运行而且赋予不同的实时优先级, 实时任务可以有比中断线程更高的优先级,这样,实时任务就可以作为最高优先级的执行单元来运行,即使在严重负载下仍有实时性保证。
中断线程化的另一个重要原因是spinlock被mutex取代。中断处理代码中大量地使用了spinlock,当spinlock被mutex取代之后, 中断处理代码就有可能因为得不到锁而需要被挂到等待队列上,但是只有可调度的进程才可以这么做,如果中断处理代码仍然使用原来的spinlock, 则spinlock取代mutex的努力将大打折扣,因此为了满足这一要求,中断必须被线程化,包括IRQ和softirq。
【make menuconfig 】后, vi .config 会看到:
#
# Kernel Features
#
为了能并入主流内核,Ingo Molnar的实时补丁也采用了非常灵活的策略,它支持四种抢占模式:
1.No Forced Preemption (Server),这种模式等同于没有使能抢占选项的标准内核,主要适用于科学计算等服务器环境。
2.Voluntary Kernel Preemption (Desktop),这种模式使能了自愿抢占,但仍然失效抢占内核选项,它通过增加抢占点缩减了抢占延迟,因此适用于一些需要较好的响应性的环境,如桌面环境,当然这种好的响应性是以牺牲一些吞吐率为代价的。
3.Preemptible Kernel (Low-Latency Desktop),这种模式既包含了自愿抢占,又使能了可抢占内核选项,因此有很好的响应延迟,实际上在一定程度上已经达到了软实时性。它主要适用于桌面和一些嵌入式系统,但是吞吐率比模式2更低。
4.Complete Preemption (Real-Time),这种模式使能了所有实时功能,因此完全能够满足软实时需求,它适用于延迟要求为100微秒或稍低的实时系统。
实现实时是以牺牲系统的吞吐率为代价的,因此实时性越好,系统吞吐率就越低。
[4个选择]
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT_DESKTOP is not set
# CONFIG_PREEMPT_RT is not set
【线程化与否】
CONFIG_PREEMPT_SOFTIRQS=y
CONFIG_PREEMPT_HARDIRQS=y
# CONFIG_PREEMPT_BKL is not set
# CONFIG_CLASSIC_RCU is not set
CONFIG_PREEMPT_RCU=y
CONFIG_RCU_TRACE=y
# CONFIG_NO_IDLE_HZ is not set
CONFIG_HZ=200
下面是中断线程化的代码:
kernel/irq/manage.c :
#ifdef CONFIG_PREEMPT_HARDIRQS
/*
 * Global switch for hardirq threading: non-zero means hardirqs are
 * handled in per-IRQ kernel threads.  Defaults to on.
 */
int hardirq_preemption = 1;

EXPORT_SYMBOL(hardirq_preemption);

/*
 * Real-Time Preemption depends on hardirq threading:
 * only allow it to be switched off on the boot command line
 * ("hardirq-preempt=off" or "hardirq-preempt=0") when
 * CONFIG_PREEMPT_RT is not set.
 */
#ifndef CONFIG_PREEMPT_RT
static int __init hardirq_preempt_setup (char *str)
{
	/* Accept the literal "off" as well as a numeric value. */
	if (!strncmp(str, "off", 3))
		hardirq_preemption = 0;
	else
		get_option(&str, &hardirq_preemption);
	if (!hardirq_preemption)
		printk("turning off hardirq preemption!\n");

	/* __setup() handlers return 1 when the option was consumed. */
	return 1;
}

__setup("hardirq-preempt=", hardirq_preempt_setup);

#endif
/*
 * threaded simple handler
 *
 * Runs the IRQ's action list from the per-IRQ thread instead of hard
 * interrupt context.  Called (and returns) with desc->lock held; the
 * lock is dropped around handle_IRQ_event() so the action handlers run
 * with interrupts enabled and may sleep/be preempted.
 */
static void thread_simple_irq(irq_desc_t *desc)
{
	struct irqaction *action = desc->action;
	/* IRQ number recovered from the descriptor's array position. */
	unsigned int irq = desc - irq_desc;
	irqreturn_t action_ret;

	/* Only run handlers if the IRQ has any and is not disabled
	 * (depth != 0 means disable_irq() nesting is active). */
	if (action && !desc->depth) {
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, NULL, action);
		local_irq_enable();
		/* Voluntary preemption point between handler and relock. */
		cond_resched_all();
		spin_lock_irq(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, NULL);
	}
	/* The threaded pass over this interrupt is complete. */
	desc->status &= ~IRQ_INPROGRESS;
}
/*
 * threaded level type irq handler
 *
 * Run the handlers via thread_simple_irq(), then unmask the line again
 * (a level-type IRQ is masked while being serviced) unless it was
 * disabled in the meantime.  Called with desc->lock held.
 */
static void thread_level_irq(irq_desc_t *desc)
{
	unsigned int irqnr = desc - irq_desc;

	thread_simple_irq(desc);

	if (desc->status & IRQ_DISABLED)
		return;
	if (desc->chip->unmask)
		desc->chip->unmask(irqnr);
}
/*
 * threaded fasteoi type irq handler
 *
 * Same flow as the level-type variant: run the handlers through
 * thread_simple_irq(), then unmask the line if it is still enabled.
 * Called with desc->lock held.
 */
static void thread_fasteoi_irq(irq_desc_t *desc)
{
	unsigned int irqnr = desc - irq_desc;

	thread_simple_irq(desc);

	if (desc->status & IRQ_DISABLED)
		return;
	if (desc->chip->unmask)
		desc->chip->unmask(irqnr);
}
/*
 * threaded edge type IRQ handler
 *
 * Edge interrupts that fire while one is being serviced are recorded in
 * IRQ_PENDING, so this loops until no new edge arrived.  Called with
 * desc->lock held; the lock is dropped around the action handlers.
 */
static void thread_edge_irq(irq_desc_t *desc)
{
	unsigned int irq = desc - irq_desc;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		/* No handlers left: mask the line and bail out. */
		if (unlikely(!action)) {
			desc->status &= ~IRQ_INPROGRESS;
			desc->chip->mask(irq);
			return;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(((desc->status & (IRQ_PENDING | IRQ_MASKED)) ==
			      (IRQ_PENDING | IRQ_MASKED)) && !desc->depth))
			desc->chip->unmask(irq);

		/* Consume the pending edge, then run handlers unlocked. */
		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, NULL, action);
		local_irq_enable();
		cond_resched_all();
		spin_lock_irq(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, NULL);
	} while ((desc->status & IRQ_PENDING) && !desc->depth);

	desc->status &= ~IRQ_INPROGRESS;
}
/*
 * Threaded fallback handler for descriptors whose handle_irq is none of
 * the recognized flow handlers (see the dispatch in do_hardirq()).
 * Replays IRQ_PENDING like the edge handler and finishes with the
 * chip's ->end() hook.  Called with desc->lock held.
 *
 * NOTE(review): the original header comment said "edge type" — that was
 * a copy/paste of the previous function's comment.
 */
static void thread_do_irq(irq_desc_t *desc)
{
	unsigned int irq = desc - irq_desc;

	do {
		struct irqaction *action = desc->action;
		irqreturn_t action_ret;

		/* No handlers: disable the line and stop. */
		if (unlikely(!action)) {
			desc->status &= ~IRQ_INPROGRESS;
			desc->chip->disable(irq);
			return;
		}

		/* Consume the pending event, run handlers unlocked. */
		desc->status &= ~IRQ_PENDING;
		spin_unlock(&desc->lock);
		action_ret = handle_IRQ_event(irq, NULL, action);
		local_irq_enable();
		cond_resched_all();
		spin_lock_irq(&desc->lock);
		if (!noirqdebug)
			note_interrupt(irq, desc, action_ret, NULL);
	} while ((desc->status & IRQ_PENDING) && !desc->depth);

	desc->status &= ~IRQ_INPROGRESS;
	/* Tell the irq chip we are done with this interrupt. */
	desc->chip->end(irq);
}
/*
 * Service one interrupt from thread context: dispatch to the threaded
 * variant matching the descriptor's flow handler.  Takes desc->lock;
 * the thread_* helpers temporarily drop it around the action handlers.
 */
static void do_hardirq(struct irq_desc *desc)
{
	spin_lock_irq(&desc->lock);

	/* Nothing marked in progress for us — the hardirq entry code
	 * sets IRQ_INPROGRESS before waking this thread. */
	if (!(desc->status & IRQ_INPROGRESS))
		goto out;

	if (desc->handle_irq == handle_simple_irq)
		thread_simple_irq(desc);
	else if (desc->handle_irq == handle_level_irq)
		thread_level_irq(desc);
	else if (desc->handle_irq == handle_fasteoi_irq)
		thread_fasteoi_irq(desc);
	else if (desc->handle_irq == handle_edge_irq)
		thread_edge_irq(desc);
	else
		thread_do_irq(desc);
 out:
	spin_unlock_irq(&desc->lock);

	/* Wake anyone (e.g. synchronize_irq()) waiting for the handler
	 * to finish. */
	if (waitqueue_active(&desc->wait_for_handler))
		wake_up(&desc->wait_for_handler);
}
/* Softirq processing entry point, run from the IRQ thread below. */
extern asmlinkage void __do_softirq(void);

/* Default RT priority for IRQ threads (SCHED_FIFO/49).
 * NOTE(review): not referenced in the code visible here — presumably
 * read/updated elsewhere in manage.c; verify before removing. */
static int curr_irq_prio = 49;
static int do_irqd(void * __desc) { struct sched_param param = { 0, }; struct irq_desc *desc = __desc; #ifdef CONFIG_SMP int irq = desc - irq_desc; cpumask_t mask;
mask = cpumask_of_cpu(any_online_cpu(irq_desc[irq].affinity)); set_cpus_allowed(current, mask); #endif current->flags |= PF_NOFREEZE | PF_HARDIRQ;
/* * Set irq thread priority to SCHED_FIFO/50: */ param.sched_priority = MAX_USER_RT_PRIO/2;
sys_sched_setscheduler(current->pid, SCHED_FIFO, ¶m);
while (!kthread_should_stop()) { set_current_state(TASK_INTERRUPTIBLE); do_hardirq(desc); cond_resched_all(); local_irq_disable(); __do_softirq(); local_irq_enable(); #ifdef CONFIG_SMP /* * Did IRQ affinities change? */ if (!cpus_equal(current->cpus_allowed, irq_desc[irq].affinity)) set_cpus_allowed(current, irq_desc[irq].affinity); #endif schedule(); } __set_current_state(TASK_RUNNING); return 0; }
static int ok_to_create_irq_threads;
//中断线程化 static int start_irq_thread(int irq, struct irq_desc *desc) { if (desc->thread || !ok_to_create_irq_threads) return 0;
desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq); if (!desc->thread) { printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq); return -ENOMEM; }
/* * An interrupt may have come in before the thread pointer was * stored in desc->thread; make sure the thread gets woken up in * such a case: */ smp_mb(); wake_up_process(desc->thread);
return 0; }
/*
 * init_hardirqs - boot-time creation of the per-IRQ service threads.
 *
 * Enables thread creation, then walks every descriptor and spawns a
 * thread for each IRQ that has handlers installed and is not flagged
 * IRQ_NODELAY (IRQ_NODELAY interrupts stay in hard interrupt context).
 */
void __init init_hardirqs(void)
{
	int irqno;

	ok_to_create_irq_threads = 1;

	for (irqno = 0; irqno < NR_IRQS; irqno++) {
		irq_desc_t *desc = &irq_desc[irqno];

		if (!desc->action)
			continue;
		if (desc->status & IRQ_NODELAY)
			continue;
		start_irq_thread(irqno, desc);
	}
}
#else

/*
 * CONFIG_PREEMPT_HARDIRQS is off: no IRQ threads are ever created,
 * so this stub keeps callers working and always reports success.
 */
static int start_irq_thread(int irq, struct irq_desc *desc)
{
	return 0;
}

#endif
[ 本帖最后由 bobzhang 于 2007-8-27 15:30 编辑 ]
|