Category: LINUX

2010-07-09 23:53:03

1. The softirq execution function:
asmlinkage void __do_softirq(void)
{
    struct softirq_action *h;
    __u32 pending;
    int max_restart = MAX_SOFTIRQ_RESTART;
    int cpu;

    pending = local_softirq_pending();
    account_system_vtime(current);
    /* bump the softirq count in preempt_count: softirqs (bottom halves) are now disabled */
    __local_bh_disable((unsigned long)__builtin_return_address(0));
    lockdep_softirq_enter();

    cpu = smp_processor_id();
restart:
    /* Reset the pending bitmask before enabling irqs */
    set_softirq_pending(0);
 
    local_irq_enable();

    h = softirq_vec;

    do {
        if (pending & 1) {
            int prev_count = preempt_count();
            kstat_incr_softirqs_this_cpu(h - softirq_vec);

            trace_softirq_entry(h, softirq_vec);
            h->action(h);
            trace_softirq_exit(h, softirq_vec);
            if (unlikely(prev_count != preempt_count())) {
                printk(KERN_ERR "huh, entered softirq %td %s %p"
                       "with preempt_count %08x,"
                       " exited with %08x?\n", h - softirq_vec,
                       softirq_to_name[h - softirq_vec],
                       h->action, prev_count, preempt_count());
                preempt_count() = prev_count;
            }

            rcu_bh_qs(cpu);
        }
        h++;
        pending >>= 1;
    } while (pending);
    /* re-disable local interrupts before re-reading the pending mask (and possibly waking ksoftirqd), so softirqs raised by interrupts in the meantime are not missed */
    local_irq_disable();

    pending = local_softirq_pending();
    /* restart at most MAX_SOFTIRQ_RESTART (10) times */
    if (pending && --max_restart)
        goto restart;
    /* if softirqs are still pending after the restart limit, defer them to the ksoftirqd thread */
    if (pending)
        wakeup_softirqd();

    lockdep_softirq_exit();

    account_system_vtime(current);
    /* re-enable softirqs */
    _local_bh_enable();
}
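For reference, handlers get into the softirq_vec[] array that the loop above walks via open_softirq(), and the per-CPU pending bitmask read by local_softirq_pending() is set with raise_softirq(). A minimal sketch (the two open_softirq() calls mirror what softirq_init() does in the kernel; the raise_softirq() lines just show the usual calling pattern from a hard-IRQ handler):

/* installing handlers into softirq_vec[] (as softirq_init() does) */
open_softirq(TASKLET_SOFTIRQ, tasklet_action);
open_softirq(HI_SOFTIRQ, tasklet_hi_action);

/* from a hard-IRQ handler: set this CPU's pending bit so that
 * local_softirq_pending() reports it and __do_softirq() runs the action */
raise_softirq(NET_RX_SOFTIRQ);        /* safe with local IRQs enabled */
raise_softirq_irqoff(NET_RX_SOFTIRQ); /* caller already has local IRQs off */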
2. do_softirq() is normally architecture-specific; the version below is the generic fallback, which likewise makes sure it is not re-entered and reads the pending mask with local interrupts disabled:
#ifndef __ARCH_HAS_DO_SOFTIRQ
asmlinkage void do_softirq(void)
{
    __u32 pending;
    unsigned long flags;
    /* refuse to run nested inside interrupt/softirq context; this is the synchronization check */
    if (in_interrupt())
        return;

    local_irq_save(flags);

    pending = local_softirq_pending();

    if (pending)
        __do_softirq();
    /* local interrupts are restored (re-enabled) here */
    local_irq_restore(flags);
}

#endif

3. The ksoftirqd kernel thread code:
static int run_ksoftirqd(void * __bind_cpu)
{
    set_current_state(TASK_INTERRUPTIBLE);

    while (!kthread_should_stop()) {
        /* softirqs have higher priority than normal processes, so disable preemption while checking and handling them */
        preempt_disable();
        if (!local_softirq_pending()) {
            preempt_enable_no_resched();
            schedule();
            preempt_disable();
        }

        __set_current_state(TASK_RUNNING);

        while (local_softirq_pending()) {
            /* Preempt disable stops cpu going offline.
               If already offline, we'll be on wrong CPU:
               don't process */
            if (cpu_is_offline((long)__bind_cpu))
                goto wait_to_die;
            do_softirq();
            /* briefly re-enable preemption between iterations so other tasks can run */
            preempt_enable_no_resched();
            cond_resched();
            preempt_disable();
            rcu_note_context_switch((long)__bind_cpu);
        }
     /* finished for now, re-enable preemption */
        preempt_enable();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;

wait_to_die:
    preempt_enable();
    /* Wait for kthread_stop */
    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;
}

The SMP global CPU masks: online, possible, present, and active:

#define cpu_online(cpu)        cpumask_test_cpu((cpu), cpu_online_mask)
#define cpu_possible(cpu)    cpumask_test_cpu((cpu), cpu_possible_mask)
#define cpu_present(cpu)    cpumask_test_cpu((cpu), cpu_present_mask)
#define cpu_active(cpu)        cpumask_test_cpu((cpu), cpu_active_mask)

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
    = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
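A quick sketch of how these masks are typically consumed (illustrative only; my_percpu_counter and my_sum_counters are hypothetical names, not kernel symbols):

static DEFINE_PER_CPU(unsigned long, my_percpu_counter);

static unsigned long my_sum_counters(void)
{
    unsigned long sum = 0;
    int cpu;

    /* possible: every CPU that could ever exist in this boot, so per-CPU
     * data is visited even for CPUs that are currently offline */
    for_each_possible_cpu(cpu)
        sum += per_cpu(my_percpu_counter, cpu);

    /* online: CPUs that can run tasks right now */
    pr_info("%d of %d possible CPUs online\n",
            num_online_cpus(), num_possible_cpus());

    return sum;
}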

Functions called by do_IRQ():
void irq_enter(void)
{
    int cpu = smp_processor_id();

    rcu_irq_enter();
    if (idle_cpu(cpu) && !in_interrupt()) {
        __irq_enter(); /* bumps the preempt count by HARDIRQ_OFFSET */
        tick_check_idle(cpu);
    } else
        __irq_enter();
}
#define __irq_enter()                    \
    do {                        \
        account_system_vtime(current);        \
        add_preempt_count(HARDIRQ_OFFSET);    \
        trace_hardirq_enter();            \
    } while (0)

void irq_exit(void)
{
    account_system_vtime(current);
    trace_hardirq_exit();
    sub_preempt_count(IRQ_EXIT_OFFSET);
    if (!in_interrupt() && local_softirq_pending()) /* run softirqs only when leaving the outermost interrupt and some are pending */
        invoke_softirq();

    rcu_irq_exit();
#ifdef CONFIG_NO_HZ
    /* Make sure that timer wheel updates are propagated */
    if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
        tick_nohz_stop_sched_tick(0);
#endif
    preempt_enable_no_resched();
}
#ifdef CONFIG_PREEMPT
# define preemptible()    (preempt_count() == 0 && !irqs_disabled())
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1) /* with CONFIG_PREEMPT, leave a count of 1 behind; preempt_enable_no_resched() removes it */
#else
# define preemptible()    0
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET  /* without CONFIG_PREEMPT, remove the whole hard-IRQ count in one step */
#endif

#ifdef CONFIG_PREEMPT
#define preempt_enable_no_resched() \
do { \
    barrier(); \
    dec_preempt_count(); /* with CONFIG_PREEMPT, actually decrement the count */ \
} while (0)
#else
#define preempt_enable_no_resched()    do { } while (0)
#endif
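Putting the two pieces together, the HARDIRQ_OFFSET added by __irq_enter() is paid back in two steps when CONFIG_PREEMPT is set (a worked illustration; 0x10000 is the common value of HARDIRQ_OFFSET, but the exact bit layout of preempt_count is architecture-dependent):

/* CONFIG_PREEMPT=y:
 *   __irq_enter():               preempt_count += 0x10000  -> 0x10000
 *   irq_exit():                  preempt_count -= 0x0ffff  -> 0x00001
 *       the leftover 1 keeps preemption disabled while invoke_softirq()
 *       and the NO_HZ check run, even though in_interrupt() is already 0
 *   preempt_enable_no_resched(): preempt_count -= 1        -> 0x00000
 *
 * CONFIG_PREEMPT=n:
 *   irq_exit() subtracts the full HARDIRQ_OFFSET in one step and
 *   preempt_enable_no_resched() expands to nothing, so the end result
 *   is the same. */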

/*
 * When is the ksoftirqd kernel thread created?
 */
static int __cpuinit cpu_callback(struct notifier_block *nfb,
                  unsigned long action,
                  void *hcpu)
{
    int hotcpu = (unsigned long)hcpu;
    struct task_struct *p;

    switch (action) {
/* the per-CPU thread is created when the CPU is being prepared (CPU_UP_PREPARE) */
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
        p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
        if (IS_ERR(p)) {
            printk("ksoftirqd for %i failed\n", hotcpu);
            return notifier_from_errno(PTR_ERR(p));
        }
        kthread_bind(p, hotcpu);
        per_cpu(ksoftirqd, hotcpu) = p;
        break;
    case CPU_ONLINE:
    case CPU_ONLINE_FROZEN:
        wake_up_process(per_cpu(ksoftirqd, hotcpu));
        break;
#ifdef CONFIG_HOTPLUG_CPU
    case CPU_UP_CANCELED:
    case CPU_UP_CANCELED_FROZEN:
        if (!per_cpu(ksoftirqd, hotcpu))
            break;
        /* Unbind so it can run.  Fall thru. */
        kthread_bind(per_cpu(ksoftirqd, hotcpu),
                 cpumask_any(cpu_online_mask));
    case CPU_DEAD:
    case CPU_DEAD_FROZEN: {
        struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

        p = per_cpu(ksoftirqd, hotcpu);
        per_cpu(ksoftirqd, hotcpu) = NULL;
        sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
        kthread_stop(p);
        takeover_tasklets(hotcpu);
        break;
    }
#endif /* CONFIG_HOTPLUG_CPU */
     }
    return NOTIFY_OK;
}


static struct notifier_block __cpuinitdata cpu_nfb = {
    .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
    void *cpu = (void *)(long)smp_processor_id();
/* call the callback directly: CPU_UP_PREPARE creates the thread for the boot CPU */
    int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

    BUG_ON(err != NOTIFY_OK);
/* generate a CPU_ONLINE event to wake up that ksoftirqd thread */
    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
/* register the notifier so that every CPU brought up later gets its own ksoftirqd thread at CPU_UP_PREPARE time */
    register_cpu_notifier(&cpu_nfb);
    return 0;
}
early_initcall(spawn_ksoftirqd);  /* the boot CPU spawns its own softirq thread during early init */

The CPU notifier mechanism:
int __ref register_cpu_notifier(struct notifier_block *nb)
{
    int ret;
    cpu_maps_update_begin(); /* take the cpu_add_remove_lock mutex */
    ret = raw_notifier_chain_register(&cpu_chain, nb);
    cpu_maps_update_done();
    return ret;
}

void cpu_maps_update_begin(void)
{
    mutex_lock(&cpu_add_remove_lock);
}

static RAW_NOTIFIER_HEAD(cpu_chain);

/* insert a notifier block into the chain */
static int notifier_chain_register(struct notifier_block **nl,
        struct notifier_block *n)
{
    while ((*nl) != NULL) {
/* blocks with higher priority come first */
        if (n->priority > (*nl)->priority)
            break;
        nl = &((*nl)->next);
    }
    n->next = *nl;
    rcu_assign_pointer(*nl, n);
    return 0;
}
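Since higher-priority blocks are inserted ahead of lower-priority ones, a caller that needs to see hotplug events before the default (priority 0) blocks such as cpu_nfb can set .priority explicitly. A minimal sketch (my_cpu_callback and the priority value are illustrative, not taken from the kernel source):

static int my_cpu_callback(struct notifier_block *nb,
               unsigned long action, void *hcpu)
{
    /* react to CPU_UP_PREPARE / CPU_ONLINE / CPU_DEAD as needed */
    return NOTIFY_OK;
}

static struct notifier_block my_cpu_nfb = {
    .notifier_call = my_cpu_callback,
    .priority = 10,    /* placed ahead of priority-0 entries in cpu_chain */
};

/* register_cpu_notifier(&my_cpu_nfb); */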

/* the CPU notification entry point */
static int cpu_notify(unsigned long val, void *v)
{
    return __cpu_notify(val, v, -1, NULL);
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
            int *nr_calls)
{
    int ret;
    /* Thanks to RCU, walking the raw notifier chain needs no lock; and since nothing here defers freeing memory, rcu_read_lock() is not needed either */
    ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                    nr_calls);

    return notifier_to_errno(ret);
}

cpu_notify() is mainly called from cpu_up() and cpu_down().
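Roughly, the bring-up path fires the chain around the architecture-specific boot of the new CPU; a simplified sketch of the order of events inside cpu_up() (not verbatim kernel source, error handling omitted):

/* simplified: what _cpu_up() does with the notifier chain */
cpu_notify(CPU_UP_PREPARE, hcpu);   /* ksoftirqd/N is created here        */
__cpu_up(cpu);                      /* arch code actually starts the CPU  */
cpu_notify(CPU_ONLINE, hcpu);       /* ksoftirqd/N is woken up here       */
/* if bring-up fails after UP_PREPARE, CPU_UP_CANCELED is sent instead */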

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
    int ret = 0;
/* execute the function with preemption disabled, and with local interrupts off for the local invocation */
    preempt_disable();
    ret = smp_call_function(func, info, wait);
    local_irq_disable();
    func(info);
    local_irq_enable();
    preempt_enable();
    return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
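Typical usage from a subsystem that keeps per-CPU state (a minimal sketch; my_flush_local and my_flush_all_cpus are hypothetical helpers):

static void my_flush_local(void *info)
{
    /* runs on each CPU with local interrupts disabled;
     * must be fast and must not sleep */
}

static void my_flush_all_cpus(void)
{
    /* wait = 1: do not return until every CPU has run the callback */
    on_each_cpu(my_flush_local, NULL, 1);
}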

int smp_call_function(void (*func)(void *), void *info, int wait)
{
    preempt_disable();
    smp_call_function_many(cpu_online_mask, func, info, wait);
    preempt_enable();

    return 0;
}


/**
 * smp_call_function_many(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on (only runs on online subset).
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed
 *        on other CPUs.
 *
 * If @wait is true, then returns once @func has returned.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler. Preemption
 * must be disabled when calling this function.
 */
void smp_call_function_many(const struct cpumask *mask,
                void (*func)(void *), void *info, bool wait)
{
    struct call_function_data *data;
    unsigned long flags;
    int cpu, next_cpu, this_cpu = smp_processor_id();

    /*
     * Can deadlock when called with interrupts disabled.
     * We allow cpu's that are not yet online though, as no one else can
     * send smp call function interrupt to this cpu and as such deadlocks
     * can't happen.
     */
    WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
             && !oops_in_progress);

    /* So, what's a CPU they want? Ignoring this one. */
    cpu = cpumask_first_and(mask, cpu_online_mask);
    if (cpu == this_cpu)
        cpu = cpumask_next_and(cpu, mask, cpu_online_mask);

    /* No online cpus?  We're done. */
    if (cpu >= nr_cpu_ids)
        return;

    /* Do we have another CPU which isn't us? */
    next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
    if (next_cpu == this_cpu)
        next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);

    /* Fastpath: do that cpu by itself. */
    if (next_cpu >= nr_cpu_ids) {
        smp_call_function_single(cpu, func, info, wait);
        return;
    }

    data = &__get_cpu_var(cfd_data);
    csd_lock(&data->csd);

    data->csd.func = func;
    data->csd.info = info;
    cpumask_and(data->cpumask, mask, cpu_online_mask);
    cpumask_clear_cpu(this_cpu, data->cpumask);
    atomic_set(&data->refs, cpumask_weight(data->cpumask));

    raw_spin_lock_irqsave(&call_function.lock, flags);
    /*
     * Place entry at the _HEAD_ of the list, so that any cpu still
     * observing the entry in generic_smp_call_function_interrupt()
     * will not miss any other list entries:
     */
    list_add_rcu(&data->csd.list, &call_function.queue);
    raw_spin_unlock_irqrestore(&call_function.lock, flags);

    /*
     * Make the list addition visible before sending the ipi.
     * (IPIs must obey or appear to obey normal Linux cache
     * coherency rules -- see comment in generic_exec_single).
     */
    smp_mb();

    /* Send a message to all CPUs in the map */
    arch_send_call_function_ipi_mask(data->cpumask);

    /* Optionally wait for the CPUs to complete */
    if (wait)
        csd_lock_wait(&data->csd);
}
