My own notes, typed up in Notepad while walking through the kernel's workqueue ("work") mechanism; sharing them here:
struct cpu_workqueue_struct {
spinlock_t lock;
struct list_head worklist;// list of pending work items
wait_queue_head_t more_work; // wait queue the worker thread sleeps on when there is no work to process
struct work_struct *current_work;// the work item currently being executed
struct workqueue_struct *wq;// the workqueue this cwq belongs to
struct task_struct *thread;// pointer to the worker thread
} ____cacheline_aligned;
/*
* The externally visible workqueue abstraction is an array of
* per-CPU workqueues:
*/
struct workqueue_struct {
struct cpu_workqueue_struct *cpu_wq;// per-CPU array of the cpu_workqueue_structs shown above
struct list_head list;// node for linking this workqueue into the global workqueues list
const char *name; // name of the workqueue
int singlethread;// flag: create a single worker thread instead of one per CPU
int freezeable;/* whether the worker threads may be frozen during suspend */
int rt;// whether the worker threads run with the SCHED_FIFO real-time policy
#ifdef CONFIG_LOCKDEP
struct lockdep_map lockdep_map;
#endif
};
dev->wq = create_singlethread_workqueue("k_otg");
#define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0, 0)
#define __create_workqueue(name, singlethread, freezeable, rt) __create_workqueue_key((name), (singlethread), (freezeable), (rt), NULL, NULL)
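For context, the sibling helpers in the same era's include/linux/workqueue.h expand into the same __create_workqueue() call with different flag combinations; the values below are quoted from memory for a ~2.6.32 tree, so verify them against your own kernel:
#define create_workqueue(name) __create_workqueue((name), 0, 0, 0)
#define create_rt_workqueue(name) __create_workqueue((name), 0, 0, 1)
#define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1, 0)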
struct workqueue_struct *__create_workqueue_key(const char *name,// workqueue name
int singlethread,// single worker thread, or one per CPU
int freezeable,// whether the worker threads may be frozen during suspend
int rt,// whether the worker threads use the SCHED_FIFO real-time policy
struct lock_class_key *key,
const char *lock_name)
{
struct workqueue_struct *wq;
struct cpu_workqueue_struct *cwq;
int err = 0, cpu;
wq = kzalloc(sizeof(*wq), GFP_KERNEL);// allocate the workqueue_struct
if (!wq)
return NULL;
wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);// allocate the per-CPU cpu_workqueue_structs
if (!wq->cpu_wq) {
kfree(wq);
return NULL;
}
wq->name = name;
lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
wq->singlethread = singlethread;
wq->freezeable = freezeable;
wq->rt = rt;
INIT_LIST_HEAD(&wq->list);// initialize the list node; the multi-threaded path below links it into the global list
if (singlethread) {// single-threaded: create one worker on singlethread_cpu
cwq = init_cpu_workqueue(wq, singlethread_cpu);
err = create_workqueue_thread(cwq, singlethread_cpu);
start_workqueue_thread(cwq, -1);
} else {// multi-threaded: create a worker for each CPU
cpu_maps_update_begin();
/*
* We must place this wq on list even if the code below fails.
* cpu_down(cpu) can remove cpu from cpu_populated_map before
* destroy_workqueue() takes the lock, in that case we leak
* cwq[cpu]->thread.
*/
spin_lock(&workqueue_lock);
list_add(&wq->list, &workqueues);// multi-threaded case: link into the global workqueues list
spin_unlock(&workqueue_lock);
/*
* We must initialize cwqs for each possible cpu even if we
* are going to call destroy_workqueue() finally. Otherwise
* cpu_up() can hit the uninitialized cwq once we drop the
* lock.
*/
for_each_possible_cpu(cpu) {
cwq = init_cpu_workqueue(wq, cpu);
if (err || !cpu_online(cpu))
continue;
err = create_workqueue_thread(cwq, cpu);
start_workqueue_thread(cwq, cpu);
}
cpu_maps_update_done();
}
if (err) {
destroy_workqueue(wq);
wq = NULL;
}
return wq;
}
1. init_cpu_workqueue(wq, singlethread_cpu);
static struct cpu_workqueue_struct *init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);// get this CPU's cwq from wq
/* initialize the cwq */
cwq->wq = wq;
spin_lock_init(&cwq->lock);
INIT_LIST_HEAD(&cwq->worklist);
init_waitqueue_head(&cwq->more_work);// initialize the wait queue the worker sleeps on
return cwq;
}
2. err = create_workqueue_thread(cwq, singlethread_cpu);
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
struct workqueue_struct *wq = cwq->wq;
const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
struct task_struct *p;
p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);// create the worker kernel thread
if (IS_ERR(p))
return PTR_ERR(p);
if (cwq->wq->rt)
sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
cwq->thread = p;// attach the newly created thread to the cwq
trace_workqueue_creation(cwq->thread, cpu);
return 0;
}
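A side note grounded in the fmt string above: the worker thread's name, as it appears in ps/top, follows directly from that format, e.g. for the "k_otg" queue created earlier:
/* fmt = "%s"     -> a single thread named "k_otg" (single-threaded workqueue)
 * fmt = "%s/%d"  -> one thread per CPU, named "k_otg/0", "k_otg/1", ... */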
3. start_workqueue_thread(cwq, -1);
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
struct task_struct *p = cwq->thread;
if (p != NULL) {
if (cpu >= 0)
kthread_bind(p, cpu);
wake_up_process(p);// wake the worker thread up
}
}
/* once woken, the worker thread executes this handler */
static int worker_thread(void *__cwq)
{
struct cpu_workqueue_struct *cwq = __cwq;
DEFINE_WAIT(wait);
if (cwq->wq->freezeable)// mark the thread freezable if the workqueue allows it
set_freezable();
for (;;) {
prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);// add to the more_work wait queue and set the task state to TASK_INTERRUPTIBLE
if (!freezing(current) &&
!kthread_should_stop() &&
list_empty(&cwq->worklist))// if the worklist is empty, call schedule() and go to sleep
schedule();
finish_wait(&cwq->more_work, &wait);// after wakeup: set the task back to TASK_RUNNING and remove it from the wait queue
try_to_freeze();
if (kthread_should_stop())
break;
run_workqueue(cwq);// walk cwq->worklist and execute each work item
}
return 0;
}
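The ordering in this loop is the standard wait-queue idiom: prepare_to_wait() sets the task state before the list_empty() check, so a wake_up() arriving from the queueing side between the check and schedule() just puts the task back to TASK_RUNNING and schedule() returns immediately; no wakeup can be lost. A comment-style sketch of the handshake with insert_work() (shown further below), not new kernel code:
/*
 * worker_thread()                               insert_work()
 *   prepare_to_wait(&cwq->more_work, &wait,
 *                   TASK_INTERRUPTIBLE);
 *                                                 list_add_tail(&work->entry, head);
 *                                                 wake_up(&cwq->more_work);
 *   if (list_empty(&cwq->worklist))    <- sees the new work item, or
 *           schedule();                <- returns at once because the wake_up()
 *                                         already set the task running again
 *   finish_wait(&cwq->more_work, &wait);
 *   run_workqueue(cwq);
 */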
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
spin_lock_irq(&cwq->lock);
while (!list_empty(&cwq->worklist)) {// process every work item on the list
struct work_struct *work = list_entry(cwq->worklist.next,
struct work_struct, entry);
work_func_t f = work->func;// grab the work item's handler function
trace_workqueue_execution(cwq->thread, work);
debug_work_deactivate(work);
cwq->current_work = work;// record the work currently being executed
list_del_init(cwq->worklist.next);
spin_unlock_irq(&cwq->lock);
BUG_ON(get_wq_data(work) != cwq);
work_clear_pending(work);// clear the PENDING bit
lock_map_acquire(&cwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map);
f(work);// run the work item's handler
lock_map_release(&lockdep_map);
lock_map_release(&cwq->wq->lockdep_map);
spin_lock_irq(&cwq->lock);
cwq->current_work = NULL;
}
spin_unlock_irq(&cwq->lock);
}
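Note that cwq->lock is dropped and interrupts are re-enabled before f(work) is called, so the handler runs in the worker thread's process context and is allowed to sleep, unlike a tasklet or softirq handler. A hypothetical handler just to illustrate the point (my_work_fn and the msleep() are not from the original code):
/* needs <linux/workqueue.h> and <linux/delay.h> */
static void my_work_fn(struct work_struct *work)
{
	/* process context: sleeping, mutexes, GFP_KERNEL allocations are all allowed here */
	msleep(10);
}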
// Next, how a work item is initialized
#define INIT_WORK(_work, _func) \
do { \
__INIT_WORK((_work), (_func), 0); \
} while (0)
#define __INIT_WORK(_work, _func, _onstack) \
do { \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT();\
INIT_LIST_HEAD(&(_work)->entry); \
PREPARE_WORK((_work), (_func)); \
} while (0)
#define PREPARE_WORK(_work, _func) \
do { \
(_work)->func = (_func); \
} while (0)
struct work_struct {
atomic_long_t data;
#define WORK_STRUCT_PENDING 0 /* T if work item pending execution */
#define WORK_STRUCT_STATIC 1 /* static initializer (debugobjects) */
#define WORK_STRUCT_FLAG_MASK (3UL)
#define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
struct list_head entry;
work_func_t func;
};
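For statically allocated work items the same header also offers a compile-time initializer, so INIT_WORK() is mainly needed when the work_struct is embedded in a dynamically allocated object. A small sketch (the handler name is hypothetical):
static void blink_work_fn(struct work_struct *work);	/* hypothetical handler */
static DECLARE_WORK(blink_work, blink_work_fn);		/* static equivalent of INIT_WORK() */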
/* initialize the work item */
INIT_WORK(&dev->otg_resume_work, msm_otg_resume_w);
/* queue (schedule) the work item */
queue_work(dev->wq, &dev->otg_resume_work);
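One detail worth making explicit before following queue_work() into the implementation: the handler only receives the struct work_struct pointer, so a driver that embeds the work item in its device structure (as &dev->otg_resume_work is here) normally recovers the enclosing object with container_of(). A sketch of what msm_otg_resume_w() presumably looks like; the type name struct msm_otg is assumed, not taken from this post:
static void msm_otg_resume_w(struct work_struct *w)
{
	struct msm_otg *dev = container_of(w, struct msm_otg, otg_resume_work);	/* assumed type name */
	/* ... resume handling using dev ... */
}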
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
int ret;
ret = queue_work_on(get_cpu(), wq, work);
put_cpu();
return ret;
}
int queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
int ret = 0;
if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {// test-and-set the PENDING bit: if it was already set, the work is still awaiting execution and must not be queued again
BUG_ON(!list_empty(&work->entry));
__queue_work(wq_per_cpu(wq, cpu), work);
ret = 1;
}
return ret;
}
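This test_and_set_bit() is also what gives queue_work() its return value: 1 if the work was actually queued, 0 if it was still pending from an earlier call and therefore skipped. For example (hypothetical driver code):
int ret;
ret = queue_work(dev->wq, &dev->otg_resume_work);	/* ret == 1: PENDING was clear, work queued */
ret = queue_work(dev->wq, &dev->otg_resume_work);	/* ret == 0 if the first call has not been processed yet */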
static void __queue_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work)
{
unsigned long flags;
debug_work_activate(work);
spin_lock_irqsave(&cwq->lock, flags);
insert_work(cwq, work, &cwq->worklist);// insert into the cwq's worklist
spin_unlock_irqrestore(&cwq->lock, flags);
}
static void insert_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work, struct list_head *head)
{
trace_workqueue_insertion(cwq->thread, work);
set_wq_data(work, cwq);
/*
* Ensure that we get the right work->data if we see the
* result of list_add() below, see try_to_grab_pending().
*/
smp_wmb();
list_add_tail(&work->entry, head);
wake_up(&cwq->more_work);// wake up the worker sleeping on this wait queue
}
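To tie the pieces together, here is a minimal, self-contained module sketch that exercises exactly the path traced above: create a single-threaded workqueue, initialize a work item, queue it (which sets PENDING, adds it to the worklist and wakes the worker), and clean up on exit. All names here (k_demo, demo_*) are invented for illustration and are not from the original driver:
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

/* runs in the "k_demo" worker thread's process context, so it may sleep */
static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo work executing\n");
	msleep(100);
}

static int __init demo_init(void)
{
	demo_wq = create_singlethread_workqueue("k_demo");
	if (!demo_wq)
		return -ENOMEM;

	INIT_WORK(&demo_work, demo_work_fn);
	queue_work(demo_wq, &demo_work);	/* wakes the worker via cwq->more_work */
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_work_sync(&demo_work);	/* wait until the work is neither pending nor running */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");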