Creating a workqueue
A workqueue_struct contains one cpu_workqueue_struct for each CPU. When a workqueue is created, init_cpu_workqueue() initializes the cpu_workqueue_struct of each CPU, and create_workqueue_thread() then creates a worker thread for each of them.
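For reference, here is a simplified sketch of the two structures involved, trimmed to the fields actually used by the code in this section (the real definitions live in kernel/workqueue.c of this kernel generation, so field order and details may differ slightly):

struct cpu_workqueue_struct {
	spinlock_t lock;			/* protects worklist */
	struct list_head worklist;		/* pending work items */
	wait_queue_head_t more_work;		/* the worker sleeps here when idle */
	struct work_struct *current_work;	/* item currently being run */
	struct workqueue_struct *wq;		/* owning workqueue */
	struct task_struct *thread;		/* the worker thread itself */
	int run_depth;				/* detects run_workqueue() recursion */
};

struct workqueue_struct {
	struct cpu_workqueue_struct *cpu_wq;	/* one entry per CPU */
	struct list_head list;			/* link on the global workqueues list */
	const char *name;
	int singlethread;
	int freezeable;
	int rt;
	struct lockdep_map lockdep_map;		/* present with CONFIG_LOCKDEP */
};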
create_workqueue_thread() creates the kernel thread with the following call:
p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
The newly created thread is then started by start_workqueue_thread().
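start_workqueue_thread() itself is short: if the thread exists, it is bound to its CPU (the single-threaded case passes -1, so no binding happens) and then woken up. A paraphrased sketch, not verbatim source:

static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);	/* pin the worker to its CPU */
		wake_up_process(p);		/* let worker_thread() start running */
	}
}

The complete creation path is __create_workqueue_key(), which the create_workqueue() family of macros ultimately expands to: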
struct workqueue_struct *__create_workqueue_key(const char *name,
						int singlethread,
						int freezeable,
						int rt,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return NULL;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq) {
		kfree(wq);
		return NULL;
	}

	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	wq->singlethread = singlethread;
	wq->freezeable = freezeable;
	wq->rt = rt;
	INIT_LIST_HEAD(&wq->list);

	if (singlethread) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
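A driver normally reaches this code through the create_workqueue()/create_singlethread_workqueue() wrappers rather than calling __create_workqueue_key() directly. A minimal usage sketch; the queue name, work item, and handler below are made up for illustration:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;		/* hypothetical private queue */
static struct work_struct my_work;		/* hypothetical work item */

static void my_work_handler(struct work_struct *work)
{
	/* runs in process context in one of my_wq's worker threads */
	printk(KERN_INFO "my_work executed\n");
}

static int __init my_module_init(void)
{
	my_wq = create_workqueue("my_wq");	/* one worker thread per CPU */
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_handler);
	queue_work(my_wq, &my_work);		/* hand the item to the queue */
	return 0;
}

static void __exit my_module_exit(void)
{
	flush_workqueue(my_wq);			/* wait for pending work to finish */
	destroy_workqueue(my_wq);
}

module_init(my_module_init);
module_exit(my_module_exit);
MODULE_LICENSE("GPL");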
Adding work to a workqueue
schedule_work() adds a work item to the kernel's global default workqueue (keventd_wq); queued items are then executed by that queue's default worker threads (the events/N kernel threads).
/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * This puts a job in the kernel-global workqueue.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
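For comparison with a private workqueue, using the shared queue only takes a work item and a handler; the names below are illustrative:

#include <linux/workqueue.h>

static void my_deferred_handler(struct work_struct *work)
{
	printk(KERN_INFO "deferred work running in an events/N worker\n");
}

static DECLARE_WORK(my_deferred_work, my_deferred_handler);

/* e.g. from an interrupt handler: defer the heavy part to process context */
static void my_irq_bottom_half(void)
{
	schedule_work(&my_deferred_work);
}

If the caller later needs to be sure the handler has completed, flush_scheduled_work() waits for everything queued on the shared workqueue.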
Executing the queued work
Each worker thread created for the workqueue runs the worker_thread() function. Once initialization is done the thread goes to sleep; when work is added to its queue, the thread is woken up, processes everything on the work list, and then goes back to sleep.
When a work item is processed, it is first removed from the list and its pending flag is cleared, so the same work_struct can be submitted again even while its handler is still running.
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->freezeable)
		set_freezable();

	set_user_nice(current, -5);

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	cwq->run_depth++;
	if (cwq->run_depth > 3) {
		/* morton gets to eat his hat */
		printk("%s: recursion depth exceeded: %d\n",
			__func__, cwq->run_depth);
		dump_stack();
	}
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
		/*
		 * It is permissible to free the struct work_struct
		 * from inside the function that is called from it,
		 * this we need to take into account for lockdep too.
		 * To avoid bogus "held lock freed" warnings as well
		 * as problems when looking into work->lockdep_map,
		 * make a copy and use that here.
		 */
		struct lockdep_map lockdep_map = work->lockdep_map;
#endif

		cwq->current_work = work;
		list_del_init(cwq->worklist.next);
		spin_unlock_irq(&cwq->lock);

		BUG_ON(get_wq_data(work) != cwq);
		work_clear_pending(work);
		lock_map_acquire(&cwq->wq->lockdep_map);
		lock_map_acquire(&lockdep_map);
		f(work);
		lock_map_release(&lockdep_map);
		lock_map_release(&cwq->wq->lockdep_map);

		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
					"%s/0x%08x/%d\n",
					current->comm, preempt_count(),
					task_pid_nr(current));
			printk(KERN_ERR "    last function: ");
			print_symbol("%s\n", (unsigned long)f);
			debug_show_held_locks(current);
			dump_stack();
		}

		spin_lock_irq(&cwq->lock);
		cwq->current_work = NULL;
	}
	cwq->run_depth--;
	spin_unlock_irq(&cwq->lock);
}
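The counterpart of the prepare_to_wait()/schedule() loop in worker_thread() is the enqueue path: queue_work() resolves the per-CPU cpu_workqueue_struct and ends in insert_work(), which links the item onto cwq->worklist and wakes the sleeping worker. A paraphrased sketch of that final step, not verbatim source:

static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head)
{
	set_wq_data(work, cwq);		/* what BUG_ON(get_wq_data(work) != cwq) checks */
	smp_wmb();			/* publish work->data before the list insertion */
	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);	/* brings worker_thread() out of schedule() */
}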