Reposted from: http://blog.chinaunix.net/uid-24148050-id-296982.html
1. Introduction to workqueue
Like a tasklet, a workqueue lets kernel code request that a function be called at some point in the future (borrowed from LDD3).
Each workqueue is backed by its own kernel thread(s).
Differences between workqueue and tasklet:
- A tasklet is implemented on top of softirqs and runs in softirq context, so tasklet code must be atomic. A workqueue is implemented with kernel threads, so it has no such restriction, and a work function is allowed to sleep.
- A tasklet always runs on the same CPU that originally submitted it; a workqueue item does not necessarily do so.
- A tasklet cannot be given a delay (not even a short one), while a workqueue item can be scheduled with a specified delay.
My driver module called a sleepable function from inside a timer handler, which is exactly why it produced the "scheduling while atomic" warning.
Kernel timers are also implemented on top of softirqs.
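A common way around that warning (a minimal sketch, assuming the 2.6.24-era API used in this article; my_timer_fn, my_work_fn and the other names are invented for illustration) is to let the timer handler only queue work and move the sleepable call into the work function:

#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/jiffies.h>

static struct work_struct my_work;
static struct timer_list my_timer;

/* Runs in process context (a workqueue kernel thread), so sleeping is fine. */
static void my_work_fn(struct work_struct *work)
{
    msleep(10);                       /* the sleepable call lives here */
    printk(KERN_INFO "deferred work done\n");
}

/* Runs in softirq context: must not sleep, so it only hands the job off. */
static void my_timer_fn(unsigned long data)
{
    schedule_work(&my_work);          /* defer to the shared workqueue */
    mod_timer(&my_timer, jiffies + HZ);
}

static int __init my_init(void)
{
    INIT_WORK(&my_work, my_work_fn);
    setup_timer(&my_timer, my_timer_fn, 0);
    mod_timer(&my_timer, jiffies + HZ);
    return 0;
}

static void __exit my_exit(void)
{
    del_timer_sync(&my_timer);
    cancel_work_sync(&my_work);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");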
2. The workqueue API
The workqueue API changed after 2.6.20:
#include <linux/workqueue.h>

struct workqueue_struct;
struct work_struct;

struct workqueue_struct *create_workqueue(const char *name);
void destroy_workqueue(struct workqueue_struct *queue);

INIT_WORK(_work, _func);
INIT_DELAYED_WORK(_work, _func);

int queue_work(struct workqueue_struct *wq, struct work_struct *work);
int queue_delayed_work(struct workqueue_struct *wq,
                       struct delayed_work *dwork, unsigned long delay);
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
                          struct delayed_work *dwork, unsigned long delay);

int cancel_work_sync(struct work_struct *work);
int cancel_delayed_work_sync(struct delayed_work *dwork);
void flush_workqueue(struct workqueue_struct *wq);
Workqueue programming interface

| No. | Function | Description |
| --- | --- | --- |
| 1 | create_workqueue | Creates a workqueue, spawning one kernel thread for each CPU in the system. Input: @name: name of the workqueue |
| 2 | create_singlethread_workqueue | Creates a workqueue backed by a single kernel thread. Input: @name: name of the workqueue |
| 3 | destroy_workqueue | Releases a workqueue. Input: @workqueue_struct: pointer to the workqueue to release |
| 4 | schedule_work | Schedules a work item for execution; the item is queued on the workqueue the kernel itself provides, keventd_wq. Input: @work_struct: pointer to the work item |
| 5 | schedule_delayed_work | Like schedule_work, but runs the work item after a delay. Inputs: @work_struct: pointer to the work item; @delay: delay time |
| 6 | queue_work | Queues a work item on a specified workqueue. Inputs: @workqueue_struct: pointer to the target workqueue; @work_struct: pointer to the work item |
| 7 | queue_delayed_work | Like queue_work, but runs the work item on the specified workqueue after a delay; takes an extra delay parameter. |
The following example uses a workqueue without specifying a delay
(code based on 2.6.24):
struct my_work_struct {
    int test;
    struct work_struct save;
};

struct my_work_struct test_work;
struct workqueue_struct *test_workqueue;

void do_save(struct work_struct *p_work)
{
    struct my_work_struct *p_test_work =
        container_of(p_work, struct my_work_struct, save);
    printk("%d\n", p_test_work->test);
}

void test_init(void)
{
    test_workqueue = create_workqueue("test_workqueue");
    if (!test_workqueue)
        panic("Failed to create test_workqueue\n");
    INIT_WORK(&(test_work.save), do_save);
    queue_work(test_workqueue, &(test_work.save));
}

void test_destroy(void)
{
    if (test_workqueue)
        destroy_workqueue(test_workqueue);
}
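For comparison, a delayed variant of the same example might look like the sketch below (illustrative only, assuming the same 2.6.24 API; test_dwork and do_delayed_save are made-up names):

struct delayed_work test_dwork;

void do_delayed_save(struct work_struct *p_work)
{
    printk("delayed work executed\n");
}

void test_delayed_init(void)
{
    INIT_DELAYED_WORK(&test_dwork, do_delayed_save);
    /* run the work on test_workqueue roughly one second (HZ jiffies) from now */
    queue_delayed_work(test_workqueue, &test_dwork, HZ);
}

void test_delayed_exit(void)
{
    /* wait for any pending or running instance before the queue is destroyed */
    cancel_delayed_work_sync(&test_dwork);
}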
3. How workqueue is implemented
Workqueues are not implemented on top of softirqs; they are implemented with kernel threads.
First, creating a workqueue really amounts to creating kernel threads:
create_workqueue("tap_workqueue")
  --> __create_workqueue("tap_workqueue", 0, 0)
    --> __create_workqueue_key((name), (singlethread), (freezeable), NULL, NULL)
{
    wq = kzalloc(sizeof(*wq), GFP_KERNEL);
    wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
    wq->name = name;
    wq->singlethread = singlethread;
    wq->freezeable = freezeable;
    INIT_LIST_HEAD(&wq->list);
    for_each_possible_cpu(cpu) {
        cwq = init_cpu_workqueue(wq, cpu);
        err = create_workqueue_thread(cwq, cpu);
        start_workqueue_thread(cwq, cpu);
    }
}
create_workqueue_thread creates a kernel thread that runs worker_thread (linux_2_6_24/kernel/workqueue.c):
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
    struct workqueue_struct *wq = cwq->wq;
    const char *fmt = is_single_threaded(wq) ? "%s" : "%s/%d";
    struct task_struct *p;

    p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
    if (IS_ERR(p))
        return PTR_ERR(p);
    cwq->thread = p;
    return 0;
}
The kernel thread worker_thread is very simple: it just loops forever, draining the workqueue's work list.
(linux_2_6_24/kernel/workqueue.c)
int worker_thread(void *__cwq)
{
    struct cpu_workqueue_struct *cwq = __cwq;
    /* define a wait queue entry for this thread */
    DEFINE_WAIT(wait);

    /* freezeable is normally 0 */
    if (cwq->wq->freezeable)
        set_freezable();

    /* raise the thread's priority */
    set_user_nice(current, -5);

    for (;;) {
        /* Wait on cwq->more_work; when someone calls queue_work(), it ends up
         * calling wake_up(&cwq->more_work), which wakes this thread. */
        prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
        /* if the work list is empty, give up the CPU */
        if (!freezing(current) && !kthread_should_stop() &&
            list_empty(&cwq->worklist))
            schedule();
        /* once scheduled back, stop waiting: somebody woke up
         * cwq->more_work, so there is work to process */
        finish_wait(&cwq->more_work, &wait);

        /* a no-op here, since power management is not configured */
        try_to_freeze();

        if (kthread_should_stop())
            break;

        /* run_workqueue() processes every work item on the queue in turn */
        run_workqueue(cwq);
    }
    return 0;
}

/* run_workqueue() processes every work item on the queue in turn */
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
    spin_lock_irq(&cwq->lock);
    cwq->run_depth++;
    if (cwq->run_depth > 3) {
        /* morton gets to eat his hat */
        printk("%s: recursion depth exceeded: %d\n",
               __FUNCTION__, cwq->run_depth);
        dump_stack();
    }
    while (!list_empty(&cwq->worklist)) {
        struct work_struct *work = list_entry(cwq->worklist.next,
                                              struct work_struct, entry);
        work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the struct work_struct
         * from inside the function that is called from it,
         * this we need to take into account for lockdep too.
         * To avoid bogus "held lock freed" warnings as well
         * as problems when looking into work->lockdep_map,
         * make a copy and use that here.
         */
        struct lockdep_map lockdep_map = work->lockdep_map;
#endif
        cwq->current_work = work;
        list_del_init(cwq->worklist.next);
        spin_unlock_irq(&cwq->lock);

        BUG_ON(get_wq_data(work) != cwq);
        work_clear_pending(work);
        lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
        f(work);                        /* call the function stored in the work item */
        lock_release(&lockdep_map, 1, _THIS_IP_);
        lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);

        if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
            printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
                   "%s/0x%08x/%d\n",
                   current->comm, preempt_count(),
                   task_pid_nr(current));
            printk(KERN_ERR "    last function: ");
            print_symbol("%s\n", (unsigned long)f);
            debug_show_held_locks(current);
            dump_stack();
        }

        spin_lock_irq(&cwq->lock);
        cwq->current_work = NULL;
    }
    cwq->run_depth--;
    spin_unlock_irq(&cwq->lock);
}
Adding a work item to the work list of a specific workqueue (linux_2_6_24/kernel/workqueue.c):
int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
    int ret = 0;

    if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
        BUG_ON(!list_empty(&work->entry));
        __queue_work(wq_per_cpu(wq, get_cpu()), work);
        put_cpu();
        ret = 1;
    }
    return ret;
}

/* Preempt must be disabled. */
static void __queue_work(struct cpu_workqueue_struct *cwq, struct work_struct *work)
{
    unsigned long flags;

    spin_lock_irqsave(&cwq->lock, flags);
    insert_work(cwq, work, 1);
    spin_unlock_irqrestore(&cwq->lock, flags);
}

static void insert_work(struct cpu_workqueue_struct *cwq,
                        struct work_struct *work, int tail)
{
    set_wq_data(work, cwq);
    /*
     * Ensure that we get the right work->data if we see the
     * result of list_add() below, see try_to_grab_pending().
     */
    smp_wmb();
    if (tail)
        list_add_tail(&work->entry, &cwq->worklist);
    else
        list_add(&work->entry, &cwq->worklist);
    wake_up(&cwq->more_work);
}
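Putting the two halves together: queue_work()/insert_work() add an entry and call wake_up(), while worker_thread() sleeps on the same wait queue head. This is the standard producer/consumer pattern on a wait queue. The standalone sketch below is not kernel source; consumer_thread, pending_list and more_work are names made up for illustration:

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(more_work);
static LIST_HEAD(pending_list);
static DEFINE_SPINLOCK(pending_lock);

/* Consumer: sleeps while pending_list is empty, mirroring worker_thread(). */
static int consumer_thread(void *unused)
{
    DEFINE_WAIT(wait);

    while (!kthread_should_stop()) {
        prepare_to_wait(&more_work, &wait, TASK_INTERRUPTIBLE);
        if (list_empty(&pending_list) && !kthread_should_stop())
            schedule();                 /* give up the CPU until woken */
        finish_wait(&more_work, &wait);

        /* drain pending_list here, holding pending_lock around list ops */
    }
    return 0;
}

/* Producer: add an item, then wake the consumer, mirroring queue_work(). */
static void submit_item(struct list_head *item)
{
    spin_lock(&pending_lock);
    list_add_tail(item, &pending_list);
    spin_unlock(&pending_lock);
    wake_up(&more_work);
}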
4. The shared queue
The kernel actually maintains a workqueue of its own, called keventd_wq; this workqueue is also known as the "shared queue".
do_basic_setup --> init_workqueues --> create_workqueue("events");
If a driver module's workqueue needs are simple, it can use the shared queue instead of creating its own.
The shared queue is used through the following set of APIs:
int schedule_work(struct work_struct *work)
{
    return queue_work(keventd_wq, work);
}

int schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
    timer_stats_timer_set_start_info(&dwork->timer);
    return queue_delayed_work(keventd_wq, dwork, delay);
}

void flush_scheduled_work(void)
{
    flush_workqueue(keventd_wq);
}
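As a quick usage sketch (illustrative only; my_shared_work and my_shared_fn are invented names), a driver that relies on the shared queue only needs the work item itself:

#include <linux/workqueue.h>

static void my_shared_fn(struct work_struct *work)
{
    printk(KERN_INFO "running on the shared keventd_wq queue\n");
}

static DECLARE_WORK(my_shared_work, my_shared_fn);

static void my_submit(void)
{
    schedule_work(&my_shared_work);       /* queued on keventd_wq */
}

static void my_cleanup(void)
{
    cancel_work_sync(&my_shared_work);    /* or flush_scheduled_work() */
}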