序:
這是我第二次寫Pintos內核,第一次上這課的時候由於剛接觸操作系統,這個project難度又是非常大,所以其實寫出的代碼有很多bug,一些測試也沒有通過,希望通過這次重寫Pintos,加深對操作系統內核的理解,並力爭通過所有測試。代碼完整部分在我的github里,如果覺得有幫助的話記得star我一下哦。
Part1:
第一部分的目的是讓我們修改timer_sleep(int64_t ticks)函數的忙等機制。thread結構體如下
/* Kernel thread (or user process) descriptor.
   Extended for the alarm clock with `ticks_blocked', the number of
   timer ticks the thread still has to stay blocked; the timer
   interrupt counts it down and wakes the thread at zero. */
struct thread
  {
    /* Owned by thread.c. */
    tid_t tid;                          /* Thread identifier. */
    enum thread_status status;          /* Thread state. */
    char name[16];                      /* Name (for debugging purposes). */
    uint8_t *stack;                     /* Saved stack pointer. */
    int priority;                       /* Priority. */
    struct list_elem allelem;           /* List element for all threads list. */

    /* Shared between thread.c and synch.c. */
    struct list_elem elem;              /* List element. */

#ifdef USERPROG
    /* Owned by userprog/process.c. */
    uint32_t *pagedir;                  /* Page directory. */
#endif

    /* Owned by thread.c. */
    unsigned magic;                     /* Detects stack overflow. */

    /* Added: remaining sleep time, in timer ticks. */
    int64_t ticks_blocked;              /* Time for blocked. */
  };
這里我們添加了一個變量ticks_blocked用於記錄剩余阻塞時間。在timer_sleep函數中,將該線程阻塞並設置阻塞時間。注意這一過程需要先關閉中斷,操作完成後再恢復原來的中斷狀態。
/* Sleeps for approximately TICKS timer ticks.  Interrupts must
   be turned on.

   Reimplemented without busy waiting: the caller records how long
   it should sleep in ticks_blocked and blocks itself; the timer
   interrupt (via check_blocked_time) counts the field down and
   unblocks the thread when it reaches zero. */
void
timer_sleep (int64_t ticks)
{
  if(ticks <= 0) return;   /* Nothing to do for zero or negative ticks. */
  ASSERT (intr_get_level () == INTR_ON);
  /* thread_block() requires interrupts off, and setting
     ticks_blocked must not race with the timer interrupt. */
  enum intr_level old_level = intr_disable ();
  struct thread *current_thread = thread_current ();
  current_thread->ticks_blocked = ticks;
  thread_block ();
  /* We resume here only after being woken; restore interrupts. */
  intr_set_level (old_level);
}
thread_block()的底層實現是將當前線程的狀態設置為THREAD_BLOCKED,然后重新調度。狀態為THREAD_BLOCKED的線程將從就緒隊列中移除。
/* Puts the current thread to sleep.  It will not be scheduled
   again until awoken by thread_unblock().

   This function must be called with interrupts turned off.  It
   is usually a better idea to use one of the synchronization
   primitives in synch.h.

   (Unmodified Pintos code, quoted for reference: a thread marked
   THREAD_BLOCKED is never placed on the ready list, so schedule()
   picks some other thread.) */
void
thread_block (void)
{
  ASSERT (!intr_context ());
  ASSERT (intr_get_level () == INTR_OFF);

  thread_current ()->status = THREAD_BLOCKED;
  schedule ();
}
接下來就是在適當的時間喚醒線程。我們在每個tick內遍歷所有線程,並將阻塞線程的ticks_blocked值減一,當該值減到0時,調用thread_unblock將其喚醒、重新參與調度。timer_interrupt是時鐘中斷處理函數,每個tick都會被調用一次。
/* Timer interrupt handler.  Runs once per timer tick, in
   interrupt context with interrupts off — which is exactly what
   thread_foreach() requires (it asserts INTR_OFF). */
static void
timer_interrupt (struct intr_frame *args UNUSED)
{
  ticks++;
  thread_tick ();
  /* Count down every sleeping thread's remaining ticks and wake
     those whose countdown reaches zero. */
  thread_foreach(check_blocked_time,NULL);
}
thread_foreach函數的作用是對每個線程調用func函數,thread_action_func是用戶定義類型typedef void thread_action_func (struct thread *t, void *aux);它規定了函數的返回值為void且有struct thread和void*兩個參數。
/* Invoke function 'func' on all threads, passing along 'aux'.
   This function must be called with interrupts off.

   (Unmodified Pintos code: walks all_list, which links every
   thread through its `allelem' field.) */
void
thread_foreach (thread_action_func *func, void *aux)
{
  struct list_elem *e;

  ASSERT (intr_get_level () == INTR_OFF);

  for (e = list_begin (&all_list); e != list_end (&all_list);
       e = list_next (e))
    {
      struct thread *t = list_entry (e, struct thread, allelem);
      func (t, aux);
    }
}
每個tick,我們都需要將每個阻塞線程的ticks_blocked減1,當其減到0時將該線程喚醒,因此我們實現函數如下。
1 /* Check every threads whether they should be awaked. */ 2 void check_blocked_time(struct thread *t, void *aux){ 3 if (t->status == THREAD_BLOCKED && t->ticks_blocked > 0){ 4 t->ticks_blocked--; 5 if (t->ticks_blocked == 0) 6 thread_unblock(t); 7 } 8 }
別忘記在頭文件里添加定義
/* Per-tick callback passed to thread_foreach() to wake sleeping
   threads; defined in thread.c. */
void check_blocked_time(struct thread *t, void *aux);
至此,part1通過大半部分
下面來部署線程的優先級系統,我們發現thread.c里面有那么一個函數,看名字就知道它返回下一個要執行的線程,那么我們只要在ready_list中找到優先級最高的線程並將其返回即可。
/* Chooses and returns the next thread to be scheduled.  Should
   return a thread from the run queue, unless the run queue is
   empty.  (If the running thread can continue running, then it
   will be in the run queue.)  If the run queue is empty, return
   idle_thread.

   (Unmodified Pintos code, quoted for reference: it simply pops
   the front of ready_list — plain FIFO, ignoring priority.) */
static struct thread *
next_thread_to_run (void)
{
  if (list_empty (&ready_list))
    return idle_thread;
  else
    return list_entry (list_pop_front (&ready_list), struct thread, elem);
}
查找list.c文件,我發現了list_max函數,用於根據比較函數查找ready_list中優先級最高的線程,然后將其從ready_list中移除並返回。這里的list_entry用於將鏈表節點類型轉換為線程結構體類型。
1 bool thread_compare_priority (const struct list_elem *a,const struct list_elem *b,void *aux UNUSED){ 2 return list_entry(a,struct thread,elem)->priority < list_entry(b,struct thread,elem)->priority; 3 } 4 5 /* Chooses and returns the next thread to be scheduled. Should 6 return a thread from the run queue, unless the run queue is 7 empty. (If the running thread can continue running, then it 8 will be in the run queue.) If the run queue is empty, return 9 idle_thread. */ 10 static struct thread * 11 next_thread_to_run (void) 12 { 13 if (list_empty (&ready_list)) 14 return idle_thread; 15 else{ 16 struct list_elem *max_priority = list_max (&ready_list,thread_compare_priority,NULL); 17 list_remove (max_priority); 18 return list_entry (max_priority,struct thread,elem); 19 } 20 }
至此,part1所有test cases全部pass
Part2:
第二部分有以下測試
我們先看除去donation以外的測試,先看priority-fifo的源代碼
/* Pintos test code, quoted unmodified.  A PRI_DEFAULT+2 main
   thread creates THREAD_CNT threads at PRI_DEFAULT+1, then drops
   itself to PRI_DEFAULT; all child threads must run to completion
   before the main thread continues past thread_set_priority(). */
void
test_priority_fifo (void)
{
  struct simple_thread_data data[THREAD_CNT];
  struct lock lock;
  int *output, *op;
  int i, cnt;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  msg ("%d threads will iterate %d times in the same order each time.",
       THREAD_CNT, ITER_CNT);
  msg ("If the order varies then there is a bug.");

  output = op = malloc (sizeof *output * THREAD_CNT * ITER_CNT * 2);
  ASSERT (output != NULL);
  lock_init (&lock);

  thread_set_priority (PRI_DEFAULT + 2);
  for (i = 0; i < THREAD_CNT; i++)
    {
      char name[16];
      struct simple_thread_data *d = data + i;
      snprintf (name, sizeof name, "%d", i);
      d->id = i;
      d->iterations = 0;
      d->lock = &lock;
      d->op = &op;
      thread_create (name, PRI_DEFAULT + 1, simple_thread_func, d);
    }

  thread_set_priority (PRI_DEFAULT);
  /* All the other threads now run to termination here. */
  ASSERT (lock.holder == NULL);

  cnt = 0;
  for (; output < op; output++)
    {
      struct simple_thread_data *d;

      ASSERT (*output >= 0 && *output < THREAD_CNT);
      d = data + *output;
      if (cnt % THREAD_CNT == 0)
        printf ("(priority-fifo) iteration:");
      printf (" %d", d->id);
      if (++cnt % THREAD_CNT == 0)
        printf ("\n");
      d->iterations++;
    }
}
這個測試創建了一個優先級PRI_DEFAULT+2的主線程,並用這個線程創建了16個優先級PRI_DEFAULT+1的子線程,然后把主線程的優先級設置為優先級PRI_DEFAULT,所以現在pintos內有16個優先級PRI_DEFAULT+1的線程和1個優先級PRI_DEFAULT的線程在跑,測試需要把16個線程跑完再結束那一個線程,看起來沒什么問題,但注意OS中線程是並行執行的,有可能最開始的一個線程在設置完優先級之后立刻結束了,而此時其他線程並未結束,即37行的注釋,因此在線程設置完優先級之后應該立刻重新調度,因此只需要在thread_set_priority()函數里添加thread_yield()函數即可。
/* Sets the current thread's priority to NEW_PRIORITY.

   First (pre-donation) version: after changing the priority we
   yield immediately, so that if some ready thread now outranks
   us the scheduler switches to it right away.  Superseded later
   by the donation-aware version in Part 2. */
void
thread_set_priority (int new_priority)
{
  thread_current ()->priority = new_priority;
  thread_yield();
}
接下來看priority-preempt測試了什么
/* Pintos test code, quoted unmodified.  Creating a thread with a
   higher priority than the running thread must preempt the
   creator immediately. */
void
test_priority_preempt (void)
{
  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  /* Make sure our priority is the default. */
  ASSERT (thread_get_priority () == PRI_DEFAULT);

  thread_create ("high-priority", PRI_DEFAULT + 1, simple_thread_func, NULL);
  msg ("The high-priority thread should have already completed.");
}
這個就很簡單了,創建一個新的高優先級線程搶占當前線程,因此在thread_create中,如果新線程的優先級高於當前線程優先級,調用thread_yield()函數即可。
/* Creates a new kernel thread named NAME with the given PRIORITY
   that runs FUNCTION passing AUX, and adds it to the ready queue.
   Returns the new thread's tid, or TID_ERROR on allocation
   failure.

   Modified at the end: if the newly created thread outranks the
   current thread, yield so the new thread is scheduled at once
   (required by the priority-preempt test). */
tid_t
thread_create (const char *name, int priority,
               thread_func *function, void *aux)
{
  struct thread *t;
  struct kernel_thread_frame *kf;
  struct switch_entry_frame *ef;
  struct switch_threads_frame *sf;
  tid_t tid;

  ASSERT (function != NULL);

  /* Allocate thread. */
  t = palloc_get_page (PAL_ZERO);
  if (t == NULL)
    return TID_ERROR;

  /* Initialize thread. */
  init_thread (t, name, priority);
  tid = t->tid = allocate_tid ();

  /* Stack frame for kernel_thread(). */
  kf = alloc_frame (t, sizeof *kf);
  kf->eip = NULL;
  kf->function = function;
  kf->aux = aux;

  /* Stack frame for switch_entry(). */
  ef = alloc_frame (t, sizeof *ef);
  ef->eip = (void (*) (void)) kernel_thread;

  /* Stack frame for switch_threads(). */
  sf = alloc_frame (t, sizeof *sf);
  sf->eip = switch_entry;
  sf->ebp = 0;

  /* Add to run queue. */
  thread_unblock (t);

  /* Added: let a higher-priority child preempt us immediately. */
  if (thread_current ()->priority < priority)
    thread_yield ();

  return tid;
}
意外發現priority-change也過了,順便看看這個測試在做什么
/* Pintos test code, quoted unmodified.  Exercises both preemption
   on thread creation and re-scheduling on priority change. */
void
test_priority_change (void)
{
  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  msg ("Creating a high-priority thread 2.");
  thread_create ("thread 2", PRI_DEFAULT + 1, changing_thread, NULL);
  msg ("Thread 2 should have just lowered its priority.");
  thread_set_priority (PRI_DEFAULT - 2);
  msg ("Thread 2 should have just exited.");
}
很明顯,這個測試創建了一個新線程並要這個線程立刻調用,然后在降低優先級之后它就不應該繼續執行了,這正好對應於之前修改的兩處,所以自然能通過測試。
通過了三個簡單測試,接下來處理線程同步問題
先來看看priority-sema這個測試
/* Pintos test code, quoted unmodified.  Ten threads of varying
   priorities block on the semaphore; each sema_up() must wake the
   highest-priority waiter, and that waiter must run before the
   PRI_MIN main thread continues. */
void
test_priority_sema (void)
{
  int i;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  sema_init (&sema, 0);
  thread_set_priority (PRI_MIN);
  for (i = 0; i < 10; i++)
    {
      int priority = PRI_DEFAULT - (i + 3) % 10 - 1;
      char name[16];
      snprintf (name, sizeof name, "priority %d", priority);
      thread_create (name, priority, priority_sema_thread, NULL);
    }

  for (i = 0; i < 10; i++)
    {
      sema_up (&sema);
      msg ("Back in main thread.");
    }
}
這個測試創建了10個優先級不等的線程,並且每個線程調用sema_down函數,其他得不到信號量的線程都得阻塞,而每次運行的線程釋放信號量時必須確保優先級最高的線程繼續執行,因此修改sema_up。查到semaphore結構體如下,waiters為阻塞隊列
/* A counting semaphore.  `waiters' holds the threads currently
   blocked in sema_down() waiting for the value to become
   positive. */
struct semaphore
  {
    unsigned value;             /* Current value. */
    struct list waiters;        /* List of waiting threads. */
  };
再來看pintos的sema_up是如何設計的,它只是把waiters最前面的線程取出來加入到ready_list而已。
/* Up or "V" operation on a semaphore.  Increments SEMA's value
   and wakes up one thread of those waiting for SEMA, if any.

   This function may be called from an interrupt handler.

   (Unmodified Pintos code, quoted for reference: it wakes the
   FIFO front of `waiters' without considering priority.) */
void
sema_up (struct semaphore *sema)
{
  enum intr_level old_level;

  ASSERT (sema != NULL);

  old_level = intr_disable ();
  if (!list_empty (&sema->waiters))
    thread_unblock (list_entry (list_pop_front (&sema->waiters),
                                struct thread, elem));
  sema->value++;
  intr_set_level (old_level);
}
現在問題很簡單了,就是把14和15行修改一下,在waiters中取出優先級最高的thread,並yield()即可,修改如下
1 /* Up or "V" operation on a semaphore. Increments SEMA's value 2 and wakes up one thread of those waiting for SEMA, if any. 3 4 This function may be called from an interrupt handler. */ 5 void 6 sema_up (struct semaphore *sema) 7 { 8 enum intr_level old_level; 9 10 ASSERT (sema != NULL); 11 12 old_level = intr_disable (); 13 if (!list_empty (&sema->waiters)) { 14 struct list_elem *max_priority = list_max (&sema->waiters,thread_compare_priority,NULL); 15 list_remove (max_priority); 16 thread_unblock(list_entry (max_priority,struct thread,elem)); 17 }18 sema->value++; 19 intr_set_level (old_level); 20 thread_yield(); 21 }
接下來看priority-condvar測試,
/* Pintos test code, quoted unmodified.  Ten threads of varying
   priorities wait on the condition variable; each cond_signal()
   must wake the highest-priority waiter. */
void
test_priority_condvar (void)
{
  int i;

  /* This test does not work with the MLFQS. */
  ASSERT (!thread_mlfqs);

  lock_init (&lock);
  cond_init (&condition);

  thread_set_priority (PRI_MIN);
  for (i = 0; i < 10; i++)
    {
      int priority = PRI_DEFAULT - (i + 7) % 10 - 1;
      char name[16];
      snprintf (name, sizeof name, "priority %d", priority);
      thread_create (name, priority, priority_condvar_thread, NULL);
    }

  for (i = 0; i < 10; i++)
    {
      lock_acquire (&lock);
      msg ("Signaling...");
      cond_signal (&condition, &lock);
      lock_release (&lock);
    }
}
和前面的信號量機制類似,條件變量也維護了一個waiters用於存儲等待接受條件變量的線程,那麼就修改cond_signal()函數喚醒優先級最高的線程即可,和priority-sema類似,直接上代碼。值得注意的是,這裡的cond->waiters推入的是semaphore_elem而不是thread,這是為了讓等待不同信號量的線程之間互不影響。
1 bool cond_compare_priority (const struct list_elem *a,const struct list_elem *b,void *aux UNUSED){ 2 struct semaphore_elem *sa = list_entry(a,struct semaphore_elem,elem); 3 struct semaphore_elem *sb = list_entry(b,struct semaphore_elem,elem); 4 return list_entry(list_front(&sa->semaphore.waiters),struct thread,elem)->priority < 5 list_entry(list_front(&sb->semaphore.waiters),struct thread,elem)->priority; 6 } 7 8 /* If any threads are waiting on COND (protected by LOCK), then 9 this function signals one of them to wake up from its wait. 10 LOCK must be held before calling this function. 11 12 An interrupt handler cannot acquire a lock, so it does not 13 make sense to try to signal a condition variable within an 14 interrupt handler. */ 15 void 16 cond_signal (struct condition *cond, struct lock *lock UNUSED) 17 { 18 ASSERT (cond != NULL); 19 ASSERT (lock != NULL); 20 ASSERT (!intr_context ()); 21 ASSERT (lock_held_by_current_thread (lock)); 22 23 if (!list_empty (&cond->waiters)){ 24 struct list_elem *max_priority = list_max (&cond->waiters,cond_compare_priority,NULL); 25 list_remove (max_priority); 26 sema_up(&list_entry(max_priority,struct semaphore_elem,elem)->semaphore); 27 } 28 }
線程同步測試至此結束
接下來處理優先級捐贈的問題,這里七個測試用例我就不一一解釋了,但是我強烈建議把七個測試的內容仔細看一遍,這里我從七個測試中提取要點進行解釋,因為前幾個測試比較簡單,實際上處理完復雜的測試之后簡單的測試必然能通過,因此這里我專門挑選復雜的測試進行講解。
這七個測試有兩個關鍵點,一個是priority-donate-multiple這個測試它給了我們一個關鍵信息,一個線程可能有多個鎖,然后多個其他線程會因為這個線程而阻塞,這是第一個要點。第二個要點是priority-donate-chain這個測試,這個測試比較復雜,是個優先級嵌套的問題。如下圖
thread_0(has lock[0]) thread_3(has lock[1], blocked by lock[0]) thread_6(has lock[2], blocked by lock[1]) thread_9(has lock[3], blocked by lock[2])
0 3 6 9
3 6 9
6 9
9
分析一下這張圖,thread_0的線程擁有了lock[0],thread_3的線程擁有lock[1],並因為thread_0的lock[0]而阻塞,因此將thread_0優先級提升到3,thread_6因為thread_3的lock[1]而阻塞,所以thread_3和thread_0將優先級提升到6,以此類推。這裡我們提取到的關鍵信息是,優先級的更新操作是需要循環的,而循環的關鍵點在於知道當前鎖的擁有者,如thread_9就需要知道lock[2]的所有者是誰(thread_6),以及thread_6在等待哪個鎖(lock[1])。
因此我們修改thread和lock結構體。
/* Thread descriptor, extended for priority donation:
   - locks:             every lock this thread currently holds, so
                        the highest donated priority can be
                        recomputed when one is released;
   - waiting_lock:      the single lock this thread is blocked on,
                        letting donors follow the holder chain in
                        lock_acquire();
   - original_priority: the base priority restored once all
                        donations are gone. */
struct thread
  {
    /* Owned by thread.c. */
    tid_t tid;                          /* Thread identifier. */
    enum thread_status status;          /* Thread state. */
    char name[16];                      /* Name (for debugging purposes). */
    uint8_t *stack;                     /* Saved stack pointer. */
    int priority;                       /* Priority. */
    struct list_elem allelem;           /* List element for all threads list. */

    /* Shared between thread.c and synch.c. */
    struct list_elem elem;              /* List element. */

#ifdef USERPROG
    /* Owned by userprog/process.c. */
    uint32_t *pagedir;                  /* Page directory. */
#endif

    /* Owned by thread.c. */
    unsigned magic;                     /* Detects stack overflow. */

    int64_t ticks_blocked;              /* Time for blocked. */
    struct list locks;                  /* Locks this thread holds */
    struct lock *waiting_lock;          /* The lock this thread is waiting for */
    int original_priority;              /* Original priority of this thread */
  };
/* Lock, extended for priority donation:
   - max_priority: highest priority among the threads acquiring or
     waiting for this lock (cached so the holder's effective
     priority can be recomputed cheaply);
   - elem: links this lock into its holder's `locks' list. */
struct lock
  {
    int max_priority;           /* Max priority of all threads acquiring this lock */
    struct list_elem elem;      /* Used in thread.c */
    struct thread *holder;      /* Thread holding lock (for debugging). */
    struct semaphore semaphore; /* Binary semaphore controlling access. */
  };
接下來處理lock_acquire(),為了區別於第三部分的mlfqs,所有添加的部分都進行了thread_mlfqs判斷(我是做了第三部分才改的第二部分,因此提前在這裡作出修改)。在獲取鎖之前,根據前面的分析,循環更新所有參與嵌套的線程的優先級。
/* Acquires LOCK, sleeping until it becomes available if
   necessary.  The lock must not already be held by the current
   thread.

   This function may sleep, so it must not be called within an
   interrupt handler.  This function may be called with
   interrupts disabled, but interrupts will be turned back on if
   we need to sleep.

   Priority donation (skipped under mlfqs): if LOCK is already
   held, walk the chain holder -> lock the holder waits on ->
   that lock's holder -> ..., raising each lock's max_priority
   and each holder's priority to ours.  The walk stops when a
   lock already records an equal or higher max_priority, or the
   chain ends. */
void
lock_acquire (struct lock *lock)
{
  ASSERT (lock != NULL);
  ASSERT (!intr_context ());
  ASSERT (!lock_held_by_current_thread (lock));

  if(lock->holder != NULL && !thread_mlfqs){
    /* Record what we are blocked on so threads further down the
       chain can donate through us. */
    thread_current()->waiting_lock = lock;
    struct lock *wlock = lock;
    while(wlock != NULL && thread_current()->priority > wlock->max_priority){
      /* Donate our priority to this lock. */
      wlock->max_priority = thread_current()->priority;
      /* Holder's effective priority is the max over all the
         locks it currently holds. */
      struct list_elem *max_priority_in_locks = list_max(&wlock->holder->locks,lock_compare_max_priority,NULL);
      int maximal = list_entry(max_priority_in_locks,struct lock,elem)->max_priority;
      if(wlock->holder->priority < maximal)
        wlock->holder->priority = maximal;
      /* Follow the chain to the lock our holder waits on. */
      wlock = wlock->holder->waiting_lock;
    }
  }

  sema_down (&lock->semaphore);
  lock->holder = thread_current ();

  if(!thread_mlfqs){
    thread_current()->waiting_lock = NULL;
    lock->max_priority = thread_current()->priority;
    list_push_back(&thread_current()->locks,&lock->elem);
    /* NOTE(review): dead code — max_priority was assigned the
       current thread's priority two lines above, so this '>'
       comparison can never be true.  Harmless, but the branch
       could be removed. */
    if(lock->max_priority > thread_current()->priority){
      thread_current()->priority = lock->max_priority;
      thread_yield();
    }
  }
}
處理lock_release()函數,在釋放鎖之前,對該線程的優先級進行更新,如果他沒有擁有的鎖,就直接更新為original_priority,否則從所有鎖的max_priority中找到最大值進行更新。
1 void 2 lock_release (struct lock *lock) 3 { 4 ASSERT (lock != NULL); 5 ASSERT (lock_held_by_current_thread (lock)); 6 7 if(!thread_mlfqs){ 8 list_remove(&lock->elem); 9 int maximal = thread_current()->original_priority; 10 if(!list_empty(&thread_current()->locks)){ 11 struct list_elem *max_priority_in_locks = list_max(&thread_current()->locks,lock_compare_max_priority,NULL); 12 int p = list_entry(max_priority_in_locks,struct lock,elem)->max_priority; 13 if(p > maximal) 14 maximal = p; 15 } 16 thread_current()->priority = maximal; 17 } 18 19 lock->holder = NULL; 20 sema_up (&lock->semaphore); 21 }
最后需要對thread_set_priority (int new_priority)進行更新,如果沒有鎖,那優先級捐贈的情況根本不用考慮,直接更新,或者更新的優先級大於當前線程的優先級,則更新當前線程優先級,但無論如何,original_priority都需要進行更新。
/* Sets the current thread's base priority to NEW_PRIORITY
   (donation-aware version).

   original_priority — the value restored when all donations end —
   is always updated.  The effective priority only changes when no
   donation can be in effect (we hold no locks) or when the new
   priority exceeds the current, possibly donated, one; lowering
   below a donated priority is deferred until lock_release()
   recomputes it. */
void
thread_set_priority (int new_priority)
{
  thread_current ()->original_priority = new_priority;
  if(list_empty(&thread_current()->locks) || new_priority > thread_current()->priority){
    thread_current()->priority = new_priority;
    thread_yield();   /* Re-schedule: we may no longer be highest. */
  }
}
還有個list_max中的比較函數
1 /* Compare function in list of a lock */ 2 bool lock_compare_max_priority (const struct list_elem *a,const struct list_elem *b,void *aux UNUSED){ 3 return list_entry(a,struct lock,elem)->max_priority < list_entry(b,struct lock,elem)->max_priority; 4 }
至此第二部分完全通過,該部分是project1最難的一部分,代碼量不大,但需要從七大測試中提取出最難的測試並解決,其余簡單的測試都迎刃而解。
Part3:
第三部分主要讓我們實現多級反饋隊列調度算法,文檔中有這麼一句話:Unfortunately, Pintos does not support floating-point arithmetic in the kernel, because it would complicate and slow the kernel. 所以說我們先不管算法什麼的,先部署這個浮點類型的計算部分,依據文檔95頁的要求,編寫下面的文件fixed-point.h
#ifndef FIXED_POINT_H
#define FIXED_POINT_H

/* 17.14 fixed-point arithmetic for the MLFQS scheduler.
   A real number r is stored as the integer r * FP_F, where
   FP_F = 2**14: 17 integer bits (FP_P), 14 fraction bits (FP_Q),
   plus the sign bit.

   Fix: the constants were previously named `p', `q' and `f' —
   single-lowercase-letter object-like macros that leak into every
   file including this header and silently rewrite any variable
   with one of those names (e.g. `int p' in lock_release()).  They
   are renamed FP_P/FP_Q/FP_F; all public macros are unchanged. */

#define FP_P 17                  /* Integer bits (documentation only). */
#define FP_Q 14                  /* Fraction bits. */
#define FP_F (1 << FP_Q)         /* Scaling factor, 2**14 = 16384. */

/* Conversions between integer N and fixed-point X. */
#define CONVERT_N_TO_FIXED_POINT(n) ((n) * (FP_F))
#define CONVERT_X_TO_INTEGER_ZERO(x) ((x) / (FP_F))  /* Truncate toward zero. */
#define CONVERT_X_TO_INTEGER_NEAREST(x) \
  (((x) >= 0) ? (((x) + (FP_F) / 2) / (FP_F)) : (((x) - (FP_F) / 2) / (FP_F)))

/* Arithmetic: X and Y are fixed-point values, N is an integer. */
#define ADD_X_AND_Y(x,y) ((x) + (y))
#define SUBTRACT_Y_FROM_X(x,y) ((x) - (y))
#define ADD_X_AND_N(x,n) ((x) + (n) * (FP_F))
#define SUBTRACT_N_FROM_X(x,n) ((x) - (n) * (FP_F))
/* Widen to 64 bits before the multiply/divide to avoid overflow
   of the intermediate product. */
#define MULTIPLY_X_BY_Y(x,y) (((int64_t) (x)) * (y) / (FP_F))
#define MULTIPLY_X_BY_N(x,n) ((x) * (n))
#define DIVIDE_X_BY_Y(x,y) (((int64_t) (x)) * (FP_F) / (y))
#define DIVIDE_X_BY_N(x,n) ((x) / (n))

#endif
接下來實現算法部分,在此之前,先理解這個算法是咋回事。閱讀文檔BSD4.4部分,我們得到以下信息:
1. 該算法的優先級是動態變化的,主要動態修改Niceness, Priority, recent_cpu, load_avg四大變量
2. Priority的計算公式為:priority= PRI_MAX - (recent_cpu/ 4) - (nice*2),每四個clock tick對所有線程更新一次
3. recent_cpu的計算公式為recent_cpu= (2*load_avg)/(2*load_avg+ 1) *recent_cpu+nice,當timer_ticks () % TIMER_FREQ == 0時對所有線程更新,每個tick對當前線程的recent_cpu加1。
4. load_avg的計算公式為load_avg= (59/60)*load_avg+ (1/60)*ready_threads,當timer_ticks () % TIMER_FREQ == 0時對所有線程更新
現在問題就很簡單了,首先在在thread結構體中添加成員,
/* Thread descriptor, extended for the 4.4BSD scheduler:
   - nice:       the thread's niceness, an integer set via
                 thread_set_nice();
   - recent_cpu: CPU time recently received, stored as a 17.14
                 fixed-point value (see fixed-point.h). */
struct thread
  {
    /* Owned by thread.c. */
    tid_t tid;                          /* Thread identifier. */
    enum thread_status status;          /* Thread state. */
    char name[16];                      /* Name (for debugging purposes). */
    uint8_t *stack;                     /* Saved stack pointer. */
    int priority;                       /* Priority. */
    struct list_elem allelem;           /* List element for all threads list. */

    /* Shared between thread.c and synch.c. */
    struct list_elem elem;              /* List element. */

#ifdef USERPROG
    /* Owned by userprog/process.c. */
    uint32_t *pagedir;                  /* Page directory. */
#endif

    /* Owned by thread.c. */
    unsigned magic;                     /* Detects stack overflow. */

    int64_t ticks_blocked;              /* Time for blocked. */
    struct list locks;                  /* Locks this thread holds */
    struct lock *waiting_lock;          /* The lock this thread is waiting for */
    int original_priority;              /* Original priority of this thread */

    int nice;                           /* Niceness of thread used in mlfqs */
    int64_t recent_cpu;                 /* Used in mlfqs (17.14 fixed point) */
  };
然后在thread.c中定義一個全局變量load_avg,根據2,3,4條我們在thread.c中編寫以下函數,記得在thread.h中添加聲明
/* Increment by 1 for each clock tick: charge the running thread
   (unless it is the idle thread) one tick of recent CPU time.
   recent_cpu is 17.14 fixed point, so the integer 1 is added via
   ADD_X_AND_N. */
void increase_recent_cpu(void){
  if (thread_current()!=idle_thread)
    thread_current()->recent_cpu = ADD_X_AND_N(thread_current()->recent_cpu,1);
}

/* Modify Priority: priority = PRI_MAX - (recent_cpu / 4) - (nice * 2),
   computed in fixed point, rounded to nearest, then clamped to
   [PRI_MIN, PRI_MAX].  Used as a thread_action_func every 4 ticks. */
void modify_priority(struct thread *t,void *aux UNUSED){
  if (t!=idle_thread){
    //priority = PRI_MAX - (recent_cpu / 4) - (nice * 2)
    t->priority = CONVERT_X_TO_INTEGER_NEAREST(CONVERT_N_TO_FIXED_POINT(PRI_MAX)-
                  t->recent_cpu/4-CONVERT_N_TO_FIXED_POINT(2*t->nice));
    if (t->priority < PRI_MIN)
      t->priority = PRI_MIN;
    if (t->priority > PRI_MAX)
      t->priority = PRI_MAX;
  }
}

/* Modify recent_cpu:
   recent_cpu = (2*load_avg)/(2*load_avg + 1) * recent_cpu + nice.
   The coefficient is computed first as a fixed-point quotient,
   then multiplied by recent_cpu.  Run on every thread once per
   second (when timer_ticks() % TIMER_FREQ == 0). */
void modify_cpu(struct thread *t,void *aux UNUSED){
  if (t != idle_thread){
    int64_t fa = MULTIPLY_X_BY_N(load_avg,2);
    int64_t fb = MULTIPLY_X_BY_N(load_avg,2)+CONVERT_N_TO_FIXED_POINT(1);
    t->recent_cpu = MULTIPLY_X_BY_Y(DIVIDE_X_BY_Y(fa,fb),t->recent_cpu)+
                    CONVERT_N_TO_FIXED_POINT(t->nice);
  }
}

/* Modify load average:
   load_avg = (59/60)*load_avg + (1/60)*ready_threads,
   where ready_threads counts the threads on ready_list plus the
   running thread (idle excluded).  load_avg is the file-scope
   17.14 fixed-point global; updated once per second. */
void modify_load_avg(void){
  int ready_threads = list_size(&ready_list);
  if (thread_current()!=idle_thread){
    ready_threads++;
  }
  int64_t fa = MULTIPLY_X_BY_N(load_avg,59);
  int add1 = DIVIDE_X_BY_N(fa,60);
  int add2 = DIVIDE_X_BY_N(CONVERT_N_TO_FIXED_POINT(ready_threads),60);
  load_avg = ADD_X_AND_Y(add1,add2);
}
接下來就是在每次中斷時對這些值進行更新,修改timer.c文件
/* Timer interrupt handler.  Runs once per tick with interrupts
   off.  Besides waking sleeping threads, under mlfqs it drives
   the 4.4BSD scheduler bookkeeping:
   - every tick: charge the running thread one tick of recent_cpu;
   - once per second (ticks % TIMER_FREQ == 0): recompute
     load_avg, then every thread's recent_cpu;
   - every 4 ticks: recompute every thread's priority. */
static void
timer_interrupt (struct intr_frame *args UNUSED)
{
  ticks++;
  thread_tick ();
  thread_foreach(check_blocked_time,NULL);

  if(thread_mlfqs){
    increase_recent_cpu();
    if (timer_ticks() % TIMER_FREQ == 0){
      modify_load_avg();
      thread_foreach(modify_cpu,NULL);
    }
    if (timer_ticks() % 4 == 0){
      thread_foreach(modify_priority,NULL);
    }
  }
}
最后,把原框架留給我們的幾個函數補全
/* Sets the current thread's nice value to NICE, immediately
   recomputes its priority from the new niceness, and yields in
   case the thread is no longer the highest-priority runnable
   one. */
void
thread_set_nice (int nice UNUSED)
{
  thread_current()->nice = nice;
  modify_priority(thread_current(),NULL);
  thread_yield();
}

/* Returns the current thread's nice value. */
int
thread_get_nice (void)
{
  return thread_current()->nice;
}

/* Returns 100 times the system load average, rounded to the
   nearest integer (load_avg itself is 17.14 fixed point). */
int
thread_get_load_avg (void)
{
  int temp = MULTIPLY_X_BY_N(load_avg,100);
  return CONVERT_X_TO_INTEGER_NEAREST(temp);
}

/* Returns 100 times the current thread's recent_cpu value,
   rounded to the nearest integer. */
int
thread_get_recent_cpu (void)
{
  return CONVERT_X_TO_INTEGER_NEAREST(MULTIPLY_X_BY_N(thread_current()->recent_cpu,100));
}
至此,project 1所有測試通過
------------------------
未完待續