1 Overview
When a packet arrives, the NIC receives it and raises an interrupt. Every device that can generate interrupts has a corresponding interrupt handler; for a NIC this handler is part of the device driver. Its job is to acknowledge the interrupt to the hardware and to copy the packet from the NIC's buffer into system memory. In other words, when the NIC receives a packet from the network it must notify the kernel, and the kernel answers by running the interrupt handler the driver registered: the handler acknowledges the hardware, copies the newly arrived packets into memory, and then checks the NIC for more.
This work is important, urgent, and tied to the hardware. The kernel must copy packets into system memory quickly, because the NIC's receive buffer has a fixed size that is far smaller than system memory. If the copy is delayed, the NIC buffer overflows: incoming packets fill it up and any further packets are dropped.
An interrupt handler is split into two parts: the top half and the bottom half. When the interrupt is delivered to the CPU, NAPI and non-NAPI devices take different paths: a NAPI driver calls napi_schedule, while a non-NAPI driver calls netif_rx. Both functions are invoked from the top half of the NIC driver's interrupt handler.
Once the packet has been copied into system memory, the top half is done and control returns to whatever was running before the interrupt; the remaining work of processing the packet happens later in the bottom half.
Whether or not a driver supports NAPI, it ultimately calls either napi_schedule or netif_rx to notify the kernel and hand over the packet. So if you do not know which interrupt handler a driver uses, searching for these two functions will locate it. Because NAPI grew out of the netif_rx path, the analysis starts with netif_rx.
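To make the top-half/bottom-half split concrete, here is a minimal sketch of what a NAPI driver's top half might look like. The driver structure my_priv and the helper my_disable_rx_irq() are hypothetical, illustrative names; the real kernel APIs used are only napi_schedule() and the standard irqreturn_t handler signature.

struct my_priv {
	struct napi_struct napi;
	struct net_device *ndev;
	/* ... device registers, RX/TX rings ... */
};

static irqreturn_t my_nic_interrupt(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	/* Top half: acknowledge the device and mask further RX interrupts
	 * (hypothetical helper), then defer the real packet processing. */
	my_disable_rx_irq(priv);

	napi_schedule(&priv->napi); // mark NAPI as scheduled and raise NET_RX_SOFTIRQ

	return IRQ_HANDLED;
}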
2 The netif_rx function
Non-NAPI receive processing, defined in net/core/dev.c:
int netif_rx(struct sk_buff *skb)
{
	trace_netif_rx_entry(skb);

	return netif_rx_internal(skb);
}

static int netif_rx_internal(struct sk_buff *skb)
{
	int ret;

	net_timestamp_check(netdev_tstamp_prequeue, skb); // record the receive timestamp in skb->tstamp

	trace_netif_rx(skb);
#ifdef CONFIG_RPS
	if (static_key_false(&rps_needed)) {
		struct rps_dev_flow voidflow, *rflow = &voidflow;
		int cpu;

		preempt_disable();
		rcu_read_lock();

		cpu = get_rps_cpu(skb->dev, skb, &rflow); // with RPS, pick the CPU that should handle this packet
		if (cpu < 0)
			cpu = smp_processor_id(); // otherwise fall back to the current CPU

		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); // append the packet to that CPU's input_pkt_queue

		rcu_read_unlock();
		preempt_enable();
	} else
#endif
	{
		unsigned int qtail;
		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
		put_cpu();
	}
	return ret;
}
2.1 The enqueue_to_backlog function
netif_rx_internal ends up calling enqueue_to_backlog, which appends the packet to the tail of the per-CPU input_pkt_queue. Every CPU has such a queue; it is initialized in net_dev_init().
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd;
	unsigned long flags;
	unsigned int qlen;

	sd = &per_cpu(softnet_data, cpu); // softnet_data of the target CPU

	local_irq_save(flags); // save and disable local interrupts

	rps_lock(sd);
	if (!netif_running(skb->dev)) // check dev->state for __LINK_STATE_START; if the device is not running, drop the packet
		goto drop;
	qlen = skb_queue_len(&sd->input_pkt_queue); // current length of input_pkt_queue
	if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) { // queue below its maximum length and flow limit not exceeded
		if (qlen) {
enqueue:
			__skb_queue_tail(&sd->input_pkt_queue, skb); // append the skb to the tail of input_pkt_queue
			input_queue_tail_incr_save(sd, qtail); // advance the queue tail counter
			rps_unlock(sd);
			local_irq_restore(flags); // restore interrupts
			return NET_RX_SUCCESS;
		}

		/* Schedule NAPI for backlog device
		 * We can use non atomic operation since we own the queue lock
		 */
		if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
			if (!rps_ipi_queued(sd))
				____napi_schedule(sd, &sd->backlog); // add the backlog pseudo-device to sd->poll_list for polling and raise NET_RX_SOFTIRQ
		}
		goto enqueue;
	}

drop:
	sd->dropped++; // the receive queue is full: drop the packet
	rps_unlock(sd);

	local_irq_restore(flags); // restore local interrupts

	atomic_long_inc(&skb->dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
2.2 The net_dev_init function
Initializes the per-CPU input_pkt_queue queues (one per CPU):
static int __init net_dev_init(void)
{
	...

	for_each_possible_cpu(i) {
		struct work_struct *flush = per_cpu_ptr(&flush_works, i);
		struct softnet_data *sd = &per_cpu(softnet_data, i);

		INIT_WORK(flush, flush_backlog);

		skb_queue_head_init(&sd->input_pkt_queue); // initialize each CPU's input_pkt_queue
		skb_queue_head_init(&sd->process_queue);
		INIT_LIST_HEAD(&sd->poll_list);
		sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
		sd->csd.func = rps_trigger_softirq;
		sd->csd.info = sd;
		sd->cpu = i;
#endif

		sd->backlog.poll = process_backlog; // set the non-NAPI poll callback ---------------- B
		sd->backlog.weight = weight_p;
	}

	...
	open_softirq(NET_TX_SOFTIRQ, net_tx_action); // register the TX softirq handler
	open_softirq(NET_RX_SOFTIRQ, net_rx_action); // register the RX softirq handler ---------------------- A

	hotcpu_notifier(dev_cpu_callback, 0);
	dst_subsys_init();
	rc = 0;
out:
	return rc;
}
2.3 The ____napi_schedule function
Purpose: raise the RX softirq.
/* Called with irq disabled */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}
This softirq is registered at point A in net_dev_init (section 2.2); raising it eventually causes net_rx_action to run.
3 The napi_schedule function
NAPI receive processing, defined in include/linux/netdevice.h and net/core/dev.c:
static inline void napi_schedule(struct napi_struct *n)
{
	if (napi_schedule_prep(n))
		__napi_schedule(n);
}

void __napi_schedule(struct napi_struct *n)
{
	unsigned long flags;

	local_irq_save(flags); // save and disable local interrupts
	____napi_schedule(this_cpu_ptr(&softnet_data), n); // see section 2.3: raise the RX softirq, which, as there, leads to net_rx_action
	local_irq_restore(flags); // restore interrupts
}
4 The net_rx_action function
This function is the bottom half of RX processing; sections 2 and 3 covered the top half.
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
	unsigned long time_limit = jiffies + 2;
	int budget = netdev_budget; // maximum number of skbs processed in one softirq run, 300 by default
	LIST_HEAD(list);
	LIST_HEAD(repoll);

	local_irq_disable();
	list_splice_init(&sd->poll_list, &list);
	local_irq_enable();

	for (;;) {
		struct napi_struct *n;

		if (list_empty(&list)) { // check whether any device on the poll list is waiting to be polled
			if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
				return;
			break;
		}

		n = list_first_entry(&list, struct napi_struct, poll_list);
		budget -= napi_poll(n, &repoll); // call the poll function to pull a batch of skbs from the driver

		/* If softirq window is exhausted then punt.
		 * Allow this to run for 2 jiffies since which will allow
		 * an average latency of 1.5/HZ.
		 */
		if (unlikely(budget <= 0 || // stop when the 300-packet budget or the 2-jiffy time limit is exhausted
			     time_after_eq(jiffies, time_limit))) {
			sd->time_squeeze++;
			break;
		}
	}

	__kfree_skb_flush();
	local_irq_disable();

	list_splice_tail_init(&sd->poll_list, &list);
	list_splice_tail(&repoll, &list);
	list_splice(&list, &sd->poll_list);
	if (!list_empty(&sd->poll_list)) // poll_list not empty: some devices still have packets pending, so raise the softirq again
		__raise_softirq_irqoff(NET_RX_SOFTIRQ);

	net_rps_action_and_irq_enable(sd);
}
4.1 The napi_poll function
static int napi_poll(struct napi_struct *n, struct list_head *repoll)
{
	void *have;
	int work, weight;

	list_del_init(&n->poll_list);

	have = netpoll_poll_lock(n);

	weight = n->weight;

	/* This NAPI_STATE_SCHED test is for avoiding a race
	 * with netpoll's poll_napi(). Only the entity which
	 * obtains the lock and sees NAPI_STATE_SCHED set will
	 * actually make the ->poll() call. Therefore we avoid
	 * accidentally calling ->poll() when NAPI is not scheduled.
	 */
	work = 0;
	if (test_bit(NAPI_STATE_SCHED, &n->state)) {
		work = n->poll(n, weight); // call the driver's poll function; a NAPI driver registers its own, the default is process_backlog
		trace_napi_poll(n, work, weight);
	}

	WARN_ON_ONCE(work > weight);

	if (likely(work < weight))
		goto out_unlock;

	/* Drivers must not modify the NAPI state if they
	 * consume the entire weight. In such cases this code
	 * still "owns" the NAPI instance and therefore can
	 * move the instance around on the list at-will.
	 */
	if (unlikely(napi_disable_pending(n))) {
		napi_complete(n);
		goto out_unlock;
	}

	if (n->gro_list) {
		/* flush too old packets
		 * If HZ < 1000, flush all packets.
		 */
		napi_gro_flush(n, HZ >= 1000);
	}

	/* Some drivers may have called napi_schedule
	 * prior to exhausting their budget.
	 */
	if (unlikely(!list_empty(&n->poll_list))) {
		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
			     n->dev ? n->dev->name : "backlog");
		goto out_unlock;
	}

	list_add_tail(&n->poll_list, repoll);

out_unlock:
	netpoll_poll_unlock(have);

	return work;
}
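The work < weight check above implies a contract: a driver's ->poll() processes at most `budget` packets, returns how many it handled, and completes NAPI only when it consumed less than its budget. A minimal sketch of a generic driver poll function, reusing the hypothetical my_priv from the sketch in section 1 (my_rx_one_packet() and my_enable_rx_irq() are likewise illustrative, not real kernel APIs), could look like this:

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int done = 0;

	while (done < budget) {
		struct sk_buff *skb = my_rx_one_packet(priv); // pull one frame from the RX ring (hypothetical helper)
		if (!skb)
			break;

		skb->protocol = eth_type_trans(skb, priv->ndev); // set skb->protocol and pkt_type
		napi_gro_receive(napi, skb); // hand the skb to the stack via the GRO path
		done++;
	}

	if (done < budget) {
		/* Ring drained before the budget was used up: leave polled
		 * mode and let the device raise interrupts again. */
		napi_complete_done(napi, done);
		my_enable_rx_irq(priv); // hypothetical helper
	}

	return done; // net_rx_action subtracts this from its global budget
}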
4.2 The non-NAPI poll function
The non-NAPI poll function is the one assigned at point B in section 2.2, i.e. process_backlog:
static int process_backlog(struct napi_struct *napi, int quota)
{
	struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
	bool again = true;
	int work = 0;

	/* Check if we have pending ipi, its better to send them now,
	 * not waiting net_rx_action() end.
	 */
	if (sd_has_rps_ipi_waiting(sd)) {
		local_irq_disable();
		net_rps_action_and_irq_enable(sd);
	}

	napi->weight = weight_p;
	while (again) {
		struct sk_buff *skb;

		while ((skb = __skb_dequeue(&sd->process_queue))) { // dequeue one skb from the head of process_queue
			rcu_read_lock();
			__netif_receive_skb(skb); // hand the skb to the network layer ------------------ A
			rcu_read_unlock();
			input_queue_head_incr(sd); // advance the queue head counter
			if (++work >= quota)
				return work;
		}

		local_irq_disable();
		rps_lock(sd);
		if (skb_queue_empty(&sd->input_pkt_queue)) { // the input queue is empty: all skbs have been consumed
			napi->state = 0; // clear the NAPI state and leave the loop
			again = false;
		} else {
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		}
		rps_unlock(sd);
		local_irq_enable();
	}

	return work;
}
4.3 The NAPI poll function
For NAPI, the poll function is registered when the driver is loaded and initialized. We take linux-4.9.73/drivers/net/wireless/ath/wil6210/pcie_bus.c as an example to trace how a NAPI poll function is assigned, starting from the wil6210 driver's probe function:
static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	...
	rc = wil_if_add(wil);
	if (rc) {
		wil_err(wil, "wil_if_add failed: %d\n", rc);
		goto bus_disable;
	}
	...
}

int wil_if_add(struct wil6210_priv *wil)
{
	struct wireless_dev *wdev = wil_to_wdev(wil);
	struct wiphy *wiphy = wdev->wiphy;
	struct net_device *ndev = wil_to_ndev(wil);
	int rc;

	wil_dbg_misc(wil, "entered");

	strlcpy(wiphy->fw_version, wil->fw_version, sizeof(wiphy->fw_version));

	rc = wiphy_register(wiphy); // register the wiphy
	if (rc < 0) {
		wil_err(wil, "failed to register wiphy, err %d\n", rc);
		return rc;
	}

	netif_napi_add(ndev, &wil->napi_rx, wil6210_netdev_poll_rx, // register the poll function wil6210_netdev_poll_rx
		       WIL6210_NAPI_BUDGET);
	...
}

void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
		    int (*poll)(struct napi_struct *, int), int weight)
{
	...
	napi->timer.function = napi_watchdog;
	napi->gro_count = 0;
	napi->gro_list = NULL;
	napi->skb = NULL;
	napi->poll = poll; // store the poll callback
	...
}
4.3.1 The wil6210 poll function
static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
{
	struct wil6210_priv *wil = container_of(napi, struct wil6210_priv,
						napi_rx);
	int quota = budget;
	int done;

	wil_rx_handle(wil, &quota);
	...
}

void wil_rx_handle(struct wil6210_priv *wil, int *quota)
{
	...
	wil_netif_rx_any(skb, ndev);
	...
}

void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev)
{
	...
	if (skb) { /* deliver to local stack */
		skb->protocol = eth_type_trans(skb, ndev);
		rc = napi_gro_receive(&wil->napi_rx, skb); // let GRO merge segments and pass the skb up the stack
		wil_dbg_txrx(wil, "Rx complete %d bytes => %s\n",
			     len, gro_res_str[rc]);
	}
	...
}
4.3.2 The napi_gro_receive function
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb);

	return napi_skb_finish(dev_gro_receive(napi, skb), skb);
}

static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
	switch (ret) {
	case GRO_NORMAL:
		if (netif_receive_skb_internal(skb))
			ret = GRO_DROP;
		break;
	...
}

static int netif_receive_skb_internal(struct sk_buff *skb)
{
	...
	ret = __netif_receive_skb(skb); // hand the skb to the network layer ------------------ A
	rcu_read_unlock();
	return ret;
}
4.4 NAPI vs. non-NAPI
As the analysis above shows, point A in the non-NAPI poll function (section 4.2) and point A in the NAPI path (section 4.3.2) both end up calling __netif_receive_skb, which delivers the skb to the network layer. __netif_receive_skb in turn calls __netif_receive_skb_core, shown below:
static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
{
	struct packet_type *ptype, *pt_prev; // used to walk the registered packet types
	rx_handler_func_t *rx_handler;
	struct net_device *orig_dev; // the device the packet originally arrived on
	bool deliver_exact = false;
	int ret = NET_RX_DROP;
	__be16 type;

	net_timestamp_check(!netdev_tstamp_prequeue, skb); // check the timestamp and update skb->tstamp if needed

	trace_netif_receive_skb(skb);

	orig_dev = skb->dev; // keep a copy of the original device

	skb_reset_network_header(skb); // reset the network header; skb now points at the IP header (in the no-VLAN case)
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb); // recompute the MAC header length

	pt_prev = NULL;

another_round:
	skb->skb_iif = skb->dev->ifindex;

	__this_cpu_inc(softnet_data.processed);

	if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
	    skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
		skb = skb_vlan_untag(skb); // strip the VLAN tag
		if (unlikely(!skb))
			goto out;
	}

#ifdef CONFIG_NET_CLS_ACT
	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		goto ncls;
	}
#endif

	if (pfmemalloc)
		goto skip_taps;

	/* Before the packet is handed to a protocol-specific handler, the
	 * handlers registered on ptype_all are called first. The most common
	 * user is tcpdump (raw sockets / packet taps), which captures all
	 * incoming packets here. */
	list_for_each_entry_rcu(ptype, &ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev); // deliver the packet to the previously matched tap
		pt_prev = ptype; // pt_prev is an optimization: a handler is only invoked once the next match is found
	}

	/* Taps registered on the device itself (skb->dev->ptype_all) allow
	 * finer-grained, per-device handling. */
	list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
		if (pt_prev)
			ret = deliver_skb(skb, pt_prev, orig_dev);
		pt_prev = ptype;
	}

skip_taps:
#ifdef CONFIG_NET_INGRESS
	if (static_key_false(&ingress_needed)) {
		skb = sch_handle_ingress(skb, &pt_prev, &ret, orig_dev);
		if (!skb)
			goto out;

		if (nf_ingress(skb, &pt_prev, &ret, orig_dev) < 0)
			goto out;
	}
#endif
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
ncls:
#endif
	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
		goto drop;

	if (skb_vlan_tag_present(skb)) { // if VLAN information has to be passed to the upper layer, take this path
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		if (vlan_do_receive(&skb))
			goto another_round;
		else if (unlikely(!skb))
			goto out;
	}

	rx_handler = rcu_dereference(skb->dev->rx_handler); // per-device rx_handler; e.g. OVS registers its entry point here when the device joins a bridge
	if (rx_handler) {
		if (pt_prev) {
			ret = deliver_skb(skb, pt_prev, orig_dev);
			pt_prev = NULL;
		}
		switch (rx_handler(&skb)) { // run the rx_handler, e.g. enter OVS processing (OVS does not accept frames that still carry a VLAN header)
		case RX_HANDLER_CONSUMED:
			ret = NET_RX_SUCCESS;
			goto out;
		case RX_HANDLER_ANOTHER:
			goto another_round;
		case RX_HANDLER_EXACT:
			deliver_exact = true;
		case RX_HANDLER_PASS:
			break;
		default:
			BUG();
		}
	}

	if (unlikely(skb_vlan_tag_present(skb))) {
		if (skb_vlan_tag_get_id(skb))
			skb->pkt_type = PACKET_OTHERHOST;
		/* Note: we might in the future use prio bits
		 * and set skb->priority like in vlan_do_receive()
		 * For the time being, just ignore Priority Code Point
		 */
		skb->vlan_tci = 0;
	}

	type = skb->protocol;

	/* deliver only exact match when indicated */
	if (likely(!deliver_exact)) {
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, // deliver according to the globally registered protocol handlers
				       &ptype_base[ntohs(type) &
						   PTYPE_HASH_MASK]);
	}

	deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type, // deliver according to handlers registered on the original device
			       &orig_dev->ptype_specific);

	if (unlikely(skb->dev != orig_dev)) { // if the device has changed, also deliver to handlers registered on the new device
		deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
				       &skb->dev->ptype_specific);
	}

	if (pt_prev) {
		if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
			goto drop;
		else
			ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); // invoke the protocol handler, e.g. ip_rcv
	} else {
drop:
		if (!deliver_exact)
			atomic_long_inc(&skb->dev->rx_dropped);
		else
			atomic_long_inc(&skb->dev->rx_nohandler);
		kfree_skb(skb);
		/* Jamal, now you will not able to escape explaining
		 * me how you were going to use this. :-)
		 */
		ret = NET_RX_DROP;
	}

out:
	return ret;
}
This completes the path of a packet from the driver up to the interface layer; from here it is handed to the network layer.
4.4.1 A supplement on __netif_receive_skb
A few more words on the two lists ptype_all and ptype_base and how they are organized in the kernel.
ptype_all is a simple linked list. Its distinguishing feature is that every entry's handler is func = packet_rcv, i.e. this list mainly serves packet-capture programs such as tcpdump: they grab every packet regardless of its type, and the shared handler packet_rcv applies whatever filter options are configured. The type field of such an entry is an Ethernet protocol type (ETH_P_ALL for "capture everything"), and dev says which interface to capture on.
ptype_base, on the other hand, is a hash table keyed by type: the IP protocol, for example, can be registered against different dev interfaces, but all the entries live in the same table. Different protocol types map to different receive functions; IP packets go to ip_rcv, 802.2 frames go to llc_rcv, and so on. In short, when a packet comes up from the NIC driver, this is where it is first demultiplexed: each packet type is handed to its own protocol handler. A sketch of how such a handler is registered follows.
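As an illustration of how these lists are populated, here is a simplified sketch, based on net/ipv4/af_inet.c in the 4.x sources (not a verbatim copy), of how IPv4 registers its handler with dev_add_pack(). A packet_type whose type is htons(ETH_P_ALL) would instead be linked onto ptype_all, which is how packet taps such as tcpdump hook in.

static struct packet_type ip_packet_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_IP), // hashed into ptype_base[ntohs(ETH_P_IP) & PTYPE_HASH_MASK]
	.func = ip_rcv,                // called from __netif_receive_skb_core via pt_prev->func
};

static int __init inet_init(void)
{
	...
	dev_add_pack(&ip_packet_type); // ETH_P_IP != ETH_P_ALL, so this entry goes onto ptype_base
	...
}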
5 Summary of the whole flow
Putting it all together: the NIC raises an interrupt; the driver's top half calls either netif_rx (non-NAPI, which queues the skb on the per-CPU input_pkt_queue via enqueue_to_backlog) or napi_schedule (NAPI). Both add a napi_struct to sd->poll_list and raise NET_RX_SOFTIRQ. The softirq runs net_rx_action, which calls napi_poll on each entry; the poll function is process_backlog for non-NAPI devices or the driver's own poll (e.g. wil6210_netdev_poll_rx) for NAPI devices. Either way the skb reaches __netif_receive_skb, where the taps on ptype_all are served first and the packet is then demultiplexed through ptype_base to its protocol handler, such as ip_rcv.
Reference: https://blog.csdn.net/lee244868149/article/details/77625367