// SPDX-License-Identifier: GPL-2.0-or-later
#include <net/gro.h>
#include <net/dst_metadata.h>
#include <net/busy_poll.h>
#include <trace/events/net.h>

#define MAX_GRO_SKBS 8

/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)

static DEFINE_SPINLOCK(offload_lock);

/**
 * dev_add_offload - register offload handlers
 * @po: protocol offload declaration
 *
 * Add protocol offload handlers to the networking stack. The passed
 * &proto_offload is linked into kernel lists and may not be freed until
 * it has been removed from the kernel lists.
 *
 * This call does not sleep, therefore it cannot guarantee that all
 * CPUs that are in the middle of receiving packets will see the new
 * offload handlers (until the next received packet).
 */
void dev_add_offload(struct packet_offload *po)
{
	struct packet_offload *elem;

	spin_lock(&offload_lock);
	list_for_each_entry(elem, &net_hotdata.offload_base, list) {
		if (po->priority < elem->priority)
			break;
	}
	list_add_rcu(&po->list, elem->list.prev);
	spin_unlock(&offload_lock);
}
EXPORT_SYMBOL(dev_add_offload);

/**
 * __dev_remove_offload - remove offload handler
 * @po: packet offload declaration
 *
 * Remove a protocol offload handler that was previously added to the
 * kernel offload handlers by dev_add_offload(). The passed &offload_type
 * is removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * The packet type might still be in use by receivers
 * and must not be freed until after all the CPUs have gone
 * through a quiescent state.
 */
static void __dev_remove_offload(struct packet_offload *po)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *po1;

	spin_lock(&offload_lock);

	list_for_each_entry(po1, head, list) {
		if (po == po1) {
			list_del_rcu(&po->list);
			goto out;
		}
	}

	pr_warn("dev_remove_offload: %p not found\n", po);
out:
	spin_unlock(&offload_lock);
}

/**
 * dev_remove_offload - remove packet offload handler
 * @po: packet offload declaration
 *
 * Remove a packet offload handler that was previously added to the kernel
 * offload handlers by dev_add_offload(). The passed &offload_type is
 * removed from the kernel lists and can be freed or reused once this
 * function returns.
 *
 * This call sleeps to guarantee that no CPU is looking at the packet
 * type after return.
 */
void dev_remove_offload(struct packet_offload *po)
{
	__dev_remove_offload(po);

	synchronize_net();
}
EXPORT_SYMBOL(dev_remove_offload);
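
/* Usage sketch: a protocol module registers its offload callbacks once at
 * init time and only removes them on unload. The EtherType and callback
 * names below are hypothetical placeholders:
 *
 *	static struct packet_offload my_offload __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_MYPROTO),
 *		.callbacks = {
 *			.gro_receive  = my_gro_receive,
 *			.gro_complete = my_gro_complete,
 *		},
 *	};
 *
 *	dev_add_offload(&my_offload);
 *	...
 *	dev_remove_offload(&my_offload);	// may sleep
 */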


int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
{
	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
	unsigned int offset = skb_gro_offset(skb);
	unsigned int headlen = skb_headlen(skb);
	unsigned int len = skb_gro_len(skb);
	unsigned int delta_truesize;
	unsigned int gro_max_size;
	unsigned int new_truesize;
	struct sk_buff *lp;
	int segs;

	/* Do not splice page pool based packets w/ non-page pool
	 * packets. This can result in reference count issues as page
	 * pool pages will not decrement the reference count and will
	 * instead be immediately returned to the pool or have frag
	 * count decremented.
	 */
	if (p->pp_recycle != skb->pp_recycle)
		return -ETOOMANYREFS;

	/* pairs with WRITE_ONCE() in netif_set_gro(_ipv4)_max_size() */
	gro_max_size = p->protocol == htons(ETH_P_IPV6) ?
			READ_ONCE(p->dev->gro_max_size) :
			READ_ONCE(p->dev->gro_ipv4_max_size);

	if (unlikely(p->len + len >= gro_max_size || NAPI_GRO_CB(skb)->flush))
		return -E2BIG;

	if (unlikely(p->len + len >= GRO_LEGACY_MAX_SIZE)) {
		if (NAPI_GRO_CB(skb)->proto != IPPROTO_TCP ||
		    (p->protocol == htons(ETH_P_IPV6) &&
		     skb_headroom(p) < sizeof(struct hop_jumbo_hdr)) ||
		    p->encapsulation)
			return -E2BIG;
	}

	segs = NAPI_GRO_CB(skb)->count;
	lp = NAPI_GRO_CB(p)->last;
	pinfo = skb_shinfo(lp);

	if (headlen <= offset) {
		skb_frag_t *frag;
		skb_frag_t *frag2;
		int i = skbinfo->nr_frags;
		int nr_frags = pinfo->nr_frags + i;

		if (nr_frags > MAX_SKB_FRAGS)
			goto merge;

		offset -= headlen;
		pinfo->nr_frags = nr_frags;
		skbinfo->nr_frags = 0;

		frag = pinfo->frags + nr_frags;
		frag2 = skbinfo->frags + i;
		do {
			*--frag = *--frag2;
		} while (--i);

		skb_frag_off_add(frag, offset);
		skb_frag_size_sub(frag, offset);

		/* all fragments' truesize: remove (head size + sk_buff) */
		new_truesize = SKB_TRUESIZE(skb_end_offset(skb));
		delta_truesize = skb->truesize - new_truesize;

		skb->truesize = new_truesize;
		skb->len -= skb->data_len;
		skb->data_len = 0;

		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
		goto done;
	} else if (skb->head_frag) {
		int nr_frags = pinfo->nr_frags;
		skb_frag_t *frag = pinfo->frags + nr_frags;
		struct page *page = virt_to_head_page(skb->head);
		unsigned int first_size = headlen - offset;
		unsigned int first_offset;

		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
			goto merge;

		first_offset = skb->data -
			       (unsigned char *)page_address(page) +
			       offset;

		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;

		skb_frag_fill_page_desc(frag, page, first_offset, first_size);

		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
		/* We don't need to clear skbinfo->nr_frags here */

		new_truesize = SKB_DATA_ALIGN(sizeof(struct sk_buff));
		delta_truesize = skb->truesize - new_truesize;
		skb->truesize = new_truesize;
		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
		goto done;
	}

merge:
	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	skb->sk = NULL;
	delta_truesize = skb->truesize;
	if (offset > headlen) {
		unsigned int eat = offset - headlen;

		skb_frag_off_add(&skbinfo->frags[0], eat);
		skb_frag_size_sub(&skbinfo->frags[0], eat);
		skb->data_len -= eat;
		skb->len -= eat;
		offset = headlen;
	}

	__skb_pull(skb, offset);

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;
	NAPI_GRO_CB(p)->last = skb;
	__skb_header_release(skb);
	lp = p;

done:
	NAPI_GRO_CB(p)->count += segs;
	p->data_len += len;
	p->truesize += delta_truesize;
	p->len += len;
	if (lp != p) {
		lp->data_len += len;
		lp->truesize += delta_truesize;
		lp->len += len;
	}
	NAPI_GRO_CB(skb)->same_flow = 1;
	return 0;
}
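
/* Usage sketch: a transport ->gro_receive() callback that has matched a
 * held packet 'p' merges the new skb into it (compare tcp_gro_receive(),
 * where 'flush' is the callback's own flush decision):
 *
 *	if (flush || skb_gro_receive(p, skb))
 *		...	// not merged (e.g. -E2BIG), skb is flushed alone
 *
 * On success, skb's payload is absorbed into p and same_flow is set, so
 * dev_gro_receive() frees the now-empty skb.
 */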


static void napi_gro_complete(struct napi_struct *napi, struct sk_buff *skb)
{
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	int err = -ENOENT;

	BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));

	if (NAPI_GRO_CB(skb)->count == 1) {
		skb_shinfo(skb)->gso_size = 0;
		goto out;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;

		err = INDIRECT_CALL_INET(ptype->callbacks.gro_complete,
					 ipv6_gro_complete, inet_gro_complete,
					 skb, 0);
		break;
	}
	rcu_read_unlock();

	if (err) {
		WARN_ON(&ptype->list == head);
		kfree_skb(skb);
		return;
	}

out:
	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
}

static void __napi_gro_flush_chain(struct napi_struct *napi, u32 index,
				   bool flush_old)
{
	struct list_head *head = &napi->gro_hash[index].list;
	struct sk_buff *skb, *p;

	list_for_each_entry_safe_reverse(skb, p, head, list) {
		if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
			return;
		skb_list_del_init(skb);
		napi_gro_complete(napi, skb);
		napi->gro_hash[index].count--;
	}

	if (!napi->gro_hash[index].count)
		__clear_bit(index, &napi->gro_bitmask);
}

/* napi->gro_hash[].list contains packets ordered by age, with the
 * youngest packets at its head.
 * Complete skbs in reverse order to reduce latencies.
 */
void napi_gro_flush(struct napi_struct *napi, bool flush_old)
{
	unsigned long bitmask = napi->gro_bitmask;
	unsigned int i, base = ~0U;

	while ((i = ffs(bitmask)) != 0) {
		bitmask >>= i;
		base += i;
		__napi_gro_flush_chain(napi, base, flush_old);
	}
}
EXPORT_SYMBOL(napi_gro_flush);
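
/* Worked example of the bitmask walk above: with gro_bitmask = 0b10010
 * (buckets 1 and 4 non-empty), ffs() returns 2, so base becomes
 * ~0U + 2 = 1 and bucket 1 is flushed; the mask is then 0b100, ffs()
 * returns 3, base becomes 1 + 3 = 4, and bucket 4 is flushed.
 */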

static unsigned long gro_list_prepare_tc_ext(const struct sk_buff *skb,
					     const struct sk_buff *p,
					     unsigned long diffs)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	struct tc_skb_ext *skb_ext;
	struct tc_skb_ext *p_ext;

	skb_ext = skb_ext_find(skb, TC_SKB_EXT);
	p_ext = skb_ext_find(p, TC_SKB_EXT);

	diffs |= (!!p_ext) ^ (!!skb_ext);
	if (!diffs && unlikely(skb_ext))
		diffs |= p_ext->chain ^ skb_ext->chain;
#endif
	return diffs;
}

static void gro_list_prepare(const struct list_head *head,
			     const struct sk_buff *skb)
{
	unsigned int maclen = skb->dev->hard_header_len;
	u32 hash = skb_get_hash_raw(skb);
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		unsigned long diffs;

		NAPI_GRO_CB(p)->flush = 0;

		if (hash != skb_get_hash_raw(p)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
		diffs |= p->vlan_all ^ skb->vlan_all;
		diffs |= skb_metadata_differs(p, skb);
		if (maclen == ETH_HLEN)
			diffs |= compare_ether_header(skb_mac_header(p),
						      skb_mac_header(skb));
		else if (!diffs)
			diffs = memcmp(skb_mac_header(p),
				       skb_mac_header(skb),
				       maclen);

		/* In the most common scenarios 'slow_gro' is 0; otherwise
		 * we are already on some slower paths, so either skip all
		 * the infrequent tests altogether or avoid trying too hard
		 * to skip each of them individually.
		 */
		if (!diffs && unlikely(skb->slow_gro | p->slow_gro)) {
			diffs |= p->sk != skb->sk;
			diffs |= skb_metadata_dst_cmp(p, skb);
			diffs |= skb_get_nfct(p) ^ skb_get_nfct(skb);

			diffs |= gro_list_prepare_tc_ext(skb, p, diffs);
		}

		NAPI_GRO_CB(p)->same_flow = !diffs;
	}
}

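/* Cache the start of the packet's headers in NAPI_GRO_CB: the linear area
 * if there is one, or, in the common napi_gro_frags() case of an empty
 * linear area, the first fragment, so that header parsing can read from
 * frag0 without first pulling data into the linear area.
 */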
static inline void skb_gro_reset_offset(struct sk_buff *skb, u32 nhoff)
{
	const struct skb_shared_info *pinfo;
	const skb_frag_t *frag0;
	unsigned int headlen;

	NAPI_GRO_CB(skb)->network_offset = 0;
	NAPI_GRO_CB(skb)->data_offset = 0;
	headlen = skb_headlen(skb);
	NAPI_GRO_CB(skb)->frag0 = skb->data;
	NAPI_GRO_CB(skb)->frag0_len = headlen;
	if (headlen)
		return;

	pinfo = skb_shinfo(skb);
	frag0 = &pinfo->frags[0];

	if (pinfo->nr_frags && !PageHighMem(skb_frag_page(frag0)) &&
	    (!NET_IP_ALIGN || !((skb_frag_off(frag0) + nhoff) & 3))) {
		NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
		NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
						    skb_frag_size(frag0),
						    skb->end - skb->tail);
	}
}

static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
{
	struct skb_shared_info *pinfo = skb_shinfo(skb);

	BUG_ON(skb->end - skb->tail < grow);

	memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);

	skb->data_len -= grow;
	skb->tail += grow;

	skb_frag_off_add(&pinfo->frags[0], grow);
	skb_frag_size_sub(&pinfo->frags[0], grow);

	if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
		skb_frag_unref(skb, 0);
		memmove(pinfo->frags, pinfo->frags + 1,
			--pinfo->nr_frags * sizeof(pinfo->frags[0]));
	}
}

static void gro_try_pull_from_frag0(struct sk_buff *skb)
{
	int grow = skb_gro_offset(skb) - skb_headlen(skb);

	if (grow > 0)
		gro_pull_from_frag0(skb, grow);
}

static void gro_flush_oldest(struct napi_struct *napi, struct list_head *head)
{
	struct sk_buff *oldest;

	oldest = list_last_entry(head, struct sk_buff, list);

	/* We are called with head length >= MAX_GRO_SKBS, so this is
	 * impossible.
	 */
	if (WARN_ON_ONCE(!oldest))
		return;

	/* Do not adjust napi->gro_hash[].count, caller is adding a new
	 * SKB to the chain.
	 */
	skb_list_del_init(oldest);
	napi_gro_complete(napi, oldest);
}

static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	u32 bucket = skb_get_hash_raw(skb) & (GRO_HASH_BUCKETS - 1);
	struct gro_list *gro_list = &napi->gro_hash[bucket];
	struct list_head *head = &net_hotdata.offload_base;
	struct packet_offload *ptype;
	__be16 type = skb->protocol;
	struct sk_buff *pp = NULL;
	enum gro_result ret;
	int same_flow;

	if (netif_elide_gro(skb->dev))
		goto normal;

	gro_list_prepare(&gro_list->list, skb);

	rcu_read_lock();
	list_for_each_entry_rcu(ptype, head, list) {
		if (ptype->type == type && ptype->callbacks.gro_receive)
			goto found_ptype;
	}
	rcu_read_unlock();
	goto normal;

found_ptype:
	skb_set_network_header(skb, skb_gro_offset(skb));
	skb_reset_mac_len(skb);
	BUILD_BUG_ON(sizeof_field(struct napi_gro_cb, zeroed) != sizeof(u32));
	BUILD_BUG_ON(!IS_ALIGNED(offsetof(struct napi_gro_cb, zeroed),
				 sizeof(u32))); /* Avoid slow unaligned acc */
	*(u32 *)&NAPI_GRO_CB(skb)->zeroed = 0;
	NAPI_GRO_CB(skb)->flush = skb_has_frag_list(skb);
	NAPI_GRO_CB(skb)->is_atomic = 1;
	NAPI_GRO_CB(skb)->count = 1;
	if (unlikely(skb_is_gso(skb))) {
		NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs;
		/* Only support TCP and non DODGY users. */
		if (!skb_is_gso_tcp(skb) ||
		    (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY))
			NAPI_GRO_CB(skb)->flush = 1;
	}

	/* Setup for GRO checksum validation */
	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		NAPI_GRO_CB(skb)->csum = skb->csum;
		NAPI_GRO_CB(skb)->csum_valid = 1;
		break;
	case CHECKSUM_UNNECESSARY:
		NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
		break;
	}

	pp = INDIRECT_CALL_INET(ptype->callbacks.gro_receive,
				ipv6_gro_receive, inet_gro_receive,
				&gro_list->list, skb);

	rcu_read_unlock();

	if (PTR_ERR(pp) == -EINPROGRESS) {
		ret = GRO_CONSUMED;
		goto ok;
	}

	same_flow = NAPI_GRO_CB(skb)->same_flow;
	ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;

	if (pp) {
		skb_list_del_init(pp);
		napi_gro_complete(napi, pp);
		gro_list->count--;
	}

	if (same_flow)
		goto ok;

	if (NAPI_GRO_CB(skb)->flush)
		goto normal;

	if (unlikely(gro_list->count >= MAX_GRO_SKBS))
		gro_flush_oldest(napi, &gro_list->list);
	else
		gro_list->count++;

	/* Must be called before setting NAPI_GRO_CB(skb)->{age|last} */
	gro_try_pull_from_frag0(skb);
	NAPI_GRO_CB(skb)->age = jiffies;
	NAPI_GRO_CB(skb)->last = skb;
	if (!skb_is_gso(skb))
		skb_shinfo(skb)->gso_size = skb_gro_len(skb);
	list_add(&skb->list, &gro_list->list);
	ret = GRO_HELD;
ok:
	if (gro_list->count) {
		if (!test_bit(bucket, &napi->gro_bitmask))
			__set_bit(bucket, &napi->gro_bitmask);
	} else if (test_bit(bucket, &napi->gro_bitmask)) {
		__clear_bit(bucket, &napi->gro_bitmask);
	}

	return ret;

normal:
	ret = GRO_NORMAL;
	gro_try_pull_from_frag0(skb);
	goto ok;
}

struct packet_offload *gro_find_receive_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_receive)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_receive_by_type);

struct packet_offload *gro_find_complete_by_type(__be16 type)
{
	struct list_head *offload_head = &net_hotdata.offload_base;
	struct packet_offload *ptype;

	list_for_each_entry_rcu(ptype, offload_head, list) {
		if (ptype->type != type || !ptype->callbacks.gro_complete)
			continue;
		return ptype;
	}
	return NULL;
}
EXPORT_SYMBOL(gro_find_complete_by_type);
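
/* Usage sketch: tunnel offloads use these lookups to dispatch the inner
 * packet to the encapsulated protocol's handler; 'type' below is the inner
 * EtherType, and callers already run under RCU in the GRO path (compare
 * gre_gro_receive()):
 *
 *	ptype = gro_find_receive_by_type(type);
 *	if (!ptype)
 *		goto out;
 *	pp = ptype->callbacks.gro_receive(head, skb);
 */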

static gro_result_t napi_skb_finish(struct napi_struct *napi,
				    struct sk_buff *skb,
				    gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
		gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else if (skb->fclone != SKB_FCLONE_UNAVAILABLE)
			__kfree_skb(skb);
		else
			__napi_kfree_skb(skb, SKB_CONSUMED);
		break;

	case GRO_HELD:
	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
{
	gro_result_t ret;

	skb_mark_napi_id(skb, napi);
	trace_napi_gro_receive_entry(skb);

	skb_gro_reset_offset(skb, 0);

	ret = napi_skb_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_receive_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_receive);
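
/* Usage sketch: a NAPI driver's poll callback hands received skbs to GRO
 * instead of calling netif_receive_skb() directly ('rx_poll',
 * 'fetch_next_rx_skb' and 'netdev' are hypothetical placeholders):
 *
 *	static int rx_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct sk_buff *skb;
 *		int work = 0;
 *
 *		while (work < budget && (skb = fetch_next_rx_skb())) {
 *			skb->protocol = eth_type_trans(skb, netdev);
 *			napi_gro_receive(napi, skb);
 *			work++;
 *		}
 *		if (work < budget)
 *			napi_complete_done(napi, work);
 *		return work;
 *	}
 */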

static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
{
	if (unlikely(skb->pfmemalloc)) {
		consume_skb(skb);
		return;
	}
	__skb_pull(skb, skb_headlen(skb));
	/* restore the reserve we had after netdev_alloc_skb_ip_align() */
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
	__vlan_hwaccel_clear_tag(skb);
	skb->dev = napi->dev;
	skb->skb_iif = 0;

	/* eth_type_trans() assumes pkt_type is PACKET_HOST */
	skb->pkt_type = PACKET_HOST;

	skb->encapsulation = 0;
	skb_shinfo(skb)->gso_type = 0;
	skb_shinfo(skb)->gso_size = 0;
	if (unlikely(skb->slow_gro)) {
		skb_orphan(skb);
		skb_ext_reset(skb);
		nf_reset_ct(skb);
		skb->slow_gro = 0;
	}

	napi->skb = skb;
}

struct sk_buff *napi_get_frags(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;

	if (!skb) {
		skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
		if (skb) {
			napi->skb = skb;
			skb_mark_napi_id(skb, napi);
		}
	}
	return skb;
}
EXPORT_SYMBOL(napi_get_frags);

static gro_result_t napi_frags_finish(struct napi_struct *napi,
				      struct sk_buff *skb,
				      gro_result_t ret)
{
	switch (ret) {
	case GRO_NORMAL:
	case GRO_HELD:
		__skb_push(skb, ETH_HLEN);
		skb->protocol = eth_type_trans(skb, skb->dev);
		if (ret == GRO_NORMAL)
			gro_normal_one(napi, skb, 1);
		break;

	case GRO_MERGED_FREE:
		if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
			napi_skb_free_stolen_head(skb);
		else
			napi_reuse_skb(napi, skb);
		break;

	case GRO_MERGED:
	case GRO_CONSUMED:
		break;
	}

	return ret;
}

/* The upper GRO stack assumes the network header starts at gro_offset=0.
 * Drivers could call both napi_gro_frags() and napi_gro_receive(), so we
 * copy the ethernet header into skb->data to have a common layout.
 */
static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
{
	struct sk_buff *skb = napi->skb;
	const struct ethhdr *eth;
	unsigned int hlen = sizeof(*eth);

	napi->skb = NULL;

	skb_reset_mac_header(skb);
	skb_gro_reset_offset(skb, hlen);

	if (unlikely(!skb_gro_may_pull(skb, hlen))) {
		eth = skb_gro_header_slow(skb, hlen, 0);
		if (unlikely(!eth)) {
			net_warn_ratelimited("%s: dropping impossible skb from %s\n",
					     __func__, napi->dev->name);
			napi_reuse_skb(napi, skb);
			return NULL;
		}
	} else {
		eth = (const struct ethhdr *)skb->data;

		if (NAPI_GRO_CB(skb)->frag0 != skb->data)
			gro_pull_from_frag0(skb, hlen);

		NAPI_GRO_CB(skb)->frag0 += hlen;
		NAPI_GRO_CB(skb)->frag0_len -= hlen;
	}
	__skb_pull(skb, hlen);

	/*
	 * This works because the only protocols we care about don't require
	 * special handling.
	 * We'll fix it up properly in napi_frags_finish()
	 */
	skb->protocol = eth->h_proto;

	return skb;
}

gro_result_t napi_gro_frags(struct napi_struct *napi)
{
	gro_result_t ret;
	struct sk_buff *skb = napi_frags_skb(napi);

	trace_napi_gro_frags_entry(skb);

	ret = napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
	trace_napi_gro_frags_exit(ret);

	return ret;
}
EXPORT_SYMBOL(napi_gro_frags);
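
/* Usage sketch: drivers that receive directly into pages build the skb via
 * napi_get_frags() and hand it over without touching the linear area
 * (compare mlx4_en_process_rx_cq(); 'fill_rx_frags', 'pkt_len' and
 * 'truesize' are hypothetical placeholders):
 *
 *	skb = napi_get_frags(napi);
 *	if (!skb)
 *		goto drop;
 *	skb_shinfo(skb)->nr_frags = fill_rx_frags(skb_shinfo(skb)->frags);
 *	skb->len += pkt_len;
 *	skb->data_len += pkt_len;
 *	skb->truesize += truesize;
 *	napi_gro_frags(napi);
 */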

/* Compute the checksum from gro_offset and return the folded value
 * after adding in any pseudo checksum.
 */
__sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
{
	__wsum wsum;
	__sum16 sum;

	wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);

	/* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
	sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	NAPI_GRO_CB(skb)->csum = wsum;
	NAPI_GRO_CB(skb)->csum_valid = 1;

	return sum;
}
EXPORT_SYMBOL(__skb_gro_checksum_complete);
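
/* Usage sketch: protocol gro_receive handlers validate checksums through
 * the helpers in <net/gro.h>, which fall back to this function when a
 * software computation is needed (compare tcp4_gro_receive()):
 *
 *	if (!NAPI_GRO_CB(skb)->flush &&
 *	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
 *				      inet_gro_compute_pseudo))
 *		goto flush;
 */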