// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

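/* Propagate the TX timestamp request from the original GSO skb to the
 * one segment that covers the byte stored in tskey. Worked example with
 * assumed values (not taken from the code): seq = 1000, mss = 1000,
 * tskey = 2500; the segments cover [1000, 2000) and [2000, 3000), so
 * before(2500, 2000) fails and before(2500, 3000) holds, placing the
 * timestamp flags on the second segment.
 */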
static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
                           unsigned int seq, unsigned int mss)
{
        u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
        u32 ts_seq = skb_shinfo(gso_skb)->tskey;

        while (skb) {
                if (before(ts_seq, seq + mss)) {
                        skb_shinfo(skb)->tx_flags |= flags;
                        skb_shinfo(skb)->tskey = ts_seq;
                        return;
                }

                skb = skb->next;
                seq += mss;
        }
}

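/* Incrementally patch one segment's checksums after an address or port
 * rewrite rather than recomputing them: the TCP checksum covers the
 * pseudo-header, so the IP address delta is applied to it with
 * pseudohdr == true, while the IP header checksum needs only
 * csum_replace4() for the address change.
 */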
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
                                     __be32 *oldip, __be32 newip,
                                     __be16 *oldport, __be16 newport)
{
        struct tcphdr *th;
        struct iphdr *iph;

        if (*oldip == newip && *oldport == newport)
                return;

        th = tcp_hdr(seg);
        iph = ip_hdr(seg);

        inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
        inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
        *oldport = newport;

        csum_replace4(&iph->check, *oldip, newip);
        *oldip = newip;
}

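/* After skb_segment_list() the trailing segments still carry the
 * headers they had when the frag_list was built. If the head's
 * addresses or ports have since been rewritten (e.g. by NAT),
 * propagate the head's values, with matching checksum fixups, to every
 * following segment. The u32 XOR below compares both 16-bit ports in
 * one load.
 */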
static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
        const struct tcphdr *th;
        const struct iphdr *iph;
        struct sk_buff *seg;
        struct tcphdr *th2;
        struct iphdr *iph2;

        seg = segs;
        th = tcp_hdr(seg);
        iph = ip_hdr(seg);
        th2 = tcp_hdr(seg->next);
        iph2 = ip_hdr(seg->next);

        if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
            iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
                return segs;

        while ((seg = seg->next)) {
                th2 = tcp_hdr(seg);
                iph2 = ip_hdr(seg);

                __tcpv4_gso_segment_csum(seg,
                                         &iph2->saddr, iph->saddr,
                                         &th2->source, th->source);
                __tcpv4_gso_segment_csum(seg,
                                         &iph2->daddr, iph->daddr,
                                         &th2->dest, th->dest);
        }

        return segs;
}

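/* Fraglist segmentation: restore the original frames queued on
 * skb_shinfo(skb)->frag_list, then fix up headers and checksums across
 * the resulting list.
 */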
static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
                                               netdev_features_t features)
{
        skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
        if (IS_ERR(skb))
                return skb;

        return __tcpv4_gso_segment_list_csum(skb);
}

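/* GSO entry point for TCPv4. The fraglist path is only taken when the
 * head skb carries exactly one MSS of payload after the TCP header,
 * i.e. the frame boundaries recorded at GRO time are still intact;
 * otherwise the skb falls back to normal segmentation and the checksum
 * is recomputed from scratch.
 */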
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);

        if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
                struct tcphdr *th = tcp_hdr(skb);

                if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
                        return __tcp4_gso_segment_list(skb, features);

                skb->ip_summed = CHECKSUM_NONE;
        }

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct iphdr *iph = ip_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);

                /* Set up checksum pseudo header, usually expect stack to
                 * have done this already.
                 */

                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        }

        return tcp_gso_segment(skb, features);
}

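/* The checksum handling below relies on 16-bit one's-complement
 * arithmetic, where ~x acts as -x. The pseudo-header sum includes the
 * TCP length, so shrinking a frame from skb->len to thlen + mss bytes
 * changes the sum by (thlen + mss) - skb->len, which is exactly
 * oldlen + thlen + mss with oldlen = ~skb->len taken before the header
 * pull. The last, possibly shorter, segment gets a separate delta
 * computed from its actual tail length.
 */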
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;
        __wsum delta;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

        oldlen = ~skb->len;
        __skb_pull(skb, thlen);

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

        /* GSO partial and frag_list segmentation only requires splitting
         * the frame into an MSS multiple and possibly a remainder, both
         * cases return a GSO skb. So update the mss now.
         */
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;

        delta = (__force __wsum)htonl(oldlen + thlen + mss);

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
                tcp_gso_tstamp(segs, gso_skb, seq, mss);

        newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

        while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        gso_reset_checksum(skb, ~th->check);
                else
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        }

        /* Following permits TCP Small Queues to work well with GSO:
         * The callback to TCP stack will be called at the time last frag
         * is freed at TX completion, and not right now when gso_skb
         * is freed by GSO engine
         */
        if (copy_destructor) {
                int delta;

                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                delta = sum_truesize - gso_skb->truesize;
                /* In some pathological cases, delta can be negative.
                 * We need to either use refcount_add() or refcount_sub_and_test()
                 */
                if (likely(delta >= 0))
                        refcount_add(delta, &skb->sk->sk_wmem_alloc);
                else
                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
        }

        delta = (__force __wsum)htonl(oldlen +
                                      (skb_tail_pointer(skb) -
                                       skb_transport_header(skb)) +
                                      skb->data_len);
        th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(skb, ~th->check);
        else
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}

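/* Find the held packet belonging to the same flow as @th. Entries whose
 * network headers already failed to match have same_flow cleared and
 * are skipped; th->source and th->dest are adjacent in the header, so a
 * single u32 XOR compares both ports at once.
 */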
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
        struct tcphdr *th2;
        struct sk_buff *p;

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);
                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                return p;
        }

        return NULL;
}

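/* Make the complete TCP header, options included, directly readable in
 * the GRO header area (falling back to the slow path when it spans
 * fragments) and advance the GRO offset past it. Returns NULL on a
 * truncated or malformed header.
 */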
struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
        unsigned int thlen, hlen, off;
        struct tcphdr *th;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header(skb, hlen, off);
        if (unlikely(!th))
                return NULL;

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                return NULL;

        hlen = off + thlen;
        if (!skb_gro_may_pull(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        return NULL;
        }

        skb_gro_pull(skb, thlen);

        return th;
}

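/* Core TCP GRO merge logic. A nonzero flush means the held packet must
 * be delivered instead of grown further: CWR set, flags other than
 * FIN/PSH differing, ack_seq or any option bytes differing, a sequence
 * number that is not the exact continuation of the held packet, or an
 * mss mismatch all end aggregation.
 */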
struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
                                struct tcphdr *th)
{
        unsigned int thlen = th->doff * 4;
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th2;
        unsigned int len;
        __be32 flags;
        unsigned int mss = 1;
        int flush = 1;
        int i;

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

        p = tcp_gro_lookup(head, th);
        if (!p)
                goto out_check_final;

        th2 = tcp_hdr(p);
        flush = (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        flush |= gro_receive_network_flush(th, th2, p);

        mss = skb_shinfo(p)->gso_size;

        /* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
         * If it is a single frame, do not aggregate it if its length
         * is bigger than our mss.
         */
        if (unlikely(skb_is_gso(skb)))
                flush |= (mss != skb_shinfo(skb)->gso_size);
        else
                flush |= (len - 1) >= mss;

        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
        flush |= skb_cmp_decrypted(p, skb);

        if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
                flush |= (__force int)(flags ^ tcp_flag_word(th2));
                flush |= skb->ip_summed != p->ip_summed;
                flush |= skb->csum_level != p->csum_level;
                flush |= NAPI_GRO_CB(p)->count >= 64;

                if (flush || skb_gro_receive_list(p, skb))
                        mss = 1;

                goto out_check_final;
        }

        if (flush || skb_gro_receive(p, skb)) {
                mss = 1;
                goto out_check_final;
        }

        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        /* Force a flush if last segment is smaller than mss. */
        if (unlikely(skb_is_gso(skb)))
                flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
        else
                flush = len < mss;

        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = p;

        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}

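/* Finalize a merged packet before it leaves GRO: point csum_start and
 * csum_offset at the TCP checksum field (CHECKSUM_PARTIAL), record the
 * number of coalesced segments in gso_segs, and keep ECN state by
 * marking SKB_GSO_TCP_ECN when CWR was observed.
 */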
void tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);
        struct skb_shared_info *shinfo;

        if (skb->encapsulation)
                skb->inner_transport_header = skb->transport_header;

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        shinfo = skb_shinfo(skb);
        shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

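/* Decide whether the flow should use fraglist GRO. With no established
 * socket on this host the packet is most likely being forwarded, so
 * keeping the original frames on a frag_list (is_flist) makes later
 * resegmentation on output cheap.
 */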
static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
                                    struct tcphdr *th)
{
        const struct iphdr *iph;
        struct sk_buff *p;
        struct sock *sk;
        struct net *net;
        int iif, sdif;

        if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
                return;

        p = tcp_gro_lookup(head, th);
        if (p) {
                NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
                return;
        }

        inet_get_iif_sdif(skb, &iif, &sdif);
        iph = skb_gro_network_header(skb);
        net = dev_net(skb->dev);
        sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
                                       iph->saddr, th->source,
                                       iph->daddr, ntohs(th->dest),
                                       iif, sdif);
        NAPI_GRO_CB(skb)->is_flist = !sk;
        if (sk)
                sock_put(sk);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        struct tcphdr *th;

        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo))
                goto flush;

        th = tcp_gro_pull_header(skb);
        if (!th)
                goto flush;

        tcp4_check_fraglist_gro(head, skb, th);

        return tcp_gro_receive(head, skb, th);

flush:
        NAPI_GRO_CB(skb)->flush = 1;
        return NULL;
}

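/* GRO completion for TCPv4. Fraglist packets keep their original frames
 * and only need GSO metadata and a checksum-level bump; for merged
 * packets the TCP checksum field is seeded with the inverted
 * pseudo-header sum so CHECKSUM_PARTIAL (or a later GSO pass) can
 * complete it.
 */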
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
        const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
        struct tcphdr *th = tcp_hdr(skb);

        if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
                skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
                skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

                __skb_incr_checksum_unnecessary(skb);

                return 0;
        }

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);

        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
                        (NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);

        tcp_gro_complete(skb);
        return 0;
}

int __init tcpv4_offload_init(void)
{
        net_hotdata.tcpv4_offload = (struct net_offload) {
                .callbacks = {
                        .gso_segment = tcp4_gso_segment,
                        .gro_receive = tcp4_gro_receive,
                        .gro_complete = tcp4_gro_complete,
                },
        };
        return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}