v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

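/* Walk the segment list produced by GSO and transfer the software TX
 * timestamp request to the first segment whose payload range reaches
 * ts_seq, the sequence number the timestamp was armed on.
 */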
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

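	/* If the device could have handled this GSO packet had it not come
	 * from an untrusted source, skip software segmentation: sanitize
	 * gso_segs from the untrusted gso_size and return NULL so the skb
	 * is passed on unsegmented.
	 */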
	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

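	/* The pseudo-header checksum in th->check covers the original TCP
	 * length; oldlen holds its one's complement, so oldlen + thlen + mss
	 * is the checksum delta for a segment carrying exactly mss bytes.
	 */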
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

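	/* Fix up every segment but the last: they all carry mss bytes, so
	 * they share the same adjusted checksum, FIN/PSH are cleared, the
	 * sequence number advances by mss, and CWR is kept only on the
	 * first segment.
	 */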
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

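	/* The last segment may carry less than mss bytes; redo the length
	 * delta from its actual linear tail plus paged data before fixing
	 * its checksum.
	 */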
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

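	/* The TCP header may not yet be in the region GRO has made directly
	 * accessible: pull the fixed header first, read doff, then pull
	 * again so any options are visible as well.
	 */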
	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

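	/* Look for a held packet of the same flow: source and destination
	 * ports are adjacent in the header, so both are compared with a
	 * single 32-bit xor.
	 */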
	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on if we
	 * continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

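	/* Final decision for this skb: flush (complete what has been merged
	 * so far) if this segment is undersized or carries a flag that must
	 * terminate aggregation (URG, PSH, RST, SYN, FIN).
	 */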
out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

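/* Once GRO has finished merging, turn the aggregate back into a proper GSO
 * packet: checksum offload fields are reset to CHECKSUM_PARTIAL and
 * gso_segs records how many segments were coalesced, so the packet can be
 * resegmented losslessly if it is forwarded.
 */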
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
v6.9.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}


	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the inner most IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on if we
	 * continue this flow as an atomic flow with a fixed ID or if we use
	 * an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

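/* net_hotdata groups frequently used networking data to improve cache
 * locality; the TCP/IPv4 offload callbacks are stored there and then
 * registered for IPPROTO_TCP via inet_add_offload().
 */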
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}