v4.17
 
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>

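/* Walk the segment list and mark the one segment whose sequence range
 * covers ts_seq with the software-timestamp flag, so the timestamp
 * requested on the original GSO packet is reported for the right byte.
 */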
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

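/* IPv4 entry point for TCP segmentation: validate the gso_type and the
 * TCP header and, in the unlikely case the stack has not already set up
 * checksum offload, initialize the pseudo-header checksum before handing
 * off to the protocol-independent tcp_gso_segment().
 */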
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

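	/* With CHECKSUM_PARTIAL, th->check holds only the pseudo-header sum,
	 * and the sole per-segment difference there is the TCP length field.
	 * Save the ones' complement of the old length so the checksum can be
	 * fixed up incrementally (RFC 1624) once the segment size is known.
	 */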
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

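	/* Incremental update: delta = ~old_len + new_len in ones' complement
	 * arithmetic; adding it to the unfolded checksum yields the check for
	 * a full-sized segment (new TCP length = thlen + mss).
	 */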
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

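	/* Match packets of the same flow: source and dest ports are adjacent
	 * 16-bit fields in struct tcphdr, so a single 32-bit XOR compares
	 * both at once.
	 */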
	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or if we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

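	/* Don't aggregate if this packet is larger than the flow's MSS, or
	 * if it is not the exact in-order successor of the held packet.
	 */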
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

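/* Finalize a merged GRO packet: restore the checksum-offload fields and
 * GSO metadata so the coalesced skb looks like a locally generated GSO
 * packet to the rest of the stack.
 */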
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
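
The incremental checksum update in tcp_gso_segment() is easier to follow outside the kernel. Below is a minimal stand-alone user-space sketch (the fold16/cksum helpers and the sample values are illustrative, not kernel code) of the RFC 1624 identity check' = ~(~check + ~m + m') that both listings rely on: v4.17 spells it with raw __force arithmetic, v6.9.4 with csum_add()/csum_unfold(). Updating only the changed TCP-length word of the pseudo header gives the same result as recomputing the checksum from scratch.

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones' complement accumulator into 16 bits. */
static uint16_t fold16(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* Internet checksum over an array of 16-bit words (host order for demo). */
static uint16_t cksum(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	return (uint16_t)~fold16(sum);
}

int main(void)
{
	/* Toy pseudo header: two address words, protocol, TCP length. */
	uint16_t hdr[4] = { 0xc0a8, 0x0001, 0x0006, 3000 };
	uint16_t old = cksum(hdr, 4);
	uint16_t newlen = 20 + 1460;		/* thlen + mss */

	/* RFC 1624: check' = ~(~check + ~m + m'); the kernel spells this
	 * ~csum_fold(csum_add(csum_unfold(th->check), delta)) with
	 * delta built from oldlen = (u16)~skb->len, as in the code above.
	 */
	uint16_t incr = (uint16_t)~fold16((uint16_t)~old +
					  (uint32_t)(uint16_t)~3000 + newlen);

	hdr[3] = newlen;			/* recompute from scratch */
	printf("incremental 0x%04x == recomputed 0x%04x\n",
	       incr, cksum(hdr, 4));
	return 0;
}
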
v6.9.4
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up checksum pseudo header, usually expect stack to
		 * have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* Following permits TCP Small Queues to work well with GSO :
	 * The callback to TCP stack will be called at the time last frag
	 * is freed at TX completion, and not right now when gso_skb
	 * is freed by GSO engine
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

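	/* skb_gro_header() merges the old header_fast/header_slow pair used
	 * in v4.17: it returns the header pointer when hlen bytes are
	 * already linear, pulls from the fragments otherwise, and returns
	 * NULL on failure.
	 */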
	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		goto out;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or if we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
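	/* Never merge a TLS-device-decrypted skb with a cleartext one:
	 * their payloads are in different states even if the flow matches.
	 */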
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

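/* Finalize a merged GRO packet. Unlike the v4.17 version above, this
 * returns void and, for encapsulated packets, repoints the inner
 * transport header so checksum offload targets the inner TCP header.
 */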
void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->is_atomic * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

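/* The offload table now lives in net_hotdata rather than a file-local
 * static: grouping frequently accessed networking state in one place
 * improves cache locality on the hot RX/TX paths.
 */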
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}