v3.15
 
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/skbuff.h>
#include <net/tcp.h>
#include <net/protocol.h>
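
/*
 * tcp_gso_segment() splits an oversized TCP packet into a chain of
 * MSS-sized segments, fixing up sequence numbers, FIN/PSH/CWR flags
 * and checksums on every segment it emits.
 */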
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

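	/* Stash the ones'-complement of the old total length; adding the
	 * new length to it later gives the checksum delta for an
	 * RFC 1624 style incremental update of th->check.
	 */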
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = tcp_skb_mss(skb);
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       SKB_GSO_GRE |
			       SKB_GSO_IPIP |
			       SKB_GSO_SIT |
			       SKB_GSO_MPLS |
			       SKB_GSO_UDP_TUNNEL |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

	do {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check =
			     csum_fold(csum_partial(skb_transport_header(skb),
						    thlen, skb->csum));

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack is invoked when the last frag
	 * is freed at TX completion, not now, when gso_skb is freed by
	 * the GSO engine.
	 */
	if (copy_destructor) {
		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		atomic_add(sum_truesize - gso_skb->truesize,
			   &skb->sk->sk_wmem_alloc);
	}

	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb_transport_header(skb),
						   thlen, skb->csum));
out:
	return segs;
}

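Note how the segmentation path above never re-sums the TCP payload when it rewrites th->check: it folds the ones'-complement difference between the old and new TCP lengths into the existing checksum, the incremental update described in RFC 1624. A standalone sketch of that arithmetic, using hypothetical host-order values (the kernel does the same thing with byte-order-safe __wsum helpers):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit ones'-complement accumulator to 16 bits,
 * propagating the end-around carry.
 */
static uint16_t fold32(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint16_t check = 0x1c46;	/* hypothetical stored th->check */
	uint16_t old_len = 3000;	/* old TCP length (header + payload) */
	uint16_t new_len = 20 + 1460;	/* thlen + mss of each new segment */
	/* mirrors: oldlen = (u16)~skb->len; delta = oldlen + thlen + mss;
	 * newcheck = ~csum_fold(check + delta);
	 */
	uint32_t delta = (uint16_t)~old_len + (uint32_t)new_len;
	uint16_t newcheck = fold32((uint32_t)check + delta);

	printf("0x%04x -> 0x%04x\n", check, newcheck);
	return 0;
}

tcp_gro_receive() below is the inverse operation: it coalesces in-order segments of one flow into a single large packet, flushing to the stack whenever the headers differ in a way a merged packet could not represent.
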
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	for (; (p = *head); head = &p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

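		/* th->source and th->dest are adjacent 16-bit fields, so a
		 * single 32-bit XOR compares both ports of the flow at once.
		 */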
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}

	goto out_check_final;

found:
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush | NAPI_GRO_CB(p)->flush_id;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	mss = tcp_skb_mss(p);

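	/* (len - 1) >= mss rejects both oversized segments (len > mss)
	 * and zero-length ones (the unsigned subtraction wraps); the
	 * sequence test rejects any gap or overlap: the new segment
	 * must start exactly where the held packet ends.
	 */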
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);

	if (flush || skb_gro_receive(head, skb)) {
		mss = 1;
		goto out_check_final;
	}

	p = *head;
	th2 = tcp_hdr(p);
	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = head;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

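/* tcp_gro_complete() finalizes a coalesced packet before it is handed
 * to the stack: the TCP checksum becomes a CHECKSUM_PARTIAL anchored
 * at the transport header, and gso_segs records how many segments were
 * merged so the packet can later be resegmented (e.g. when forwarded).
 */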
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

static int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}

static struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	/* Use the IP hdr immediately preceding this transport header */
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip_csum;

	wsum = NAPI_GRO_CB(skb)->csum;

	switch (skb->ip_summed) {
	case CHECKSUM_NONE:
		wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb),
				    0);

		/* fall through */

	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  wsum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}

		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

skip_csum:
	return tcp_gro_receive(head, skb);
}

static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_send_check	=	tcp_v4_gso_send_check,
		.gso_segment	=	tcp_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
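
The same file as of v6.13.7 follows. Beyond the SPDX license tag replacing the GPL boilerplate, the notable differences are fraglist GRO support (SKB_GSO_FRAGLIST), TX timestamp propagation in tcp_gso_segment(), indirect-call wrappers on the GRO entry points, and registration through net_hotdata; the .gso_send_check callback is gone, its pseudo-header setup folded into tcp4_gso_segment().
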
v6.13.7
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/gso.h>
#include <net/tcp.h>
#include <net/protocol.h>

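/* Transfer a TX timestamp request from the original GSO skb to the
 * one segment whose sequence range covers the tskey byte, so the
 * timestamp fires for the intended point in the stream.
 */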
static void tcp_gso_tstamp(struct sk_buff *skb, struct sk_buff *gso_skb,
			   unsigned int seq, unsigned int mss)
{
	u32 flags = skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP;
	u32 ts_seq = skb_shinfo(gso_skb)->tskey;

	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= flags;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}
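
The walk above relies on before() for wrap-safe sequence comparison; TCP sequence numbers live in a 32-bit space that wraps, so ordering is defined by the sign of the difference. A minimal sketch matching the helper's definition in include/net/tcp.h:

#include <stdbool.h>
#include <stdint.h>

/* seq1 precedes seq2 iff the signed 32-bit difference is negative */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

int main(void)
{
	/* 0xfffffff0 precedes 0x10 once the sequence space wraps */
	return before(0xfffffff0u, 0x10u) ? 0 : 1;
}
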
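/* When the head segment of a fraglist GSO packet was rewritten by NAT,
 * the new addresses/ports exist only in that head.  The helpers below
 * propagate the rewrite to every trailing segment using incremental
 * checksum updates rather than full recomputation.
 */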
static void __tcpv4_gso_segment_csum(struct sk_buff *seg,
				     __be32 *oldip, __be32 newip,
				     __be16 *oldport, __be16 newport)
{
	struct tcphdr *th;
	struct iphdr *iph;

	if (*oldip == newip && *oldport == newport)
		return;

	th = tcp_hdr(seg);
	iph = ip_hdr(seg);

	inet_proto_csum_replace4(&th->check, seg, *oldip, newip, true);
	inet_proto_csum_replace2(&th->check, seg, *oldport, newport, false);
	*oldport = newport;

	csum_replace4(&iph->check, *oldip, newip);
	*oldip = newip;
}

static struct sk_buff *__tcpv4_gso_segment_list_csum(struct sk_buff *segs)
{
	const struct tcphdr *th;
	const struct iphdr *iph;
	struct sk_buff *seg;
	struct tcphdr *th2;
	struct iphdr *iph2;

	seg = segs;
	th = tcp_hdr(seg);
	iph = ip_hdr(seg);
	th2 = tcp_hdr(seg->next);
	iph2 = ip_hdr(seg->next);

	if (!(*(const u32 *)&th->source ^ *(const u32 *)&th2->source) &&
	    iph->daddr == iph2->daddr && iph->saddr == iph2->saddr)
		return segs;

	while ((seg = seg->next)) {
		th2 = tcp_hdr(seg);
		iph2 = ip_hdr(seg);

		__tcpv4_gso_segment_csum(seg,
					 &iph2->saddr, iph->saddr,
					 &th2->source, th->source);
		__tcpv4_gso_segment_csum(seg,
					 &iph2->daddr, iph->daddr,
					 &th2->dest, th->dest);
	}

	return segs;
}

static struct sk_buff *__tcp4_gso_segment_list(struct sk_buff *skb,
					      netdev_features_t features)
{
	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	return __tcpv4_gso_segment_list_csum(skb);
}

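/* IPv4 entry point for TCP GSO: validate the gso_type, take the
 * fraglist fast path when every fragment already carries exactly one
 * gso_size worth of payload, and fall back to tcp_gso_segment()
 * otherwise.
 */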
static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) {
		struct tcphdr *th = tcp_hdr(skb);

		if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size)
			return __tcp4_gso_segment_list(skb, features);

		skb->ip_summed = CHECKSUM_NONE;
	}

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo-header; the stack is
		 * usually expected to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;
	__wsum delta;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (unlikely(skb_checksum_start(skb) != skb_transport_header(skb)))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = ~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	delta = (__force __wsum)htonl(oldlen + thlen + mss);

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP))
		tcp_gso_tstamp(segs, gso_skb, seq, mss);

	newcheck = ~csum_fold(csum_add(csum_unfold(th->check), delta));

	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack is invoked when the last frag
	 * is freed at TX completion, not now, when gso_skb is freed by
	 * the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

	delta = (__force __wsum)htonl(oldlen +
				      (skb_tail_pointer(skb) -
				       skb_transport_header(skb)) +
				      skb->data_len);
	th->check = ~csum_fold(csum_add(csum_unfold(th->check), delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

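/* Find a held GRO packet of the same flow, matching on the
 * source/destination port pair (the addresses were already matched at
 * the network layer); non-matching entries are marked not same_flow
 * so they are skipped on later passes.
 */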
struct sk_buff *tcp_gro_lookup(struct list_head *head, struct tcphdr *th)
{
	struct tcphdr *th2;
	struct sk_buff *p;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);
		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		return p;
	}

	return NULL;
}

struct tcphdr *tcp_gro_pull_header(struct sk_buff *skb)
{
	unsigned int thlen, hlen, off;
	struct tcphdr *th;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header(skb, hlen, off);
	if (unlikely(!th))
		return NULL;

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		return NULL;

	hlen = off + thlen;
	if (!skb_gro_may_pull(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			return NULL;
	}

	skb_gro_pull(skb, thlen);

	return th;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct tcphdr *th)
{
	unsigned int thlen = th->doff * 4;
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th2;
	unsigned int len;
	__be32 flags;
	unsigned int mss = 1;
	int flush = 1;
	int i;

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	p = tcp_gro_lookup(head, th);
	if (!p)
		goto out_check_final;

	th2 = tcp_hdr(p);
	flush = (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	flush |= gro_receive_network_flush(th, th2, p);

	mss = skb_shinfo(p)->gso_size;

	/* If skb is a GRO packet, make sure its gso_size matches prior packet mss.
	 * If it is a single frame, do not aggregate it if its length
	 * is bigger than our mss.
	 */
	if (unlikely(skb_is_gso(skb)))
		flush |= (mss != skb_shinfo(skb)->gso_size);
	else
		flush |= (len - 1) >= mss;

	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
	flush |= skb_cmp_decrypted(p, skb);

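	/* Fraglist GRO chains whole segments on frag_list, so headers
	 * must match exactly (including checksum state) and a chain is
	 * capped at 64 segments.
	 */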
	if (unlikely(NAPI_GRO_CB(p)->is_flist)) {
		flush |= (__force int)(flags ^ tcp_flag_word(th2));
		flush |= skb->ip_summed != p->ip_summed;
		flush |= skb->csum_level != p->csum_level;
		flush |= NAPI_GRO_CB(p)->count >= 64;

		if (flush || skb_gro_receive_list(p, skb))
			mss = 1;

		goto out_check_final;
	}

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	/* Force a flush if last segment is smaller than mss. */
	if (unlikely(skb_is_gso(skb)))
		flush = len != NAPI_GRO_CB(skb)->count * skb_shinfo(skb)->gso_size;
	else
		flush = len < mss;

	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

void tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);
	struct skb_shared_info *shinfo;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	shinfo = skb_shinfo(skb);
	shinfo->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		shinfo->gso_type |= SKB_GSO_TCP_ECN;
}
EXPORT_SYMBOL(tcp_gro_complete);

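/* Decide whether a flow should use fraglist GRO: if no matching packet
 * is held yet, look the flow up in the established-socket table.
 * Flows without a local socket (packets being forwarded) take the
 * fraglist path so they can later be resegmented losslessly.
 */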
static void tcp4_check_fraglist_gro(struct list_head *head, struct sk_buff *skb,
				    struct tcphdr *th)
{
	const struct iphdr *iph;
	struct sk_buff *p;
	struct sock *sk;
	struct net *net;
	int iif, sdif;

	if (likely(!(skb->dev->features & NETIF_F_GRO_FRAGLIST)))
		return;

	p = tcp_gro_lookup(head, th);
	if (p) {
		NAPI_GRO_CB(skb)->is_flist = NAPI_GRO_CB(p)->is_flist;
		return;
	}

	inet_get_iif_sdif(skb, &iif, &sdif);
	iph = skb_gro_network_header(skb);
	net = dev_net(skb->dev);
	sk = __inet_lookup_established(net, net->ipv4.tcp_death_row.hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       iif, sdif);
	NAPI_GRO_CB(skb)->is_flist = !sk;
	if (sk)
		sock_put(sk);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct tcphdr *th;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo))
		goto flush;

	th = tcp_gro_pull_header(skb);
	if (!th)
		goto flush;

	tcp4_check_fraglist_gro(head, skb, th);

	return tcp_gro_receive(head, skb, th);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

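/* Finish a coalesced IPv4 TCP packet: restore a pseudo-header checksum
 * and mark the skb as TCPv4 GSO so it can be resegmented on output.
 * Fraglist packets keep their per-segment checksums and only update
 * the GSO metadata.
 */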
INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const u16 offset = NAPI_GRO_CB(skb)->network_offsets[skb->encapsulation];
	const struct iphdr *iph = (struct iphdr *)(skb->data + offset);
	struct tcphdr *th = tcp_hdr(skb);

	if (unlikely(NAPI_GRO_CB(skb)->is_flist)) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_FRAGLIST | SKB_GSO_TCPV4;
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		__skb_incr_checksum_unnecessary(skb);

		return 0;
	}

	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);

	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4 |
			(NAPI_GRO_CB(skb)->ip_fixedid * SKB_GSO_TCP_FIXEDID);

	tcp_gro_complete(skb);
	return 0;
}

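/* Boot-time registration; the offload ops live in net_hotdata to keep
 * fast-path state together on hot cachelines.
 */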
int __init tcpv4_offload_init(void)
{
	net_hotdata.tcpv4_offload = (struct net_offload) {
		.callbacks = {
			.gso_segment	=	tcp4_gso_segment,
			.gro_receive	=	tcp4_gro_receive,
			.gro_complete	=	tcp4_gro_complete,
		},
	};
	return inet_add_offload(&net_hotdata.tcpv4_offload, IPPROTO_TCP);
}