v4.17: net/ipv4/udp_offload.c
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/udp.h>
#include <net/protocol.h>

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}
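The `partial` bookkeeping above is plain incremental-checksum arithmetic (RFC 1624): the contribution of the old length word is subtracted once, and each emitted segment then re-adds its own length before folding. Below is a minimal userspace sketch of that arithmetic; the csum16_* helpers are toy stand-ins for the kernel's csum_sub()/csum_add()/csum_fold(), not the kernel API. The assert shows the patched checksum equals a full recomputation.

/* Incremental checksum demo (userspace sketch, not kernel code). */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* one's-complement sum of 16-bit words, folded to 16 bits */
static uint16_t csum16(const uint16_t *w, int n)
{
	uint32_t sum = 0;

	while (n--)
		sum += *w++;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint16_t csum16_sub(uint16_t sum, uint16_t val)
{
	/* subtracting x is adding its one's complement */
	uint32_t s = sum + (uint16_t)~val;

	return (uint16_t)((s & 0xffff) + (s >> 16));
}

static uint16_t csum16_add(uint16_t sum, uint16_t val)
{
	uint32_t s = sum + val;

	return (uint16_t)((s & 0xffff) + (s >> 16));
}

int main(void)
{
	/* toy "header": ports, length, one payload word */
	uint16_t hdr[4] = { 0x1234, 0x5678, 2000, 0xabcd };
	uint16_t full = csum16(hdr, 4);

	/* replace length 2000 by 1500 incrementally ("partial" step) ... */
	uint16_t partial = csum16_sub(full, 2000);
	uint16_t patched = csum16_add(partial, 1500);

	/* ... and verify against a recomputation from scratch */
	hdr[2] = 1500;
	assert(patched == csum16(hdr, 4));
	printf("incremental update matches: 0x%04x\n", patched);
	return 0;
}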

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	__be16 protocol = skb->protocol;
	const struct net_offload **offloads;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
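skb_udp_tunnel_segment() selects the inner segmentation callback either from the L2 GSO path (ENCAP_TYPE_ETHER) or from the per-protocol inet(6)_offloads table. The sketch below models that table-driven dispatch in plain userspace C; all names in it are hypothetical stand-ins, not kernel symbols.

/* Toy model of protocol-indexed offload dispatch (hypothetical names). */
#include <stdio.h>

#define IPPROTO_MAX_DEMO 256

typedef int (*segment_cb)(const char *pkt);

static int segment_tcp(const char *pkt) { printf("TCP: %s\n", pkt); return 0; }
static int segment_udp(const char *pkt) { printf("UDP: %s\n", pkt); return 0; }

/* table indexed by IP protocol number, like inet_offloads[] */
static segment_cb offload_table[IPPROTO_MAX_DEMO] = {
	[6]  = segment_tcp,	/* IPPROTO_TCP */
	[17] = segment_udp,	/* IPPROTO_UDP */
};

static int segment_inner(unsigned int ipproto, const char *pkt)
{
	segment_cb cb = ipproto < IPPROTO_MAX_DEMO ? offload_table[ipproto] : NULL;

	if (!cb)
		return -1;	/* like the !ops->callbacks.gso_segment bail-out */
	return cb(pkt);
}

int main(void)
{
	segment_inner(6, "inner tcp frame");
	segment_inner(17, "inner udp frame");
	printf("unknown proto -> %d\n", segment_inner(47, "gre"));
	return 0;
}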

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}
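The software-UFO fallback recomputes the full UDP checksum in software: zero uh->check, sum the datagram, fold in the IPv4 pseudo-header (what udp_v4_check() does), and substitute CSUM_MANGLED_0 (0xffff) for 0, since a zero UDP checksum over IPv4 means "not computed". A self-contained sketch of that arithmetic, assuming nothing beyond libc; only udp_v4_check's math is modeled:

/* Userspace sketch of the software-UFO checksum fill-in. Toy helpers. */
#include <stdint.h>
#include <stdio.h>

static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];	/* big-endian words */
		p += 2;
		len -= 2;
	}
	if (len)	/* odd trailing byte is padded with zero */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

static uint16_t udp_v4_check_demo(const uint8_t saddr[4], const uint8_t daddr[4],
				  const uint8_t *udp, uint16_t udplen)
{
	uint32_t sum = 0;

	/* pseudo-header: saddr, daddr, zero+protocol(17), UDP length */
	sum = sum16(saddr, 4, sum);
	sum = sum16(daddr, 4, sum);
	sum += 17;
	sum += udplen;
	sum = sum16(udp, udplen, sum);	/* UDP header (check=0) + payload */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	sum = ~sum & 0xffff;
	return sum ? sum : 0xffff;	/* CSUM_MANGLED_0 */
}

int main(void)
{
	uint8_t saddr[4] = { 192, 0, 2, 1 }, daddr[4] = { 192, 0, 2, 2 };
	uint8_t dgram[12] = { 0x30, 0x39,	/* sport 12345 */
			      0x00, 0x35,	/* dport 53    */
			      0x00, 0x0c,	/* len 12      */
			      0x00, 0x00,	/* check = 0   */
			      'p', 'i', 'n', 'g' };

	printf("udp check = 0x%04x\n",
	       udp_v4_check_demo(saddr, daddr, dgram, sizeof(dgram)));
	return 0;
}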

struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
				 struct udphdr *uh, udp_lookup_t lookup)
{
	struct sk_buff *p, **pp = NULL;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;
	struct sock *sk;

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);

	if (sk && udp_sk(sk)->gro_receive)
		goto unflush;
	goto out_unlock;

unflush:
	flush = 0;

	for (p = *head; p; p = p->next) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and require the checksums to be either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);
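Note the flow-match test compares both ports with a single 32-bit load: source and dest are adjacent __be16 fields at offset 0 of struct udphdr, and since the test is pure equality, byte order does not matter. A userspace illustration with a toy struct, using memcpy to sidestep the strict-aliasing concerns the kernel cast ignores:

/* Sketch of the *(u32 *)&uh->source flow-match trick. Toy code. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct udphdr_demo {
	uint16_t source;
	uint16_t dest;
	uint16_t len;
	uint16_t check;
};

static int same_port_pair(const struct udphdr_demo *a, const struct udphdr_demo *b)
{
	uint32_t pa, pb;

	memcpy(&pa, &a->source, sizeof(pa));	/* covers source + dest */
	memcpy(&pb, &b->source, sizeof(pb));
	return pa == pb;
}

int main(void)
{
	struct udphdr_demo x = { 1111, 2222, 0, 0 };
	struct udphdr_demo y = { 1111, 2222, 99, 0 };
	struct udphdr_demo z = { 1111, 3333, 0, 0 };

	printf("x~y: %d, x~z: %d\n", same_port_pair(&x, &y), same_port_pair(&x, &z));
	return 0;
}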

static struct sk_buff **udp4_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;
	return udp_gro_receive(head, skb, uh, udp4_lib_lookup_skb);

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	int err = -ENOSYS;
	struct sock *sk;

	uh->len = newlen;

	/* Set encapsulation before calling into inner gro_complete() functions
	 * to make them set up the inner offsets.
	 */
	skb->encapsulation = 1;

	rcu_read_lock();
	sk = (*lookup)(skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete)
		err = udp_sk(sk)->gro_complete(sk, skb,
				nhoff + sizeof(struct udphdr));
	rcu_read_unlock();

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

static int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	if (uh->check) {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);
	} else {
		skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}

v6.2: net/ipv4/udp_offload.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	UDPv4 GSO support
 */

#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/udp.h>
#include <net/protocol.h>
#include <net/inet_common.h>

static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
	netdev_features_t features,
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features),
	__be16 new_protocol, bool is_ipv6)
{
	int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
	bool remcsum, need_csum, offload_csum, gso_partial;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct udphdr *uh = udp_hdr(skb);
	u16 mac_offset = skb->mac_header;
	__be16 protocol = skb->protocol;
	u16 mac_len = skb->mac_len;
	int udp_offset, outer_hlen;
	__wsum partial;
	bool need_ipsec;

	if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
		goto out;

	/* Adjust partial header checksum to negate old length.
	 * We cannot rely on the value contained in uh->len as it is
	 * possible that the actual value exceeds the boundaries of the
	 * 16 bit length field due to the header being added outside of an
	 * IP or IPv6 frame that was already limited to 64K - 1.
	 */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL)
		partial = (__force __wsum)uh->len;
	else
		partial = (__force __wsum)htonl(skb->len);
	partial = csum_sub(csum_unfold(uh->check), partial);

	/* setup inner skb. */
	skb->encapsulation = 0;
	SKB_GSO_CB(skb)->encap_level = 0;
	__skb_pull(skb, tnl_hlen);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb_inner_network_offset(skb));
	skb_set_transport_header(skb, skb_inner_transport_offset(skb));
	skb->mac_len = skb_inner_network_offset(skb);
	skb->protocol = new_protocol;

	need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	skb->encap_hdr_csum = need_csum;

	remcsum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TUNNEL_REMCSUM);
	skb->remcsum_offload = remcsum;

	need_ipsec = skb_dst(skb) && dst_xfrm(skb_dst(skb));
	/* Try to offload checksum if possible */
	offload_csum = !!(need_csum &&
			  !need_ipsec &&
			  (skb->dev->features &
			   (is_ipv6 ? (NETIF_F_HW_CSUM | NETIF_F_IPV6_CSUM) :
				      (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM))));

	features &= skb->dev->hw_enc_features;
	if (need_csum)
		features &= ~NETIF_F_SCTP_CRC;

	/* The only checksum offload we care about from here on out is the
	 * outer one so strip the existing checksum feature flags and
	 * instead set the flag based on our outer checksum offload value.
	 */
	if (remcsum) {
		features &= ~NETIF_F_CSUM_MASK;
		if (!need_csum || offload_csum)
			features |= NETIF_F_HW_CSUM;
	}

	/* segment inner packet. */
	segs = gso_inner_segment(skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
				     mac_len);
		goto out;
	}

	gso_partial = !!(skb_shinfo(segs)->gso_type & SKB_GSO_PARTIAL);

	outer_hlen = skb_tnl_header_len(skb);
	udp_offset = outer_hlen - tnl_hlen;
	skb = segs;
	do {
		unsigned int len;

		if (remcsum)
			skb->ip_summed = CHECKSUM_NONE;

		/* Set up inner headers if we are offloading inner checksum */
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_inner_headers(skb);
			skb->encapsulation = 1;
		}

		skb->mac_len = mac_len;
		skb->protocol = protocol;

		__skb_push(skb, outer_hlen);
		skb_reset_mac_header(skb);
		skb_set_network_header(skb, mac_len);
		skb_set_transport_header(skb, udp_offset);
		len = skb->len - udp_offset;
		uh = udp_hdr(skb);

		/* If we are only performing partial GSO the inner header
		 * will be using a length value equal to only one MSS sized
		 * segment instead of the entire frame.
		 */
		if (gso_partial && skb_is_gso(skb)) {
			uh->len = htons(skb_shinfo(skb)->gso_size +
					SKB_GSO_CB(skb)->data_offset +
					skb->head - (unsigned char *)uh);
		} else {
			uh->len = htons(len);
		}

		if (!need_csum)
			continue;

		uh->check = ~csum_fold(csum_add(partial,
				       (__force __wsum)htonl(len)));

		if (skb->encapsulation || !offload_csum) {
			uh->check = gso_make_checksum(skb, ~uh->check);
			if (uh->check == 0)
				uh->check = CSUM_MANGLED_0;
		} else {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_transport_header(skb) - skb->head;
			skb->csum_offset = offsetof(struct udphdr, check);
		}
	} while ((skb = skb->next));
out:
	return segs;
}

struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
				       netdev_features_t features,
				       bool is_ipv6)
{
	const struct net_offload __rcu **offloads;
	__be16 protocol = skb->protocol;
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct sk_buff *(*gso_inner_segment)(struct sk_buff *skb,
					     netdev_features_t features);

	rcu_read_lock();

	switch (skb->inner_protocol_type) {
	case ENCAP_TYPE_ETHER:
		protocol = skb->inner_protocol;
		gso_inner_segment = skb_mac_gso_segment;
		break;
	case ENCAP_TYPE_IPPROTO:
		offloads = is_ipv6 ? inet6_offloads : inet_offloads;
		ops = rcu_dereference(offloads[skb->inner_ipproto]);
		if (!ops || !ops->callbacks.gso_segment)
			goto out_unlock;
		gso_inner_segment = ops->callbacks.gso_segment;
		break;
	default:
		goto out_unlock;
	}

	segs = __skb_udp_tunnel_segment(skb, features, gso_inner_segment,
					protocol, is_ipv6);

out_unlock:
	rcu_read_unlock();

	return segs;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
190
191static void __udpv4_gso_segment_csum(struct sk_buff *seg,
192				     __be32 *oldip, __be32 *newip,
193				     __be16 *oldport, __be16 *newport)
194{
195	struct udphdr *uh;
196	struct iphdr *iph;
197
198	if (*oldip == *newip && *oldport == *newport)
199		return;
200
201	uh = udp_hdr(seg);
202	iph = ip_hdr(seg);
203
204	if (uh->check) {
205		inet_proto_csum_replace4(&uh->check, seg, *oldip, *newip,
206					 true);
207		inet_proto_csum_replace2(&uh->check, seg, *oldport, *newport,
208					 false);
209		if (!uh->check)
210			uh->check = CSUM_MANGLED_0;
211	}
212	*oldport = *newport;
213
214	csum_replace4(&iph->check, *oldip, *newip);
215	*oldip = *newip;
216}
217
218static struct sk_buff *__udpv4_gso_segment_list_csum(struct sk_buff *segs)
219{
220	struct sk_buff *seg;
221	struct udphdr *uh, *uh2;
222	struct iphdr *iph, *iph2;
223
224	seg = segs;
225	uh = udp_hdr(seg);
226	iph = ip_hdr(seg);
227
228	if ((udp_hdr(seg)->dest == udp_hdr(seg->next)->dest) &&
229	    (udp_hdr(seg)->source == udp_hdr(seg->next)->source) &&
230	    (ip_hdr(seg)->daddr == ip_hdr(seg->next)->daddr) &&
231	    (ip_hdr(seg)->saddr == ip_hdr(seg->next)->saddr))
232		return segs;
233
234	while ((seg = seg->next)) {
235		uh2 = udp_hdr(seg);
236		iph2 = ip_hdr(seg);
237
238		__udpv4_gso_segment_csum(seg,
239					 &iph2->saddr, &iph->saddr,
240					 &uh2->source, &uh->source);
241		__udpv4_gso_segment_csum(seg,
242					 &iph2->daddr, &iph->daddr,
243					 &uh2->dest, &uh->dest);
244	}
245
246	return segs;
247}
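csum_replace4() and inet_proto_csum_replace4() are applications of RFC 1624 equation 3, HC' = ~(~HC + ~m + m'): only the changed 32-bit word is folded out of and back into the stored checksum, without re-summing the rest of the header. A small self-verifying userspace model of that update, using toy headers rather than the kernel helpers:

/* Userspace model of csum_replace4(): incremental update of a folded
 * 16-bit checksum when one 32-bit word (e.g. an IP address) changes.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* checksum field as stored in the header: one's complement of the sum */
static uint16_t hdr_check(const uint32_t *words, int n)
{
	uint32_t sum = 0;
	int i;

	for (i = 0; i < n; i++)
		sum += fold32((words[i] >> 16) + (words[i] & 0xffff));
	return ~fold32(sum) & 0xffff;
}

static uint16_t csum_replace4_demo(uint16_t check, uint32_t from, uint32_t to)
{
	/* HC' = ~(~HC + ~m + m'), the 32-bit operand handled as two
	 * 16-bit words (RFC 1624, eqn. 3)
	 */
	uint32_t sum = (uint16_t)~check;

	sum += fold32((~from >> 16) + (~from & 0xffff));
	sum += fold32((to >> 16) + (to & 0xffff));
	return ~fold32(sum) & 0xffff;
}

int main(void)
{
	uint32_t hdr[3] = { 0xc0000201, 0xc0000202, 0x45000054 };
	uint16_t check = hdr_check(hdr, 3);

	/* rewrite the first "address" and patch the checksum in place */
	uint16_t patched = csum_replace4_demo(check, hdr[0], 0x0a000001);

	hdr[0] = 0x0a000001;
	assert(patched == hdr_check(hdr, 3));
	printf("patched check = 0x%04x\n", patched);
	return 0;
}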

static struct sk_buff *__udp_gso_segment_list(struct sk_buff *skb,
					      netdev_features_t features,
					      bool is_ipv6)
{
	unsigned int mss = skb_shinfo(skb)->gso_size;

	skb = skb_segment_list(skb, features, skb_mac_header_len(skb));
	if (IS_ERR(skb))
		return skb;

	udp_hdr(skb)->len = htons(sizeof(struct udphdr) + mss);

	return is_ipv6 ? skb : __udpv4_gso_segment_list_csum(skb);
}

struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features, bool is_ipv6)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST)
		return __udp_gso_segment_list(gso_skb, features, is_ipv6);

	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);

	skb_pull(gso_skb, sizeof(*uh));

	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (IS_ERR_OR_NULL(segs)) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	seg = segs;
	uh = udp_hdr(seg);

	/* preserve TX timestamp flags and TS key for first segment */
	skb_shinfo(seg)->tskey = skb_shinfo(gso_skb)->tskey;
	skb_shinfo(seg)->tx_flags |=
			(skb_shinfo(gso_skb)->tx_flags & SKBTX_ANY_TSTAMP);

	/* compute checksum adjustment based on old length versus new */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;

		seg = seg->next;
		uh = udp_hdr(seg);
	}

	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;

	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &sk->sk_wmem_alloc));
	}
	return segs;
}
EXPORT_SYMBOL_GPL(__udp_gso_segment);
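__udp_gso_segment() is the kernel half of UDP GSO; userspace opts in with the UDP_SEGMENT socket option (or a per-call cmsg), which to my understanding landed shortly after v4.17, around v4.18. A minimal sender sketch under that assumption; the address, port, and sizes are arbitrary and error handling is trimmed:

/* Minimal UDP GSO sender sketch: one large write is split into
 * mss-sized datagrams by __udp_gso_segment (or by the NIC).
 */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef UDP_SEGMENT
#define UDP_SEGMENT 103		/* from linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),
		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
	};
	int gso_size = 1400;	/* payload bytes per segment (the "mss") */
	char buf[5600];		/* will be sent as 4 UDP datagrams */

	memset(buf, 'x', sizeof(buf));
	if (setsockopt(fd, SOL_UDP, UDP_SEGMENT, &gso_size, sizeof(gso_size)))
		perror("setsockopt(UDP_SEGMENT)");
	if (sendto(fd, buf, sizeof(buf), 0,
		   (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("sendto");
	close(fd);
	return 0;
}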

static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	__wsum csum;
	struct udphdr *uh;
	struct iphdr *iph;

	if (skb->encapsulation &&
	    (skb_shinfo(skb)->gso_type &
	     (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
		segs = skb_udp_tunnel_segment(skb, features, false);
		goto out;
	}

	if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
		goto out;

	if (!pskb_may_pull(skb, sizeof(struct udphdr)))
		goto out;

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
	    !skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST))
		return __udp_gso_segment(skb, features, false);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	/* Do software UFO. Complete and fill in the UDP checksum as
	 * HW cannot do checksum of UDP packets sent as multiple
	 * IP fragments.
	 */

	uh = udp_hdr(skb);
	iph = ip_hdr(skb);

	uh->check = 0;
	csum = skb_checksum(skb, 0, skb->len, 0);
	uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
	if (uh->check == 0)
		uh->check = CSUM_MANGLED_0;

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* If there is no outer header we can fake a checksum offload
	 * due to the fact that we have already done the checksum in
	 * software prior to segmenting the frame.
	 */
	if (!skb->encap_hdr_csum)
		features |= NETIF_F_HW_CSUM;

	/* Fragment the skb. IP headers of the fragments are updated in
	 * inet_gso_segment()
	 */
	segs = skb_segment(skb, features);
out:
	return segs;
}

static int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;

	/* sk ownership - if any - completely transferred to the aggregated packet */
	skb->destructor = NULL;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}

#define UDP_GRO_CNT_MAX 64
static struct sk_buff *udp_gro_receive_segment(struct list_head *head,
					       struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sk_buff *pp = NULL;
	struct udphdr *uh2;
	struct sk_buff *p;
	unsigned int ulen;
	int ret = 0;

	/* requires a non-zero csum, for symmetry with GSO */
	if (!uh->check) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	/* Do not deal with padded or malicious packets, sorry! */
	ulen = ntohs(uh->len);
	if (ulen <= sizeof(*uh) || ulen != skb_gro_len(skb)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}
	/* pull encapsulating udp header */
	skb_gro_pull(skb, sizeof(struct udphdr));

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = udp_hdr(p);

		/* Match ports only, as csum is always non zero */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		if (NAPI_GRO_CB(skb)->is_flist != NAPI_GRO_CB(p)->is_flist) {
			NAPI_GRO_CB(skb)->flush = 1;
			return p;
		}

		/* Terminate the flow on len mismatch or if it grows "too much".
		 * Under small packet flood GRO count could otherwise grow a lot
		 * leading to excessive truesize values.
		 * On len mismatch merge the first packet shorter than gso_size,
		 * otherwise complete the GRO packet.
		 */
		if (ulen > ntohs(uh2->len)) {
			pp = p;
		} else {
			if (NAPI_GRO_CB(skb)->is_flist) {
				if (!pskb_may_pull(skb, skb_gro_offset(skb))) {
					NAPI_GRO_CB(skb)->flush = 1;
					return NULL;
				}
				if ((skb->ip_summed != p->ip_summed) ||
				    (skb->csum_level != p->csum_level)) {
					NAPI_GRO_CB(skb)->flush = 1;
					return NULL;
				}
				ret = skb_gro_receive_list(p, skb);
			} else {
				skb_gro_postpull_rcsum(skb, uh,
						       sizeof(struct udphdr));

				ret = skb_gro_receive(p, skb);
			}
		}

		if (ret || ulen != ntohs(uh2->len) ||
		    NAPI_GRO_CB(p)->count >= UDP_GRO_CNT_MAX)
			pp = p;

		return pp;
	}

	/* mismatch, but we never need to flush */
	return NULL;
}

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct udphdr *uh2;
	unsigned int off = skb_gro_offset(skb);
	int flush = 1;

	/* we can do L4 aggregation only if the packet can't land in a tunnel
	 * otherwise we could corrupt the inner stream
	 */
	NAPI_GRO_CB(skb)->is_flist = 0;
	if (!sk || !udp_sk(sk)->gro_receive) {
		if (skb->dev->features & NETIF_F_GRO_FRAGLIST)
			NAPI_GRO_CB(skb)->is_flist = sk ? !udp_sk(sk)->gro_enabled : 1;

		if ((!sk && (skb->dev->features & NETIF_F_GRO_UDP_FWD)) ||
		    (sk && udp_sk(sk)->gro_enabled) || NAPI_GRO_CB(skb)->is_flist)
			return call_gro_receive(udp_gro_receive_segment, head, skb);

		/* no GRO, so be sure to flush the current packet */
		goto out;
	}

	if (NAPI_GRO_CB(skb)->encap_mark ||
	    (uh->check && skb->ip_summed != CHECKSUM_PARTIAL &&
	     NAPI_GRO_CB(skb)->csum_cnt == 0 &&
	     !NAPI_GRO_CB(skb)->csum_valid))
		goto out;

	/* mark that this skb passed once through the tunnel gro layer */
	NAPI_GRO_CB(skb)->encap_mark = 1;

	flush = 0;

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		uh2 = (struct udphdr *)(p->data + off);

		/* Match ports, and require the checksums to be either
		 * both zero or both nonzero.
		 */
		if ((*(u32 *)&uh->source != *(u32 *)&uh2->source) ||
		    (!uh->check ^ !uh2->check)) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}
	}

	skb_gro_pull(skb, sizeof(struct udphdr)); /* pull encapsulating udp header */
	skb_gro_postpull_rcsum(skb, uh, sizeof(struct udphdr));
	pp = call_gro_receive_sk(udp_sk(sk)->gro_receive, sk, head, skb);

out:
	skb_gro_flush_final(skb, pp, flush);
	return pp;
}
EXPORT_SYMBOL(udp_gro_receive);

static struct sock *udp4_gro_lookup_skb(struct sk_buff *skb, __be16 sport,
					__be16 dport)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	struct net *net = dev_net(skb->dev);

	return __udp4_lib_lookup(net, iph->saddr, sport,
				 iph->daddr, dport, inet_iif(skb),
				 inet_sdif(skb), net->ipv4.udp_table, NULL);
}

INDIRECT_CALLABLE_SCOPE
struct sk_buff *udp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sock *sk = NULL;
	struct sk_buff *pp;

	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 inet_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
					     inet_gro_compute_pseudo);
skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 0;

	if (static_branch_unlikely(&udp_encap_needed_key))
		sk = udp4_gro_lookup_skb(skb, uh->source, uh->dest);

	pp = udp_gro_receive(head, skb, uh, sk);
	return pp;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}

static int udp_gro_complete_segment(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}

int udp_gro_complete(struct sk_buff *skb, int nhoff,
		     udp_lookup_t lookup)
{
	__be16 newlen = htons(skb->len - nhoff);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);
	struct sock *sk;
	int err;

	uh->len = newlen;

	sk = INDIRECT_CALL_INET(lookup, udp6_lib_lookup_skb,
				udp4_lib_lookup_skb, skb, uh->source, uh->dest);
	if (sk && udp_sk(sk)->gro_complete) {
		skb_shinfo(skb)->gso_type = uh->check ? SKB_GSO_UDP_TUNNEL_CSUM
					: SKB_GSO_UDP_TUNNEL;

		/* clear the encap mark, so that inner frag_list gro_complete
		 * can take place
		 */
		NAPI_GRO_CB(skb)->encap_mark = 0;

		/* Set encapsulation before calling into inner gro_complete()
		 * functions to make them set up the inner offsets.
		 */
		skb->encapsulation = 1;
		err = udp_sk(sk)->gro_complete(sk, skb,
				nhoff + sizeof(struct udphdr));
	} else {
		err = udp_gro_complete_segment(skb);
	}

	if (skb->remcsum_offload)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM;

	return err;
}
EXPORT_SYMBOL(udp_gro_complete);

INDIRECT_CALLABLE_SCOPE int udp4_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	/* do fraglist only if there is no outer UDP encap (or we already processed it) */
	if (NAPI_GRO_CB(skb)->is_flist && !NAPI_GRO_CB(skb)->encap_mark) {
		uh->len = htons(skb->len - nhoff);

		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
				skb->csum_level++;
		} else {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = 0;
		}

		return 0;
	}

	if (uh->check)
		uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr,
					  iph->daddr, 0);

	return udp_gro_complete(skb, nhoff, udp4_lib_lookup_skb);
}

static const struct net_offload udpv4_offload = {
	.callbacks = {
		.gso_segment = udp4_ufo_fragment,
		.gro_receive = udp4_gro_receive,
		.gro_complete = udp4_gro_complete,
	},
};

int __init udpv4_offload_init(void)
{
	return inet_add_offload(&udpv4_offload, IPPROTO_UDP);
}
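On the receive side, the v6.2 path can deliver coalesced datagrams to plain UDP sockets once the application enables the UDP_GRO socket option (added around v5.0, as I understand it); the original segment size is then reported via a SOL_UDP/UDP_GRO control message. A minimal receiver sketch under those assumptions, with error handling trimmed:

/* Minimal socket-level UDP GRO receiver sketch: one recvmsg() may
 * return several coalesced datagrams plus their segment size in a cmsg.
 */
#include <netinet/in.h>
#include <netinet/udp.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

#ifndef UDP_GRO
#define UDP_GRO 104		/* from linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(9000),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};
	int on = 1;
	char buf[65536], ctrl[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
	};
	struct cmsghdr *cm;
	ssize_t n;

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	setsockopt(fd, SOL_UDP, UDP_GRO, &on, sizeof(on));

	n = recvmsg(fd, &msg, 0);	/* may carry multiple segments */
	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level == SOL_UDP && cm->cmsg_type == UDP_GRO) {
			int gso_size;

			memcpy(&gso_size, CMSG_DATA(cm), sizeof(gso_size));
			printf("got %zd bytes, segment size %d\n", n, gso_size);
		}
	}
	return 0;
}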