/* SPDX-License-Identifier: GPL-2.0-or-later */

#ifndef _NET_IPV6_GRO_H
#define _NET_IPV6_GRO_H

#include <linux/indirect_call_wrapper.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <linux/skbuff.h>
#include <net/udp.h>

struct napi_gro_cb {
	/* Virtual address of skb_shinfo(skb)->frags[0].page + offset. */
	void	*frag0;

	/* Length of frag0. */
	unsigned int frag0_len;

	/* This indicates where we are processing relative to skb->data. */
	int	data_offset;

	/* This is non-zero if the packet cannot be merged with the new skb. */
	u16	flush;

	/* Save the IP ID here and check when we get to the transport layer */
	u16	flush_id;

	/* Number of segments aggregated. */
	u16	count;

	/* Used in ipv6_gro_receive() and foo-over-udp */
	u16	proto;

	/* jiffies when first packet was created/queued */
	unsigned long age;

/* Used in napi_gro_cb::free */
#define NAPI_GRO_FREE             1
#define NAPI_GRO_FREE_STOLEN_HEAD 2
	/* portion of the cb set to zero at every gro iteration */
	struct_group(zeroed,

		/* Start offset for remote checksum offload */
		u16	gro_remcsum_start;

		/* This is non-zero if the packet may be of the same flow. */
		u8	same_flow:1;

		/* Used in tunnel GRO receive */
		u8	encap_mark:1;

		/* GRO checksum is valid */
		u8	csum_valid:1;

		/* Number of checksums via CHECKSUM_UNNECESSARY */
		u8	csum_cnt:3;

		/* Free the skb? */
		u8	free:2;

		/* Used in foo-over-udp, set in udp[46]_gro_receive */
		u8	is_ipv6:1;

		/* Used in GRE, set in fou/gue_gro_receive */
		u8	is_fou:1;

		/* Used to determine if flush_id can be ignored */
		u8	is_atomic:1;

		/* Number of gro_receive callbacks this packet already went through */
		u8	recursion_counter:4;

		/* GRO is done by frag_list pointer chaining. */
		u8	is_flist:1;
	);

	/* used to support CHECKSUM_COMPLETE for tunneling protocols */
	__wsum	csum;

	/* used in skb_gro_receive() slow path */
	struct sk_buff *last;
};

#define NAPI_GRO_CB(skb) ((struct napi_gro_cb *)(skb)->cb)
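
/*
 * Example (illustrative only, not part of this header): GRO handlers
 * keep their per-packet state in skb->cb through NAPI_GRO_CB().  A
 * receive callback that decides a packet cannot be merged would
 * typically do:
 *
 *	NAPI_GRO_CB(skb)->flush = 1;
 *	NAPI_GRO_CB(skb)->same_flow = 0;
 */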

#define GRO_RECURSION_LIMIT 15
static inline int gro_recursion_inc_test(struct sk_buff *skb)
{
	return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}

typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
					       struct list_head *head,
					       struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(head, skb);
}
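
/*
 * Example (illustrative sketch): an encapsulation gro_receive handler
 * bounds recursion by calling the inner protocol's handler through
 * call_gro_receive() instead of invoking it directly.  The offload
 * lookup helper shown here is hypothetical:
 *
 *	static struct sk_buff *tunnel_gro_receive(struct list_head *head,
 *						  struct sk_buff *skb)
 *	{
 *		const struct net_offload *ops = lookup_inner_offload(skb);
 *
 *		if (!ops)
 *			return NULL;
 *		return call_gro_receive(ops->callbacks.gro_receive,
 *					head, skb);
 *	}
 *
 * call_gro_receive_sk() below is the same idea for callbacks that also
 * take a struct sock, such as UDP tunnel sockets.
 */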

typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
					    struct sk_buff *);
static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
						  struct sock *sk,
						  struct list_head *head,
						  struct sk_buff *skb)
{
	if (unlikely(gro_recursion_inc_test(skb))) {
		NAPI_GRO_CB(skb)->flush |= 1;
		return NULL;
	}

	return cb(sk, head, skb);
}

static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
	return NAPI_GRO_CB(skb)->data_offset;
}

static inline unsigned int skb_gro_len(const struct sk_buff *skb)
{
	return skb->len - NAPI_GRO_CB(skb)->data_offset;
}

static inline void skb_gro_pull(struct sk_buff *skb, unsigned int len)
{
	NAPI_GRO_CB(skb)->data_offset += len;
}

static inline void *skb_gro_header_fast(struct sk_buff *skb,
					unsigned int offset)
{
	return NAPI_GRO_CB(skb)->frag0 + offset;
}

static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
{
	return NAPI_GRO_CB(skb)->frag0_len < hlen;
}

static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
{
	NAPI_GRO_CB(skb)->frag0 = NULL;
	NAPI_GRO_CB(skb)->frag0_len = 0;
}

static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
					unsigned int offset)
{
	if (!pskb_may_pull(skb, hlen))
		return NULL;

	skb_gro_frag0_invalidate(skb);
	return skb->data + offset;
}

static inline void *skb_gro_header(struct sk_buff *skb,
					unsigned int hlen, unsigned int offset)
{
	void *ptr;

	ptr = skb_gro_header_fast(skb, offset);
	if (skb_gro_header_hard(skb, hlen))
		ptr = skb_gro_header_slow(skb, hlen, offset);
	return ptr;
}
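
/*
 * Example (illustrative sketch): the usual header-access pattern in a
 * gro_receive handler tries the frag0 fast path and falls back to
 * pulling linear data, which skb_gro_header() wraps up.  A NULL return
 * means the header cannot be reached and the packet should not be
 * aggregated (compare udp_gro_udphdr() later in this file):
 *
 *	off  = skb_gro_offset(skb);
 *	hlen = off + sizeof(struct tcphdr);
 *	th   = skb_gro_header(skb, hlen, off);
 *	if (unlikely(!th))
 *		goto out;
 */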

static inline void *skb_gro_network_header(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->frag0 ?: skb->data) +
	       skb_network_offset(skb);
}

static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct iphdr *iph = skb_gro_network_header(skb);

	return csum_tcpudp_nofold(iph->saddr, iph->daddr,
				  skb_gro_len(skb), proto, 0);
}

static inline void skb_gro_postpull_rcsum(struct sk_buff *skb,
					const void *start, unsigned int len)
{
	if (NAPI_GRO_CB(skb)->csum_valid)
		NAPI_GRO_CB(skb)->csum = wsum_negate(csum_partial(start, len,
						wsum_negate(NAPI_GRO_CB(skb)->csum)));
}
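
/*
 * Example (illustrative): a tunnel handler that advances past its own
 * header keeps the CHECKSUM_COMPLETE value consistent by pairing
 * skb_gro_pull() with skb_gro_postpull_rcsum() over the same bytes:
 *
 *	skb_gro_pull(skb, sizeof(*hdr));
 *	skb_gro_postpull_rcsum(skb, hdr, sizeof(*hdr));
 */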

/* GRO checksum functions. These are logical equivalents of the normal
 * checksum functions (in skbuff.h) except that they operate on the GRO
 * offsets and fields in sk_buff.
 */

__sum16 __skb_gro_checksum_complete(struct sk_buff *skb);

static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->gro_remcsum_start == skb_gro_offset(skb));
}

static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb,
						      bool zero_okay,
						      __sum16 check)
{
	return ((skb->ip_summed != CHECKSUM_PARTIAL ||
		skb_checksum_start_offset(skb) <
		 skb_gro_offset(skb)) &&
		!skb_at_gro_remcsum_start(skb) &&
		NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		(!zero_okay || check));
}

static inline __sum16 __skb_gro_checksum_validate_complete(struct sk_buff *skb,
							   __wsum psum)
{
	if (NAPI_GRO_CB(skb)->csum_valid &&
	    !csum_fold(csum_add(psum, NAPI_GRO_CB(skb)->csum)))
		return 0;

	NAPI_GRO_CB(skb)->csum = psum;

	return __skb_gro_checksum_complete(skb);
}
static inline void skb_gro_incr_csum_unnecessary(struct sk_buff *skb)
{
	if (NAPI_GRO_CB(skb)->csum_cnt > 0) {
		/* Consume a checksum from CHECKSUM_UNNECESSARY */
		NAPI_GRO_CB(skb)->csum_cnt--;
	} else {
		/* Update skb for CHECKSUM_UNNECESSARY and csum_level when we
		 * verified a new top level checksum or an encapsulated one
		 * during GRO. This saves work if we fall back to the normal
		 * path.
		 */
		__skb_incr_checksum_unnecessary(skb);
	}
}

#define __skb_gro_checksum_validate(skb, proto, zero_okay, check,	\
				    compute_pseudo)			\
({									\
	__sum16 __ret = 0;						\
	if (__skb_gro_checksum_validate_needed(skb, zero_okay, check))	\
		__ret = __skb_gro_checksum_validate_complete(skb,	\
				compute_pseudo(skb, proto));		\
	if (!__ret)							\
		skb_gro_incr_csum_unnecessary(skb);			\
	__ret;								\
})

#define skb_gro_checksum_validate(skb, proto, compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, false, 0, compute_pseudo)

#define skb_gro_checksum_validate_zero_check(skb, proto, check,		\
					     compute_pseudo)		\
	__skb_gro_checksum_validate(skb, proto, true, check, compute_pseudo)

#define skb_gro_checksum_simple_validate(skb)				\
	__skb_gro_checksum_validate(skb, 0, false, 0, null_compute_pseudo)
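
/*
 * Example (illustrative, modelled on the UDP offload code): a
 * gro_receive handler validates the transport checksum against the
 * pseudo-header before aggregating anything, and flushes on failure.
 * UDP permits a zero checksum, hence the zero_check variant:
 *
 *	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP,
 *						 uh->check,
 *						 inet_gro_compute_pseudo))
 *		goto flush;
 */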

static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
{
	return (NAPI_GRO_CB(skb)->csum_cnt == 0 &&
		!NAPI_GRO_CB(skb)->csum_valid);
}

static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
					      __wsum pseudo)
{
	NAPI_GRO_CB(skb)->csum = ~pseudo;
	NAPI_GRO_CB(skb)->csum_valid = 1;
}

#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo)	\
do {									\
	if (__skb_gro_checksum_convert_check(skb))			\
		__skb_gro_checksum_convert(skb,				\
					   compute_pseudo(skb, proto));	\
} while (0)
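
/*
 * Example (illustrative): once a checksum has been verified, a handler
 * may convert the skb to CHECKSUM_COMPLETE so that later layers can
 * reuse the computed sum instead of recomputing it:
 *
 *	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
 *				     inet_gro_compute_pseudo);
 */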

struct gro_remcsum {
	int offset;
	__wsum delta;
};

static inline void skb_gro_remcsum_init(struct gro_remcsum *grc)
{
	grc->offset = 0;
	grc->delta = 0;
}

static inline void *skb_gro_remcsum_process(struct sk_buff *skb, void *ptr,
					    unsigned int off, size_t hdrlen,
					    int start, int offset,
					    struct gro_remcsum *grc,
					    bool nopartial)
{
	__wsum delta;
	size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start);

	BUG_ON(!NAPI_GRO_CB(skb)->csum_valid);

	if (!nopartial) {
		NAPI_GRO_CB(skb)->gro_remcsum_start = off + hdrlen + start;
		return ptr;
	}

	ptr = skb_gro_header(skb, off + plen, off);
	if (!ptr)
		return NULL;

	delta = remcsum_adjust(ptr + hdrlen, NAPI_GRO_CB(skb)->csum,
			       start, offset);

	/* Adjust skb->csum since we changed the packet */
	NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta);

	grc->offset = off + hdrlen + offset;
	grc->delta = delta;

	return ptr;
}

static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
					   struct gro_remcsum *grc)
{
	void *ptr;
	size_t plen = grc->offset + sizeof(u16);

	if (!grc->delta)
		return;

	ptr = skb_gro_header(skb, plen, grc->offset);
	if (!ptr)
		return;

	remcsum_unadjust((__sum16 *)ptr, grc->delta);
}

#ifdef CONFIG_XFRM_OFFLOAD
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	if (PTR_ERR(pp) != -EINPROGRESS)
		NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	if (PTR_ERR(pp) != -EINPROGRESS) {
		NAPI_GRO_CB(skb)->flush |= flush;
		skb_gro_remcsum_cleanup(skb, grc);
		skb->remcsum_offload = 0;
	}
}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
	NAPI_GRO_CB(skb)->flush |= flush;
}
static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
					       struct sk_buff *pp,
					       int flush,
					       struct gro_remcsum *grc)
{
	NAPI_GRO_CB(skb)->flush |= flush;
	skb_gro_remcsum_cleanup(skb, grc);
	skb->remcsum_offload = 0;
}
#endif
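
/*
 * Example (illustrative sketch of the remote-checksum-offload flow used
 * by UDP tunnel handlers; hdr, off, start, offset and nopartial stand
 * in for tunnel-specific values): process the offload on receive, then
 * hand the result back through skb_gro_flush_final_remcsum() on the
 * exit path:
 *
 *	struct gro_remcsum grc;
 *
 *	skb_gro_remcsum_init(&grc);
 *	hdr = skb_gro_remcsum_process(skb, hdr, off, sizeof(*hdr),
 *				      start, offset, &grc, nopartial);
 *	if (!hdr)
 *		goto out;
 *	...
 *	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
 */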

INDIRECT_CALLABLE_DECLARE(struct sk_buff *ipv6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int ipv6_gro_complete(struct sk_buff *, int));
INDIRECT_CALLABLE_DECLARE(struct sk_buff *inet_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int inet_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp4_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp4_gro_complete(struct sk_buff *, int));

INDIRECT_CALLABLE_DECLARE(struct sk_buff *udp6_gro_receive(struct list_head *,
							   struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int udp6_gro_complete(struct sk_buff *, int));

#define indirect_call_gro_receive_inet(cb, f2, f1, head, skb)	\
({								\
	unlikely(gro_recursion_inc_test(skb)) ?			\
		NAPI_GRO_CB(skb)->flush |= 1, NULL :		\
		INDIRECT_CALL_INET(cb, f2, f1, head, skb);	\
})
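
/*
 * Example (illustrative; cb is a hypothetical function pointer expected
 * to be one of the two listed handlers): combine the recursion check
 * with a retpoline-friendly dispatch to the IPv6/IPv4 receivers:
 *
 *	pp = indirect_call_gro_receive_inet(cb, udp6_gro_receive,
 *					    udp4_gro_receive, head, skb);
 */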

struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
				struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);

static inline struct udphdr *udp_gro_udphdr(struct sk_buff *skb)
{
	struct udphdr *uh;
	unsigned int hlen, off;

	off  = skb_gro_offset(skb);
	hlen = off + sizeof(*uh);
	uh   = skb_gro_header(skb, hlen, off);

	return uh;
}

static inline __wsum ip6_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
	const struct ipv6hdr *iph = skb_gro_network_header(skb);

	return ~csum_unfold(csum_ipv6_magic(&iph->saddr, &iph->daddr,
					    skb_gro_len(skb), proto, 0));
}

int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);

/* Pass the currently batched GRO_NORMAL SKBs up to the stack. */
static inline void gro_normal_list(struct napi_struct *napi)
{
	if (!napi->rx_count)
		return;
	netif_receive_skb_list_internal(&napi->rx_list);
	INIT_LIST_HEAD(&napi->rx_list);
	napi->rx_count = 0;
}

/* Queue one GRO_NORMAL SKB up for list processing. If the batch size is
 * exceeded, pass the whole batch up to the stack.
 */
static inline void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
	list_add_tail(&skb->list, &napi->rx_list);
	napi->rx_count += segs;
	if (napi->rx_count >= READ_ONCE(gro_normal_batch))
		gro_normal_list(napi);
}
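
/*
 * Example (illustrative): completion paths credit each skb with the
 * number of segments it represents, so the batching limit reflects real
 * packets rather than aggregates.  A packet that was never aggregated
 * counts as one:
 *
 *	gro_normal_one(napi, skb, 1);
 *
 * while a fully built GRO skb is credited with its segment count:
 *
 *	gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
 */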

#endif /* _NET_IPV6_GRO_H */