/*
 * Routines having to do with the 'struct sk_buff' memory handlers.
 *
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *		Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 * Fixes:
 *	Alan Cox	:	Fixed the worst of the load
 *				balancer bugs.
 *	Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman :	Timestamp fixes.
 *	Alan Cox	:	Changed buffer format.
 *	Alan Cox	:	destructor hook for AF_UNIX etc.
 *	Linus Torvalds	:	Better skb_clone.
 *	Alan Cox	:	Added skb_copy.
 *	Alan Cox	:	Added all the changed routines Linus
 *				only put in the headers
 *	Ray VanTassle	:	Fixed --skb->lock in free
 *	Alan Cox	:	skb_copy copy arp field
 *	Andi Kleen	:	slabified it.
 *	Robert Olsson	:	Removed skb_head_pool
 *
 * NOTE:
 *	The __skb_ routines should be called with interrupts
 * disabled, or you better be *real* sure that the operation is atomic
 * with respect to whatever list is being frobbed (e.g. via lock_sock()
 * or via disabling bottom half handlers, etc).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * The functions in this file will not compile correctly with gcc 2.4.x
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/sctp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>
#include <linux/if_vlan.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <linux/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>
#include <linux/capability.h>
#include <linux/user_namespace.h>

struct kmem_cache *skbuff_head_cache __ro_after_init;
static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
EXPORT_SYMBOL(sysctl_max_skb_frags);

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
		      const char msg[])
{
	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
		 msg, addr, skb->len, sz, skb->head, skb->data,
		 (unsigned long)skb->tail, (unsigned long)skb->end,
		 skb->dev ? skb->dev->name : "<NULL>");
	BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
	skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If they are and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
	__kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
			       unsigned long ip, bool *pfmemalloc)
{
	void *obj;
	bool ret_pfmemalloc = false;

	/*
	 * Try a regular allocation; when that fails and we're not entitled
	 * to the reserves, fail.
	 */
	obj = kmalloc_node_track_caller(size,
					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
					node);
	if (obj || !(gfp_pfmemalloc_allowed(flags)))
		goto out;

	/* Try again but now we are using pfmemalloc reserves */
	ret_pfmemalloc = true;
	obj = kmalloc_node_track_caller(size, flags, node);

out:
	if (pfmemalloc)
		*pfmemalloc = ret_pfmemalloc;

	return obj;
}

/* Allocate a new skbuff. We do this ourselves so we can fill in a few
 * 'private' fields and also do memory statistics to find all the
 * [BEEP] leaks.
 */

/**
 * __alloc_skb - allocate a network buffer
 * @size: size to allocate
 * @gfp_mask: allocation mask
 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 *	instead of head cache and allocate a cloned (child) skb.
 *	If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 *	allocations in case the data is required for writeback
 * @node: numa node to allocate memory on
 *
 * Allocate a new &sk_buff. The returned buffer has no headroom and a
 * tail room of at least @size bytes. The object has a reference count
 * of one. The return is the buffer. On a failure the return is %NULL.
 *
 * Buffers may only be allocated from interrupts using a @gfp_mask of
 * %GFP_ATOMIC.
 */
struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
			    int flags, int node)
{
	struct kmem_cache *cache;
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	u8 *data;
	bool pfmemalloc;

	cache = (flags & SKB_ALLOC_FCLONE)
		? skbuff_fclone_cache : skbuff_head_cache;

	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
		gfp_mask |= __GFP_MEMALLOC;

	/* Get the HEAD */
	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
	if (!skb)
		goto out;
	prefetchw(skb);

	/* We do our best to align skb_shared_info on a separate cache
	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
	 * Both skb->head and skb_shared_info are cache line aligned.
	 */
	size = SKB_DATA_ALIGN(size);
	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
	if (!data)
		goto nodata;
	/* kmalloc(size) might give us more room than requested.
	 * Put skb_shared_info exactly at the end of allocated zone,
	 * to allow max possible filling before reallocation.
	 */
	size = SKB_WITH_OVERHEAD(ksize(data));
	prefetchw(data + size);

	/*
	 * Only clear those fields we need to clear, not those that we will
	 * actually initialise below. Hence, don't put any more fields after
	 * the tail pointer in struct sk_buff!
	 */
	memset(skb, 0, offsetof(struct sk_buff, tail));
	/* Account for allocated memory : skb + skb->head */
	skb->truesize = SKB_TRUESIZE(size);
	skb->pfmemalloc = pfmemalloc;
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	if (flags & SKB_ALLOC_FCLONE) {
		struct sk_buff_fclones *fclones;

		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		skb->fclone = SKB_FCLONE_ORIG;
		refcount_set(&fclones->fclone_ref, 1);

		fclones->skb2.fclone = SKB_FCLONE_CLONE;
	}
out:
	return skb;
nodata:
	kmem_cache_free(cache, skb);
	skb = NULL;
	goto out;
}
EXPORT_SYMBOL(__alloc_skb);
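
/* Illustrative usage sketch (not part of this file): most callers reach
 * __alloc_skb() through the alloc_skb() wrapper in <linux/skbuff.h> and
 * then partition the buffer, roughly:
 *
 *	struct sk_buff *skb = alloc_skb(headroom + len, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, headroom);	// open headroom first
 *		skb_put(skb, len);		// then claim len bytes of data
 *	}
 *
 * Here headroom/len are caller-chosen values, not names from this file.
 */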

/**
 * __build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of data, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator
 * or vmalloc().
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, driver allocates only data buffer where NIC put incoming frame
 *  Driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
 *  After IO, driver calls build_skb(), to allocate sk_buff and populate it
 *  before giving packet to stack.
 *  RX rings only contain data buffers, not full skbs.
 */
struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
	struct skb_shared_info *shinfo;
	struct sk_buff *skb;
	unsigned int size = frag_size ? : ksize(data);

	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
	if (!skb)
		return NULL;

	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));

	memset(skb, 0, offsetof(struct sk_buff, tail));
	skb->truesize = SKB_TRUESIZE(size);
	refcount_set(&skb->users, 1);
	skb->head = data;
	skb->data = data;
	skb_reset_tail_pointer(skb);
	skb->end = skb->tail + size;
	skb->mac_header = (typeof(skb->mac_header))~0U;
	skb->transport_header = (typeof(skb->transport_header))~0U;

	/* make sure we initialize shinfo sequentially */
	shinfo = skb_shinfo(skb);
	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
	atomic_set(&shinfo->dataref, 1);

	return skb;
}

/* build_skb() is a wrapper over __build_skb() that additionally sets
 * skb->head_frag and skb->pfmemalloc.
 * This means that if @frag_size is not zero, then @data must be backed
 * by a page fragment, not kmalloc() or vmalloc()
 */
struct sk_buff *build_skb(void *data, unsigned int frag_size)
{
	struct sk_buff *skb = __build_skb(data, frag_size);

	if (skb && frag_size) {
		skb->head_frag = 1;
		if (page_is_pfmemalloc(virt_to_head_page(data)))
			skb->pfmemalloc = 1;
	}
	return skb;
}
EXPORT_SYMBOL(build_skb);
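
/* Illustrative driver-side pairing (a sketch, not from this file): an RX
 * path that allocates a page fragment up front and builds the skb only
 * after DMA completes might look roughly like:
 *
 *	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
 *				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *data = netdev_alloc_frag(truesize);
 *	struct sk_buff *skb = data ? build_skb(data, truesize) : NULL;
 *
 *	if (skb)
 *		skb_reserve(skb, NET_SKB_PAD);	// open the driver headroom
 *
 * buf_len is a hypothetical per-ring buffer length, not a name from
 * this file.
 */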

#define NAPI_SKB_CACHE_SIZE	64

struct napi_alloc_cache {
	struct page_frag_cache page;
	unsigned int skb_count;
	void *skb_cache[NAPI_SKB_CACHE_SIZE];
};

static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);

static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	void *data;

	local_irq_save(flags);
	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, fragsz, gfp_mask);
	local_irq_restore(flags);
	return data;
}

/**
 * netdev_alloc_frag - allocate a page fragment
 * @fragsz: fragment size
 *
 * Allocates a frag from a page for receive buffer.
 * Uses GFP_ATOMIC allocations.
 */
void *netdev_alloc_frag(unsigned int fragsz)
{
	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(netdev_alloc_frag);

static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}

void *napi_alloc_frag(unsigned int fragsz)
{
	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
}
EXPORT_SYMBOL(napi_alloc_frag);

/**
 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
 * @dev: network device to receive on
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb
 *
 * Allocate a new &sk_buff and assign it a usage count of one. The
 * buffer has NET_SKB_PAD headroom built in. Users should allocate
 * the headroom they think they need without accounting for the
 * built in space. The built in space is used for optimisations.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
				   gfp_t gfp_mask)
{
	struct page_frag_cache *nc;
	unsigned long flags;
	struct sk_buff *skb;
	bool pfmemalloc;
	void *data;

	len += NET_SKB_PAD;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	local_irq_save(flags);

	nc = this_cpu_ptr(&netdev_alloc_cache);
	data = page_frag_alloc(nc, len, gfp_mask);
	pfmemalloc = nc->pfmemalloc;

	local_irq_restore(flags);

	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD);
	skb->dev = dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__netdev_alloc_skb);
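
/* Illustrative usage (a sketch, not from this file): driver RX paths
 * normally call the netdev_alloc_skb() wrapper from <linux/skbuff.h>,
 * which passes GFP_ATOMIC here:
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
 *
 *	if (!skb)
 *		return;				// rx drop; retry next packet
 *	skb_reserve(skb, NET_IP_ALIGN);		// align the IP header
 *
 * pkt_len is a hypothetical frame length, not a name from this file.
 */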

/**
 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 * @napi: napi instance this buffer was allocated for
 * @len: length to allocate
 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 *
 * Allocate a new sk_buff for use in NAPI receive. This buffer will
 * attempt to allocate the head from a special reserved region used
 * only for NAPI Rx allocation. By doing this we can save several
 * CPU cycles by avoiding having to disable and re-enable IRQs.
 *
 * %NULL is returned if there is no free memory.
 */
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
				 gfp_t gfp_mask)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
	struct sk_buff *skb;
	void *data;

	len += NET_SKB_PAD + NET_IP_ALIGN;

	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
		if (!skb)
			goto skb_fail;
		goto skb_success;
	}

	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	len = SKB_DATA_ALIGN(len);

	if (sk_memalloc_socks())
		gfp_mask |= __GFP_MEMALLOC;

	data = page_frag_alloc(&nc->page, len, gfp_mask);
	if (unlikely(!data))
		return NULL;

	skb = __build_skb(data, len);
	if (unlikely(!skb)) {
		skb_free_frag(data);
		return NULL;
	}

	/* use OR instead of assignment to avoid clearing of bits in mask */
	if (nc->page.pfmemalloc)
		skb->pfmemalloc = 1;
	skb->head_frag = 1;

skb_success:
	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	skb->dev = napi->dev;

skb_fail:
	return skb;
}
EXPORT_SYMBOL(__napi_alloc_skb);
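
/* Illustrative usage (a sketch, not from this file): inside a NAPI poll
 * handler, the napi_alloc_skb() wrapper is the usual entry point:
 *
 *	struct sk_buff *skb = napi_alloc_skb(&priv->napi, frame_len);
 *
 * Because it runs in softirq context, it can use the per-CPU cache above
 * without the IRQ save/restore that __netdev_alloc_skb() needs. priv and
 * frame_len are hypothetical driver names, not names from this file.
 */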

void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
		     int size, unsigned int truesize)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_add_rx_frag);

void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
			  unsigned int truesize)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

	skb_frag_size_add(frag, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += truesize;
}
EXPORT_SYMBOL(skb_coalesce_rx_frag);

static void skb_drop_list(struct sk_buff **listp)
{
	kfree_skb_list(*listp);
	*listp = NULL;
}

static inline void skb_drop_fraglist(struct sk_buff *skb)
{
	skb_drop_list(&skb_shinfo(skb)->frag_list);
}

static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
}

static void skb_free_head(struct sk_buff *skb)
{
	unsigned char *head = skb->head;

	if (skb->head_frag)
		skb_free_frag(head);
	else
		kfree(head);
}

static void skb_release_data(struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int i;

	if (skb->cloned &&
	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
			      &shinfo->dataref))
		return;

	for (i = 0; i < shinfo->nr_frags; i++)
		__skb_frag_unref(&shinfo->frags[i]);

	if (shinfo->frag_list)
		kfree_skb_list(shinfo->frag_list);

	skb_zcopy_clear(skb, true);
	skb_free_head(skb);
}

/*
 * Free an skbuff by memory without cleaning the state.
 */
static void kfree_skbmem(struct sk_buff *skb)
{
	struct sk_buff_fclones *fclones;

	switch (skb->fclone) {
	case SKB_FCLONE_UNAVAILABLE:
		kmem_cache_free(skbuff_head_cache, skb);
		return;

	case SKB_FCLONE_ORIG:
		fclones = container_of(skb, struct sk_buff_fclones, skb1);

		/* We usually free the clone (TX completion) before the
		 * original skb. This test would have no chance to be true
		 * for the clone, while here, branch prediction will be good.
		 */
		if (refcount_read(&fclones->fclone_ref) == 1)
			goto fastpath;
		break;

	default: /* SKB_FCLONE_CLONE */
		fclones = container_of(skb, struct sk_buff_fclones, skb2);
		break;
	}
	if (!refcount_dec_and_test(&fclones->fclone_ref))
		return;
fastpath:
	kmem_cache_free(skbuff_fclone_cache, fclones);
}

void skb_release_head_state(struct sk_buff *skb)
{
	skb_dst_drop(skb);
	secpath_reset(skb);
	if (skb->destructor) {
		WARN_ON(in_irq());
		skb->destructor(skb);
	}
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	nf_conntrack_put(skb_nfct(skb));
#endif
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	nf_bridge_put(skb->nf_bridge);
#endif
}

/* Free everything but the sk_buff shell. */
static void skb_release_all(struct sk_buff *skb)
{
	skb_release_head_state(skb);
	if (likely(skb->head))
		skb_release_data(skb);
}

/**
 * __kfree_skb - private function
 * @skb: buffer
 *
 * Free an sk_buff. Release anything attached to the buffer.
 * Clean the state. This is an internal helper function. Users should
 * always call kfree_skb.
 */

void __kfree_skb(struct sk_buff *skb)
{
	skb_release_all(skb);
	kfree_skbmem(skb);
}
EXPORT_SYMBOL(__kfree_skb);

/**
 * kfree_skb - free an sk_buff
 * @skb: buffer to free
 *
 * Drop a reference to the buffer and free it if the usage count has
 * hit zero.
 */
void kfree_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_kfree_skb(skb, __builtin_return_address(0));
	__kfree_skb(skb);
}
EXPORT_SYMBOL(kfree_skb);

void kfree_skb_list(struct sk_buff *segs)
{
	while (segs) {
		struct sk_buff *next = segs->next;

		kfree_skb(segs);
		segs = next;
	}
}
EXPORT_SYMBOL(kfree_skb_list);

/**
 * skb_tx_error - report an sk_buff xmit error
 * @skb: buffer that triggered an error
 *
 * Report xmit error if a device callback is tracking this skb.
 * skb must be freed afterwards.
 */
void skb_tx_error(struct sk_buff *skb)
{
	skb_zcopy_clear(skb, true);
}
EXPORT_SYMBOL(skb_tx_error);

/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that.
 */
void consume_skb(struct sk_buff *skb)
{
	if (!skb_unref(skb))
		return;

	trace_consume_skb(skb);
	__kfree_skb(skb);
}
EXPORT_SYMBOL(consume_skb);

/**
 * __consume_stateless_skb - free an skbuff, assuming it is stateless
 * @skb: buffer to free
 *
 * Like consume_skb(), but this variant assumes that this is the last
 * skb reference and all the head states have already been dropped.
 */
void __consume_stateless_skb(struct sk_buff *skb)
{
	trace_consume_skb(skb);
	skb_release_data(skb);
	kfree_skbmem(skb);
}

void __kfree_skb_flush(void)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* flush skb_cache if it contains objects */
	if (nc->skb_count) {
		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}

static inline void _kfree_skb_defer(struct sk_buff *skb)
{
	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);

	/* drop skb->head and call any destructors for packet */
	skb_release_all(skb);

	/* record skb to CPU local list */
	nc->skb_cache[nc->skb_count++] = skb;

#ifdef CONFIG_SLUB
	/* SLUB writes into objects when freeing */
	prefetchw(skb);
#endif

	/* flush skb_cache if it is filled */
	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
				     nc->skb_cache);
		nc->skb_count = 0;
	}
}
void __kfree_skb_defer(struct sk_buff *skb)
{
	_kfree_skb_defer(skb);
}

void napi_consume_skb(struct sk_buff *skb, int budget)
{
	if (unlikely(!skb))
		return;

	/* Zero budget indicates a non-NAPI context called us, like netpoll */
	if (unlikely(!budget)) {
		dev_consume_skb_any(skb);
		return;
	}

	if (!skb_unref(skb))
		return;

	/* if reaching here SKB is ready to free */
	trace_consume_skb(skb);

	/* if SKB is a clone, don't handle this case */
	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
		__kfree_skb(skb);
		return;
	}

	_kfree_skb_defer(skb);
}
EXPORT_SYMBOL(napi_consume_skb);

/* Make sure a field is enclosed inside headers_start/headers_end section */
#define CHECK_SKB_FIELD(field) \
	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
		     offsetof(struct sk_buff, headers_start));	\
	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
		     offsetof(struct sk_buff, headers_end));	\

static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	new->tstamp = old->tstamp;
	/* We do not copy old->sk */
	new->dev = old->dev;
	memcpy(new->cb, old->cb, sizeof(old->cb));
	skb_dst_copy(new, old);
#ifdef CONFIG_XFRM
	new->sp = secpath_get(old->sp);
#endif
	__nf_copy(new, old, false);

	/* Note : this field could be in headers_start/headers_end section
	 * It is not yet because we do not want to have a 16 bit hole
	 */
	new->queue_mapping = old->queue_mapping;

	memcpy(&new->headers_start, &old->headers_start,
	       offsetof(struct sk_buff, headers_end) -
	       offsetof(struct sk_buff, headers_start));
	CHECK_SKB_FIELD(protocol);
	CHECK_SKB_FIELD(csum);
	CHECK_SKB_FIELD(hash);
	CHECK_SKB_FIELD(priority);
	CHECK_SKB_FIELD(skb_iif);
	CHECK_SKB_FIELD(vlan_proto);
	CHECK_SKB_FIELD(vlan_tci);
	CHECK_SKB_FIELD(transport_header);
	CHECK_SKB_FIELD(network_header);
	CHECK_SKB_FIELD(mac_header);
	CHECK_SKB_FIELD(inner_protocol);
	CHECK_SKB_FIELD(inner_transport_header);
	CHECK_SKB_FIELD(inner_network_header);
	CHECK_SKB_FIELD(inner_mac_header);
	CHECK_SKB_FIELD(mark);
#ifdef CONFIG_NETWORK_SECMARK
	CHECK_SKB_FIELD(secmark);
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
	CHECK_SKB_FIELD(napi_id);
#endif
#ifdef CONFIG_XPS
	CHECK_SKB_FIELD(sender_cpu);
#endif
#ifdef CONFIG_NET_SCHED
	CHECK_SKB_FIELD(tc_index);
#endif
}

/*
 * You should not add any new code to this function. Add it to
 * __copy_skb_header above instead.
 */
static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
{
#define C(x) n->x = skb->x

	n->next = n->prev = NULL;
	n->sk = NULL;
	__copy_skb_header(n, skb);

	C(len);
	C(data_len);
	C(mac_len);
	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
	n->cloned = 1;
	n->nohdr = 0;
	n->peeked = 0;
	n->destructor = NULL;
	C(tail);
	C(end);
	C(head);
	C(head_frag);
	C(data);
	C(truesize);
	refcount_set(&n->users, 1);

	atomic_inc(&(skb_shinfo(skb)->dataref));
	skb->cloned = 1;

	return n;
#undef C
}

/**
 * skb_morph - morph one skb into another
 * @dst: the skb to receive the contents
 * @src: the skb to supply the contents
 *
 * This is identical to skb_clone except that the target skb is
 * supplied by the user.
 *
 * The target skb is returned upon exit.
 */
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
{
	skb_release_all(dst);
	return __skb_clone(dst, src);
}
EXPORT_SYMBOL_GPL(skb_morph);

int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
{
	unsigned long max_pg, num_pg, new_pg, old_pg;
	struct user_struct *user;

	if (capable(CAP_IPC_LOCK) || !size)
		return 0;

	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	user = mmp->user ? : current_user();

	do {
		old_pg = atomic_long_read(&user->locked_vm);
		new_pg = old_pg + num_pg;
		if (new_pg > max_pg)
			return -ENOBUFS;
	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
		 old_pg);

	if (!mmp->user) {
		mmp->user = get_uid(user);
		mmp->num_pg = num_pg;
	} else {
		mmp->num_pg += num_pg;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(mm_account_pinned_pages);

void mm_unaccount_pinned_pages(struct mmpin *mmp)
{
	if (mmp->user) {
		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
		free_uid(mmp->user);
	}
}
EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);

struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
{
	struct ubuf_info *uarg;
	struct sk_buff *skb;

	WARN_ON_ONCE(!in_task());

	if (!sock_flag(sk, SOCK_ZEROCOPY))
		return NULL;

	skb = sock_omalloc(sk, 0, GFP_KERNEL);
	if (!skb)
		return NULL;

	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
	uarg = (void *)skb->cb;
	uarg->mmp.user = NULL;

	if (mm_account_pinned_pages(&uarg->mmp, size)) {
		kfree_skb(skb);
		return NULL;
	}

	uarg->callback = sock_zerocopy_callback;
	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
	uarg->len = 1;
	uarg->bytelen = size;
	uarg->zerocopy = 1;
	refcount_set(&uarg->refcnt, 1);
	sock_hold(sk);

	return uarg;
}
EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);

static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
{
	return container_of((void *)uarg, struct sk_buff, cb);
}

struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
					struct ubuf_info *uarg)
{
	if (uarg) {
		const u32 byte_limit = 1 << 19;	/* limit to a few TSO packets */
		u32 bytelen, next;

		/* realloc only when socket is locked (TCP, UDP cork),
		 * so uarg->len and sk_zckey access is serialized
		 */
		if (!sock_owned_by_user(sk)) {
			WARN_ON_ONCE(1);
			return NULL;
		}

		bytelen = uarg->bytelen + size;
		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
			/* TCP can create new skb to attach new uarg */
			if (sk->sk_type == SOCK_STREAM)
				goto new_alloc;
			return NULL;
		}

		next = (u32)atomic_read(&sk->sk_zckey);
		if ((u32)(uarg->id + uarg->len) == next) {
			if (mm_account_pinned_pages(&uarg->mmp, size))
				return NULL;
			uarg->len++;
			uarg->bytelen = bytelen;
			atomic_set(&sk->sk_zckey, ++next);
			sock_zerocopy_get(uarg);
			return uarg;
		}
	}

new_alloc:
	return sock_zerocopy_alloc(sk, size);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);

static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
	u32 old_lo, old_hi;
	u64 sum_len;

	old_lo = serr->ee.ee_info;
	old_hi = serr->ee.ee_data;
	sum_len = old_hi - old_lo + 1ULL + len;

	if (sum_len >= (1ULL << 32))
		return false;

	if (lo != old_hi + 1)
		return false;

	serr->ee.ee_data += len;
	return true;
}

void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
{
	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
	struct sock_exterr_skb *serr;
	struct sock *sk = skb->sk;
	struct sk_buff_head *q;
	unsigned long flags;
	u32 lo, hi;
	u16 len;

	mm_unaccount_pinned_pages(&uarg->mmp);

	/* if !len, there was only 1 call, and it was aborted
	 * so do not queue a completion notification
	 */
	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
		goto release;

	len = uarg->len;
	lo = uarg->id;
	hi = uarg->id + len - 1;

	serr = SKB_EXT_ERR(skb);
	memset(serr, 0, sizeof(*serr));
	serr->ee.ee_errno = 0;
	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
	serr->ee.ee_data = hi;
	serr->ee.ee_info = lo;
	if (!success)
		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;

	q = &sk->sk_error_queue;
	spin_lock_irqsave(&q->lock, flags);
	tail = skb_peek_tail(q);
	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
	    !skb_zerocopy_notify_extend(tail, lo, len)) {
		__skb_queue_tail(q, skb);
		skb = NULL;
	}
	spin_unlock_irqrestore(&q->lock, flags);

	sk->sk_error_report(sk);

release:
	consume_skb(skb);
	sock_put(sk);
}
EXPORT_SYMBOL_GPL(sock_zerocopy_callback);

void sock_zerocopy_put(struct ubuf_info *uarg)
{
	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
		if (uarg->callback)
			uarg->callback(uarg, uarg->zerocopy);
		else
			consume_skb(skb_from_uarg(uarg));
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put);

void sock_zerocopy_put_abort(struct ubuf_info *uarg)
{
	if (uarg) {
		struct sock *sk = skb_from_uarg(uarg)->sk;

		atomic_dec(&sk->sk_zckey);
		uarg->len--;

		sock_zerocopy_put(uarg);
	}
}
EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);

extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
				   struct iov_iter *from, size_t length);

int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
			     struct msghdr *msg, int len,
			     struct ubuf_info *uarg)
{
	struct ubuf_info *orig_uarg = skb_zcopy(skb);
	struct iov_iter orig_iter = msg->msg_iter;
	int err, orig_len = skb->len;

	/* An skb can only point to one uarg. This edge case happens when
	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
	 */
	if (orig_uarg && uarg != orig_uarg)
		return -EEXIST;

	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
		struct sock *save_sk = skb->sk;

		/* Streams do not free skb on error. Reset to prev state. */
		msg->msg_iter = orig_iter;
		skb->sk = sk;
		___pskb_trim(skb, orig_len);
		skb->sk = save_sk;
		return err;
	}

	skb_zcopy_set(skb, uarg);
	return skb->len - orig_len;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
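
/* Illustrative call sequence (a sketch, not from this file): the TCP
 * MSG_ZEROCOPY sendmsg path pairs this with sock_zerocopy_realloc(),
 * roughly:
 *
 *	uarg = sock_zerocopy_realloc(sk, size, skb_zcopy(skb));
 *	...
 *	copied = skb_zerocopy_iter_stream(sk, skb, msg, size, uarg);
 *	if (copied < 0)
 *		// fall back to copying, or sock_zerocopy_put_abort(uarg)
 */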

static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
			      gfp_t gfp_mask)
{
	if (skb_zcopy(orig)) {
		if (skb_zcopy(nskb)) {
			/* !gfp_mask callers are verified to !skb_zcopy(nskb) */
			if (!gfp_mask) {
				WARN_ON_ONCE(1);
				return -ENOMEM;
			}
			if (skb_uarg(nskb) == skb_uarg(orig))
				return 0;
			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
				return -EIO;
		}
		skb_zcopy_set(nskb, skb_uarg(orig));
	}
	return 0;
}

/**
 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
 * @skb: the skb to modify
 * @gfp_mask: allocation priority
 *
 * This must be called on SKBTX_DEV_ZEROCOPY skb.
 * It will copy all frags into kernel and drop the reference
 * to userspace pages.
 *
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
 *
 * Returns 0 on success or a negative error code on failure
 * to allocate kernel memory to copy to.
 */
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
	int num_frags = skb_shinfo(skb)->nr_frags;
	struct page *page, *head = NULL;
	int i, new_frags;
	u32 d_off;

	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
		return -EINVAL;

	if (!num_frags)
		goto release;

	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	for (i = 0; i < new_frags; i++) {
		page = alloc_page(gfp_mask);
		if (!page) {
			while (head) {
				struct page *next = (struct page *)page_private(head);
				put_page(head);
				head = next;
			}
			return -ENOMEM;
		}
		set_page_private(page, (unsigned long)head);
		head = page;
	}

	page = head;
	d_off = 0;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u32 p_off, p_len, copied;
		struct page *p;
		u8 *vaddr;

		skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
				      p, p_off, p_len, copied) {
			u32 copy, done = 0;
			vaddr = kmap_atomic(p);

			while (done < p_len) {
				if (d_off == PAGE_SIZE) {
					d_off = 0;
					page = (struct page *)page_private(page);
				}
				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
				memcpy(page_address(page) + d_off,
				       vaddr + p_off + done, copy);
				done += copy;
				d_off += copy;
			}
			kunmap_atomic(vaddr);
		}
	}

	/* skb frags release userspace buffers */
	for (i = 0; i < num_frags; i++)
		skb_frag_unref(skb, i);

	/* skb frags point to kernel buffers */
	for (i = 0; i < new_frags - 1; i++) {
		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
		head = (struct page *)page_private(head);
	}
	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
	skb_shinfo(skb)->nr_frags = new_frags;

release:
	skb_zcopy_clear(skb, false);
	return 0;
}
EXPORT_SYMBOL_GPL(skb_copy_ubufs);

/**
 * skb_clone - duplicate an sk_buff
 * @skb: buffer to clone
 * @gfp_mask: allocation priority
 *
 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
 * copies share the same packet data but not structure. The new
 * buffer has a reference count of 1. If the allocation fails the
 * function returns %NULL otherwise the new buffer is returned.
 *
 * If this function is called from an interrupt, @gfp_mask must be
 * %GFP_ATOMIC.
 */

struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff_fclones *fclones = container_of(skb,
						       struct sk_buff_fclones,
						       skb1);
	struct sk_buff *n;

	if (skb_orphan_frags(skb, gfp_mask))
		return NULL;

	if (skb->fclone == SKB_FCLONE_ORIG &&
	    refcount_read(&fclones->fclone_ref) == 1) {
		n = &fclones->skb2;
		refcount_set(&fclones->fclone_ref, 2);
	} else {
		if (skb_pfmemalloc(skb))
			gfp_mask |= __GFP_MEMALLOC;

		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
		if (!n)
			return NULL;

		n->fclone = SKB_FCLONE_UNAVAILABLE;
	}

	return __skb_clone(n, skb);
}
EXPORT_SYMBOL(skb_clone);
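
/* Illustrative usage (a sketch, not from this file): a clone shares the
 * data area with the original, so a caller that intends to write must
 * unshare the header first, e.g.:
 *
 *	struct sk_buff *n = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (n && skb_cow(n, 0))		// make the header writable
 *		goto drop;
 */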

static void skb_headers_offset_update(struct sk_buff *skb, int off)
{
	/* Only adjust this if it actually is csum_start rather than csum */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		skb->csum_start += off;
	/* {transport,network,mac}_header and tail are relative to skb->head */
	skb->transport_header += off;
	skb->network_header += off;
	if (skb_mac_header_was_set(skb))
		skb->mac_header += off;
	skb->inner_transport_header += off;
	skb->inner_network_header += off;
	skb->inner_mac_header += off;
}

static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
{
	__copy_skb_header(new, old);

	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
}

static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
{
	if (skb_pfmemalloc(skb))
		return SKB_ALLOC_RX;
	return 0;
}

/**
 * skb_copy - create private copy of an sk_buff
 * @skb: buffer to copy
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data. This is used when the
 * caller wishes to modify the data and needs a private copy of the
 * data to alter. Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * As a by-product this function converts a non-linear &sk_buff to a linear
 * one, so that the &sk_buff becomes completely private and the caller is
 * allowed to modify all the data of the returned buffer. This means that
 * this function is not recommended for use in circumstances when only
 * the header is going to be modified. Use pskb_copy() instead.
 */

struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
{
	int headerlen = skb_headroom(skb);
	unsigned int size = skb_end_offset(skb) + skb->data_len;
	struct sk_buff *n = __alloc_skb(size, gfp_mask,
					skb_alloc_rx_flag(skb), NUMA_NO_NODE);

	if (!n)
		return NULL;

	/* Set the data pointer */
	skb_reserve(n, headerlen);
	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));

	copy_skb_header(n, skb);
	return n;
}
EXPORT_SYMBOL(skb_copy);

/**
 * __pskb_copy_fclone - create copy of an sk_buff with private head.
 * @skb: buffer to copy
 * @headroom: headroom of new skb
 * @gfp_mask: allocation priority
 * @fclone: if true allocate the copy of the skb from the fclone
 *	cache instead of the head cache; it is recommended to set this
 *	to true for the cases where the copy will likely be cloned
 *
 * Make a copy of both an &sk_buff and part of its data, located
 * in header. Fragmented data remain shared. This is used when
 * the caller wishes to modify only header of &sk_buff and needs
 * private copy of the header to alter. Returns %NULL on failure
 * or the pointer to the buffer on success.
 * The returned buffer has a reference count of 1.
 */

struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
				   gfp_t gfp_mask, bool fclone)
{
	unsigned int size = skb_headlen(skb) + headroom;
	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);

	if (!n)
		goto out;

	/* Set the data pointer */
	skb_reserve(n, headroom);
	/* Set the tail pointer and length */
	skb_put(n, skb_headlen(skb));
	/* Copy the bytes */
	skb_copy_from_linear_data(skb, n->data, n->len);

	n->truesize += skb->data_len;
	n->data_len = skb->data_len;
	n->len = skb->len;

	if (skb_shinfo(skb)->nr_frags) {
		int i;

		if (skb_orphan_frags(skb, gfp_mask) ||
		    skb_zerocopy_clone(n, skb, gfp_mask)) {
			kfree_skb(n);
			n = NULL;
			goto out;
		}
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
			skb_frag_ref(skb, i);
		}
		skb_shinfo(n)->nr_frags = i;
	}

	if (skb_has_frag_list(skb)) {
		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
		skb_clone_fraglist(n);
	}

	copy_skb_header(n, skb);
out:
	return n;
}
EXPORT_SYMBOL(__pskb_copy_fclone);

/**
 * pskb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @nhead: room to add at head
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
 * Expands (or creates identical copy, if @nhead and @ntail are zero)
 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
 * reference count of 1. Returns zero in the case of success, or a
 * negative error code if expansion failed. In the latter case, &sk_buff
 * is not changed.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
		     gfp_t gfp_mask)
{
	int i, osize = skb_end_offset(skb);
	int size = osize + nhead + ntail;
	long off;
	u8 *data;

	BUG_ON(nhead < 0);

	BUG_ON(skb_shared(skb));

	size = SKB_DATA_ALIGN(size);

	if (skb_pfmemalloc(skb))
		gfp_mask |= __GFP_MEMALLOC;
	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
			       gfp_mask, NUMA_NO_NODE, NULL);
	if (!data)
		goto nodata;
	size = SKB_WITH_OVERHEAD(ksize(data));

	/* Copy only real data... and, alas, header. This should be
	 * optimized for the cases when header is void.
	 */
	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);

	memcpy((struct skb_shared_info *)(data + size),
	       skb_shinfo(skb),
	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));

	/*
	 * if shinfo is shared we must drop the old head gracefully, but if it
	 * is not we can just drop the old head and let the existing refcount
	 * be since all we did is relocate the values
	 */
	if (skb_cloned(skb)) {
		if (skb_orphan_frags(skb, gfp_mask))
			goto nofrags;
		if (skb_zcopy(skb))
			refcount_inc(&skb_uarg(skb)->refcnt);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
			skb_frag_ref(skb, i);

		if (skb_has_frag_list(skb))
			skb_clone_fraglist(skb);

		skb_release_data(skb);
	} else {
		skb_free_head(skb);
	}
	off = (data + nhead) - skb->head;

	skb->head = data;
	skb->head_frag = 0;
	skb->data += off;
#ifdef NET_SKBUFF_DATA_USES_OFFSET
	skb->end = size;
	off = nhead;
#else
	skb->end = skb->head + size;
#endif
	skb->tail += off;
	skb_headers_offset_update(skb, nhead);
	skb->cloned = 0;
	skb->hdr_len = 0;
	skb->nohdr = 0;
	atomic_set(&skb_shinfo(skb)->dataref, 1);

	skb_metadata_clear(skb);

	/* It is not generally safe to change skb->truesize.
	 * For the moment, we really care of rx path, or
	 * when skb is orphaned (not attached to a socket).
	 */
	if (!skb->sk || skb->destructor == sock_edemux)
		skb->truesize += size - osize;

	return 0;

nofrags:
	kfree(data);
nodata:
	return -ENOMEM;
}
EXPORT_SYMBOL(pskb_expand_head);
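
/* Illustrative usage (a sketch, not from this file): a caller about to
 * push an encapsulation header can grow headroom first, e.g.:
 *
 *	if (skb_headroom(skb) < hdr_len &&
 *	    pskb_expand_head(skb, SKB_DATA_ALIGN(hdr_len - skb_headroom(skb)),
 *			     0, GFP_ATOMIC))
 *		goto drop;		// skb unchanged on failure
 *	__skb_push(skb, hdr_len);
 *
 * hdr_len is a hypothetical header size, not a name from this file.
 */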

/* Make private copy of skb with writable head and some headroom */

struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
{
	struct sk_buff *skb2;
	int delta = headroom - skb_headroom(skb);

	if (delta <= 0)
		skb2 = pskb_copy(skb, GFP_ATOMIC);
	else {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
					     GFP_ATOMIC)) {
			kfree_skb(skb2);
			skb2 = NULL;
		}
	}
	return skb2;
}
EXPORT_SYMBOL(skb_realloc_headroom);

/**
 * skb_copy_expand - copy and expand sk_buff
 * @skb: buffer to copy
 * @newheadroom: new free bytes at head
 * @newtailroom: new free bytes at tail
 * @gfp_mask: allocation priority
 *
 * Make a copy of both an &sk_buff and its data and while doing so
 * allocate additional space.
 *
 * This is used when the caller wishes to modify the data and needs a
 * private copy of the data to alter as well as more space for new fields.
 * Returns %NULL on failure or the pointer to the buffer
 * on success. The returned buffer has a reference count of 1.
 *
 * You must pass %GFP_ATOMIC as the allocation priority if this function
 * is called from an interrupt.
 */
struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
				int newheadroom, int newtailroom,
				gfp_t gfp_mask)
{
	/*
	 * Allocate the copy buffer
	 */
	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
					gfp_mask, skb_alloc_rx_flag(skb),
					NUMA_NO_NODE);
	int oldheadroom = skb_headroom(skb);
	int head_copy_len, head_copy_off;

	if (!n)
		return NULL;

	skb_reserve(n, newheadroom);

	/* Set the tail pointer and length */
	skb_put(n, skb->len);

	head_copy_len = oldheadroom;
	head_copy_off = 0;
	if (newheadroom <= head_copy_len)
		head_copy_len = newheadroom;
	else
		head_copy_off = newheadroom - head_copy_len;

	/* Copy the linear header and data. */
	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
			     skb->len + head_copy_len));

	copy_skb_header(n, skb);

	skb_headers_offset_update(n, newheadroom - oldheadroom);

	return n;
}
EXPORT_SYMBOL(skb_copy_expand);

/**
 * __skb_pad - zero pad the tail of an skb
 * @skb: buffer to pad
 * @pad: space to pad
 * @free_on_error: free buffer on error
 *
 * Ensure that a buffer is followed by a padding area that is zero
 * filled. Used by network drivers which may DMA or transfer data
 * beyond the buffer end onto the wire.
 *
 * May return error in out of memory cases. The skb is freed on error
 * if @free_on_error is true.
 */

int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
{
	int err;
	int ntail;

	/* If the skbuff is non-linear, tailroom is always zero. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
		if (unlikely(err))
			goto free_skb;
	}

	/* FIXME: The use of this function with non-linear skb's really needs
	 * to be audited.
	 */
	err = skb_linearize(skb);
	if (unlikely(err))
		goto free_skb;

	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	if (free_on_error)
		kfree_skb(skb);
	return err;
}
EXPORT_SYMBOL(__skb_pad);
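
/* Illustrative usage (a sketch, not from this file): Ethernet drivers
 * normally reach this through helpers such as eth_skb_pad() or
 * skb_put_padto() to pad runt frames before DMA:
 *
 *	if (eth_skb_pad(skb))
 *		return NETDEV_TX_OK;	// skb was freed on error
 */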

/**
 * pskb_put - add data to the tail of a potentially fragmented buffer
 * @skb: start of the buffer to use
 * @tail: tail fragment of the buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the potentially
 * fragmented buffer. @tail must be the last fragment of @skb -- or
 * @skb itself. If this would exceed the total buffer size the kernel
 * will panic. A pointer to the first byte of the extra data is
 * returned.
 */

void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
{
	if (tail != skb) {
		skb->data_len += len;
		skb->len += len;
	}
	return skb_put(tail, len);
}
EXPORT_SYMBOL_GPL(pskb_put);

/**
 * skb_put - add data to a buffer
 * @skb: buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the buffer. If this would
 * exceed the total buffer size the kernel will panic. A pointer to the
 * first byte of the extra data is returned.
 */
void *skb_put(struct sk_buff *skb, unsigned int len)
{
	void *tmp = skb_tail_pointer(skb);
	SKB_LINEAR_ASSERT(skb);
	skb->tail += len;
	skb->len += len;
	if (unlikely(skb->tail > skb->end))
		skb_over_panic(skb, len, __builtin_return_address(0));
	return tmp;
}
EXPORT_SYMBOL(skb_put);
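
/* Illustrative usage (a sketch, not from this file): helpers like
 * skb_put_data() in <linux/skbuff.h> wrap this pattern:
 *
 *	void *p = skb_put(skb, sizeof(hdr));	// hdr: caller's header struct
 *
 *	memcpy(p, &hdr, sizeof(hdr));
 */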

/**
 * skb_push - add data to the start of a buffer
 * @skb: buffer to use
 * @len: amount of data to add
 *
 * This function extends the used data area of the buffer at the buffer
 * start. If this would exceed the total buffer headroom the kernel will
 * panic. A pointer to the first byte of the extra data is returned.
 */
void *skb_push(struct sk_buff *skb, unsigned int len)
{
	skb->data -= len;
	skb->len += len;
	if (unlikely(skb->data < skb->head))
		skb_under_panic(skb, len, __builtin_return_address(0));
	return skb->data;
}
EXPORT_SYMBOL(skb_push);
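
/* Illustrative usage (a sketch, not from this file): prepending an
 * Ethernet header after the payload has been filled:
 *
 *	struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
 *
 *	skb_reset_mac_header(skb);
 *	memcpy(eth->h_dest, dest, ETH_ALEN);	// dest: caller's address
 */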

/**
 * skb_pull - remove data from the start of a buffer
 * @skb: buffer to use
 * @len: amount of data to remove
 *
 * This function removes data from the start of a buffer, returning
 * the memory to the headroom. A pointer to the next data in the buffer
 * is returned. Once the data has been pulled future pushes will overwrite
 * the old data.
 */
void *skb_pull(struct sk_buff *skb, unsigned int len)
{
	return skb_pull_inline(skb, len);
}
EXPORT_SYMBOL(skb_pull);

/**
 * skb_trim - remove end from a buffer
 * @skb: buffer to alter
 * @len: new length
 *
 * Cut the length of a buffer down by removing data from the tail. If
 * the buffer is already under the length specified it is not modified.
 * The skb must be linear.
 */
void skb_trim(struct sk_buff *skb, unsigned int len)
{
	if (skb->len > len)
		__skb_trim(skb, len);
}
EXPORT_SYMBOL(skb_trim);

/* Trims skb to length len. It can change skb pointers.
 */

int ___pskb_trim(struct sk_buff *skb, unsigned int len)
{
	struct sk_buff **fragp;
	struct sk_buff *frag;
	int offset = skb_headlen(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	int i;
	int err;

	if (skb_cloned(skb) &&
	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
		return err;

	i = 0;
	if (offset >= len)
		goto drop_pages;

	for (; i < nfrags; i++) {
		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (end < len) {
			offset = end;
			continue;
		}

		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);

drop_pages:
		skb_shinfo(skb)->nr_frags = i;

		for (; i < nfrags; i++)
			skb_frag_unref(skb, i);

		if (skb_has_frag_list(skb))
			skb_drop_fraglist(skb);
		goto done;
	}

	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
	     fragp = &frag->next) {
		int end = offset + frag->len;

		if (skb_shared(frag)) {
			struct sk_buff *nfrag;

			nfrag = skb_clone(frag, GFP_ATOMIC);
			if (unlikely(!nfrag))
				return -ENOMEM;

			nfrag->next = frag->next;
			consume_skb(frag);
			frag = nfrag;
			*fragp = frag;
		}

		if (end < len) {
			offset = end;
			continue;
		}

		if (end > len &&
		    unlikely((err = pskb_trim(frag, len - offset))))
			return err;

		if (frag->next)
			skb_drop_list(&frag->next);
		break;
	}

done:
	if (len > skb_headlen(skb)) {
		skb->data_len -= skb->len - len;
		skb->len = len;
	} else {
		skb->len = len;
		skb->data_len = 0;
		skb_set_tail_pointer(skb, len);
	}

	if (!skb->sk || skb->destructor == sock_edemux)
		skb_condense(skb);
	return 0;
}
EXPORT_SYMBOL(___pskb_trim);

/**
 * __pskb_pull_tail - advance tail of skb header
 * @skb: buffer to reallocate
 * @delta: number of bytes to advance tail
 *
 * The function makes sense only on a fragmented &sk_buff;
 * it expands the header, moving its tail forward and copying the
 * necessary data from the fragmented part.
 *
 * &sk_buff MUST have reference count of 1.
 *
 * Returns %NULL (and &sk_buff does not change) if pull failed
 * or value of new tail of skb in the case of success.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

/* Moves tail of skb head forward, copying data from fragmented part,
 * when it is necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
void *__pskb_pull_tail(struct sk_buff *skb, int delta)
{
	/* If skb does not have enough free space at the tail, get new one
	 * plus 128 bytes for future expansions. If we have enough
	 * room at the tail, reallocate without expansion only if skb is
	 * cloned.
	 */
	int i, k, eat = (skb->tail + delta) - skb->end;

	if (eat > 0 || skb_cloned(skb)) {
		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
				     GFP_ATOMIC))
			return NULL;
	}

	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
			     skb_tail_pointer(skb), delta));

	/* Optimization: no fragments, no reason to pre-estimate
	 * size of pulled pages. Superb.
	 */
	if (!skb_has_frag_list(skb))
		goto pull_pages;

	/* Estimate size of pulled pages. */
	eat = delta;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size >= eat)
			goto pull_pages;
		eat -= size;
	}

	/* If we need to update the frag list, we are in trouble.
	 * Certainly, it is possible to add an offset to skb data,
	 * but taking into account that pulling is expected to
	 * be a very rare operation, it is worth fighting against
	 * further bloating of the skb head and crucifying ourselves
	 * here instead. Pure masochism, indeed. 8)8)
	 */
	if (eat) {
		struct sk_buff *list = skb_shinfo(skb)->frag_list;
		struct sk_buff *clone = NULL;
		struct sk_buff *insp = NULL;

		do {
			BUG_ON(!list);

			if (list->len <= eat) {
				/* Eaten as whole. */
				eat -= list->len;
				list = list->next;
				insp = list;
			} else {
				/* Eaten partially. */

				if (skb_shared(list)) {
					/* Sucks! We need to fork list. :-( */
					clone = skb_clone(list, GFP_ATOMIC);
					if (!clone)
						return NULL;
					insp = list->next;
					list = clone;
				} else {
					/* This may be pulled without
					 * problems. */
					insp = list;
				}
				if (!pskb_pull(list, eat)) {
					kfree_skb(clone);
					return NULL;
				}
				break;
			}
		} while (eat);

		/* Free pulled out fragments. */
		while ((list = skb_shinfo(skb)->frag_list) != insp) {
			skb_shinfo(skb)->frag_list = list->next;
			kfree_skb(list);
		}
		/* And insert new clone at head. */
		if (clone) {
			clone->next = list;
			skb_shinfo(skb)->frag_list = clone;
		}
	}
	/* Success! Now we may commit changes to skb data. */

pull_pages:
	eat = delta;
	k = 0;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (size <= eat) {
			skb_frag_unref(skb, i);
			eat -= size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
				if (!i)
					goto end;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

end:
	skb->tail += delta;
	skb->data_len -= delta;

	if (!skb->data_len)
		skb_zcopy_clear(skb, false);

	return skb_tail_pointer(skb);
}
EXPORT_SYMBOL(__pskb_pull_tail);
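
/* Illustrative usage (a sketch, not from this file): callers rarely use
 * __pskb_pull_tail() directly; they go through pskb_may_pull() before
 * touching header bytes that may still live in frags:
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;
 *	iph = ip_hdr(skb);	// linear and safe to dereference now
 */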
1989
1990/**
1991 * skb_copy_bits - copy bits from skb to kernel buffer
1992 * @skb: source skb
1993 * @offset: offset in source
1994 * @to: destination buffer
1995 * @len: number of bytes to copy
1996 *
1997 * Copy the specified number of bytes from the source skb to the
1998 * destination buffer.
1999 *
2000 * CAUTION ! :
2001 * If its prototype is ever changed,
2002 * check arch/{*}/net/{*}.S files,
2003 * since it is called from BPF assembly code.
2004 */
2005int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2006{
2007 int start = skb_headlen(skb);
2008 struct sk_buff *frag_iter;
2009 int i, copy;
2010
2011 if (offset > (int)skb->len - len)
2012 goto fault;
2013
2014 /* Copy header. */
2015 if ((copy = start - offset) > 0) {
2016 if (copy > len)
2017 copy = len;
2018 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2019 if ((len -= copy) == 0)
2020 return 0;
2021 offset += copy;
2022 to += copy;
2023 }
2024
2025 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2026 int end;
2027 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2028
2029 WARN_ON(start > offset + len);
2030
2031 end = start + skb_frag_size(f);
2032 if ((copy = end - offset) > 0) {
2033 u32 p_off, p_len, copied;
2034 struct page *p;
2035 u8 *vaddr;
2036
2037 if (copy > len)
2038 copy = len;
2039
2040 skb_frag_foreach_page(f,
2041 f->page_offset + offset - start,
2042 copy, p, p_off, p_len, copied) {
2043 vaddr = kmap_atomic(p);
2044 memcpy(to + copied, vaddr + p_off, p_len);
2045 kunmap_atomic(vaddr);
2046 }
2047
2048 if ((len -= copy) == 0)
2049 return 0;
2050 offset += copy;
2051 to += copy;
2052 }
2053 start = end;
2054 }
2055
2056 skb_walk_frags(skb, frag_iter) {
2057 int end;
2058
2059 WARN_ON(start > offset + len);
2060
2061 end = start + frag_iter->len;
2062 if ((copy = end - offset) > 0) {
2063 if (copy > len)
2064 copy = len;
2065 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2066 goto fault;
2067 if ((len -= copy) == 0)
2068 return 0;
2069 offset += copy;
2070 to += copy;
2071 }
2072 start = end;
2073 }
2074
2075 if (!len)
2076 return 0;
2077
2078fault:
2079 return -EFAULT;
2080}
2081EXPORT_SYMBOL(skb_copy_bits);
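
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * peek at the start of a packet without modifying it.  skb_copy_bits()
 * transparently walks the linear area, the page frags and the frag
 * list.  The helper name is hypothetical.
 */
static inline int example_peek_bytes(const struct sk_buff *skb,
				     void *buf, unsigned int len)
{
	/* Returns 0 on success, -EFAULT if the skb is shorter than len. */
	return skb_copy_bits(skb, 0, buf, len);
}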
2082
2083/*
2084 * Callback from splice_to_pipe(), if we need to release some pages
2085 * at the end of the spd in case we errored out while filling the pipe.
2086 */
2087static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2088{
2089 put_page(spd->pages[i]);
2090}
2091
2092static struct page *linear_to_page(struct page *page, unsigned int *len,
2093 unsigned int *offset,
2094 struct sock *sk)
2095{
2096 struct page_frag *pfrag = sk_page_frag(sk);
2097
2098 if (!sk_page_frag_refill(sk, pfrag))
2099 return NULL;
2100
2101 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2102
2103 memcpy(page_address(pfrag->page) + pfrag->offset,
2104 page_address(page) + *offset, *len);
2105 *offset = pfrag->offset;
2106 pfrag->offset += *len;
2107
2108 return pfrag->page;
2109}
2110
2111static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2112 struct page *page,
2113 unsigned int offset)
2114{
2115 return spd->nr_pages &&
2116 spd->pages[spd->nr_pages - 1] == page &&
2117 (spd->partial[spd->nr_pages - 1].offset +
2118 spd->partial[spd->nr_pages - 1].len == offset);
2119}
2120
2121/*
2122 * Fill page/offset/length into spd, if it can hold more pages.
2123 */
2124static bool spd_fill_page(struct splice_pipe_desc *spd,
2125 struct pipe_inode_info *pipe, struct page *page,
2126 unsigned int *len, unsigned int offset,
2127 bool linear,
2128 struct sock *sk)
2129{
2130 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2131 return true;
2132
2133 if (linear) {
2134 page = linear_to_page(page, len, &offset, sk);
2135 if (!page)
2136 return true;
2137 }
2138 if (spd_can_coalesce(spd, page, offset)) {
2139 spd->partial[spd->nr_pages - 1].len += *len;
2140 return false;
2141 }
2142 get_page(page);
2143 spd->pages[spd->nr_pages] = page;
2144 spd->partial[spd->nr_pages].len = *len;
2145 spd->partial[spd->nr_pages].offset = offset;
2146 spd->nr_pages++;
2147
2148 return false;
2149}
2150
2151static bool __splice_segment(struct page *page, unsigned int poff,
2152 unsigned int plen, unsigned int *off,
2153 unsigned int *len,
2154 struct splice_pipe_desc *spd, bool linear,
2155 struct sock *sk,
2156 struct pipe_inode_info *pipe)
2157{
2158 if (!*len)
2159 return true;
2160
2161 /* skip this segment if already processed */
2162 if (*off >= plen) {
2163 *off -= plen;
2164 return false;
2165 }
2166
2167 /* ignore any bits we already processed */
2168 poff += *off;
2169 plen -= *off;
2170 *off = 0;
2171
2172 do {
2173 unsigned int flen = min(*len, plen);
2174
2175 if (spd_fill_page(spd, pipe, page, &flen, poff,
2176 linear, sk))
2177 return true;
2178 poff += flen;
2179 plen -= flen;
2180 *len -= flen;
2181 } while (*len && plen);
2182
2183 return false;
2184}
2185
2186/*
2187 * Map linear and fragment data from the skb to spd. It reports true if the
2188 * pipe is full or if we already spliced the requested length.
2189 */
2190static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2191 unsigned int *offset, unsigned int *len,
2192 struct splice_pipe_desc *spd, struct sock *sk)
2193{
2194 int seg;
2195 struct sk_buff *iter;
2196
2197	/* map the linear part:
2198 * If skb->head_frag is set, this 'linear' part is backed by a
2199 * fragment, and if the head is not shared with any clones then
2200 * we can avoid a copy since we own the head portion of this page.
2201 */
2202 if (__splice_segment(virt_to_page(skb->data),
2203 (unsigned long) skb->data & (PAGE_SIZE - 1),
2204 skb_headlen(skb),
2205 offset, len, spd,
2206 skb_head_is_locked(skb),
2207 sk, pipe))
2208 return true;
2209
2210 /*
2211 * then map the fragments
2212 */
2213 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2214 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2215
2216 if (__splice_segment(skb_frag_page(f),
2217 f->page_offset, skb_frag_size(f),
2218 offset, len, spd, false, sk, pipe))
2219 return true;
2220 }
2221
2222 skb_walk_frags(skb, iter) {
2223 if (*offset >= iter->len) {
2224 *offset -= iter->len;
2225 continue;
2226 }
2227 /* __skb_splice_bits() only fails if the output has no room
2228 * left, so no point in going over the frag_list for the error
2229 * case.
2230 */
2231 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2232 return true;
2233 }
2234
2235 return false;
2236}
2237
2238/*
2239 * Map data from the skb to a pipe. Should handle both the linear part,
2240 * the fragments, and the frag list.
2241 */
2242int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2243 struct pipe_inode_info *pipe, unsigned int tlen,
2244 unsigned int flags)
2245{
2246 struct partial_page partial[MAX_SKB_FRAGS];
2247 struct page *pages[MAX_SKB_FRAGS];
2248 struct splice_pipe_desc spd = {
2249 .pages = pages,
2250 .partial = partial,
2251 .nr_pages_max = MAX_SKB_FRAGS,
2252 .ops = &nosteal_pipe_buf_ops,
2253 .spd_release = sock_spd_release,
2254 };
2255 int ret = 0;
2256
2257 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2258
2259 if (spd.nr_pages)
2260 ret = splice_to_pipe(pipe, &spd);
2261
2262 return ret;
2263}
2264EXPORT_SYMBOL_GPL(skb_splice_bits);
2265
2266/* Send skb data on a socket. Socket must be locked. */
2267int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2268 int len)
2269{
2270 unsigned int orig_len = len;
2271 struct sk_buff *head = skb;
2272 unsigned short fragidx;
2273 int slen, ret;
2274
2275do_frag_list:
2276
2277 /* Deal with head data */
2278 while (offset < skb_headlen(skb) && len) {
2279 struct kvec kv;
2280 struct msghdr msg;
2281
2282 slen = min_t(int, len, skb_headlen(skb) - offset);
2283 kv.iov_base = skb->data + offset;
2284 kv.iov_len = slen;
2285 memset(&msg, 0, sizeof(msg));
2286
2287 ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2288 if (ret <= 0)
2289 goto error;
2290
2291 offset += ret;
2292 len -= ret;
2293 }
2294
2295	/* Was all the data in the skb head? */
2296 if (!len)
2297 goto out;
2298
2299 /* Make offset relative to start of frags */
2300 offset -= skb_headlen(skb);
2301
2302 /* Find where we are in frag list */
2303 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2304 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2305
2306 if (offset < frag->size)
2307 break;
2308
2309 offset -= frag->size;
2310 }
2311
2312 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2313 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2314
2315 slen = min_t(size_t, len, frag->size - offset);
2316
2317 while (slen) {
2318 ret = kernel_sendpage_locked(sk, frag->page.p,
2319 frag->page_offset + offset,
2320 slen, MSG_DONTWAIT);
2321 if (ret <= 0)
2322 goto error;
2323
2324 len -= ret;
2325 offset += ret;
2326 slen -= ret;
2327 }
2328
2329 offset = 0;
2330 }
2331
2332 if (len) {
2333 /* Process any frag lists */
2334
2335 if (skb == head) {
2336 if (skb_has_frag_list(skb)) {
2337 skb = skb_shinfo(skb)->frag_list;
2338 goto do_frag_list;
2339 }
2340 } else if (skb->next) {
2341 skb = skb->next;
2342 goto do_frag_list;
2343 }
2344 }
2345
2346out:
2347 return orig_len - len;
2348
2349error:
2350 return orig_len == len ? ret : orig_len - len;
2351}
2352EXPORT_SYMBOL_GPL(skb_send_sock_locked);
2353
2354/* Send skb data on a socket. */
2355int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
2356{
2357 int ret = 0;
2358
2359 lock_sock(sk);
2360 ret = skb_send_sock_locked(sk, skb, offset, len);
2361 release_sock(sk);
2362
2363 return ret;
2364}
2365EXPORT_SYMBOL_GPL(skb_send_sock);
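
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * push a whole skb payload out on a kernel socket.  The return value is
 * the number of bytes sent, which may be short if an error occurred
 * part-way through.  The helper name is hypothetical.
 */
static inline int example_send_whole_skb(struct sock *sk, struct sk_buff *skb)
{
	return skb_send_sock(sk, skb, 0, skb->len);
}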
2366
2367/**
2368 * skb_store_bits - store bits from kernel buffer to skb
2369 * @skb: destination buffer
2370 * @offset: offset in destination
2371 * @from: source buffer
2372 * @len: number of bytes to copy
2373 *
2374 * Copy the specified number of bytes from the source buffer to the
2375 * destination skb. This function handles all the messy bits of
2376 * traversing fragment lists and such.
2377 */
2378
2379int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2380{
2381 int start = skb_headlen(skb);
2382 struct sk_buff *frag_iter;
2383 int i, copy;
2384
2385 if (offset > (int)skb->len - len)
2386 goto fault;
2387
2388 if ((copy = start - offset) > 0) {
2389 if (copy > len)
2390 copy = len;
2391 skb_copy_to_linear_data_offset(skb, offset, from, copy);
2392 if ((len -= copy) == 0)
2393 return 0;
2394 offset += copy;
2395 from += copy;
2396 }
2397
2398 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2399 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2400 int end;
2401
2402 WARN_ON(start > offset + len);
2403
2404 end = start + skb_frag_size(frag);
2405 if ((copy = end - offset) > 0) {
2406 u32 p_off, p_len, copied;
2407 struct page *p;
2408 u8 *vaddr;
2409
2410 if (copy > len)
2411 copy = len;
2412
2413 skb_frag_foreach_page(frag,
2414 frag->page_offset + offset - start,
2415 copy, p, p_off, p_len, copied) {
2416 vaddr = kmap_atomic(p);
2417 memcpy(vaddr + p_off, from + copied, p_len);
2418 kunmap_atomic(vaddr);
2419 }
2420
2421 if ((len -= copy) == 0)
2422 return 0;
2423 offset += copy;
2424 from += copy;
2425 }
2426 start = end;
2427 }
2428
2429 skb_walk_frags(skb, frag_iter) {
2430 int end;
2431
2432 WARN_ON(start > offset + len);
2433
2434 end = start + frag_iter->len;
2435 if ((copy = end - offset) > 0) {
2436 if (copy > len)
2437 copy = len;
2438 if (skb_store_bits(frag_iter, offset - start,
2439 from, copy))
2440 goto fault;
2441 if ((len -= copy) == 0)
2442 return 0;
2443 offset += copy;
2444 from += copy;
2445 }
2446 start = end;
2447 }
2448 if (!len)
2449 return 0;
2450
2451fault:
2452 return -EFAULT;
2453}
2454EXPORT_SYMBOL(skb_store_bits);
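
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * rewrite bytes inside a packet, e.g. for NAT-style mangling.
 * skb_ensure_writable() performs the copy-on-write that skb_store_bits()
 * itself does not.  The helper name is hypothetical.
 */
static inline int example_rewrite_bytes(struct sk_buff *skb, int offset,
					const void *val, int len)
{
	int err = skb_ensure_writable(skb, offset + len);

	if (err)
		return err;
	return skb_store_bits(skb, offset, val, len);
}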
2455
2456/* Checksum skb data. */
2457__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2458 __wsum csum, const struct skb_checksum_ops *ops)
2459{
2460 int start = skb_headlen(skb);
2461 int i, copy = start - offset;
2462 struct sk_buff *frag_iter;
2463 int pos = 0;
2464
2465 /* Checksum header. */
2466 if (copy > 0) {
2467 if (copy > len)
2468 copy = len;
2469 csum = ops->update(skb->data + offset, copy, csum);
2470 if ((len -= copy) == 0)
2471 return csum;
2472 offset += copy;
2473 pos = copy;
2474 }
2475
2476 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2477 int end;
2478 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2479
2480 WARN_ON(start > offset + len);
2481
2482 end = start + skb_frag_size(frag);
2483 if ((copy = end - offset) > 0) {
2484 u32 p_off, p_len, copied;
2485 struct page *p;
2486 __wsum csum2;
2487 u8 *vaddr;
2488
2489 if (copy > len)
2490 copy = len;
2491
2492 skb_frag_foreach_page(frag,
2493 frag->page_offset + offset - start,
2494 copy, p, p_off, p_len, copied) {
2495 vaddr = kmap_atomic(p);
2496 csum2 = ops->update(vaddr + p_off, p_len, 0);
2497 kunmap_atomic(vaddr);
2498 csum = ops->combine(csum, csum2, pos, p_len);
2499 pos += p_len;
2500 }
2501
2502 if (!(len -= copy))
2503 return csum;
2504 offset += copy;
2505 }
2506 start = end;
2507 }
2508
2509 skb_walk_frags(skb, frag_iter) {
2510 int end;
2511
2512 WARN_ON(start > offset + len);
2513
2514 end = start + frag_iter->len;
2515 if ((copy = end - offset) > 0) {
2516 __wsum csum2;
2517 if (copy > len)
2518 copy = len;
2519 csum2 = __skb_checksum(frag_iter, offset - start,
2520 copy, 0, ops);
2521 csum = ops->combine(csum, csum2, pos, copy);
2522 if ((len -= copy) == 0)
2523 return csum;
2524 offset += copy;
2525 pos += copy;
2526 }
2527 start = end;
2528 }
2529 BUG_ON(len);
2530
2531 return csum;
2532}
2533EXPORT_SYMBOL(__skb_checksum);
2534
2535__wsum skb_checksum(const struct sk_buff *skb, int offset,
2536 int len, __wsum csum)
2537{
2538 const struct skb_checksum_ops ops = {
2539 .update = csum_partial_ext,
2540 .combine = csum_block_add_ext,
2541 };
2542
2543 return __skb_checksum(skb, offset, len, csum, &ops);
2544}
2545EXPORT_SYMBOL(skb_checksum);
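
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * fold a checksum over the whole packet, as an ICMP implementation
 * might do before transmit.  The helper name is hypothetical.
 */
static inline __sum16 example_full_csum(const struct sk_buff *skb)
{
	/* Seed with 0 and cover every byte of the packet. */
	return csum_fold(skb_checksum(skb, 0, skb->len, 0));
}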
2546
2547/* Both of the above in one bottle. */
2548
2549__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2550 u8 *to, int len, __wsum csum)
2551{
2552 int start = skb_headlen(skb);
2553 int i, copy = start - offset;
2554 struct sk_buff *frag_iter;
2555 int pos = 0;
2556
2557 /* Copy header. */
2558 if (copy > 0) {
2559 if (copy > len)
2560 copy = len;
2561 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2562 copy, csum);
2563 if ((len -= copy) == 0)
2564 return csum;
2565 offset += copy;
2566 to += copy;
2567 pos = copy;
2568 }
2569
2570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571 int end;
2572
2573 WARN_ON(start > offset + len);
2574
2575 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2576 if ((copy = end - offset) > 0) {
2577 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2578 u32 p_off, p_len, copied;
2579 struct page *p;
2580 __wsum csum2;
2581 u8 *vaddr;
2582
2583 if (copy > len)
2584 copy = len;
2585
2586 skb_frag_foreach_page(frag,
2587 frag->page_offset + offset - start,
2588 copy, p, p_off, p_len, copied) {
2589 vaddr = kmap_atomic(p);
2590 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2591 to + copied,
2592 p_len, 0);
2593 kunmap_atomic(vaddr);
2594 csum = csum_block_add(csum, csum2, pos);
2595 pos += p_len;
2596 }
2597
2598 if (!(len -= copy))
2599 return csum;
2600 offset += copy;
2601 to += copy;
2602 }
2603 start = end;
2604 }
2605
2606 skb_walk_frags(skb, frag_iter) {
2607 __wsum csum2;
2608 int end;
2609
2610 WARN_ON(start > offset + len);
2611
2612 end = start + frag_iter->len;
2613 if ((copy = end - offset) > 0) {
2614 if (copy > len)
2615 copy = len;
2616 csum2 = skb_copy_and_csum_bits(frag_iter,
2617 offset - start,
2618 to, copy, 0);
2619 csum = csum_block_add(csum, csum2, pos);
2620 if ((len -= copy) == 0)
2621 return csum;
2622 offset += copy;
2623 to += copy;
2624 pos += copy;
2625 }
2626 start = end;
2627 }
2628 BUG_ON(len);
2629 return csum;
2630}
2631EXPORT_SYMBOL(skb_copy_and_csum_bits);
2632
2633static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2634{
2635 net_warn_ratelimited(
2636 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2637 __func__);
2638 return 0;
2639}
2640
2641static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2642 int offset, int len)
2643{
2644 net_warn_ratelimited(
2645 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2646 __func__);
2647 return 0;
2648}
2649
2650static const struct skb_checksum_ops default_crc32c_ops = {
2651 .update = warn_crc32c_csum_update,
2652 .combine = warn_crc32c_csum_combine,
2653};
2654
2655const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2656 &default_crc32c_ops;
2657EXPORT_SYMBOL(crc32c_csum_stub);
2658
2659/**
2660 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2661 * @from: source buffer
2662 *
2663 * Calculates the amount of linear headroom needed in the 'to' skb passed
2664 * into skb_zerocopy().
2665 */
2666unsigned int
2667skb_zerocopy_headlen(const struct sk_buff *from)
2668{
2669 unsigned int hlen = 0;
2670
2671 if (!from->head_frag ||
2672 skb_headlen(from) < L1_CACHE_BYTES ||
2673 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2674 hlen = skb_headlen(from);
2675
2676 if (skb_has_frag_list(from))
2677 hlen = from->len;
2678
2679 return hlen;
2680}
2681EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2682
2683/**
2684 * skb_zerocopy - Zero copy skb to skb
2685 * @to: destination buffer
2686 * @from: source buffer
2687 * @len: number of bytes to copy from source buffer
2688 * @hlen: size of linear headroom in destination buffer
2689 *
2690 * Copies up to @len bytes from @from to @to by creating references
2691 * to the frags in the source buffer.
2692 *
2693 * The @hlen as calculated by skb_zerocopy_headlen() specifies the
2694 * headroom in the @to buffer.
2695 *
2696 * Return value:
2697 * 0: everything is OK
2698 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2699 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2700 */
2701int
2702skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2703{
2704 int i, j = 0;
2705 int plen = 0; /* length of skb->head fragment */
2706 int ret;
2707 struct page *page;
2708 unsigned int offset;
2709
2710 BUG_ON(!from->head_frag && !hlen);
2711
2712	/* don't bother with small payloads */
2713 if (len <= skb_tailroom(to))
2714 return skb_copy_bits(from, 0, skb_put(to, len), len);
2715
2716 if (hlen) {
2717 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2718 if (unlikely(ret))
2719 return ret;
2720 len -= hlen;
2721 } else {
2722 plen = min_t(int, skb_headlen(from), len);
2723 if (plen) {
2724 page = virt_to_head_page(from->head);
2725 offset = from->data - (unsigned char *)page_address(page);
2726 __skb_fill_page_desc(to, 0, page, offset, plen);
2727 get_page(page);
2728 j = 1;
2729 len -= plen;
2730 }
2731 }
2732
2733 to->truesize += len + plen;
2734 to->len += len + plen;
2735 to->data_len += len + plen;
2736
2737 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2738 skb_tx_error(from);
2739 return -ENOMEM;
2740 }
2741 skb_zerocopy_clone(to, from, GFP_ATOMIC);
2742
2743 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2744 if (!len)
2745 break;
2746 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2747 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2748 len -= skb_shinfo(to)->frags[j].size;
2749 skb_frag_ref(to, j);
2750 j++;
2751 }
2752 skb_shinfo(to)->nr_frags = j;
2753
2754 return 0;
2755}
2756EXPORT_SYMBOL_GPL(skb_zerocopy);
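
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * pair skb_zerocopy_headlen() with skb_zerocopy() to duplicate a packet
 * while sharing its frags, similar to what upcall paths do.  The helper
 * name is hypothetical; it assumes @from carries data, and error
 * handling is minimal.
 */
static inline struct sk_buff *example_zerocopy_dup(struct sk_buff *from,
						   gfp_t gfp)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to = alloc_skb(hlen, gfp);

	if (!to)
		return NULL;
	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}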
2757
2758void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2759{
2760 __wsum csum;
2761 long csstart;
2762
2763 if (skb->ip_summed == CHECKSUM_PARTIAL)
2764 csstart = skb_checksum_start_offset(skb);
2765 else
2766 csstart = skb_headlen(skb);
2767
2768 BUG_ON(csstart > skb_headlen(skb));
2769
2770 skb_copy_from_linear_data(skb, to, csstart);
2771
2772 csum = 0;
2773 if (csstart != skb->len)
2774 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2775 skb->len - csstart, 0);
2776
2777 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2778 long csstuff = csstart + skb->csum_offset;
2779
2780 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2781 }
2782}
2783EXPORT_SYMBOL(skb_copy_and_csum_dev);
2784
2785/**
2786 * skb_dequeue - remove from the head of the queue
2787 * @list: list to dequeue from
2788 *
2789 * Remove the head of the list. The list lock is taken so the function
2790 * may be used safely with other locking list functions. The head item is
2791 * returned or %NULL if the list is empty.
2792 */
2793
2794struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2795{
2796 unsigned long flags;
2797 struct sk_buff *result;
2798
2799 spin_lock_irqsave(&list->lock, flags);
2800 result = __skb_dequeue(list);
2801 spin_unlock_irqrestore(&list->lock, flags);
2802 return result;
2803}
2804EXPORT_SYMBOL(skb_dequeue);
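
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * the classic producer/consumer pattern over an &sk_buff_head.  The
 * locked helpers in this file make it safe against concurrent users of
 * the same list.  The helper name is hypothetical.
 */
static inline struct sk_buff *example_pass_through(struct sk_buff_head *list,
						   struct sk_buff *skb)
{
	skb_queue_tail(list, skb);	/* producer side */
	return skb_dequeue(list);	/* consumer side; %NULL when empty */
}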
2805
2806/**
2807 * skb_dequeue_tail - remove from the tail of the queue
2808 * @list: list to dequeue from
2809 *
2810 * Remove the tail of the list. The list lock is taken so the function
2811 * may be used safely with other locking list functions. The tail item is
2812 * returned or %NULL if the list is empty.
2813 */
2814struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2815{
2816 unsigned long flags;
2817 struct sk_buff *result;
2818
2819 spin_lock_irqsave(&list->lock, flags);
2820 result = __skb_dequeue_tail(list);
2821 spin_unlock_irqrestore(&list->lock, flags);
2822 return result;
2823}
2824EXPORT_SYMBOL(skb_dequeue_tail);
2825
2826/**
2827 * skb_queue_purge - empty a list
2828 * @list: list to empty
2829 *
2830 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2831 * the list and one reference dropped. This function takes the list
2832 * lock and is atomic with respect to other list locking functions.
2833 */
2834void skb_queue_purge(struct sk_buff_head *list)
2835{
2836 struct sk_buff *skb;
2837 while ((skb = skb_dequeue(list)) != NULL)
2838 kfree_skb(skb);
2839}
2840EXPORT_SYMBOL(skb_queue_purge);
2841
2842/**
2843 * skb_rbtree_purge - empty a skb rbtree
2844 * @root: root of the rbtree to empty
2845 *
2846 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
2847 * the tree and one reference dropped. This function does not take
2848 * any lock. Synchronization should be handled by the caller (e.g., TCP
2849 * out-of-order queue is protected by the socket lock).
2850 */
2851void skb_rbtree_purge(struct rb_root *root)
2852{
2853 struct rb_node *p = rb_first(root);
2854
2855 while (p) {
2856 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
2857
2858 p = rb_next(p);
2859 rb_erase(&skb->rbnode, root);
2860 kfree_skb(skb);
2861 }
2862}
2863
2864/**
2865 * skb_queue_head - queue a buffer at the list head
2866 * @list: list to use
2867 * @newsk: buffer to queue
2868 *
2869 * Queue a buffer at the start of the list. This function takes the
2870 * list lock and can be used safely with other locking &sk_buff
2871 * functions.
2872 *
2873 * A buffer cannot be placed on two lists at the same time.
2874 */
2875void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2876{
2877 unsigned long flags;
2878
2879 spin_lock_irqsave(&list->lock, flags);
2880 __skb_queue_head(list, newsk);
2881 spin_unlock_irqrestore(&list->lock, flags);
2882}
2883EXPORT_SYMBOL(skb_queue_head);
2884
2885/**
2886 * skb_queue_tail - queue a buffer at the list tail
2887 * @list: list to use
2888 * @newsk: buffer to queue
2889 *
2890 * Queue a buffer at the tail of the list. This function takes the
2891 * list lock and can be used safely with other locking &sk_buff
2892 * functions.
2893 *
2894 * A buffer cannot be placed on two lists at the same time.
2895 */
2896void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2897{
2898 unsigned long flags;
2899
2900 spin_lock_irqsave(&list->lock, flags);
2901 __skb_queue_tail(list, newsk);
2902 spin_unlock_irqrestore(&list->lock, flags);
2903}
2904EXPORT_SYMBOL(skb_queue_tail);
2905
2906/**
2907 * skb_unlink - remove a buffer from a list
2908 * @skb: buffer to remove
2909 * @list: list to use
2910 *
2911 * Remove a packet from a list. The list locks are taken and this
2912 * function is atomic with respect to other list locked calls.
2913 *
2914 * You must know what list the SKB is on.
2915 */
2916void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2917{
2918 unsigned long flags;
2919
2920 spin_lock_irqsave(&list->lock, flags);
2921 __skb_unlink(skb, list);
2922 spin_unlock_irqrestore(&list->lock, flags);
2923}
2924EXPORT_SYMBOL(skb_unlink);
2925
2926/**
2927 * skb_append - append a buffer
2928 * @old: buffer to insert after
2929 * @newsk: buffer to insert
2930 * @list: list to use
2931 *
2932 * Place a packet after a given packet in a list. The list locks are taken
2933 * and this function is atomic with respect to other list locked calls.
2934 * A buffer cannot be placed on two lists at the same time.
2935 */
2936void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2937{
2938 unsigned long flags;
2939
2940 spin_lock_irqsave(&list->lock, flags);
2941 __skb_queue_after(list, old, newsk);
2942 spin_unlock_irqrestore(&list->lock, flags);
2943}
2944EXPORT_SYMBOL(skb_append);
2945
2946/**
2947 * skb_insert - insert a buffer
2948 * @old: buffer to insert before
2949 * @newsk: buffer to insert
2950 * @list: list to use
2951 *
2952 * Place a packet before a given packet in a list. The list locks are
2953 * taken and this function is atomic with respect to other list locked
2954 * calls.
2955 *
2956 * A buffer cannot be placed on two lists at the same time.
2957 */
2958void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2959{
2960 unsigned long flags;
2961
2962 spin_lock_irqsave(&list->lock, flags);
2963 __skb_insert(newsk, old->prev, old, list);
2964 spin_unlock_irqrestore(&list->lock, flags);
2965}
2966EXPORT_SYMBOL(skb_insert);
2967
2968static inline void skb_split_inside_header(struct sk_buff *skb,
2969 struct sk_buff* skb1,
2970 const u32 len, const int pos)
2971{
2972 int i;
2973
2974 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2975 pos - len);
2976 /* And move data appendix as is. */
2977 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2978 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2979
2980 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2981 skb_shinfo(skb)->nr_frags = 0;
2982 skb1->data_len = skb->data_len;
2983 skb1->len += skb1->data_len;
2984 skb->data_len = 0;
2985 skb->len = len;
2986 skb_set_tail_pointer(skb, len);
2987}
2988
2989static inline void skb_split_no_header(struct sk_buff *skb,
2990 struct sk_buff* skb1,
2991 const u32 len, int pos)
2992{
2993 int i, k = 0;
2994 const int nfrags = skb_shinfo(skb)->nr_frags;
2995
2996 skb_shinfo(skb)->nr_frags = 0;
2997 skb1->len = skb1->data_len = skb->len - len;
2998 skb->len = len;
2999 skb->data_len = len - pos;
3000
3001 for (i = 0; i < nfrags; i++) {
3002 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3003
3004 if (pos + size > len) {
3005 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3006
3007 if (pos < len) {
3008				/* Split frag.
3009				 * We have two options in this case:
3010				 * 1. Move the whole frag to the second
3011				 * part, if possible. E.g. this approach
3012				 * is mandatory for TUX, where splitting
3013				 * is expensive.
3014				 * 2. Split the frag accurately. We do that here.
3015				 */
3016 skb_frag_ref(skb, i);
3017 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
3018 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3019 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3020 skb_shinfo(skb)->nr_frags++;
3021 }
3022 k++;
3023 } else
3024 skb_shinfo(skb)->nr_frags++;
3025 pos += size;
3026 }
3027 skb_shinfo(skb1)->nr_frags = k;
3028}
3029
3030/**
3031 * skb_split - Split fragmented skb to two parts at length len.
3032 * @skb: the buffer to split
3033 * @skb1: the buffer to receive the second part
3034 * @len: new length for skb
3035 */
3036void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3037{
3038 int pos = skb_headlen(skb);
3039
3040 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3041 SKBTX_SHARED_FRAG;
3042 skb_zerocopy_clone(skb1, skb, 0);
3043 if (len < pos) /* Split line is inside header. */
3044 skb_split_inside_header(skb, skb1, len, pos);
3045 else /* Second chunk has no header, nothing to copy. */
3046 skb_split_no_header(skb, skb1, len, pos);
3047}
3048EXPORT_SYMBOL(skb_split);
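
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * carve the first @len bytes off a packet, roughly what TCP does when
 * it fragments an oversized segment.  The second buffer only needs
 * tailroom for whatever linear data it may inherit.  The helper name is
 * hypothetical.
 */
static inline struct sk_buff *example_split_at(struct sk_buff *skb,
					       unsigned int len, gfp_t gfp)
{
	struct sk_buff *rest = alloc_skb(skb_headlen(skb), gfp);

	if (!rest)
		return NULL;
	/* skb keeps bytes [0, len); rest receives the remainder. */
	skb_split(skb, rest, len);
	return rest;
}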
3049
3050/* Shifting from/to a cloned skb is a no-go.
3051 *
3052 * Caller cannot keep skb_shinfo related pointers past calling here!
3053 */
3054static int skb_prepare_for_shift(struct sk_buff *skb)
3055{
3056 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3057}
3058
3059/**
3060 * skb_shift - Shifts paged data partially from skb to another
3061 * @tgt: buffer into which tail data gets added
3062 * @skb: buffer from which the paged data comes from
3063 * @shiftlen: shift up to this many bytes
3064 *
3065 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3066 * the length of the skb, from skb to tgt. Returns the number of bytes
3067 * shifted. It's up to the caller to free skb if everything was shifted.
3068 *
3069 * If @tgt runs out of frags, the whole operation is aborted.
3070 *
3071 * The skb cannot include anything other than paged data, while tgt is
3072 * allowed to have non-paged data as well.
3073 *
3074 * TODO: a full-sized shift could be optimized, but that would need a
3075 * specialized skb freer to handle frags without an up-to-date nr_frags.
3076 */
3077int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3078{
3079 int from, to, merge, todo;
3080 struct skb_frag_struct *fragfrom, *fragto;
3081
3082 BUG_ON(shiftlen > skb->len);
3083
3084 if (skb_headlen(skb))
3085 return 0;
3086 if (skb_zcopy(tgt) || skb_zcopy(skb))
3087 return 0;
3088
3089 todo = shiftlen;
3090 from = 0;
3091 to = skb_shinfo(tgt)->nr_frags;
3092 fragfrom = &skb_shinfo(skb)->frags[from];
3093
3094 /* Actual merge is delayed until the point when we know we can
3095 * commit all, so that we don't have to undo partial changes
3096 */
3097 if (!to ||
3098 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3099 fragfrom->page_offset)) {
3100 merge = -1;
3101 } else {
3102 merge = to - 1;
3103
3104 todo -= skb_frag_size(fragfrom);
3105 if (todo < 0) {
3106 if (skb_prepare_for_shift(skb) ||
3107 skb_prepare_for_shift(tgt))
3108 return 0;
3109
3110 /* All previous frag pointers might be stale! */
3111 fragfrom = &skb_shinfo(skb)->frags[from];
3112 fragto = &skb_shinfo(tgt)->frags[merge];
3113
3114 skb_frag_size_add(fragto, shiftlen);
3115 skb_frag_size_sub(fragfrom, shiftlen);
3116 fragfrom->page_offset += shiftlen;
3117
3118 goto onlymerged;
3119 }
3120
3121 from++;
3122 }
3123
3124 /* Skip full, not-fitting skb to avoid expensive operations */
3125 if ((shiftlen == skb->len) &&
3126 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3127 return 0;
3128
3129 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3130 return 0;
3131
3132 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3133 if (to == MAX_SKB_FRAGS)
3134 return 0;
3135
3136 fragfrom = &skb_shinfo(skb)->frags[from];
3137 fragto = &skb_shinfo(tgt)->frags[to];
3138
3139 if (todo >= skb_frag_size(fragfrom)) {
3140 *fragto = *fragfrom;
3141 todo -= skb_frag_size(fragfrom);
3142 from++;
3143 to++;
3144
3145 } else {
3146 __skb_frag_ref(fragfrom);
3147 fragto->page = fragfrom->page;
3148 fragto->page_offset = fragfrom->page_offset;
3149 skb_frag_size_set(fragto, todo);
3150
3151 fragfrom->page_offset += todo;
3152 skb_frag_size_sub(fragfrom, todo);
3153 todo = 0;
3154
3155 to++;
3156 break;
3157 }
3158 }
3159
3160 /* Ready to "commit" this state change to tgt */
3161 skb_shinfo(tgt)->nr_frags = to;
3162
3163 if (merge >= 0) {
3164 fragfrom = &skb_shinfo(skb)->frags[0];
3165 fragto = &skb_shinfo(tgt)->frags[merge];
3166
3167 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3168 __skb_frag_unref(fragfrom);
3169 }
3170
3171 /* Reposition in the original skb */
3172 to = 0;
3173 while (from < skb_shinfo(skb)->nr_frags)
3174 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3175 skb_shinfo(skb)->nr_frags = to;
3176
3177 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3178
3179onlymerged:
3180 /* Most likely the tgt won't ever need its checksum anymore, skb on
3181 * the other hand might need it if it needs to be resent
3182 */
3183 tgt->ip_summed = CHECKSUM_PARTIAL;
3184 skb->ip_summed = CHECKSUM_PARTIAL;
3185
3186	/* Yuck, is it really working this way? Some helper, please? */
3187 skb->len -= shiftlen;
3188 skb->data_len -= shiftlen;
3189 skb->truesize -= shiftlen;
3190 tgt->len += shiftlen;
3191 tgt->data_len += shiftlen;
3192 tgt->truesize += shiftlen;
3193
3194 return shiftlen;
3195}
3196
3197/**
3198 * skb_prepare_seq_read - Prepare a sequential read of skb data
3199 * @skb: the buffer to read
3200 * @from: lower offset of data to be read
3201 * @to: upper offset of data to be read
3202 * @st: state variable
3203 *
3204 * Initializes the specified state variable. Must be called before
3205 * invoking skb_seq_read() for the first time.
3206 */
3207void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3208 unsigned int to, struct skb_seq_state *st)
3209{
3210 st->lower_offset = from;
3211 st->upper_offset = to;
3212 st->root_skb = st->cur_skb = skb;
3213 st->frag_idx = st->stepped_offset = 0;
3214 st->frag_data = NULL;
3215}
3216EXPORT_SYMBOL(skb_prepare_seq_read);
3217
3218/**
3219 * skb_seq_read - Sequentially read skb data
3220 * @consumed: number of bytes consumed by the caller so far
3221 * @data: destination pointer for data to be returned
3222 * @st: state variable
3223 *
3224 * Reads a block of skb data at @consumed relative to the
3225 * lower offset specified to skb_prepare_seq_read(). Assigns
3226 * the head of the data block to @data and returns the length
3227 * of the block or 0 if the end of the skb data or the upper
3228 * offset has been reached.
3229 *
3230 * The caller is not required to consume all of the data
3231 * returned, i.e. @consumed is typically set to the number
3232 * of bytes already consumed and the next call to
3233 * skb_seq_read() will return the remaining part of the block.
3234 *
3235 * Note 1: The size of each block of data returned can be arbitrary,
3236 * this limitation is the cost for zerocopy sequential
3237 * reads of potentially non linear data.
3238 *
3239 * Note 2: Fragment lists within fragments are not implemented
3240 * at the moment, state->root_skb could be replaced with
3241 * a stack for this purpose.
3242 */
3243unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3244 struct skb_seq_state *st)
3245{
3246 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3247 skb_frag_t *frag;
3248
3249 if (unlikely(abs_offset >= st->upper_offset)) {
3250 if (st->frag_data) {
3251 kunmap_atomic(st->frag_data);
3252 st->frag_data = NULL;
3253 }
3254 return 0;
3255 }
3256
3257next_skb:
3258 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3259
3260 if (abs_offset < block_limit && !st->frag_data) {
3261 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3262 return block_limit - abs_offset;
3263 }
3264
3265 if (st->frag_idx == 0 && !st->frag_data)
3266 st->stepped_offset += skb_headlen(st->cur_skb);
3267
3268 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3269 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3270 block_limit = skb_frag_size(frag) + st->stepped_offset;
3271
3272 if (abs_offset < block_limit) {
3273 if (!st->frag_data)
3274 st->frag_data = kmap_atomic(skb_frag_page(frag));
3275
3276 *data = (u8 *) st->frag_data + frag->page_offset +
3277 (abs_offset - st->stepped_offset);
3278
3279 return block_limit - abs_offset;
3280 }
3281
3282 if (st->frag_data) {
3283 kunmap_atomic(st->frag_data);
3284 st->frag_data = NULL;
3285 }
3286
3287 st->frag_idx++;
3288 st->stepped_offset += skb_frag_size(frag);
3289 }
3290
3291 if (st->frag_data) {
3292 kunmap_atomic(st->frag_data);
3293 st->frag_data = NULL;
3294 }
3295
3296 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3297 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3298 st->frag_idx = 0;
3299 goto next_skb;
3300 } else if (st->cur_skb->next) {
3301 st->cur_skb = st->cur_skb->next;
3302 st->frag_idx = 0;
3303 goto next_skb;
3304 }
3305
3306 return 0;
3307}
3308EXPORT_SYMBOL(skb_seq_read);
3309
3310/**
3311 * skb_abort_seq_read - Abort a sequential read of skb data
3312 * @st: state variable
3313 *
3314 * Must be called if the sequential read was abandoned before
3315 * skb_seq_read() returned 0.
3316 */
3317void skb_abort_seq_read(struct skb_seq_state *st)
3318{
3319 if (st->frag_data)
3320 kunmap_atomic(st->frag_data);
3321}
3322EXPORT_SYMBOL(skb_abort_seq_read);
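
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * the canonical loop over the sequential-read API above.  Each
 * iteration yields a pointer into the skb (or into a kmapped fragment)
 * plus its length.  The helper name is hypothetical.
 */
static inline void example_walk_payload(struct sk_buff *skb,
					unsigned int from, unsigned int to)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0, len;

	skb_prepare_seq_read(skb, from, to, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* process data[0..len) here */
		consumed += len;
	}
	/* skb_abort_seq_read() is only needed when stopping early. */
}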
3323
3324#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
3325
3326static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3327 struct ts_config *conf,
3328 struct ts_state *state)
3329{
3330 return skb_seq_read(offset, text, TS_SKB_CB(state));
3331}
3332
3333static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3334{
3335 skb_abort_seq_read(TS_SKB_CB(state));
3336}
3337
3338/**
3339 * skb_find_text - Find a text pattern in skb data
3340 * @skb: the buffer to look in
3341 * @from: search offset
3342 * @to: search limit
3343 * @config: textsearch configuration
3344 *
3345 * Finds a pattern in the skb data according to the specified
3346 * textsearch configuration. Use textsearch_next() to retrieve
3347 * subsequent occurrences of the pattern. Returns the offset
3348 * to the first occurrence or UINT_MAX if no match was found.
3349 */
3350unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3351 unsigned int to, struct ts_config *config)
3352{
3353 struct ts_state state;
3354 unsigned int ret;
3355
3356 config->get_next_block = skb_ts_get_next_block;
3357 config->finish = skb_ts_finish;
3358
3359 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3360
3361 ret = textsearch_find(config, &state);
3362 return (ret <= to - from ? ret : UINT_MAX);
3363}
3364EXPORT_SYMBOL(skb_find_text);
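
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * search a packet for a byte pattern, much like the xt_string netfilter
 * match.  A real user would prepare the textsearch config once and
 * reuse it.  The helper name is hypothetical.
 */
static inline bool example_skb_contains(struct sk_buff *skb,
					const void *pattern,
					unsigned int plen)
{
	struct ts_config *conf;
	bool found;

	conf = textsearch_prepare("bm", pattern, plen, GFP_KERNEL,
				  TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;
	found = skb_find_text(skb, 0, skb->len, conf) != UINT_MAX;
	textsearch_destroy(conf);
	return found;
}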
3365
3366/**
3367 * skb_append_datato_frags - append the user data to a skb
3368 * @sk: sock structure
3369 * @skb: skb structure to be appended with user data.
3370 * @getfrag: call back function to be used for getting the user data
3371 * @from: pointer to user message iov
3372 * @length: length of the iov message
3373 *
3374 * Description: This procedure appends the user data to the fragment part
3375 * of the skb. If any page allocation fails, it returns -ENOMEM.
3376 */
3377int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
3378 int (*getfrag)(void *from, char *to, int offset,
3379 int len, int odd, struct sk_buff *skb),
3380 void *from, int length)
3381{
3382 int frg_cnt = skb_shinfo(skb)->nr_frags;
3383 int copy;
3384 int offset = 0;
3385 int ret;
3386	struct page_frag *pfrag = &current->task_frag;
3387
3388 do {
3389 /* Return error if we don't have space for new frag */
3390 if (frg_cnt >= MAX_SKB_FRAGS)
3391 return -EMSGSIZE;
3392
3393 if (!sk_page_frag_refill(sk, pfrag))
3394 return -ENOMEM;
3395
3396 /* copy the user data to page */
3397 copy = min_t(int, length, pfrag->size - pfrag->offset);
3398
3399 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
3400 offset, copy, 0, skb);
3401 if (ret < 0)
3402 return -EFAULT;
3403
3404 /* copy was successful so update the size parameters */
3405 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
3406 copy);
3407 frg_cnt++;
3408 pfrag->offset += copy;
3409 get_page(pfrag->page);
3410
3411 skb->truesize += copy;
3412 refcount_add(copy, &sk->sk_wmem_alloc);
3413 skb->len += copy;
3414 skb->data_len += copy;
3415 offset += copy;
3416 length -= copy;
3417
3418 } while (length > 0);
3419
3420 return 0;
3421}
3422EXPORT_SYMBOL(skb_append_datato_frags);
3423
3424int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3425 int offset, size_t size)
3426{
3427 int i = skb_shinfo(skb)->nr_frags;
3428
3429 if (skb_can_coalesce(skb, i, page, offset)) {
3430 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3431 } else if (i < MAX_SKB_FRAGS) {
3432 get_page(page);
3433 skb_fill_page_desc(skb, i, page, offset, size);
3434 } else {
3435 return -EMSGSIZE;
3436 }
3437
3438 return 0;
3439}
3440EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3441
3442/**
3443 * skb_pull_rcsum - pull skb and update receive checksum
3444 * @skb: buffer to update
3445 * @len: length of data pulled
3446 *
3447 * This function performs an skb_pull on the packet and updates
3448 * the CHECKSUM_COMPLETE checksum. It should be used on
3449 * receive path processing instead of skb_pull unless you know
3450 * that the checksum difference is zero (e.g., a valid IP header)
3451 * or you are setting ip_summed to CHECKSUM_NONE.
3452 */
3453void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3454{
3455 unsigned char *data = skb->data;
3456
3457 BUG_ON(len > skb->len);
3458 __skb_pull(skb, len);
3459 skb_postpull_rcsum(skb, data, len);
3460 return skb->data;
3461}
3462EXPORT_SYMBOL_GPL(skb_pull_rcsum);
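
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * strip a hypothetical 4-byte encapsulation header on receive while
 * keeping a CHECKSUM_COMPLETE value consistent, as tunnel and VLAN
 * receive paths do.  The helper name is hypothetical.
 */
static inline bool example_strip_encap(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return false;	/* runt packet */
	skb_pull_rcsum(skb, 4);
	return true;
}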
3463
3464static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3465{
3466 skb_frag_t head_frag;
3467 struct page *page;
3468
3469 page = virt_to_head_page(frag_skb->head);
3470 head_frag.page.p = page;
3471 head_frag.page_offset = frag_skb->data -
3472 (unsigned char *)page_address(page);
3473 head_frag.size = skb_headlen(frag_skb);
3474 return head_frag;
3475}
3476
3477/**
3478 * skb_segment - Perform protocol segmentation on skb.
3479 * @head_skb: buffer to segment
3480 * @features: features for the output path (see dev->features)
3481 *
3482 * This function performs segmentation on the given skb. It returns
3483 * a pointer to the first in a list of new skbs for the segments.
3484 * In case of error it returns ERR_PTR(err).
3485 */
3486struct sk_buff *skb_segment(struct sk_buff *head_skb,
3487 netdev_features_t features)
3488{
3489 struct sk_buff *segs = NULL;
3490 struct sk_buff *tail = NULL;
3491 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3492 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3493 unsigned int mss = skb_shinfo(head_skb)->gso_size;
3494 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3495 struct sk_buff *frag_skb = head_skb;
3496 unsigned int offset = doffset;
3497 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3498 unsigned int partial_segs = 0;
3499 unsigned int headroom;
3500 unsigned int len = head_skb->len;
3501 __be16 proto;
3502 bool csum, sg;
3503 int nfrags = skb_shinfo(head_skb)->nr_frags;
3504 int err = -ENOMEM;
3505 int i = 0;
3506 int pos;
3507 int dummy;
3508
3509 __skb_push(head_skb, doffset);
3510 proto = skb_network_protocol(head_skb, &dummy);
3511 if (unlikely(!proto))
3512 return ERR_PTR(-EINVAL);
3513
3514 sg = !!(features & NETIF_F_SG);
3515 csum = !!can_checksum_protocol(features, proto);
3516
3517 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3518 if (!(features & NETIF_F_GSO_PARTIAL)) {
3519 struct sk_buff *iter;
3520 unsigned int frag_len;
3521
3522 if (!list_skb ||
3523 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3524 goto normal;
3525
3526 /* If we get here then all the required
3527 * GSO features except frag_list are supported.
3528 * Try to split the SKB to multiple GSO SKBs
3529 * with no frag_list.
3530 * Currently we can do that only when the buffers don't
3531 * have a linear part and all the buffers except
3532 * the last are of the same length.
3533 */
3534 frag_len = list_skb->len;
3535 skb_walk_frags(head_skb, iter) {
3536 if (frag_len != iter->len && iter->next)
3537 goto normal;
3538 if (skb_headlen(iter) && !iter->head_frag)
3539 goto normal;
3540
3541 len -= iter->len;
3542 }
3543
3544 if (len != frag_len)
3545 goto normal;
3546 }
3547
3548 /* GSO partial only requires that we trim off any excess that
3549 * doesn't fit into an MSS sized block, so take care of that
3550 * now.
3551 */
3552 partial_segs = len / mss;
3553 if (partial_segs > 1)
3554 mss *= partial_segs;
3555 else
3556 partial_segs = 0;
3557 }
3558
3559normal:
3560 headroom = skb_headroom(head_skb);
3561 pos = skb_headlen(head_skb);
3562
3563 do {
3564 struct sk_buff *nskb;
3565 skb_frag_t *nskb_frag;
3566 int hsize;
3567 int size;
3568
3569 if (unlikely(mss == GSO_BY_FRAGS)) {
3570 len = list_skb->len;
3571 } else {
3572 len = head_skb->len - offset;
3573 if (len > mss)
3574 len = mss;
3575 }
3576
3577 hsize = skb_headlen(head_skb) - offset;
3578 if (hsize < 0)
3579 hsize = 0;
3580 if (hsize > len || !sg)
3581 hsize = len;
3582
3583 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3584 (skb_headlen(list_skb) == len || sg)) {
3585 BUG_ON(skb_headlen(list_skb) > len);
3586
3587 i = 0;
3588 nfrags = skb_shinfo(list_skb)->nr_frags;
3589 frag = skb_shinfo(list_skb)->frags;
3590 frag_skb = list_skb;
3591 pos += skb_headlen(list_skb);
3592
3593 while (pos < offset + len) {
3594 BUG_ON(i >= nfrags);
3595
3596 size = skb_frag_size(frag);
3597 if (pos + size > offset + len)
3598 break;
3599
3600 i++;
3601 pos += size;
3602 frag++;
3603 }
3604
3605 nskb = skb_clone(list_skb, GFP_ATOMIC);
3606 list_skb = list_skb->next;
3607
3608 if (unlikely(!nskb))
3609 goto err;
3610
3611 if (unlikely(pskb_trim(nskb, len))) {
3612 kfree_skb(nskb);
3613 goto err;
3614 }
3615
3616 hsize = skb_end_offset(nskb);
3617 if (skb_cow_head(nskb, doffset + headroom)) {
3618 kfree_skb(nskb);
3619 goto err;
3620 }
3621
3622 nskb->truesize += skb_end_offset(nskb) - hsize;
3623 skb_release_head_state(nskb);
3624 __skb_push(nskb, doffset);
3625 } else {
3626 nskb = __alloc_skb(hsize + doffset + headroom,
3627 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3628 NUMA_NO_NODE);
3629
3630 if (unlikely(!nskb))
3631 goto err;
3632
3633 skb_reserve(nskb, headroom);
3634 __skb_put(nskb, doffset);
3635 }
3636
3637 if (segs)
3638 tail->next = nskb;
3639 else
3640 segs = nskb;
3641 tail = nskb;
3642
3643 __copy_skb_header(nskb, head_skb);
3644
3645 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3646 skb_reset_mac_len(nskb);
3647
3648 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3649 nskb->data - tnl_hlen,
3650 doffset + tnl_hlen);
3651
3652 if (nskb->len == len + doffset)
3653 goto perform_csum_check;
3654
3655 if (!sg) {
3656 if (!nskb->remcsum_offload)
3657 nskb->ip_summed = CHECKSUM_NONE;
3658 SKB_GSO_CB(nskb)->csum =
3659 skb_copy_and_csum_bits(head_skb, offset,
3660 skb_put(nskb, len),
3661 len, 0);
3662 SKB_GSO_CB(nskb)->csum_start =
3663 skb_headroom(nskb) + doffset;
3664 continue;
3665 }
3666
3667 nskb_frag = skb_shinfo(nskb)->frags;
3668
3669 skb_copy_from_linear_data_offset(head_skb, offset,
3670 skb_put(nskb, hsize), hsize);
3671
3672 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3673 SKBTX_SHARED_FRAG;
3674
3675 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3676 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3677 goto err;
3678
3679 while (pos < offset + len) {
3680 if (i >= nfrags) {
3681 i = 0;
3682 nfrags = skb_shinfo(list_skb)->nr_frags;
3683 frag = skb_shinfo(list_skb)->frags;
3684 frag_skb = list_skb;
3685 if (!skb_headlen(list_skb)) {
3686 BUG_ON(!nfrags);
3687 } else {
3688 BUG_ON(!list_skb->head_frag);
3689
3690 /* to make room for head_frag. */
3691 i--;
3692 frag--;
3693 }
3694 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3695 skb_zerocopy_clone(nskb, frag_skb,
3696 GFP_ATOMIC))
3697 goto err;
3698
3699 list_skb = list_skb->next;
3700 }
3701
3702 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3703 MAX_SKB_FRAGS)) {
3704 net_warn_ratelimited(
3705 "skb_segment: too many frags: %u %u\n",
3706 pos, mss);
3707 goto err;
3708 }
3709
3710 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
3711 __skb_frag_ref(nskb_frag);
3712 size = skb_frag_size(nskb_frag);
3713
3714 if (pos < offset) {
3715 nskb_frag->page_offset += offset - pos;
3716 skb_frag_size_sub(nskb_frag, offset - pos);
3717 }
3718
3719 skb_shinfo(nskb)->nr_frags++;
3720
3721 if (pos + size <= offset + len) {
3722 i++;
3723 frag++;
3724 pos += size;
3725 } else {
3726 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3727 goto skip_fraglist;
3728 }
3729
3730 nskb_frag++;
3731 }
3732
3733skip_fraglist:
3734 nskb->data_len = len - hsize;
3735 nskb->len += nskb->data_len;
3736 nskb->truesize += nskb->data_len;
3737
3738perform_csum_check:
3739 if (!csum) {
3740 if (skb_has_shared_frag(nskb)) {
3741 err = __skb_linearize(nskb);
3742 if (err)
3743 goto err;
3744 }
3745 if (!nskb->remcsum_offload)
3746 nskb->ip_summed = CHECKSUM_NONE;
3747 SKB_GSO_CB(nskb)->csum =
3748 skb_checksum(nskb, doffset,
3749 nskb->len - doffset, 0);
3750 SKB_GSO_CB(nskb)->csum_start =
3751 skb_headroom(nskb) + doffset;
3752 }
3753 } while ((offset += len) < head_skb->len);
3754
3755 /* Some callers want to get the end of the list.
3756 * Put it in segs->prev to avoid walking the list.
3757 * (see validate_xmit_skb_list() for example)
3758 */
3759 segs->prev = tail;
3760
3761 if (partial_segs) {
3762 struct sk_buff *iter;
3763 int type = skb_shinfo(head_skb)->gso_type;
3764 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3765
3766 /* Update type to add partial and then remove dodgy if set */
3767 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3768 type &= ~SKB_GSO_DODGY;
3769
3770 /* Update GSO info and prepare to start updating headers on
3771 * our way back down the stack of protocols.
3772 */
3773 for (iter = segs; iter; iter = iter->next) {
3774 skb_shinfo(iter)->gso_size = gso_size;
3775 skb_shinfo(iter)->gso_segs = partial_segs;
3776 skb_shinfo(iter)->gso_type = type;
3777 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
3778 }
3779
3780 if (tail->len - doffset <= gso_size)
3781 skb_shinfo(tail)->gso_size = 0;
3782 else if (tail != segs)
3783 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3784 }
3785
3786	/* The following permits correct backpressure, for protocols
3787	 * using skb_set_owner_w().
3788	 * The idea is to transfer ownership from head_skb to the last segment.
3789 */
3790 if (head_skb->destructor == sock_wfree) {
3791 swap(tail->truesize, head_skb->truesize);
3792 swap(tail->destructor, head_skb->destructor);
3793 swap(tail->sk, head_skb->sk);
3794 }
3795 return segs;
3796
3797err:
3798 kfree_skb_list(segs);
3799 return ERR_PTR(err);
3800}
3801EXPORT_SYMBOL_GPL(skb_segment);
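
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * software GSO on the transmit path: segment, then walk the resulting
 * singly linked list.  The actual "transmit" step is elided and the
 * helper name is hypothetical.
 */
static inline int example_soft_segment(struct sk_buff *skb,
				       netdev_features_t features)
{
	struct sk_buff *segs = skb_segment(skb, features);

	if (IS_ERR(segs))
		return PTR_ERR(segs);

	while (segs) {
		struct sk_buff *seg = segs;

		segs = segs->next;
		seg->next = NULL;
		/* hand seg to the driver here; freed as a placeholder */
		kfree_skb(seg);
	}
	consume_skb(skb);	/* the original is no longer needed */
	return 0;
}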
3802
3803int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3804{
3805 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3806 unsigned int offset = skb_gro_offset(skb);
3807 unsigned int headlen = skb_headlen(skb);
3808 unsigned int len = skb_gro_len(skb);
3809 struct sk_buff *lp, *p = *head;
3810 unsigned int delta_truesize;
3811
3812 if (unlikely(p->len + len >= 65536))
3813 return -E2BIG;
3814
3815 lp = NAPI_GRO_CB(p)->last;
3816 pinfo = skb_shinfo(lp);
3817
3818 if (headlen <= offset) {
3819 skb_frag_t *frag;
3820 skb_frag_t *frag2;
3821 int i = skbinfo->nr_frags;
3822 int nr_frags = pinfo->nr_frags + i;
3823
3824 if (nr_frags > MAX_SKB_FRAGS)
3825 goto merge;
3826
3827 offset -= headlen;
3828 pinfo->nr_frags = nr_frags;
3829 skbinfo->nr_frags = 0;
3830
3831 frag = pinfo->frags + nr_frags;
3832 frag2 = skbinfo->frags + i;
3833 do {
3834 *--frag = *--frag2;
3835 } while (--i);
3836
3837 frag->page_offset += offset;
3838 skb_frag_size_sub(frag, offset);
3839
3840		/* all fragments' truesize: remove (head size + sk_buff) */
3841 delta_truesize = skb->truesize -
3842 SKB_TRUESIZE(skb_end_offset(skb));
3843
3844 skb->truesize -= skb->data_len;
3845 skb->len -= skb->data_len;
3846 skb->data_len = 0;
3847
3848 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3849 goto done;
3850 } else if (skb->head_frag) {
3851 int nr_frags = pinfo->nr_frags;
3852 skb_frag_t *frag = pinfo->frags + nr_frags;
3853 struct page *page = virt_to_head_page(skb->head);
3854 unsigned int first_size = headlen - offset;
3855 unsigned int first_offset;
3856
3857 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3858 goto merge;
3859
3860 first_offset = skb->data -
3861 (unsigned char *)page_address(page) +
3862 offset;
3863
3864 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3865
3866 frag->page.p = page;
3867 frag->page_offset = first_offset;
3868 skb_frag_size_set(frag, first_size);
3869
3870 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3871		/* We don't need to clear skbinfo->nr_frags here */
3872
3873 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3874 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3875 goto done;
3876 }
3877
3878merge:
3879 delta_truesize = skb->truesize;
3880 if (offset > headlen) {
3881 unsigned int eat = offset - headlen;
3882
3883 skbinfo->frags[0].page_offset += eat;
3884 skb_frag_size_sub(&skbinfo->frags[0], eat);
3885 skb->data_len -= eat;
3886 skb->len -= eat;
3887 offset = headlen;
3888 }
3889
3890 __skb_pull(skb, offset);
3891
3892 if (NAPI_GRO_CB(p)->last == p)
3893 skb_shinfo(p)->frag_list = skb;
3894 else
3895 NAPI_GRO_CB(p)->last->next = skb;
3896 NAPI_GRO_CB(p)->last = skb;
3897 __skb_header_release(skb);
3898 lp = p;
3899
3900done:
3901 NAPI_GRO_CB(p)->count++;
3902 p->data_len += len;
3903 p->truesize += delta_truesize;
3904 p->len += len;
3905 if (lp != p) {
3906 lp->data_len += len;
3907 lp->truesize += delta_truesize;
3908 lp->len += len;
3909 }
3910 NAPI_GRO_CB(skb)->same_flow = 1;
3911 return 0;
3912}
3913EXPORT_SYMBOL_GPL(skb_gro_receive);
3914
3915void __init skb_init(void)
3916{
3917 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
3918 sizeof(struct sk_buff),
3919 0,
3920 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3921 offsetof(struct sk_buff, cb),
3922 sizeof_field(struct sk_buff, cb),
3923 NULL);
3924 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3925 sizeof(struct sk_buff_fclones),
3926 0,
3927 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3928 NULL);
3929}
3930
3931static int
3932__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
3933 unsigned int recursion_level)
3934{
3935 int start = skb_headlen(skb);
3936 int i, copy = start - offset;
3937 struct sk_buff *frag_iter;
3938 int elt = 0;
3939
3940 if (unlikely(recursion_level >= 24))
3941 return -EMSGSIZE;
3942
3943 if (copy > 0) {
3944 if (copy > len)
3945 copy = len;
3946 sg_set_buf(sg, skb->data + offset, copy);
3947 elt++;
3948 if ((len -= copy) == 0)
3949 return elt;
3950 offset += copy;
3951 }
3952
3953 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3954 int end;
3955
3956 WARN_ON(start > offset + len);
3957
3958 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3959 if ((copy = end - offset) > 0) {
3960 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3961 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3962 return -EMSGSIZE;
3963
3964 if (copy > len)
3965 copy = len;
3966 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3967 frag->page_offset+offset-start);
3968 elt++;
3969 if (!(len -= copy))
3970 return elt;
3971 offset += copy;
3972 }
3973 start = end;
3974 }
3975
3976 skb_walk_frags(skb, frag_iter) {
3977 int end, ret;
3978
3979 WARN_ON(start > offset + len);
3980
3981 end = start + frag_iter->len;
3982 if ((copy = end - offset) > 0) {
3983 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3984 return -EMSGSIZE;
3985
3986 if (copy > len)
3987 copy = len;
3988 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3989 copy, recursion_level + 1);
3990 if (unlikely(ret < 0))
3991 return ret;
3992 elt += ret;
3993 if ((len -= copy) == 0)
3994 return elt;
3995 offset += copy;
3996 }
3997 start = end;
3998 }
3999 BUG_ON(len);
4000 return elt;
4001}
4002
4003/**
4004 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4005 * @skb: Socket buffer containing the buffers to be mapped
4006 * @sg: The scatter-gather list to map into
4007 * @offset: The offset into the buffer's contents to start mapping
4008 * @len: Length of buffer space to be mapped
4009 *
4010 * Fill the specified scatter-gather list with mappings/pointers into a
4011 * region of the buffer space attached to a socket buffer. Returns either
4012 * the number of scatterlist items used, or -EMSGSIZE if the contents
4013 * could not fit.
4014 */
4015int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4016{
4017 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4018
4019 if (nsg <= 0)
4020 return nsg;
4021
4022 sg_mark_end(&sg[nsg - 1]);
4023
4024 return nsg;
4025}
4026EXPORT_SYMBOL_GPL(skb_to_sgvec);
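
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * map a whole skb into a scatterlist, e.g. to feed a crypto transform.
 * MAX_SKB_FRAGS + 1 entries suffice for any skb without a frag list.
 * The helper name is hypothetical.
 */
static inline int example_map_to_sg(struct sk_buff *skb,
				    struct scatterlist *sg, int nents)
{
	sg_init_table(sg, nents);
	/* Returns the number of entries used, or -EMSGSIZE on overflow. */
	return skb_to_sgvec(skb, sg, 0, skb->len);
}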
4027
4028/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to
4029 * the given sglist without marking the sg that contains the last skb data
4030 * as the end. So the caller can manipulate the sg list at will when padding
4031 * new data after the first call, without calling sg_unmark_end to extend it.
4032 *
4033 * Scenario to use skb_to_sgvec_nomark:
4034 * 1. sg_init_table
4035 * 2. skb_to_sgvec_nomark(payload1)
4036 * 3. skb_to_sgvec_nomark(payload2)
4037 *
4038 * This is equivalent to:
4039 * 1. sg_init_table
4040 * 2. skb_to_sgvec(payload1)
4041 * 3. sg_unmark_end
4042 * 4. skb_to_sgvec(payload2)
4043 *
4044 * When mapping mutilple payload conditionally, skb_to_sgvec_nomark
4045 * is more preferable.
4046 */
4047int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4048 int offset, int len)
4049{
4050 return __skb_to_sgvec(skb, sg, offset, len, 0);
4051}
4052EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
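/*
 * Illustrative sketch (editorial addition): the two-call scenario from the
 * comment above, mapping two logical payloads of one skb into a single sg
 * table and terminating the list only once, at the very end. The helper
 * name and length parameters are hypothetical.
 */
#if 0
static int example_map_two_payloads(struct sk_buff *skb,
				    struct scatterlist *sg, int tbl_len,
				    int len1, int len2)
{
	int n1, n2;

	sg_init_table(sg, tbl_len);
	n1 = skb_to_sgvec_nomark(skb, sg, 0, len1);
	if (n1 < 0)
		return n1;
	n2 = skb_to_sgvec_nomark(skb, sg + n1, len1, len2);
	if (n2 < 0)
		return n2;
	sg_mark_end(&sg[n1 + n2 - 1]);	/* terminate once, at the end */
	return n1 + n2;
}
#endif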
4053
4054
4055
4056/**
4057 * skb_cow_data - Check that a socket buffer's data buffers are writable
4058 * @skb: The socket buffer to check.
4059 * @tailbits: Amount of trailing space to be added
4060 * @trailer: Returned pointer to the skb where the @tailbits space begins
4061 *
4062 * Make sure that the data buffers attached to a socket buffer are
4063 * writable. If they are not, private copies are made of the data buffers
4064 * and the socket buffer is set to use these instead.
4065 *
4066 * If @tailbits is given, make sure that there is space to write @tailbits
4067 * bytes of data beyond current end of socket buffer. @trailer will be
4068 * set to point to the skb in which this space begins.
4069 *
4070 * The number of scatterlist elements required to completely map the
4071 * COW'd and extended socket buffer will be returned.
4072 */
4073int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4074{
4075 int copyflag;
4076 int elt;
4077 struct sk_buff *skb1, **skb_p;
4078
4079 /* If skb is cloned or its head is paged, reallocate
4080 * head pulling out all the pages (pages are considered not writable
4081 * at the moment even if they are anonymous).
4082 */
4083 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4084 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4085 return -ENOMEM;
4086
4087 /* Easy case. Most packets will go this way. */
4088 if (!skb_has_frag_list(skb)) {
4089 /* A little trouble: not enough space for the trailer.
4090 * This should not happen when the stack is tuned to generate
4091 * good frames. On a miss we reallocate and reserve even more
4092 * space; 128 bytes is fair. */
4093
4094 if (skb_tailroom(skb) < tailbits &&
4095 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4096 return -ENOMEM;
4097
4098 /* Voila! */
4099 *trailer = skb;
4100 return 1;
4101 }
4102
4103 /* Misery. We are in trouble, going to mince the fragments... */
4104
4105 elt = 1;
4106 skb_p = &skb_shinfo(skb)->frag_list;
4107 copyflag = 0;
4108
4109 while ((skb1 = *skb_p) != NULL) {
4110 int ntail = 0;
4111
4112 /* The fragment is partially pulled by someone,
4113 * this can happen on input. Copy it and everything
4114 * after it. */
4115
4116 if (skb_shared(skb1))
4117 copyflag = 1;
4118
4119 /* If the skb is the last, worry about trailer. */
4120
4121 if (skb1->next == NULL && tailbits) {
4122 if (skb_shinfo(skb1)->nr_frags ||
4123 skb_has_frag_list(skb1) ||
4124 skb_tailroom(skb1) < tailbits)
4125 ntail = tailbits + 128;
4126 }
4127
4128 if (copyflag ||
4129 skb_cloned(skb1) ||
4130 ntail ||
4131 skb_shinfo(skb1)->nr_frags ||
4132 skb_has_frag_list(skb1)) {
4133 struct sk_buff *skb2;
4134
4135 /* Worst case: the fragment must be copied, and possibly expanded. */
4136 if (ntail == 0)
4137 skb2 = skb_copy(skb1, GFP_ATOMIC);
4138 else
4139 skb2 = skb_copy_expand(skb1,
4140 skb_headroom(skb1),
4141 ntail,
4142 GFP_ATOMIC);
4143 if (unlikely(skb2 == NULL))
4144 return -ENOMEM;
4145
4146 if (skb1->sk)
4147 skb_set_owner_w(skb2, skb1->sk);
4148
4149 /* Looking around. Are we still alive?
4150 * OK, link the new skb, drop the old one. */
4151
4152 skb2->next = skb1->next;
4153 *skb_p = skb2;
4154 kfree_skb(skb1);
4155 skb1 = skb2;
4156 }
4157 elt++;
4158 *trailer = skb1;
4159 skb_p = &skb1->next;
4160 }
4161
4162 return elt;
4163}
4164EXPORT_SYMBOL_GPL(skb_cow_data);
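/*
 * Illustrative sketch (editorial addition): the ESP-style use of
 * skb_cow_data() to make the chain writable and guarantee trailer room
 * before appending padding with pskb_put(). The helper name is hypothetical.
 */
#if 0
static int example_add_trailer(struct sk_buff *skb, int padlen)
{
	struct sk_buff *trailer;
	int nfrags;

	nfrags = skb_cow_data(skb, padlen, &trailer);
	if (nfrags < 0)
		return nfrags;
	/* 'trailer' now has at least padlen bytes of tailroom; nfrags is
	 * the sg element count needed to map the whole (COW'd) chain. */
	pskb_put(skb, trailer, padlen);
	return nfrags;
}
#endif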
4165
4166static void sock_rmem_free(struct sk_buff *skb)
4167{
4168 struct sock *sk = skb->sk;
4169
4170 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4171}
4172
4173static void skb_set_err_queue(struct sk_buff *skb)
4174{
4175 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4176 * So, it is safe to (mis)use it to mark skbs on the error queue.
4177 */
4178 skb->pkt_type = PACKET_OUTGOING;
4179 BUILD_BUG_ON(PACKET_OUTGOING == 0);
4180}
4181
4182/*
4183 * Note: We dont mem charge error packets (no sk_forward_alloc changes)
4184 */
4185int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4186{
4187 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4188 (unsigned int)sk->sk_rcvbuf)
4189 return -ENOMEM;
4190
4191 skb_orphan(skb);
4192 skb->sk = sk;
4193 skb->destructor = sock_rmem_free;
4194 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4195 skb_set_err_queue(skb);
4196
4197 /* before exiting rcu section, make sure dst is refcounted */
4198 skb_dst_force(skb);
4199
4200 skb_queue_tail(&sk->sk_error_queue, skb);
4201 if (!sock_flag(sk, SOCK_DEAD))
4202 sk->sk_error_report(sk);
4203 return 0;
4204}
4205EXPORT_SYMBOL(sock_queue_err_skb);
4206
4207static bool is_icmp_err_skb(const struct sk_buff *skb)
4208{
4209 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4210 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4211}
4212
4213struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4214{
4215 struct sk_buff_head *q = &sk->sk_error_queue;
4216 struct sk_buff *skb, *skb_next = NULL;
4217 bool icmp_next = false;
4218 unsigned long flags;
4219
4220 spin_lock_irqsave(&q->lock, flags);
4221 skb = __skb_dequeue(q);
4222 if (skb && (skb_next = skb_peek(q))) {
4223 icmp_next = is_icmp_err_skb(skb_next);
4224 if (icmp_next)
4225 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4226 }
4227 spin_unlock_irqrestore(&q->lock, flags);
4228
4229 if (is_icmp_err_skb(skb) && !icmp_next)
4230 sk->sk_err = 0;
4231
4232 if (skb_next)
4233 sk->sk_error_report(sk);
4234
4235 return skb;
4236}
4237EXPORT_SYMBOL(sock_dequeue_err_skb);
4238
4239/**
4240 * skb_clone_sk - create clone of skb, and take reference to socket
4241 * @skb: the skb to clone
4242 *
4243 * This function creates a clone of a buffer that holds a reference on
4244 * sk_refcnt. Buffers created via this function are meant to be
4245 * returned using sock_queue_err_skb, or free via kfree_skb.
4246 *
4247 * When passing buffers allocated with this function to sock_queue_err_skb
4248 * it is necessary to wrap the call with sock_hold/sock_put in order to
4249 * prevent the socket from being released prior to being enqueued on
4250 * the sk_error_queue.
4251 */
4252struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4253{
4254 struct sock *sk = skb->sk;
4255 struct sk_buff *clone;
4256
4257 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4258 return NULL;
4259
4260 clone = skb_clone(skb, GFP_ATOMIC);
4261 if (!clone) {
4262 sock_put(sk);
4263 return NULL;
4264 }
4265
4266 clone->sk = sk;
4267 clone->destructor = sock_efree;
4268
4269 return clone;
4270}
4271EXPORT_SYMBOL(skb_clone_sk);
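/*
 * Illustrative sketch (editorial addition): returning a clone to the owning
 * socket's error queue, with the sock_hold()/sock_put() pair the comment
 * above asks for. The helper name is hypothetical; compare
 * skb_complete_wifi_ack() below for a real caller of this shape.
 */
#if 0
static void example_queue_back_to_owner(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone_sk(skb);

	if (!clone)
		return;	/* no socket, or it is already going away */

	sock_hold(clone->sk);	/* keep sk alive across skb_orphan() */
	if (sock_queue_err_skb(clone->sk, clone))
		kfree_skb(clone);
	sock_put(clone->sk);
}
#endif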
4272
4273static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4274 struct sock *sk,
4275 int tstype,
4276 bool opt_stats)
4277{
4278 struct sock_exterr_skb *serr;
4279 int err;
4280
4281 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4282
4283 serr = SKB_EXT_ERR(skb);
4284 memset(serr, 0, sizeof(*serr));
4285 serr->ee.ee_errno = ENOMSG;
4286 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4287 serr->ee.ee_info = tstype;
4288 serr->opt_stats = opt_stats;
4289 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4290 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4291 serr->ee.ee_data = skb_shinfo(skb)->tskey;
4292 if (sk->sk_protocol == IPPROTO_TCP &&
4293 sk->sk_type == SOCK_STREAM)
4294 serr->ee.ee_data -= sk->sk_tskey;
4295 }
4296
4297 err = sock_queue_err_skb(sk, skb);
4298
4299 if (err)
4300 kfree_skb(skb);
4301}
4302
4303static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4304{
4305 bool ret;
4306
4307 if (likely(sysctl_tstamp_allow_data || tsonly))
4308 return true;
4309
4310 read_lock_bh(&sk->sk_callback_lock);
4311 ret = sk->sk_socket && sk->sk_socket->file &&
4312 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4313 read_unlock_bh(&sk->sk_callback_lock);
4314 return ret;
4315}
4316
4317void skb_complete_tx_timestamp(struct sk_buff *skb,
4318 struct skb_shared_hwtstamps *hwtstamps)
4319{
4320 struct sock *sk = skb->sk;
4321
4322 if (!skb_may_tx_timestamp(sk, false))
4323 goto err;
4324
4325 /* Take a reference to prevent skb_orphan() from freeing the socket,
4326 * but only if the socket refcount is not zero.
4327 */
4328 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4329 *skb_hwtstamps(skb) = *hwtstamps;
4330 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4331 sock_put(sk);
4332 return;
4333 }
4334
4335err:
4336 kfree_skb(skb);
4337}
4338EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4339
4340void __skb_tstamp_tx(struct sk_buff *orig_skb,
4341 struct skb_shared_hwtstamps *hwtstamps,
4342 struct sock *sk, int tstype)
4343{
4344 struct sk_buff *skb;
4345 bool tsonly, opt_stats = false;
4346
4347 if (!sk)
4348 return;
4349
4350 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4351 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4352 return;
4353
4354 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4355 if (!skb_may_tx_timestamp(sk, tsonly))
4356 return;
4357
4358 if (tsonly) {
4359#ifdef CONFIG_INET
4360 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4361 sk->sk_protocol == IPPROTO_TCP &&
4362 sk->sk_type == SOCK_STREAM) {
4363 skb = tcp_get_timestamping_opt_stats(sk);
4364 opt_stats = true;
4365 } else
4366#endif
4367 skb = alloc_skb(0, GFP_ATOMIC);
4368 } else {
4369 skb = skb_clone(orig_skb, GFP_ATOMIC);
4370 }
4371 if (!skb)
4372 return;
4373
4374 if (tsonly) {
4375 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4376 SKBTX_ANY_TSTAMP;
4377 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4378 }
4379
4380 if (hwtstamps)
4381 *skb_hwtstamps(skb) = *hwtstamps;
4382 else
4383 skb->tstamp = ktime_get_real();
4384
4385 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4386}
4387EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4388
4389void skb_tstamp_tx(struct sk_buff *orig_skb,
4390 struct skb_shared_hwtstamps *hwtstamps)
4391{
4392 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4393 SCM_TSTAMP_SND);
4394}
4395EXPORT_SYMBOL_GPL(skb_tstamp_tx);
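/*
 * Illustrative sketch (editorial addition): a driver reporting a hardware
 * TX timestamp for a transmitted packet. The helper name is hypothetical.
 */
#if 0
static void example_report_hw_tstamp(struct sk_buff *skb, ktime_t hwts)
{
	struct skb_shared_hwtstamps hwtstamps = {};

	hwtstamps.hwtstamp = hwts;
	/* clones the skb and queues it on the owner's error queue */
	skb_tstamp_tx(skb, &hwtstamps);
}
#endif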
4396
4397void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4398{
4399 struct sock *sk = skb->sk;
4400 struct sock_exterr_skb *serr;
4401 int err = 1;
4402
4403 skb->wifi_acked_valid = 1;
4404 skb->wifi_acked = acked;
4405
4406 serr = SKB_EXT_ERR(skb);
4407 memset(serr, 0, sizeof(*serr));
4408 serr->ee.ee_errno = ENOMSG;
4409 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4410
4411 /* Take a reference to prevent skb_orphan() from freeing the socket,
4412 * but only if the socket refcount is not zero.
4413 */
4414 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4415 err = sock_queue_err_skb(sk, skb);
4416 sock_put(sk);
4417 }
4418 if (err)
4419 kfree_skb(skb);
4420}
4421EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4422
4423/**
4424 * skb_partial_csum_set - set up and verify partial csum values for packet
4425 * @skb: the skb to set
4426 * @start: the number of bytes after skb->data to start checksumming.
4427 * @off: the offset from start to place the checksum.
4428 *
4429 * For untrusted partially-checksummed packets, we need to make sure the values
4430 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4431 *
4432 * This function checks and sets those values and skb->ip_summed: if this
4433 * returns false you should drop the packet.
4434 */
4435bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4436{
4437 if (unlikely(start > skb_headlen(skb)) ||
4438 unlikely((int)start + off > skb_headlen(skb) - 2)) {
4439 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
4440 start, off, skb_headlen(skb));
4441 return false;
4442 }
4443 skb->ip_summed = CHECKSUM_PARTIAL;
4444 skb->csum_start = skb_headroom(skb) + start;
4445 skb->csum_offset = off;
4446 skb_set_transport_header(skb, start);
4447 return true;
4448}
4449EXPORT_SYMBOL_GPL(skb_partial_csum_set);
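/*
 * Illustrative sketch (editorial addition): validating checksum metadata
 * from an untrusted source (virtio-style), here for a UDP packet whose L4
 * header starts l4_off bytes into the linear data. Hypothetical helper name.
 */
#if 0
static bool example_set_udp_partial_csum(struct sk_buff *skb, u16 l4_off)
{
	/* returns false if the offsets do not fit in the head: drop it */
	return skb_partial_csum_set(skb, l4_off,
				    offsetof(struct udphdr, check));
}
#endif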
4450
4451static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4452 unsigned int max)
4453{
4454 if (skb_headlen(skb) >= len)
4455 return 0;
4456
4457 /* If we need to pullup then pullup to the max, so we
4458 * won't need to do it again.
4459 */
4460 if (max > skb->len)
4461 max = skb->len;
4462
4463 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4464 return -ENOMEM;
4465
4466 if (skb_headlen(skb) < len)
4467 return -EPROTO;
4468
4469 return 0;
4470}
4471
4472#define MAX_TCP_HDR_LEN (15 * 4)
4473
4474static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4475 typeof(IPPROTO_IP) proto,
4476 unsigned int off)
4477{
4478 switch (proto) {
4479 int err;
4480
4481 case IPPROTO_TCP:
4482 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4483 off + MAX_TCP_HDR_LEN);
4484 if (!err && !skb_partial_csum_set(skb, off,
4485 offsetof(struct tcphdr,
4486 check)))
4487 err = -EPROTO;
4488 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4489
4490 case IPPROTO_UDP:
4491 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4492 off + sizeof(struct udphdr));
4493 if (!err && !skb_partial_csum_set(skb, off,
4494 offsetof(struct udphdr,
4495 check)))
4496 err = -EPROTO;
4497 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4498 }
4499
4500 return ERR_PTR(-EPROTO);
4501}
4502
4503/* This value should be large enough to cover a tagged ethernet header plus
4504 * maximally sized IP and TCP or UDP headers.
4505 */
4506#define MAX_IP_HDR_LEN 128
4507
4508static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4509{
4510 unsigned int off;
4511 bool fragment;
4512 __sum16 *csum;
4513 int err;
4514
4515 fragment = false;
4516
4517 err = skb_maybe_pull_tail(skb,
4518 sizeof(struct iphdr),
4519 MAX_IP_HDR_LEN);
4520 if (err < 0)
4521 goto out;
4522
4523 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4524 fragment = true;
4525
4526 off = ip_hdrlen(skb);
4527
4528 err = -EPROTO;
4529
4530 if (fragment)
4531 goto out;
4532
4533 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4534 if (IS_ERR(csum))
4535 return PTR_ERR(csum);
4536
4537 if (recalculate)
4538 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4539 ip_hdr(skb)->daddr,
4540 skb->len - off,
4541 ip_hdr(skb)->protocol, 0);
4542 err = 0;
4543
4544out:
4545 return err;
4546}
4547
4548/* This value should be large enough to cover a tagged ethernet header plus
4549 * an IPv6 header, all options, and a maximal TCP or UDP header.
4550 */
4551#define MAX_IPV6_HDR_LEN 256
4552
4553#define OPT_HDR(type, skb, off) \
4554 (type *)(skb_network_header(skb) + (off))
4555
4556static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4557{
4558 int err;
4559 u8 nexthdr;
4560 unsigned int off;
4561 unsigned int len;
4562 bool fragment;
4563 bool done;
4564 __sum16 *csum;
4565
4566 fragment = false;
4567 done = false;
4568
4569 off = sizeof(struct ipv6hdr);
4570
4571 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4572 if (err < 0)
4573 goto out;
4574
4575 nexthdr = ipv6_hdr(skb)->nexthdr;
4576
4577 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4578 while (off <= len && !done) {
4579 switch (nexthdr) {
4580 case IPPROTO_DSTOPTS:
4581 case IPPROTO_HOPOPTS:
4582 case IPPROTO_ROUTING: {
4583 struct ipv6_opt_hdr *hp;
4584
4585 err = skb_maybe_pull_tail(skb,
4586 off +
4587 sizeof(struct ipv6_opt_hdr),
4588 MAX_IPV6_HDR_LEN);
4589 if (err < 0)
4590 goto out;
4591
4592 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4593 nexthdr = hp->nexthdr;
4594 off += ipv6_optlen(hp);
4595 break;
4596 }
4597 case IPPROTO_AH: {
4598 struct ip_auth_hdr *hp;
4599
4600 err = skb_maybe_pull_tail(skb,
4601 off +
4602 sizeof(struct ip_auth_hdr),
4603 MAX_IPV6_HDR_LEN);
4604 if (err < 0)
4605 goto out;
4606
4607 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4608 nexthdr = hp->nexthdr;
4609 off += ipv6_authlen(hp);
4610 break;
4611 }
4612 case IPPROTO_FRAGMENT: {
4613 struct frag_hdr *hp;
4614
4615 err = skb_maybe_pull_tail(skb,
4616 off +
4617 sizeof(struct frag_hdr),
4618 MAX_IPV6_HDR_LEN);
4619 if (err < 0)
4620 goto out;
4621
4622 hp = OPT_HDR(struct frag_hdr, skb, off);
4623
4624 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4625 fragment = true;
4626
4627 nexthdr = hp->nexthdr;
4628 off += sizeof(struct frag_hdr);
4629 break;
4630 }
4631 default:
4632 done = true;
4633 break;
4634 }
4635 }
4636
4637 err = -EPROTO;
4638
4639 if (!done || fragment)
4640 goto out;
4641
4642 csum = skb_checksum_setup_ip(skb, nexthdr, off);
4643 if (IS_ERR(csum))
4644 return PTR_ERR(csum);
4645
4646 if (recalculate)
4647 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4648 &ipv6_hdr(skb)->daddr,
4649 skb->len - off, nexthdr, 0);
4650 err = 0;
4651
4652out:
4653 return err;
4654}
4655
4656/**
4657 * skb_checksum_setup - set up partial checksum offset
4658 * @skb: the skb to set up
4659 * @recalculate: if true the pseudo-header checksum will be recalculated
4660 */
4661int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4662{
4663 int err;
4664
4665 switch (skb->protocol) {
4666 case htons(ETH_P_IP):
4667 err = skb_checksum_setup_ipv4(skb, recalculate);
4668 break;
4669
4670 case htons(ETH_P_IPV6):
4671 err = skb_checksum_setup_ipv6(skb, recalculate);
4672 break;
4673
4674 default:
4675 err = -EPROTO;
4676 break;
4677 }
4678
4679 return err;
4680}
4681EXPORT_SYMBOL(skb_checksum_setup);
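/*
 * Illustrative sketch (editorial addition): fixing up checksum metadata of
 * a packet from an untrusted sender, recomputing the pseudo-header checksum
 * and dropping the packet on failure. Hypothetical helper name.
 */
#if 0
static int example_rx_csum_fixup(struct sk_buff *skb)
{
	int err = skb_checksum_setup(skb, true);

	if (err)
		kfree_skb(skb);
	return err;
}
#endif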
4682
4683/**
4684 * skb_checksum_maybe_trim - maybe trims the given skb
4685 * @skb: the skb to check
4686 * @transport_len: the data length beyond the network header
4687 *
4688 * Checks whether the given skb has data beyond the given transport length.
4689 * If so, returns a cloned skb trimmed to this transport length.
4690 * Otherwise returns the provided skb. Returns NULL in error cases
4691 * (e.g. transport_len exceeds skb length or out-of-memory).
4692 *
4693 * Caller needs to set the skb transport header and free any returned skb if it
4694 * differs from the provided skb.
4695 */
4696static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4697 unsigned int transport_len)
4698{
4699 struct sk_buff *skb_chk;
4700 unsigned int len = skb_transport_offset(skb) + transport_len;
4701 int ret;
4702
4703 if (skb->len < len)
4704 return NULL;
4705 else if (skb->len == len)
4706 return skb;
4707
4708 skb_chk = skb_clone(skb, GFP_ATOMIC);
4709 if (!skb_chk)
4710 return NULL;
4711
4712 ret = pskb_trim_rcsum(skb_chk, len);
4713 if (ret) {
4714 kfree_skb(skb_chk);
4715 return NULL;
4716 }
4717
4718 return skb_chk;
4719}
4720
4721/**
4722 * skb_checksum_trimmed - validate checksum of an skb
4723 * @skb: the skb to check
4724 * @transport_len: the data length beyond the network header
4725 * @skb_chkf: checksum function to use
4726 *
4727 * Applies the given checksum function skb_chkf to the provided skb.
4728 * Returns a checked and maybe trimmed skb. Returns NULL on error.
4729 *
4730 * If the skb has data beyond the given transport length, then a
4731 * trimmed & cloned skb is checked and returned.
4732 *
4733 * Caller needs to set the skb transport header and free any returned skb if it
4734 * differs from the provided skb.
4735 */
4736struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4737 unsigned int transport_len,
4738 __sum16(*skb_chkf)(struct sk_buff *skb))
4739{
4740 struct sk_buff *skb_chk;
4741 unsigned int offset = skb_transport_offset(skb);
4742 __sum16 ret;
4743
4744 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4745 if (!skb_chk)
4746 goto err;
4747
4748 if (!pskb_may_pull(skb_chk, offset))
4749 goto err;
4750
4751 skb_pull_rcsum(skb_chk, offset);
4752 ret = skb_chkf(skb_chk);
4753 skb_push_rcsum(skb_chk, offset);
4754
4755 if (ret)
4756 goto err;
4757
4758 return skb_chk;
4759
4760err:
4761 if (skb_chk && skb_chk != skb)
4762 kfree_skb(skb_chk);
4763
4764 return NULL;
4766}
4767EXPORT_SYMBOL(skb_checksum_trimmed);
4768
4769void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4770{
4771 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4772 skb->dev->name);
4773}
4774EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4775
4776void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4777{
4778 if (head_stolen) {
4779 skb_release_head_state(skb);
4780 kmem_cache_free(skbuff_head_cache, skb);
4781 } else {
4782 __kfree_skb(skb);
4783 }
4784}
4785EXPORT_SYMBOL(kfree_skb_partial);
4786
4787/**
4788 * skb_try_coalesce - try to merge skb to prior one
4789 * @to: prior buffer
4790 * @from: buffer to add
4791 * @fragstolen: pointer to boolean
4792 * @delta_truesize: how much more was allocated than was requested
4793 */
4794bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4795 bool *fragstolen, int *delta_truesize)
4796{
4797 struct skb_shared_info *to_shinfo, *from_shinfo;
4798 int i, delta, len = from->len;
4799
4800 *fragstolen = false;
4801
4802 if (skb_cloned(to))
4803 return false;
4804
4805 if (len <= skb_tailroom(to)) {
4806 if (len)
4807 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4808 *delta_truesize = 0;
4809 return true;
4810 }
4811
4812 to_shinfo = skb_shinfo(to);
4813 from_shinfo = skb_shinfo(from);
4814 if (to_shinfo->frag_list || from_shinfo->frag_list)
4815 return false;
4816 if (skb_zcopy(to) || skb_zcopy(from))
4817 return false;
4818
4819 if (skb_headlen(from) != 0) {
4820 struct page *page;
4821 unsigned int offset;
4822
4823 if (to_shinfo->nr_frags +
4824 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
4825 return false;
4826
4827 if (skb_head_is_locked(from))
4828 return false;
4829
4830 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4831
4832 page = virt_to_head_page(from->head);
4833 offset = from->data - (unsigned char *)page_address(page);
4834
4835 skb_fill_page_desc(to, to_shinfo->nr_frags,
4836 page, offset, skb_headlen(from));
4837 *fragstolen = true;
4838 } else {
4839 if (to_shinfo->nr_frags +
4840 from_shinfo->nr_frags > MAX_SKB_FRAGS)
4841 return false;
4842
4843 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4844 }
4845
4846 WARN_ON_ONCE(delta < len);
4847
4848 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
4849 from_shinfo->frags,
4850 from_shinfo->nr_frags * sizeof(skb_frag_t));
4851 to_shinfo->nr_frags += from_shinfo->nr_frags;
4852
4853 if (!skb_cloned(from))
4854 from_shinfo->nr_frags = 0;
4855
4856 /* if the skb is not cloned this does nothing
4857 * since we set nr_frags to 0.
4858 */
4859 for (i = 0; i < from_shinfo->nr_frags; i++)
4860 __skb_frag_ref(&from_shinfo->frags[i]);
4861
4862 to->truesize += delta;
4863 to->len += len;
4864 to->data_len += len;
4865
4866 *delta_truesize = delta;
4867 return true;
4868}
4869EXPORT_SYMBOL(skb_try_coalesce);
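/*
 * Illustrative sketch (editorial addition): the usual receive-queue pattern
 * around skb_try_coalesce(), as in TCP coalescing. A real caller would also
 * charge delta to its memory accounting. Hypothetical helper name.
 */
#if 0
static bool example_coalesce_tail(struct sk_buff *tail, struct sk_buff *skb)
{
	bool fragstolen;
	int delta;

	if (!skb_try_coalesce(tail, skb, &fragstolen, &delta))
		return false;
	/* data now lives in 'tail'; free what is left of 'skb' */
	kfree_skb_partial(skb, fragstolen);
	return true;
}
#endif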
4870
4871/**
4872 * skb_scrub_packet - scrub an skb
4873 *
4874 * @skb: buffer to clean
4875 * @xnet: packet is crossing netns
4876 *
4877 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4878 * into/from a tunnel. Some information has to be cleared during these
4879 * operations.
4880 * skb_scrub_packet can also be used to clean a skb before injecting it in
4881 * another namespace (@xnet == true). We have to clear all information in the
4882 * skb that could impact namespace isolation.
4883 */
4884void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4885{
4886 skb->tstamp = 0;
4887 skb->pkt_type = PACKET_HOST;
4888 skb->skb_iif = 0;
4889 skb->ignore_df = 0;
4890 skb_dst_drop(skb);
4891 secpath_reset(skb);
4892 nf_reset(skb);
4893 nf_reset_trace(skb);
4894
4895 if (!xnet)
4896 return;
4897
4898 ipvs_reset(skb);
4899 skb_orphan(skb);
4900 skb->mark = 0;
4901}
4902EXPORT_SYMBOL_GPL(skb_scrub_packet);
4903
4904/**
4905 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4906 *
4907 * @skb: GSO skb
4908 *
4909 * skb_gso_transport_seglen is used to determine the real size of the
4910 * individual segments, including Layer4 headers (TCP/UDP).
4911 *
4912 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4913 */
4914static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4915{
4916 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4917 unsigned int thlen = 0;
4918
4919 if (skb->encapsulation) {
4920 thlen = skb_inner_transport_header(skb) -
4921 skb_transport_header(skb);
4922
4923 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4924 thlen += inner_tcp_hdrlen(skb);
4925 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4926 thlen = tcp_hdrlen(skb);
4927 } else if (unlikely(skb_is_gso_sctp(skb))) {
4928 thlen = sizeof(struct sctphdr);
4929 }
4930 /* UFO sets gso_size to the size of the fragmentation
4931 * payload, i.e. the size of the L4 (UDP) header is already
4932 * accounted for.
4933 */
4934 return thlen + shinfo->gso_size;
4935}
4936
4937/**
4938 * skb_gso_network_seglen - Return length of individual segments of a gso packet
4939 *
4940 * @skb: GSO skb
4941 *
4942 * skb_gso_network_seglen is used to determine the real size of the
4943 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4944 *
4945 * The MAC/L2 header is not accounted for.
4946 */
4947static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4948{
4949 unsigned int hdr_len = skb_transport_header(skb) -
4950 skb_network_header(skb);
4951
4952 return hdr_len + skb_gso_transport_seglen(skb);
4953}
4954
4955/**
4956 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4957 *
4958 * @skb: GSO skb
4959 *
4960 * skb_gso_mac_seglen is used to determine the real size of the
4961 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4962 * headers (TCP/UDP).
4963 */
4964static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4965{
4966 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4967
4968 return hdr_len + skb_gso_transport_seglen(skb);
4969}
4970
4971/**
4972 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
4973 *
4974 * There are a couple of instances where we have a GSO skb, and we
4975 * want to determine what size it would be after it is segmented.
4976 *
4977 * We might want to check:
4978 * - L3+L4+payload size (e.g. IP forwarding)
4979 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
4980 *
4981 * This is a helper to do that correctly considering GSO_BY_FRAGS.
4982 *
4983 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
4984 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
4985 *
4986 * @max_len: The maximum permissible length.
4987 *
4988 * Returns true if the segmented length <= max length.
4989 */
4990static inline bool skb_gso_size_check(const struct sk_buff *skb,
4991 unsigned int seg_len,
4992 unsigned int max_len) {
4993 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4994 const struct sk_buff *iter;
4995
4996 if (shinfo->gso_size != GSO_BY_FRAGS)
4997 return seg_len <= max_len;
4998
4999 /* Undo this so we can re-use header sizes */
5000 seg_len -= GSO_BY_FRAGS;
5001
5002 skb_walk_frags(skb, iter) {
5003 if (seg_len + skb_headlen(iter) > max_len)
5004 return false;
5005 }
5006
5007 return true;
5008}
5009
5010/**
5011 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5012 *
5013 * @skb: GSO skb
5014 * @mtu: MTU to validate against
5015 *
5016 * skb_gso_validate_network_len validates if a given skb will fit a
5017 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5018 * payload.
5019 */
5020bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5021{
5022 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5023}
5024EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
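/*
 * Illustrative sketch (editorial addition): an IP-forwarding style check
 * that a packet, GSO or not, will fit the outgoing MTU. Hypothetical
 * helper name.
 */
#if 0
static bool example_fits_mtu(const struct sk_buff *skb, unsigned int mtu)
{
	if (!skb_is_gso(skb))
		return skb->len <= mtu;	/* non-GSO: plain length check */
	return skb_gso_validate_network_len(skb, mtu);
}
#endif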
5025
5026/**
5027 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5028 *
5029 * @skb: GSO skb
5030 * @len: length to validate against
5031 *
5032 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5033 * length once split, including L2, L3 and L4 headers and the payload.
5034 */
5035bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5036{
5037 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5038}
5039EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5040
5041static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5042{
5043 int mac_len;
5044
5045 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5046 kfree_skb(skb);
5047 return NULL;
5048 }
5049
5050 mac_len = skb->data - skb_mac_header(skb);
5051 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5052 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5053 mac_len - VLAN_HLEN - ETH_TLEN);
5054 }
5055 skb->mac_header += VLAN_HLEN;
5056 return skb;
5057}
5058
5059struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5060{
5061 struct vlan_hdr *vhdr;
5062 u16 vlan_tci;
5063
5064 if (unlikely(skb_vlan_tag_present(skb))) {
5065 /* vlan_tci is already set up, so leave this for another time */
5066 return skb;
5067 }
5068
5069 skb = skb_share_check(skb, GFP_ATOMIC);
5070 if (unlikely(!skb))
5071 goto err_free;
5072
5073 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
5074 goto err_free;
5075
5076 vhdr = (struct vlan_hdr *)skb->data;
5077 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5078 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5079
5080 skb_pull_rcsum(skb, VLAN_HLEN);
5081 vlan_set_encap_proto(skb, vhdr);
5082
5083 skb = skb_reorder_vlan_header(skb);
5084 if (unlikely(!skb))
5085 goto err_free;
5086
5087 skb_reset_network_header(skb);
5088 skb_reset_transport_header(skb);
5089 skb_reset_mac_len(skb);
5090
5091 return skb;
5092
5093err_free:
5094 kfree_skb(skb);
5095 return NULL;
5096}
5097EXPORT_SYMBOL(skb_vlan_untag);
5098
5099int skb_ensure_writable(struct sk_buff *skb, int write_len)
5100{
5101 if (!pskb_may_pull(skb, write_len))
5102 return -ENOMEM;
5103
5104 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5105 return 0;
5106
5107 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5108}
5109EXPORT_SYMBOL(skb_ensure_writable);
5110
5111 /* Remove the VLAN header from the packet and update the csum accordingly.
5112 * Expects an skb without skb_vlan_tag_present set, but with a vlan tag payload.
5113 */
5114int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5115{
5116 struct vlan_hdr *vhdr;
5117 int offset = skb->data - skb_mac_header(skb);
5118 int err;
5119
5120 if (WARN_ONCE(offset,
5121 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5122 offset)) {
5123 return -EINVAL;
5124 }
5125
5126 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5127 if (unlikely(err))
5128 return err;
5129
5130 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5131
5132 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5133 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5134
5135 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5136 __skb_pull(skb, VLAN_HLEN);
5137
5138 vlan_set_encap_proto(skb, vhdr);
5139 skb->mac_header += VLAN_HLEN;
5140
5141 if (skb_network_offset(skb) < ETH_HLEN)
5142 skb_set_network_header(skb, ETH_HLEN);
5143
5144 skb_reset_mac_len(skb);
5145
5146 return err;
5147}
5148EXPORT_SYMBOL(__skb_vlan_pop);
5149
5150/* Pop a vlan tag either from hwaccel or from payload.
5151 * Expects skb->data at mac header.
5152 */
5153int skb_vlan_pop(struct sk_buff *skb)
5154{
5155 u16 vlan_tci;
5156 __be16 vlan_proto;
5157 int err;
5158
5159 if (likely(skb_vlan_tag_present(skb))) {
5160 skb->vlan_tci = 0;
5161 } else {
5162 if (unlikely(!eth_type_vlan(skb->protocol)))
5163 return 0;
5164
5165 err = __skb_vlan_pop(skb, &vlan_tci);
5166 if (err)
5167 return err;
5168 }
5169 /* move next vlan tag to hw accel tag */
5170 if (likely(!eth_type_vlan(skb->protocol)))
5171 return 0;
5172
5173 vlan_proto = skb->protocol;
5174 err = __skb_vlan_pop(skb, &vlan_tci);
5175 if (unlikely(err))
5176 return err;
5177
5178 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5179 return 0;
5180}
5181EXPORT_SYMBOL(skb_vlan_pop);
5182
5183/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5184 * Expects skb->data at mac header.
5185 */
5186int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5187{
5188 if (skb_vlan_tag_present(skb)) {
5189 int offset = skb->data - skb_mac_header(skb);
5190 int err;
5191
5192 if (WARN_ONCE(offset,
5193 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5194 offset)) {
5195 return -EINVAL;
5196 }
5197
5198 err = __vlan_insert_tag(skb, skb->vlan_proto,
5199 skb_vlan_tag_get(skb));
5200 if (err)
5201 return err;
5202
5203 skb->protocol = skb->vlan_proto;
5204 skb->mac_len += VLAN_HLEN;
5205
5206 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5207 }
5208 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5209 return 0;
5210}
5211EXPORT_SYMBOL(skb_vlan_push);
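/*
 * Illustrative sketch (editorial addition): rewriting the outermost VLAN
 * tag by popping it and pushing a new one, with skb->data still at the mac
 * header as both helpers expect. Hypothetical helper name.
 */
#if 0
static int example_rewrite_vlan(struct sk_buff *skb, u16 new_vid)
{
	int err = skb_vlan_pop(skb);

	if (err)
		return err;
	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_vid);
}
#endif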
5212
5213/**
5214 * alloc_skb_with_frags - allocate skb with page frags
5215 *
5216 * @header_len: size of linear part
5217 * @data_len: needed length in frags
5218 * @max_page_order: max page order desired.
5219 * @errcode: pointer to error code if any
5220 * @gfp_mask: allocation mask
5221 *
5222 * This can be used to allocate a paged skb, given a maximal order for frags.
5223 */
5224struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5225 unsigned long data_len,
5226 int max_page_order,
5227 int *errcode,
5228 gfp_t gfp_mask)
5229{
5230 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5231 unsigned long chunk;
5232 struct sk_buff *skb;
5233 struct page *page;
5234 gfp_t gfp_head;
5235 int i;
5236
5237 *errcode = -EMSGSIZE;
5238 /* Note this test could be relaxed, if we succeed in allocating
5239 * high order pages...
5240 */
5241 if (npages > MAX_SKB_FRAGS)
5242 return NULL;
5243
5244 gfp_head = gfp_mask;
5245 if (gfp_head & __GFP_DIRECT_RECLAIM)
5246 gfp_head |= __GFP_RETRY_MAYFAIL;
5247
5248 *errcode = -ENOBUFS;
5249 skb = alloc_skb(header_len, gfp_head);
5250 if (!skb)
5251 return NULL;
5252
5253 skb->truesize += npages << PAGE_SHIFT;
5254
5255 for (i = 0; npages > 0; i++) {
5256 int order = max_page_order;
5257
5258 while (order) {
5259 if (npages >= 1 << order) {
5260 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5261 __GFP_COMP |
5262 __GFP_NOWARN |
5263 __GFP_NORETRY,
5264 order);
5265 if (page)
5266 goto fill_page;
5267 /* Do not retry other high order allocations */
5268 order = 1;
5269 max_page_order = 0;
5270 }
5271 order--;
5272 }
5273 page = alloc_page(gfp_mask);
5274 if (!page)
5275 goto failure;
5276fill_page:
5277 chunk = min_t(unsigned long, data_len,
5278 PAGE_SIZE << order);
5279 skb_fill_page_desc(skb, i, page, 0, chunk);
5280 data_len -= chunk;
5281 npages -= 1 << order;
5282 }
5283 return skb;
5284
5285failure:
5286 kfree_skb(skb);
5287 return NULL;
5288}
5289EXPORT_SYMBOL(alloc_skb_with_frags);
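/*
 * Illustrative sketch (editorial addition): allocating a mostly-paged skb
 * with a small linear header area. The order choice and helper name are
 * hypothetical; compare sock_alloc_send_pskb() for a real caller.
 */
#if 0
static struct sk_buff *example_alloc_paged(unsigned long header_len,
					   unsigned long data_len)
{
	int errcode;
	struct sk_buff *skb;

	skb = alloc_skb_with_frags(header_len, data_len,
				   PAGE_ALLOC_COSTLY_ORDER,
				   &errcode, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(errcode);
	skb_reserve(skb, header_len);	/* keep the linear part as headroom */
	return skb;
}
#endif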
5290
5291/* carve out the first off bytes from skb when off < headlen */
5292static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5293 const int headlen, gfp_t gfp_mask)
5294{
5295 int i;
5296 int size = skb_end_offset(skb);
5297 int new_hlen = headlen - off;
5298 u8 *data;
5299
5300 size = SKB_DATA_ALIGN(size);
5301
5302 if (skb_pfmemalloc(skb))
5303 gfp_mask |= __GFP_MEMALLOC;
5304 data = kmalloc_reserve(size +
5305 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5306 gfp_mask, NUMA_NO_NODE, NULL);
5307 if (!data)
5308 return -ENOMEM;
5309
5310 size = SKB_WITH_OVERHEAD(ksize(data));
5311
5312 /* Copy real data, and all frags */
5313 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5314 skb->len -= off;
5315
5316 memcpy((struct skb_shared_info *)(data + size),
5317 skb_shinfo(skb),
5318 offsetof(struct skb_shared_info,
5319 frags[skb_shinfo(skb)->nr_frags]));
5320 if (skb_cloned(skb)) {
5321 /* drop the old head gracefully */
5322 if (skb_orphan_frags(skb, gfp_mask)) {
5323 kfree(data);
5324 return -ENOMEM;
5325 }
5326 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5327 skb_frag_ref(skb, i);
5328 if (skb_has_frag_list(skb))
5329 skb_clone_fraglist(skb);
5330 skb_release_data(skb);
5331 } else {
5332 /* we can reuse the existing refcount - all we did was
5333 * relocate values
5334 */
5335 skb_free_head(skb);
5336 }
5337
5338 skb->head = data;
5339 skb->data = data;
5340 skb->head_frag = 0;
5341#ifdef NET_SKBUFF_DATA_USES_OFFSET
5342 skb->end = size;
5343#else
5344 skb->end = skb->head + size;
5345#endif
5346 skb_set_tail_pointer(skb, skb_headlen(skb));
5347 skb_headers_offset_update(skb, 0);
5348 skb->cloned = 0;
5349 skb->hdr_len = 0;
5350 skb->nohdr = 0;
5351 atomic_set(&skb_shinfo(skb)->dataref, 1);
5352
5353 return 0;
5354}
5355
5356static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5357
5358/* carve out the first eat bytes from skb's frag_list. May recurse into
5359 * pskb_carve()
5360 */
5361static int pskb_carve_frag_list(struct sk_buff *skb,
5362 struct skb_shared_info *shinfo, int eat,
5363 gfp_t gfp_mask)
5364{
5365 struct sk_buff *list = shinfo->frag_list;
5366 struct sk_buff *clone = NULL;
5367 struct sk_buff *insp = NULL;
5368
5369 do {
5370 if (!list) {
5371 pr_err("Not enough bytes to eat. Want %d\n", eat);
5372 return -EFAULT;
5373 }
5374 if (list->len <= eat) {
5375 /* Eaten as whole. */
5376 eat -= list->len;
5377 list = list->next;
5378 insp = list;
5379 } else {
5380 /* Eaten partially. */
5381 if (skb_shared(list)) {
5382 clone = skb_clone(list, gfp_mask);
5383 if (!clone)
5384 return -ENOMEM;
5385 insp = list->next;
5386 list = clone;
5387 } else {
5388 /* This may be pulled without problems. */
5389 insp = list;
5390 }
5391 if (pskb_carve(list, eat, gfp_mask) < 0) {
5392 kfree_skb(clone);
5393 return -ENOMEM;
5394 }
5395 break;
5396 }
5397 } while (eat);
5398
5399 /* Free pulled out fragments. */
5400 while ((list = shinfo->frag_list) != insp) {
5401 shinfo->frag_list = list->next;
5402 kfree_skb(list);
5403 }
5404 /* And insert new clone at head. */
5405 if (clone) {
5406 clone->next = list;
5407 shinfo->frag_list = clone;
5408 }
5409 return 0;
5410}
5411
5412/* carve off first len bytes from skb. Split line (off) is in the
5413 * non-linear part of skb
5414 */
5415static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5416 int pos, gfp_t gfp_mask)
5417{
5418 int i, k = 0;
5419 int size = skb_end_offset(skb);
5420 u8 *data;
5421 const int nfrags = skb_shinfo(skb)->nr_frags;
5422 struct skb_shared_info *shinfo;
5423
5424 size = SKB_DATA_ALIGN(size);
5425
5426 if (skb_pfmemalloc(skb))
5427 gfp_mask |= __GFP_MEMALLOC;
5428 data = kmalloc_reserve(size +
5429 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5430 gfp_mask, NUMA_NO_NODE, NULL);
5431 if (!data)
5432 return -ENOMEM;
5433
5434 size = SKB_WITH_OVERHEAD(ksize(data));
5435
5436 memcpy((struct skb_shared_info *)(data + size),
5437 skb_shinfo(skb), offsetof(struct skb_shared_info,
5438 frags[skb_shinfo(skb)->nr_frags]));
5439 if (skb_orphan_frags(skb, gfp_mask)) {
5440 kfree(data);
5441 return -ENOMEM;
5442 }
5443 shinfo = (struct skb_shared_info *)(data + size);
5444 for (i = 0; i < nfrags; i++) {
5445 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5446
5447 if (pos + fsize > off) {
5448 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5449
5450 if (pos < off) {
5451 /* Split frag.
5452 * We have two variants in this case:
5453 * 1. Move the whole frag to the second
5454 * part, if possible. E.g. this approach
5455 * is mandatory for TUX, where splitting
5456 * is expensive.
5457 * 2. Split accurately. We do the latter.
5458 */
5459 shinfo->frags[0].page_offset += off - pos;
5460 skb_frag_size_sub(&shinfo->frags[0], off - pos);
5461 }
5462 skb_frag_ref(skb, i);
5463 k++;
5464 }
5465 pos += fsize;
5466 }
5467 shinfo->nr_frags = k;
5468 if (skb_has_frag_list(skb))
5469 skb_clone_fraglist(skb);
5470
5471 if (k == 0) {
5472 /* split line is in frag list */
5473 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
5474 }
5475 skb_release_data(skb);
5476
5477 skb->head = data;
5478 skb->head_frag = 0;
5479 skb->data = data;
5480#ifdef NET_SKBUFF_DATA_USES_OFFSET
5481 skb->end = size;
5482#else
5483 skb->end = skb->head + size;
5484#endif
5485 skb_reset_tail_pointer(skb);
5486 skb_headers_offset_update(skb, 0);
5487 skb->cloned = 0;
5488 skb->hdr_len = 0;
5489 skb->nohdr = 0;
5490 skb->len -= off;
5491 skb->data_len = skb->len;
5492 atomic_set(&skb_shinfo(skb)->dataref, 1);
5493 return 0;
5494}
5495
5496/* remove len bytes from the beginning of the skb */
5497static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
5498{
5499 int headlen = skb_headlen(skb);
5500
5501 if (len < headlen)
5502 return pskb_carve_inside_header(skb, len, headlen, gfp);
5503 else
5504 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
5505}
5506
5507/* Extract to_copy bytes starting at off from skb, and return this in
5508 * a new skb
5509 */
5510struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
5511 int to_copy, gfp_t gfp)
5512{
5513 struct sk_buff *clone = skb_clone(skb, gfp);
5514
5515 if (!clone)
5516 return NULL;
5517
5518 if (pskb_carve(clone, off, gfp) < 0 ||
5519 pskb_trim(clone, to_copy)) {
5520 kfree_skb(clone);
5521 return NULL;
5522 }
5523 return clone;
5524}
5525EXPORT_SYMBOL(pskb_extract);
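/*
 * Illustrative sketch (editorial addition): carving the payload that
 * follows a hdr_len byte header into its own skb, leaving the original
 * untouched. Hypothetical helper name.
 */
#if 0
static struct sk_buff *example_extract_payload(struct sk_buff *skb,
					       int hdr_len)
{
	return pskb_extract(skb, hdr_len, skb->len - hdr_len, GFP_ATOMIC);
}
#endif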
5526
5527/**
5528 * skb_condense - try to get rid of fragments/frag_list if possible
5529 * @skb: buffer
5530 *
5531 * Can be used to save memory before skb is added to a busy queue.
5532 * If packet has bytes in frags and enough tail room in skb->head,
5533 * pull all of them, so that we can free the frags right now and adjust
5534 * truesize.
5535 * Notes:
5536 * We do not reallocate skb->head, thus this cannot fail.
5537 * Caller must re-evaluate skb->truesize if needed.
5538 */
5539void skb_condense(struct sk_buff *skb)
5540{
5541 if (skb->data_len) {
5542 if (skb->data_len > skb->end - skb->tail ||
5543 skb_cloned(skb))
5544 return;
5545
5546 /* Nice, we can free page frag(s) right now */
5547 __pskb_pull_tail(skb, skb->data_len);
5548 }
5549 /* At this point, skb->truesize might be overestimated,
5550 * because skb had a fragment, and fragments do not tell
5551 * their truesize.
5552 * When we pulled its content into skb->head, fragment
5553 * was freed, but __pskb_pull_tail() could not possibly
5554 * adjust skb->truesize, not knowing the frag truesize.
5555 */
5556 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5557}
124#define NAPI_SKB_CACHE_SIZE 64
125#define NAPI_SKB_CACHE_BULK 16
126#define NAPI_SKB_CACHE_HALF (NAPI_SKB_CACHE_SIZE / 2)
127
128struct napi_alloc_cache {
129 struct page_frag_cache page;
130 unsigned int skb_count;
131 void *skb_cache[NAPI_SKB_CACHE_SIZE];
132};
133
134static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
135static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
136
137static void *__alloc_frag_align(unsigned int fragsz, gfp_t gfp_mask,
138 unsigned int align_mask)
139{
140 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
141
142 return page_frag_alloc_align(&nc->page, fragsz, gfp_mask, align_mask);
143}
144
145void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
146{
147 fragsz = SKB_DATA_ALIGN(fragsz);
148
149 return __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
150}
151EXPORT_SYMBOL(__napi_alloc_frag_align);
152
153void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask)
154{
155 struct page_frag_cache *nc;
156 void *data;
157
158 fragsz = SKB_DATA_ALIGN(fragsz);
159 if (in_irq() || irqs_disabled()) {
160 nc = this_cpu_ptr(&netdev_alloc_cache);
161 data = page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask);
162 } else {
163 local_bh_disable();
164 data = __alloc_frag_align(fragsz, GFP_ATOMIC, align_mask);
165 local_bh_enable();
166 }
167 return data;
168}
169EXPORT_SYMBOL(__netdev_alloc_frag_align);
170
171static struct sk_buff *napi_skb_cache_get(void)
172{
173 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
174 struct sk_buff *skb;
175
176 if (unlikely(!nc->skb_count))
177 nc->skb_count = kmem_cache_alloc_bulk(skbuff_head_cache,
178 GFP_ATOMIC,
179 NAPI_SKB_CACHE_BULK,
180 nc->skb_cache);
181 if (unlikely(!nc->skb_count))
182 return NULL;
183
184 skb = nc->skb_cache[--nc->skb_count];
185 kasan_unpoison_object_data(skbuff_head_cache, skb);
186
187 return skb;
188}
189
190/* Caller must provide SKB that is memset cleared */
191static void __build_skb_around(struct sk_buff *skb, void *data,
192 unsigned int frag_size)
193{
194 struct skb_shared_info *shinfo;
195 unsigned int size = frag_size ? : ksize(data);
196
197 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
198
199 /* Assumes caller memset cleared SKB */
200 skb->truesize = SKB_TRUESIZE(size);
201 refcount_set(&skb->users, 1);
202 skb->head = data;
203 skb->data = data;
204 skb_reset_tail_pointer(skb);
205 skb->end = skb->tail + size;
206 skb->mac_header = (typeof(skb->mac_header))~0U;
207 skb->transport_header = (typeof(skb->transport_header))~0U;
208
209 /* make sure we initialize shinfo sequentially */
210 shinfo = skb_shinfo(skb);
211 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
212 atomic_set(&shinfo->dataref, 1);
213
214 skb_set_kcov_handle(skb, kcov_common_handle());
215}
216
217/**
218 * __build_skb - build a network buffer
219 * @data: data buffer provided by caller
220 * @frag_size: size of data, or 0 if head was kmalloced
221 *
222 * Allocate a new &sk_buff. Caller provides space holding head and
223 * skb_shared_info. @data must have been allocated by kmalloc() only if
224 * @frag_size is 0, otherwise data should come from the page allocator
225 * or vmalloc()
226 * The return is the new skb buffer.
227 * On a failure the return is %NULL, and @data is not freed.
228 * Notes:
229 * Before IO, the driver allocates only the data buffer, where the NIC puts
230 * the incoming frame. The driver should add room at head (NET_SKB_PAD) and
231 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
232 * After IO, the driver calls build_skb() to allocate the sk_buff and
233 * populate it before giving the packet to the stack.
234 * RX rings only contain data buffers, not full skbs.
235 */
236struct sk_buff *__build_skb(void *data, unsigned int frag_size)
237{
238 struct sk_buff *skb;
239
240 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
241 if (unlikely(!skb))
242 return NULL;
243
244 memset(skb, 0, offsetof(struct sk_buff, tail));
245 __build_skb_around(skb, data, frag_size);
246
247 return skb;
248}
249
250/* build_skb() is a wrapper over __build_skb() that specifically
251 * takes care of skb->head and skb->pfmemalloc.
252 * This means that if @frag_size is not zero, then @data must be backed
253 * by a page fragment, not kmalloc() or vmalloc().
254 */
255struct sk_buff *build_skb(void *data, unsigned int frag_size)
256{
257 struct sk_buff *skb = __build_skb(data, frag_size);
258
259 if (skb && frag_size) {
260 skb->head_frag = 1;
261 if (page_is_pfmemalloc(virt_to_head_page(data)))
262 skb->pfmemalloc = 1;
263 }
264 return skb;
265}
266EXPORT_SYMBOL(build_skb);
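/*
 * Illustrative sketch (editorial addition): a hypothetical RX path where
 * the driver reserved headroom and tailroom in the fragment when posting
 * it to the NIC, and now wraps the filled fragment in an skb.
 */
#if 0
static struct sk_buff *example_rx_build(void *frag, unsigned int truesize,
					unsigned int pkt_len)
{
	struct sk_buff *skb = build_skb(frag, truesize);

	if (unlikely(!skb))
		return NULL;
	skb_reserve(skb, NET_SKB_PAD);	/* headroom added before DMA */
	__skb_put(skb, pkt_len);	/* frame written by the NIC */
	return skb;
}
#endif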
267
268/**
269 * build_skb_around - build a network buffer around provided skb
270 * @skb: sk_buff provide by caller, must be memset cleared
271 * @data: data buffer provided by caller
272 * @frag_size: size of data, or 0 if head was kmalloced
273 */
274struct sk_buff *build_skb_around(struct sk_buff *skb,
275 void *data, unsigned int frag_size)
276{
277 if (unlikely(!skb))
278 return NULL;
279
280 __build_skb_around(skb, data, frag_size);
281
282 if (frag_size) {
283 skb->head_frag = 1;
284 if (page_is_pfmemalloc(virt_to_head_page(data)))
285 skb->pfmemalloc = 1;
286 }
287 return skb;
288}
289EXPORT_SYMBOL(build_skb_around);
290
291/**
292 * __napi_build_skb - build a network buffer
293 * @data: data buffer provided by caller
294 * @frag_size: size of data, or 0 if head was kmalloced
295 *
296 * Version of __build_skb() that uses NAPI percpu caches to obtain
297 * a skbuff_head instead of an in-place allocation.
298 *
299 * Returns a new &sk_buff on success, %NULL on allocation failure.
300 */
301static struct sk_buff *__napi_build_skb(void *data, unsigned int frag_size)
302{
303 struct sk_buff *skb;
304
305 skb = napi_skb_cache_get();
306 if (unlikely(!skb))
307 return NULL;
308
309 memset(skb, 0, offsetof(struct sk_buff, tail));
310 __build_skb_around(skb, data, frag_size);
311
312 return skb;
313}
314
315/**
316 * napi_build_skb - build a network buffer
317 * @data: data buffer provided by caller
318 * @frag_size: size of data, or 0 if head was kmalloced
319 *
320 * Version of __napi_build_skb() that takes care of skb->head_frag
321 * and skb->pfmemalloc when the data is a page or page fragment.
322 *
323 * Returns a new &sk_buff on success, %NULL on allocation failure.
324 */
325struct sk_buff *napi_build_skb(void *data, unsigned int frag_size)
326{
327 struct sk_buff *skb = __napi_build_skb(data, frag_size);
328
329 if (likely(skb) && frag_size) {
330 skb->head_frag = 1;
331 skb_propagate_pfmemalloc(virt_to_head_page(data), skb);
332 }
333
334 return skb;
335}
336EXPORT_SYMBOL(napi_build_skb);
337
338/*
339 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
340 * the caller if emergency pfmemalloc reserves are being used. If it is and
341 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
342 * may be used. Otherwise, the packet data may be discarded until enough
343 * memory is free
344 */
345static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
346 bool *pfmemalloc)
347{
348 void *obj;
349 bool ret_pfmemalloc = false;
350
351 /*
352 * Try a regular allocation, when that fails and we're not entitled
353 * to the reserves, fail.
354 */
355 obj = kmalloc_node_track_caller(size,
356 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
357 node);
358 if (obj || !(gfp_pfmemalloc_allowed(flags)))
359 goto out;
360
361 /* Try again but now we are using pfmemalloc reserves */
362 ret_pfmemalloc = true;
363 obj = kmalloc_node_track_caller(size, flags, node);
364
365out:
366 if (pfmemalloc)
367 *pfmemalloc = ret_pfmemalloc;
368
369 return obj;
370}
371
372/* Allocate a new skbuff. We do this ourselves so we can fill in a few
373 * 'private' fields and also do memory statistics to find all the
374 * [BEEP] leaks.
375 */
377
378/**
379 * __alloc_skb - allocate a network buffer
380 * @size: size to allocate
381 * @gfp_mask: allocation mask
382 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
383 * instead of head cache and allocate a cloned (child) skb.
384 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
385 * allocations in case the data is required for writeback
386 * @node: numa node to allocate memory on
387 *
388 * Allocate a new &sk_buff. The returned buffer has no headroom and a
389 * tail room of at least size bytes. The object has a reference count
390 * of one. The return is the buffer. On a failure the return is %NULL.
391 *
392 * Buffers may only be allocated from interrupts using a @gfp_mask of
393 * %GFP_ATOMIC.
394 */
395struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
396 int flags, int node)
397{
398 struct kmem_cache *cache;
399 struct sk_buff *skb;
400 u8 *data;
401 bool pfmemalloc;
402
403 cache = (flags & SKB_ALLOC_FCLONE)
404 ? skbuff_fclone_cache : skbuff_head_cache;
405
406 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
407 gfp_mask |= __GFP_MEMALLOC;
408
409 /* Get the HEAD */
410 if ((flags & (SKB_ALLOC_FCLONE | SKB_ALLOC_NAPI)) == SKB_ALLOC_NAPI &&
411 likely(node == NUMA_NO_NODE || node == numa_mem_id()))
412 skb = napi_skb_cache_get();
413 else
414 skb = kmem_cache_alloc_node(cache, gfp_mask & ~GFP_DMA, node);
415 if (unlikely(!skb))
416 return NULL;
417 prefetchw(skb);
418
419 /* We do our best to align skb_shared_info on a separate cache
420 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
421 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
422 * Both skb->head and skb_shared_info are cache line aligned.
423 */
424 size = SKB_DATA_ALIGN(size);
425 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
426 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
427 if (unlikely(!data))
428 goto nodata;
429 /* kmalloc(size) might give us more room than requested.
430 * Put skb_shared_info exactly at the end of allocated zone,
431 * to allow max possible filling before reallocation.
432 */
433 size = SKB_WITH_OVERHEAD(ksize(data));
434 prefetchw(data + size);
435
436 /*
437 * Only clear those fields we need to clear, not those that we will
438 * actually initialise below. Hence, don't put any more fields after
439 * the tail pointer in struct sk_buff!
440 */
441 memset(skb, 0, offsetof(struct sk_buff, tail));
442 __build_skb_around(skb, data, 0);
443 skb->pfmemalloc = pfmemalloc;
444
445 if (flags & SKB_ALLOC_FCLONE) {
446 struct sk_buff_fclones *fclones;
447
448 fclones = container_of(skb, struct sk_buff_fclones, skb1);
449
450 skb->fclone = SKB_FCLONE_ORIG;
451 refcount_set(&fclones->fclone_ref, 1);
452
453 fclones->skb2.fclone = SKB_FCLONE_CLONE;
454 }
455
456 return skb;
457
458nodata:
459 kmem_cache_free(cache, skb);
460 return NULL;
461}
462EXPORT_SYMBOL(__alloc_skb);
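
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * because __alloc_skb() returns a buffer with no headroom, callers normally
 * reserve header space first and only then fill the data area. The headroom
 * size, payload and function name below are hypothetical.
 */
static struct sk_buff * __maybe_unused
example_build_packet(const void *payload, unsigned int plen)
{
	struct sk_buff *skb = alloc_skb(128 + plen, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, 128);		/* headroom for headers pushed later */
	memcpy(skb_put(skb, plen), payload, plen); /* advance tail, copy data */
	return skb;
}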
463
464/**
465 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
466 * @dev: network device to receive on
467 * @len: length to allocate
468 * @gfp_mask: get_free_pages mask, passed to alloc_skb
469 *
470 * Allocate a new &sk_buff and assign it a usage count of one. The
471 * buffer has NET_SKB_PAD headroom built in. Users should allocate
472 * the headroom they think they need without accounting for the
473 * built in space. The built in space is used for optimisations.
474 *
475 * %NULL is returned if there is no free memory.
476 */
477struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
478 gfp_t gfp_mask)
479{
480 struct page_frag_cache *nc;
481 struct sk_buff *skb;
482 bool pfmemalloc;
483 void *data;
484
485 len += NET_SKB_PAD;
486
487 /* If requested length is either too small or too big,
488 * we use kmalloc() for skb->head allocation.
489 */
490 if (len <= SKB_WITH_OVERHEAD(1024) ||
491 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
492 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
493 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
494 if (!skb)
495 goto skb_fail;
496 goto skb_success;
497 }
498
499 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
500 len = SKB_DATA_ALIGN(len);
501
502 if (sk_memalloc_socks())
503 gfp_mask |= __GFP_MEMALLOC;
504
505 if (in_irq() || irqs_disabled()) {
506 nc = this_cpu_ptr(&netdev_alloc_cache);
507 data = page_frag_alloc(nc, len, gfp_mask);
508 pfmemalloc = nc->pfmemalloc;
509 } else {
510 local_bh_disable();
511 nc = this_cpu_ptr(&napi_alloc_cache.page);
512 data = page_frag_alloc(nc, len, gfp_mask);
513 pfmemalloc = nc->pfmemalloc;
514 local_bh_enable();
515 }
516
517 if (unlikely(!data))
518 return NULL;
519
520 skb = __build_skb(data, len);
521 if (unlikely(!skb)) {
522 skb_free_frag(data);
523 return NULL;
524 }
525
526 if (pfmemalloc)
527 skb->pfmemalloc = 1;
528 skb->head_frag = 1;
529
530skb_success:
531 skb_reserve(skb, NET_SKB_PAD);
532 skb->dev = dev;
533
534skb_fail:
535 return skb;
536}
537EXPORT_SYMBOL(__netdev_alloc_skb);
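
/*
 * Illustrative sketch (editorial addition): a driver RX path asks only for
 * the frame length; the NET_SKB_PAD headroom is reserved by this helper.
 * The function name and parameters are hypothetical.
 */
static struct sk_buff * __maybe_unused
example_rx_alloc(struct net_device *dev, unsigned int frame_len)
{
	/* the netdev_alloc_skb() wrapper supplies GFP_ATOMIC */
	struct sk_buff *skb = netdev_alloc_skb(dev, frame_len);

	if (unlikely(!skb))
		return NULL;	/* caller drops the frame */
	return skb;
}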
538
539/**
540 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
541 * @napi: napi instance this buffer was allocated for
542 * @len: length to allocate
543 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
544 *
545 * Allocate a new sk_buff for use in NAPI receive. This buffer will
546 * attempt to allocate the head from a special reserved region used
547 * only for NAPI Rx allocation. By doing this we can save several
548 * CPU cycles by avoiding having to disable and re-enable IRQs.
549 *
550 * %NULL is returned if there is no free memory.
551 */
552struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
553 gfp_t gfp_mask)
554{
555 struct napi_alloc_cache *nc;
556 struct sk_buff *skb;
557 void *data;
558
559 len += NET_SKB_PAD + NET_IP_ALIGN;
560
561 /* If requested length is either too small or too big,
562 * we use kmalloc() for skb->head allocation.
563 */
564 if (len <= SKB_WITH_OVERHEAD(1024) ||
565 len > SKB_WITH_OVERHEAD(PAGE_SIZE) ||
566 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
567 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX | SKB_ALLOC_NAPI,
568 NUMA_NO_NODE);
569 if (!skb)
570 goto skb_fail;
571 goto skb_success;
572 }
573
574 nc = this_cpu_ptr(&napi_alloc_cache);
575 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
576 len = SKB_DATA_ALIGN(len);
577
578 if (sk_memalloc_socks())
579 gfp_mask |= __GFP_MEMALLOC;
580
581 data = page_frag_alloc(&nc->page, len, gfp_mask);
582 if (unlikely(!data))
583 return NULL;
584
585 skb = __napi_build_skb(data, len);
586 if (unlikely(!skb)) {
587 skb_free_frag(data);
588 return NULL;
589 }
590
591 if (nc->page.pfmemalloc)
592 skb->pfmemalloc = 1;
593 skb->head_frag = 1;
594
595skb_success:
596 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
597 skb->dev = napi->dev;
598
599skb_fail:
600 return skb;
601}
602EXPORT_SYMBOL(__napi_alloc_skb);
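
/*
 * Illustrative sketch (editorial addition): from inside a NAPI poll routine,
 * which already runs in softirq context, the napi variant reaches the
 * per-CPU page cache without disabling IRQs or bottom halves. The helper
 * name is hypothetical.
 */
static struct sk_buff * __maybe_unused
example_napi_rx_alloc(struct napi_struct *napi, unsigned int frame_len)
{
	/* NET_SKB_PAD + NET_IP_ALIGN headroom is reserved by the helper */
	return napi_alloc_skb(napi, frame_len);
}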
603
604void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
605 int size, unsigned int truesize)
606{
607 skb_fill_page_desc(skb, i, page, off, size);
608 skb->len += size;
609 skb->data_len += size;
610 skb->truesize += truesize;
611}
612EXPORT_SYMBOL(skb_add_rx_frag);
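
/*
 * Illustrative sketch (editorial addition): a driver receiving directly
 * into pages can attach a page to the next free frag slot instead of
 * copying. Passing PAGE_SIZE as truesize assumes a whole page is consumed;
 * the function name and parameters are hypothetical.
 */
static void __maybe_unused example_attach_page(struct sk_buff *skb,
					       struct page *page,
					       unsigned int off,
					       unsigned int size)
{
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, off, size,
			PAGE_SIZE);
}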
613
614void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
615 unsigned int truesize)
616{
617 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
618
619 skb_frag_size_add(frag, size);
620 skb->len += size;
621 skb->data_len += size;
622 skb->truesize += truesize;
623}
624EXPORT_SYMBOL(skb_coalesce_rx_frag);
625
626static void skb_drop_list(struct sk_buff **listp)
627{
628 kfree_skb_list(*listp);
629 *listp = NULL;
630}
631
632static inline void skb_drop_fraglist(struct sk_buff *skb)
633{
634 skb_drop_list(&skb_shinfo(skb)->frag_list);
635}
636
637static void skb_clone_fraglist(struct sk_buff *skb)
638{
639 struct sk_buff *list;
640
641 skb_walk_frags(skb, list)
642 skb_get(list);
643}
644
645static void skb_free_head(struct sk_buff *skb)
646{
647 unsigned char *head = skb->head;
648
649 if (skb->head_frag) {
650 if (skb_pp_recycle(skb, head))
651 return;
652 skb_free_frag(head);
653 } else {
654 kfree(head);
655 }
656}
657
658static void skb_release_data(struct sk_buff *skb)
659{
660 struct skb_shared_info *shinfo = skb_shinfo(skb);
661 int i;
662
663 if (skb->cloned &&
664 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
665 &shinfo->dataref))
666 goto exit;
667
668 skb_zcopy_clear(skb, true);
669
670 for (i = 0; i < shinfo->nr_frags; i++)
671 __skb_frag_unref(&shinfo->frags[i], skb->pp_recycle);
672
673 if (shinfo->frag_list)
674 kfree_skb_list(shinfo->frag_list);
675
676 skb_free_head(skb);
677exit:
678	/* When we clone an SKB we copy the recycling bit. The pp_recycle
679	 * bit is only set on the head though, so in order to avoid races
680	 * while trying to recycle fragments on __skb_frag_unref() we need
681	 * to make one SKB responsible for triggering the recycle path.
682	 * So disable the recycling bit if an SKB is cloned and we have
683	 * additional references to the fragmented part of the SKB.
684	 * Eventually the last SKB will have the recycling bit set and its
685	 * dataref set to 0, which will trigger the recycling.
686 */
687 skb->pp_recycle = 0;
688}
689
690/*
691 * Free the memory of an skbuff without cleaning the state.
692 */
693static void kfree_skbmem(struct sk_buff *skb)
694{
695 struct sk_buff_fclones *fclones;
696
697 switch (skb->fclone) {
698 case SKB_FCLONE_UNAVAILABLE:
699 kmem_cache_free(skbuff_head_cache, skb);
700 return;
701
702 case SKB_FCLONE_ORIG:
703 fclones = container_of(skb, struct sk_buff_fclones, skb1);
704
705		/* We usually free the clone (TX completion) before the original skb.
706 * This test would have no chance to be true for the clone,
707 * while here, branch prediction will be good.
708 */
709 if (refcount_read(&fclones->fclone_ref) == 1)
710 goto fastpath;
711 break;
712
713 default: /* SKB_FCLONE_CLONE */
714 fclones = container_of(skb, struct sk_buff_fclones, skb2);
715 break;
716 }
717 if (!refcount_dec_and_test(&fclones->fclone_ref))
718 return;
719fastpath:
720 kmem_cache_free(skbuff_fclone_cache, fclones);
721}
722
723void skb_release_head_state(struct sk_buff *skb)
724{
725 skb_dst_drop(skb);
726 if (skb->destructor) {
727 WARN_ON(in_irq());
728 skb->destructor(skb);
729 }
730#if IS_ENABLED(CONFIG_NF_CONNTRACK)
731 nf_conntrack_put(skb_nfct(skb));
732#endif
733 skb_ext_put(skb);
734}
735
736/* Free everything but the sk_buff shell. */
737static void skb_release_all(struct sk_buff *skb)
738{
739 skb_release_head_state(skb);
740 if (likely(skb->head))
741 skb_release_data(skb);
742}
743
744/**
745 * __kfree_skb - private function
746 * @skb: buffer
747 *
748 * Free an sk_buff. Release anything attached to the buffer.
749 * Clean the state. This is an internal helper function. Users should
750 * always call kfree_skb().
751 */
752
753void __kfree_skb(struct sk_buff *skb)
754{
755 skb_release_all(skb);
756 kfree_skbmem(skb);
757}
758EXPORT_SYMBOL(__kfree_skb);
759
760/**
761 * kfree_skb - free an sk_buff
762 * @skb: buffer to free
763 *
764 * Drop a reference to the buffer and free it if the usage count has
765 * hit zero.
766 */
767void kfree_skb(struct sk_buff *skb)
768{
769 if (!skb_unref(skb))
770 return;
771
772 trace_kfree_skb(skb, __builtin_return_address(0));
773 __kfree_skb(skb);
774}
775EXPORT_SYMBOL(kfree_skb);
776
777void kfree_skb_list(struct sk_buff *segs)
778{
779 while (segs) {
780 struct sk_buff *next = segs->next;
781
782 kfree_skb(segs);
783 segs = next;
784 }
785}
786EXPORT_SYMBOL(kfree_skb_list);
787
788/* Dump skb information and contents.
789 *
790 * Must only be called from net_ratelimit()-ed paths.
791 *
792 * Dumps whole packets if full_pkt, only headers otherwise.
793 */
794void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
795{
796 struct skb_shared_info *sh = skb_shinfo(skb);
797 struct net_device *dev = skb->dev;
798 struct sock *sk = skb->sk;
799 struct sk_buff *list_skb;
800 bool has_mac, has_trans;
801 int headroom, tailroom;
802 int i, len, seg_len;
803
804 if (full_pkt)
805 len = skb->len;
806 else
807 len = min_t(int, skb->len, MAX_HEADER + 128);
808
809 headroom = skb_headroom(skb);
810 tailroom = skb_tailroom(skb);
811
812 has_mac = skb_mac_header_was_set(skb);
813 has_trans = skb_transport_header_was_set(skb);
814
815 printk("%sskb len=%u headroom=%u headlen=%u tailroom=%u\n"
816 "mac=(%d,%d) net=(%d,%d) trans=%d\n"
817 "shinfo(txflags=%u nr_frags=%u gso(size=%hu type=%u segs=%hu))\n"
818 "csum(0x%x ip_summed=%u complete_sw=%u valid=%u level=%u)\n"
819 "hash(0x%x sw=%u l4=%u) proto=0x%04x pkttype=%u iif=%d\n",
820 level, skb->len, headroom, skb_headlen(skb), tailroom,
821 has_mac ? skb->mac_header : -1,
822 has_mac ? skb_mac_header_len(skb) : -1,
823 skb->network_header,
824 has_trans ? skb_network_header_len(skb) : -1,
825 has_trans ? skb->transport_header : -1,
826 sh->tx_flags, sh->nr_frags,
827 sh->gso_size, sh->gso_type, sh->gso_segs,
828 skb->csum, skb->ip_summed, skb->csum_complete_sw,
829 skb->csum_valid, skb->csum_level,
830 skb->hash, skb->sw_hash, skb->l4_hash,
831 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
832
833 if (dev)
834 printk("%sdev name=%s feat=0x%pNF\n",
835 level, dev->name, &dev->features);
836 if (sk)
837 printk("%ssk family=%hu type=%u proto=%u\n",
838 level, sk->sk_family, sk->sk_type, sk->sk_protocol);
839
840 if (full_pkt && headroom)
841 print_hex_dump(level, "skb headroom: ", DUMP_PREFIX_OFFSET,
842 16, 1, skb->head, headroom, false);
843
844 seg_len = min_t(int, skb_headlen(skb), len);
845 if (seg_len)
846 print_hex_dump(level, "skb linear: ", DUMP_PREFIX_OFFSET,
847 16, 1, skb->data, seg_len, false);
848 len -= seg_len;
849
850 if (full_pkt && tailroom)
851 print_hex_dump(level, "skb tailroom: ", DUMP_PREFIX_OFFSET,
852 16, 1, skb_tail_pointer(skb), tailroom, false);
853
854 for (i = 0; len && i < skb_shinfo(skb)->nr_frags; i++) {
855 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
856 u32 p_off, p_len, copied;
857 struct page *p;
858 u8 *vaddr;
859
860 skb_frag_foreach_page(frag, skb_frag_off(frag),
861 skb_frag_size(frag), p, p_off, p_len,
862 copied) {
863 seg_len = min_t(int, p_len, len);
864 vaddr = kmap_atomic(p);
865 print_hex_dump(level, "skb frag: ",
866 DUMP_PREFIX_OFFSET,
867 16, 1, vaddr + p_off, seg_len, false);
868 kunmap_atomic(vaddr);
869 len -= seg_len;
870 if (!len)
871 break;
872 }
873 }
874
875 if (full_pkt && skb_has_frag_list(skb)) {
876 printk("skb fraglist:\n");
877 skb_walk_frags(skb, list_skb)
878 skb_dump(level, list_skb, true);
879 }
880}
881EXPORT_SYMBOL(skb_dump);
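
/*
 * Illustrative sketch (editorial addition): skb_dump() is verbose, so a
 * caller flagging, say, a malformed packet in a hot path is expected to
 * rate-limit it as the comment above requires. The function name is
 * hypothetical.
 */
static void __maybe_unused example_report_bad_skb(const struct sk_buff *skb)
{
	if (net_ratelimit())
		skb_dump(KERN_ERR, skb, false);	/* headers only */
}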
882
883/**
884 * skb_tx_error - report an sk_buff xmit error
885 * @skb: buffer that triggered an error
886 *
887 * Report xmit error if a device callback is tracking this skb.
888 * skb must be freed afterwards.
889 */
890void skb_tx_error(struct sk_buff *skb)
891{
892 skb_zcopy_clear(skb, true);
893}
894EXPORT_SYMBOL(skb_tx_error);
895
896#ifdef CONFIG_TRACEPOINTS
897/**
898 * consume_skb - free an skbuff
899 * @skb: buffer to free
900 *
901 * Drop a ref to the buffer and free it if the usage count has hit zero.
902 * Functions identically to kfree_skb(), but kfree_skb() assumes that the
903 * frame is being dropped after a failure and notes that in a tracepoint.
904 */
905void consume_skb(struct sk_buff *skb)
906{
907 if (!skb_unref(skb))
908 return;
909
910 trace_consume_skb(skb);
911 __kfree_skb(skb);
912}
913EXPORT_SYMBOL(consume_skb);
914#endif
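
/*
 * Illustrative sketch (editorial addition): the two free paths are
 * functionally identical but feed different tracepoints, so drop-monitoring
 * tools only count the kfree_skb() path. The error condition here is
 * hypothetical.
 */
static void __maybe_unused example_finish_skb(struct sk_buff *skb,
					      bool bad_csum)
{
	if (bad_csum)
		kfree_skb(skb);		/* counted as a drop */
	else
		consume_skb(skb);	/* normal end of life, not a drop */
}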
915
916/**
917 * __consume_stateless_skb - free an skbuff, assuming it is stateless
918 * @skb: buffer to free
919 *
920 * Like consume_skb(), but this variant assumes that this is the last
921 * skb reference and that all the head states have already been dropped.
922 */
923void __consume_stateless_skb(struct sk_buff *skb)
924{
925 trace_consume_skb(skb);
926 skb_release_data(skb);
927 kfree_skbmem(skb);
928}
929
930static void napi_skb_cache_put(struct sk_buff *skb)
931{
932 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
933 u32 i;
934
935 kasan_poison_object_data(skbuff_head_cache, skb);
936 nc->skb_cache[nc->skb_count++] = skb;
937
938 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
939 for (i = NAPI_SKB_CACHE_HALF; i < NAPI_SKB_CACHE_SIZE; i++)
940 kasan_unpoison_object_data(skbuff_head_cache,
941 nc->skb_cache[i]);
942
943 kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_HALF,
944 nc->skb_cache + NAPI_SKB_CACHE_HALF);
945 nc->skb_count = NAPI_SKB_CACHE_HALF;
946 }
947}
948
949void __kfree_skb_defer(struct sk_buff *skb)
950{
951 skb_release_all(skb);
952 napi_skb_cache_put(skb);
953}
954
955void napi_skb_free_stolen_head(struct sk_buff *skb)
956{
957 nf_reset_ct(skb);
958 skb_dst_drop(skb);
959 skb_ext_put(skb);
960 napi_skb_cache_put(skb);
961}
962
963void napi_consume_skb(struct sk_buff *skb, int budget)
964{
965	/* A zero budget indicates that a non-NAPI context called us, like netpoll */
966 if (unlikely(!budget)) {
967 dev_consume_skb_any(skb);
968 return;
969 }
970
971 lockdep_assert_in_softirq();
972
973 if (!skb_unref(skb))
974 return;
975
976	/* if we reach here, the SKB is ready to be freed */
977 trace_consume_skb(skb);
978
979	/* if the SKB is a clone it comes from the fclone cache; free it normally */
980 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
981 __kfree_skb(skb);
982 return;
983 }
984
985 skb_release_all(skb);
986 napi_skb_cache_put(skb);
987}
988EXPORT_SYMBOL(napi_consume_skb);
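
/*
 * Illustrative sketch (editorial addition): a driver's TX completion
 * handler, running from its NAPI poll callback, passes the poll budget
 * through so that budget == 0 callers (e.g. netpoll) take the safe
 * dev_consume_skb_any() path above. The array form is hypothetical.
 */
static void __maybe_unused example_tx_clean(struct sk_buff **completed,
					    int n, int budget)
{
	int i;

	for (i = 0; i < n; i++)
		napi_consume_skb(completed[i], budget);
}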
989
990/* Make sure a field is enclosed inside headers_start/headers_end section */
991#define CHECK_SKB_FIELD(field) \
992 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
993 offsetof(struct sk_buff, headers_start)); \
994 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
995 offsetof(struct sk_buff, headers_end)); \
996
997static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
998{
999 new->tstamp = old->tstamp;
1000 /* We do not copy old->sk */
1001 new->dev = old->dev;
1002 memcpy(new->cb, old->cb, sizeof(old->cb));
1003 skb_dst_copy(new, old);
1004 __skb_ext_copy(new, old);
1005 __nf_copy(new, old, false);
1006
1007	/* Note: this field could be in the headers_start/headers_end section.
1008	 * It is not yet, because we do not want to create a 16-bit hole
1009 */
1010 new->queue_mapping = old->queue_mapping;
1011
1012 memcpy(&new->headers_start, &old->headers_start,
1013 offsetof(struct sk_buff, headers_end) -
1014 offsetof(struct sk_buff, headers_start));
1015 CHECK_SKB_FIELD(protocol);
1016 CHECK_SKB_FIELD(csum);
1017 CHECK_SKB_FIELD(hash);
1018 CHECK_SKB_FIELD(priority);
1019 CHECK_SKB_FIELD(skb_iif);
1020 CHECK_SKB_FIELD(vlan_proto);
1021 CHECK_SKB_FIELD(vlan_tci);
1022 CHECK_SKB_FIELD(transport_header);
1023 CHECK_SKB_FIELD(network_header);
1024 CHECK_SKB_FIELD(mac_header);
1025 CHECK_SKB_FIELD(inner_protocol);
1026 CHECK_SKB_FIELD(inner_transport_header);
1027 CHECK_SKB_FIELD(inner_network_header);
1028 CHECK_SKB_FIELD(inner_mac_header);
1029 CHECK_SKB_FIELD(mark);
1030#ifdef CONFIG_NETWORK_SECMARK
1031 CHECK_SKB_FIELD(secmark);
1032#endif
1033#ifdef CONFIG_NET_RX_BUSY_POLL
1034 CHECK_SKB_FIELD(napi_id);
1035#endif
1036#ifdef CONFIG_XPS
1037 CHECK_SKB_FIELD(sender_cpu);
1038#endif
1039#ifdef CONFIG_NET_SCHED
1040 CHECK_SKB_FIELD(tc_index);
1041#endif
1042
1043}
1044
1045/*
1046 * You should not add any new code to this function. Add it to
1047 * __copy_skb_header above instead.
1048 */
1049static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
1050{
1051#define C(x) n->x = skb->x
1052
1053 n->next = n->prev = NULL;
1054 n->sk = NULL;
1055 __copy_skb_header(n, skb);
1056
1057 C(len);
1058 C(data_len);
1059 C(mac_len);
1060 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
1061 n->cloned = 1;
1062 n->nohdr = 0;
1063 n->peeked = 0;
1064 C(pfmemalloc);
1065 C(pp_recycle);
1066 n->destructor = NULL;
1067 C(tail);
1068 C(end);
1069 C(head);
1070 C(head_frag);
1071 C(data);
1072 C(truesize);
1073 refcount_set(&n->users, 1);
1074
1075 atomic_inc(&(skb_shinfo(skb)->dataref));
1076 skb->cloned = 1;
1077
1078 return n;
1079#undef C
1080}
1081
1082/**
1083 * alloc_skb_for_msg() - allocate sk_buff to wrap frag list forming a msg
1084 * @first: first sk_buff of the msg
1085 */
1086struct sk_buff *alloc_skb_for_msg(struct sk_buff *first)
1087{
1088 struct sk_buff *n;
1089
1090 n = alloc_skb(0, GFP_ATOMIC);
1091 if (!n)
1092 return NULL;
1093
1094 n->len = first->len;
1095 n->data_len = first->len;
1096 n->truesize = first->truesize;
1097
1098 skb_shinfo(n)->frag_list = first;
1099
1100 __copy_skb_header(n, first);
1101 n->destructor = NULL;
1102
1103 return n;
1104}
1105EXPORT_SYMBOL_GPL(alloc_skb_for_msg);
1106
1107/**
1108 * skb_morph - morph one skb into another
1109 * @dst: the skb to receive the contents
1110 * @src: the skb to supply the contents
1111 *
1112 * This is identical to skb_clone except that the target skb is
1113 * supplied by the user.
1114 *
1115 * The target skb is returned upon exit.
1116 */
1117struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
1118{
1119 skb_release_all(dst);
1120 return __skb_clone(dst, src);
1121}
1122EXPORT_SYMBOL_GPL(skb_morph);
1123
1124int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
1125{
1126 unsigned long max_pg, num_pg, new_pg, old_pg;
1127 struct user_struct *user;
1128
1129 if (capable(CAP_IPC_LOCK) || !size)
1130 return 0;
1131
1132 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
1133 max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1134 user = mmp->user ? : current_user();
1135
1136 do {
1137 old_pg = atomic_long_read(&user->locked_vm);
1138 new_pg = old_pg + num_pg;
1139 if (new_pg > max_pg)
1140 return -ENOBUFS;
1141 } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
1142 old_pg);
1143
1144 if (!mmp->user) {
1145 mmp->user = get_uid(user);
1146 mmp->num_pg = num_pg;
1147 } else {
1148 mmp->num_pg += num_pg;
1149 }
1150
1151 return 0;
1152}
1153EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
1154
1155void mm_unaccount_pinned_pages(struct mmpin *mmp)
1156{
1157 if (mmp->user) {
1158 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
1159 free_uid(mmp->user);
1160 }
1161}
1162EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
1163
1164struct ubuf_info *msg_zerocopy_alloc(struct sock *sk, size_t size)
1165{
1166 struct ubuf_info *uarg;
1167 struct sk_buff *skb;
1168
1169 WARN_ON_ONCE(!in_task());
1170
1171 skb = sock_omalloc(sk, 0, GFP_KERNEL);
1172 if (!skb)
1173 return NULL;
1174
1175 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
1176 uarg = (void *)skb->cb;
1177 uarg->mmp.user = NULL;
1178
1179 if (mm_account_pinned_pages(&uarg->mmp, size)) {
1180 kfree_skb(skb);
1181 return NULL;
1182 }
1183
1184 uarg->callback = msg_zerocopy_callback;
1185 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
1186 uarg->len = 1;
1187 uarg->bytelen = size;
1188 uarg->zerocopy = 1;
1189 uarg->flags = SKBFL_ZEROCOPY_FRAG;
1190 refcount_set(&uarg->refcnt, 1);
1191 sock_hold(sk);
1192
1193 return uarg;
1194}
1195EXPORT_SYMBOL_GPL(msg_zerocopy_alloc);
1196
1197static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
1198{
1199 return container_of((void *)uarg, struct sk_buff, cb);
1200}
1201
1202struct ubuf_info *msg_zerocopy_realloc(struct sock *sk, size_t size,
1203 struct ubuf_info *uarg)
1204{
1205 if (uarg) {
1206 const u32 byte_limit = 1 << 19; /* limit to a few TSO */
1207 u32 bytelen, next;
1208
1209 /* realloc only when socket is locked (TCP, UDP cork),
1210 * so uarg->len and sk_zckey access is serialized
1211 */
1212 if (!sock_owned_by_user(sk)) {
1213 WARN_ON_ONCE(1);
1214 return NULL;
1215 }
1216
1217 bytelen = uarg->bytelen + size;
1218 if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
1219 /* TCP can create new skb to attach new uarg */
1220 if (sk->sk_type == SOCK_STREAM)
1221 goto new_alloc;
1222 return NULL;
1223 }
1224
1225 next = (u32)atomic_read(&sk->sk_zckey);
1226 if ((u32)(uarg->id + uarg->len) == next) {
1227 if (mm_account_pinned_pages(&uarg->mmp, size))
1228 return NULL;
1229 uarg->len++;
1230 uarg->bytelen = bytelen;
1231 atomic_set(&sk->sk_zckey, ++next);
1232
1233 /* no extra ref when appending to datagram (MSG_MORE) */
1234 if (sk->sk_type == SOCK_STREAM)
1235 net_zcopy_get(uarg);
1236
1237 return uarg;
1238 }
1239 }
1240
1241new_alloc:
1242 return msg_zerocopy_alloc(sk, size);
1243}
1244EXPORT_SYMBOL_GPL(msg_zerocopy_realloc);
1245
1246static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1247{
1248 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1249 u32 old_lo, old_hi;
1250 u64 sum_len;
1251
1252 old_lo = serr->ee.ee_info;
1253 old_hi = serr->ee.ee_data;
1254 sum_len = old_hi - old_lo + 1ULL + len;
1255
1256 if (sum_len >= (1ULL << 32))
1257 return false;
1258
1259 if (lo != old_hi + 1)
1260 return false;
1261
1262 serr->ee.ee_data += len;
1263 return true;
1264}
1265
1266static void __msg_zerocopy_callback(struct ubuf_info *uarg)
1267{
1268 struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1269 struct sock_exterr_skb *serr;
1270 struct sock *sk = skb->sk;
1271 struct sk_buff_head *q;
1272 unsigned long flags;
1273 bool is_zerocopy;
1274 u32 lo, hi;
1275 u16 len;
1276
1277 mm_unaccount_pinned_pages(&uarg->mmp);
1278
1279 /* if !len, there was only 1 call, and it was aborted
1280 * so do not queue a completion notification
1281 */
1282 if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1283 goto release;
1284
1285 len = uarg->len;
1286 lo = uarg->id;
1287 hi = uarg->id + len - 1;
1288 is_zerocopy = uarg->zerocopy;
1289
1290 serr = SKB_EXT_ERR(skb);
1291 memset(serr, 0, sizeof(*serr));
1292 serr->ee.ee_errno = 0;
1293 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1294 serr->ee.ee_data = hi;
1295 serr->ee.ee_info = lo;
1296 if (!is_zerocopy)
1297 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1298
1299 q = &sk->sk_error_queue;
1300 spin_lock_irqsave(&q->lock, flags);
1301 tail = skb_peek_tail(q);
1302 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1303 !skb_zerocopy_notify_extend(tail, lo, len)) {
1304 __skb_queue_tail(q, skb);
1305 skb = NULL;
1306 }
1307 spin_unlock_irqrestore(&q->lock, flags);
1308
1309 sk_error_report(sk);
1310
1311release:
1312 consume_skb(skb);
1313 sock_put(sk);
1314}
1315
1316void msg_zerocopy_callback(struct sk_buff *skb, struct ubuf_info *uarg,
1317 bool success)
1318{
1319 uarg->zerocopy = uarg->zerocopy & success;
1320
1321 if (refcount_dec_and_test(&uarg->refcnt))
1322 __msg_zerocopy_callback(uarg);
1323}
1324EXPORT_SYMBOL_GPL(msg_zerocopy_callback);
1325
1326void msg_zerocopy_put_abort(struct ubuf_info *uarg, bool have_uref)
1327{
1328 struct sock *sk = skb_from_uarg(uarg)->sk;
1329
1330 atomic_dec(&sk->sk_zckey);
1331 uarg->len--;
1332
1333 if (have_uref)
1334 msg_zerocopy_callback(NULL, uarg, true);
1335}
1336EXPORT_SYMBOL_GPL(msg_zerocopy_put_abort);
1337
1338int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len)
1339{
1340 return __zerocopy_sg_from_iter(skb->sk, skb, &msg->msg_iter, len);
1341}
1342EXPORT_SYMBOL_GPL(skb_zerocopy_iter_dgram);
1343
1344int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1345 struct msghdr *msg, int len,
1346 struct ubuf_info *uarg)
1347{
1348 struct ubuf_info *orig_uarg = skb_zcopy(skb);
1349 struct iov_iter orig_iter = msg->msg_iter;
1350 int err, orig_len = skb->len;
1351
1352 /* An skb can only point to one uarg. This edge case happens when
1353 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1354 */
1355 if (orig_uarg && uarg != orig_uarg)
1356 return -EEXIST;
1357
1358 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1359 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1360 struct sock *save_sk = skb->sk;
1361
1362 /* Streams do not free skb on error. Reset to prev state. */
1363 msg->msg_iter = orig_iter;
1364 skb->sk = sk;
1365 ___pskb_trim(skb, orig_len);
1366 skb->sk = save_sk;
1367 return err;
1368 }
1369
1370 skb_zcopy_set(skb, uarg, NULL);
1371 return skb->len - orig_len;
1372}
1373EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1374
1375static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1376 gfp_t gfp_mask)
1377{
1378 if (skb_zcopy(orig)) {
1379 if (skb_zcopy(nskb)) {
1380 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1381 if (!gfp_mask) {
1382 WARN_ON_ONCE(1);
1383 return -ENOMEM;
1384 }
1385 if (skb_uarg(nskb) == skb_uarg(orig))
1386 return 0;
1387 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1388 return -EIO;
1389 }
1390 skb_zcopy_set(nskb, skb_uarg(orig), NULL);
1391 }
1392 return 0;
1393}
1394
1395/**
1396 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1397 * @skb: the skb to modify
1398 * @gfp_mask: allocation priority
1399 *
1400 * This must be called on skb with SKBFL_ZEROCOPY_ENABLE.
1401 * It will copy all frags into kernel and drop the reference
1402 * to userspace pages.
1403 *
1404 * If this function is called from an interrupt, @gfp_mask must be
1405 * %GFP_ATOMIC.
1406 *
1407 * Returns 0 on success or a negative error code on failure
1408 * to allocate kernel memory to copy to.
1409 */
1410int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1411{
1412 int num_frags = skb_shinfo(skb)->nr_frags;
1413 struct page *page, *head = NULL;
1414 int i, new_frags;
1415 u32 d_off;
1416
1417 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1418 return -EINVAL;
1419
1420 if (!num_frags)
1421 goto release;
1422
1423 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1424 for (i = 0; i < new_frags; i++) {
1425 page = alloc_page(gfp_mask);
1426 if (!page) {
1427 while (head) {
1428 struct page *next = (struct page *)page_private(head);
1429 put_page(head);
1430 head = next;
1431 }
1432 return -ENOMEM;
1433 }
1434 set_page_private(page, (unsigned long)head);
1435 head = page;
1436 }
1437
1438 page = head;
1439 d_off = 0;
1440 for (i = 0; i < num_frags; i++) {
1441 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1442 u32 p_off, p_len, copied;
1443 struct page *p;
1444 u8 *vaddr;
1445
1446 skb_frag_foreach_page(f, skb_frag_off(f), skb_frag_size(f),
1447 p, p_off, p_len, copied) {
1448 u32 copy, done = 0;
1449 vaddr = kmap_atomic(p);
1450
1451 while (done < p_len) {
1452 if (d_off == PAGE_SIZE) {
1453 d_off = 0;
1454 page = (struct page *)page_private(page);
1455 }
1456 copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1457 memcpy(page_address(page) + d_off,
1458 vaddr + p_off + done, copy);
1459 done += copy;
1460 d_off += copy;
1461 }
1462 kunmap_atomic(vaddr);
1463 }
1464 }
1465
1466 /* skb frags release userspace buffers */
1467 for (i = 0; i < num_frags; i++)
1468 skb_frag_unref(skb, i);
1469
1470 /* skb frags point to kernel buffers */
1471 for (i = 0; i < new_frags - 1; i++) {
1472 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1473 head = (struct page *)page_private(head);
1474 }
1475 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1476 skb_shinfo(skb)->nr_frags = new_frags;
1477
1478release:
1479 skb_zcopy_clear(skb, false);
1480 return 0;
1481}
1482EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1483
1484/**
1485 * skb_clone - duplicate an sk_buff
1486 * @skb: buffer to clone
1487 * @gfp_mask: allocation priority
1488 *
1489 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
1490 * copies share the same packet data but not structure. The new
1491 * buffer has a reference count of 1. If the allocation fails the
1492 * function returns %NULL; otherwise the new buffer is returned.
1493 *
1494 * If this function is called from an interrupt, @gfp_mask must be
1495 * %GFP_ATOMIC.
1496 */
1497
1498struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1499{
1500 struct sk_buff_fclones *fclones = container_of(skb,
1501 struct sk_buff_fclones,
1502 skb1);
1503 struct sk_buff *n;
1504
1505 if (skb_orphan_frags(skb, gfp_mask))
1506 return NULL;
1507
1508 if (skb->fclone == SKB_FCLONE_ORIG &&
1509 refcount_read(&fclones->fclone_ref) == 1) {
1510 n = &fclones->skb2;
1511 refcount_set(&fclones->fclone_ref, 2);
1512 } else {
1513 if (skb_pfmemalloc(skb))
1514 gfp_mask |= __GFP_MEMALLOC;
1515
1516 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1517 if (!n)
1518 return NULL;
1519
1520 n->fclone = SKB_FCLONE_UNAVAILABLE;
1521 }
1522
1523 return __skb_clone(n, skb);
1524}
1525EXPORT_SYMBOL(skb_clone);
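
/*
 * Illustrative sketch (editorial addition): a clone is cheap because only
 * the struct sk_buff is duplicated; both buffers reference the same data
 * via shinfo->dataref, so whoever wants to write afterwards must unshare
 * first (e.g. via skb_cow() or pskb_expand_head()). The function name is
 * hypothetical.
 */
static struct sk_buff * __maybe_unused
example_keep_for_retransmit(struct sk_buff *skb)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	/* here clone->data == skb->data: the payload is shared, not copied */
	return clone;	/* may be NULL on allocation failure */
}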
1526
1527void skb_headers_offset_update(struct sk_buff *skb, int off)
1528{
1529 /* Only adjust this if it actually is csum_start rather than csum */
1530 if (skb->ip_summed == CHECKSUM_PARTIAL)
1531 skb->csum_start += off;
1532 /* {transport,network,mac}_header and tail are relative to skb->head */
1533 skb->transport_header += off;
1534 skb->network_header += off;
1535 if (skb_mac_header_was_set(skb))
1536 skb->mac_header += off;
1537 skb->inner_transport_header += off;
1538 skb->inner_network_header += off;
1539 skb->inner_mac_header += off;
1540}
1541EXPORT_SYMBOL(skb_headers_offset_update);
1542
1543void skb_copy_header(struct sk_buff *new, const struct sk_buff *old)
1544{
1545 __copy_skb_header(new, old);
1546
1547 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1548 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1549 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1550}
1551EXPORT_SYMBOL(skb_copy_header);
1552
1553static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1554{
1555 if (skb_pfmemalloc(skb))
1556 return SKB_ALLOC_RX;
1557 return 0;
1558}
1559
1560/**
1561 * skb_copy - create private copy of an sk_buff
1562 * @skb: buffer to copy
1563 * @gfp_mask: allocation priority
1564 *
1565 * Make a copy of both an &sk_buff and its data. This is used when the
1566 * caller wishes to modify the data and needs a private copy of the
1567 * data to alter. Returns %NULL on failure or the pointer to the buffer
1568 * on success. The returned buffer has a reference count of 1.
1569 *
1570 * As a by-product this function converts a non-linear &sk_buff to a
1571 * linear one, so that the &sk_buff becomes completely private and the
1572 * caller is allowed to modify all the data of the returned buffer. This
1573 * means that this function is not recommended for use when only the
1574 * header is going to be modified. Use pskb_copy() instead.
1575 */
1576
1577struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1578{
1579 int headerlen = skb_headroom(skb);
1580 unsigned int size = skb_end_offset(skb) + skb->data_len;
1581 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1582 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1583
1584 if (!n)
1585 return NULL;
1586
1587 /* Set the data pointer */
1588 skb_reserve(n, headerlen);
1589 /* Set the tail pointer and length */
1590 skb_put(n, skb->len);
1591
1592 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1593
1594 skb_copy_header(n, skb);
1595 return n;
1596}
1597EXPORT_SYMBOL(skb_copy);
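
/*
 * Illustrative sketch (editorial addition): a full copy is only needed
 * when the payload itself will be rewritten; header-only edits should
 * prefer pskb_copy(), which keeps the fragments shared. The function name
 * and the flag are hypothetical.
 */
static struct sk_buff * __maybe_unused
example_private_copy(struct sk_buff *skb, bool payload_will_change)
{
	if (payload_will_change)
		return skb_copy(skb, GFP_ATOMIC);  /* linearizes everything */
	return pskb_copy(skb, GFP_ATOMIC);  /* private header, shared frags */
}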
1598
1599/**
1600 * __pskb_copy_fclone - create copy of an sk_buff with private head.
1601 * @skb: buffer to copy
1602 * @headroom: headroom of new skb
1603 * @gfp_mask: allocation priority
1604 * @fclone: if true allocate the copy of the skb from the fclone
1605 * cache instead of the head cache; it is recommended to set this
1606 * to true for the cases where the copy will likely be cloned
1607 *
1608 * Make a copy of both an &sk_buff and part of its data, located
1609 * in its header. Fragmented data remains shared. This is used when
1610 * the caller wishes to modify only the header of the &sk_buff and
1611 * needs a private copy of the header to alter. Returns %NULL on
1612 * failure or the pointer to the buffer on success.
1613 * The returned buffer has a reference count of 1.
1614 */
1615
1616struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1617 gfp_t gfp_mask, bool fclone)
1618{
1619 unsigned int size = skb_headlen(skb) + headroom;
1620 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1621 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1622
1623 if (!n)
1624 goto out;
1625
1626 /* Set the data pointer */
1627 skb_reserve(n, headroom);
1628 /* Set the tail pointer and length */
1629 skb_put(n, skb_headlen(skb));
1630 /* Copy the bytes */
1631 skb_copy_from_linear_data(skb, n->data, n->len);
1632
1633 n->truesize += skb->data_len;
1634 n->data_len = skb->data_len;
1635 n->len = skb->len;
1636
1637 if (skb_shinfo(skb)->nr_frags) {
1638 int i;
1639
1640 if (skb_orphan_frags(skb, gfp_mask) ||
1641 skb_zerocopy_clone(n, skb, gfp_mask)) {
1642 kfree_skb(n);
1643 n = NULL;
1644 goto out;
1645 }
1646 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1647 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1648 skb_frag_ref(skb, i);
1649 }
1650 skb_shinfo(n)->nr_frags = i;
1651 }
1652
1653 if (skb_has_frag_list(skb)) {
1654 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1655 skb_clone_fraglist(n);
1656 }
1657
1658 skb_copy_header(n, skb);
1659out:
1660 return n;
1661}
1662EXPORT_SYMBOL(__pskb_copy_fclone);
1663
1664/**
1665 * pskb_expand_head - reallocate header of &sk_buff
1666 * @skb: buffer to reallocate
1667 * @nhead: room to add at head
1668 * @ntail: room to add at tail
1669 * @gfp_mask: allocation priority
1670 *
1671 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
1672 * the header of @skb. The &sk_buff itself is not changed. The &sk_buff
1673 * MUST have a reference count of 1. Returns zero on success or a negative
1674 * error if expansion failed. In the latter case, the &sk_buff is unchanged.
1675 *
1676 * All the pointers pointing into skb header may change and must be
1677 * reloaded after call to this function.
1678 */
1679
1680int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1681 gfp_t gfp_mask)
1682{
1683 int i, osize = skb_end_offset(skb);
1684 int size = osize + nhead + ntail;
1685 long off;
1686 u8 *data;
1687
1688 BUG_ON(nhead < 0);
1689
1690 BUG_ON(skb_shared(skb));
1691
1692 size = SKB_DATA_ALIGN(size);
1693
1694 if (skb_pfmemalloc(skb))
1695 gfp_mask |= __GFP_MEMALLOC;
1696 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1697 gfp_mask, NUMA_NO_NODE, NULL);
1698 if (!data)
1699 goto nodata;
1700 size = SKB_WITH_OVERHEAD(ksize(data));
1701
1702 /* Copy only real data... and, alas, header. This should be
1703 * optimized for the cases when header is void.
1704 */
1705 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1706
1707 memcpy((struct skb_shared_info *)(data + size),
1708 skb_shinfo(skb),
1709 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1710
1711 /*
1712	 * If shinfo is shared we must drop the old head gracefully, but if it
1713	 * is not we can just drop the old head and let the existing refcount
1714	 * be, since all we did is relocate the values.
1715 */
1716 if (skb_cloned(skb)) {
1717 if (skb_orphan_frags(skb, gfp_mask))
1718 goto nofrags;
1719 if (skb_zcopy(skb))
1720 refcount_inc(&skb_uarg(skb)->refcnt);
1721 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1722 skb_frag_ref(skb, i);
1723
1724 if (skb_has_frag_list(skb))
1725 skb_clone_fraglist(skb);
1726
1727 skb_release_data(skb);
1728 } else {
1729 skb_free_head(skb);
1730 }
1731 off = (data + nhead) - skb->head;
1732
1733 skb->head = data;
1734 skb->head_frag = 0;
1735 skb->data += off;
1736#ifdef NET_SKBUFF_DATA_USES_OFFSET
1737 skb->end = size;
1738 off = nhead;
1739#else
1740 skb->end = skb->head + size;
1741#endif
1742 skb->tail += off;
1743 skb_headers_offset_update(skb, nhead);
1744 skb->cloned = 0;
1745 skb->hdr_len = 0;
1746 skb->nohdr = 0;
1747 atomic_set(&skb_shinfo(skb)->dataref, 1);
1748
1749 skb_metadata_clear(skb);
1750
1751 /* It is not generally safe to change skb->truesize.
1752	 * For the moment, we only really care about the rx path, or
1753 * when skb is orphaned (not attached to a socket).
1754 */
1755 if (!skb->sk || skb->destructor == sock_edemux)
1756 skb->truesize += size - osize;
1757
1758 return 0;
1759
1760nofrags:
1761 kfree(data);
1762nodata:
1763 return -ENOMEM;
1764}
1765EXPORT_SYMBOL(pskb_expand_head);
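
/*
 * Illustrative sketch (editorial addition): the classic use is making sure
 * there is room before pushing a new header, much like skb_cow() does.
 * All pointers into the header must be re-read after a successful call.
 * "hlen" is a hypothetical encapsulation header length.
 */
static int __maybe_unused example_make_headroom(struct sk_buff *skb,
						unsigned int hlen)
{
	unsigned int needed = hlen > skb_headroom(skb) ?
			      hlen - skb_headroom(skb) : 0;

	if (!needed && !skb_cloned(skb))
		return 0;	/* enough private headroom already */
	return pskb_expand_head(skb, needed, 0, GFP_ATOMIC);
}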
1766
1767/* Make private copy of skb with writable head and some headroom */
1768
1769struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1770{
1771 struct sk_buff *skb2;
1772 int delta = headroom - skb_headroom(skb);
1773
1774 if (delta <= 0)
1775 skb2 = pskb_copy(skb, GFP_ATOMIC);
1776 else {
1777 skb2 = skb_clone(skb, GFP_ATOMIC);
1778 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1779 GFP_ATOMIC)) {
1780 kfree_skb(skb2);
1781 skb2 = NULL;
1782 }
1783 }
1784 return skb2;
1785}
1786EXPORT_SYMBOL(skb_realloc_headroom);
1787
1788/**
1789 * skb_copy_expand - copy and expand sk_buff
1790 * @skb: buffer to copy
1791 * @newheadroom: new free bytes at head
1792 * @newtailroom: new free bytes at tail
1793 * @gfp_mask: allocation priority
1794 *
1795 * Make a copy of both an &sk_buff and its data and while doing so
1796 * allocate additional space.
1797 *
1798 * This is used when the caller wishes to modify the data and needs a
1799 * private copy of the data to alter as well as more space for new fields.
1800 * Returns %NULL on failure or the pointer to the buffer
1801 * on success. The returned buffer has a reference count of 1.
1802 *
1803 * You must pass %GFP_ATOMIC as the allocation priority if this function
1804 * is called from an interrupt.
1805 */
1806struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1807 int newheadroom, int newtailroom,
1808 gfp_t gfp_mask)
1809{
1810 /*
1811 * Allocate the copy buffer
1812 */
1813 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1814 gfp_mask, skb_alloc_rx_flag(skb),
1815 NUMA_NO_NODE);
1816 int oldheadroom = skb_headroom(skb);
1817 int head_copy_len, head_copy_off;
1818
1819 if (!n)
1820 return NULL;
1821
1822 skb_reserve(n, newheadroom);
1823
1824 /* Set the tail pointer and length */
1825 skb_put(n, skb->len);
1826
1827 head_copy_len = oldheadroom;
1828 head_copy_off = 0;
1829 if (newheadroom <= head_copy_len)
1830 head_copy_len = newheadroom;
1831 else
1832 head_copy_off = newheadroom - head_copy_len;
1833
1834 /* Copy the linear header and data. */
1835 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1836 skb->len + head_copy_len));
1837
1838 skb_copy_header(n, skb);
1839
1840 skb_headers_offset_update(n, newheadroom - oldheadroom);
1841
1842 return n;
1843}
1844EXPORT_SYMBOL(skb_copy_expand);
1845
1846/**
1847 * __skb_pad - zero pad the tail of an skb
1848 * @skb: buffer to pad
1849 * @pad: space to pad
1850 * @free_on_error: free buffer on error
1851 *
1852 * Ensure that a buffer is followed by a padding area that is zero
1853 * filled. Used by network drivers which may DMA or transfer data
1854 * beyond the buffer end onto the wire.
1855 *
1856 * May return an error in out-of-memory cases. The skb is freed on error
1857 * if @free_on_error is true.
1858 */
1859
1860int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1861{
1862 int err;
1863 int ntail;
1864
1865	/* If the skbuff is non-linear, tailroom is always zero. */
1866 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1867 memset(skb->data+skb->len, 0, pad);
1868 return 0;
1869 }
1870
1871 ntail = skb->data_len + pad - (skb->end - skb->tail);
1872 if (likely(skb_cloned(skb) || ntail > 0)) {
1873 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1874 if (unlikely(err))
1875 goto free_skb;
1876 }
1877
1878 /* FIXME: The use of this function with non-linear skb's really needs
1879 * to be audited.
1880 */
1881 err = skb_linearize(skb);
1882 if (unlikely(err))
1883 goto free_skb;
1884
1885 memset(skb->data + skb->len, 0, pad);
1886 return 0;
1887
1888free_skb:
1889 if (free_on_error)
1890 kfree_skb(skb);
1891 return err;
1892}
1893EXPORT_SYMBOL(__skb_pad);
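
/*
 * Illustrative sketch (editorial addition): Ethernet has a minimum frame
 * size, so drivers commonly pad short skbs before handing them to DMA.
 * The skb_put_padto() helper shown here builds on this padding primitive
 * and frees the skb on failure.
 */
static int __maybe_unused example_pad_eth_frame(struct sk_buff *skb)
{
	/* extends skb->len to ETH_ZLEN, zero-filling the added bytes */
	return skb_put_padto(skb, ETH_ZLEN);
}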
1894
1895/**
1896 * pskb_put - add data to the tail of a potentially fragmented buffer
1897 * @skb: start of the buffer to use
1898 * @tail: tail fragment of the buffer to use
1899 * @len: amount of data to add
1900 *
1901 * This function extends the used data area of the potentially
1902 * fragmented buffer. @tail must be the last fragment of @skb -- or
1903 * @skb itself. If this would exceed the total buffer size the kernel
1904 * will panic. A pointer to the first byte of the extra data is
1905 * returned.
1906 */
1907
1908void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1909{
1910 if (tail != skb) {
1911 skb->data_len += len;
1912 skb->len += len;
1913 }
1914 return skb_put(tail, len);
1915}
1916EXPORT_SYMBOL_GPL(pskb_put);
1917
1918/**
1919 * skb_put - add data to a buffer
1920 * @skb: buffer to use
1921 * @len: amount of data to add
1922 *
1923 * This function extends the used data area of the buffer. If this would
1924 * exceed the total buffer size the kernel will panic. A pointer to the
1925 * first byte of the extra data is returned.
1926 */
1927void *skb_put(struct sk_buff *skb, unsigned int len)
1928{
1929 void *tmp = skb_tail_pointer(skb);
1930 SKB_LINEAR_ASSERT(skb);
1931 skb->tail += len;
1932 skb->len += len;
1933 if (unlikely(skb->tail > skb->end))
1934 skb_over_panic(skb, len, __builtin_return_address(0));
1935 return tmp;
1936}
1937EXPORT_SYMBOL(skb_put);
1938
1939/**
1940 * skb_push - add data to the start of a buffer
1941 * @skb: buffer to use
1942 * @len: amount of data to add
1943 *
1944 * This function extends the used data area of the buffer at the buffer
1945 * start. If this would exceed the total buffer headroom the kernel will
1946 * panic. A pointer to the first byte of the extra data is returned.
1947 */
1948void *skb_push(struct sk_buff *skb, unsigned int len)
1949{
1950 skb->data -= len;
1951 skb->len += len;
1952 if (unlikely(skb->data < skb->head))
1953 skb_under_panic(skb, len, __builtin_return_address(0));
1954 return skb->data;
1955}
1956EXPORT_SYMBOL(skb_push);
1957
1958/**
1959 * skb_pull - remove data from the start of a buffer
1960 * @skb: buffer to use
1961 * @len: amount of data to remove
1962 *
1963 * This function removes data from the start of a buffer, returning
1964 * the memory to the headroom. A pointer to the next data in the buffer
1965 * is returned. Once the data has been pulled, future pushes will overwrite
1966 * the old data.
1967 */
1968void *skb_pull(struct sk_buff *skb, unsigned int len)
1969{
1970 return skb_pull_inline(skb, len);
1971}
1972EXPORT_SYMBOL(skb_pull);
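
/*
 * Illustrative sketch (editorial addition): decapsulation is the typical
 * caller; once a protocol handler is done with the link-layer header it
 * advances skb->data past it. This assumes the header lies in the linear
 * area, which holds for Ethernet after eth_type_trans().
 */
static void __maybe_unused example_strip_eth_header(struct sk_buff *skb)
{
	skb_pull(skb, ETH_HLEN);  /* skb->data now points at the payload */
}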
1973
1974/**
1975 * skb_trim - remove end from a buffer
1976 * @skb: buffer to alter
1977 * @len: new length
1978 *
1979 * Cut the length of a buffer down by removing data from the tail. If
1980 * the buffer is already under the length specified it is not modified.
1981 * The skb must be linear.
1982 */
1983void skb_trim(struct sk_buff *skb, unsigned int len)
1984{
1985 if (skb->len > len)
1986 __skb_trim(skb, len);
1987}
1988EXPORT_SYMBOL(skb_trim);
1989
1990/* Trims skb to length len. It can change skb pointers.
1991 */
1992
1993int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1994{
1995 struct sk_buff **fragp;
1996 struct sk_buff *frag;
1997 int offset = skb_headlen(skb);
1998 int nfrags = skb_shinfo(skb)->nr_frags;
1999 int i;
2000 int err;
2001
2002 if (skb_cloned(skb) &&
2003 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
2004 return err;
2005
2006 i = 0;
2007 if (offset >= len)
2008 goto drop_pages;
2009
2010 for (; i < nfrags; i++) {
2011 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2012
2013 if (end < len) {
2014 offset = end;
2015 continue;
2016 }
2017
2018 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
2019
2020drop_pages:
2021 skb_shinfo(skb)->nr_frags = i;
2022
2023 for (; i < nfrags; i++)
2024 skb_frag_unref(skb, i);
2025
2026 if (skb_has_frag_list(skb))
2027 skb_drop_fraglist(skb);
2028 goto done;
2029 }
2030
2031 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
2032 fragp = &frag->next) {
2033 int end = offset + frag->len;
2034
2035 if (skb_shared(frag)) {
2036 struct sk_buff *nfrag;
2037
2038 nfrag = skb_clone(frag, GFP_ATOMIC);
2039 if (unlikely(!nfrag))
2040 return -ENOMEM;
2041
2042 nfrag->next = frag->next;
2043 consume_skb(frag);
2044 frag = nfrag;
2045 *fragp = frag;
2046 }
2047
2048 if (end < len) {
2049 offset = end;
2050 continue;
2051 }
2052
2053 if (end > len &&
2054 unlikely((err = pskb_trim(frag, len - offset))))
2055 return err;
2056
2057 if (frag->next)
2058 skb_drop_list(&frag->next);
2059 break;
2060 }
2061
2062done:
2063 if (len > skb_headlen(skb)) {
2064 skb->data_len -= skb->len - len;
2065 skb->len = len;
2066 } else {
2067 skb->len = len;
2068 skb->data_len = 0;
2069 skb_set_tail_pointer(skb, len);
2070 }
2071
2072 if (!skb->sk || skb->destructor == sock_edemux)
2073 skb_condense(skb);
2074 return 0;
2075}
2076EXPORT_SYMBOL(___pskb_trim);
2077
2078/* Note: use pskb_trim_rcsum() instead of calling this directly
2079 */
2080int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
2081{
2082 if (skb->ip_summed == CHECKSUM_COMPLETE) {
2083 int delta = skb->len - len;
2084
2085 skb->csum = csum_block_sub(skb->csum,
2086 skb_checksum(skb, len, delta, 0),
2087 len);
2088 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
2089 int hdlen = (len > skb_headlen(skb)) ? skb_headlen(skb) : len;
2090 int offset = skb_checksum_start_offset(skb) + skb->csum_offset;
2091
2092 if (offset + sizeof(__sum16) > hdlen)
2093 return -EINVAL;
2094 }
2095 return __pskb_trim(skb, len);
2096}
2097EXPORT_SYMBOL(pskb_trim_rcsum_slow);
2098
2099/**
2100 * __pskb_pull_tail - advance tail of skb header
2101 * @skb: buffer to reallocate
2102 * @delta: number of bytes to advance tail
2103 *
2104 * The function makes sense only on a fragmented &sk_buff:
2105 * it expands the header, moving its tail forward and copying necessary
2106 * data from the fragmented part.
2107 *
2108 * &sk_buff MUST have reference count of 1.
2109 *
2110 * Returns %NULL (and the &sk_buff does not change) if the pull failed,
2111 * or the value of the new tail of the skb in the case of success.
2112 *
2113 * All the pointers pointing into skb header may change and must be
2114 * reloaded after call to this function.
2115 */
2116
2117/* Moves tail of skb head forward, copying data from fragmented part,
2118 * when it is necessary.
2119 * 1. It may fail due to malloc failure.
2120 * 2. It may change skb pointers.
2121 *
2122 * It is pretty complicated. Luckily, it is called only in exceptional cases.
2123 */
2124void *__pskb_pull_tail(struct sk_buff *skb, int delta)
2125{
2126	/* If the skb does not have enough free space at the tail, get a new one
2127	 * plus 128 bytes for future expansions. If we have enough
2128	 * room at the tail, reallocate without expansion only if the skb is cloned.
2129 */
2130 int i, k, eat = (skb->tail + delta) - skb->end;
2131
2132 if (eat > 0 || skb_cloned(skb)) {
2133 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
2134 GFP_ATOMIC))
2135 return NULL;
2136 }
2137
2138 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
2139 skb_tail_pointer(skb), delta));
2140
2141	/* Optimization: no fragments, no reason to preestimate the
2142	 * size of the pulled pages. Superb.
2143 */
2144 if (!skb_has_frag_list(skb))
2145 goto pull_pages;
2146
2147 /* Estimate size of pulled pages. */
2148 eat = delta;
2149 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2150 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2151
2152 if (size >= eat)
2153 goto pull_pages;
2154 eat -= size;
2155 }
2156
2157	/* If we need to update the frag list, we are in trouble.
2158	 * Certainly, it is possible to add an offset to the skb data,
2159	 * but taking into account that pulling is expected to
2160	 * be a very rare operation, it is worth fighting against
2161	 * further bloating of the skb head and crucifying ourselves here instead.
2162	 * Pure masochism, indeed. 8)8)
2163 */
2164 if (eat) {
2165 struct sk_buff *list = skb_shinfo(skb)->frag_list;
2166 struct sk_buff *clone = NULL;
2167 struct sk_buff *insp = NULL;
2168
2169 do {
2170 if (list->len <= eat) {
2171 /* Eaten as whole. */
2172 eat -= list->len;
2173 list = list->next;
2174 insp = list;
2175 } else {
2176 /* Eaten partially. */
2177
2178 if (skb_shared(list)) {
2179 /* Sucks! We need to fork list. :-( */
2180 clone = skb_clone(list, GFP_ATOMIC);
2181 if (!clone)
2182 return NULL;
2183 insp = list->next;
2184 list = clone;
2185 } else {
2186 /* This may be pulled without
2187 * problems. */
2188 insp = list;
2189 }
2190 if (!pskb_pull(list, eat)) {
2191 kfree_skb(clone);
2192 return NULL;
2193 }
2194 break;
2195 }
2196 } while (eat);
2197
2198 /* Free pulled out fragments. */
2199 while ((list = skb_shinfo(skb)->frag_list) != insp) {
2200 skb_shinfo(skb)->frag_list = list->next;
2201 kfree_skb(list);
2202 }
2203 /* And insert new clone at head. */
2204 if (clone) {
2205 clone->next = list;
2206 skb_shinfo(skb)->frag_list = clone;
2207 }
2208 }
2209 /* Success! Now we may commit changes to skb data. */
2210
2211pull_pages:
2212 eat = delta;
2213 k = 0;
2214 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2215 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2216
2217 if (size <= eat) {
2218 skb_frag_unref(skb, i);
2219 eat -= size;
2220 } else {
2221 skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
2222
2223 *frag = skb_shinfo(skb)->frags[i];
2224 if (eat) {
2225 skb_frag_off_add(frag, eat);
2226 skb_frag_size_sub(frag, eat);
2227 if (!i)
2228 goto end;
2229 eat = 0;
2230 }
2231 k++;
2232 }
2233 }
2234 skb_shinfo(skb)->nr_frags = k;
2235
2236end:
2237 skb->tail += delta;
2238 skb->data_len -= delta;
2239
2240 if (!skb->data_len)
2241 skb_zcopy_clear(skb, false);
2242
2243 return skb_tail_pointer(skb);
2244}
2245EXPORT_SYMBOL(__pskb_pull_tail);
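
/*
 * Illustrative sketch (editorial addition): most code reaches this slow
 * path through pskb_may_pull(), which only falls back to
 * __pskb_pull_tail() when the bytes to be parsed are not yet in the
 * linear header. The function name and length are hypothetical.
 */
static bool __maybe_unused example_header_is_linear(struct sk_buff *skb,
						    unsigned int hdr_len)
{
	if (!pskb_may_pull(skb, hdr_len))
		return false;	/* packet too short, or reallocation failed */
	/* skb->data .. skb->data + hdr_len is now linear and safe to read */
	return true;
}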
2246
2247/**
2248 * skb_copy_bits - copy bits from skb to kernel buffer
2249 * @skb: source skb
2250 * @offset: offset in source
2251 * @to: destination buffer
2252 * @len: number of bytes to copy
2253 *
2254 * Copy the specified number of bytes from the source skb to the
2255 * destination buffer.
2256 *
2257 * CAUTION ! :
2258 * If its prototype is ever changed,
2259 * check arch/{*}/net/{*}.S files,
2260 * since it is called from BPF assembly code.
2261 */
2262int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2263{
2264 int start = skb_headlen(skb);
2265 struct sk_buff *frag_iter;
2266 int i, copy;
2267
2268 if (offset > (int)skb->len - len)
2269 goto fault;
2270
2271 /* Copy header. */
2272 if ((copy = start - offset) > 0) {
2273 if (copy > len)
2274 copy = len;
2275 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2276 if ((len -= copy) == 0)
2277 return 0;
2278 offset += copy;
2279 to += copy;
2280 }
2281
2282 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2283 int end;
2284 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2285
2286 WARN_ON(start > offset + len);
2287
2288 end = start + skb_frag_size(f);
2289 if ((copy = end - offset) > 0) {
2290 u32 p_off, p_len, copied;
2291 struct page *p;
2292 u8 *vaddr;
2293
2294 if (copy > len)
2295 copy = len;
2296
2297 skb_frag_foreach_page(f,
2298 skb_frag_off(f) + offset - start,
2299 copy, p, p_off, p_len, copied) {
2300 vaddr = kmap_atomic(p);
2301 memcpy(to + copied, vaddr + p_off, p_len);
2302 kunmap_atomic(vaddr);
2303 }
2304
2305 if ((len -= copy) == 0)
2306 return 0;
2307 offset += copy;
2308 to += copy;
2309 }
2310 start = end;
2311 }
2312
2313 skb_walk_frags(skb, frag_iter) {
2314 int end;
2315
2316 WARN_ON(start > offset + len);
2317
2318 end = start + frag_iter->len;
2319 if ((copy = end - offset) > 0) {
2320 if (copy > len)
2321 copy = len;
2322 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2323 goto fault;
2324 if ((len -= copy) == 0)
2325 return 0;
2326 offset += copy;
2327 to += copy;
2328 }
2329 start = end;
2330 }
2331
2332 if (!len)
2333 return 0;
2334
2335fault:
2336 return -EFAULT;
2337}
2338EXPORT_SYMBOL(skb_copy_bits);
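
/*
 * Illustrative sketch (editorial addition): copying a header into a local
 * buffer works even when the skb is fragmented, unlike dereferencing
 * skb->data directly. The function name and header length are hypothetical.
 */
static int __maybe_unused example_peek_header(const struct sk_buff *skb,
					      void *buf, unsigned int hdr_len)
{
	/* returns -EFAULT if the skb is shorter than hdr_len */
	return skb_copy_bits(skb, 0, buf, hdr_len);
}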
2339
2340/*
2341 * Callback from splice_to_pipe(), if we need to release some pages
2342 * at the end of the spd in case we erred out while filling the pipe.
2343 */
2344static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2345{
2346 put_page(spd->pages[i]);
2347}
2348
2349static struct page *linear_to_page(struct page *page, unsigned int *len,
2350 unsigned int *offset,
2351 struct sock *sk)
2352{
2353 struct page_frag *pfrag = sk_page_frag(sk);
2354
2355 if (!sk_page_frag_refill(sk, pfrag))
2356 return NULL;
2357
2358 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2359
2360 memcpy(page_address(pfrag->page) + pfrag->offset,
2361 page_address(page) + *offset, *len);
2362 *offset = pfrag->offset;
2363 pfrag->offset += *len;
2364
2365 return pfrag->page;
2366}
2367
2368static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2369 struct page *page,
2370 unsigned int offset)
2371{
2372 return spd->nr_pages &&
2373 spd->pages[spd->nr_pages - 1] == page &&
2374 (spd->partial[spd->nr_pages - 1].offset +
2375 spd->partial[spd->nr_pages - 1].len == offset);
2376}
2377
2378/*
2379 * Fill page/offset/length into spd, if it can hold more pages.
2380 */
2381static bool spd_fill_page(struct splice_pipe_desc *spd,
2382 struct pipe_inode_info *pipe, struct page *page,
2383 unsigned int *len, unsigned int offset,
2384 bool linear,
2385 struct sock *sk)
2386{
2387 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2388 return true;
2389
2390 if (linear) {
2391 page = linear_to_page(page, len, &offset, sk);
2392 if (!page)
2393 return true;
2394 }
2395 if (spd_can_coalesce(spd, page, offset)) {
2396 spd->partial[spd->nr_pages - 1].len += *len;
2397 return false;
2398 }
2399 get_page(page);
2400 spd->pages[spd->nr_pages] = page;
2401 spd->partial[spd->nr_pages].len = *len;
2402 spd->partial[spd->nr_pages].offset = offset;
2403 spd->nr_pages++;
2404
2405 return false;
2406}
2407
2408static bool __splice_segment(struct page *page, unsigned int poff,
2409 unsigned int plen, unsigned int *off,
2410 unsigned int *len,
2411 struct splice_pipe_desc *spd, bool linear,
2412 struct sock *sk,
2413 struct pipe_inode_info *pipe)
2414{
2415 if (!*len)
2416 return true;
2417
2418 /* skip this segment if already processed */
2419 if (*off >= plen) {
2420 *off -= plen;
2421 return false;
2422 }
2423
2424 /* ignore any bits we already processed */
2425 poff += *off;
2426 plen -= *off;
2427 *off = 0;
2428
2429 do {
2430 unsigned int flen = min(*len, plen);
2431
2432 if (spd_fill_page(spd, pipe, page, &flen, poff,
2433 linear, sk))
2434 return true;
2435 poff += flen;
2436 plen -= flen;
2437 *len -= flen;
2438 } while (*len && plen);
2439
2440 return false;
2441}
2442
2443/*
2444 * Map linear and fragment data from the skb to spd. It reports true if the
2445 * pipe is full or if we already spliced the requested length.
2446 */
2447static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2448 unsigned int *offset, unsigned int *len,
2449 struct splice_pipe_desc *spd, struct sock *sk)
2450{
2451 int seg;
2452 struct sk_buff *iter;
2453
2454	/* map the linear part:
2455 * If skb->head_frag is set, this 'linear' part is backed by a
2456 * fragment, and if the head is not shared with any clones then
2457 * we can avoid a copy since we own the head portion of this page.
2458 */
2459 if (__splice_segment(virt_to_page(skb->data),
2460 (unsigned long) skb->data & (PAGE_SIZE - 1),
2461 skb_headlen(skb),
2462 offset, len, spd,
2463 skb_head_is_locked(skb),
2464 sk, pipe))
2465 return true;
2466
2467 /*
2468 * then map the fragments
2469 */
2470 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2471 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2472
2473 if (__splice_segment(skb_frag_page(f),
2474 skb_frag_off(f), skb_frag_size(f),
2475 offset, len, spd, false, sk, pipe))
2476 return true;
2477 }
2478
2479 skb_walk_frags(skb, iter) {
2480 if (*offset >= iter->len) {
2481 *offset -= iter->len;
2482 continue;
2483 }
2484 /* __skb_splice_bits() only fails if the output has no room
2485 * left, so no point in going over the frag_list for the error
2486 * case.
2487 */
2488 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2489 return true;
2490 }
2491
2492 return false;
2493}
2494
2495/*
2496 * Map data from the skb to a pipe. Should handle both the linear part,
2497 * the fragments, and the frag list.
2498 */
2499int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2500 struct pipe_inode_info *pipe, unsigned int tlen,
2501 unsigned int flags)
2502{
2503 struct partial_page partial[MAX_SKB_FRAGS];
2504 struct page *pages[MAX_SKB_FRAGS];
2505 struct splice_pipe_desc spd = {
2506 .pages = pages,
2507 .partial = partial,
2508 .nr_pages_max = MAX_SKB_FRAGS,
2509 .ops = &nosteal_pipe_buf_ops,
2510 .spd_release = sock_spd_release,
2511 };
2512 int ret = 0;
2513
2514 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2515
2516 if (spd.nr_pages)
2517 ret = splice_to_pipe(pipe, &spd);
2518
2519 return ret;
2520}
2521EXPORT_SYMBOL_GPL(skb_splice_bits);
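
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a protocol's ->splice_read() typically reaches this function from a
 * read actor, the way tcp_splice_data_recv() does.  'my_splice_ctx' and
 * its members are hypothetical here:
 *
 *	static int my_splice_actor(read_descriptor_t *rd_desc,
 *				   struct sk_buff *skb,
 *				   unsigned int offset, size_t len)
 *	{
 *		struct my_splice_ctx *ctx = rd_desc->arg.data;
 *
 *		return skb_splice_bits(skb, ctx->sk, offset, ctx->pipe,
 *				       min(rd_desc->count, len), ctx->flags);
 *	}
 *
 * The return value is the number of bytes moved into the pipe.
 */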

static int sendmsg_unlocked(struct sock *sk, struct msghdr *msg,
			    struct kvec *vec, size_t num, size_t size)
{
	struct socket *sock = sk->sk_socket;

	if (!sock)
		return -EINVAL;
	return kernel_sendmsg(sock, msg, vec, num, size);
}

static int sendpage_unlocked(struct sock *sk, struct page *page, int offset,
			     size_t size, int flags)
{
	struct socket *sock = sk->sk_socket;

	if (!sock)
		return -EINVAL;
	return kernel_sendpage(sock, page, offset, size, flags);
}

typedef int (*sendmsg_func)(struct sock *sk, struct msghdr *msg,
			    struct kvec *vec, size_t num, size_t size);
typedef int (*sendpage_func)(struct sock *sk, struct page *page, int offset,
			     size_t size, int flags);
static int __skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset,
			   int len, sendmsg_func sendmsg, sendpage_func sendpage)
{
	unsigned int orig_len = len;
	struct sk_buff *head = skb;
	unsigned short fragidx;
	int slen, ret;

do_frag_list:

	/* Deal with head data */
	while (offset < skb_headlen(skb) && len) {
		struct kvec kv;
		struct msghdr msg;

		slen = min_t(int, len, skb_headlen(skb) - offset);
		kv.iov_base = skb->data + offset;
		kv.iov_len = slen;
		memset(&msg, 0, sizeof(msg));
		msg.msg_flags = MSG_DONTWAIT;

		ret = INDIRECT_CALL_2(sendmsg, kernel_sendmsg_locked,
				      sendmsg_unlocked, sk, &msg, &kv, 1, slen);
		if (ret <= 0)
			goto error;

		offset += ret;
		len -= ret;
	}

	/* All the data was skb head? */
	if (!len)
		goto out;

	/* Make offset relative to start of frags */
	offset -= skb_headlen(skb);

	/* Find where we are in frag list */
	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];

		if (offset < skb_frag_size(frag))
			break;

		offset -= skb_frag_size(frag);
	}

	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];

		slen = min_t(size_t, len, skb_frag_size(frag) - offset);

		while (slen) {
			ret = INDIRECT_CALL_2(sendpage, kernel_sendpage_locked,
					      sendpage_unlocked, sk,
					      skb_frag_page(frag),
					      skb_frag_off(frag) + offset,
					      slen, MSG_DONTWAIT);
			if (ret <= 0)
				goto error;

			len -= ret;
			offset += ret;
			slen -= ret;
		}

		offset = 0;
	}

	if (len) {
		/* Process any frag lists */

		if (skb == head) {
			if (skb_has_frag_list(skb)) {
				skb = skb_shinfo(skb)->frag_list;
				goto do_frag_list;
			}
		} else if (skb->next) {
			skb = skb->next;
			goto do_frag_list;
		}
	}

out:
	return orig_len - len;

error:
	return orig_len == len ? ret : orig_len - len;
}

/* Send skb data on a socket. Socket must be locked. */
int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
			 int len)
{
	return __skb_send_sock(sk, skb, offset, len, kernel_sendmsg_locked,
			       kernel_sendpage_locked);
}
EXPORT_SYMBOL_GPL(skb_send_sock_locked);

/* Send skb data on a socket. Socket must be unlocked. */
int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
{
	return __skb_send_sock(sk, skb, offset, len, sendmsg_unlocked,
			       sendpage_unlocked);
}
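
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * flushing a private queue of skbs over a stream socket whose lock is
 * already held, espintcp-style.  'queue' and the retry policy are
 * hypothetical, and partial-send bookkeeping is simplified:
 *
 *	struct sk_buff *skb;
 *
 *	lock_sock(sk);
 *	while ((skb = skb_peek(queue)) != NULL) {
 *		int n = skb_send_sock_locked(sk, skb, 0, skb->len);
 *
 *		if (n < (int)skb->len)
 *			break;			// error or partial send
 *		__skb_unlink(skb, queue);
 *		consume_skb(skb);
 *	}
 *	release_sock(sk);
 */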

/**
 * skb_store_bits - store bits from kernel buffer to skb
 * @skb: destination buffer
 * @offset: offset in destination
 * @from: source buffer
 * @len: number of bytes to copy
 *
 * Copy the specified number of bytes from the source buffer to the
 * destination skb. This function handles all the messy bits of
 * traversing fragment lists and such.
 */

int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
{
	int start = skb_headlen(skb);
	struct sk_buff *frag_iter;
	int i, copy;

	if (offset > (int)skb->len - len)
		goto fault;

	if ((copy = start - offset) > 0) {
		if (copy > len)
			copy = len;
		skb_copy_to_linear_data_offset(skb, offset, from, copy);
		if ((len -= copy) == 0)
			return 0;
		offset += copy;
		from += copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      skb_frag_off(frag) + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				memcpy(vaddr + p_off, from + copied, p_len);
				kunmap_atomic(vaddr);
			}

			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			if (skb_store_bits(frag_iter, offset - start,
					   from, copy))
				goto fault;
			if ((len -= copy) == 0)
				return 0;
			offset += copy;
			from += copy;
		}
		start = end;
	}
	if (!len)
		return 0;

fault:
	return -EFAULT;
}
EXPORT_SYMBOL(skb_store_bits);
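
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * patching bytes at a known offset of a possibly non-linear skb, e.g.
 * rewriting a field after NAT-style mangling.  'buf' is a kernel buffer:
 *
 *	if (skb_store_bits(skb, offset, buf, len))
 *		return -EFAULT;		// offset + len exceeds skb->len
 *
 * This is the write-side counterpart of skb_copy_bits(); the caller must
 * have made the skb writable first (e.g. via skb_ensure_writable()).
 */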

/* Checksum skb data. */
__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
		      __wsum csum, const struct skb_checksum_ops *ops)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;

	/* Checksum header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = INDIRECT_CALL_1(ops->update, csum_partial_ext,
				       skb->data + offset, copy, csum);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(frag);
		if ((copy = end - offset) > 0) {
			u32 p_off, p_len, copied;
			struct page *p;
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      skb_frag_off(frag) + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				csum2 = INDIRECT_CALL_1(ops->update,
							csum_partial_ext,
							vaddr + p_off, p_len, 0);
				kunmap_atomic(vaddr);
				csum = INDIRECT_CALL_1(ops->combine,
						       csum_block_add_ext, csum,
						       csum2, pos, p_len);
				pos += p_len;
			}

			if (!(len -= copy))
				return csum;
			offset += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			__wsum csum2;
			if (copy > len)
				copy = len;
			csum2 = __skb_checksum(frag_iter, offset - start,
					       copy, 0, ops);
			csum = INDIRECT_CALL_1(ops->combine, csum_block_add_ext,
					       csum, csum2, pos, copy);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);

	return csum;
}
EXPORT_SYMBOL(__skb_checksum);

__wsum skb_checksum(const struct sk_buff *skb, int offset,
		    int len, __wsum csum)
{
	const struct skb_checksum_ops ops = {
		.update  = csum_partial_ext,
		.combine = csum_block_add_ext,
	};

	return __skb_checksum(skb, offset, len, csum, &ops);
}
EXPORT_SYMBOL(skb_checksum);
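
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * software-verifying an L4 checksum by folding the payload sum into a
 * pseudo-header sum.  'saddr', 'daddr', 'off' and 'ulen' would come from
 * the caller's headers:
 *
 *	__wsum pseudo = csum_tcpudp_nofold(saddr, daddr, ulen,
 *					   IPPROTO_UDP, 0);
 *	__sum16 res = csum_fold(skb_checksum(skb, off, ulen, pseudo));
 *
 *	if (res)
 *		// checksum failed
 *
 * Passing the pseudo-header sum as the initial @csum lets the whole
 * computation happen in one pass over the (possibly fragmented) data.
 */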

/* Both of above in one bottle. */

__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
			      u8 *to, int len)
{
	int start = skb_headlen(skb);
	int i, copy = start - offset;
	struct sk_buff *frag_iter;
	int pos = 0;
	__wsum csum = 0;

	/* Copy header. */
	if (copy > 0) {
		if (copy > len)
			copy = len;
		csum = csum_partial_copy_nocheck(skb->data + offset, to,
						 copy);
		if ((len -= copy) == 0)
			return csum;
		offset += copy;
		to += copy;
		pos = copy;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		int end;

		WARN_ON(start > offset + len);

		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
		if ((copy = end - offset) > 0) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			u32 p_off, p_len, copied;
			struct page *p;
			__wsum csum2;
			u8 *vaddr;

			if (copy > len)
				copy = len;

			skb_frag_foreach_page(frag,
					      skb_frag_off(frag) + offset - start,
					      copy, p, p_off, p_len, copied) {
				vaddr = kmap_atomic(p);
				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
								  to + copied,
								  p_len);
				kunmap_atomic(vaddr);
				csum = csum_block_add(csum, csum2, pos);
				pos += p_len;
			}

			if (!(len -= copy))
				return csum;
			offset += copy;
			to += copy;
		}
		start = end;
	}

	skb_walk_frags(skb, frag_iter) {
		__wsum csum2;
		int end;

		WARN_ON(start > offset + len);

		end = start + frag_iter->len;
		if ((copy = end - offset) > 0) {
			if (copy > len)
				copy = len;
			csum2 = skb_copy_and_csum_bits(frag_iter,
						       offset - start,
						       to, copy);
			csum = csum_block_add(csum, csum2, pos);
			if ((len -= copy) == 0)
				return csum;
			offset += copy;
			to += copy;
			pos += copy;
		}
		start = end;
	}
	BUG_ON(len);
	return csum;
}
EXPORT_SYMBOL(skb_copy_and_csum_bits);

__sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
{
	__sum16 sum;

	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
	/* See comments in __skb_checksum_complete(). */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}
	if (!skb_shared(skb))
		skb->csum_valid = !sum;
	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete_head);

/* This function assumes skb->csum already holds the pseudo header's checksum,
 * which has been changed from the hardware checksum, for example, by
 * __skb_checksum_validate_complete(). Also, the original skb->csum must
 * have been validated unsuccessfully in the CHECKSUM_COMPLETE case.
 *
 * It returns non-zero if the recomputed checksum is still invalid, otherwise
 * zero. The new checksum is stored back into skb->csum unless the skb is
 * shared.
 */
__sum16 __skb_checksum_complete(struct sk_buff *skb)
{
	__wsum csum;
	__sum16 sum;

	csum = skb_checksum(skb, 0, skb->len, 0);

	sum = csum_fold(csum_add(skb->csum, csum));
	/* This check is inverted, because we already knew the hardware
	 * checksum is invalid before calling this function. So, if the
	 * re-computed checksum is valid instead, then we have a mismatch
	 * between the original skb->csum and skb_checksum(). This means either
	 * the original hardware checksum is incorrect or we screwed up
	 * skb->csum when moving skb->data around.
	 */
	if (likely(!sum)) {
		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
		    !skb->csum_complete_sw)
			netdev_rx_csum_fault(skb->dev, skb);
	}

	if (!skb_shared(skb)) {
		/* Save full packet checksum */
		skb->csum = csum;
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum_complete_sw = 1;
		skb->csum_valid = !sum;
	}

	return sum;
}
EXPORT_SYMBOL(__skb_checksum_complete);

static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return 0;
}

static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
				       int offset, int len)
{
	net_warn_ratelimited(
		"%s: attempt to compute crc32c without libcrc32c.ko\n",
		__func__);
	return 0;
}

static const struct skb_checksum_ops default_crc32c_ops = {
	.update  = warn_crc32c_csum_update,
	.combine = warn_crc32c_csum_combine,
};

const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
	&default_crc32c_ops;
EXPORT_SYMBOL(crc32c_csum_stub);
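
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * computing a CRC32c over skb data through the stub, the way
 * skb_crc32c_csum_help() in net/core/dev.c drives it.  When libcrc32c is
 * loaded it replaces the stub with real update/combine ops:
 *
 *	__le32 crc = cpu_to_le32(~__skb_checksum(skb, start,
 *						 skb->len - start,
 *						 ~(__u32)0, crc32c_csum_stub));
 *
 * (sparse __force casts between __wsum and __u32 are omitted for brevity)
 */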

/**
 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
 * @from: source buffer
 *
 * Calculates the amount of linear headroom needed in the 'to' skb passed
 * into skb_zerocopy().
 */
unsigned int
skb_zerocopy_headlen(const struct sk_buff *from)
{
	unsigned int hlen = 0;

	if (!from->head_frag ||
	    skb_headlen(from) < L1_CACHE_BYTES ||
	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS) {
		hlen = skb_headlen(from);
		if (!hlen)
			hlen = from->len;
	}

	if (skb_has_frag_list(from))
		hlen = from->len;

	return hlen;
}
EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);

/**
 * skb_zerocopy - Zero copy skb to skb
 * @to: destination buffer
 * @from: source buffer
 * @len: number of bytes to copy from source buffer
 * @hlen: size of linear headroom in destination buffer
 *
 * Copies up to @len bytes from @from to @to by creating references
 * to the frags in the source buffer.
 *
 * The @hlen as calculated by skb_zerocopy_headlen() specifies the
 * headroom in the @to buffer.
 *
 * Return value:
 * 0: everything is OK
 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
 * -EFAULT: skb_copy_bits() found some problem with skb geometry
 */
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
	int i, j = 0;
	int plen = 0; /* length of skb->head fragment */
	int ret;
	struct page *page;
	unsigned int offset;

	BUG_ON(!from->head_frag && !hlen);

	/* don't bother with small payloads */
	if (len <= skb_tailroom(to))
		return skb_copy_bits(from, 0, skb_put(to, len), len);

	if (hlen) {
		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
		if (unlikely(ret))
			return ret;
		len -= hlen;
	} else {
		plen = min_t(int, skb_headlen(from), len);
		if (plen) {
			page = virt_to_head_page(from->head);
			offset = from->data - (unsigned char *)page_address(page);
			__skb_fill_page_desc(to, 0, page, offset, plen);
			get_page(page);
			j = 1;
			len -= plen;
		}
	}

	to->truesize += len + plen;
	to->len += len + plen;
	to->data_len += len + plen;

	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
		skb_tx_error(from);
		return -ENOMEM;
	}
	skb_zerocopy_clone(to, from, GFP_ATOMIC);

	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
		int size;

		if (!len)
			break;
		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
		size = min_t(int, skb_frag_size(&skb_shinfo(to)->frags[j]),
			     len);
		skb_frag_size_set(&skb_shinfo(to)->frags[j], size);
		len -= size;
		skb_frag_ref(to, j);
		j++;
	}
	skb_shinfo(to)->nr_frags = j;

	return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);
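
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * building a copy-avoiding duplicate of a packet, roughly the way
 * nfqnl_build_packet_message() in nfnetlink_queue does.  'extra' (room
 * for metadata in the new skb) is hypothetical:
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen + extra, GFP_ATOMIC);
 *
 *	if (!to)
 *		return -ENOMEM;
 *	// ... fill the 'extra' metadata here ...
 *	if (skb_zerocopy(to, from, from->len, hlen)) {
 *		kfree_skb(to);
 *		return -ENOMEM;
 *	}
 *
 * Only @hlen bytes are copied; the rest is shared by taking references
 * on the source frags.
 */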

void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
{
	__wsum csum;
	long csstart;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		csstart = skb_checksum_start_offset(skb);
	else
		csstart = skb_headlen(skb);

	BUG_ON(csstart > skb_headlen(skb));

	skb_copy_from_linear_data(skb, to, csstart);

	csum = 0;
	if (csstart != skb->len)
		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
					      skb->len - csstart);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		long csstuff = csstart + skb->csum_offset;

		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
	}
}
EXPORT_SYMBOL(skb_copy_and_csum_dev);

/**
 * skb_dequeue - remove from the head of the queue
 * @list: list to dequeue from
 *
 * Remove the head of the list. The list lock is taken so the function
 * may be used safely with other locking list functions. The head item is
 * returned or %NULL if the list is empty.
 */

struct sk_buff *skb_dequeue(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue);

/**
 * skb_dequeue_tail - remove from the tail of the queue
 * @list: list to dequeue from
 *
 * Remove the tail of the list. The list lock is taken so the function
 * may be used safely with other locking list functions. The tail item is
 * returned or %NULL if the list is empty.
 */
struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
{
	unsigned long flags;
	struct sk_buff *result;

	spin_lock_irqsave(&list->lock, flags);
	result = __skb_dequeue_tail(list);
	spin_unlock_irqrestore(&list->lock, flags);
	return result;
}
EXPORT_SYMBOL(skb_dequeue_tail);

/**
 * skb_queue_purge - empty a list
 * @list: list to empty
 *
 * Delete all buffers on an &sk_buff list. Each buffer is removed from
 * the list and one reference dropped. This function takes the list
 * lock and is atomic with respect to other list locking functions.
 */
void skb_queue_purge(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	while ((skb = skb_dequeue(list)) != NULL)
		kfree_skb(skb);
}
EXPORT_SYMBOL(skb_queue_purge);

/**
 * skb_rbtree_purge - empty a skb rbtree
 * @root: root of the rbtree to empty
 * Return value: the sum of truesizes of all purged skbs.
 *
 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
 * the rbtree and one reference dropped. This function does not take
 * any lock. Synchronization should be handled by the caller (e.g., TCP
 * out-of-order queue is protected by the socket lock).
 */
unsigned int skb_rbtree_purge(struct rb_root *root)
{
	struct rb_node *p = rb_first(root);
	unsigned int sum = 0;

	while (p) {
		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);

		p = rb_next(p);
		rb_erase(&skb->rbnode, root);
		sum += skb->truesize;
		kfree_skb(skb);
	}
	return sum;
}

/**
 * skb_queue_head - queue a buffer at the list head
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the start of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_head(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_head);

/**
 * skb_queue_tail - queue a buffer at the list tail
 * @list: list to use
 * @newsk: buffer to queue
 *
 * Queue a buffer at the tail of the list. This function takes the
 * list lock and can be used safely with other locking &sk_buff functions.
 *
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_queue_tail);
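
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * a classic producer/consumer pattern on an IRQ-safe skb queue.
 * my_handle() is hypothetical:
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *
 *	// producer, e.g. from an interrupt handler:
 *	skb_queue_tail(&rxq, skb);
 *
 *	// consumer, e.g. from a work item:
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		my_handle(skb);
 *
 *	// teardown:
 *	skb_queue_purge(&rxq);
 */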

/**
 * skb_unlink - remove a buffer from a list
 * @skb: buffer to remove
 * @list: list to use
 *
 * Remove a packet from a list. The list locks are taken and this
 * function is atomic with respect to other list locked calls.
 *
 * You must know what list the SKB is on.
 */
void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_unlink(skb, list);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_unlink);

/**
 * skb_append - append a buffer
 * @old: buffer to insert after
 * @newsk: buffer to insert
 * @list: list to use
 *
 * Place a packet after a given packet in a list. The list locks are taken
 * and this function is atomic with respect to other list locked calls.
 * A buffer cannot be placed on two lists at the same time.
 */
void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
{
	unsigned long flags;

	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_after(list, old, newsk);
	spin_unlock_irqrestore(&list->lock, flags);
}
EXPORT_SYMBOL(skb_append);

static inline void skb_split_inside_header(struct sk_buff *skb,
					   struct sk_buff* skb1,
					   const u32 len, const int pos)
{
	int i;

	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
					 pos - len);
	/* And move data appendix as is. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];

	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
	skb_shinfo(skb)->nr_frags = 0;
	skb1->data_len = skb->data_len;
	skb1->len += skb1->data_len;
	skb->data_len = 0;
	skb->len = len;
	skb_set_tail_pointer(skb, len);
}

static inline void skb_split_no_header(struct sk_buff *skb,
				       struct sk_buff* skb1,
				       const u32 len, int pos)
{
	int i, k = 0;
	const int nfrags = skb_shinfo(skb)->nr_frags;

	skb_shinfo(skb)->nr_frags = 0;
	skb1->len = skb1->data_len = skb->len - len;
	skb->len = len;
	skb->data_len = len - pos;

	for (i = 0; i < nfrags; i++) {
		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);

		if (pos + size > len) {
			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];

			if (pos < len) {
				/* Split frag.
				 * We have two variants in this case:
				 * 1. Move all the frag to the second
				 *    part, if it is possible. E.g.
				 *    this approach is mandatory for TUX,
				 *    where splitting is expensive.
				 * 2. Split the frag accurately, which is
				 *    what we do here.
				 */
				skb_frag_ref(skb, i);
				skb_frag_off_add(&skb_shinfo(skb1)->frags[0], len - pos);
				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
				skb_shinfo(skb)->nr_frags++;
			}
			k++;
		} else
			skb_shinfo(skb)->nr_frags++;
		pos += size;
	}
	skb_shinfo(skb1)->nr_frags = k;
}

/**
 * skb_split - Split fragmented skb to two parts at length len.
 * @skb: the buffer to split
 * @skb1: the buffer to receive the second part
 * @len: new length for skb
 */
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
{
	int pos = skb_headlen(skb);

	skb_shinfo(skb1)->flags |= skb_shinfo(skb)->flags & SKBFL_SHARED_FRAG;
	skb_zerocopy_clone(skb1, skb, 0);
	if (len < pos)	/* Split line is inside header. */
		skb_split_inside_header(skb, skb1, len, pos);
	else		/* Second chunk has no header, nothing to copy. */
		skb_split_no_header(skb, skb1, len, pos);
}
EXPORT_SYMBOL(skb_split);
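
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * carving the first 'mss' bytes off an oversized skb, roughly what
 * tcp_fragment() arranges before retransmitting.  Headroom and queue
 * handling are simplified here:
 *
 *	struct sk_buff *second = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (!second)
 *		return -ENOMEM;
 *	skb_split(skb, second, mss);
 *	// skb now carries the first mss bytes, second carries the rest
 */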

/* Shifting from/to a cloned skb is a no-go.
 *
 * Caller cannot keep skb_shinfo related pointers past calling here!
 */
static int skb_prepare_for_shift(struct sk_buff *skb)
{
	int ret = 0;

	if (skb_cloned(skb)) {
		/* Save and restore truesize: pskb_expand_head() may reallocate
		 * memory where ksize(kmalloc(S)) != ksize(kmalloc(S)), but we
		 * cannot change truesize at this point.
		 */
		unsigned int save_truesize = skb->truesize;

		ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
		skb->truesize = save_truesize;
	}
	return ret;
}

/**
 * skb_shift - Shifts paged data partially from skb to another
 * @tgt: buffer into which tail data gets added
 * @skb: buffer from which the paged data comes from
 * @shiftlen: shift up to this many bytes
 *
 * Attempts to shift up to shiftlen worth of bytes, which may be less than
 * the length of the skb, from skb to tgt. Returns the number of bytes
 * shifted. It's up to the caller to free skb if everything was shifted.
 *
 * If @tgt runs out of frags, the whole operation is aborted.
 *
 * @skb cannot include anything other than paged data, while @tgt is
 * allowed to have non-paged data as well.
 *
 * TODO: full sized shift could be optimized but that would need a
 * specialized skb freer to handle frags without an up-to-date nr_frags.
 */
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
{
	int from, to, merge, todo;
	skb_frag_t *fragfrom, *fragto;

	BUG_ON(shiftlen > skb->len);

	if (skb_headlen(skb))
		return 0;
	if (skb_zcopy(tgt) || skb_zcopy(skb))
		return 0;

	todo = shiftlen;
	from = 0;
	to = skb_shinfo(tgt)->nr_frags;
	fragfrom = &skb_shinfo(skb)->frags[from];

	/* Actual merge is delayed until the point when we know we can
	 * commit all, so that we don't have to undo partial changes
	 */
	if (!to ||
	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
			      skb_frag_off(fragfrom))) {
		merge = -1;
	} else {
		merge = to - 1;

		todo -= skb_frag_size(fragfrom);
		if (todo < 0) {
			if (skb_prepare_for_shift(skb) ||
			    skb_prepare_for_shift(tgt))
				return 0;

			/* All previous frag pointers might be stale! */
			fragfrom = &skb_shinfo(skb)->frags[from];
			fragto = &skb_shinfo(tgt)->frags[merge];

			skb_frag_size_add(fragto, shiftlen);
			skb_frag_size_sub(fragfrom, shiftlen);
			skb_frag_off_add(fragfrom, shiftlen);

			goto onlymerged;
		}

		from++;
	}

	/* Skip full, not-fitting skb to avoid expensive operations */
	if ((shiftlen == skb->len) &&
	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
		return 0;

	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
		return 0;

	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
		if (to == MAX_SKB_FRAGS)
			return 0;

		fragfrom = &skb_shinfo(skb)->frags[from];
		fragto = &skb_shinfo(tgt)->frags[to];

		if (todo >= skb_frag_size(fragfrom)) {
			*fragto = *fragfrom;
			todo -= skb_frag_size(fragfrom);
			from++;
			to++;

		} else {
			__skb_frag_ref(fragfrom);
			skb_frag_page_copy(fragto, fragfrom);
			skb_frag_off_copy(fragto, fragfrom);
			skb_frag_size_set(fragto, todo);

			skb_frag_off_add(fragfrom, todo);
			skb_frag_size_sub(fragfrom, todo);
			todo = 0;

			to++;
			break;
		}
	}

	/* Ready to "commit" this state change to tgt */
	skb_shinfo(tgt)->nr_frags = to;

	if (merge >= 0) {
		fragfrom = &skb_shinfo(skb)->frags[0];
		fragto = &skb_shinfo(tgt)->frags[merge];

		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
		__skb_frag_unref(fragfrom, skb->pp_recycle);
	}

	/* Reposition in the original skb */
	to = 0;
	while (from < skb_shinfo(skb)->nr_frags)
		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
	skb_shinfo(skb)->nr_frags = to;

	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);

onlymerged:
	/* Most likely the tgt won't ever need its checksum anymore, skb on
	 * the other hand might need it if it needs to be resent.
	 */
	tgt->ip_summed = CHECKSUM_PARTIAL;
	skb->ip_summed = CHECKSUM_PARTIAL;

	/* Yak, is it really working this way? Some helper please? */
	skb->len -= shiftlen;
	skb->data_len -= shiftlen;
	skb->truesize -= shiftlen;
	tgt->len += shiftlen;
	tgt->data_len += shiftlen;
	tgt->truesize += shiftlen;

	return shiftlen;
}

/**
 * skb_prepare_seq_read - Prepare a sequential read of skb data
 * @skb: the buffer to read
 * @from: lower offset of data to be read
 * @to: upper offset of data to be read
 * @st: state variable
 *
 * Initializes the specified state variable. Must be called before
 * invoking skb_seq_read() for the first time.
 */
void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
			  unsigned int to, struct skb_seq_state *st)
{
	st->lower_offset = from;
	st->upper_offset = to;
	st->root_skb = st->cur_skb = skb;
	st->frag_idx = st->stepped_offset = 0;
	st->frag_data = NULL;
	st->frag_off = 0;
}
EXPORT_SYMBOL(skb_prepare_seq_read);

/**
 * skb_seq_read - Sequentially read skb data
 * @consumed: number of bytes consumed by the caller so far
 * @data: destination pointer for data to be returned
 * @st: state variable
 *
 * Reads a block of skb data at @consumed relative to the
 * lower offset specified to skb_prepare_seq_read(). Assigns
 * the head of the data block to @data and returns the length
 * of the block or 0 if the end of the skb data or the upper
 * offset has been reached.
 *
 * The caller is not required to consume all of the data
 * returned, i.e. @consumed is typically set to the number
 * of bytes already consumed and the next call to
 * skb_seq_read() will return the remaining part of the block.
 *
 * Note 1: The size of each block of data returned can be arbitrary,
 *    this limitation is the cost for zerocopy sequential
 *    reads of potentially non-linear data.
 *
 * Note 2: Fragment lists within fragments are not implemented
 *    at the moment, state->root_skb could be replaced with
 *    a stack for this purpose.
 */
unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
			  struct skb_seq_state *st)
{
	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
	skb_frag_t *frag;

	if (unlikely(abs_offset >= st->upper_offset)) {
		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}
		return 0;
	}

next_skb:
	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

	if (abs_offset < block_limit && !st->frag_data) {
		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
		return block_limit - abs_offset;
	}

	if (st->frag_idx == 0 && !st->frag_data)
		st->stepped_offset += skb_headlen(st->cur_skb);

	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
		unsigned int pg_idx, pg_off, pg_sz;

		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];

		pg_idx = 0;
		pg_off = skb_frag_off(frag);
		pg_sz = skb_frag_size(frag);

		if (skb_frag_must_loop(skb_frag_page(frag))) {
			pg_idx = (pg_off + st->frag_off) >> PAGE_SHIFT;
			pg_off = offset_in_page(pg_off + st->frag_off);
			pg_sz = min_t(unsigned int, pg_sz - st->frag_off,
				      PAGE_SIZE - pg_off);
		}

		block_limit = pg_sz + st->stepped_offset;
		if (abs_offset < block_limit) {
			if (!st->frag_data)
				st->frag_data = kmap_atomic(skb_frag_page(frag) + pg_idx);

			*data = (u8 *)st->frag_data + pg_off +
				(abs_offset - st->stepped_offset);

			return block_limit - abs_offset;
		}

		if (st->frag_data) {
			kunmap_atomic(st->frag_data);
			st->frag_data = NULL;
		}

		st->stepped_offset += pg_sz;
		st->frag_off += pg_sz;
		if (st->frag_off == skb_frag_size(frag)) {
			st->frag_off = 0;
			st->frag_idx++;
		}
	}

	if (st->frag_data) {
		kunmap_atomic(st->frag_data);
		st->frag_data = NULL;
	}

	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
		st->frag_idx = 0;
		goto next_skb;
	} else if (st->cur_skb->next) {
		st->cur_skb = st->cur_skb->next;
		st->frag_idx = 0;
		goto next_skb;
	}

	return 0;
}
EXPORT_SYMBOL(skb_seq_read);

/**
 * skb_abort_seq_read - Abort a sequential read of skb data
 * @st: state variable
 *
 * Must be called when a sequential read is abandoned before
 * skb_seq_read() has returned 0.
 */
void skb_abort_seq_read(struct skb_seq_state *st)
{
	if (st->frag_data)
		kunmap_atomic(st->frag_data);
}
EXPORT_SYMBOL(skb_abort_seq_read);
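
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * scanning all bytes of a possibly non-linear skb without copying.
 * my_scan() is hypothetical:
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		my_scan(data, len);
 *		consumed += len;
 *	}
 *	// No skb_abort_seq_read() needed here: the loop only ends once
 *	// skb_seq_read() has returned 0.  Call it when bailing out early.
 */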

#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))

static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
					  struct ts_config *conf,
					  struct ts_state *state)
{
	return skb_seq_read(offset, text, TS_SKB_CB(state));
}

static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
{
	skb_abort_seq_read(TS_SKB_CB(state));
}

/**
 * skb_find_text - Find a text pattern in skb data
 * @skb: the buffer to look in
 * @from: search offset
 * @to: search limit
 * @config: textsearch configuration
 *
 * Finds a pattern in the skb data according to the specified
 * textsearch configuration. Use textsearch_next() to retrieve
 * subsequent occurrences of the pattern. Returns the offset
 * to the first occurrence or UINT_MAX if no match was found.
 */
unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
			   unsigned int to, struct ts_config *config)
{
	struct ts_state state;
	unsigned int ret;

	BUILD_BUG_ON(sizeof(struct skb_seq_state) > sizeof(state.cb));

	config->get_next_block = skb_ts_get_next_block;
	config->finish = skb_ts_finish;

	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));

	ret = textsearch_find(config, &state);
	return (ret <= to - from ? ret : UINT_MAX);
}
EXPORT_SYMBOL(skb_find_text);
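
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * looking for a fixed pattern anywhere in a packet, the way the xt_string
 * match does:
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", "needle", 6, GFP_KERNEL,
 *				  TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *
 *	pos = skb_find_text(skb, 0, skb->len, conf);
 *	if (pos != UINT_MAX)
 *		// first match starts at offset 'pos'
 *
 *	textsearch_destroy(conf);
 */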

int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
			 int offset, size_t size)
{
	int i = skb_shinfo(skb)->nr_frags;

	if (skb_can_coalesce(skb, i, page, offset)) {
		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
	} else if (i < MAX_SKB_FRAGS) {
		get_page(page);
		skb_fill_page_desc(skb, i, page, offset, size);
	} else {
		return -EMSGSIZE;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(skb_append_pagefrags);

/**
 * skb_pull_rcsum - pull skb and update receive checksum
 * @skb: buffer to update
 * @len: length of data pulled
 *
 * This function performs an skb_pull on the packet and updates
 * the CHECKSUM_COMPLETE checksum. It should be used on
 * receive path processing instead of skb_pull unless you know
 * that the checksum difference is zero (e.g., a valid IP header)
 * or you are setting ip_summed to CHECKSUM_NONE.
 */
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	unsigned char *data = skb->data;

	BUG_ON(len > skb->len);
	__skb_pull(skb, len);
	skb_postpull_rcsum(skb, data, len);
	return skb->data;
}
EXPORT_SYMBOL_GPL(skb_pull_rcsum);
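
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * stripping an encapsulation header on receive while keeping a
 * CHECKSUM_COMPLETE value consistent, as tunnel receive paths do.
 * 'hdr_len' is the encapsulation header length:
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		goto drop;
 *	skb_pull_rcsum(skb, hdr_len);
 *
 * Using plain skb_pull() here would leave skb->csum covering bytes that
 * are no longer part of the packet.
 */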

static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
{
	skb_frag_t head_frag;
	struct page *page;

	page = virt_to_head_page(frag_skb->head);
	__skb_frag_set_page(&head_frag, page);
	skb_frag_off_set(&head_frag, frag_skb->data -
			 (unsigned char *)page_address(page));
	skb_frag_size_set(&head_frag, skb_headlen(frag_skb));
	return head_frag;
}

struct sk_buff *skb_segment_list(struct sk_buff *skb,
				 netdev_features_t features,
				 unsigned int offset)
{
	struct sk_buff *list_skb = skb_shinfo(skb)->frag_list;
	unsigned int tnl_hlen = skb_tnl_header_len(skb);
	unsigned int delta_truesize = 0;
	unsigned int delta_len = 0;
	struct sk_buff *tail = NULL;
	struct sk_buff *nskb, *tmp;
	int err;

	skb_push(skb, -skb_network_offset(skb) + offset);

	skb_shinfo(skb)->frag_list = NULL;

	do {
		nskb = list_skb;
		list_skb = list_skb->next;

		err = 0;
		if (skb_shared(nskb)) {
			tmp = skb_clone(nskb, GFP_ATOMIC);
			if (tmp) {
				consume_skb(nskb);
				nskb = tmp;
				err = skb_unclone(nskb, GFP_ATOMIC);
			} else {
				err = -ENOMEM;
			}
		}

		if (!tail)
			skb->next = nskb;
		else
			tail->next = nskb;

		if (unlikely(err)) {
			nskb->next = list_skb;
			goto err_linearize;
		}

		tail = nskb;

		delta_len += nskb->len;
		delta_truesize += nskb->truesize;

		skb_push(nskb, -skb_network_offset(nskb) + offset);

		skb_release_head_state(nskb);
		__copy_skb_header(nskb, skb);

		skb_headers_offset_update(nskb, skb_headroom(nskb) - skb_headroom(skb));
		skb_copy_from_linear_data_offset(skb, -tnl_hlen,
						 nskb->data - tnl_hlen,
						 offset + tnl_hlen);

		if (skb_needs_linearize(nskb, features) &&
		    __skb_linearize(nskb))
			goto err_linearize;

	} while (list_skb);

	skb->truesize = skb->truesize - delta_truesize;
	skb->data_len = skb->data_len - delta_len;
	skb->len = skb->len - delta_len;

	skb_gso_reset(skb);

	skb->prev = tail;

	if (skb_needs_linearize(skb, features) &&
	    __skb_linearize(skb))
		goto err_linearize;

	skb_get(skb);

	return skb;

err_linearize:
	kfree_skb_list(skb->next);
	skb->next = NULL;
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(skb_segment_list);

int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb)
{
	if (unlikely(p->len + skb->len >= 65536))
		return -E2BIG;

	if (NAPI_GRO_CB(p)->last == p)
		skb_shinfo(p)->frag_list = skb;
	else
		NAPI_GRO_CB(p)->last->next = skb;

	skb_pull(skb, skb_gro_offset(skb));

	NAPI_GRO_CB(p)->last = skb;
	NAPI_GRO_CB(p)->count++;
	p->data_len += skb->len;
	p->truesize += skb->truesize;
	p->len += skb->len;

	NAPI_GRO_CB(skb)->same_flow = 1;

	return 0;
}

/**
 * skb_segment - Perform protocol segmentation on skb.
 * @head_skb: buffer to segment
 * @features: features for the output path (see dev->features)
 *
 * This function performs segmentation on the given skb. It returns
 * a pointer to the first in a list of new skbs for the segments.
 * In case of error it returns ERR_PTR(err).
 */
struct sk_buff *skb_segment(struct sk_buff *head_skb,
			    netdev_features_t features)
{
	struct sk_buff *segs = NULL;
	struct sk_buff *tail = NULL;
	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
	unsigned int mss = skb_shinfo(head_skb)->gso_size;
	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
	struct sk_buff *frag_skb = head_skb;
	unsigned int offset = doffset;
	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
	unsigned int partial_segs = 0;
	unsigned int headroom;
	unsigned int len = head_skb->len;
	__be16 proto;
	bool csum, sg;
	int nfrags = skb_shinfo(head_skb)->nr_frags;
	int err = -ENOMEM;
	int i = 0;
	int pos;

	if (list_skb && !list_skb->head_frag && skb_headlen(list_skb) &&
	    (skb_shinfo(head_skb)->gso_type & SKB_GSO_DODGY)) {
		/* gso_size is untrusted, and we have a frag_list with a linear
		 * non head_frag head.
		 *
		 * (we assume checking the first list_skb member suffices;
		 * i.e., if either of the list_skb members has a non-head_frag
		 * head, then so does the first one.)
		 *
		 * If head_skb's headlen does not fit requested gso_size, it
		 * means that the frag_list members do NOT terminate on exact
		 * gso_size boundaries. Hence we cannot perform skb_frag_t page
		 * sharing. Therefore we must fall back to copying the frag_list
		 * skbs; we do so by disabling SG.
		 */
		if (mss != GSO_BY_FRAGS && mss != skb_headlen(head_skb))
			features &= ~NETIF_F_SG;
	}

	__skb_push(head_skb, doffset);
	proto = skb_network_protocol(head_skb, NULL);
	if (unlikely(!proto))
		return ERR_PTR(-EINVAL);

	sg = !!(features & NETIF_F_SG);
	csum = !!can_checksum_protocol(features, proto);

	if (sg && csum && (mss != GSO_BY_FRAGS)) {
		if (!(features & NETIF_F_GSO_PARTIAL)) {
			struct sk_buff *iter;
			unsigned int frag_len;

			if (!list_skb ||
			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
				goto normal;

			/* If we get here then all the required
			 * GSO features except frag_list are supported.
			 * Try to split the SKB to multiple GSO SKBs
			 * with no frag_list.
			 * Currently we can do that only when the buffers don't
			 * have a linear part and all the buffers except
			 * the last are of the same length.
			 */
			frag_len = list_skb->len;
			skb_walk_frags(head_skb, iter) {
				if (frag_len != iter->len && iter->next)
					goto normal;
				if (skb_headlen(iter) && !iter->head_frag)
					goto normal;

				len -= iter->len;
			}

			if (len != frag_len)
				goto normal;
		}

		/* GSO partial only requires that we trim off any excess that
		 * doesn't fit into an MSS sized block, so take care of that
		 * now.
		 */
		partial_segs = len / mss;
		if (partial_segs > 1)
			mss *= partial_segs;
		else
			partial_segs = 0;
	}

normal:
	headroom = skb_headroom(head_skb);
	pos = skb_headlen(head_skb);

	do {
		struct sk_buff *nskb;
		skb_frag_t *nskb_frag;
		int hsize;
		int size;

		if (unlikely(mss == GSO_BY_FRAGS)) {
			len = list_skb->len;
		} else {
			len = head_skb->len - offset;
			if (len > mss)
				len = mss;
		}

		hsize = skb_headlen(head_skb) - offset;

		if (hsize <= 0 && i >= nfrags && skb_headlen(list_skb) &&
		    (skb_headlen(list_skb) == len || sg)) {
			BUG_ON(skb_headlen(list_skb) > len);

			i = 0;
			nfrags = skb_shinfo(list_skb)->nr_frags;
			frag = skb_shinfo(list_skb)->frags;
			frag_skb = list_skb;
			pos += skb_headlen(list_skb);

			while (pos < offset + len) {
				BUG_ON(i >= nfrags);

				size = skb_frag_size(frag);
				if (pos + size > offset + len)
					break;

				i++;
				pos += size;
				frag++;
			}

			nskb = skb_clone(list_skb, GFP_ATOMIC);
			list_skb = list_skb->next;

			if (unlikely(!nskb))
				goto err;

			if (unlikely(pskb_trim(nskb, len))) {
				kfree_skb(nskb);
				goto err;
			}

			hsize = skb_end_offset(nskb);
			if (skb_cow_head(nskb, doffset + headroom)) {
				kfree_skb(nskb);
				goto err;
			}

			nskb->truesize += skb_end_offset(nskb) - hsize;
			skb_release_head_state(nskb);
			__skb_push(nskb, doffset);
		} else {
			if (hsize < 0)
				hsize = 0;
			if (hsize > len || !sg)
				hsize = len;

			nskb = __alloc_skb(hsize + doffset + headroom,
					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
					   NUMA_NO_NODE);

			if (unlikely(!nskb))
				goto err;

			skb_reserve(nskb, headroom);
			__skb_put(nskb, doffset);
		}

		if (segs)
			tail->next = nskb;
		else
			segs = nskb;
		tail = nskb;

		__copy_skb_header(nskb, head_skb);

		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
		skb_reset_mac_len(nskb);

		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
						 nskb->data - tnl_hlen,
						 doffset + tnl_hlen);

		if (nskb->len == len + doffset)
			goto perform_csum_check;

		if (!sg) {
			if (!csum) {
				if (!nskb->remcsum_offload)
					nskb->ip_summed = CHECKSUM_NONE;
				SKB_GSO_CB(nskb)->csum =
					skb_copy_and_csum_bits(head_skb, offset,
							       skb_put(nskb,
								       len),
							       len);
				SKB_GSO_CB(nskb)->csum_start =
					skb_headroom(nskb) + doffset;
			} else {
				skb_copy_bits(head_skb, offset,
					      skb_put(nskb, len),
					      len);
			}
			continue;
		}

		nskb_frag = skb_shinfo(nskb)->frags;

		skb_copy_from_linear_data_offset(head_skb, offset,
						 skb_put(nskb, hsize), hsize);

		skb_shinfo(nskb)->flags |= skb_shinfo(head_skb)->flags &
					   SKBFL_SHARED_FRAG;

		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
			goto err;

		while (pos < offset + len) {
			if (i >= nfrags) {
				i = 0;
				nfrags = skb_shinfo(list_skb)->nr_frags;
				frag = skb_shinfo(list_skb)->frags;
				frag_skb = list_skb;
				if (!skb_headlen(list_skb)) {
					BUG_ON(!nfrags);
				} else {
					BUG_ON(!list_skb->head_frag);

					/* to make room for head_frag. */
					i--;
					frag--;
				}
				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
				    skb_zerocopy_clone(nskb, frag_skb,
						       GFP_ATOMIC))
					goto err;

				list_skb = list_skb->next;
			}

			if (unlikely(skb_shinfo(nskb)->nr_frags >=
				     MAX_SKB_FRAGS)) {
				net_warn_ratelimited(
					"skb_segment: too many frags: %u %u\n",
					pos, mss);
				err = -EINVAL;
				goto err;
			}

			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
			__skb_frag_ref(nskb_frag);
			size = skb_frag_size(nskb_frag);

			if (pos < offset) {
				skb_frag_off_add(nskb_frag, offset - pos);
				skb_frag_size_sub(nskb_frag, offset - pos);
			}

			skb_shinfo(nskb)->nr_frags++;

			if (pos + size <= offset + len) {
				i++;
				frag++;
				pos += size;
			} else {
				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
				goto skip_fraglist;
			}

			nskb_frag++;
		}

skip_fraglist:
		nskb->data_len = len - hsize;
		nskb->len += nskb->data_len;
		nskb->truesize += nskb->data_len;

perform_csum_check:
		if (!csum) {
			if (skb_has_shared_frag(nskb) &&
			    __skb_linearize(nskb))
				goto err;

			if (!nskb->remcsum_offload)
				nskb->ip_summed = CHECKSUM_NONE;
			SKB_GSO_CB(nskb)->csum =
				skb_checksum(nskb, doffset,
					     nskb->len - doffset, 0);
			SKB_GSO_CB(nskb)->csum_start =
				skb_headroom(nskb) + doffset;
		}
	} while ((offset += len) < head_skb->len);

	/* Some callers want to get the end of the list.
	 * Put it in segs->prev to avoid walking the list.
	 * (see validate_xmit_skb_list() for example)
	 */
	segs->prev = tail;

	if (partial_segs) {
		struct sk_buff *iter;
		int type = skb_shinfo(head_skb)->gso_type;
		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;

		/* Update type to add partial and then remove dodgy if set */
		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
		type &= ~SKB_GSO_DODGY;

		/* Update GSO info and prepare to start updating headers on
		 * our way back down the stack of protocols.
		 */
		for (iter = segs; iter; iter = iter->next) {
			skb_shinfo(iter)->gso_size = gso_size;
			skb_shinfo(iter)->gso_segs = partial_segs;
			skb_shinfo(iter)->gso_type = type;
			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
		}

		if (tail->len - doffset <= gso_size)
			skb_shinfo(tail)->gso_size = 0;
		else if (tail != segs)
			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
	}

	/* The following permits correct backpressure, for protocols
	 * using skb_set_owner_w().
	 * The idea is to transfer ownership from head_skb to the last segment.
	 */
	if (head_skb->destructor == sock_wfree) {
		swap(tail->truesize, head_skb->truesize);
		swap(tail->destructor, head_skb->destructor);
		swap(tail->sk, head_skb->sk);
	}
	return segs;

err:
	kfree_skb_list(segs);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skb_segment);
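
/*
 * Example usage (an illustrative sketch, not part of the original file):
 * how a protocol's ->gso_segment() callback typically wraps this function
 * after validating and pulling its own header, tcp/udp offload style.
 * 'hdrlen' is the protocol header length:
 *
 *	static struct sk_buff *my_gso_segment(struct sk_buff *skb,
 *					      netdev_features_t features)
 *	{
 *		struct sk_buff *segs;
 *
 *		if (!pskb_may_pull(skb, hdrlen))
 *			return ERR_PTR(-EINVAL);
 *		__skb_pull(skb, hdrlen);
 *
 *		segs = skb_segment(skb, features);
 *		if (IS_ERR(segs))
 *			return segs;
 *		// fix up per-segment headers here, walking segs->next
 *		return segs;
 *	}
 */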
4251
4252int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb)
4253{
4254 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
4255 unsigned int offset = skb_gro_offset(skb);
4256 unsigned int headlen = skb_headlen(skb);
4257 unsigned int len = skb_gro_len(skb);
4258 unsigned int delta_truesize;
4259 struct sk_buff *lp;
4260
4261 if (unlikely(p->len + len >= 65536 || NAPI_GRO_CB(skb)->flush))
4262 return -E2BIG;
4263
4264 lp = NAPI_GRO_CB(p)->last;
4265 pinfo = skb_shinfo(lp);
4266
4267 if (headlen <= offset) {
4268 skb_frag_t *frag;
4269 skb_frag_t *frag2;
4270 int i = skbinfo->nr_frags;
4271 int nr_frags = pinfo->nr_frags + i;
4272
4273 if (nr_frags > MAX_SKB_FRAGS)
4274 goto merge;
4275
4276 offset -= headlen;
4277 pinfo->nr_frags = nr_frags;
4278 skbinfo->nr_frags = 0;
4279
4280 frag = pinfo->frags + nr_frags;
4281 frag2 = skbinfo->frags + i;
4282 do {
4283 *--frag = *--frag2;
4284 } while (--i);
4285
4286 skb_frag_off_add(frag, offset);
4287 skb_frag_size_sub(frag, offset);
4288
4289 /* all fragments truesize : remove (head size + sk_buff) */
4290 delta_truesize = skb->truesize -
4291 SKB_TRUESIZE(skb_end_offset(skb));
4292
4293 skb->truesize -= skb->data_len;
4294 skb->len -= skb->data_len;
4295 skb->data_len = 0;
4296
4297 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
4298 goto done;
4299 } else if (skb->head_frag) {
4300 int nr_frags = pinfo->nr_frags;
4301 skb_frag_t *frag = pinfo->frags + nr_frags;
4302 struct page *page = virt_to_head_page(skb->head);
4303 unsigned int first_size = headlen - offset;
4304 unsigned int first_offset;
4305
4306 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
4307 goto merge;
4308
4309 first_offset = skb->data -
4310 (unsigned char *)page_address(page) +
4311 offset;
4312
4313 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
4314
4315 __skb_frag_set_page(frag, page);
4316 skb_frag_off_set(frag, first_offset);
4317 skb_frag_size_set(frag, first_size);
4318
4319 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
4320 /* We dont need to clear skbinfo->nr_frags here */
4321
4322 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4323 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
4324 goto done;
4325 }
4326
4327merge:
4328 delta_truesize = skb->truesize;
4329 if (offset > headlen) {
4330 unsigned int eat = offset - headlen;
4331
4332 skb_frag_off_add(&skbinfo->frags[0], eat);
4333 skb_frag_size_sub(&skbinfo->frags[0], eat);
4334 skb->data_len -= eat;
4335 skb->len -= eat;
4336 offset = headlen;
4337 }
4338
4339 __skb_pull(skb, offset);
4340
4341 if (NAPI_GRO_CB(p)->last == p)
4342 skb_shinfo(p)->frag_list = skb;
4343 else
4344 NAPI_GRO_CB(p)->last->next = skb;
4345 NAPI_GRO_CB(p)->last = skb;
4346 __skb_header_release(skb);
4347 lp = p;
4348
4349done:
4350 NAPI_GRO_CB(p)->count++;
4351 p->data_len += len;
4352 p->truesize += delta_truesize;
4353 p->len += len;
4354 if (lp != p) {
4355 lp->data_len += len;
4356 lp->truesize += delta_truesize;
4357 lp->len += len;
4358 }
4359 NAPI_GRO_CB(skb)->same_flow = 1;
4360 return 0;
4361}
4362
4363#ifdef CONFIG_SKB_EXTENSIONS
4364#define SKB_EXT_ALIGN_VALUE 8
4365#define SKB_EXT_CHUNKSIZEOF(x) (ALIGN((sizeof(x)), SKB_EXT_ALIGN_VALUE) / SKB_EXT_ALIGN_VALUE)
4366
4367static const u8 skb_ext_type_len[] = {
4368#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4369 [SKB_EXT_BRIDGE_NF] = SKB_EXT_CHUNKSIZEOF(struct nf_bridge_info),
4370#endif
4371#ifdef CONFIG_XFRM
4372 [SKB_EXT_SEC_PATH] = SKB_EXT_CHUNKSIZEOF(struct sec_path),
4373#endif
4374#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4375 [TC_SKB_EXT] = SKB_EXT_CHUNKSIZEOF(struct tc_skb_ext),
4376#endif
4377#if IS_ENABLED(CONFIG_MPTCP)
4378 [SKB_EXT_MPTCP] = SKB_EXT_CHUNKSIZEOF(struct mptcp_ext),
4379#endif
4380};
4381
4382static __always_inline unsigned int skb_ext_total_length(void)
4383{
4384 return SKB_EXT_CHUNKSIZEOF(struct skb_ext) +
4385#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
4386 skb_ext_type_len[SKB_EXT_BRIDGE_NF] +
4387#endif
4388#ifdef CONFIG_XFRM
4389 skb_ext_type_len[SKB_EXT_SEC_PATH] +
4390#endif
4391#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
4392 skb_ext_type_len[TC_SKB_EXT] +
4393#endif
4394#if IS_ENABLED(CONFIG_MPTCP)
4395 skb_ext_type_len[SKB_EXT_MPTCP] +
4396#endif
4397 0;
4398}
4399
4400static void skb_extensions_init(void)
4401{
4402 BUILD_BUG_ON(SKB_EXT_NUM >= 8);
4403 BUILD_BUG_ON(skb_ext_total_length() > 255);
4404
4405 skbuff_ext_cache = kmem_cache_create("skbuff_ext_cache",
4406 SKB_EXT_ALIGN_VALUE * skb_ext_total_length(),
4407 0,
4408 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4409 NULL);
4410}
4411#else
4412static void skb_extensions_init(void) {}
4413#endif
4414
4415void __init skb_init(void)
4416{
4417 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
4418 sizeof(struct sk_buff),
4419 0,
4420 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4421 offsetof(struct sk_buff, cb),
4422 sizeof_field(struct sk_buff, cb),
4423 NULL);
4424 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
4425 sizeof(struct sk_buff_fclones),
4426 0,
4427 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
4428 NULL);
4429 skb_extensions_init();
4430}
4431
4432static int
4433__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
4434 unsigned int recursion_level)
4435{
4436 int start = skb_headlen(skb);
4437 int i, copy = start - offset;
4438 struct sk_buff *frag_iter;
4439 int elt = 0;
4440
4441 if (unlikely(recursion_level >= 24))
4442 return -EMSGSIZE;
4443
4444 if (copy > 0) {
4445 if (copy > len)
4446 copy = len;
4447 sg_set_buf(sg, skb->data + offset, copy);
4448 elt++;
4449 if ((len -= copy) == 0)
4450 return elt;
4451 offset += copy;
4452 }
4453
4454 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4455 int end;
4456
4457 WARN_ON(start > offset + len);
4458
4459 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
4460 if ((copy = end - offset) > 0) {
4461 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4462 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4463 return -EMSGSIZE;
4464
4465 if (copy > len)
4466 copy = len;
4467 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
4468 skb_frag_off(frag) + offset - start);
4469 elt++;
4470 if (!(len -= copy))
4471 return elt;
4472 offset += copy;
4473 }
4474 start = end;
4475 }
4476
4477 skb_walk_frags(skb, frag_iter) {
4478 int end, ret;
4479
4480 WARN_ON(start > offset + len);
4481
4482 end = start + frag_iter->len;
4483 if ((copy = end - offset) > 0) {
4484 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
4485 return -EMSGSIZE;
4486
4487 if (copy > len)
4488 copy = len;
4489 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
4490 copy, recursion_level + 1);
4491 if (unlikely(ret < 0))
4492 return ret;
4493 elt += ret;
4494 if ((len -= copy) == 0)
4495 return elt;
4496 offset += copy;
4497 }
4498 start = end;
4499 }
4500 BUG_ON(len);
4501 return elt;
4502}
4503
4504/**
4505 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4506 * @skb: Socket buffer containing the buffers to be mapped
4507 * @sg: The scatter-gather list to map into
4508 * @offset: The offset into the buffer's contents to start mapping
4509 * @len: Length of buffer space to be mapped
4510 *
4511 * Fill the specified scatter-gather list with mappings/pointers into a
4512 * region of the buffer space attached to a socket buffer. Returns either
4513 * the number of scatterlist items used, or -EMSGSIZE if the contents
4514 * could not fit.
4515 */
4516int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4517{
4518 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4519
4520 if (nsg <= 0)
4521 return nsg;
4522
4523 sg_mark_end(&sg[nsg - 1]);
4524
4525 return nsg;
4526}
4527EXPORT_SYMBOL_GPL(skb_to_sgvec);
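/*
 * Example: a typical caller maps a whole skb into a local table
 * (sketch; assumes the skb carries no frag list, so MAX_SKB_FRAGS + 1
 * entries suffice):
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 */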
4528
4529/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4530 * given sglist without marking the sg entry that contains the last skb data
4531 * as the end. So the caller can manipulate the sg list at will when padding
4532 * new data after the first call, without calling sg_unmark_end to extend it.
4533 *
4534 * Scenario to use skb_to_sgvec_nomark:
4535 * 1. sg_init_table
4536 * 2. skb_to_sgvec_nomark(payload1)
4537 * 3. skb_to_sgvec_nomark(payload2)
4538 *
4539 * This is equivalent to:
4540 * 1. sg_init_table
4541 * 2. skb_to_sgvec(payload1)
4542 * 3. sg_unmark_end
4543 * 4. skb_to_sgvec(payload2)
4544 *
4545 * When conditionally mapping multiple payloads, skb_to_sgvec_nomark
4546 * is preferable.
4547 */
4548int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4549 int offset, int len)
4550{
4551 return __skb_to_sgvec(skb, sg, offset, len, 0);
4552}
4553EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
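/*
 * Example: the scenario above written out (sketch; MAX_SG and
 * second_payload are caller-chosen, error handling trimmed;
 * sg_init_table() has already marked sg[MAX_SG - 1] as the end):
 *
 *	struct scatterlist sg[MAX_SG];
 *	int elt;
 *
 *	sg_init_table(sg, MAX_SG);
 *	elt = skb_to_sgvec_nomark(skb1, sg, 0, skb1->len);
 *	if (second_payload)
 *		elt += skb_to_sgvec_nomark(skb2, sg + elt, 0, skb2->len);
 */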
4554
4555
4556
4557/**
4558 * skb_cow_data - Check that a socket buffer's data buffers are writable
4559 * @skb: The socket buffer to check.
4560 * @tailbits: Amount of trailing space to be added
4561 * @trailer: Returned pointer to the skb where the @tailbits space begins
4562 *
4563 * Make sure that the data buffers attached to a socket buffer are
4564 * writable. If they are not, private copies are made of the data buffers
4565 * and the socket buffer is set to use these instead.
4566 *
4567 * If @tailbits is given, make sure that there is space to write @tailbits
4568 * bytes of data beyond current end of socket buffer. @trailer will be
4569 * set to point to the skb in which this space begins.
4570 *
4571 * The number of scatterlist elements required to completely map the
4572 * COW'd and extended socket buffer will be returned.
4573 */
4574int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4575{
4576 int copyflag;
4577 int elt;
4578 struct sk_buff *skb1, **skb_p;
4579
4580 /* If skb is cloned or its head is paged, reallocate
4581 * head pulling out all the pages (pages are considered not writable
4582 * at the moment even if they are anonymous).
4583 */
4584 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4585 !__pskb_pull_tail(skb, __skb_pagelen(skb)))
4586 return -ENOMEM;
4587
4588	/* Easy case. Most packets will go this way. */
4589 if (!skb_has_frag_list(skb)) {
4590		/* A little trouble: not enough space for the trailer.
4591		 * This should not happen when the stack is tuned to generate
4592		 * good frames. OK, on a miss we reallocate and reserve even
4593		 * more space; 128 bytes is fair. */
4594
4595 if (skb_tailroom(skb) < tailbits &&
4596 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4597 return -ENOMEM;
4598
4599 /* Voila! */
4600 *trailer = skb;
4601 return 1;
4602 }
4603
4604	/* Misery. We are in trouble; time to mince the fragments... */
4605
4606 elt = 1;
4607 skb_p = &skb_shinfo(skb)->frag_list;
4608 copyflag = 0;
4609
4610 while ((skb1 = *skb_p) != NULL) {
4611 int ntail = 0;
4612
4613		/* The fragment was partially pulled by someone;
4614		 * this can happen on input. Copy it and everything
4615		 * after it. */
4616
4617 if (skb_shared(skb1))
4618 copyflag = 1;
4619
4620 /* If the skb is the last, worry about trailer. */
4621
4622 if (skb1->next == NULL && tailbits) {
4623 if (skb_shinfo(skb1)->nr_frags ||
4624 skb_has_frag_list(skb1) ||
4625 skb_tailroom(skb1) < tailbits)
4626 ntail = tailbits + 128;
4627 }
4628
4629 if (copyflag ||
4630 skb_cloned(skb1) ||
4631 ntail ||
4632 skb_shinfo(skb1)->nr_frags ||
4633 skb_has_frag_list(skb1)) {
4634 struct sk_buff *skb2;
4635
4636			/* There is no cheap way out; we must copy. */
4637 if (ntail == 0)
4638 skb2 = skb_copy(skb1, GFP_ATOMIC);
4639 else
4640 skb2 = skb_copy_expand(skb1,
4641 skb_headroom(skb1),
4642 ntail,
4643 GFP_ATOMIC);
4644 if (unlikely(skb2 == NULL))
4645 return -ENOMEM;
4646
4647 if (skb1->sk)
4648 skb_set_owner_w(skb2, skb1->sk);
4649
4650			/* Still alive after the copy?
4651			 * OK, link the new skb in and drop the old one. */
4652
4653 skb2->next = skb1->next;
4654 *skb_p = skb2;
4655 kfree_skb(skb1);
4656 skb1 = skb2;
4657 }
4658 elt++;
4659 *trailer = skb1;
4660 skb_p = &skb1->next;
4661 }
4662
4663 return elt;
4664}
4665EXPORT_SYMBOL_GPL(skb_cow_data);
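/*
 * Example: IPsec-style use, guaranteeing trailer room before building
 * an sg table over the result (sketch; error handling trimmed):
 *
 *	struct sk_buff *trailer;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, tailbits, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	pskb_put(skb, trailer, tailbits);
 *	... then map with a scatterlist of at least nfrags entries ...
 */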
4666
4667static void sock_rmem_free(struct sk_buff *skb)
4668{
4669 struct sock *sk = skb->sk;
4670
4671 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4672}
4673
4674static void skb_set_err_queue(struct sk_buff *skb)
4675{
4676 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4677 * So, it is safe to (mis)use it to mark skbs on the error queue.
4678 */
4679 skb->pkt_type = PACKET_OUTGOING;
4680 BUILD_BUG_ON(PACKET_OUTGOING == 0);
4681}
4682
4683/*
4684 * Note: We don't mem-charge error packets (no sk_forward_alloc changes)
4685 */
4686int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4687{
4688 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4689 (unsigned int)READ_ONCE(sk->sk_rcvbuf))
4690 return -ENOMEM;
4691
4692 skb_orphan(skb);
4693 skb->sk = sk;
4694 skb->destructor = sock_rmem_free;
4695 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4696 skb_set_err_queue(skb);
4697
4698 /* before exiting rcu section, make sure dst is refcounted */
4699 skb_dst_force(skb);
4700
4701 skb_queue_tail(&sk->sk_error_queue, skb);
4702 if (!sock_flag(sk, SOCK_DEAD))
4703 sk_error_report(sk);
4704 return 0;
4705}
4706EXPORT_SYMBOL(sock_queue_err_skb);
4707
4708static bool is_icmp_err_skb(const struct sk_buff *skb)
4709{
4710 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4711 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4712}
4713
4714struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4715{
4716 struct sk_buff_head *q = &sk->sk_error_queue;
4717 struct sk_buff *skb, *skb_next = NULL;
4718 bool icmp_next = false;
4719 unsigned long flags;
4720
4721 spin_lock_irqsave(&q->lock, flags);
4722 skb = __skb_dequeue(q);
4723 if (skb && (skb_next = skb_peek(q))) {
4724 icmp_next = is_icmp_err_skb(skb_next);
4725 if (icmp_next)
4726 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
4727 }
4728 spin_unlock_irqrestore(&q->lock, flags);
4729
4730 if (is_icmp_err_skb(skb) && !icmp_next)
4731 sk->sk_err = 0;
4732
4733 if (skb_next)
4734 sk_error_report(sk);
4735
4736 return skb;
4737}
4738EXPORT_SYMBOL(sock_dequeue_err_skb);
4739
4740/**
4741 * skb_clone_sk - create clone of skb, and take reference to socket
4742 * @skb: the skb to clone
4743 *
4744 * This function creates a clone of a buffer that holds a reference on
4745 * sk_refcnt. Buffers created via this function are meant to be
4747 * returned using sock_queue_err_skb, or freed via kfree_skb.
4747 *
4748 * When passing buffers allocated with this function to sock_queue_err_skb
4749 * it is necessary to wrap the call with sock_hold/sock_put in order to
4750 * prevent the socket from being released prior to being enqueued on
4751 * the sk_error_queue.
4752 */
4753struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4754{
4755 struct sock *sk = skb->sk;
4756 struct sk_buff *clone;
4757
4758 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4759 return NULL;
4760
4761 clone = skb_clone(skb, GFP_ATOMIC);
4762 if (!clone) {
4763 sock_put(sk);
4764 return NULL;
4765 }
4766
4767 clone->sk = sk;
4768 clone->destructor = sock_efree;
4769
4770 return clone;
4771}
4772EXPORT_SYMBOL(skb_clone_sk);
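/*
 * Example: queueing a clone on the error queue per the note above
 * (sketch; the metadata fill is caller-specific):
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *	struct sock *sk;
 *
 *	if (!clone)
 *		return;
 *	sk = clone->sk;
 *	... fill in SKB_EXT_ERR(clone) ...
 *	sock_hold(sk);
 *	if (sock_queue_err_skb(sk, clone))
 *		kfree_skb(clone);
 *	sock_put(sk);
 */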
4773
4774static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4775 struct sock *sk,
4776 int tstype,
4777 bool opt_stats)
4778{
4779 struct sock_exterr_skb *serr;
4780 int err;
4781
4782 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4783
4784 serr = SKB_EXT_ERR(skb);
4785 memset(serr, 0, sizeof(*serr));
4786 serr->ee.ee_errno = ENOMSG;
4787 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4788 serr->ee.ee_info = tstype;
4789 serr->opt_stats = opt_stats;
4790 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4791 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4792 serr->ee.ee_data = skb_shinfo(skb)->tskey;
4793 if (sk->sk_protocol == IPPROTO_TCP &&
4794 sk->sk_type == SOCK_STREAM)
4795 serr->ee.ee_data -= sk->sk_tskey;
4796 }
4797
4798 err = sock_queue_err_skb(sk, skb);
4799
4800 if (err)
4801 kfree_skb(skb);
4802}
4803
4804static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4805{
4806 bool ret;
4807
4808 if (likely(sysctl_tstamp_allow_data || tsonly))
4809 return true;
4810
4811 read_lock_bh(&sk->sk_callback_lock);
4812 ret = sk->sk_socket && sk->sk_socket->file &&
4813 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4814 read_unlock_bh(&sk->sk_callback_lock);
4815 return ret;
4816}
4817
4818void skb_complete_tx_timestamp(struct sk_buff *skb,
4819 struct skb_shared_hwtstamps *hwtstamps)
4820{
4821 struct sock *sk = skb->sk;
4822
4823 if (!skb_may_tx_timestamp(sk, false))
4824 goto err;
4825
4826 /* Take a reference to prevent skb_orphan() from freeing the socket,
4827 * but only if the socket refcount is not zero.
4828 */
4829 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4830 *skb_hwtstamps(skb) = *hwtstamps;
4831 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4832 sock_put(sk);
4833 return;
4834 }
4835
4836err:
4837 kfree_skb(skb);
4838}
4839EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4840
4841void __skb_tstamp_tx(struct sk_buff *orig_skb,
4842 const struct sk_buff *ack_skb,
4843 struct skb_shared_hwtstamps *hwtstamps,
4844 struct sock *sk, int tstype)
4845{
4846 struct sk_buff *skb;
4847 bool tsonly, opt_stats = false;
4848
4849 if (!sk)
4850 return;
4851
4852 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4853 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4854 return;
4855
4856 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4857 if (!skb_may_tx_timestamp(sk, tsonly))
4858 return;
4859
4860 if (tsonly) {
4861#ifdef CONFIG_INET
4862 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4863 sk->sk_protocol == IPPROTO_TCP &&
4864 sk->sk_type == SOCK_STREAM) {
4865 skb = tcp_get_timestamping_opt_stats(sk, orig_skb,
4866 ack_skb);
4867 opt_stats = true;
4868 } else
4869#endif
4870 skb = alloc_skb(0, GFP_ATOMIC);
4871 } else {
4872 skb = skb_clone(orig_skb, GFP_ATOMIC);
4873 }
4874 if (!skb)
4875 return;
4876
4877 if (tsonly) {
4878 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4879 SKBTX_ANY_TSTAMP;
4880 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4881 }
4882
4883 if (hwtstamps)
4884 *skb_hwtstamps(skb) = *hwtstamps;
4885 else
4886 skb->tstamp = ktime_get_real();
4887
4888 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4889}
4890EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4891
4892void skb_tstamp_tx(struct sk_buff *orig_skb,
4893 struct skb_shared_hwtstamps *hwtstamps)
4894{
4895 return __skb_tstamp_tx(orig_skb, NULL, hwtstamps, orig_skb->sk,
4896 SCM_TSTAMP_SND);
4897}
4898EXPORT_SYMBOL_GPL(skb_tstamp_tx);
4899
4900void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4901{
4902 struct sock *sk = skb->sk;
4903 struct sock_exterr_skb *serr;
4904 int err = 1;
4905
4906 skb->wifi_acked_valid = 1;
4907 skb->wifi_acked = acked;
4908
4909 serr = SKB_EXT_ERR(skb);
4910 memset(serr, 0, sizeof(*serr));
4911 serr->ee.ee_errno = ENOMSG;
4912 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4913
4914 /* Take a reference to prevent skb_orphan() from freeing the socket,
4915 * but only if the socket refcount is not zero.
4916 */
4917 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4918 err = sock_queue_err_skb(sk, skb);
4919 sock_put(sk);
4920 }
4921 if (err)
4922 kfree_skb(skb);
4923}
4924EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4925
4926/**
4927 * skb_partial_csum_set - set up and verify partial csum values for packet
4928 * @skb: the skb to set
4929 * @start: the number of bytes after skb->data to start checksumming.
4930 * @off: the offset from start to place the checksum.
4931 *
4932 * For untrusted partially-checksummed packets, we need to make sure the values
4933 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4934 *
4935 * This function checks and sets those values and skb->ip_summed: if this
4936 * returns false you should drop the packet.
4937 */
4938bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4939{
4940 u32 csum_end = (u32)start + (u32)off + sizeof(__sum16);
4941 u32 csum_start = skb_headroom(skb) + (u32)start;
4942
4943 if (unlikely(csum_start > U16_MAX || csum_end > skb_headlen(skb))) {
4944 net_warn_ratelimited("bad partial csum: csum=%u/%u headroom=%u headlen=%u\n",
4945 start, off, skb_headroom(skb), skb_headlen(skb));
4946 return false;
4947 }
4948 skb->ip_summed = CHECKSUM_PARTIAL;
4949 skb->csum_start = csum_start;
4950 skb->csum_offset = off;
4951 skb_set_transport_header(skb, start);
4952 return true;
4953}
4954EXPORT_SYMBOL_GPL(skb_partial_csum_set);
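/*
 * Example: sanity-checking device-supplied offsets before trusting
 * them (sketch; csum_start and csum_offset come from an untrusted
 * virtio-net style header):
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */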
4955
4956static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4957 unsigned int max)
4958{
4959 if (skb_headlen(skb) >= len)
4960 return 0;
4961
4962	/* If we need to pull up, then pull up to the max so we
4963 * won't need to do it again.
4964 */
4965 if (max > skb->len)
4966 max = skb->len;
4967
4968 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4969 return -ENOMEM;
4970
4971 if (skb_headlen(skb) < len)
4972 return -EPROTO;
4973
4974 return 0;
4975}
4976
4977#define MAX_TCP_HDR_LEN (15 * 4)
4978
4979static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4980 typeof(IPPROTO_IP) proto,
4981 unsigned int off)
4982{
4983 int err;
4984
4985 switch (proto) {
4986 case IPPROTO_TCP:
4987 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4988 off + MAX_TCP_HDR_LEN);
4989 if (!err && !skb_partial_csum_set(skb, off,
4990 offsetof(struct tcphdr,
4991 check)))
4992 err = -EPROTO;
4993 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4994
4995 case IPPROTO_UDP:
4996 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4997 off + sizeof(struct udphdr));
4998 if (!err && !skb_partial_csum_set(skb, off,
4999 offsetof(struct udphdr,
5000 check)))
5001 err = -EPROTO;
5002 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
5003 }
5004
5005 return ERR_PTR(-EPROTO);
5006}
5007
5008/* This value should be large enough to cover a tagged ethernet header plus
5009 * maximally sized IP and TCP or UDP headers.
5010 */
5011#define MAX_IP_HDR_LEN 128
5012
5013static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
5014{
5015 unsigned int off;
5016 bool fragment;
5017 __sum16 *csum;
5018 int err;
5019
5020 fragment = false;
5021
5022 err = skb_maybe_pull_tail(skb,
5023 sizeof(struct iphdr),
5024 MAX_IP_HDR_LEN);
5025 if (err < 0)
5026 goto out;
5027
5028 if (ip_is_fragment(ip_hdr(skb)))
5029 fragment = true;
5030
5031 off = ip_hdrlen(skb);
5032
5033 err = -EPROTO;
5034
5035 if (fragment)
5036 goto out;
5037
5038 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
5039 if (IS_ERR(csum))
5040 return PTR_ERR(csum);
5041
5042 if (recalculate)
5043 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
5044 ip_hdr(skb)->daddr,
5045 skb->len - off,
5046 ip_hdr(skb)->protocol, 0);
5047 err = 0;
5048
5049out:
5050 return err;
5051}
5052
5053/* This value should be large enough to cover a tagged ethernet header plus
5054 * an IPv6 header, all options, and a maximal TCP or UDP header.
5055 */
5056#define MAX_IPV6_HDR_LEN 256
5057
5058#define OPT_HDR(type, skb, off) \
5059 (type *)(skb_network_header(skb) + (off))
5060
5061static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
5062{
5063 int err;
5064 u8 nexthdr;
5065 unsigned int off;
5066 unsigned int len;
5067 bool fragment;
5068 bool done;
5069 __sum16 *csum;
5070
5071 fragment = false;
5072 done = false;
5073
5074 off = sizeof(struct ipv6hdr);
5075
5076 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
5077 if (err < 0)
5078 goto out;
5079
5080 nexthdr = ipv6_hdr(skb)->nexthdr;
5081
5082 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
5083 while (off <= len && !done) {
5084 switch (nexthdr) {
5085 case IPPROTO_DSTOPTS:
5086 case IPPROTO_HOPOPTS:
5087 case IPPROTO_ROUTING: {
5088 struct ipv6_opt_hdr *hp;
5089
5090 err = skb_maybe_pull_tail(skb,
5091 off +
5092 sizeof(struct ipv6_opt_hdr),
5093 MAX_IPV6_HDR_LEN);
5094 if (err < 0)
5095 goto out;
5096
5097 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
5098 nexthdr = hp->nexthdr;
5099 off += ipv6_optlen(hp);
5100 break;
5101 }
5102 case IPPROTO_AH: {
5103 struct ip_auth_hdr *hp;
5104
5105 err = skb_maybe_pull_tail(skb,
5106 off +
5107 sizeof(struct ip_auth_hdr),
5108 MAX_IPV6_HDR_LEN);
5109 if (err < 0)
5110 goto out;
5111
5112 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
5113 nexthdr = hp->nexthdr;
5114 off += ipv6_authlen(hp);
5115 break;
5116 }
5117 case IPPROTO_FRAGMENT: {
5118 struct frag_hdr *hp;
5119
5120 err = skb_maybe_pull_tail(skb,
5121 off +
5122 sizeof(struct frag_hdr),
5123 MAX_IPV6_HDR_LEN);
5124 if (err < 0)
5125 goto out;
5126
5127 hp = OPT_HDR(struct frag_hdr, skb, off);
5128
5129 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
5130 fragment = true;
5131
5132 nexthdr = hp->nexthdr;
5133 off += sizeof(struct frag_hdr);
5134 break;
5135 }
5136 default:
5137 done = true;
5138 break;
5139 }
5140 }
5141
5142 err = -EPROTO;
5143
5144 if (!done || fragment)
5145 goto out;
5146
5147 csum = skb_checksum_setup_ip(skb, nexthdr, off);
5148 if (IS_ERR(csum))
5149 return PTR_ERR(csum);
5150
5151 if (recalculate)
5152 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5153 &ipv6_hdr(skb)->daddr,
5154 skb->len - off, nexthdr, 0);
5155 err = 0;
5156
5157out:
5158 return err;
5159}
5160
5161/**
5162 * skb_checksum_setup - set up partial checksum offset
5163 * @skb: the skb to set up
5164 * @recalculate: if true the pseudo-header checksum will be recalculated
5165 */
5166int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
5167{
5168 int err;
5169
5170 switch (skb->protocol) {
5171 case htons(ETH_P_IP):
5172 err = skb_checksum_setup_ipv4(skb, recalculate);
5173 break;
5174
5175 case htons(ETH_P_IPV6):
5176 err = skb_checksum_setup_ipv6(skb, recalculate);
5177 break;
5178
5179 default:
5180 err = -EPROTO;
5181 break;
5182 }
5183
5184 return err;
5185}
5186EXPORT_SYMBOL(skb_checksum_setup);
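/*
 * Example: completing a guest-supplied partial checksum before the
 * packet enters the stack (sketch):
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 *		int err = skb_checksum_setup(skb, true);
 *
 *		if (err) {
 *			kfree_skb(skb);
 *			return err;
 *		}
 *	}
 */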
5187
5188/**
5189 * skb_checksum_maybe_trim - maybe trims the given skb
5190 * @skb: the skb to check
5191 * @transport_len: the data length beyond the network header
5192 *
5193 * Checks whether the given skb has data beyond the given transport length.
5194 * If so, returns a cloned skb trimmed to this transport length.
5195 * Otherwise returns the provided skb. Returns NULL in error cases
5196 * (e.g. transport_len exceeds skb length or out-of-memory).
5197 *
5198 * Caller needs to set the skb transport header and free any returned skb if it
5199 * differs from the provided skb.
5200 */
5201static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
5202 unsigned int transport_len)
5203{
5204 struct sk_buff *skb_chk;
5205 unsigned int len = skb_transport_offset(skb) + transport_len;
5206 int ret;
5207
5208 if (skb->len < len)
5209 return NULL;
5210 else if (skb->len == len)
5211 return skb;
5212
5213 skb_chk = skb_clone(skb, GFP_ATOMIC);
5214 if (!skb_chk)
5215 return NULL;
5216
5217 ret = pskb_trim_rcsum(skb_chk, len);
5218 if (ret) {
5219 kfree_skb(skb_chk);
5220 return NULL;
5221 }
5222
5223 return skb_chk;
5224}
5225
5226/**
5227 * skb_checksum_trimmed - validate checksum of an skb
5228 * @skb: the skb to check
5229 * @transport_len: the data length beyond the network header
5230 * @skb_chkf: checksum function to use
5231 *
5232 * Applies the given checksum function skb_chkf to the provided skb.
5233 * Returns a checked and maybe trimmed skb. Returns NULL on error.
5234 *
5235 * If the skb has data beyond the given transport length, then a
5236 * trimmed & cloned skb is checked and returned.
5237 *
5238 * Caller needs to set the skb transport header and free any returned skb if it
5239 * differs from the provided skb.
5240 */
5241struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
5242 unsigned int transport_len,
5243 __sum16(*skb_chkf)(struct sk_buff *skb))
5244{
5245 struct sk_buff *skb_chk;
5246 unsigned int offset = skb_transport_offset(skb);
5247 __sum16 ret;
5248
5249 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
5250 if (!skb_chk)
5251 goto err;
5252
5253 if (!pskb_may_pull(skb_chk, offset))
5254 goto err;
5255
5256 skb_pull_rcsum(skb_chk, offset);
5257 ret = skb_chkf(skb_chk);
5258 skb_push_rcsum(skb_chk, offset);
5259
5260 if (ret)
5261 goto err;
5262
5263 return skb_chk;
5264
5265err:
5266 if (skb_chk && skb_chk != skb)
5267 kfree_skb(skb_chk);
5268
5269 return NULL;
5270
5271}
5272EXPORT_SYMBOL(skb_checksum_trimmed);
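/*
 * Example: IGMP/MLD-style validation (sketch; "my_csum" stands for any
 * __sum16 (*)(struct sk_buff *) checker supplied by the caller):
 *
 *	struct sk_buff *skb_chk;
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_csum);
 *	if (!skb_chk)
 *		goto drop;
 *	... parse skb_chk ...
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);
 */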
5273
5274void __skb_warn_lro_forwarding(const struct sk_buff *skb)
5275{
5276 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
5277 skb->dev->name);
5278}
5279EXPORT_SYMBOL(__skb_warn_lro_forwarding);
5280
5281void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
5282{
5283 if (head_stolen) {
5284 skb_release_head_state(skb);
5285 kmem_cache_free(skbuff_head_cache, skb);
5286 } else {
5287 __kfree_skb(skb);
5288 }
5289}
5290EXPORT_SYMBOL(kfree_skb_partial);
5291
5292/**
5293 * skb_try_coalesce - try to merge skb to prior one
5294 * @to: prior buffer
5295 * @from: buffer to add
5296 * @fragstolen: pointer to boolean
5297 * @delta_truesize: how much more was allocated than was requested
5298 */
5299bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
5300 bool *fragstolen, int *delta_truesize)
5301{
5302 struct skb_shared_info *to_shinfo, *from_shinfo;
5303 int i, delta, len = from->len;
5304
5305 *fragstolen = false;
5306
5307 if (skb_cloned(to))
5308 return false;
5309
5310 /* The page pool signature of struct page will eventually figure out
5311 * which pages can be recycled or not but for now let's prohibit slab
5312 * allocated and page_pool allocated SKBs from being coalesced.
5313 */
5314 if (to->pp_recycle != from->pp_recycle)
5315 return false;
5316
5317 if (len <= skb_tailroom(to)) {
5318 if (len)
5319 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
5320 *delta_truesize = 0;
5321 return true;
5322 }
5323
5324 to_shinfo = skb_shinfo(to);
5325 from_shinfo = skb_shinfo(from);
5326 if (to_shinfo->frag_list || from_shinfo->frag_list)
5327 return false;
5328 if (skb_zcopy(to) || skb_zcopy(from))
5329 return false;
5330
5331 if (skb_headlen(from) != 0) {
5332 struct page *page;
5333 unsigned int offset;
5334
5335 if (to_shinfo->nr_frags +
5336 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
5337 return false;
5338
5339 if (skb_head_is_locked(from))
5340 return false;
5341
5342 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
5343
5344 page = virt_to_head_page(from->head);
5345 offset = from->data - (unsigned char *)page_address(page);
5346
5347 skb_fill_page_desc(to, to_shinfo->nr_frags,
5348 page, offset, skb_headlen(from));
5349 *fragstolen = true;
5350 } else {
5351 if (to_shinfo->nr_frags +
5352 from_shinfo->nr_frags > MAX_SKB_FRAGS)
5353 return false;
5354
5355 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
5356 }
5357
5358 WARN_ON_ONCE(delta < len);
5359
5360 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
5361 from_shinfo->frags,
5362 from_shinfo->nr_frags * sizeof(skb_frag_t));
5363 to_shinfo->nr_frags += from_shinfo->nr_frags;
5364
5365 if (!skb_cloned(from))
5366 from_shinfo->nr_frags = 0;
5367
5368 /* if the skb is not cloned this does nothing
5369 * since we set nr_frags to 0.
5370 */
5371 for (i = 0; i < from_shinfo->nr_frags; i++)
5372 __skb_frag_ref(&from_shinfo->frags[i]);
5373
5374 to->truesize += delta;
5375 to->len += len;
5376 to->data_len += len;
5377
5378 *delta_truesize = delta;
5379 return true;
5380}
5381EXPORT_SYMBOL(skb_try_coalesce);
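/*
 * Example: the usual receive-queue pattern, pairing with
 * kfree_skb_partial() above (sketch; tail is the queue's last skb,
 * memory accounting simplified):
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);
 *		atomic_add(delta, &sk->sk_rmem_alloc);
 *	} else {
 *		__skb_queue_tail(&sk->sk_receive_queue, skb);
 *	}
 */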
5382
5383/**
5384 * skb_scrub_packet - scrub an skb
5385 *
5386 * @skb: buffer to clean
5387 * @xnet: packet is crossing netns
5388 *
5389 * skb_scrub_packet can be used after encapsulating or decapsulting a packet
5390 * into/from a tunnel. Some information have to be cleared during these
5391 * operations.
5392 * skb_scrub_packet can also be used to clean a skb before injecting it in
5393 * another namespace (@xnet == true). We have to clear all information in the
5394 * skb that could impact namespace isolation.
5395 */
5396void skb_scrub_packet(struct sk_buff *skb, bool xnet)
5397{
5398 skb->pkt_type = PACKET_HOST;
5399 skb->skb_iif = 0;
5400 skb->ignore_df = 0;
5401 skb_dst_drop(skb);
5402 skb_ext_reset(skb);
5403 nf_reset_ct(skb);
5404 nf_reset_trace(skb);
5405
5406#ifdef CONFIG_NET_SWITCHDEV
5407 skb->offload_fwd_mark = 0;
5408 skb->offload_l3_fwd_mark = 0;
5409#endif
5410
5411 if (!xnet)
5412 return;
5413
5414 ipvs_reset(skb);
5415 skb->mark = 0;
5416 skb->tstamp = 0;
5417}
5418EXPORT_SYMBOL_GPL(skb_scrub_packet);
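/*
 * Example: a tunnel receive path scrubbing only when the packet
 * crosses a netns boundary (sketch):
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(rx_dev), dev_net(tx_dev)));
 */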
5419
5420/**
5421 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
5422 *
5423 * @skb: GSO skb
5424 *
5425 * skb_gso_transport_seglen is used to determine the real size of the
5426 * individual segments, including Layer4 headers (TCP/UDP).
5427 *
5428 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
5429 */
5430static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
5431{
5432 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5433 unsigned int thlen = 0;
5434
5435 if (skb->encapsulation) {
5436 thlen = skb_inner_transport_header(skb) -
5437 skb_transport_header(skb);
5438
5439 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
5440 thlen += inner_tcp_hdrlen(skb);
5441 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
5442 thlen = tcp_hdrlen(skb);
5443 } else if (unlikely(skb_is_gso_sctp(skb))) {
5444 thlen = sizeof(struct sctphdr);
5445 } else if (shinfo->gso_type & SKB_GSO_UDP_L4) {
5446 thlen = sizeof(struct udphdr);
5447 }
5448 /* UFO sets gso_size to the size of the fragmentation
5449 * payload, i.e. the size of the L4 (UDP) header is already
5450 * accounted for.
5451 */
5452 return thlen + shinfo->gso_size;
5453}
5454
5455/**
5456 * skb_gso_network_seglen - Return length of individual segments of a gso packet
5457 *
5458 * @skb: GSO skb
5459 *
5460 * skb_gso_network_seglen is used to determine the real size of the
5461 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
5462 *
5463 * The MAC/L2 header is not accounted for.
5464 */
5465static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
5466{
5467 unsigned int hdr_len = skb_transport_header(skb) -
5468 skb_network_header(skb);
5469
5470 return hdr_len + skb_gso_transport_seglen(skb);
5471}
5472
5473/**
5474 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
5475 *
5476 * @skb: GSO skb
5477 *
5478 * skb_gso_mac_seglen is used to determine the real size of the
5479 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
5480 * headers (TCP/UDP).
5481 */
5482static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
5483{
5484 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
5485
5486 return hdr_len + skb_gso_transport_seglen(skb);
5487}
5488
5489/**
5490 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
5491 *
5492 * There are a couple of instances where we have a GSO skb, and we
5493 * want to determine what size it would be after it is segmented.
5494 *
5495 * We might want to check:
5496 * - L3+L4+payload size (e.g. IP forwarding)
5497 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
5498 *
5499 * This is a helper to do that correctly considering GSO_BY_FRAGS.
5500 *
5501 * @skb: GSO skb
5502 *
5503 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
5504 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
5505 *
5506 * @max_len: The maximum permissible length.
5507 *
5508 * Returns true if the segmented length <= max length.
5509 */
5510static inline bool skb_gso_size_check(const struct sk_buff *skb,
5511 unsigned int seg_len,
5512 unsigned int max_len) {
5513 const struct skb_shared_info *shinfo = skb_shinfo(skb);
5514 const struct sk_buff *iter;
5515
5516 if (shinfo->gso_size != GSO_BY_FRAGS)
5517 return seg_len <= max_len;
5518
5519 /* Undo this so we can re-use header sizes */
5520 seg_len -= GSO_BY_FRAGS;
5521
5522 skb_walk_frags(skb, iter) {
5523 if (seg_len + skb_headlen(iter) > max_len)
5524 return false;
5525 }
5526
5527 return true;
5528}
5529
5530/**
5531 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5532 *
5533 * @skb: GSO skb
5534 * @mtu: MTU to validate against
5535 *
5536 * skb_gso_validate_network_len validates if a given skb will fit a
5537 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5538 * payload.
5539 */
5540bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5541{
5542 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5543}
5544EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
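/*
 * Example: an IP-forwarding style MTU check (sketch; the ICMP
 * FRAG_NEEDED reply is elided):
 *
 *	if (skb_is_gso(skb) && !skb_gso_validate_network_len(skb, mtu))
 *		goto send_frag_needed;
 */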
5545
5546/**
5547 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5548 *
5549 * @skb: GSO skb
5550 * @len: length to validate against
5551 *
5552 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5553 * length once split, including L2, L3 and L4 headers and the payload.
5554 */
5555bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5556{
5557 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5558}
5559EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5560
5561static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5562{
5563 int mac_len, meta_len;
5564 void *meta;
5565
5566 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5567 kfree_skb(skb);
5568 return NULL;
5569 }
5570
5571 mac_len = skb->data - skb_mac_header(skb);
5572 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5573 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5574 mac_len - VLAN_HLEN - ETH_TLEN);
5575 }
5576
5577 meta_len = skb_metadata_len(skb);
5578 if (meta_len) {
5579 meta = skb_metadata_end(skb) - meta_len;
5580 memmove(meta + VLAN_HLEN, meta, meta_len);
5581 }
5582
5583 skb->mac_header += VLAN_HLEN;
5584 return skb;
5585}
5586
5587struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5588{
5589 struct vlan_hdr *vhdr;
5590 u16 vlan_tci;
5591
5592 if (unlikely(skb_vlan_tag_present(skb))) {
5593		/* vlan_tci is already set up, so leave this for another time */
5594 return skb;
5595 }
5596
5597 skb = skb_share_check(skb, GFP_ATOMIC);
5598 if (unlikely(!skb))
5599 goto err_free;
5600 /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
5601 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
5602 goto err_free;
5603
5604 vhdr = (struct vlan_hdr *)skb->data;
5605 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5606 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5607
5608 skb_pull_rcsum(skb, VLAN_HLEN);
5609 vlan_set_encap_proto(skb, vhdr);
5610
5611 skb = skb_reorder_vlan_header(skb);
5612 if (unlikely(!skb))
5613 goto err_free;
5614
5615 skb_reset_network_header(skb);
5616 if (!skb_transport_header_was_set(skb))
5617 skb_reset_transport_header(skb);
5618 skb_reset_mac_len(skb);
5619
5620 return skb;
5621
5622err_free:
5623 kfree_skb(skb);
5624 return NULL;
5625}
5626EXPORT_SYMBOL(skb_vlan_untag);
5627
5628int skb_ensure_writable(struct sk_buff *skb, int write_len)
5629{
5630 if (!pskb_may_pull(skb, write_len))
5631 return -ENOMEM;
5632
5633 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5634 return 0;
5635
5636 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5637}
5638EXPORT_SYMBOL(skb_ensure_writable);
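/*
 * Example: making the ethernet header private before editing it in
 * place (sketch; new_dst is a caller-provided address):
 *
 *	int err = skb_ensure_writable(skb, ETH_HLEN);
 *
 *	if (err)
 *		return err;
 *	ether_addr_copy(eth_hdr(skb)->h_dest, new_dst);
 */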
5639
5640/* remove VLAN header from packet and update csum accordingly.
5641 * expects a non skb_vlan_tag_present skb with a vlan tag payload
5642 */
5643int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5644{
5645 struct vlan_hdr *vhdr;
5646 int offset = skb->data - skb_mac_header(skb);
5647 int err;
5648
5649 if (WARN_ONCE(offset,
5650 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5651 offset)) {
5652 return -EINVAL;
5653 }
5654
5655 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5656 if (unlikely(err))
5657 return err;
5658
5659 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5660
5661 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5662 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5663
5664 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5665 __skb_pull(skb, VLAN_HLEN);
5666
5667 vlan_set_encap_proto(skb, vhdr);
5668 skb->mac_header += VLAN_HLEN;
5669
5670 if (skb_network_offset(skb) < ETH_HLEN)
5671 skb_set_network_header(skb, ETH_HLEN);
5672
5673 skb_reset_mac_len(skb);
5674
5675 return err;
5676}
5677EXPORT_SYMBOL(__skb_vlan_pop);
5678
5679/* Pop a vlan tag either from hwaccel or from payload.
5680 * Expects skb->data at mac header.
5681 */
5682int skb_vlan_pop(struct sk_buff *skb)
5683{
5684 u16 vlan_tci;
5685 __be16 vlan_proto;
5686 int err;
5687
5688 if (likely(skb_vlan_tag_present(skb))) {
5689 __vlan_hwaccel_clear_tag(skb);
5690 } else {
5691 if (unlikely(!eth_type_vlan(skb->protocol)))
5692 return 0;
5693
5694 err = __skb_vlan_pop(skb, &vlan_tci);
5695 if (err)
5696 return err;
5697 }
5698 /* move next vlan tag to hw accel tag */
5699 if (likely(!eth_type_vlan(skb->protocol)))
5700 return 0;
5701
5702 vlan_proto = skb->protocol;
5703 err = __skb_vlan_pop(skb, &vlan_tci);
5704 if (unlikely(err))
5705 return err;
5706
5707 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5708 return 0;
5709}
5710EXPORT_SYMBOL(skb_vlan_pop);
5711
5712/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5713 * Expects skb->data at mac header.
5714 */
5715int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5716{
5717 if (skb_vlan_tag_present(skb)) {
5718 int offset = skb->data - skb_mac_header(skb);
5719 int err;
5720
5721 if (WARN_ONCE(offset,
5722 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5723 offset)) {
5724 return -EINVAL;
5725 }
5726
5727 err = __vlan_insert_tag(skb, skb->vlan_proto,
5728 skb_vlan_tag_get(skb));
5729 if (err)
5730 return err;
5731
5732 skb->protocol = skb->vlan_proto;
5733 skb->mac_len += VLAN_HLEN;
5734
5735 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5736 }
5737 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5738 return 0;
5739}
5740EXPORT_SYMBOL(skb_vlan_push);
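/*
 * Example: rewriting the outermost VLAN tag via pop then push (sketch;
 * assumes skb->data is at the mac header, as both helpers expect):
 *
 *	int err = skb_vlan_pop(skb);
 *
 *	if (!err)
 *		err = skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
 */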
5741
5742/**
5743 * skb_eth_pop() - Drop the Ethernet header at the head of a packet
5744 *
5745 * @skb: Socket buffer to modify
5746 *
5747 * Drop the Ethernet header of @skb.
5748 *
5749 * Expects that skb->data points to the mac header and that no VLAN tags are
5750 * present.
5751 *
5752 * Returns 0 on success, -errno otherwise.
5753 */
5754int skb_eth_pop(struct sk_buff *skb)
5755{
5756 if (!pskb_may_pull(skb, ETH_HLEN) || skb_vlan_tagged(skb) ||
5757 skb_network_offset(skb) < ETH_HLEN)
5758 return -EPROTO;
5759
5760 skb_pull_rcsum(skb, ETH_HLEN);
5761 skb_reset_mac_header(skb);
5762 skb_reset_mac_len(skb);
5763
5764 return 0;
5765}
5766EXPORT_SYMBOL(skb_eth_pop);
5767
5768/**
5769 * skb_eth_push() - Add a new Ethernet header at the head of a packet
5770 *
5771 * @skb: Socket buffer to modify
5772 * @dst: Destination MAC address of the new header
5773 * @src: Source MAC address of the new header
5774 *
5775 * Prepend @skb with a new Ethernet header.
5776 *
5777 * Expects that skb->data points to the mac header, which must be empty.
5778 *
5779 * Returns 0 on success, -errno otherwise.
5780 */
5781int skb_eth_push(struct sk_buff *skb, const unsigned char *dst,
5782 const unsigned char *src)
5783{
5784 struct ethhdr *eth;
5785 int err;
5786
5787 if (skb_network_offset(skb) || skb_vlan_tag_present(skb))
5788 return -EPROTO;
5789
5790 err = skb_cow_head(skb, sizeof(*eth));
5791 if (err < 0)
5792 return err;
5793
5794 skb_push(skb, sizeof(*eth));
5795 skb_reset_mac_header(skb);
5796 skb_reset_mac_len(skb);
5797
5798 eth = eth_hdr(skb);
5799 ether_addr_copy(eth->h_dest, dst);
5800 ether_addr_copy(eth->h_source, src);
5801 eth->h_proto = skb->protocol;
5802
5803 skb_postpush_rcsum(skb, eth, sizeof(*eth));
5804
5805 return 0;
5806}
5807EXPORT_SYMBOL(skb_eth_push);
5808
5809/* Update the ethertype of hdr and the skb csum value if required. */
5810static void skb_mod_eth_type(struct sk_buff *skb, struct ethhdr *hdr,
5811 __be16 ethertype)
5812{
5813 if (skb->ip_summed == CHECKSUM_COMPLETE) {
5814 __be16 diff[] = { ~hdr->h_proto, ethertype };
5815
5816 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5817 }
5818
5819 hdr->h_proto = ethertype;
5820}
5821
5822/**
5823 * skb_mpls_push() - push a new MPLS header after mac_len bytes from start of
5824 * the packet
5825 *
5826 * @skb: buffer
5827 * @mpls_lse: MPLS label stack entry to push
5828 * @mpls_proto: ethertype of the new MPLS header (expects 0x8847 or 0x8848)
5829 * @mac_len: length of the MAC header
5830 * @ethernet: flag to indicate if the resulting packet after skb_mpls_push is
5831 * ethernet
5832 *
5833 * Expects skb->data at mac header.
5834 *
5835 * Returns 0 on success, -errno otherwise.
5836 */
5837int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
5838 int mac_len, bool ethernet)
5839{
5840 struct mpls_shim_hdr *lse;
5841 int err;
5842
5843 if (unlikely(!eth_p_mpls(mpls_proto)))
5844 return -EINVAL;
5845
5846 /* Networking stack does not allow simultaneous Tunnel and MPLS GSO. */
5847 if (skb->encapsulation)
5848 return -EINVAL;
5849
5850 err = skb_cow_head(skb, MPLS_HLEN);
5851 if (unlikely(err))
5852 return err;
5853
5854 if (!skb->inner_protocol) {
5855 skb_set_inner_network_header(skb, skb_network_offset(skb));
5856 skb_set_inner_protocol(skb, skb->protocol);
5857 }
5858
5859 skb_push(skb, MPLS_HLEN);
5860 memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
5861 mac_len);
5862 skb_reset_mac_header(skb);
5863 skb_set_network_header(skb, mac_len);
5864 skb_reset_mac_len(skb);
5865
5866 lse = mpls_hdr(skb);
5867 lse->label_stack_entry = mpls_lse;
5868 skb_postpush_rcsum(skb, lse, MPLS_HLEN);
5869
5870 if (ethernet && mac_len >= ETH_HLEN)
5871 skb_mod_eth_type(skb, eth_hdr(skb), mpls_proto);
5872 skb->protocol = mpls_proto;
5873
5874 return 0;
5875}
5876EXPORT_SYMBOL_GPL(skb_mpls_push);
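/*
 * Example: pushing a bottom-of-stack label 100 with TTL 64 onto an
 * ethernet frame (sketch):
 *
 *	__be32 lse = cpu_to_be32((100 << MPLS_LS_LABEL_SHIFT) |
 *				 (1 << MPLS_LS_S_SHIFT) |
 *				 (64 << MPLS_LS_TTL_SHIFT));
 *	int err = skb_mpls_push(skb, lse, htons(ETH_P_MPLS_UC),
 *				skb->mac_len, true);
 */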
5877
5878/**
5879 * skb_mpls_pop() - pop the outermost MPLS header
5880 *
5881 * @skb: buffer
5882 * @next_proto: ethertype of header after popped MPLS header
5883 * @mac_len: length of the MAC header
5884 * @ethernet: flag to indicate if the packet is ethernet
5885 *
5886 * Expects skb->data at mac header.
5887 *
5888 * Returns 0 on success, -errno otherwise.
5889 */
5890int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
5891 bool ethernet)
5892{
5893 int err;
5894
5895 if (unlikely(!eth_p_mpls(skb->protocol)))
5896 return 0;
5897
5898 err = skb_ensure_writable(skb, mac_len + MPLS_HLEN);
5899 if (unlikely(err))
5900 return err;
5901
5902 skb_postpull_rcsum(skb, mpls_hdr(skb), MPLS_HLEN);
5903 memmove(skb_mac_header(skb) + MPLS_HLEN, skb_mac_header(skb),
5904 mac_len);
5905
5906 __skb_pull(skb, MPLS_HLEN);
5907 skb_reset_mac_header(skb);
5908 skb_set_network_header(skb, mac_len);
5909
5910 if (ethernet && mac_len >= ETH_HLEN) {
5911 struct ethhdr *hdr;
5912
5913 /* use mpls_hdr() to get ethertype to account for VLANs. */
5914 hdr = (struct ethhdr *)((void *)mpls_hdr(skb) - ETH_HLEN);
5915 skb_mod_eth_type(skb, hdr, next_proto);
5916 }
5917 skb->protocol = next_proto;
5918
5919 return 0;
5920}
5921EXPORT_SYMBOL_GPL(skb_mpls_pop);
5922
5923/**
5924 * skb_mpls_update_lse() - modify outermost MPLS header and update csum
5925 *
5926 * @skb: buffer
5927 * @mpls_lse: new MPLS label stack entry to update to
5928 *
5929 * Expects skb->data at mac header.
5930 *
5931 * Returns 0 on success, -errno otherwise.
5932 */
5933int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse)
5934{
5935 int err;
5936
5937 if (unlikely(!eth_p_mpls(skb->protocol)))
5938 return -EINVAL;
5939
5940 err = skb_ensure_writable(skb, skb->mac_len + MPLS_HLEN);
5941 if (unlikely(err))
5942 return err;
5943
5944 if (skb->ip_summed == CHECKSUM_COMPLETE) {
5945 __be32 diff[] = { ~mpls_hdr(skb)->label_stack_entry, mpls_lse };
5946
5947 skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
5948 }
5949
5950 mpls_hdr(skb)->label_stack_entry = mpls_lse;
5951
5952 return 0;
5953}
5954EXPORT_SYMBOL_GPL(skb_mpls_update_lse);
5955
5956/**
5957 * skb_mpls_dec_ttl() - decrement the TTL of the outermost MPLS header
5958 *
5959 * @skb: buffer
5960 *
5961 * Expects skb->data at mac header.
5962 *
5963 * Returns 0 on success, -errno otherwise.
5964 */
5965int skb_mpls_dec_ttl(struct sk_buff *skb)
5966{
5967 u32 lse;
5968 u8 ttl;
5969
5970 if (unlikely(!eth_p_mpls(skb->protocol)))
5971 return -EINVAL;
5972
5973 if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN))
5974 return -ENOMEM;
5975
5976 lse = be32_to_cpu(mpls_hdr(skb)->label_stack_entry);
5977 ttl = (lse & MPLS_LS_TTL_MASK) >> MPLS_LS_TTL_SHIFT;
5978 if (!--ttl)
5979 return -EINVAL;
5980
5981 lse &= ~MPLS_LS_TTL_MASK;
5982 lse |= ttl << MPLS_LS_TTL_SHIFT;
5983
5984 return skb_mpls_update_lse(skb, cpu_to_be32(lse));
5985}
5986EXPORT_SYMBOL_GPL(skb_mpls_dec_ttl);
5987
5988/**
5989 * alloc_skb_with_frags - allocate skb with page frags
5990 *
5991 * @header_len: size of linear part
5992 * @data_len: needed length in frags
5993 * @max_page_order: max page order desired.
5994 * @errcode: pointer to error code if any
5995 * @gfp_mask: allocation mask
5996 *
5997 * This can be used to allocate a paged skb, given a maximal order for frags.
5998 */
5999struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
6000 unsigned long data_len,
6001 int max_page_order,
6002 int *errcode,
6003 gfp_t gfp_mask)
6004{
6005 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
6006 unsigned long chunk;
6007 struct sk_buff *skb;
6008 struct page *page;
6009 int i;
6010
6011 *errcode = -EMSGSIZE;
6012	/* Note this test could be relaxed if we succeed in allocating
6013 * high order pages...
6014 */
6015 if (npages > MAX_SKB_FRAGS)
6016 return NULL;
6017
6018 *errcode = -ENOBUFS;
6019 skb = alloc_skb(header_len, gfp_mask);
6020 if (!skb)
6021 return NULL;
6022
6023 skb->truesize += npages << PAGE_SHIFT;
6024
6025 for (i = 0; npages > 0; i++) {
6026 int order = max_page_order;
6027
6028 while (order) {
6029 if (npages >= 1 << order) {
6030 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
6031 __GFP_COMP |
6032 __GFP_NOWARN,
6033 order);
6034 if (page)
6035 goto fill_page;
6036 /* Do not retry other high order allocations */
6037 order = 1;
6038 max_page_order = 0;
6039 }
6040 order--;
6041 }
6042 page = alloc_page(gfp_mask);
6043 if (!page)
6044 goto failure;
6045fill_page:
6046 chunk = min_t(unsigned long, data_len,
6047 PAGE_SIZE << order);
6048 skb_fill_page_desc(skb, i, page, 0, chunk);
6049 data_len -= chunk;
6050 npages -= 1 << order;
6051 }
6052 return skb;
6053
6054failure:
6055 kfree_skb(skb);
6056 return NULL;
6057}
6058EXPORT_SYMBOL(alloc_skb_with_frags);
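/*
 * Example: a sock_alloc_send_pskb()-style caller; the function fills
 * the frags and truesize, but the caller still accounts the paged
 * length itself (sketch; PAGE_ALLOC_COSTLY_ORDER is just one sensible
 * order cap):
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(header_len, data_len,
 *				   PAGE_ALLOC_COSTLY_ORDER,
 *				   &errcode, GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(errcode);
 *	skb->data_len = data_len;
 *	skb->len += data_len;
 */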
6059
6060/* carve out the first off bytes from skb when off < headlen */
6061static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
6062 const int headlen, gfp_t gfp_mask)
6063{
6064 int i;
6065 int size = skb_end_offset(skb);
6066 int new_hlen = headlen - off;
6067 u8 *data;
6068
6069 size = SKB_DATA_ALIGN(size);
6070
6071 if (skb_pfmemalloc(skb))
6072 gfp_mask |= __GFP_MEMALLOC;
6073 data = kmalloc_reserve(size +
6074 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
6075 gfp_mask, NUMA_NO_NODE, NULL);
6076 if (!data)
6077 return -ENOMEM;
6078
6079 size = SKB_WITH_OVERHEAD(ksize(data));
6080
6081 /* Copy real data, and all frags */
6082 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
6083 skb->len -= off;
6084
6085 memcpy((struct skb_shared_info *)(data + size),
6086 skb_shinfo(skb),
6087 offsetof(struct skb_shared_info,
6088 frags[skb_shinfo(skb)->nr_frags]));
6089 if (skb_cloned(skb)) {
6090 /* drop the old head gracefully */
6091 if (skb_orphan_frags(skb, gfp_mask)) {
6092 kfree(data);
6093 return -ENOMEM;
6094 }
6095 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
6096 skb_frag_ref(skb, i);
6097 if (skb_has_frag_list(skb))
6098 skb_clone_fraglist(skb);
6099 skb_release_data(skb);
6100 } else {
6101		/* we can reuse the existing refcount - all we did was
6102		 * relocate the values
6103 */
6104 skb_free_head(skb);
6105 }
6106
6107 skb->head = data;
6108 skb->data = data;
6109 skb->head_frag = 0;
6110#ifdef NET_SKBUFF_DATA_USES_OFFSET
6111 skb->end = size;
6112#else
6113 skb->end = skb->head + size;
6114#endif
6115 skb_set_tail_pointer(skb, skb_headlen(skb));
6116 skb_headers_offset_update(skb, 0);
6117 skb->cloned = 0;
6118 skb->hdr_len = 0;
6119 skb->nohdr = 0;
6120 atomic_set(&skb_shinfo(skb)->dataref, 1);
6121
6122 return 0;
6123}
6124
6125static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
6126
6127/* carve out the first eat bytes from skb's frag_list. May recurse into
6128 * pskb_carve()
6129 */
6130static int pskb_carve_frag_list(struct sk_buff *skb,
6131 struct skb_shared_info *shinfo, int eat,
6132 gfp_t gfp_mask)
6133{
6134 struct sk_buff *list = shinfo->frag_list;
6135 struct sk_buff *clone = NULL;
6136 struct sk_buff *insp = NULL;
6137
6138 do {
6139 if (!list) {
6140 pr_err("Not enough bytes to eat. Want %d\n", eat);
6141 return -EFAULT;
6142 }
6143 if (list->len <= eat) {
6144 /* Eaten as whole. */
6145 eat -= list->len;
6146 list = list->next;
6147 insp = list;
6148 } else {
6149 /* Eaten partially. */
6150 if (skb_shared(list)) {
6151 clone = skb_clone(list, gfp_mask);
6152 if (!clone)
6153 return -ENOMEM;
6154 insp = list->next;
6155 list = clone;
6156 } else {
6157 /* This may be pulled without problems. */
6158 insp = list;
6159 }
6160 if (pskb_carve(list, eat, gfp_mask) < 0) {
6161 kfree_skb(clone);
6162 return -ENOMEM;
6163 }
6164 break;
6165 }
6166 } while (eat);
6167
6168 /* Free pulled out fragments. */
6169 while ((list = shinfo->frag_list) != insp) {
6170 shinfo->frag_list = list->next;
6171 kfree_skb(list);
6172 }
6173 /* And insert new clone at head. */
6174 if (clone) {
6175 clone->next = list;
6176 shinfo->frag_list = clone;
6177 }
6178 return 0;
6179}
6180
6181/* carve off the first off bytes from skb. Split line (off) is in the
6182 * non-linear part of skb
6183 */
6184static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
6185 int pos, gfp_t gfp_mask)
6186{
6187 int i, k = 0;
6188 int size = skb_end_offset(skb);
6189 u8 *data;
6190 const int nfrags = skb_shinfo(skb)->nr_frags;
6191 struct skb_shared_info *shinfo;
6192
6193 size = SKB_DATA_ALIGN(size);
6194
6195 if (skb_pfmemalloc(skb))
6196 gfp_mask |= __GFP_MEMALLOC;
6197 data = kmalloc_reserve(size +
6198 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
6199 gfp_mask, NUMA_NO_NODE, NULL);
6200 if (!data)
6201 return -ENOMEM;
6202
6203 size = SKB_WITH_OVERHEAD(ksize(data));
6204
6205 memcpy((struct skb_shared_info *)(data + size),
6206 skb_shinfo(skb), offsetof(struct skb_shared_info, frags[0]));
6207 if (skb_orphan_frags(skb, gfp_mask)) {
6208 kfree(data);
6209 return -ENOMEM;
6210 }
6211 shinfo = (struct skb_shared_info *)(data + size);
6212 for (i = 0; i < nfrags; i++) {
6213 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
6214
6215 if (pos + fsize > off) {
6216 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
6217
6218 if (pos < off) {
6219			/* Split frag.
6220			 * We have two variants in this case:
6221			 * 1. Move the whole frag to the second
6222			 * part, if it is possible. F.e. this
6223			 * approach is mandatory for TUX, where
6224			 * splitting is expensive.
6225			 * 2. Split accurately. This is what we do.
6226 */
6227 skb_frag_off_add(&shinfo->frags[0], off - pos);
6228 skb_frag_size_sub(&shinfo->frags[0], off - pos);
6229 }
6230 skb_frag_ref(skb, i);
6231 k++;
6232 }
6233 pos += fsize;
6234 }
6235 shinfo->nr_frags = k;
6236 if (skb_has_frag_list(skb))
6237 skb_clone_fraglist(skb);
6238
6239 /* split line is in frag list */
6240 if (k == 0 && pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask)) {
6241 /* skb_frag_unref() is not needed here as shinfo->nr_frags = 0. */
6242 if (skb_has_frag_list(skb))
6243 kfree_skb_list(skb_shinfo(skb)->frag_list);
6244 kfree(data);
6245 return -ENOMEM;
6246 }
6247 skb_release_data(skb);
6248
6249 skb->head = data;
6250 skb->head_frag = 0;
6251 skb->data = data;
6252#ifdef NET_SKBUFF_DATA_USES_OFFSET
6253 skb->end = size;
6254#else
6255 skb->end = skb->head + size;
6256#endif
6257 skb_reset_tail_pointer(skb);
6258 skb_headers_offset_update(skb, 0);
6259 skb->cloned = 0;
6260 skb->hdr_len = 0;
6261 skb->nohdr = 0;
6262 skb->len -= off;
6263 skb->data_len = skb->len;
6264 atomic_set(&skb_shinfo(skb)->dataref, 1);
6265 return 0;
6266}
6267
6268/* remove len bytes from the beginning of the skb */
6269static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
6270{
6271 int headlen = skb_headlen(skb);
6272
6273 if (len < headlen)
6274 return pskb_carve_inside_header(skb, len, headlen, gfp);
6275 else
6276 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
6277}
6278
6279/* Extract to_copy bytes starting at off from skb, and return this in
6280 * a new skb
6281 */
6282struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
6283 int to_copy, gfp_t gfp)
6284{
6285 struct sk_buff *clone = skb_clone(skb, gfp);
6286
6287 if (!clone)
6288 return NULL;
6289
6290 if (pskb_carve(clone, off, gfp) < 0 ||
6291 pskb_trim(clone, to_copy)) {
6292 kfree_skb(clone);
6293 return NULL;
6294 }
6295 return clone;
6296}
6297EXPORT_SYMBOL(pskb_extract);
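/*
 * Example: splitting out to_copy payload bytes that start hdr_len
 * bytes into the packet (sketch; names are caller-chosen):
 *
 *	struct sk_buff *part;
 *
 *	part = pskb_extract(skb, hdr_len, to_copy, GFP_ATOMIC);
 *	if (!part)
 *		return -ENOMEM;
 */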
6298
6299/**
6300 * skb_condense - try to get rid of fragments/frag_list if possible
6301 * @skb: buffer
6302 *
6303 * Can be used to save memory before skb is added to a busy queue.
6304 * If packet has bytes in frags and enough tail room in skb->head,
6305 * pull all of them, so that we can free the frags right now and adjust
6306 * truesize.
6307 * Notes:
6308 * We do not reallocate skb->head, thus this cannot fail.
6309 * Caller must re-evaluate skb->truesize if needed.
6310 */
6311void skb_condense(struct sk_buff *skb)
6312{
6313 if (skb->data_len) {
6314 if (skb->data_len > skb->end - skb->tail ||
6315 skb_cloned(skb))
6316 return;
6317
6318 /* Nice, we can free page frag(s) right now */
6319 __pskb_pull_tail(skb, skb->data_len);
6320 }
6321	/* At this point, skb->truesize might be overestimated,
6322 * because skb had a fragment, and fragments do not tell
6323 * their truesize.
6324 * When we pulled its content into skb->head, fragment
6325 * was freed, but __pskb_pull_tail() could not possibly
6326 * adjust skb->truesize, not knowing the frag truesize.
6327 */
6328 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
6329}
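/*
 * Example: condensing before adding to a busy backlog, then accounting
 * the (possibly reduced) truesize as the note above requires (sketch):
 *
 *	skb_condense(skb);
 *	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 */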
6330
6331#ifdef CONFIG_SKB_EXTENSIONS
6332static void *skb_ext_get_ptr(struct skb_ext *ext, enum skb_ext_id id)
6333{
6334 return (void *)ext + (ext->offset[id] * SKB_EXT_ALIGN_VALUE);
6335}
6336
6337/**
6338 * __skb_ext_alloc - allocate new skb extension storage
6339 *
6340 * @flags: See kmalloc().
6341 *
6342 * Returns the newly allocated pointer. The pointer can later be attached to
6343 * an skb via __skb_ext_set().
6344 * Note: caller must handle the skb_ext as opaque data.
6345 */
6346struct skb_ext *__skb_ext_alloc(gfp_t flags)
6347{
6348 struct skb_ext *new = kmem_cache_alloc(skbuff_ext_cache, flags);
6349
6350 if (new) {
6351 memset(new->offset, 0, sizeof(new->offset));
6352 refcount_set(&new->refcnt, 1);
6353 }
6354
6355 return new;
6356}
6357
6358static struct skb_ext *skb_ext_maybe_cow(struct skb_ext *old,
6359 unsigned int old_active)
6360{
6361 struct skb_ext *new;
6362
6363 if (refcount_read(&old->refcnt) == 1)
6364 return old;
6365
6366 new = kmem_cache_alloc(skbuff_ext_cache, GFP_ATOMIC);
6367 if (!new)
6368 return NULL;
6369
6370 memcpy(new, old, old->chunks * SKB_EXT_ALIGN_VALUE);
6371 refcount_set(&new->refcnt, 1);
6372
6373#ifdef CONFIG_XFRM
6374 if (old_active & (1 << SKB_EXT_SEC_PATH)) {
6375 struct sec_path *sp = skb_ext_get_ptr(old, SKB_EXT_SEC_PATH);
6376 unsigned int i;
6377
6378 for (i = 0; i < sp->len; i++)
6379 xfrm_state_hold(sp->xvec[i]);
6380 }
6381#endif
6382 __skb_ext_put(old);
6383 return new;
6384}
6385
6386/**
6387 * __skb_ext_set - attach the specified extension storage to this skb
6388 * @skb: buffer
6389 * @id: extension id
6390 * @ext: extension storage previously allocated via __skb_ext_alloc()
6391 *
6392 * Existing extensions, if any, are cleared.
6393 *
6394 * Returns the pointer to the extension.
6395 */
6396void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
6397 struct skb_ext *ext)
6398{
6399 unsigned int newlen, newoff = SKB_EXT_CHUNKSIZEOF(*ext);
6400
6401 skb_ext_put(skb);
6402 newlen = newoff + skb_ext_type_len[id];
6403 ext->chunks = newlen;
6404 ext->offset[id] = newoff;
6405 skb->extensions = ext;
6406 skb->active_extensions = 1 << id;
6407 return skb_ext_get_ptr(ext, id);
6408}
6409
6410/**
6411 * skb_ext_add - allocate space for given extension, COW if needed
6412 * @skb: buffer
6413 * @id: extension to allocate space for
6414 *
6415 * Allocates enough space for the given extension.
6416 * If the extension is already present, a pointer to that extension
6417 * is returned.
6418 *
6419 * If the skb was cloned, COW applies and the returned memory can be
6420 * modified without changing the extension space of cloned buffers.
6421 *
6422 * Returns pointer to the extension or NULL on allocation failure.
6423 */
6424void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id)
6425{
6426 struct skb_ext *new, *old = NULL;
6427 unsigned int newlen, newoff;
6428
6429 if (skb->active_extensions) {
6430 old = skb->extensions;
6431
6432 new = skb_ext_maybe_cow(old, skb->active_extensions);
6433 if (!new)
6434 return NULL;
6435
6436 if (__skb_ext_exist(new, id))
6437 goto set_active;
6438
6439 newoff = new->chunks;
6440 } else {
6441 newoff = SKB_EXT_CHUNKSIZEOF(*new);
6442
6443 new = __skb_ext_alloc(GFP_ATOMIC);
6444 if (!new)
6445 return NULL;
6446 }
6447
6448 newlen = newoff + skb_ext_type_len[id];
6449 new->chunks = newlen;
6450 new->offset[id] = newoff;
6451set_active:
6452 skb->extensions = new;
6453 skb->active_extensions |= 1 << id;
6454 return skb_ext_get_ptr(new, id);
6455}
6456EXPORT_SYMBOL(skb_ext_add);
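/*
 * Example: attaching and filling a TC extension (sketch; requires
 * CONFIG_NET_TC_SKB_EXT, and "chain" is caller state):
 *
 *	struct tc_skb_ext *ext = skb_ext_add(skb, TC_SKB_EXT);
 *
 *	if (!ext)
 *		return -ENOMEM;
 *	memset(ext, 0, sizeof(*ext));
 *	ext->chain = chain;
 */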
6457
6458#ifdef CONFIG_XFRM
6459static void skb_ext_put_sp(struct sec_path *sp)
6460{
6461 unsigned int i;
6462
6463 for (i = 0; i < sp->len; i++)
6464 xfrm_state_put(sp->xvec[i]);
6465}
6466#endif
6467
6468void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id)
6469{
6470 struct skb_ext *ext = skb->extensions;
6471
6472 skb->active_extensions &= ~(1 << id);
6473 if (skb->active_extensions == 0) {
6474 skb->extensions = NULL;
6475 __skb_ext_put(ext);
6476#ifdef CONFIG_XFRM
6477 } else if (id == SKB_EXT_SEC_PATH &&
6478 refcount_read(&ext->refcnt) == 1) {
6479 struct sec_path *sp = skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH);
6480
6481 skb_ext_put_sp(sp);
6482 sp->len = 0;
6483#endif
6484 }
6485}
6486EXPORT_SYMBOL(__skb_ext_del);
6487
6488void __skb_ext_put(struct skb_ext *ext)
6489{
6490	/* If this is the last clone, nothing can increment
6491	 * it after the check passes. Avoids one atomic op.
6492 */
6493 if (refcount_read(&ext->refcnt) == 1)
6494 goto free_now;
6495
6496 if (!refcount_dec_and_test(&ext->refcnt))
6497 return;
6498free_now:
6499#ifdef CONFIG_XFRM
6500 if (__skb_ext_exist(ext, SKB_EXT_SEC_PATH))
6501 skb_ext_put_sp(skb_ext_get_ptr(ext, SKB_EXT_SEC_PATH));
6502#endif
6503
6504 kmem_cache_free(skbuff_ext_cache, ext);
6505}
6506EXPORT_SYMBOL(__skb_ext_put);
6507#endif /* CONFIG_SKB_EXTENSIONS */