1/*
2 * Routines having to do with the 'struct sk_buff' memory handlers.
3 *
4 * Authors: Alan Cox <alan@lxorguk.ukuu.org.uk>
5 * Florian La Roche <rzsfl@rz.uni-sb.de>
6 *
7 * Fixes:
8 * Alan Cox : Fixed the worst of the load
9 * balancer bugs.
10 * Dave Platt : Interrupt stacking fix.
11 * Richard Kooijman : Timestamp fixes.
12 * Alan Cox : Changed buffer format.
13 * Alan Cox : destructor hook for AF_UNIX etc.
14 * Linus Torvalds : Better skb_clone.
15 * Alan Cox : Added skb_copy.
16 * Alan Cox : Added all the changed routines Linus
17 * only put in the headers
18 * Ray VanTassle : Fixed --skb->lock in free
19 * Alan Cox : skb_copy copy arp field
20 * Andi Kleen : slabified it.
21 * Robert Olsson : Removed skb_head_pool
22 *
23 * NOTE:
24 * The __skb_ routines should be called with interrupts
25 * disabled, or you better be *real* sure that the operation is atomic
26 * with respect to whatever list is being frobbed (e.g. via lock_sock()
27 * or via disabling bottom half handlers, etc).
28 *
29 * This program is free software; you can redistribute it and/or
30 * modify it under the terms of the GNU General Public License
31 * as published by the Free Software Foundation; either version
32 * 2 of the License, or (at your option) any later version.
33 */
34
35/*
36 * The functions in this file will not compile correctly with gcc 2.4.x
37 */
38
39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
40
41#include <linux/module.h>
42#include <linux/types.h>
43#include <linux/kernel.h>
44#include <linux/kmemcheck.h>
45#include <linux/mm.h>
46#include <linux/interrupt.h>
47#include <linux/in.h>
48#include <linux/inet.h>
49#include <linux/slab.h>
50#include <linux/tcp.h>
51#include <linux/udp.h>
52#include <linux/netdevice.h>
53#ifdef CONFIG_NET_CLS_ACT
54#include <net/pkt_sched.h>
55#endif
56#include <linux/string.h>
57#include <linux/skbuff.h>
58#include <linux/splice.h>
59#include <linux/cache.h>
60#include <linux/rtnetlink.h>
61#include <linux/init.h>
62#include <linux/scatterlist.h>
63#include <linux/errqueue.h>
64#include <linux/prefetch.h>
65#include <linux/if_vlan.h>
66
67#include <net/protocol.h>
68#include <net/dst.h>
69#include <net/sock.h>
70#include <net/checksum.h>
71#include <net/ip6_checksum.h>
72#include <net/xfrm.h>
73
74#include <asm/uaccess.h>
75#include <trace/events/skb.h>
76#include <linux/highmem.h>
77#include <linux/capability.h>
78#include <linux/user_namespace.h>
79
80struct kmem_cache *skbuff_head_cache __read_mostly;
81static struct kmem_cache *skbuff_fclone_cache __read_mostly;
82int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
83EXPORT_SYMBOL(sysctl_max_skb_frags);
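
/* Illustrative sketch, not part of the original file: the locking NOTE at
 * the top of this file means the lockless __skb_*() list helpers may only
 * run once the target list is protected.  A caller could do either of the
 * following (the helper name is hypothetical):
 */
static void __maybe_unused example_queue_tail(struct sk_buff_head *list,
					      struct sk_buff *skb)
{
	unsigned long flags;

	/* take the queue lock and disable interrupts around the __skb_ call */
	spin_lock_irqsave(&list->lock, flags);
	__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);

	/* ...or simply use the locked variant, skb_queue_tail(list, skb) */
}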
84
85/**
86 * skb_panic - private function for out-of-line support
87 * @skb: buffer
88 * @sz: size
89 * @addr: address
90 * @msg: skb_over_panic or skb_under_panic
91 *
92 * Out-of-line support for skb_put() and skb_push().
93 * Called via the wrapper skb_over_panic() or skb_under_panic().
94 * Keep out of line to prevent kernel bloat.
95 * __builtin_return_address is not used because it is not always reliable.
96 */
97static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
98 const char msg[])
99{
100 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
101 msg, addr, skb->len, sz, skb->head, skb->data,
102 (unsigned long)skb->tail, (unsigned long)skb->end,
103 skb->dev ? skb->dev->name : "<NULL>");
104 BUG();
105}
106
107static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
108{
109 skb_panic(skb, sz, addr, __func__);
110}
111
112static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
113{
114 skb_panic(skb, sz, addr, __func__);
115}
116
117/*
118 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
119 * the caller if emergency pfmemalloc reserves are being used. If it is and
120 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
121 * may be used. Otherwise, the packet data may be discarded until enough
122 * memory is free
123 */
124#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
125 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
126
127static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
128 unsigned long ip, bool *pfmemalloc)
129{
130 void *obj;
131 bool ret_pfmemalloc = false;
132
133 /*
134 * Try a regular allocation, when that fails and we're not entitled
135 * to the reserves, fail.
136 */
137 obj = kmalloc_node_track_caller(size,
138 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
139 node);
140 if (obj || !(gfp_pfmemalloc_allowed(flags)))
141 goto out;
142
143 /* Try again but now we are using pfmemalloc reserves */
144 ret_pfmemalloc = true;
145 obj = kmalloc_node_track_caller(size, flags, node);
146
147out:
148 if (pfmemalloc)
149 *pfmemalloc = ret_pfmemalloc;
150
151 return obj;
152}
153
154/* Allocate a new skbuff. We do this ourselves so we can fill in a few
155 * 'private' fields and also do memory statistics to find all the
156 * [BEEP] leaks.
157 *
158 */
159
160struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
161{
162 struct sk_buff *skb;
163
164 /* Get the HEAD */
165 skb = kmem_cache_alloc_node(skbuff_head_cache,
166 gfp_mask & ~__GFP_DMA, node);
167 if (!skb)
168 goto out;
169
170 /*
171 * Only clear those fields we need to clear, not those that we will
172 * actually initialise below. Hence, don't put any more fields after
173 * the tail pointer in struct sk_buff!
174 */
175 memset(skb, 0, offsetof(struct sk_buff, tail));
176 skb->head = NULL;
177 skb->truesize = sizeof(struct sk_buff);
178 atomic_set(&skb->users, 1);
179
180 skb->mac_header = (typeof(skb->mac_header))~0U;
181out:
182 return skb;
183}
184
185/**
186 * __alloc_skb - allocate a network buffer
187 * @size: size to allocate
188 * @gfp_mask: allocation mask
189 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
190 * instead of head cache and allocate a cloned (child) skb.
191 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
192 * allocations in case the data is required for writeback
193 * @node: numa node to allocate memory on
194 *
195 * Allocate a new &sk_buff. The returned buffer has no headroom and a
196 * tail room of at least size bytes. The object has a reference count
197 * of one. The return is the buffer. On a failure the return is %NULL.
198 *
199 * Buffers may only be allocated from interrupts using a @gfp_mask of
200 * %GFP_ATOMIC.
201 */
202struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
203 int flags, int node)
204{
205 struct kmem_cache *cache;
206 struct skb_shared_info *shinfo;
207 struct sk_buff *skb;
208 u8 *data;
209 bool pfmemalloc;
210
211 cache = (flags & SKB_ALLOC_FCLONE)
212 ? skbuff_fclone_cache : skbuff_head_cache;
213
214 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
215 gfp_mask |= __GFP_MEMALLOC;
216
217 /* Get the HEAD */
218 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
219 if (!skb)
220 goto out;
221 prefetchw(skb);
222
223 /* We do our best to align skb_shared_info on a separate cache
224 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
225 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
226 * Both skb->head and skb_shared_info are cache line aligned.
227 */
228 size = SKB_DATA_ALIGN(size);
229 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
230 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
231 if (!data)
232 goto nodata;
233 /* kmalloc(size) might give us more room than requested.
234 * Put skb_shared_info exactly at the end of allocated zone,
235 * to allow max possible filling before reallocation.
236 */
237 size = SKB_WITH_OVERHEAD(ksize(data));
238 prefetchw(data + size);
239
240 /*
241 * Only clear those fields we need to clear, not those that we will
242 * actually initialise below. Hence, don't put any more fields after
243 * the tail pointer in struct sk_buff!
244 */
245 memset(skb, 0, offsetof(struct sk_buff, tail));
246 /* Account for allocated memory : skb + skb->head */
247 skb->truesize = SKB_TRUESIZE(size);
248 skb->pfmemalloc = pfmemalloc;
249 atomic_set(&skb->users, 1);
250 skb->head = data;
251 skb->data = data;
252 skb_reset_tail_pointer(skb);
253 skb->end = skb->tail + size;
254 skb->mac_header = (typeof(skb->mac_header))~0U;
255 skb->transport_header = (typeof(skb->transport_header))~0U;
256
257 /* make sure we initialize shinfo sequentially */
258 shinfo = skb_shinfo(skb);
259 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
260 atomic_set(&shinfo->dataref, 1);
261 kmemcheck_annotate_variable(shinfo->destructor_arg);
262
263 if (flags & SKB_ALLOC_FCLONE) {
264 struct sk_buff_fclones *fclones;
265
266 fclones = container_of(skb, struct sk_buff_fclones, skb1);
267
268 kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
269 skb->fclone = SKB_FCLONE_ORIG;
270 atomic_set(&fclones->fclone_ref, 1);
271
272 fclones->skb2.fclone = SKB_FCLONE_CLONE;
273 fclones->skb2.pfmemalloc = pfmemalloc;
274 }
275out:
276 return skb;
277nodata:
278 kmem_cache_free(cache, skb);
279 skb = NULL;
280 goto out;
281}
282EXPORT_SYMBOL(__alloc_skb);
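
/* Illustrative sketch, not part of the original file: a typical sender
 * over-allocates via the alloc_skb() wrapper, reserves headroom for the
 * lower-layer headers and then appends its payload.  "hlen", "dlen" and
 * "payload" are hypothetical.
 */
static struct sk_buff * __maybe_unused example_alloc_and_fill(unsigned int hlen,
							      unsigned int dlen,
							      const void *payload)
{
	struct sk_buff *skb = alloc_skb(hlen + dlen, GFP_ATOMIC);

	if (!skb)
		return NULL;
	skb_reserve(skb, hlen);				/* headroom for headers */
	memcpy(skb_put(skb, dlen), payload, dlen);	/* append the payload */
	return skb;
}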
283
284/**
285 * __build_skb - build a network buffer
286 * @data: data buffer provided by caller
287 * @frag_size: size of data, or 0 if head was kmalloced
288 *
289 * Allocate a new &sk_buff. Caller provides space holding head and
290 * skb_shared_info. @data must have been allocated by kmalloc() only if
291 * @frag_size is 0, otherwise data should come from the page allocator
292 * or vmalloc()
293 * The return is the new skb buffer.
294 * On a failure the return is %NULL, and @data is not freed.
295 * Notes :
296 * Before IO, driver allocates only data buffer where NIC put incoming frame
297 * Driver should add room at head (NET_SKB_PAD) and
298 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
299 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
300 * before giving packet to stack.
301 * RX rings only contain data buffers, not full skbs.
302 */
303struct sk_buff *__build_skb(void *data, unsigned int frag_size)
304{
305 struct skb_shared_info *shinfo;
306 struct sk_buff *skb;
307 unsigned int size = frag_size ? : ksize(data);
308
309 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
310 if (!skb)
311 return NULL;
312
313 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
314
315 memset(skb, 0, offsetof(struct sk_buff, tail));
316 skb->truesize = SKB_TRUESIZE(size);
317 atomic_set(&skb->users, 1);
318 skb->head = data;
319 skb->data = data;
320 skb_reset_tail_pointer(skb);
321 skb->end = skb->tail + size;
322 skb->mac_header = (typeof(skb->mac_header))~0U;
323 skb->transport_header = (typeof(skb->transport_header))~0U;
324
325 /* make sure we initialize shinfo sequentially */
326 shinfo = skb_shinfo(skb);
327 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
328 atomic_set(&shinfo->dataref, 1);
329 kmemcheck_annotate_variable(shinfo->destructor_arg);
330
331 return skb;
332}
333
334/* build_skb() is a wrapper around __build_skb() that additionally
335 * takes care of skb->head_frag and skb->pfmemalloc.
336 * This means that if @frag_size is not zero, then @data must be backed
337 * by a page fragment, not kmalloc() or vmalloc()
338 */
339struct sk_buff *build_skb(void *data, unsigned int frag_size)
340{
341 struct sk_buff *skb = __build_skb(data, frag_size);
342
343 if (skb && frag_size) {
344 skb->head_frag = 1;
345 if (page_is_pfmemalloc(virt_to_head_page(data)))
346 skb->pfmemalloc = 1;
347 }
348 return skb;
349}
350EXPORT_SYMBOL(build_skb);
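
/* Illustrative sketch, not part of the original file: a driver RX path
 * pairing netdev_alloc_frag() with build_skb(), sized as the notes above
 * require.  "buf_len" is a hypothetical hardware buffer size.
 */
static struct sk_buff * __maybe_unused example_build_rx_skb(unsigned int buf_len)
{
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + buf_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	void *data = netdev_alloc_frag(truesize);
	struct sk_buff *skb;

	if (!data)
		return NULL;
	/* ... the NIC would DMA the frame to data + NET_SKB_PAD here ... */
	skb = build_skb(data, truesize);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}
	skb_reserve(skb, NET_SKB_PAD);	/* step over the reserved headroom */
	return skb;
}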
351
352#define NAPI_SKB_CACHE_SIZE 64
353
354struct napi_alloc_cache {
355 struct page_frag_cache page;
356 size_t skb_count;
357 void *skb_cache[NAPI_SKB_CACHE_SIZE];
358};
359
360static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
361static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
362
363static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
364{
365 struct page_frag_cache *nc;
366 unsigned long flags;
367 void *data;
368
369 local_irq_save(flags);
370 nc = this_cpu_ptr(&netdev_alloc_cache);
371 data = __alloc_page_frag(nc, fragsz, gfp_mask);
372 local_irq_restore(flags);
373 return data;
374}
375
376/**
377 * netdev_alloc_frag - allocate a page fragment
378 * @fragsz: fragment size
379 *
380 * Allocates a frag from a page for receive buffer.
381 * Uses GFP_ATOMIC allocations.
382 */
383void *netdev_alloc_frag(unsigned int fragsz)
384{
385 return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
386}
387EXPORT_SYMBOL(netdev_alloc_frag);
388
389static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
390{
391 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
392
393 return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
394}
395
396void *napi_alloc_frag(unsigned int fragsz)
397{
398 return __napi_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
399}
400EXPORT_SYMBOL(napi_alloc_frag);
401
402/**
403 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
404 * @dev: network device to receive on
405 * @len: length to allocate
406 * @gfp_mask: get_free_pages mask, passed to alloc_skb
407 *
408 * Allocate a new &sk_buff and assign it a usage count of one. The
409 * buffer has NET_SKB_PAD headroom built in. Users should allocate
410 * the headroom they think they need without accounting for the
411 * built in space. The built in space is used for optimisations.
412 *
413 * %NULL is returned if there is no free memory.
414 */
415struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
416 gfp_t gfp_mask)
417{
418 struct page_frag_cache *nc;
419 unsigned long flags;
420 struct sk_buff *skb;
421 bool pfmemalloc;
422 void *data;
423
424 len += NET_SKB_PAD;
425
426 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
427 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
428 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
429 if (!skb)
430 goto skb_fail;
431 goto skb_success;
432 }
433
434 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
435 len = SKB_DATA_ALIGN(len);
436
437 if (sk_memalloc_socks())
438 gfp_mask |= __GFP_MEMALLOC;
439
440 local_irq_save(flags);
441
442 nc = this_cpu_ptr(&netdev_alloc_cache);
443 data = __alloc_page_frag(nc, len, gfp_mask);
444 pfmemalloc = nc->pfmemalloc;
445
446 local_irq_restore(flags);
447
448 if (unlikely(!data))
449 return NULL;
450
451 skb = __build_skb(data, len);
452 if (unlikely(!skb)) {
453 skb_free_frag(data);
454 return NULL;
455 }
456
457 /* use OR instead of assignment to avoid clearing of bits in mask */
458 if (pfmemalloc)
459 skb->pfmemalloc = 1;
460 skb->head_frag = 1;
461
462skb_success:
463 skb_reserve(skb, NET_SKB_PAD);
464 skb->dev = dev;
465
466skb_fail:
467 return skb;
468}
469EXPORT_SYMBOL(__netdev_alloc_skb);
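
/* Illustrative sketch, not part of the original file: the common RX
 * pattern built on the netdev_alloc_skb() wrapper.  "rx_buf" and
 * "pkt_len" stand in for real DMA buffer handling, and a real driver
 * would also set skb->protocol (e.g. via eth_type_trans()) before
 * handing the packet to the stack.
 */
static int __maybe_unused example_netdev_rx(struct net_device *dev,
					    const void *rx_buf,
					    unsigned int pkt_len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, pkt_len);

	if (unlikely(!skb))
		return -ENOMEM;
	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);	/* copy the frame */
	return netif_rx(skb);
}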
470
471/**
472 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
473 * @napi: napi instance this buffer was allocated for
474 * @len: length to allocate
475 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
476 *
477 * Allocate a new sk_buff for use in NAPI receive. This buffer will
478 * attempt to allocate the head from a special reserved region used
479 * only for NAPI Rx allocation. By doing this we can save several
480 * CPU cycles by avoiding having to disable and re-enable IRQs.
481 *
482 * %NULL is returned if there is no free memory.
483 */
484struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
485 gfp_t gfp_mask)
486{
487 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
488 struct sk_buff *skb;
489 void *data;
490
491 len += NET_SKB_PAD + NET_IP_ALIGN;
492
493 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
494 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
495 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
496 if (!skb)
497 goto skb_fail;
498 goto skb_success;
499 }
500
501 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
502 len = SKB_DATA_ALIGN(len);
503
504 if (sk_memalloc_socks())
505 gfp_mask |= __GFP_MEMALLOC;
506
507 data = __alloc_page_frag(&nc->page, len, gfp_mask);
508 if (unlikely(!data))
509 return NULL;
510
511 skb = __build_skb(data, len);
512 if (unlikely(!skb)) {
513 skb_free_frag(data);
514 return NULL;
515 }
516
517 /* use OR instead of assignment to avoid clearing of bits in mask */
518 if (nc->page.pfmemalloc)
519 skb->pfmemalloc = 1;
520 skb->head_frag = 1;
521
522skb_success:
523 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
524 skb->dev = napi->dev;
525
526skb_fail:
527 return skb;
528}
529EXPORT_SYMBOL(__napi_alloc_skb);
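
/* Illustrative sketch, not part of the original file: inside a driver's
 * poll routine the napi_alloc_skb() wrapper avoids the IRQ save/restore
 * of the netdev variant.  "rx_buf" and "pkt_len" are hypothetical.
 */
static void __maybe_unused example_napi_rx(struct napi_struct *napi,
					   const void *rx_buf,
					   unsigned int pkt_len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, pkt_len);

	if (unlikely(!skb))
		return;
	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
	/* a real driver sets skb->protocol before this call */
	napi_gro_receive(napi, skb);
}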
530
531void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
532 int size, unsigned int truesize)
533{
534 skb_fill_page_desc(skb, i, page, off, size);
535 skb->len += size;
536 skb->data_len += size;
537 skb->truesize += truesize;
538}
539EXPORT_SYMBOL(skb_add_rx_frag);
540
541void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
542 unsigned int truesize)
543{
544 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
545
546 skb_frag_size_add(frag, size);
547 skb->len += size;
548 skb->data_len += size;
549 skb->truesize += truesize;
550}
551EXPORT_SYMBOL(skb_coalesce_rx_frag);
552
553static void skb_drop_list(struct sk_buff **listp)
554{
555 kfree_skb_list(*listp);
556 *listp = NULL;
557}
558
559static inline void skb_drop_fraglist(struct sk_buff *skb)
560{
561 skb_drop_list(&skb_shinfo(skb)->frag_list);
562}
563
564static void skb_clone_fraglist(struct sk_buff *skb)
565{
566 struct sk_buff *list;
567
568 skb_walk_frags(skb, list)
569 skb_get(list);
570}
571
572static void skb_free_head(struct sk_buff *skb)
573{
574 unsigned char *head = skb->head;
575
576 if (skb->head_frag)
577 skb_free_frag(head);
578 else
579 kfree(head);
580}
581
582static void skb_release_data(struct sk_buff *skb)
583{
584 struct skb_shared_info *shinfo = skb_shinfo(skb);
585 int i;
586
587 if (skb->cloned &&
588 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
589 &shinfo->dataref))
590 return;
591
592 for (i = 0; i < shinfo->nr_frags; i++)
593 __skb_frag_unref(&shinfo->frags[i]);
594
595 /*
596 * If skb buf is from userspace, we need to notify the caller
597 * the lower device DMA has done;
598 */
599 if (shinfo->tx_flags & SKBTX_DEV_ZEROCOPY) {
600 struct ubuf_info *uarg;
601
602 uarg = shinfo->destructor_arg;
603 if (uarg->callback)
604 uarg->callback(uarg, true);
605 }
606
607 if (shinfo->frag_list)
608 kfree_skb_list(shinfo->frag_list);
609
610 skb_free_head(skb);
611}
612
613/*
614 * Free an skbuff by memory without cleaning the state.
615 */
616static void kfree_skbmem(struct sk_buff *skb)
617{
618 struct sk_buff_fclones *fclones;
619
620 switch (skb->fclone) {
621 case SKB_FCLONE_UNAVAILABLE:
622 kmem_cache_free(skbuff_head_cache, skb);
623 return;
624
625 case SKB_FCLONE_ORIG:
626 fclones = container_of(skb, struct sk_buff_fclones, skb1);
627
628 /* We usually free the clone (TX completion) before the original skb.
629 * This test would have no chance to be true for the clone,
630 * while here, branch prediction will be good.
631 */
632 if (atomic_read(&fclones->fclone_ref) == 1)
633 goto fastpath;
634 break;
635
636 default: /* SKB_FCLONE_CLONE */
637 fclones = container_of(skb, struct sk_buff_fclones, skb2);
638 break;
639 }
640 if (!atomic_dec_and_test(&fclones->fclone_ref))
641 return;
642fastpath:
643 kmem_cache_free(skbuff_fclone_cache, fclones);
644}
645
646static void skb_release_head_state(struct sk_buff *skb)
647{
648 skb_dst_drop(skb);
649#ifdef CONFIG_XFRM
650 secpath_put(skb->sp);
651#endif
652 if (skb->destructor) {
653 WARN_ON(in_irq());
654 skb->destructor(skb);
655 }
656#if IS_ENABLED(CONFIG_NF_CONNTRACK)
657 nf_conntrack_put(skb->nfct);
658#endif
659#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
660 nf_bridge_put(skb->nf_bridge);
661#endif
662}
663
664/* Free everything but the sk_buff shell. */
665static void skb_release_all(struct sk_buff *skb)
666{
667 skb_release_head_state(skb);
668 if (likely(skb->head))
669 skb_release_data(skb);
670}
671
672/**
673 * __kfree_skb - private function
674 * @skb: buffer
675 *
676 * Free an sk_buff. Release anything attached to the buffer.
677 * Clean the state. This is an internal helper function. Users should
678 * always call kfree_skb
679 */
680
681void __kfree_skb(struct sk_buff *skb)
682{
683 skb_release_all(skb);
684 kfree_skbmem(skb);
685}
686EXPORT_SYMBOL(__kfree_skb);
687
688/**
689 * kfree_skb - free an sk_buff
690 * @skb: buffer to free
691 *
692 * Drop a reference to the buffer and free it if the usage count has
693 * hit zero.
694 */
695void kfree_skb(struct sk_buff *skb)
696{
697 if (unlikely(!skb))
698 return;
699 if (likely(atomic_read(&skb->users) == 1))
700 smp_rmb();
701 else if (likely(!atomic_dec_and_test(&skb->users)))
702 return;
703 trace_kfree_skb(skb, __builtin_return_address(0));
704 __kfree_skb(skb);
705}
706EXPORT_SYMBOL(kfree_skb);
707
708void kfree_skb_list(struct sk_buff *segs)
709{
710 while (segs) {
711 struct sk_buff *next = segs->next;
712
713 kfree_skb(segs);
714 segs = next;
715 }
716}
717EXPORT_SYMBOL(kfree_skb_list);
718
719/**
720 * skb_tx_error - report an sk_buff xmit error
721 * @skb: buffer that triggered an error
722 *
723 * Report xmit error if a device callback is tracking this skb.
724 * skb must be freed afterwards.
725 */
726void skb_tx_error(struct sk_buff *skb)
727{
728 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
729 struct ubuf_info *uarg;
730
731 uarg = skb_shinfo(skb)->destructor_arg;
732 if (uarg->callback)
733 uarg->callback(uarg, false);
734 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
735 }
736}
737EXPORT_SYMBOL(skb_tx_error);
738
739/**
740 * consume_skb - free an skbuff
741 * @skb: buffer to free
742 *
743 * Drop a reference to the buffer and free it if the usage count has hit
744 * zero. Functions identically to kfree_skb(), but kfree_skb() assumes the
745 * frame is being dropped after a failure and notes that.
746 */
747void consume_skb(struct sk_buff *skb)
748{
749 if (unlikely(!skb))
750 return;
751 if (likely(atomic_read(&skb->users) == 1))
752 smp_rmb();
753 else if (likely(!atomic_dec_and_test(&skb->users)))
754 return;
755 trace_consume_skb(skb);
756 __kfree_skb(skb);
757}
758EXPORT_SYMBOL(consume_skb);
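
/* Illustrative sketch, not part of the original file: the two frees only
 * differ in the tracepoint they fire, so error paths should prefer
 * kfree_skb() and successful completion paths consume_skb().
 */
static void __maybe_unused example_free_skb(struct sk_buff *skb, bool dropped)
{
	if (dropped)
		kfree_skb(skb);		/* shows up as a drop in tracing */
	else
		consume_skb(skb);	/* normal, expected free */
}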
759
760void __kfree_skb_flush(void)
761{
762 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
763
764 /* flush skb_cache if containing objects */
765 if (nc->skb_count) {
766 kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
767 nc->skb_cache);
768 nc->skb_count = 0;
769 }
770}
771
772static inline void _kfree_skb_defer(struct sk_buff *skb)
773{
774 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
775
776 /* drop skb->head and call any destructors for packet */
777 skb_release_all(skb);
778
779 /* record skb to CPU local list */
780 nc->skb_cache[nc->skb_count++] = skb;
781
782#ifdef CONFIG_SLUB
783 /* SLUB writes into objects when freeing */
784 prefetchw(skb);
785#endif
786
787 /* flush skb_cache if it is filled */
788 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
789 kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
790 nc->skb_cache);
791 nc->skb_count = 0;
792 }
793}
794void __kfree_skb_defer(struct sk_buff *skb)
795{
796 _kfree_skb_defer(skb);
797}
798
799void napi_consume_skb(struct sk_buff *skb, int budget)
800{
801 if (unlikely(!skb))
802 return;
803
804 /* A zero budget indicates a non-NAPI context called us, like netpoll */
805 if (unlikely(!budget)) {
806 dev_consume_skb_any(skb);
807 return;
808 }
809
810 if (likely(atomic_read(&skb->users) == 1))
811 smp_rmb();
812 else if (likely(!atomic_dec_and_test(&skb->users)))
813 return;
814 /* if we reach here, the SKB is ready to be freed */
815 trace_consume_skb(skb);
816
817 /* if SKB is a clone, don't handle this case */
818 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
819 __kfree_skb(skb);
820 return;
821 }
822
823 _kfree_skb_defer(skb);
824}
825EXPORT_SYMBOL(napi_consume_skb);
826
827/* Make sure a field is enclosed inside headers_start/headers_end section */
828#define CHECK_SKB_FIELD(field) \
829 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
830 offsetof(struct sk_buff, headers_start)); \
831 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
832 offsetof(struct sk_buff, headers_end)); \
833
834static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
835{
836 new->tstamp = old->tstamp;
837 /* We do not copy old->sk */
838 new->dev = old->dev;
839 memcpy(new->cb, old->cb, sizeof(old->cb));
840 skb_dst_copy(new, old);
841#ifdef CONFIG_XFRM
842 new->sp = secpath_get(old->sp);
843#endif
844 __nf_copy(new, old, false);
845
846 /* Note : this field could be in headers_start/headers_end section
847 * It is not yet because we do not want to have a 16 bit hole
848 */
849 new->queue_mapping = old->queue_mapping;
850
851 memcpy(&new->headers_start, &old->headers_start,
852 offsetof(struct sk_buff, headers_end) -
853 offsetof(struct sk_buff, headers_start));
854 CHECK_SKB_FIELD(protocol);
855 CHECK_SKB_FIELD(csum);
856 CHECK_SKB_FIELD(hash);
857 CHECK_SKB_FIELD(priority);
858 CHECK_SKB_FIELD(skb_iif);
859 CHECK_SKB_FIELD(vlan_proto);
860 CHECK_SKB_FIELD(vlan_tci);
861 CHECK_SKB_FIELD(transport_header);
862 CHECK_SKB_FIELD(network_header);
863 CHECK_SKB_FIELD(mac_header);
864 CHECK_SKB_FIELD(inner_protocol);
865 CHECK_SKB_FIELD(inner_transport_header);
866 CHECK_SKB_FIELD(inner_network_header);
867 CHECK_SKB_FIELD(inner_mac_header);
868 CHECK_SKB_FIELD(mark);
869#ifdef CONFIG_NETWORK_SECMARK
870 CHECK_SKB_FIELD(secmark);
871#endif
872#ifdef CONFIG_NET_RX_BUSY_POLL
873 CHECK_SKB_FIELD(napi_id);
874#endif
875#ifdef CONFIG_XPS
876 CHECK_SKB_FIELD(sender_cpu);
877#endif
878#ifdef CONFIG_NET_SCHED
879 CHECK_SKB_FIELD(tc_index);
880#ifdef CONFIG_NET_CLS_ACT
881 CHECK_SKB_FIELD(tc_verd);
882#endif
883#endif
884
885}
886
887/*
888 * You should not add any new code to this function. Add it to
889 * __copy_skb_header above instead.
890 */
891static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
892{
893#define C(x) n->x = skb->x
894
895 n->next = n->prev = NULL;
896 n->sk = NULL;
897 __copy_skb_header(n, skb);
898
899 C(len);
900 C(data_len);
901 C(mac_len);
902 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
903 n->cloned = 1;
904 n->nohdr = 0;
905 n->destructor = NULL;
906 C(tail);
907 C(end);
908 C(head);
909 C(head_frag);
910 C(data);
911 C(truesize);
912 atomic_set(&n->users, 1);
913
914 atomic_inc(&(skb_shinfo(skb)->dataref));
915 skb->cloned = 1;
916
917 return n;
918#undef C
919}
920
921/**
922 * skb_morph - morph one skb into another
923 * @dst: the skb to receive the contents
924 * @src: the skb to supply the contents
925 *
926 * This is identical to skb_clone except that the target skb is
927 * supplied by the user.
928 *
929 * The target skb is returned upon exit.
930 */
931struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
932{
933 skb_release_all(dst);
934 return __skb_clone(dst, src);
935}
936EXPORT_SYMBOL_GPL(skb_morph);
937
938/**
939 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
940 * @skb: the skb to modify
941 * @gfp_mask: allocation priority
942 *
943 * This must be called on SKBTX_DEV_ZEROCOPY skb.
944 * It will copy all frags into kernel and drop the reference
945 * to userspace pages.
946 *
947 * If this function is called from an interrupt, @gfp_mask must be
948 * %GFP_ATOMIC.
949 *
950 * Returns 0 on success or a negative error code on failure
951 * to allocate kernel memory to copy to.
952 */
953int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
954{
955 int i;
956 int num_frags = skb_shinfo(skb)->nr_frags;
957 struct page *page, *head = NULL;
958 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
959
960 for (i = 0; i < num_frags; i++) {
961 u8 *vaddr;
962 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
963
964 page = alloc_page(gfp_mask);
965 if (!page) {
966 while (head) {
967 struct page *next = (struct page *)page_private(head);
968 put_page(head);
969 head = next;
970 }
971 return -ENOMEM;
972 }
973 vaddr = kmap_atomic(skb_frag_page(f));
974 memcpy(page_address(page),
975 vaddr + f->page_offset, skb_frag_size(f));
976 kunmap_atomic(vaddr);
977 set_page_private(page, (unsigned long)head);
978 head = page;
979 }
980
981 /* skb frags release userspace buffers */
982 for (i = 0; i < num_frags; i++)
983 skb_frag_unref(skb, i);
984
985 uarg->callback(uarg, false);
986
987 /* skb frags point to kernel buffers */
988 for (i = num_frags - 1; i >= 0; i--) {
989 __skb_fill_page_desc(skb, i, head, 0,
990 skb_shinfo(skb)->frags[i].size);
991 head = (struct page *)page_private(head);
992 }
993
994 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
995 return 0;
996}
997EXPORT_SYMBOL_GPL(skb_copy_ubufs);
998
999/**
1000 * skb_clone - duplicate an sk_buff
1001 * @skb: buffer to clone
1002 * @gfp_mask: allocation priority
1003 *
1004 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
1005 * copies share the same packet data but not structure. The new
1006 * buffer has a reference count of 1. If the allocation fails the
1007 * function returns %NULL otherwise the new buffer is returned.
1008 *
1009 * If this function is called from an interrupt, @gfp_mask must be
1010 * %GFP_ATOMIC.
1011 */
1012
1013struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1014{
1015 struct sk_buff_fclones *fclones = container_of(skb,
1016 struct sk_buff_fclones,
1017 skb1);
1018 struct sk_buff *n;
1019
1020 if (skb_orphan_frags(skb, gfp_mask))
1021 return NULL;
1022
1023 if (skb->fclone == SKB_FCLONE_ORIG &&
1024 atomic_read(&fclones->fclone_ref) == 1) {
1025 n = &fclones->skb2;
1026 atomic_set(&fclones->fclone_ref, 2);
1027 } else {
1028 if (skb_pfmemalloc(skb))
1029 gfp_mask |= __GFP_MEMALLOC;
1030
1031 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1032 if (!n)
1033 return NULL;
1034
1035 kmemcheck_annotate_bitfield(n, flags1);
1036 n->fclone = SKB_FCLONE_UNAVAILABLE;
1037 }
1038
1039 return __skb_clone(n, skb);
1040}
1041EXPORT_SYMBOL(skb_clone);
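
/* Illustrative sketch, not part of the original file: a duplication path
 * (e.g. a tap) clones the skb so the caller keeps the original; the clone
 * shares the data area, so header edits would first need skb_cow_head().
 * Transmitting the clone unmodified here is purely for illustration.
 */
static int __maybe_unused example_clone_and_xmit(struct sk_buff *skb,
						 struct net_device *dev)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;
	clone->dev = dev;
	return dev_queue_xmit(clone);
}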
1042
1043static void skb_headers_offset_update(struct sk_buff *skb, int off)
1044{
1045 /* Only adjust this if it actually is csum_start rather than csum */
1046 if (skb->ip_summed == CHECKSUM_PARTIAL)
1047 skb->csum_start += off;
1048 /* {transport,network,mac}_header and tail are relative to skb->head */
1049 skb->transport_header += off;
1050 skb->network_header += off;
1051 if (skb_mac_header_was_set(skb))
1052 skb->mac_header += off;
1053 skb->inner_transport_header += off;
1054 skb->inner_network_header += off;
1055 skb->inner_mac_header += off;
1056}
1057
1058static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1059{
1060 __copy_skb_header(new, old);
1061
1062 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1063 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1064 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1065}
1066
1067static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1068{
1069 if (skb_pfmemalloc(skb))
1070 return SKB_ALLOC_RX;
1071 return 0;
1072}
1073
1074/**
1075 * skb_copy - create private copy of an sk_buff
1076 * @skb: buffer to copy
1077 * @gfp_mask: allocation priority
1078 *
1079 * Make a copy of both an &sk_buff and its data. This is used when the
1080 * caller wishes to modify the data and needs a private copy of the
1081 * data to alter. Returns %NULL on failure or the pointer to the buffer
1082 * on success. The returned buffer has a reference count of 1.
1083 *
1084 * As by-product this function converts non-linear &sk_buff to linear
1085 * one, so that &sk_buff becomes completely private and caller is allowed
1086 * to modify all the data of returned buffer. This means that this
1087 * function is not recommended for use in circumstances when only
1088 * header is going to be modified. Use pskb_copy() instead.
1089 */
1090
1091struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1092{
1093 int headerlen = skb_headroom(skb);
1094 unsigned int size = skb_end_offset(skb) + skb->data_len;
1095 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1096 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1097
1098 if (!n)
1099 return NULL;
1100
1101 /* Set the data pointer */
1102 skb_reserve(n, headerlen);
1103 /* Set the tail pointer and length */
1104 skb_put(n, skb->len);
1105
1106 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
1107 BUG();
1108
1109 copy_skb_header(n, skb);
1110 return n;
1111}
1112EXPORT_SYMBOL(skb_copy);
1113
1114/**
1115 * __pskb_copy_fclone - create copy of an sk_buff with private head.
1116 * @skb: buffer to copy
1117 * @headroom: headroom of new skb
1118 * @gfp_mask: allocation priority
1119 * @fclone: if true allocate the copy of the skb from the fclone
1120 * cache instead of the head cache; it is recommended to set this
1121 * to true for the cases where the copy will likely be cloned
1122 *
1123 * Make a copy of both an &sk_buff and part of its data, located
1124 * in header. Fragmented data remain shared. This is used when
1125 * the caller wishes to modify only header of &sk_buff and needs
1126 * private copy of the header to alter. Returns %NULL on failure
1127 * or the pointer to the buffer on success.
1128 * The returned buffer has a reference count of 1.
1129 */
1130
1131struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1132 gfp_t gfp_mask, bool fclone)
1133{
1134 unsigned int size = skb_headlen(skb) + headroom;
1135 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1136 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1137
1138 if (!n)
1139 goto out;
1140
1141 /* Set the data pointer */
1142 skb_reserve(n, headroom);
1143 /* Set the tail pointer and length */
1144 skb_put(n, skb_headlen(skb));
1145 /* Copy the bytes */
1146 skb_copy_from_linear_data(skb, n->data, n->len);
1147
1148 n->truesize += skb->data_len;
1149 n->data_len = skb->data_len;
1150 n->len = skb->len;
1151
1152 if (skb_shinfo(skb)->nr_frags) {
1153 int i;
1154
1155 if (skb_orphan_frags(skb, gfp_mask)) {
1156 kfree_skb(n);
1157 n = NULL;
1158 goto out;
1159 }
1160 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1161 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1162 skb_frag_ref(skb, i);
1163 }
1164 skb_shinfo(n)->nr_frags = i;
1165 }
1166
1167 if (skb_has_frag_list(skb)) {
1168 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1169 skb_clone_fraglist(n);
1170 }
1171
1172 copy_skb_header(n, skb);
1173out:
1174 return n;
1175}
1176EXPORT_SYMBOL(__pskb_copy_fclone);
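
/* Illustrative sketch, not part of the original file: as the comments
 * above recommend, take a full skb_copy() only when the payload will be
 * modified; header-only edits can keep the fragments shared.
 */
static struct sk_buff * __maybe_unused example_private_copy(struct sk_buff *skb,
							    bool edit_payload)
{
	if (edit_payload)
		return skb_copy(skb, GFP_ATOMIC);
	return pskb_copy(skb, GFP_ATOMIC);	/* fragments remain shared */
}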
1177
1178/**
1179 * pskb_expand_head - reallocate header of &sk_buff
1180 * @skb: buffer to reallocate
1181 * @nhead: room to add at head
1182 * @ntail: room to add at tail
1183 * @gfp_mask: allocation priority
1184 *
1185 * Expands (or creates identical copy, if @nhead and @ntail are zero)
1186 * header of @skb. &sk_buff itself is not changed. &sk_buff MUST have
1187 * reference count of 1. Returns zero on success or a negative error
1188 * if expansion failed. In the latter case, &sk_buff is not changed.
1189 *
1190 * All the pointers pointing into skb header may change and must be
1191 * reloaded after call to this function.
1192 */
1193
1194int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1195 gfp_t gfp_mask)
1196{
1197 int i;
1198 u8 *data;
1199 int size = nhead + skb_end_offset(skb) + ntail;
1200 long off;
1201
1202 BUG_ON(nhead < 0);
1203
1204 if (skb_shared(skb))
1205 BUG();
1206
1207 size = SKB_DATA_ALIGN(size);
1208
1209 if (skb_pfmemalloc(skb))
1210 gfp_mask |= __GFP_MEMALLOC;
1211 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1212 gfp_mask, NUMA_NO_NODE, NULL);
1213 if (!data)
1214 goto nodata;
1215 size = SKB_WITH_OVERHEAD(ksize(data));
1216
1217 /* Copy only real data... and, alas, header. This should be
1218 * optimized for the cases when header is void.
1219 */
1220 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1221
1222 memcpy((struct skb_shared_info *)(data + size),
1223 skb_shinfo(skb),
1224 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1225
1226 /*
1227 * If shinfo is shared we must drop the old head gracefully, but if it
1228 * is not we can just drop the old head and leave the existing refcount
1229 * alone, since all we did is relocate the values.
1230 */
1231 if (skb_cloned(skb)) {
1232 /* copy this zero copy skb frags */
1233 if (skb_orphan_frags(skb, gfp_mask))
1234 goto nofrags;
1235 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1236 skb_frag_ref(skb, i);
1237
1238 if (skb_has_frag_list(skb))
1239 skb_clone_fraglist(skb);
1240
1241 skb_release_data(skb);
1242 } else {
1243 skb_free_head(skb);
1244 }
1245 off = (data + nhead) - skb->head;
1246
1247 skb->head = data;
1248 skb->head_frag = 0;
1249 skb->data += off;
1250#ifdef NET_SKBUFF_DATA_USES_OFFSET
1251 skb->end = size;
1252 off = nhead;
1253#else
1254 skb->end = skb->head + size;
1255#endif
1256 skb->tail += off;
1257 skb_headers_offset_update(skb, nhead);
1258 skb->cloned = 0;
1259 skb->hdr_len = 0;
1260 skb->nohdr = 0;
1261 atomic_set(&skb_shinfo(skb)->dataref, 1);
1262 return 0;
1263
1264nofrags:
1265 kfree(data);
1266nodata:
1267 return -ENOMEM;
1268}
1269EXPORT_SYMBOL(pskb_expand_head);
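
/* Illustrative sketch, not part of the original file: an encapsulation
 * path making room for "needed" extra header bytes.  Header pointers are
 * only dereferenced after the possible reallocation, as required above.
 */
static int __maybe_unused example_push_outer_header(struct sk_buff *skb,
						    unsigned int needed)
{
	if (skb_headroom(skb) < needed || skb_cloned(skb)) {
		int err = pskb_expand_head(skb, SKB_DATA_ALIGN(needed), 0,
					   GFP_ATOMIC);

		if (err)
			return err;
	}
	memset(skb_push(skb, needed), 0, needed);	/* blank outer header */
	return 0;
}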
1270
1271/* Make private copy of skb with writable head and some headroom */
1272
1273struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1274{
1275 struct sk_buff *skb2;
1276 int delta = headroom - skb_headroom(skb);
1277
1278 if (delta <= 0)
1279 skb2 = pskb_copy(skb, GFP_ATOMIC);
1280 else {
1281 skb2 = skb_clone(skb, GFP_ATOMIC);
1282 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1283 GFP_ATOMIC)) {
1284 kfree_skb(skb2);
1285 skb2 = NULL;
1286 }
1287 }
1288 return skb2;
1289}
1290EXPORT_SYMBOL(skb_realloc_headroom);
1291
1292/**
1293 * skb_copy_expand - copy and expand sk_buff
1294 * @skb: buffer to copy
1295 * @newheadroom: new free bytes at head
1296 * @newtailroom: new free bytes at tail
1297 * @gfp_mask: allocation priority
1298 *
1299 * Make a copy of both an &sk_buff and its data and while doing so
1300 * allocate additional space.
1301 *
1302 * This is used when the caller wishes to modify the data and needs a
1303 * private copy of the data to alter as well as more space for new fields.
1304 * Returns %NULL on failure or the pointer to the buffer
1305 * on success. The returned buffer has a reference count of 1.
1306 *
1307 * You must pass %GFP_ATOMIC as the allocation priority if this function
1308 * is called from an interrupt.
1309 */
1310struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1311 int newheadroom, int newtailroom,
1312 gfp_t gfp_mask)
1313{
1314 /*
1315 * Allocate the copy buffer
1316 */
1317 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1318 gfp_mask, skb_alloc_rx_flag(skb),
1319 NUMA_NO_NODE);
1320 int oldheadroom = skb_headroom(skb);
1321 int head_copy_len, head_copy_off;
1322
1323 if (!n)
1324 return NULL;
1325
1326 skb_reserve(n, newheadroom);
1327
1328 /* Set the tail pointer and length */
1329 skb_put(n, skb->len);
1330
1331 head_copy_len = oldheadroom;
1332 head_copy_off = 0;
1333 if (newheadroom <= head_copy_len)
1334 head_copy_len = newheadroom;
1335 else
1336 head_copy_off = newheadroom - head_copy_len;
1337
1338 /* Copy the linear header and data. */
1339 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1340 skb->len + head_copy_len))
1341 BUG();
1342
1343 copy_skb_header(n, skb);
1344
1345 skb_headers_offset_update(n, newheadroom - oldheadroom);
1346
1347 return n;
1348}
1349EXPORT_SYMBOL(skb_copy_expand);
1350
1351/**
1352 * skb_pad - zero pad the tail of an skb
1353 * @skb: buffer to pad
1354 * @pad: space to pad
1355 *
1356 * Ensure that a buffer is followed by a padding area that is zero
1357 * filled. Used by network drivers which may DMA or transfer data
1358 * beyond the buffer end onto the wire.
1359 *
1360 * May return error in out of memory cases. The skb is freed on error.
1361 */
1362
1363int skb_pad(struct sk_buff *skb, int pad)
1364{
1365 int err;
1366 int ntail;
1367
1368 /* If the skbuff is non-linear, tailroom is always zero. */
1369 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1370 memset(skb->data+skb->len, 0, pad);
1371 return 0;
1372 }
1373
1374 ntail = skb->data_len + pad - (skb->end - skb->tail);
1375 if (likely(skb_cloned(skb) || ntail > 0)) {
1376 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1377 if (unlikely(err))
1378 goto free_skb;
1379 }
1380
1381 /* FIXME: The use of this function with non-linear skb's really needs
1382 * to be audited.
1383 */
1384 err = skb_linearize(skb);
1385 if (unlikely(err))
1386 goto free_skb;
1387
1388 memset(skb->data + skb->len, 0, pad);
1389 return 0;
1390
1391free_skb:
1392 kfree_skb(skb);
1393 return err;
1394}
1395EXPORT_SYMBOL(skb_pad);
1396
1397/**
1398 * pskb_put - add data to the tail of a potentially fragmented buffer
1399 * @skb: start of the buffer to use
1400 * @tail: tail fragment of the buffer to use
1401 * @len: amount of data to add
1402 *
1403 * This function extends the used data area of the potentially
1404 * fragmented buffer. @tail must be the last fragment of @skb -- or
1405 * @skb itself. If this would exceed the total buffer size the kernel
1406 * will panic. A pointer to the first byte of the extra data is
1407 * returned.
1408 */
1409
1410unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1411{
1412 if (tail != skb) {
1413 skb->data_len += len;
1414 skb->len += len;
1415 }
1416 return skb_put(tail, len);
1417}
1418EXPORT_SYMBOL_GPL(pskb_put);
1419
1420/**
1421 * skb_put - add data to a buffer
1422 * @skb: buffer to use
1423 * @len: amount of data to add
1424 *
1425 * This function extends the used data area of the buffer. If this would
1426 * exceed the total buffer size the kernel will panic. A pointer to the
1427 * first byte of the extra data is returned.
1428 */
1429unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1430{
1431 unsigned char *tmp = skb_tail_pointer(skb);
1432 SKB_LINEAR_ASSERT(skb);
1433 skb->tail += len;
1434 skb->len += len;
1435 if (unlikely(skb->tail > skb->end))
1436 skb_over_panic(skb, len, __builtin_return_address(0));
1437 return tmp;
1438}
1439EXPORT_SYMBOL(skb_put);
1440
1441/**
1442 * skb_push - add data to the start of a buffer
1443 * @skb: buffer to use
1444 * @len: amount of data to add
1445 *
1446 * This function extends the used data area of the buffer at the buffer
1447 * start. If this would exceed the total buffer headroom the kernel will
1448 * panic. A pointer to the first byte of the extra data is returned.
1449 */
1450unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1451{
1452 skb->data -= len;
1453 skb->len += len;
1454 if (unlikely(skb->data<skb->head))
1455 skb_under_panic(skb, len, __builtin_return_address(0));
1456 return skb->data;
1457}
1458EXPORT_SYMBOL(skb_push);
1459
1460/**
1461 * skb_pull - remove data from the start of a buffer
1462 * @skb: buffer to use
1463 * @len: amount of data to remove
1464 *
1465 * This function removes data from the start of a buffer, returning
1466 * the memory to the headroom. A pointer to the next data in the buffer
1467 * is returned. Once the data has been pulled future pushes will overwrite
1468 * the old data.
1469 */
1470unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1471{
1472 return skb_pull_inline(skb, len);
1473}
1474EXPORT_SYMBOL(skb_pull);
1475
1476/**
1477 * skb_trim - remove end from a buffer
1478 * @skb: buffer to alter
1479 * @len: new length
1480 *
1481 * Cut the length of a buffer down by removing data from the tail. If
1482 * the buffer is already under the length specified it is not modified.
1483 * The skb must be linear.
1484 */
1485void skb_trim(struct sk_buff *skb, unsigned int len)
1486{
1487 if (skb->len > len)
1488 __skb_trim(skb, len);
1489}
1490EXPORT_SYMBOL(skb_trim);
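
/* Illustrative sketch, not part of the original file: how the four
 * data-area operations above combine on a linear skb.  It assumes the
 * caller allocated at least 8 bytes of headroom and 108 bytes of
 * tailroom; the sizes are hypothetical.
 */
static void __maybe_unused example_data_ops(struct sk_buff *skb)
{
	/* build: append 100 bytes of payload, then prepend an 8 byte header */
	memset(skb_put(skb, 100), 0, 100);
	memset(skb_push(skb, 8), 0, 8);

	/* parse: step past the 8 byte header ... */
	skb_pull(skb, 8);
	/* ... and drop anything beyond 64 bytes of payload */
	skb_trim(skb, 64);
}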
1491
1492/* Trims skb to length len. It can change skb pointers.
1493 */
1494
1495int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1496{
1497 struct sk_buff **fragp;
1498 struct sk_buff *frag;
1499 int offset = skb_headlen(skb);
1500 int nfrags = skb_shinfo(skb)->nr_frags;
1501 int i;
1502 int err;
1503
1504 if (skb_cloned(skb) &&
1505 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1506 return err;
1507
1508 i = 0;
1509 if (offset >= len)
1510 goto drop_pages;
1511
1512 for (; i < nfrags; i++) {
1513 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1514
1515 if (end < len) {
1516 offset = end;
1517 continue;
1518 }
1519
1520 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1521
1522drop_pages:
1523 skb_shinfo(skb)->nr_frags = i;
1524
1525 for (; i < nfrags; i++)
1526 skb_frag_unref(skb, i);
1527
1528 if (skb_has_frag_list(skb))
1529 skb_drop_fraglist(skb);
1530 goto done;
1531 }
1532
1533 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1534 fragp = &frag->next) {
1535 int end = offset + frag->len;
1536
1537 if (skb_shared(frag)) {
1538 struct sk_buff *nfrag;
1539
1540 nfrag = skb_clone(frag, GFP_ATOMIC);
1541 if (unlikely(!nfrag))
1542 return -ENOMEM;
1543
1544 nfrag->next = frag->next;
1545 consume_skb(frag);
1546 frag = nfrag;
1547 *fragp = frag;
1548 }
1549
1550 if (end < len) {
1551 offset = end;
1552 continue;
1553 }
1554
1555 if (end > len &&
1556 unlikely((err = pskb_trim(frag, len - offset))))
1557 return err;
1558
1559 if (frag->next)
1560 skb_drop_list(&frag->next);
1561 break;
1562 }
1563
1564done:
1565 if (len > skb_headlen(skb)) {
1566 skb->data_len -= skb->len - len;
1567 skb->len = len;
1568 } else {
1569 skb->len = len;
1570 skb->data_len = 0;
1571 skb_set_tail_pointer(skb, len);
1572 }
1573
1574 return 0;
1575}
1576EXPORT_SYMBOL(___pskb_trim);
1577
1578/**
1579 * __pskb_pull_tail - advance tail of skb header
1580 * @skb: buffer to reallocate
1581 * @delta: number of bytes to advance tail
1582 *
1583 * The function makes sense only on a fragmented &sk_buff,
1584 * it expands header moving its tail forward and copying necessary
1585 * data from fragmented part.
1586 *
1587 * &sk_buff MUST have reference count of 1.
1588 *
1589 * Returns %NULL (and &sk_buff does not change) if pull failed
1590 * or value of new tail of skb in the case of success.
1591 *
1592 * All the pointers pointing into skb header may change and must be
1593 * reloaded after call to this function.
1594 */
1595
1596/* Moves tail of skb head forward, copying data from fragmented part,
1597 * when it is necessary.
1598 * 1. It may fail due to malloc failure.
1599 * 2. It may change skb pointers.
1600 *
1601 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1602 */
1603unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1604{
1605 /* If skb does not have enough free space at tail, get a new one
1606 * plus 128 bytes for future expansions. If we have enough
1607 * room at tail, reallocate without expansion only if skb is cloned.
1608 */
1609 int i, k, eat = (skb->tail + delta) - skb->end;
1610
1611 if (eat > 0 || skb_cloned(skb)) {
1612 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1613 GFP_ATOMIC))
1614 return NULL;
1615 }
1616
1617 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1618 BUG();
1619
1620 /* Optimization: no fragments, no reasons to preestimate
1621 * size of pulled pages. Superb.
1622 */
1623 if (!skb_has_frag_list(skb))
1624 goto pull_pages;
1625
1626 /* Estimate size of pulled pages. */
1627 eat = delta;
1628 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1629 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1630
1631 if (size >= eat)
1632 goto pull_pages;
1633 eat -= size;
1634 }
1635
1636 /* If we need to update the frag list, we are in trouble.
1637 * Certainly, it is possible to add an offset to skb data,
1638 * but taking into account that pulling is expected to
1639 * be a very rare operation, it is worth fighting against
1640 * further bloating of the skb head and crucifying ourselves here instead.
1641 * Pure masochism, indeed. 8)8)
1642 */
1643 if (eat) {
1644 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1645 struct sk_buff *clone = NULL;
1646 struct sk_buff *insp = NULL;
1647
1648 do {
1649 BUG_ON(!list);
1650
1651 if (list->len <= eat) {
1652 /* Eaten as whole. */
1653 eat -= list->len;
1654 list = list->next;
1655 insp = list;
1656 } else {
1657 /* Eaten partially. */
1658
1659 if (skb_shared(list)) {
1660 /* Sucks! We need to fork list. :-( */
1661 clone = skb_clone(list, GFP_ATOMIC);
1662 if (!clone)
1663 return NULL;
1664 insp = list->next;
1665 list = clone;
1666 } else {
1667 /* This may be pulled without
1668 * problems. */
1669 insp = list;
1670 }
1671 if (!pskb_pull(list, eat)) {
1672 kfree_skb(clone);
1673 return NULL;
1674 }
1675 break;
1676 }
1677 } while (eat);
1678
1679 /* Free pulled out fragments. */
1680 while ((list = skb_shinfo(skb)->frag_list) != insp) {
1681 skb_shinfo(skb)->frag_list = list->next;
1682 kfree_skb(list);
1683 }
1684 /* And insert new clone at head. */
1685 if (clone) {
1686 clone->next = list;
1687 skb_shinfo(skb)->frag_list = clone;
1688 }
1689 }
1690 /* Success! Now we may commit changes to skb data. */
1691
1692pull_pages:
1693 eat = delta;
1694 k = 0;
1695 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1696 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1697
1698 if (size <= eat) {
1699 skb_frag_unref(skb, i);
1700 eat -= size;
1701 } else {
1702 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1703 if (eat) {
1704 skb_shinfo(skb)->frags[k].page_offset += eat;
1705 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1706 eat = 0;
1707 }
1708 k++;
1709 }
1710 }
1711 skb_shinfo(skb)->nr_frags = k;
1712
1713 skb->tail += delta;
1714 skb->data_len -= delta;
1715
1716 return skb_tail_pointer(skb);
1717}
1718EXPORT_SYMBOL(__pskb_pull_tail);
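
/* Illustrative sketch, not part of the original file: callers rarely use
 * __pskb_pull_tail() directly; the usual idiom is pskb_may_pull() before
 * parsing a header that may still sit in the fragments.
 */
static int __maybe_unused example_pull_header(struct sk_buff *skb,
					      unsigned int hlen)
{
	if (!pskb_may_pull(skb, hlen))
		return -EINVAL;
	/* skb->head may have been reallocated; re-read skb->data, which now
	 * covers at least hlen linear bytes
	 */
	return 0;
}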
1719
1720/**
1721 * skb_copy_bits - copy bits from skb to kernel buffer
1722 * @skb: source skb
1723 * @offset: offset in source
1724 * @to: destination buffer
1725 * @len: number of bytes to copy
1726 *
1727 * Copy the specified number of bytes from the source skb to the
1728 * destination buffer.
1729 *
1730 * CAUTION ! :
1731 * If its prototype is ever changed,
1732 * check arch/{*}/net/{*}.S files,
1733 * since it is called from BPF assembly code.
1734 */
1735int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1736{
1737 int start = skb_headlen(skb);
1738 struct sk_buff *frag_iter;
1739 int i, copy;
1740
1741 if (offset > (int)skb->len - len)
1742 goto fault;
1743
1744 /* Copy header. */
1745 if ((copy = start - offset) > 0) {
1746 if (copy > len)
1747 copy = len;
1748 skb_copy_from_linear_data_offset(skb, offset, to, copy);
1749 if ((len -= copy) == 0)
1750 return 0;
1751 offset += copy;
1752 to += copy;
1753 }
1754
1755 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1756 int end;
1757 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1758
1759 WARN_ON(start > offset + len);
1760
1761 end = start + skb_frag_size(f);
1762 if ((copy = end - offset) > 0) {
1763 u8 *vaddr;
1764
1765 if (copy > len)
1766 copy = len;
1767
1768 vaddr = kmap_atomic(skb_frag_page(f));
1769 memcpy(to,
1770 vaddr + f->page_offset + offset - start,
1771 copy);
1772 kunmap_atomic(vaddr);
1773
1774 if ((len -= copy) == 0)
1775 return 0;
1776 offset += copy;
1777 to += copy;
1778 }
1779 start = end;
1780 }
1781
1782 skb_walk_frags(skb, frag_iter) {
1783 int end;
1784
1785 WARN_ON(start > offset + len);
1786
1787 end = start + frag_iter->len;
1788 if ((copy = end - offset) > 0) {
1789 if (copy > len)
1790 copy = len;
1791 if (skb_copy_bits(frag_iter, offset - start, to, copy))
1792 goto fault;
1793 if ((len -= copy) == 0)
1794 return 0;
1795 offset += copy;
1796 to += copy;
1797 }
1798 start = end;
1799 }
1800
1801 if (!len)
1802 return 0;
1803
1804fault:
1805 return -EFAULT;
1806}
1807EXPORT_SYMBOL(skb_copy_bits);
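
/* Illustrative sketch, not part of the original file: skb_copy_bits() is
 * the safe way to read data that may live in fragments, e.g. peeking at a
 * header without linearizing the skb.  "hdr" and "hlen" are hypothetical.
 */
static int __maybe_unused example_peek_header(const struct sk_buff *skb,
					      void *hdr, unsigned int hlen)
{
	if (skb->len < hlen)
		return -EINVAL;
	return skb_copy_bits(skb, 0, hdr, hlen);
}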
1808
1809/*
1810 * Callback from splice_to_pipe(), if we need to release some pages
1811 * at the end of the spd in case we error'ed out in filling the pipe.
1812 */
1813static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1814{
1815 put_page(spd->pages[i]);
1816}
1817
1818static struct page *linear_to_page(struct page *page, unsigned int *len,
1819 unsigned int *offset,
1820 struct sock *sk)
1821{
1822 struct page_frag *pfrag = sk_page_frag(sk);
1823
1824 if (!sk_page_frag_refill(sk, pfrag))
1825 return NULL;
1826
1827 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1828
1829 memcpy(page_address(pfrag->page) + pfrag->offset,
1830 page_address(page) + *offset, *len);
1831 *offset = pfrag->offset;
1832 pfrag->offset += *len;
1833
1834 return pfrag->page;
1835}
1836
1837static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1838 struct page *page,
1839 unsigned int offset)
1840{
1841 return spd->nr_pages &&
1842 spd->pages[spd->nr_pages - 1] == page &&
1843 (spd->partial[spd->nr_pages - 1].offset +
1844 spd->partial[spd->nr_pages - 1].len == offset);
1845}
1846
1847/*
1848 * Fill page/offset/length into spd, if it can hold more pages.
1849 */
1850static bool spd_fill_page(struct splice_pipe_desc *spd,
1851 struct pipe_inode_info *pipe, struct page *page,
1852 unsigned int *len, unsigned int offset,
1853 bool linear,
1854 struct sock *sk)
1855{
1856 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1857 return true;
1858
1859 if (linear) {
1860 page = linear_to_page(page, len, &offset, sk);
1861 if (!page)
1862 return true;
1863 }
1864 if (spd_can_coalesce(spd, page, offset)) {
1865 spd->partial[spd->nr_pages - 1].len += *len;
1866 return false;
1867 }
1868 get_page(page);
1869 spd->pages[spd->nr_pages] = page;
1870 spd->partial[spd->nr_pages].len = *len;
1871 spd->partial[spd->nr_pages].offset = offset;
1872 spd->nr_pages++;
1873
1874 return false;
1875}
1876
1877static bool __splice_segment(struct page *page, unsigned int poff,
1878 unsigned int plen, unsigned int *off,
1879 unsigned int *len,
1880 struct splice_pipe_desc *spd, bool linear,
1881 struct sock *sk,
1882 struct pipe_inode_info *pipe)
1883{
1884 if (!*len)
1885 return true;
1886
1887 /* skip this segment if already processed */
1888 if (*off >= plen) {
1889 *off -= plen;
1890 return false;
1891 }
1892
1893 /* ignore any bits we already processed */
1894 poff += *off;
1895 plen -= *off;
1896 *off = 0;
1897
1898 do {
1899 unsigned int flen = min(*len, plen);
1900
1901 if (spd_fill_page(spd, pipe, page, &flen, poff,
1902 linear, sk))
1903 return true;
1904 poff += flen;
1905 plen -= flen;
1906 *len -= flen;
1907 } while (*len && plen);
1908
1909 return false;
1910}
1911
1912/*
1913 * Map linear and fragment data from the skb to spd. It reports true if the
1914 * pipe is full or if we already spliced the requested length.
1915 */
1916static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1917 unsigned int *offset, unsigned int *len,
1918 struct splice_pipe_desc *spd, struct sock *sk)
1919{
1920 int seg;
1921 struct sk_buff *iter;
1922
1923 /* map the linear part :
1924 * If skb->head_frag is set, this 'linear' part is backed by a
1925 * fragment, and if the head is not shared with any clones then
1926 * we can avoid a copy since we own the head portion of this page.
1927 */
1928 if (__splice_segment(virt_to_page(skb->data),
1929 (unsigned long) skb->data & (PAGE_SIZE - 1),
1930 skb_headlen(skb),
1931 offset, len, spd,
1932 skb_head_is_locked(skb),
1933 sk, pipe))
1934 return true;
1935
1936 /*
1937 * then map the fragments
1938 */
1939 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1940 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1941
1942 if (__splice_segment(skb_frag_page(f),
1943 f->page_offset, skb_frag_size(f),
1944 offset, len, spd, false, sk, pipe))
1945 return true;
1946 }
1947
1948 skb_walk_frags(skb, iter) {
1949 if (*offset >= iter->len) {
1950 *offset -= iter->len;
1951 continue;
1952 }
1953 /* __skb_splice_bits() only fails if the output has no room
1954 * left, so no point in going over the frag_list for the error
1955 * case.
1956 */
1957 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
1958 return true;
1959 }
1960
1961 return false;
1962}
1963
1964ssize_t skb_socket_splice(struct sock *sk,
1965 struct pipe_inode_info *pipe,
1966 struct splice_pipe_desc *spd)
1967{
1968 int ret;
1969
1970 /* Drop the socket lock, otherwise we have reverse
1971 * locking dependencies between sk_lock and i_mutex
1972 * here as compared to sendfile(). We enter here
1973 * with the socket lock held, and splice_to_pipe() will
1974 * grab the pipe inode lock. For sendfile() emulation,
1975 * we call into ->sendpage() with the i_mutex lock held
1976 * and networking will grab the socket lock.
1977 */
1978 release_sock(sk);
1979 ret = splice_to_pipe(pipe, spd);
1980 lock_sock(sk);
1981
1982 return ret;
1983}
1984
1985/*
1986 * Map data from the skb to a pipe. Should handle both the linear part,
1987 * the fragments, and the frag list.
1988 */
1989int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
1990 struct pipe_inode_info *pipe, unsigned int tlen,
1991 unsigned int flags,
1992 ssize_t (*splice_cb)(struct sock *,
1993 struct pipe_inode_info *,
1994 struct splice_pipe_desc *))
1995{
1996 struct partial_page partial[MAX_SKB_FRAGS];
1997 struct page *pages[MAX_SKB_FRAGS];
1998 struct splice_pipe_desc spd = {
1999 .pages = pages,
2000 .partial = partial,
2001 .nr_pages_max = MAX_SKB_FRAGS,
2002 .flags = flags,
2003 .ops = &nosteal_pipe_buf_ops,
2004 .spd_release = sock_spd_release,
2005 };
2006 int ret = 0;
2007
2008 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2009
2010 if (spd.nr_pages)
2011 ret = splice_cb(sk, pipe, &spd);
2012
2013 return ret;
2014}
2015EXPORT_SYMBOL_GPL(skb_splice_bits);
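
/* Example: a minimal sketch (not taken from any in-tree caller) of how
 * a protocol's ->splice_read() path might hand a single skb to a pipe
 * via skb_splice_bits(), using skb_socket_splice() as the callback so
 * the socket lock is dropped around splice_to_pipe().
 */
static int example_splice_one_skb(struct sock *sk, struct sk_buff *skb,
				  struct pipe_inode_info *pipe,
				  unsigned int len, unsigned int flags)
{
	/* offset 0: splice from the start of this skb's payload */
	return skb_splice_bits(skb, sk, 0, pipe, len, flags,
			       skb_socket_splice);
}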
2016
2017/**
2018 * skb_store_bits - store bits from kernel buffer to skb
2019 * @skb: destination buffer
2020 * @offset: offset in destination
2021 * @from: source buffer
2022 * @len: number of bytes to copy
2023 *
2024 * Copy the specified number of bytes from the source buffer to the
2025 * destination skb. This function handles all the messy bits of
2026 * traversing fragment lists and such.
2027 */
2028
2029int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2030{
2031 int start = skb_headlen(skb);
2032 struct sk_buff *frag_iter;
2033 int i, copy;
2034
2035 if (offset > (int)skb->len - len)
2036 goto fault;
2037
2038 if ((copy = start - offset) > 0) {
2039 if (copy > len)
2040 copy = len;
2041 skb_copy_to_linear_data_offset(skb, offset, from, copy);
2042 if ((len -= copy) == 0)
2043 return 0;
2044 offset += copy;
2045 from += copy;
2046 }
2047
2048 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2049 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2050 int end;
2051
2052 WARN_ON(start > offset + len);
2053
2054 end = start + skb_frag_size(frag);
2055 if ((copy = end - offset) > 0) {
2056 u8 *vaddr;
2057
2058 if (copy > len)
2059 copy = len;
2060
2061 vaddr = kmap_atomic(skb_frag_page(frag));
2062 memcpy(vaddr + frag->page_offset + offset - start,
2063 from, copy);
2064 kunmap_atomic(vaddr);
2065
2066 if ((len -= copy) == 0)
2067 return 0;
2068 offset += copy;
2069 from += copy;
2070 }
2071 start = end;
2072 }
2073
2074 skb_walk_frags(skb, frag_iter) {
2075 int end;
2076
2077 WARN_ON(start > offset + len);
2078
2079 end = start + frag_iter->len;
2080 if ((copy = end - offset) > 0) {
2081 if (copy > len)
2082 copy = len;
2083 if (skb_store_bits(frag_iter, offset - start,
2084 from, copy))
2085 goto fault;
2086 if ((len -= copy) == 0)
2087 return 0;
2088 offset += copy;
2089 from += copy;
2090 }
2091 start = end;
2092 }
2093 if (!len)
2094 return 0;
2095
2096fault:
2097 return -EFAULT;
2098}
2099EXPORT_SYMBOL(skb_store_bits);
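
/* Example: a minimal sketch showing skb_store_bits() overwriting four
 * bytes at a caller-chosen offset; the pattern and offset here are
 * hypothetical. The helper works whether those bytes sit in the linear
 * area, a page frag or the frag list.
 */
static int example_overwrite_bytes(struct sk_buff *skb, int offset)
{
	static const u8 pattern[4] = { 0xde, 0xad, 0xbe, 0xef };

	/* returns -EFAULT if offset + sizeof(pattern) exceeds skb->len */
	return skb_store_bits(skb, offset, pattern, sizeof(pattern));
}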
2100
2101/* Checksum skb data. */
2102__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2103 __wsum csum, const struct skb_checksum_ops *ops)
2104{
2105 int start = skb_headlen(skb);
2106 int i, copy = start - offset;
2107 struct sk_buff *frag_iter;
2108 int pos = 0;
2109
2110 /* Checksum header. */
2111 if (copy > 0) {
2112 if (copy > len)
2113 copy = len;
2114 csum = ops->update(skb->data + offset, copy, csum);
2115 if ((len -= copy) == 0)
2116 return csum;
2117 offset += copy;
2118 pos = copy;
2119 }
2120
2121 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2122 int end;
2123 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2124
2125 WARN_ON(start > offset + len);
2126
2127 end = start + skb_frag_size(frag);
2128 if ((copy = end - offset) > 0) {
2129 __wsum csum2;
2130 u8 *vaddr;
2131
2132 if (copy > len)
2133 copy = len;
2134 vaddr = kmap_atomic(skb_frag_page(frag));
2135 csum2 = ops->update(vaddr + frag->page_offset +
2136 offset - start, copy, 0);
2137 kunmap_atomic(vaddr);
2138 csum = ops->combine(csum, csum2, pos, copy);
2139 if (!(len -= copy))
2140 return csum;
2141 offset += copy;
2142 pos += copy;
2143 }
2144 start = end;
2145 }
2146
2147 skb_walk_frags(skb, frag_iter) {
2148 int end;
2149
2150 WARN_ON(start > offset + len);
2151
2152 end = start + frag_iter->len;
2153 if ((copy = end - offset) > 0) {
2154 __wsum csum2;
2155 if (copy > len)
2156 copy = len;
2157 csum2 = __skb_checksum(frag_iter, offset - start,
2158 copy, 0, ops);
2159 csum = ops->combine(csum, csum2, pos, copy);
2160 if ((len -= copy) == 0)
2161 return csum;
2162 offset += copy;
2163 pos += copy;
2164 }
2165 start = end;
2166 }
2167 BUG_ON(len);
2168
2169 return csum;
2170}
2171EXPORT_SYMBOL(__skb_checksum);
2172
2173__wsum skb_checksum(const struct sk_buff *skb, int offset,
2174 int len, __wsum csum)
2175{
2176 const struct skb_checksum_ops ops = {
2177 .update = csum_partial_ext,
2178 .combine = csum_block_add_ext,
2179 };
2180
2181 return __skb_checksum(skb, offset, len, csum, &ops);
2182}
2183EXPORT_SYMBOL(skb_checksum);
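
/* Example: a minimal sketch that checksums everything from a
 * hypothetical transport-header offset to the end of the skb and
 * folds the 32-bit partial sum into a 16-bit Internet checksum.
 */
static __sum16 example_payload_csum(const struct sk_buff *skb, int thoff)
{
	__wsum csum = skb_checksum(skb, thoff, skb->len - thoff, 0);

	return csum_fold(csum);
}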
2184
2185/* Both of the above in one bottle. */
2186
2187__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2188 u8 *to, int len, __wsum csum)
2189{
2190 int start = skb_headlen(skb);
2191 int i, copy = start - offset;
2192 struct sk_buff *frag_iter;
2193 int pos = 0;
2194
2195 /* Copy header. */
2196 if (copy > 0) {
2197 if (copy > len)
2198 copy = len;
2199 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2200 copy, csum);
2201 if ((len -= copy) == 0)
2202 return csum;
2203 offset += copy;
2204 to += copy;
2205 pos = copy;
2206 }
2207
2208 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2209 int end;
2210
2211 WARN_ON(start > offset + len);
2212
2213 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2214 if ((copy = end - offset) > 0) {
2215 __wsum csum2;
2216 u8 *vaddr;
2217 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2218
2219 if (copy > len)
2220 copy = len;
2221 vaddr = kmap_atomic(skb_frag_page(frag));
2222 csum2 = csum_partial_copy_nocheck(vaddr +
2223 frag->page_offset +
2224 offset - start, to,
2225 copy, 0);
2226 kunmap_atomic(vaddr);
2227 csum = csum_block_add(csum, csum2, pos);
2228 if (!(len -= copy))
2229 return csum;
2230 offset += copy;
2231 to += copy;
2232 pos += copy;
2233 }
2234 start = end;
2235 }
2236
2237 skb_walk_frags(skb, frag_iter) {
2238 __wsum csum2;
2239 int end;
2240
2241 WARN_ON(start > offset + len);
2242
2243 end = start + frag_iter->len;
2244 if ((copy = end - offset) > 0) {
2245 if (copy > len)
2246 copy = len;
2247 csum2 = skb_copy_and_csum_bits(frag_iter,
2248 offset - start,
2249 to, copy, 0);
2250 csum = csum_block_add(csum, csum2, pos);
2251 if ((len -= copy) == 0)
2252 return csum;
2253 offset += copy;
2254 to += copy;
2255 pos += copy;
2256 }
2257 start = end;
2258 }
2259 BUG_ON(len);
2260 return csum;
2261}
2262EXPORT_SYMBOL(skb_copy_and_csum_bits);
2263
2264/**
2265 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2266 * @from: source buffer
2267 *
2268 * Calculates the amount of linear headroom needed in the 'to' skb passed
2269 * into skb_zerocopy().
2270 */
2271unsigned int
2272skb_zerocopy_headlen(const struct sk_buff *from)
2273{
2274 unsigned int hlen = 0;
2275
2276 if (!from->head_frag ||
2277 skb_headlen(from) < L1_CACHE_BYTES ||
2278 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2279 hlen = skb_headlen(from);
2280
2281 if (skb_has_frag_list(from))
2282 hlen = from->len;
2283
2284 return hlen;
2285}
2286EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2287
2288/**
2289 * skb_zerocopy - Zero copy skb to skb
2290 * @to: destination buffer
2291 * @from: source buffer
2292 * @len: number of bytes to copy from source buffer
2293 * @hlen: size of linear headroom in destination buffer
2294 *
2295 * Copies up to @len bytes from @from to @to by creating references
2296 * to the frags in the source buffer.
2297 *
2298 * The @hlen as calculated by skb_zerocopy_headlen() specifies the
2299 * headroom in the @to buffer.
2300 *
2301 * Return value:
2302 * 0: everything is OK
2303 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2304 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2305 */
2306int
2307skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2308{
2309 int i, j = 0;
2310 int plen = 0; /* length of skb->head fragment */
2311 int ret;
2312 struct page *page;
2313 unsigned int offset;
2314
2315 BUG_ON(!from->head_frag && !hlen);
2316
2317 /* don't bother with small payloads */
2318 if (len <= skb_tailroom(to))
2319 return skb_copy_bits(from, 0, skb_put(to, len), len);
2320
2321 if (hlen) {
2322 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2323 if (unlikely(ret))
2324 return ret;
2325 len -= hlen;
2326 } else {
2327 plen = min_t(int, skb_headlen(from), len);
2328 if (plen) {
2329 page = virt_to_head_page(from->head);
2330 offset = from->data - (unsigned char *)page_address(page);
2331 __skb_fill_page_desc(to, 0, page, offset, plen);
2332 get_page(page);
2333 j = 1;
2334 len -= plen;
2335 }
2336 }
2337
2338 to->truesize += len + plen;
2339 to->len += len + plen;
2340 to->data_len += len + plen;
2341
2342 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2343 skb_tx_error(from);
2344 return -ENOMEM;
2345 }
2346
2347 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2348 if (!len)
2349 break;
2350 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2351 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2352 len -= skb_shinfo(to)->frags[j].size;
2353 skb_frag_ref(to, j);
2354 j++;
2355 }
2356 skb_shinfo(to)->nr_frags = j;
2357
2358 return 0;
2359}
2360EXPORT_SYMBOL_GPL(skb_zerocopy);
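
/* Example: a minimal sketch of the usual skb_zerocopy() calling
 * pattern: size the linear headroom with skb_zerocopy_headlen(),
 * allocate the destination, then reference the source frags instead
 * of copying them. The helper name is hypothetical.
 */
static struct sk_buff *example_zerocopy_clone(struct sk_buff *from)
{
	unsigned int hlen = skb_zerocopy_headlen(from);
	struct sk_buff *to;

	to = alloc_skb(hlen, GFP_ATOMIC);
	if (!to)
		return NULL;

	if (skb_zerocopy(to, from, from->len, hlen)) {
		kfree_skb(to);
		return NULL;
	}
	return to;
}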
2361
2362void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2363{
2364 __wsum csum;
2365 long csstart;
2366
2367 if (skb->ip_summed == CHECKSUM_PARTIAL)
2368 csstart = skb_checksum_start_offset(skb);
2369 else
2370 csstart = skb_headlen(skb);
2371
2372 BUG_ON(csstart > skb_headlen(skb));
2373
2374 skb_copy_from_linear_data(skb, to, csstart);
2375
2376 csum = 0;
2377 if (csstart != skb->len)
2378 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2379 skb->len - csstart, 0);
2380
2381 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2382 long csstuff = csstart + skb->csum_offset;
2383
2384 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2385 }
2386}
2387EXPORT_SYMBOL(skb_copy_and_csum_dev);
2388
2389/**
2390 * skb_dequeue - remove from the head of the queue
2391 * @list: list to dequeue from
2392 *
2393 * Remove the head of the list. The list lock is taken so the function
2394 * may be used safely with other locking list functions. The head item is
2395 * returned or %NULL if the list is empty.
2396 */
2397
2398struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2399{
2400 unsigned long flags;
2401 struct sk_buff *result;
2402
2403 spin_lock_irqsave(&list->lock, flags);
2404 result = __skb_dequeue(list);
2405 spin_unlock_irqrestore(&list->lock, flags);
2406 return result;
2407}
2408EXPORT_SYMBOL(skb_dequeue);
2409
2410/**
2411 * skb_dequeue_tail - remove from the tail of the queue
2412 * @list: list to dequeue from
2413 *
2414 * Remove the tail of the list. The list lock is taken so the function
2415 * may be used safely with other locking list functions. The tail item is
2416 * returned or %NULL if the list is empty.
2417 */
2418struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2419{
2420 unsigned long flags;
2421 struct sk_buff *result;
2422
2423 spin_lock_irqsave(&list->lock, flags);
2424 result = __skb_dequeue_tail(list);
2425 spin_unlock_irqrestore(&list->lock, flags);
2426 return result;
2427}
2428EXPORT_SYMBOL(skb_dequeue_tail);
2429
2430/**
2431 * skb_queue_purge - empty a list
2432 * @list: list to empty
2433 *
2434 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2435 * the list and one reference dropped. This function takes the list
2436 * lock and is atomic with respect to other list locking functions.
2437 */
2438void skb_queue_purge(struct sk_buff_head *list)
2439{
2440 struct sk_buff *skb;
2441 while ((skb = skb_dequeue(list)) != NULL)
2442 kfree_skb(skb);
2443}
2444EXPORT_SYMBOL(skb_queue_purge);
2445
2446/**
2447 * skb_queue_head - queue a buffer at the list head
2448 * @list: list to use
2449 * @newsk: buffer to queue
2450 *
2451 * Queue a buffer at the start of the list. This function takes the
2452 * list lock and can be used safely with other locking &sk_buff
2453 * functions.
2454 *
2455 * A buffer cannot be placed on two lists at the same time.
2456 */
2457void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2458{
2459 unsigned long flags;
2460
2461 spin_lock_irqsave(&list->lock, flags);
2462 __skb_queue_head(list, newsk);
2463 spin_unlock_irqrestore(&list->lock, flags);
2464}
2465EXPORT_SYMBOL(skb_queue_head);
2466
2467/**
2468 * skb_queue_tail - queue a buffer at the list tail
2469 * @list: list to use
2470 * @newsk: buffer to queue
2471 *
2472 * Queue a buffer at the tail of the list. This function takes the
2473 * list lock and can be used safely with other locking &sk_buff
2474 * functions.
2475 *
2476 * A buffer cannot be placed on two lists at the same time.
2477 */
2478void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2479{
2480 unsigned long flags;
2481
2482 spin_lock_irqsave(&list->lock, flags);
2483 __skb_queue_tail(list, newsk);
2484 spin_unlock_irqrestore(&list->lock, flags);
2485}
2486EXPORT_SYMBOL(skb_queue_tail);
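
/* Example: a minimal sketch of a private FIFO built on the locked
 * queue helpers above; the queue and helper names are hypothetical.
 */
static struct sk_buff_head example_fifo;

static void example_fifo_init(void)
{
	skb_queue_head_init(&example_fifo);
}

static void example_fifo_put(struct sk_buff *skb)
{
	skb_queue_tail(&example_fifo, skb);	/* takes the list lock */
}

static struct sk_buff *example_fifo_get(void)
{
	return skb_dequeue(&example_fifo);	/* NULL when empty */
}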
2487
2488/**
2489 * skb_unlink - remove a buffer from a list
2490 * @skb: buffer to remove
2491 * @list: list to use
2492 *
2493 * Remove a packet from a list. The list locks are taken and this
2494 * function is atomic with respect to other list locked calls.
2495 *
2496 * You must know what list the SKB is on.
2497 */
2498void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2499{
2500 unsigned long flags;
2501
2502 spin_lock_irqsave(&list->lock, flags);
2503 __skb_unlink(skb, list);
2504 spin_unlock_irqrestore(&list->lock, flags);
2505}
2506EXPORT_SYMBOL(skb_unlink);
2507
2508/**
2509 * skb_append - append a buffer
2510 * @old: buffer to insert after
2511 * @newsk: buffer to insert
2512 * @list: list to use
2513 *
2514 * Place a packet after a given packet in a list. The list locks are taken
2515 * and this function is atomic with respect to other list locked calls.
2516 * A buffer cannot be placed on two lists at the same time.
2517 */
2518void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2519{
2520 unsigned long flags;
2521
2522 spin_lock_irqsave(&list->lock, flags);
2523 __skb_queue_after(list, old, newsk);
2524 spin_unlock_irqrestore(&list->lock, flags);
2525}
2526EXPORT_SYMBOL(skb_append);
2527
2528/**
2529 * skb_insert - insert a buffer
2530 * @old: buffer to insert before
2531 * @newsk: buffer to insert
2532 * @list: list to use
2533 *
2534 * Place a packet before a given packet in a list. The list locks are
2535 * taken and this function is atomic with respect to other list locked
2536 * calls.
2537 *
2538 * A buffer cannot be placed on two lists at the same time.
2539 */
2540void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2541{
2542 unsigned long flags;
2543
2544 spin_lock_irqsave(&list->lock, flags);
2545 __skb_insert(newsk, old->prev, old, list);
2546 spin_unlock_irqrestore(&list->lock, flags);
2547}
2548EXPORT_SYMBOL(skb_insert);
2549
2550static inline void skb_split_inside_header(struct sk_buff *skb,
2551 struct sk_buff* skb1,
2552 const u32 len, const int pos)
2553{
2554 int i;
2555
2556 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2557 pos - len);
2558 /* And move data appendix as is. */
2559 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2560 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2561
2562 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2563 skb_shinfo(skb)->nr_frags = 0;
2564 skb1->data_len = skb->data_len;
2565 skb1->len += skb1->data_len;
2566 skb->data_len = 0;
2567 skb->len = len;
2568 skb_set_tail_pointer(skb, len);
2569}
2570
2571static inline void skb_split_no_header(struct sk_buff *skb,
2572 struct sk_buff* skb1,
2573 const u32 len, int pos)
2574{
2575 int i, k = 0;
2576 const int nfrags = skb_shinfo(skb)->nr_frags;
2577
2578 skb_shinfo(skb)->nr_frags = 0;
2579 skb1->len = skb1->data_len = skb->len - len;
2580 skb->len = len;
2581 skb->data_len = len - pos;
2582
2583 for (i = 0; i < nfrags; i++) {
2584 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2585
2586 if (pos + size > len) {
2587 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2588
2589 if (pos < len) {
2590 /* Split frag.
2591 * We have two variants in this case:
2592 * 1. Move the whole frag to the second
2593 * part, if possible. E.g. this is
2594 * mandatory for TUX, where splitting
2595 * is expensive.
2596 * 2. Split the frag accurately, which is what we do here.
2597 */
2598 skb_frag_ref(skb, i);
2599 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2600 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2601 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2602 skb_shinfo(skb)->nr_frags++;
2603 }
2604 k++;
2605 } else
2606 skb_shinfo(skb)->nr_frags++;
2607 pos += size;
2608 }
2609 skb_shinfo(skb1)->nr_frags = k;
2610}
2611
2612/**
2613 * skb_split - Split fragmented skb into two parts at length len.
2614 * @skb: the buffer to split
2615 * @skb1: the buffer to receive the second part
2616 * @len: new length for skb
2617 */
2618void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2619{
2620 int pos = skb_headlen(skb);
2621
2622 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2623 if (len < pos) /* Split line is inside header. */
2624 skb_split_inside_header(skb, skb1, len, pos);
2625 else /* Second chunk has no header, nothing to copy. */
2626 skb_split_no_header(skb, skb1, len, pos);
2627}
2628EXPORT_SYMBOL(skb_split);
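
/* Example: a minimal sketch of splitting an oversized skb at a
 * hypothetical boundary, in the style of the TCP fragmenting code.
 * The second buffer only needs enough head for whatever part of the
 * linear data moves across.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 boundary)
{
	struct sk_buff *rest;

	if (skb->len <= boundary)
		return NULL;

	rest = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
	if (!rest)
		return NULL;

	/* skb keeps the first 'boundary' bytes, rest gets the remainder */
	skb_split(skb, rest, boundary);
	return rest;
}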
2629
2630/* Shifting from/to a cloned skb is a no-go.
2631 *
2632 * Caller cannot keep skb_shinfo related pointers past calling here!
2633 */
2634static int skb_prepare_for_shift(struct sk_buff *skb)
2635{
2636 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2637}
2638
2639/**
2640 * skb_shift - Shifts paged data partially from skb to another
2641 * @tgt: buffer into which tail data gets added
2642 * @skb: buffer from which the paged data comes from
2643 * @shiftlen: shift up to this many bytes
2644 *
2645 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2646 * the length of the skb, from skb to tgt. Returns the number of bytes
2647 * shifted. It's up to the caller to free skb if everything was shifted.
2648 *
2649 * If @tgt runs out of frags, the whole operation is aborted.
2650 *
2651 * The skb may contain nothing but paged data, while tgt is allowed
2652 * to hold non-paged data as well.
2653 *
2654 * TODO: full sized shift could be optimized but that would need
2655 * specialized skb free'er to handle frags without up-to-date nr_frags.
2656 */
2657int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2658{
2659 int from, to, merge, todo;
2660 struct skb_frag_struct *fragfrom, *fragto;
2661
2662 BUG_ON(shiftlen > skb->len);
2663 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */
2664
2665 todo = shiftlen;
2666 from = 0;
2667 to = skb_shinfo(tgt)->nr_frags;
2668 fragfrom = &skb_shinfo(skb)->frags[from];
2669
2670 /* Actual merge is delayed until the point when we know we can
2671 * commit all, so that we don't have to undo partial changes
2672 */
2673 if (!to ||
2674 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2675 fragfrom->page_offset)) {
2676 merge = -1;
2677 } else {
2678 merge = to - 1;
2679
2680 todo -= skb_frag_size(fragfrom);
2681 if (todo < 0) {
2682 if (skb_prepare_for_shift(skb) ||
2683 skb_prepare_for_shift(tgt))
2684 return 0;
2685
2686 /* All previous frag pointers might be stale! */
2687 fragfrom = &skb_shinfo(skb)->frags[from];
2688 fragto = &skb_shinfo(tgt)->frags[merge];
2689
2690 skb_frag_size_add(fragto, shiftlen);
2691 skb_frag_size_sub(fragfrom, shiftlen);
2692 fragfrom->page_offset += shiftlen;
2693
2694 goto onlymerged;
2695 }
2696
2697 from++;
2698 }
2699
2700 /* Skip a full, non-fitting skb to avoid expensive operations */
2701 if ((shiftlen == skb->len) &&
2702 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2703 return 0;
2704
2705 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2706 return 0;
2707
2708 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2709 if (to == MAX_SKB_FRAGS)
2710 return 0;
2711
2712 fragfrom = &skb_shinfo(skb)->frags[from];
2713 fragto = &skb_shinfo(tgt)->frags[to];
2714
2715 if (todo >= skb_frag_size(fragfrom)) {
2716 *fragto = *fragfrom;
2717 todo -= skb_frag_size(fragfrom);
2718 from++;
2719 to++;
2720
2721 } else {
2722 __skb_frag_ref(fragfrom);
2723 fragto->page = fragfrom->page;
2724 fragto->page_offset = fragfrom->page_offset;
2725 skb_frag_size_set(fragto, todo);
2726
2727 fragfrom->page_offset += todo;
2728 skb_frag_size_sub(fragfrom, todo);
2729 todo = 0;
2730
2731 to++;
2732 break;
2733 }
2734 }
2735
2736 /* Ready to "commit" this state change to tgt */
2737 skb_shinfo(tgt)->nr_frags = to;
2738
2739 if (merge >= 0) {
2740 fragfrom = &skb_shinfo(skb)->frags[0];
2741 fragto = &skb_shinfo(tgt)->frags[merge];
2742
2743 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2744 __skb_frag_unref(fragfrom);
2745 }
2746
2747 /* Reposition in the original skb */
2748 to = 0;
2749 while (from < skb_shinfo(skb)->nr_frags)
2750 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2751 skb_shinfo(skb)->nr_frags = to;
2752
2753 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2754
2755onlymerged:
2756 /* Most likely the tgt won't ever need its checksum anymore, skb on
2757 * the other hand might need it if it needs to be resent
2758 */
2759 tgt->ip_summed = CHECKSUM_PARTIAL;
2760 skb->ip_summed = CHECKSUM_PARTIAL;
2761
2762 /* Yuck, is it really working this way? Some helper, please? */
2763 skb->len -= shiftlen;
2764 skb->data_len -= shiftlen;
2765 skb->truesize -= shiftlen;
2766 tgt->len += shiftlen;
2767 tgt->data_len += shiftlen;
2768 tgt->truesize += shiftlen;
2769
2770 return shiftlen;
2771}
2772
2773/**
2774 * skb_prepare_seq_read - Prepare a sequential read of skb data
2775 * @skb: the buffer to read
2776 * @from: lower offset of data to be read
2777 * @to: upper offset of data to be read
2778 * @st: state variable
2779 *
2780 * Initializes the specified state variable. Must be called before
2781 * invoking skb_seq_read() for the first time.
2782 */
2783void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2784 unsigned int to, struct skb_seq_state *st)
2785{
2786 st->lower_offset = from;
2787 st->upper_offset = to;
2788 st->root_skb = st->cur_skb = skb;
2789 st->frag_idx = st->stepped_offset = 0;
2790 st->frag_data = NULL;
2791}
2792EXPORT_SYMBOL(skb_prepare_seq_read);
2793
2794/**
2795 * skb_seq_read - Sequentially read skb data
2796 * @consumed: number of bytes consumed by the caller so far
2797 * @data: destination pointer for data to be returned
2798 * @st: state variable
2799 *
2800 * Reads a block of skb data at @consumed relative to the
2801 * lower offset specified to skb_prepare_seq_read(). Assigns
2802 * the head of the data block to @data and returns the length
2803 * of the block or 0 if the end of the skb data or the upper
2804 * offset has been reached.
2805 *
2806 * The caller is not required to consume all of the data
2807 * returned, i.e. @consumed is typically set to the number
2808 * of bytes already consumed and the next call to
2809 * skb_seq_read() will return the remaining part of the block.
2810 *
2811 * Note 1: The size of each block of data returned can be arbitrary;
2812 * this limitation is the cost of zerocopy sequential
2813 * reads of potentially non-linear data.
2814 *
2815 * Note 2: Fragment lists within fragments are not implemented
2816 * at the moment, state->root_skb could be replaced with
2817 * a stack for this purpose.
2818 */
2819unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2820 struct skb_seq_state *st)
2821{
2822 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2823 skb_frag_t *frag;
2824
2825 if (unlikely(abs_offset >= st->upper_offset)) {
2826 if (st->frag_data) {
2827 kunmap_atomic(st->frag_data);
2828 st->frag_data = NULL;
2829 }
2830 return 0;
2831 }
2832
2833next_skb:
2834 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2835
2836 if (abs_offset < block_limit && !st->frag_data) {
2837 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2838 return block_limit - abs_offset;
2839 }
2840
2841 if (st->frag_idx == 0 && !st->frag_data)
2842 st->stepped_offset += skb_headlen(st->cur_skb);
2843
2844 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2845 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2846 block_limit = skb_frag_size(frag) + st->stepped_offset;
2847
2848 if (abs_offset < block_limit) {
2849 if (!st->frag_data)
2850 st->frag_data = kmap_atomic(skb_frag_page(frag));
2851
2852 *data = (u8 *) st->frag_data + frag->page_offset +
2853 (abs_offset - st->stepped_offset);
2854
2855 return block_limit - abs_offset;
2856 }
2857
2858 if (st->frag_data) {
2859 kunmap_atomic(st->frag_data);
2860 st->frag_data = NULL;
2861 }
2862
2863 st->frag_idx++;
2864 st->stepped_offset += skb_frag_size(frag);
2865 }
2866
2867 if (st->frag_data) {
2868 kunmap_atomic(st->frag_data);
2869 st->frag_data = NULL;
2870 }
2871
2872 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2873 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2874 st->frag_idx = 0;
2875 goto next_skb;
2876 } else if (st->cur_skb->next) {
2877 st->cur_skb = st->cur_skb->next;
2878 st->frag_idx = 0;
2879 goto next_skb;
2880 }
2881
2882 return 0;
2883}
2884EXPORT_SYMBOL(skb_seq_read);
2885
2886/**
2887 * skb_abort_seq_read - Abort a sequential read of skb data
2888 * @st: state variable
2889 *
2890 * Must be called if the sequential read is abandoned before
2891 * skb_seq_read() has returned 0.
2892 */
2893void skb_abort_seq_read(struct skb_seq_state *st)
2894{
2895 if (st->frag_data)
2896 kunmap_atomic(st->frag_data);
2897}
2898EXPORT_SYMBOL(skb_abort_seq_read);
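
/* Example: a minimal sketch of a zerocopy sequential walk over all of
 * an skb's bytes with the seq_read API above; example_consume() is a
 * hypothetical per-block handler.
 */
static void example_walk_skb(struct sk_buff *skb,
			     void (*example_consume)(const u8 *, unsigned int))
{
	struct skb_seq_state st;
	unsigned int consumed = 0, len;
	const u8 *data;

	skb_prepare_seq_read(skb, 0, skb->len, &st);
	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		example_consume(data, len);
		consumed += len;
	}
	/* skb_seq_read() returned 0, so no skb_abort_seq_read() needed */
}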
2899
2900#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2901
2902static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2903 struct ts_config *conf,
2904 struct ts_state *state)
2905{
2906 return skb_seq_read(offset, text, TS_SKB_CB(state));
2907}
2908
2909static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2910{
2911 skb_abort_seq_read(TS_SKB_CB(state));
2912}
2913
2914/**
2915 * skb_find_text - Find a text pattern in skb data
2916 * @skb: the buffer to look in
2917 * @from: search offset
2918 * @to: search limit
2919 * @config: textsearch configuration
2920 *
2921 * Finds a pattern in the skb data according to the specified
2922 * textsearch configuration. Use textsearch_next() to retrieve
2923 * subsequent occurrences of the pattern. Returns the offset
2924 * to the first occurrence or UINT_MAX if no match was found.
2925 */
2926unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2927 unsigned int to, struct ts_config *config)
2928{
2929 struct ts_state state;
2930 unsigned int ret;
2931
2932 config->get_next_block = skb_ts_get_next_block;
2933 config->finish = skb_ts_finish;
2934
2935 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
2936
2937 ret = textsearch_find(config, &state);
2938 return (ret <= to - from ? ret : UINT_MAX);
2939}
2940EXPORT_SYMBOL(skb_find_text);
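
/* Example: a minimal sketch of pattern matching over an skb, similar
 * to what the string-match classifiers do. Assumes <linux/textsearch.h>;
 * the "bm" (Boyer-Moore) algorithm name and the pattern are illustrative.
 */
static bool example_skb_contains(struct sk_buff *skb, const char *pattern)
{
	struct ts_config *conf;
	unsigned int pos;

	conf = textsearch_prepare("bm", pattern, strlen(pattern),
				  GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return false;

	pos = skb_find_text(skb, 0, skb->len, conf);
	textsearch_destroy(conf);
	return pos != UINT_MAX;
}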
2941
2942/**
2943 * skb_append_datato_frags - append the user data to a skb
2944 * @sk: sock structure
2945 * @skb: skb structure to be appended with user data.
2946 * @getfrag: call back function to be used for getting the user data
2947 * @from: pointer to user message iov
2948 * @length: length of the iov message
2949 *
2950 * Description: This procedure appends the user data to the fragment part
2951 * of the skb. If any page allocation fails, it returns -ENOMEM.
2952 */
2953int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2954 int (*getfrag)(void *from, char *to, int offset,
2955 int len, int odd, struct sk_buff *skb),
2956 void *from, int length)
2957{
2958 int frg_cnt = skb_shinfo(skb)->nr_frags;
2959 int copy;
2960 int offset = 0;
2961 int ret;
2962 struct page_frag *pfrag = &current->task_frag;
2963
2964 do {
2965 /* Return error if we don't have space for new frag */
2966 if (frg_cnt >= MAX_SKB_FRAGS)
2967 return -EMSGSIZE;
2968
2969 if (!sk_page_frag_refill(sk, pfrag))
2970 return -ENOMEM;
2971
2972 /* copy the user data to page */
2973 copy = min_t(int, length, pfrag->size - pfrag->offset);
2974
2975 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2976 offset, copy, 0, skb);
2977 if (ret < 0)
2978 return -EFAULT;
2979
2980 /* copy was successful so update the size parameters */
2981 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2982 copy);
2983 frg_cnt++;
2984 pfrag->offset += copy;
2985 get_page(pfrag->page);
2986
2987 skb->truesize += copy;
2988 atomic_add(copy, &sk->sk_wmem_alloc);
2989 skb->len += copy;
2990 skb->data_len += copy;
2991 offset += copy;
2992 length -= copy;
2993
2994 } while (length > 0);
2995
2996 return 0;
2997}
2998EXPORT_SYMBOL(skb_append_datato_frags);
2999
3000int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3001 int offset, size_t size)
3002{
3003 int i = skb_shinfo(skb)->nr_frags;
3004
3005 if (skb_can_coalesce(skb, i, page, offset)) {
3006 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3007 } else if (i < MAX_SKB_FRAGS) {
3008 get_page(page);
3009 skb_fill_page_desc(skb, i, page, offset, size);
3010 } else {
3011 return -EMSGSIZE;
3012 }
3013
3014 return 0;
3015}
3016EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3017
3018/**
3019 * skb_push_rcsum - push skb and update receive checksum
3020 * @skb: buffer to update
3021 * @len: length of data pushed
3022 *
3023 * This function performs an skb_push on the packet and updates
3024 * the CHECKSUM_COMPLETE checksum. It should be used on
3025 * receive path processing instead of skb_push unless you know
3026 * that the checksum difference is zero (e.g., a valid IP header)
3027 * or you are setting ip_summed to CHECKSUM_NONE.
3028 */
3029static unsigned char *skb_push_rcsum(struct sk_buff *skb, unsigned len)
3030{
3031 skb_push(skb, len);
3032 skb_postpush_rcsum(skb, skb->data, len);
3033 return skb->data;
3034}
3035
3036/**
3037 * skb_pull_rcsum - pull skb and update receive checksum
3038 * @skb: buffer to update
3039 * @len: length of data pulled
3040 *
3041 * This function performs an skb_pull on the packet and updates
3042 * the CHECKSUM_COMPLETE checksum. It should be used on
3043 * receive path processing instead of skb_pull unless you know
3044 * that the checksum difference is zero (e.g., a valid IP header)
3045 * or you are setting ip_summed to CHECKSUM_NONE.
3046 */
3047unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3048{
3049 unsigned char *data = skb->data;
3050
3051 BUG_ON(len > skb->len);
3052 __skb_pull(skb, len);
3053 skb_postpull_rcsum(skb, data, len);
3054 return skb->data;
3055}
3056EXPORT_SYMBOL_GPL(skb_pull_rcsum);
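
/* Example: a minimal sketch of stripping a hypothetical 4-byte
 * encapsulation header on receive while keeping a CHECKSUM_COMPLETE
 * value consistent, as tunnel demux code does.
 */
static void example_strip_encap(struct sk_buff *skb)
{
	skb_pull_rcsum(skb, 4);		/* adjusts skb->csum as needed */
	skb_reset_network_header(skb);
}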
3057
3058/**
3059 * skb_segment - Perform protocol segmentation on skb.
3060 * @head_skb: buffer to segment
3061 * @features: features for the output path (see dev->features)
3062 *
3063 * This function performs segmentation on the given skb. It returns
3064 * a pointer to the first in a list of new skbs for the segments.
3065 * In case of error it returns ERR_PTR(err).
3066 */
3067struct sk_buff *skb_segment(struct sk_buff *head_skb,
3068 netdev_features_t features)
3069{
3070 struct sk_buff *segs = NULL;
3071 struct sk_buff *tail = NULL;
3072 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3073 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3074 unsigned int mss = skb_shinfo(head_skb)->gso_size;
3075 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3076 struct sk_buff *frag_skb = head_skb;
3077 unsigned int offset = doffset;
3078 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3079 unsigned int headroom;
3080 unsigned int len;
3081 __be16 proto;
3082 bool csum;
3083 int sg = !!(features & NETIF_F_SG);
3084 int nfrags = skb_shinfo(head_skb)->nr_frags;
3085 int err = -ENOMEM;
3086 int i = 0;
3087 int pos;
3088 int dummy;
3089
3090 __skb_push(head_skb, doffset);
3091 proto = skb_network_protocol(head_skb, &dummy);
3092 if (unlikely(!proto))
3093 return ERR_PTR(-EINVAL);
3094
3095 csum = !!can_checksum_protocol(features, proto);
3096
3097 headroom = skb_headroom(head_skb);
3098 pos = skb_headlen(head_skb);
3099
3100 do {
3101 struct sk_buff *nskb;
3102 skb_frag_t *nskb_frag;
3103 int hsize;
3104 int size;
3105
3106 len = head_skb->len - offset;
3107 if (len > mss)
3108 len = mss;
3109
3110 hsize = skb_headlen(head_skb) - offset;
3111 if (hsize < 0)
3112 hsize = 0;
3113 if (hsize > len || !sg)
3114 hsize = len;
3115
3116 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3117 (skb_headlen(list_skb) == len || sg)) {
3118 BUG_ON(skb_headlen(list_skb) > len);
3119
3120 i = 0;
3121 nfrags = skb_shinfo(list_skb)->nr_frags;
3122 frag = skb_shinfo(list_skb)->frags;
3123 frag_skb = list_skb;
3124 pos += skb_headlen(list_skb);
3125
3126 while (pos < offset + len) {
3127 BUG_ON(i >= nfrags);
3128
3129 size = skb_frag_size(frag);
3130 if (pos + size > offset + len)
3131 break;
3132
3133 i++;
3134 pos += size;
3135 frag++;
3136 }
3137
3138 nskb = skb_clone(list_skb, GFP_ATOMIC);
3139 list_skb = list_skb->next;
3140
3141 if (unlikely(!nskb))
3142 goto err;
3143
3144 if (unlikely(pskb_trim(nskb, len))) {
3145 kfree_skb(nskb);
3146 goto err;
3147 }
3148
3149 hsize = skb_end_offset(nskb);
3150 if (skb_cow_head(nskb, doffset + headroom)) {
3151 kfree_skb(nskb);
3152 goto err;
3153 }
3154
3155 nskb->truesize += skb_end_offset(nskb) - hsize;
3156 skb_release_head_state(nskb);
3157 __skb_push(nskb, doffset);
3158 } else {
3159 nskb = __alloc_skb(hsize + doffset + headroom,
3160 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3161 NUMA_NO_NODE);
3162
3163 if (unlikely(!nskb))
3164 goto err;
3165
3166 skb_reserve(nskb, headroom);
3167 __skb_put(nskb, doffset);
3168 }
3169
3170 if (segs)
3171 tail->next = nskb;
3172 else
3173 segs = nskb;
3174 tail = nskb;
3175
3176 __copy_skb_header(nskb, head_skb);
3177
3178 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3179 skb_reset_mac_len(nskb);
3180
3181 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3182 nskb->data - tnl_hlen,
3183 doffset + tnl_hlen);
3184
3185 if (nskb->len == len + doffset)
3186 goto perform_csum_check;
3187
3188 if (!sg) {
3189 if (!nskb->remcsum_offload)
3190 nskb->ip_summed = CHECKSUM_NONE;
3191 SKB_GSO_CB(nskb)->csum =
3192 skb_copy_and_csum_bits(head_skb, offset,
3193 skb_put(nskb, len),
3194 len, 0);
3195 SKB_GSO_CB(nskb)->csum_start =
3196 skb_headroom(nskb) + doffset;
3197 continue;
3198 }
3199
3200 nskb_frag = skb_shinfo(nskb)->frags;
3201
3202 skb_copy_from_linear_data_offset(head_skb, offset,
3203 skb_put(nskb, hsize), hsize);
3204
3205 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
3206 SKBTX_SHARED_FRAG;
3207
3208 while (pos < offset + len) {
3209 if (i >= nfrags) {
3210 BUG_ON(skb_headlen(list_skb));
3211
3212 i = 0;
3213 nfrags = skb_shinfo(list_skb)->nr_frags;
3214 frag = skb_shinfo(list_skb)->frags;
3215 frag_skb = list_skb;
3216
3217 BUG_ON(!nfrags);
3218
3219 list_skb = list_skb->next;
3220 }
3221
3222 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3223 MAX_SKB_FRAGS)) {
3224 net_warn_ratelimited(
3225 "skb_segment: too many frags: %u %u\n",
3226 pos, mss);
3227 goto err;
3228 }
3229
3230 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3231 goto err;
3232
3233 *nskb_frag = *frag;
3234 __skb_frag_ref(nskb_frag);
3235 size = skb_frag_size(nskb_frag);
3236
3237 if (pos < offset) {
3238 nskb_frag->page_offset += offset - pos;
3239 skb_frag_size_sub(nskb_frag, offset - pos);
3240 }
3241
3242 skb_shinfo(nskb)->nr_frags++;
3243
3244 if (pos + size <= offset + len) {
3245 i++;
3246 frag++;
3247 pos += size;
3248 } else {
3249 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3250 goto skip_fraglist;
3251 }
3252
3253 nskb_frag++;
3254 }
3255
3256skip_fraglist:
3257 nskb->data_len = len - hsize;
3258 nskb->len += nskb->data_len;
3259 nskb->truesize += nskb->data_len;
3260
3261perform_csum_check:
3262 if (!csum) {
3263 if (skb_has_shared_frag(nskb)) {
3264 err = __skb_linearize(nskb);
3265 if (err)
3266 goto err;
3267 }
3268 if (!nskb->remcsum_offload)
3269 nskb->ip_summed = CHECKSUM_NONE;
3270 SKB_GSO_CB(nskb)->csum =
3271 skb_checksum(nskb, doffset,
3272 nskb->len - doffset, 0);
3273 SKB_GSO_CB(nskb)->csum_start =
3274 skb_headroom(nskb) + doffset;
3275 }
3276 } while ((offset += len) < head_skb->len);
3277
3278 /* Some callers want to get the end of the list.
3279 * Put it in segs->prev to avoid walking the list.
3280 * (see validate_xmit_skb_list() for example)
3281 */
3282 segs->prev = tail;
3283
3284 /* Following permits correct backpressure, for protocols
3285 * using skb_set_owner_w().
3286 * The idea is to transfer ownership from head_skb to the last segment.
3287 */
3288 if (head_skb->destructor == sock_wfree) {
3289 swap(tail->truesize, head_skb->truesize);
3290 swap(tail->destructor, head_skb->destructor);
3291 swap(tail->sk, head_skb->sk);
3292 }
3293 return segs;
3294
3295err:
3296 kfree_skb_list(segs);
3297 return ERR_PTR(err);
3298}
3299EXPORT_SYMBOL_GPL(skb_segment);
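
/* Example: a minimal sketch of consuming skb_segment()'s output: the
 * segments come back as a NULL-terminated list linked via ->next, and
 * the original skb still belongs to the caller. example_xmit_one() is
 * a hypothetical transmit helper.
 */
static int example_software_gso(struct sk_buff *skb,
				netdev_features_t features,
				int (*example_xmit_one)(struct sk_buff *))
{
	struct sk_buff *segs, *nskb;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return PTR_ERR(segs);

	consume_skb(skb);	/* the segments replace the original */

	while (segs) {
		nskb = segs;
		segs = segs->next;
		nskb->next = NULL;
		example_xmit_one(nskb);
	}
	return 0;
}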
3300
3301int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3302{
3303 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3304 unsigned int offset = skb_gro_offset(skb);
3305 unsigned int headlen = skb_headlen(skb);
3306 unsigned int len = skb_gro_len(skb);
3307 struct sk_buff *lp, *p = *head;
3308 unsigned int delta_truesize;
3309
3310 if (unlikely(p->len + len >= 65536))
3311 return -E2BIG;
3312
3313 lp = NAPI_GRO_CB(p)->last;
3314 pinfo = skb_shinfo(lp);
3315
3316 if (headlen <= offset) {
3317 skb_frag_t *frag;
3318 skb_frag_t *frag2;
3319 int i = skbinfo->nr_frags;
3320 int nr_frags = pinfo->nr_frags + i;
3321
3322 if (nr_frags > MAX_SKB_FRAGS)
3323 goto merge;
3324
3325 offset -= headlen;
3326 pinfo->nr_frags = nr_frags;
3327 skbinfo->nr_frags = 0;
3328
3329 frag = pinfo->frags + nr_frags;
3330 frag2 = skbinfo->frags + i;
3331 do {
3332 *--frag = *--frag2;
3333 } while (--i);
3334
3335 frag->page_offset += offset;
3336 skb_frag_size_sub(frag, offset);
3337
3338 /* all fragments' truesize: remove (head size + sk_buff) */
3339 delta_truesize = skb->truesize -
3340 SKB_TRUESIZE(skb_end_offset(skb));
3341
3342 skb->truesize -= skb->data_len;
3343 skb->len -= skb->data_len;
3344 skb->data_len = 0;
3345
3346 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3347 goto done;
3348 } else if (skb->head_frag) {
3349 int nr_frags = pinfo->nr_frags;
3350 skb_frag_t *frag = pinfo->frags + nr_frags;
3351 struct page *page = virt_to_head_page(skb->head);
3352 unsigned int first_size = headlen - offset;
3353 unsigned int first_offset;
3354
3355 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3356 goto merge;
3357
3358 first_offset = skb->data -
3359 (unsigned char *)page_address(page) +
3360 offset;
3361
3362 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3363
3364 frag->page.p = page;
3365 frag->page_offset = first_offset;
3366 skb_frag_size_set(frag, first_size);
3367
3368 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3369 /* We don't need to clear skbinfo->nr_frags here */
3370
3371 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3372 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3373 goto done;
3374 }
3375
3376merge:
3377 delta_truesize = skb->truesize;
3378 if (offset > headlen) {
3379 unsigned int eat = offset - headlen;
3380
3381 skbinfo->frags[0].page_offset += eat;
3382 skb_frag_size_sub(&skbinfo->frags[0], eat);
3383 skb->data_len -= eat;
3384 skb->len -= eat;
3385 offset = headlen;
3386 }
3387
3388 __skb_pull(skb, offset);
3389
3390 if (NAPI_GRO_CB(p)->last == p)
3391 skb_shinfo(p)->frag_list = skb;
3392 else
3393 NAPI_GRO_CB(p)->last->next = skb;
3394 NAPI_GRO_CB(p)->last = skb;
3395 __skb_header_release(skb);
3396 lp = p;
3397
3398done:
3399 NAPI_GRO_CB(p)->count++;
3400 p->data_len += len;
3401 p->truesize += delta_truesize;
3402 p->len += len;
3403 if (lp != p) {
3404 lp->data_len += len;
3405 lp->truesize += delta_truesize;
3406 lp->len += len;
3407 }
3408 NAPI_GRO_CB(skb)->same_flow = 1;
3409 return 0;
3410}
3411
3412void __init skb_init(void)
3413{
3414 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3415 sizeof(struct sk_buff),
3416 0,
3417 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3418 NULL);
3419 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3420 sizeof(struct sk_buff_fclones),
3421 0,
3422 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3423 NULL);
3424}
3425
3426/**
3427 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3428 * @skb: Socket buffer containing the buffers to be mapped
3429 * @sg: The scatter-gather list to map into
3430 * @offset: The offset into the buffer's contents to start mapping
3431 * @len: Length of buffer space to be mapped
3432 *
3433 * Fill the specified scatter-gather list with mappings/pointers into a
3434 * region of the buffer space attached to a socket buffer.
3435 */
3436static int
3437__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3438{
3439 int start = skb_headlen(skb);
3440 int i, copy = start - offset;
3441 struct sk_buff *frag_iter;
3442 int elt = 0;
3443
3444 if (copy > 0) {
3445 if (copy > len)
3446 copy = len;
3447 sg_set_buf(sg, skb->data + offset, copy);
3448 elt++;
3449 if ((len -= copy) == 0)
3450 return elt;
3451 offset += copy;
3452 }
3453
3454 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3455 int end;
3456
3457 WARN_ON(start > offset + len);
3458
3459 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3460 if ((copy = end - offset) > 0) {
3461 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3462
3463 if (copy > len)
3464 copy = len;
3465 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3466 frag->page_offset+offset-start);
3467 elt++;
3468 if (!(len -= copy))
3469 return elt;
3470 offset += copy;
3471 }
3472 start = end;
3473 }
3474
3475 skb_walk_frags(skb, frag_iter) {
3476 int end;
3477
3478 WARN_ON(start > offset + len);
3479
3480 end = start + frag_iter->len;
3481 if ((copy = end - offset) > 0) {
3482 if (copy > len)
3483 copy = len;
3484 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3485 copy);
3486 if ((len -= copy) == 0)
3487 return elt;
3488 offset += copy;
3489 }
3490 start = end;
3491 }
3492 BUG_ON(len);
3493 return elt;
3494}
3495
3496/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
3497 * given sglist without marking the sg which contains the last skb data as the
3498 * end. So the caller can manipulate the sg list as it wishes when appending new
3499 * data after the first call, without calling sg_unmark_end to extend the list.
3500 *
3501 * Scenario to use skb_to_sgvec_nomark:
3502 * 1. sg_init_table
3503 * 2. skb_to_sgvec_nomark(payload1)
3504 * 3. skb_to_sgvec_nomark(payload2)
3505 *
3506 * This is equivalent to:
3507 * 1. sg_init_table
3508 * 2. skb_to_sgvec(payload1)
3509 * 3. sg_unmark_end
3510 * 4. skb_to_sgvec(payload2)
3511 *
3512 * When conditionally mapping multiple payloads, skb_to_sgvec_nomark
3513 * is preferable.
3514 */
3515int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3516 int offset, int len)
3517{
3518 return __skb_to_sgvec(skb, sg, offset, len);
3519}
3520EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3521
3522int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3523{
3524 int nsg = __skb_to_sgvec(skb, sg, offset, len);
3525
3526 sg_mark_end(&sg[nsg - 1]);
3527
3528 return nsg;
3529}
3530EXPORT_SYMBOL_GPL(skb_to_sgvec);
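
/* Example: a minimal sketch mapping a whole skb into a scatterlist,
 * e.g. before handing it to crypto or DMA code. MAX_SKB_FRAGS + 1
 * entries cover the linear part plus every page frag of an skb
 * without a frag list; a frag list would need more slots.
 */
static int example_map_skb(struct sk_buff *skb, struct scatterlist *sg)
{
	sg_init_table(sg, MAX_SKB_FRAGS + 1);

	/* returns the number of sg entries used; the last one is marked */
	return skb_to_sgvec(skb, sg, 0, skb->len);
}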
3531
3532/**
3533 * skb_cow_data - Check that a socket buffer's data buffers are writable
3534 * @skb: The socket buffer to check.
3535 * @tailbits: Amount of trailing space to be added
3536 * @trailer: Returned pointer to the skb where the @tailbits space begins
3537 *
3538 * Make sure that the data buffers attached to a socket buffer are
3539 * writable. If they are not, private copies are made of the data buffers
3540 * and the socket buffer is set to use these instead.
3541 *
3542 * If @tailbits is given, make sure that there is space to write @tailbits
3543 * bytes of data beyond current end of socket buffer. @trailer will be
3544 * set to point to the skb in which this space begins.
3545 *
3546 * The number of scatterlist elements required to completely map the
3547 * COW'd and extended socket buffer will be returned.
3548 */
3549int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3550{
3551 int copyflag;
3552 int elt;
3553 struct sk_buff *skb1, **skb_p;
3554
3555 /* If skb is cloned or its head is paged, reallocate
3556 * head pulling out all the pages (pages are considered not writable
3557 * at the moment even if they are anonymous).
3558 */
3559 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3560 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3561 return -ENOMEM;
3562
3563 /* Easy case. Most of packets will go this way. */
3564 if (!skb_has_frag_list(skb)) {
3565 /* A little trouble: not enough space for the trailer.
3566 * This should not happen when the stack is tuned to generate
3567 * good frames. OK, on a miss we reallocate and reserve even more
3568 * space; 128 bytes is fair. */
3569
3570 if (skb_tailroom(skb) < tailbits &&
3571 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3572 return -ENOMEM;
3573
3574 /* Voila! */
3575 *trailer = skb;
3576 return 1;
3577 }
3578
3579 /* Misery. We are in trouble, going to mince fragments... */
3580
3581 elt = 1;
3582 skb_p = &skb_shinfo(skb)->frag_list;
3583 copyflag = 0;
3584
3585 while ((skb1 = *skb_p) != NULL) {
3586 int ntail = 0;
3587
3588 /* The fragment is partially pulled by someone;
3589 * this can happen on input. Copy it and everything
3590 * after it. */
3591
3592 if (skb_shared(skb1))
3593 copyflag = 1;
3594
3595 /* If the skb is the last, worry about trailer. */
3596
3597 if (skb1->next == NULL && tailbits) {
3598 if (skb_shinfo(skb1)->nr_frags ||
3599 skb_has_frag_list(skb1) ||
3600 skb_tailroom(skb1) < tailbits)
3601 ntail = tailbits + 128;
3602 }
3603
3604 if (copyflag ||
3605 skb_cloned(skb1) ||
3606 ntail ||
3607 skb_shinfo(skb1)->nr_frags ||
3608 skb_has_frag_list(skb1)) {
3609 struct sk_buff *skb2;
3610
3611 /* Fuck, we are miserable poor guys... */
3612 if (ntail == 0)
3613 skb2 = skb_copy(skb1, GFP_ATOMIC);
3614 else
3615 skb2 = skb_copy_expand(skb1,
3616 skb_headroom(skb1),
3617 ntail,
3618 GFP_ATOMIC);
3619 if (unlikely(skb2 == NULL))
3620 return -ENOMEM;
3621
3622 if (skb1->sk)
3623 skb_set_owner_w(skb2, skb1->sk);
3624
3625 /* Looking around. Are we still alive?
3626 * OK, link new skb, drop old one */
3627
3628 skb2->next = skb1->next;
3629 *skb_p = skb2;
3630 kfree_skb(skb1);
3631 skb1 = skb2;
3632 }
3633 elt++;
3634 *trailer = skb1;
3635 skb_p = &skb1->next;
3636 }
3637
3638 return elt;
3639}
3640EXPORT_SYMBOL_GPL(skb_cow_data);
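
/* Example: a minimal sketch of the IPsec-style skb_cow_data() pattern:
 * make every buffer writable, reserve room for a trailer of a
 * hypothetical length, then extend the skb where that space begins.
 */
static int example_add_trailer(struct sk_buff *skb, int tlen)
{
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, tlen, &trailer);
	if (nsg < 0)
		return nsg;

	/* tlen bytes of tailroom are now available in 'trailer' */
	pskb_put(skb, trailer, tlen);
	return 0;
}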
3641
3642static void sock_rmem_free(struct sk_buff *skb)
3643{
3644 struct sock *sk = skb->sk;
3645
3646 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3647}
3648
3649/*
3650 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
3651 */
3652int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3653{
3654 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3655 (unsigned int)sk->sk_rcvbuf)
3656 return -ENOMEM;
3657
3658 skb_orphan(skb);
3659 skb->sk = sk;
3660 skb->destructor = sock_rmem_free;
3661 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3662
3663 /* before exiting rcu section, make sure dst is refcounted */
3664 skb_dst_force(skb);
3665
3666 skb_queue_tail(&sk->sk_error_queue, skb);
3667 if (!sock_flag(sk, SOCK_DEAD))
3668 sk->sk_data_ready(sk);
3669 return 0;
3670}
3671EXPORT_SYMBOL(sock_queue_err_skb);
3672
3673struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
3674{
3675 struct sk_buff_head *q = &sk->sk_error_queue;
3676 struct sk_buff *skb, *skb_next;
3677 unsigned long flags;
3678 int err = 0;
3679
3680 spin_lock_irqsave(&q->lock, flags);
3681 skb = __skb_dequeue(q);
3682 if (skb && (skb_next = skb_peek(q)))
3683 err = SKB_EXT_ERR(skb_next)->ee.ee_errno;
3684 spin_unlock_irqrestore(&q->lock, flags);
3685
3686 sk->sk_err = err;
3687 if (err)
3688 sk->sk_error_report(sk);
3689
3690 return skb;
3691}
3692EXPORT_SYMBOL(sock_dequeue_err_skb);
3693
3694/**
3695 * skb_clone_sk - create clone of skb, and take reference to socket
3696 * @skb: the skb to clone
3697 *
3698 * This function creates a clone of a buffer that holds a reference on
3699 * sk_refcnt. Buffers created via this function are meant to be
3700 * returned using sock_queue_err_skb, or freed via kfree_skb.
3701 *
3702 * When passing buffers allocated with this function to sock_queue_err_skb
3703 * it is necessary to wrap the call with sock_hold/sock_put in order to
3704 * prevent the socket from being released prior to being enqueued on
3705 * the sk_error_queue.
3706 */
3707struct sk_buff *skb_clone_sk(struct sk_buff *skb)
3708{
3709 struct sock *sk = skb->sk;
3710 struct sk_buff *clone;
3711
3712 if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
3713 return NULL;
3714
3715 clone = skb_clone(skb, GFP_ATOMIC);
3716 if (!clone) {
3717 sock_put(sk);
3718 return NULL;
3719 }
3720
3721 clone->sk = sk;
3722 clone->destructor = sock_efree;
3723
3724 return clone;
3725}
3726EXPORT_SYMBOL(skb_clone_sk);
3727
3728static void __skb_complete_tx_timestamp(struct sk_buff *skb,
3729 struct sock *sk,
3730 int tstype)
3731{
3732 struct sock_exterr_skb *serr;
3733 int err;
3734
3735 serr = SKB_EXT_ERR(skb);
3736 memset(serr, 0, sizeof(*serr));
3737 serr->ee.ee_errno = ENOMSG;
3738 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3739 serr->ee.ee_info = tstype;
3740 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
3741 serr->ee.ee_data = skb_shinfo(skb)->tskey;
3742 if (sk->sk_protocol == IPPROTO_TCP &&
3743 sk->sk_type == SOCK_STREAM)
3744 serr->ee.ee_data -= sk->sk_tskey;
3745 }
3746
3747 err = sock_queue_err_skb(sk, skb);
3748
3749 if (err)
3750 kfree_skb(skb);
3751}
3752
3753static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
3754{
3755 bool ret;
3756
3757 if (likely(sysctl_tstamp_allow_data || tsonly))
3758 return true;
3759
3760 read_lock_bh(&sk->sk_callback_lock);
3761 ret = sk->sk_socket && sk->sk_socket->file &&
3762 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
3763 read_unlock_bh(&sk->sk_callback_lock);
3764 return ret;
3765}
3766
3767void skb_complete_tx_timestamp(struct sk_buff *skb,
3768 struct skb_shared_hwtstamps *hwtstamps)
3769{
3770 struct sock *sk = skb->sk;
3771
3772 if (!skb_may_tx_timestamp(sk, false))
3773 return;
3774
3775 /* take a reference to prevent skb_orphan() from freeing the socket */
3776 sock_hold(sk);
3777
3778 *skb_hwtstamps(skb) = *hwtstamps;
3779 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND);
3780
3781 sock_put(sk);
3782}
3783EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
3784
3785void __skb_tstamp_tx(struct sk_buff *orig_skb,
3786 struct skb_shared_hwtstamps *hwtstamps,
3787 struct sock *sk, int tstype)
3788{
3789 struct sk_buff *skb;
3790 bool tsonly;
3791
3792 if (!sk)
3793 return;
3794
3795 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
3796 if (!skb_may_tx_timestamp(sk, tsonly))
3797 return;
3798
3799 if (tsonly)
3800 skb = alloc_skb(0, GFP_ATOMIC);
3801 else
3802 skb = skb_clone(orig_skb, GFP_ATOMIC);
3803 if (!skb)
3804 return;
3805
3806 if (tsonly) {
3807 skb_shinfo(skb)->tx_flags = skb_shinfo(orig_skb)->tx_flags;
3808 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
3809 }
3810
3811 if (hwtstamps)
3812 *skb_hwtstamps(skb) = *hwtstamps;
3813 else
3814 skb->tstamp = ktime_get_real();
3815
3816 __skb_complete_tx_timestamp(skb, sk, tstype);
3817}
3818EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
3819
3820void skb_tstamp_tx(struct sk_buff *orig_skb,
3821 struct skb_shared_hwtstamps *hwtstamps)
3822{
3823 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
3824 SCM_TSTAMP_SND);
3825}
3826EXPORT_SYMBOL_GPL(skb_tstamp_tx);
3827
3828void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3829{
3830 struct sock *sk = skb->sk;
3831 struct sock_exterr_skb *serr;
3832 int err;
3833
3834 skb->wifi_acked_valid = 1;
3835 skb->wifi_acked = acked;
3836
3837 serr = SKB_EXT_ERR(skb);
3838 memset(serr, 0, sizeof(*serr));
3839 serr->ee.ee_errno = ENOMSG;
3840 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3841
3842 /* take a reference to prevent skb_orphan() from freeing the socket */
3843 sock_hold(sk);
3844
3845 err = sock_queue_err_skb(sk, skb);
3846 if (err)
3847 kfree_skb(skb);
3848
3849 sock_put(sk);
3850}
3851EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3852
3853/**
3854 * skb_partial_csum_set - set up and verify partial csum values for packet
3855 * @skb: the skb to set
3856 * @start: the number of bytes after skb->data to start checksumming.
3857 * @off: the offset from start to place the checksum.
3858 *
3859 * For untrusted partially-checksummed packets, we need to make sure the values
3860 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3861 *
3862 * This function checks and sets those values and skb->ip_summed: if this
3863 * returns false you should drop the packet.
3864 */
3865bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3866{
3867 if (unlikely(start > skb_headlen(skb)) ||
3868 unlikely((int)start + off > skb_headlen(skb) - 2)) {
3869 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3870 start, off, skb_headlen(skb));
3871 return false;
3872 }
3873 skb->ip_summed = CHECKSUM_PARTIAL;
3874 skb->csum_start = skb_headroom(skb) + start;
3875 skb->csum_offset = off;
3876 skb_set_transport_header(skb, start);
3877 return true;
3878}
3879EXPORT_SYMBOL_GPL(skb_partial_csum_set);
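
/* Example: a minimal sketch of the virtio_net-style use of
 * skb_partial_csum_set(): a device supplied the checksum start and
 * store offsets, and the packet must be dropped if they don't fit.
 */
static int example_apply_csum_hint(struct sk_buff *skb, u16 start, u16 off)
{
	if (!skb_partial_csum_set(skb, start, off))
		return -EINVAL;	/* caller should drop the packet */

	/* skb->ip_summed is now CHECKSUM_PARTIAL */
	return 0;
}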
3880
3881static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
3882 unsigned int max)
3883{
3884 if (skb_headlen(skb) >= len)
3885 return 0;
3886
3887 /* If we need to pullup then pullup to the max, so we
3888 * won't need to do it again.
3889 */
3890 if (max > skb->len)
3891 max = skb->len;
3892
3893 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
3894 return -ENOMEM;
3895
3896 if (skb_headlen(skb) < len)
3897 return -EPROTO;
3898
3899 return 0;
3900}
3901
3902#define MAX_TCP_HDR_LEN (15 * 4)
3903
3904static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
3905 typeof(IPPROTO_IP) proto,
3906 unsigned int off)
3907{
3908 switch (proto) {
3909 int err;
3910
3911 case IPPROTO_TCP:
3912 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
3913 off + MAX_TCP_HDR_LEN);
3914 if (!err && !skb_partial_csum_set(skb, off,
3915 offsetof(struct tcphdr,
3916 check)))
3917 err = -EPROTO;
3918 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
3919
3920 case IPPROTO_UDP:
3921 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
3922 off + sizeof(struct udphdr));
3923 if (!err && !skb_partial_csum_set(skb, off,
3924 offsetof(struct udphdr,
3925 check)))
3926 err = -EPROTO;
3927 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
3928 }
3929
3930 return ERR_PTR(-EPROTO);
3931}
3932
3933/* This value should be large enough to cover a tagged ethernet header plus
3934 * maximally sized IP and TCP or UDP headers.
3935 */
3936#define MAX_IP_HDR_LEN 128
3937
3938static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
3939{
3940 unsigned int off;
3941 bool fragment;
3942 __sum16 *csum;
3943 int err;
3944
3945 fragment = false;
3946
3947 err = skb_maybe_pull_tail(skb,
3948 sizeof(struct iphdr),
3949 MAX_IP_HDR_LEN);
3950 if (err < 0)
3951 goto out;
3952
3953 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
3954 fragment = true;
3955
3956 off = ip_hdrlen(skb);
3957
3958 err = -EPROTO;
3959
3960 if (fragment)
3961 goto out;
3962
3963 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
3964 if (IS_ERR(csum))
3965 return PTR_ERR(csum);
3966
3967 if (recalculate)
3968 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3969 ip_hdr(skb)->daddr,
3970 skb->len - off,
3971 ip_hdr(skb)->protocol, 0);
3972 err = 0;
3973
3974out:
3975 return err;
3976}

/* This value should be large enough to cover a tagged ethernet header plus
 * an IPv6 header, all options, and a maximal TCP or UDP header.
 */
#define MAX_IPV6_HDR_LEN 256

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
{
	int err;
	u8 nexthdr;
	unsigned int off;
	unsigned int len;
	bool fragment;
	bool done;
	__sum16 *csum;

	fragment = false;
	done = false;

	off = sizeof(struct ipv6hdr);

	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
	if (err < 0)
		goto out;

	nexthdr = ipv6_hdr(skb)->nexthdr;

	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
	while (off <= len && !done) {
		switch (nexthdr) {
		case IPPROTO_DSTOPTS:
		case IPPROTO_HOPOPTS:
		case IPPROTO_ROUTING: {
			struct ipv6_opt_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ipv6_opt_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_optlen(hp);
			break;
		}
		case IPPROTO_AH: {
			struct ip_auth_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct ip_auth_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
			nexthdr = hp->nexthdr;
			off += ipv6_authlen(hp);
			break;
		}
		case IPPROTO_FRAGMENT: {
			struct frag_hdr *hp;

			err = skb_maybe_pull_tail(skb,
						  off +
						  sizeof(struct frag_hdr),
						  MAX_IPV6_HDR_LEN);
			if (err < 0)
				goto out;

			hp = OPT_HDR(struct frag_hdr, skb, off);

			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
				fragment = true;

			nexthdr = hp->nexthdr;
			off += sizeof(struct frag_hdr);
			break;
		}
		default:
			done = true;
			break;
		}
	}

	err = -EPROTO;

	if (!done || fragment)
		goto out;

	csum = skb_checksum_setup_ip(skb, nexthdr, off);
	if (IS_ERR(csum))
		return PTR_ERR(csum);

	if (recalculate)
		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 skb->len - off, nexthdr, 0);
	err = 0;

out:
	return err;
}

/**
 * skb_checksum_setup - set up partial checksum offset
 * @skb: the skb to set up
 * @recalculate: if true the pseudo-header checksum will be recalculated
 */
int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
{
	int err;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		err = skb_checksum_setup_ipv4(skb, recalculate);
		break;

	case htons(ETH_P_IPV6):
		err = skb_checksum_setup_ipv6(skb, recalculate);
		break;

	default:
		err = -EPROTO;
		break;
	}

	return err;
}
EXPORT_SYMBOL(skb_checksum_setup);
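
/* Usage sketch (illustrative): backends such as virtual-NIC drivers call
 * this on guest-supplied packets before injecting them into the stack,
 * dropping the packet on failure. The surrounding helper is hypothetical.
 *
 *	static int rx_fixup_csum(struct sk_buff *skb)
 *	{
 *		int err = skb_checksum_setup(skb, true);
 *
 *		if (err) {
 *			kfree_skb(skb);
 *			return err;
 *		}
 *		return 0;
 *	}
 */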

/**
 * skb_checksum_maybe_trim - maybe trims the given skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 *
 * Checks whether the given skb has data beyond the given transport length.
 * If so, returns a cloned skb trimmed to this transport length.
 * Otherwise returns the provided skb. Returns NULL in error cases
 * (e.g. transport_len exceeds skb length or out-of-memory).
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
					       unsigned int transport_len)
{
	struct sk_buff *skb_chk;
	unsigned int len = skb_transport_offset(skb) + transport_len;
	int ret;

	if (skb->len < len)
		return NULL;
	else if (skb->len == len)
		return skb;

	skb_chk = skb_clone(skb, GFP_ATOMIC);
	if (!skb_chk)
		return NULL;

	ret = pskb_trim_rcsum(skb_chk, len);
	if (ret) {
		kfree_skb(skb_chk);
		return NULL;
	}

	return skb_chk;
}

/**
 * skb_checksum_trimmed - validate checksum of an skb
 * @skb: the skb to check
 * @transport_len: the data length beyond the network header
 * @skb_chkf: checksum function to use
 *
 * Applies the given checksum function skb_chkf to the provided skb.
 * Returns a checked and maybe trimmed skb. Returns NULL on error.
 *
 * If the skb has data beyond the given transport length, then a
 * trimmed & cloned skb is checked and returned.
 *
 * Caller needs to set the skb transport header and free any returned skb if it
 * differs from the provided skb.
 */
struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
				     unsigned int transport_len,
				     __sum16(*skb_chkf)(struct sk_buff *skb))
{
	struct sk_buff *skb_chk;
	unsigned int offset = skb_transport_offset(skb);
	__sum16 ret;

	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
	if (!skb_chk)
		goto err;

	if (!pskb_may_pull(skb_chk, offset))
		goto err;

	skb_pull_rcsum(skb_chk, offset);
	ret = skb_chkf(skb_chk);
	skb_push_rcsum(skb_chk, offset);

	if (ret)
		goto err;

	return skb_chk;

err:
	if (skb_chk && skb_chk != skb)
		kfree_skb(skb_chk);

	return NULL;
}
EXPORT_SYMBOL(skb_checksum_trimmed);
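
/* Usage sketch (illustrative): multicast-snooping style code validates a
 * message by passing its own checksum routine; ip_compute_csum() over the
 * pulled transport data is shown purely as an example, and the caller must
 * free the returned skb if it differs from the one passed in.
 *
 *	static __sum16 example_chkf(struct sk_buff *skb)
 *	{
 *		return ip_compute_csum(skb->data, skb->len);
 *	}
 *
 *	skb_set_transport_header(skb, ip_hdrlen(skb));
 *	skb_trimmed = skb_checksum_trimmed(skb, transport_len, example_chkf);
 */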

void __skb_warn_lro_forwarding(const struct sk_buff *skb)
{
	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
			     skb->dev->name);
}
EXPORT_SYMBOL(__skb_warn_lro_forwarding);

void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
{
	if (head_stolen) {
		skb_release_head_state(skb);
		kmem_cache_free(skbuff_head_cache, skb);
	} else {
		__kfree_skb(skb);
	}
}
EXPORT_SYMBOL(kfree_skb_partial);

/**
 * skb_try_coalesce - try to merge skb to prior one
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: set to true if @from's head fragment was stolen; the caller
 *	must then release @from with kfree_skb_partial()
 * @delta_truesize: on success, how much @to's truesize grew
 */
bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
		      bool *fragstolen, int *delta_truesize)
{
	int i, delta, len = from->len;

	*fragstolen = false;

	if (skb_cloned(to))
		return false;

	if (len <= skb_tailroom(to)) {
		if (len)
			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
		*delta_truesize = 0;
		return true;
	}

	if (skb_has_frag_list(to) || skb_has_frag_list(from))
		return false;

	if (skb_headlen(from) != 0) {
		struct page *page;
		unsigned int offset;

		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
			return false;

		if (skb_head_is_locked(from))
			return false;

		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));

		page = virt_to_head_page(from->head);
		offset = from->data - (unsigned char *)page_address(page);

		skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
				   page, offset, skb_headlen(from));
		*fragstolen = true;
	} else {
		if (skb_shinfo(to)->nr_frags +
		    skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
			return false;

		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
	}

	WARN_ON_ONCE(delta < len);

	memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
	       skb_shinfo(from)->frags,
	       skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
	skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;

	if (!skb_cloned(from))
		skb_shinfo(from)->nr_frags = 0;

	/* if the skb is not cloned this does nothing
	 * since we set nr_frags to 0.
	 */
	for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
		skb_frag_ref(from, i);

	to->truesize += delta;
	to->len += len;
	to->data_len += len;

	*delta_truesize = delta;
	return true;
}
EXPORT_SYMBOL(skb_try_coalesce);
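
/* Usage sketch (illustrative): receive-queue code typically tries to merge
 * a new skb into the queue tail and only queues it whole when coalescing
 * fails; "tail" and "queue" below are hypothetical.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta))
 *		kfree_skb_partial(skb, fragstolen);
 *	else
 *		__skb_queue_tail(queue, skb);
 */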

/**
 * skb_scrub_packet - scrub an skb
 *
 * @skb: buffer to clean
 * @xnet: packet is crossing netns
 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean a skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
 */
void skb_scrub_packet(struct sk_buff *skb, bool xnet)
{
	skb->tstamp.tv64 = 0;
	skb->pkt_type = PACKET_HOST;
	skb->skb_iif = 0;
	skb->ignore_df = 0;
	skb_dst_drop(skb);
	secpath_reset(skb);
	nf_reset(skb);
	nf_reset_trace(skb);

	if (!xnet)
		return;

	skb_orphan(skb);
	skb->mark = 0;
}
EXPORT_SYMBOL_GPL(skb_scrub_packet);
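
/* Usage sketch (illustrative): a tunnel receive path decapsulating into a
 * possibly different namespace scrubs state that must not leak across; the
 * net_eq() test mirrors what existing tunnel drivers do, and rx_dev/tx_dev
 * are hypothetical devices on either side of the tunnel.
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(rx_dev), dev_net(tx_dev)));
 */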

/**
 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
 *
 * @skb: GSO skb
 *
 * skb_gso_transport_seglen is used to determine the real size of the
 * individual segments, including Layer4 headers (TCP/UDP).
 *
 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
 */
unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int thlen = 0;

	if (skb->encapsulation) {
		thlen = skb_inner_transport_header(skb) -
			skb_transport_header(skb);

		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
			thlen += inner_tcp_hdrlen(skb);
	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
		thlen = tcp_hdrlen(skb);
	}
	/* UFO sets gso_size to the size of the fragmentation
	 * payload, i.e. the size of the L4 (UDP) header is already
	 * accounted for.
	 */
	return thlen + shinfo->gso_size;
}
EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
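
/* Worked example: for a non-encapsulated TCPv4 GSO skb with an MSS
 * (gso_size) of 1448 bytes and a 20-byte TCP header, this returns
 * 20 + 1448 = 1468; the IP and MAC headers are deliberately excluded.
 */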

static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
{
	if (skb_cow(skb, skb_headroom(skb)) < 0) {
		kfree_skb(skb);
		return NULL;
	}

	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len - VLAN_HLEN,
		2 * ETH_ALEN);
	skb->mac_header += VLAN_HLEN;
	return skb;
}

struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(skb_vlan_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = skb_reorder_vlan_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(skb_vlan_untag);
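
/* Usage sketch (illustrative): ingress code that needs a payload VLAN tag
 * moved into skb->vlan_tci (e.g. when hardware did not strip it) does the
 * following; a NULL return means the skb has already been freed.
 *
 *	if (skb->protocol == htons(ETH_P_8021Q) ||
 *	    skb->protocol == htons(ETH_P_8021AD)) {
 *		skb = skb_vlan_untag(skb);
 *		if (unlikely(!skb))
 *			return;
 *	}
 */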

int skb_ensure_writable(struct sk_buff *skb, int write_len)
{
	if (!pskb_may_pull(skb, write_len))
		return -ENOMEM;

	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
		return 0;

	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
}
EXPORT_SYMBOL(skb_ensure_writable);
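
/* Usage sketch (illustrative): code about to rewrite an IPv4 header in
 * place must first make sure the bytes are linear and not shared with a
 * clone; after a successful call, ip_hdr(skb) may be written directly.
 *
 *	err = skb_ensure_writable(skb, skb_network_offset(skb) +
 *				       sizeof(struct iphdr));
 *	if (err)
 *		return err;
 */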

/* remove VLAN header from packet and update csum accordingly. */
static int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
{
	struct vlan_hdr *vhdr;
	unsigned int offset = skb->data - skb_mac_header(skb);
	int err;

	__skb_push(skb, offset);
	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
	if (unlikely(err))
		goto pull;

	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);

	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
	*vlan_tci = ntohs(vhdr->h_vlan_TCI);

	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
	__skb_pull(skb, VLAN_HLEN);

	vlan_set_encap_proto(skb, vhdr);
	skb->mac_header += VLAN_HLEN;

	if (skb_network_offset(skb) < ETH_HLEN)
		skb_set_network_header(skb, ETH_HLEN);

	skb_reset_mac_len(skb);
pull:
	__skb_pull(skb, offset);

	return err;
}

int skb_vlan_pop(struct sk_buff *skb)
{
	u16 vlan_tci;
	__be16 vlan_proto;
	int err;

	if (likely(skb_vlan_tag_present(skb))) {
		skb->vlan_tci = 0;
	} else {
		if (unlikely((skb->protocol != htons(ETH_P_8021Q) &&
			      skb->protocol != htons(ETH_P_8021AD)) ||
			     skb->len < VLAN_ETH_HLEN))
			return 0;

		err = __skb_vlan_pop(skb, &vlan_tci);
		if (err)
			return err;
	}
	/* move next vlan tag to hw accel tag */
	if (likely((skb->protocol != htons(ETH_P_8021Q) &&
		    skb->protocol != htons(ETH_P_8021AD)) ||
		   skb->len < VLAN_ETH_HLEN))
		return 0;

	vlan_proto = skb->protocol;
	err = __skb_vlan_pop(skb, &vlan_tci);
	if (unlikely(err))
		return err;

	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_pop);

int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
{
	if (skb_vlan_tag_present(skb)) {
		unsigned int offset = skb->data - skb_mac_header(skb);
		int err;

		/* __vlan_insert_tag expects skb->data to point at the mac
		 * header, so move skb->data there before calling it and
		 * restore the original position afterwards.
		 */
		__skb_push(skb, offset);
		err = __vlan_insert_tag(skb, skb->vlan_proto,
					skb_vlan_tag_get(skb));
		if (err) {
			__skb_pull(skb, offset);
			return err;
		}

		skb->protocol = skb->vlan_proto;
		skb->mac_len += VLAN_HLEN;

		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
		__skb_pull(skb, offset);
	}
	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
	return 0;
}
EXPORT_SYMBOL(skb_vlan_push);
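
/* Usage sketch (illustrative): pushing a tag and popping it again is a
 * no-op from the packet's point of view; datapaths such as act_vlan and
 * openvswitch use exactly this pair of helpers. VID 100 is arbitrary.
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), 100);
 *	if (!err)
 *		err = skb_vlan_pop(skb);
 */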

/**
 * alloc_skb_with_frags - allocate skb with page frags
 *
 * @header_len: size of linear part
 * @data_len: needed length in frags
 * @max_page_order: max page order desired.
 * @errcode: pointer to error code if any
 * @gfp_mask: allocation mask
 *
 * This can be used to allocate a paged skb, given a maximal order for frags.
 */
struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
				     unsigned long data_len,
				     int max_page_order,
				     int *errcode,
				     gfp_t gfp_mask)
{
	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	unsigned long chunk;
	struct sk_buff *skb;
	struct page *page;
	gfp_t gfp_head;
	int i;

	*errcode = -EMSGSIZE;
	/* Note: this test could be relaxed if we succeeded in allocating
	 * high-order pages...
	 */
	if (npages > MAX_SKB_FRAGS)
		return NULL;

	gfp_head = gfp_mask;
	if (gfp_head & __GFP_DIRECT_RECLAIM)
		gfp_head |= __GFP_REPEAT;

	*errcode = -ENOBUFS;
	skb = alloc_skb(header_len, gfp_head);
	if (!skb)
		return NULL;

	skb->truesize += npages << PAGE_SHIFT;

	for (i = 0; npages > 0; i++) {
		int order = max_page_order;

		while (order) {
			if (npages >= 1 << order) {
				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
						   __GFP_COMP |
						   __GFP_NOWARN |
						   __GFP_NORETRY,
						   order);
				if (page)
					goto fill_page;
				/* Do not retry other high order allocations */
				order = 1;
				max_page_order = 0;
			}
			order--;
		}
		page = alloc_page(gfp_mask);
		if (!page)
			goto failure;
fill_page:
		chunk = min_t(unsigned long, data_len,
			      PAGE_SIZE << order);
		skb_fill_page_desc(skb, i, page, 0, chunk);
		data_len -= chunk;
		npages -= 1 << order;
	}
	return skb;

failure:
	kfree_skb(skb);
	return NULL;
}
EXPORT_SYMBOL(alloc_skb_with_frags);
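
/* Usage sketch (illustrative): allocating a mostly-paged skb with a small
 * linear header, roughly as a sendmsg-style caller might; the 128-byte
 * header and 64 KB payload are arbitrary example values.
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(128, 64 * 1024, PAGE_ALLOC_COSTLY_ORDER,
 *				   &errcode, GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(errcode);
 */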
112static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
113{
114 skb_panic(skb, sz, addr, __func__);
115}
116
117/*
118 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
119 * the caller if emergency pfmemalloc reserves are being used. If it is and
120 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
121 * may be used. Otherwise, the packet data may be discarded until enough
122 * memory is free
123 */
124#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
125 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
126
127static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
128 unsigned long ip, bool *pfmemalloc)
129{
130 void *obj;
131 bool ret_pfmemalloc = false;
132
133 /*
134 * Try a regular allocation, when that fails and we're not entitled
135 * to the reserves, fail.
136 */
137 obj = kmalloc_node_track_caller(size,
138 flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
139 node);
140 if (obj || !(gfp_pfmemalloc_allowed(flags)))
141 goto out;
142
143 /* Try again but now we are using pfmemalloc reserves */
144 ret_pfmemalloc = true;
145 obj = kmalloc_node_track_caller(size, flags, node);
146
147out:
148 if (pfmemalloc)
149 *pfmemalloc = ret_pfmemalloc;
150
151 return obj;
152}
153
154/* Allocate a new skbuff. We do this ourselves so we can fill in a few
155 * 'private' fields and also do memory statistics to find all the
156 * [BEEP] leaks.
157 *
158 */
159
160/**
161 * __alloc_skb - allocate a network buffer
162 * @size: size to allocate
163 * @gfp_mask: allocation mask
164 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
165 * instead of head cache and allocate a cloned (child) skb.
166 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
167 * allocations in case the data is required for writeback
168 * @node: numa node to allocate memory on
169 *
170 * Allocate a new &sk_buff. The returned buffer has no headroom and a
171 * tail room of at least size bytes. The object has a reference count
172 * of one. The return is the buffer. On a failure the return is %NULL.
173 *
174 * Buffers may only be allocated from interrupts using a @gfp_mask of
175 * %GFP_ATOMIC.
176 */
177struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
178 int flags, int node)
179{
180 struct kmem_cache *cache;
181 struct skb_shared_info *shinfo;
182 struct sk_buff *skb;
183 u8 *data;
184 bool pfmemalloc;
185
186 cache = (flags & SKB_ALLOC_FCLONE)
187 ? skbuff_fclone_cache : skbuff_head_cache;
188
189 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
190 gfp_mask |= __GFP_MEMALLOC;
191
192 /* Get the HEAD */
193 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
194 if (!skb)
195 goto out;
196 prefetchw(skb);
197
198 /* We do our best to align skb_shared_info on a separate cache
199 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
200 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
201 * Both skb->head and skb_shared_info are cache line aligned.
202 */
203 size = SKB_DATA_ALIGN(size);
204 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
205 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
206 if (!data)
207 goto nodata;
208 /* kmalloc(size) might give us more room than requested.
209 * Put skb_shared_info exactly at the end of allocated zone,
210 * to allow max possible filling before reallocation.
211 */
212 size = SKB_WITH_OVERHEAD(ksize(data));
213 prefetchw(data + size);
214
215 /*
216 * Only clear those fields we need to clear, not those that we will
217 * actually initialise below. Hence, don't put any more fields after
218 * the tail pointer in struct sk_buff!
219 */
220 memset(skb, 0, offsetof(struct sk_buff, tail));
221 /* Account for allocated memory : skb + skb->head */
222 skb->truesize = SKB_TRUESIZE(size);
223 skb->pfmemalloc = pfmemalloc;
224 refcount_set(&skb->users, 1);
225 skb->head = data;
226 skb->data = data;
227 skb_reset_tail_pointer(skb);
228 skb->end = skb->tail + size;
229 skb->mac_header = (typeof(skb->mac_header))~0U;
230 skb->transport_header = (typeof(skb->transport_header))~0U;
231
232 /* make sure we initialize shinfo sequentially */
233 shinfo = skb_shinfo(skb);
234 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
235 atomic_set(&shinfo->dataref, 1);
236
237 if (flags & SKB_ALLOC_FCLONE) {
238 struct sk_buff_fclones *fclones;
239
240 fclones = container_of(skb, struct sk_buff_fclones, skb1);
241
242 skb->fclone = SKB_FCLONE_ORIG;
243 refcount_set(&fclones->fclone_ref, 1);
244
245 fclones->skb2.fclone = SKB_FCLONE_CLONE;
246 }
247out:
248 return skb;
249nodata:
250 kmem_cache_free(cache, skb);
251 skb = NULL;
252 goto out;
253}
254EXPORT_SYMBOL(__alloc_skb);
255
256/**
257 * __build_skb - build a network buffer
258 * @data: data buffer provided by caller
259 * @frag_size: size of data, or 0 if head was kmalloced
260 *
261 * Allocate a new &sk_buff. Caller provides space holding head and
262 * skb_shared_info. @data must have been allocated by kmalloc() only if
263 * @frag_size is 0, otherwise data should come from the page allocator
264 * or vmalloc()
265 * The return is the new skb buffer.
266 * On a failure the return is %NULL, and @data is not freed.
267 * Notes :
268 * Before IO, driver allocates only data buffer where NIC put incoming frame
269 * Driver should add room at head (NET_SKB_PAD) and
270 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
271 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
272 * before giving packet to stack.
273 * RX rings only contains data buffers, not full skbs.
274 */
275struct sk_buff *__build_skb(void *data, unsigned int frag_size)
276{
277 struct skb_shared_info *shinfo;
278 struct sk_buff *skb;
279 unsigned int size = frag_size ? : ksize(data);
280
281 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
282 if (!skb)
283 return NULL;
284
285 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
286
287 memset(skb, 0, offsetof(struct sk_buff, tail));
288 skb->truesize = SKB_TRUESIZE(size);
289 refcount_set(&skb->users, 1);
290 skb->head = data;
291 skb->data = data;
292 skb_reset_tail_pointer(skb);
293 skb->end = skb->tail + size;
294 skb->mac_header = (typeof(skb->mac_header))~0U;
295 skb->transport_header = (typeof(skb->transport_header))~0U;
296
297 /* make sure we initialize shinfo sequentially */
298 shinfo = skb_shinfo(skb);
299 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
300 atomic_set(&shinfo->dataref, 1);
301
302 return skb;
303}
304
305/* build_skb() is wrapper over __build_skb(), that specifically
306 * takes care of skb->head and skb->pfmemalloc
307 * This means that if @frag_size is not zero, then @data must be backed
308 * by a page fragment, not kmalloc() or vmalloc()
309 */
310struct sk_buff *build_skb(void *data, unsigned int frag_size)
311{
312 struct sk_buff *skb = __build_skb(data, frag_size);
313
314 if (skb && frag_size) {
315 skb->head_frag = 1;
316 if (page_is_pfmemalloc(virt_to_head_page(data)))
317 skb->pfmemalloc = 1;
318 }
319 return skb;
320}
321EXPORT_SYMBOL(build_skb);
322
323#define NAPI_SKB_CACHE_SIZE 64
324
325struct napi_alloc_cache {
326 struct page_frag_cache page;
327 unsigned int skb_count;
328 void *skb_cache[NAPI_SKB_CACHE_SIZE];
329};
330
331static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
332static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
333
334static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
335{
336 struct page_frag_cache *nc;
337 unsigned long flags;
338 void *data;
339
340 local_irq_save(flags);
341 nc = this_cpu_ptr(&netdev_alloc_cache);
342 data = page_frag_alloc(nc, fragsz, gfp_mask);
343 local_irq_restore(flags);
344 return data;
345}
346
347/**
348 * netdev_alloc_frag - allocate a page fragment
349 * @fragsz: fragment size
350 *
351 * Allocates a frag from a page for receive buffer.
352 * Uses GFP_ATOMIC allocations.
353 */
354void *netdev_alloc_frag(unsigned int fragsz)
355{
356 return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
357}
358EXPORT_SYMBOL(netdev_alloc_frag);
359
360static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
361{
362 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
363
364 return page_frag_alloc(&nc->page, fragsz, gfp_mask);
365}
366
367void *napi_alloc_frag(unsigned int fragsz)
368{
369 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
370}
371EXPORT_SYMBOL(napi_alloc_frag);
372
373/**
374 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
375 * @dev: network device to receive on
376 * @len: length to allocate
377 * @gfp_mask: get_free_pages mask, passed to alloc_skb
378 *
379 * Allocate a new &sk_buff and assign it a usage count of one. The
380 * buffer has NET_SKB_PAD headroom built in. Users should allocate
381 * the headroom they think they need without accounting for the
382 * built in space. The built in space is used for optimisations.
383 *
384 * %NULL is returned if there is no free memory.
385 */
386struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
387 gfp_t gfp_mask)
388{
389 struct page_frag_cache *nc;
390 unsigned long flags;
391 struct sk_buff *skb;
392 bool pfmemalloc;
393 void *data;
394
395 len += NET_SKB_PAD;
396
397 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
398 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
399 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
400 if (!skb)
401 goto skb_fail;
402 goto skb_success;
403 }
404
405 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
406 len = SKB_DATA_ALIGN(len);
407
408 if (sk_memalloc_socks())
409 gfp_mask |= __GFP_MEMALLOC;
410
411 local_irq_save(flags);
412
413 nc = this_cpu_ptr(&netdev_alloc_cache);
414 data = page_frag_alloc(nc, len, gfp_mask);
415 pfmemalloc = nc->pfmemalloc;
416
417 local_irq_restore(flags);
418
419 if (unlikely(!data))
420 return NULL;
421
422 skb = __build_skb(data, len);
423 if (unlikely(!skb)) {
424 skb_free_frag(data);
425 return NULL;
426 }
427
428 /* use OR instead of assignment to avoid clearing of bits in mask */
429 if (pfmemalloc)
430 skb->pfmemalloc = 1;
431 skb->head_frag = 1;
432
433skb_success:
434 skb_reserve(skb, NET_SKB_PAD);
435 skb->dev = dev;
436
437skb_fail:
438 return skb;
439}
440EXPORT_SYMBOL(__netdev_alloc_skb);
441
442/**
443 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
444 * @napi: napi instance this buffer was allocated for
445 * @len: length to allocate
446 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
447 *
448 * Allocate a new sk_buff for use in NAPI receive. This buffer will
449 * attempt to allocate the head from a special reserved region used
450 * only for NAPI Rx allocation. By doing this we can save several
451 * CPU cycles by avoiding having to disable and re-enable IRQs.
452 *
453 * %NULL is returned if there is no free memory.
454 */
455struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
456 gfp_t gfp_mask)
457{
458 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
459 struct sk_buff *skb;
460 void *data;
461
462 len += NET_SKB_PAD + NET_IP_ALIGN;
463
464 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
465 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
466 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
467 if (!skb)
468 goto skb_fail;
469 goto skb_success;
470 }
471
472 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
473 len = SKB_DATA_ALIGN(len);
474
475 if (sk_memalloc_socks())
476 gfp_mask |= __GFP_MEMALLOC;
477
478 data = page_frag_alloc(&nc->page, len, gfp_mask);
479 if (unlikely(!data))
480 return NULL;
481
482 skb = __build_skb(data, len);
483 if (unlikely(!skb)) {
484 skb_free_frag(data);
485 return NULL;
486 }
487
488 /* use OR instead of assignment to avoid clearing of bits in mask */
489 if (nc->page.pfmemalloc)
490 skb->pfmemalloc = 1;
491 skb->head_frag = 1;
492
493skb_success:
494 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
495 skb->dev = napi->dev;
496
497skb_fail:
498 return skb;
499}
500EXPORT_SYMBOL(__napi_alloc_skb);
501
502void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
503 int size, unsigned int truesize)
504{
505 skb_fill_page_desc(skb, i, page, off, size);
506 skb->len += size;
507 skb->data_len += size;
508 skb->truesize += truesize;
509}
510EXPORT_SYMBOL(skb_add_rx_frag);
511
512void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
513 unsigned int truesize)
514{
515 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
516
517 skb_frag_size_add(frag, size);
518 skb->len += size;
519 skb->data_len += size;
520 skb->truesize += truesize;
521}
522EXPORT_SYMBOL(skb_coalesce_rx_frag);
523
524static void skb_drop_list(struct sk_buff **listp)
525{
526 kfree_skb_list(*listp);
527 *listp = NULL;
528}
529
530static inline void skb_drop_fraglist(struct sk_buff *skb)
531{
532 skb_drop_list(&skb_shinfo(skb)->frag_list);
533}
534
535static void skb_clone_fraglist(struct sk_buff *skb)
536{
537 struct sk_buff *list;
538
539 skb_walk_frags(skb, list)
540 skb_get(list);
541}
542
543static void skb_free_head(struct sk_buff *skb)
544{
545 unsigned char *head = skb->head;
546
547 if (skb->head_frag)
548 skb_free_frag(head);
549 else
550 kfree(head);
551}
552
553static void skb_release_data(struct sk_buff *skb)
554{
555 struct skb_shared_info *shinfo = skb_shinfo(skb);
556 int i;
557
558 if (skb->cloned &&
559 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
560 &shinfo->dataref))
561 return;
562
563 for (i = 0; i < shinfo->nr_frags; i++)
564 __skb_frag_unref(&shinfo->frags[i]);
565
566 if (shinfo->frag_list)
567 kfree_skb_list(shinfo->frag_list);
568
569 skb_zcopy_clear(skb, true);
570 skb_free_head(skb);
571}
572
573/*
574 * Free an skbuff by memory without cleaning the state.
575 */
576static void kfree_skbmem(struct sk_buff *skb)
577{
578 struct sk_buff_fclones *fclones;
579
580 switch (skb->fclone) {
581 case SKB_FCLONE_UNAVAILABLE:
582 kmem_cache_free(skbuff_head_cache, skb);
583 return;
584
585 case SKB_FCLONE_ORIG:
586 fclones = container_of(skb, struct sk_buff_fclones, skb1);
587
588 /* We usually free the clone (TX completion) before original skb
589 * This test would have no chance to be true for the clone,
590 * while here, branch prediction will be good.
591 */
592 if (refcount_read(&fclones->fclone_ref) == 1)
593 goto fastpath;
594 break;
595
596 default: /* SKB_FCLONE_CLONE */
597 fclones = container_of(skb, struct sk_buff_fclones, skb2);
598 break;
599 }
600 if (!refcount_dec_and_test(&fclones->fclone_ref))
601 return;
602fastpath:
603 kmem_cache_free(skbuff_fclone_cache, fclones);
604}
605
606void skb_release_head_state(struct sk_buff *skb)
607{
608 skb_dst_drop(skb);
609 secpath_reset(skb);
610 if (skb->destructor) {
611 WARN_ON(in_irq());
612 skb->destructor(skb);
613 }
614#if IS_ENABLED(CONFIG_NF_CONNTRACK)
615 nf_conntrack_put(skb_nfct(skb));
616#endif
617#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
618 nf_bridge_put(skb->nf_bridge);
619#endif
620}
621
622/* Free everything but the sk_buff shell. */
623static void skb_release_all(struct sk_buff *skb)
624{
625 skb_release_head_state(skb);
626 if (likely(skb->head))
627 skb_release_data(skb);
628}
629
630/**
631 * __kfree_skb - private function
632 * @skb: buffer
633 *
634 * Free an sk_buff. Release anything attached to the buffer.
635 * Clean the state. This is an internal helper function. Users should
636 * always call kfree_skb
637 */
638
639void __kfree_skb(struct sk_buff *skb)
640{
641 skb_release_all(skb);
642 kfree_skbmem(skb);
643}
644EXPORT_SYMBOL(__kfree_skb);
645
646/**
647 * kfree_skb - free an sk_buff
648 * @skb: buffer to free
649 *
650 * Drop a reference to the buffer and free it if the usage count has
651 * hit zero.
652 */
653void kfree_skb(struct sk_buff *skb)
654{
655 if (!skb_unref(skb))
656 return;
657
658 trace_kfree_skb(skb, __builtin_return_address(0));
659 __kfree_skb(skb);
660}
661EXPORT_SYMBOL(kfree_skb);
662
663void kfree_skb_list(struct sk_buff *segs)
664{
665 while (segs) {
666 struct sk_buff *next = segs->next;
667
668 kfree_skb(segs);
669 segs = next;
670 }
671}
672EXPORT_SYMBOL(kfree_skb_list);
673
674/**
675 * skb_tx_error - report an sk_buff xmit error
676 * @skb: buffer that triggered an error
677 *
678 * Report xmit error if a device callback is tracking this skb.
679 * skb must be freed afterwards.
680 */
681void skb_tx_error(struct sk_buff *skb)
682{
683 skb_zcopy_clear(skb, true);
684}
685EXPORT_SYMBOL(skb_tx_error);
686
687/**
688 * consume_skb - free an skbuff
689 * @skb: buffer to free
690 *
691 * Drop a ref to the buffer and free it if the usage count has hit zero
692 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
693 * is being dropped after a failure and notes that
694 */
695void consume_skb(struct sk_buff *skb)
696{
697 if (!skb_unref(skb))
698 return;
699
700 trace_consume_skb(skb);
701 __kfree_skb(skb);
702}
703EXPORT_SYMBOL(consume_skb);
704
705/**
706 * consume_stateless_skb - free an skbuff, assuming it is stateless
707 * @skb: buffer to free
708 *
709 * Alike consume_skb(), but this variant assumes that this is the last
710 * skb reference and all the head states have been already dropped
711 */
712void __consume_stateless_skb(struct sk_buff *skb)
713{
714 trace_consume_skb(skb);
715 skb_release_data(skb);
716 kfree_skbmem(skb);
717}
718
719void __kfree_skb_flush(void)
720{
721 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
722
723 /* flush skb_cache if containing objects */
724 if (nc->skb_count) {
725 kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
726 nc->skb_cache);
727 nc->skb_count = 0;
728 }
729}
730
731static inline void _kfree_skb_defer(struct sk_buff *skb)
732{
733 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
734
735 /* drop skb->head and call any destructors for packet */
736 skb_release_all(skb);
737
738 /* record skb to CPU local list */
739 nc->skb_cache[nc->skb_count++] = skb;
740
741#ifdef CONFIG_SLUB
742 /* SLUB writes into objects when freeing */
743 prefetchw(skb);
744#endif
745
746 /* flush skb_cache if it is filled */
747 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
748 kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
749 nc->skb_cache);
750 nc->skb_count = 0;
751 }
752}
753void __kfree_skb_defer(struct sk_buff *skb)
754{
755 _kfree_skb_defer(skb);
756}
757
758void napi_consume_skb(struct sk_buff *skb, int budget)
759{
760 if (unlikely(!skb))
761 return;
762
763 /* Zero budget indicate non-NAPI context called us, like netpoll */
764 if (unlikely(!budget)) {
765 dev_consume_skb_any(skb);
766 return;
767 }
768
769 if (!skb_unref(skb))
770 return;
771
772 /* if reaching here SKB is ready to free */
773 trace_consume_skb(skb);
774
775 /* if SKB is a clone, don't handle this case */
776 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
777 __kfree_skb(skb);
778 return;
779 }
780
781 _kfree_skb_defer(skb);
782}
783EXPORT_SYMBOL(napi_consume_skb);
784
785/* Make sure a field is enclosed inside headers_start/headers_end section */
786#define CHECK_SKB_FIELD(field) \
787 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
788 offsetof(struct sk_buff, headers_start)); \
789 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
790 offsetof(struct sk_buff, headers_end)); \
791
792static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
793{
794 new->tstamp = old->tstamp;
795 /* We do not copy old->sk */
796 new->dev = old->dev;
797 memcpy(new->cb, old->cb, sizeof(old->cb));
798 skb_dst_copy(new, old);
799#ifdef CONFIG_XFRM
800 new->sp = secpath_get(old->sp);
801#endif
802 __nf_copy(new, old, false);
803
804 /* Note : this field could be in headers_start/headers_end section
805 * It is not yet because we do not want to have a 16 bit hole
806 */
807 new->queue_mapping = old->queue_mapping;
808
809 memcpy(&new->headers_start, &old->headers_start,
810 offsetof(struct sk_buff, headers_end) -
811 offsetof(struct sk_buff, headers_start));
812 CHECK_SKB_FIELD(protocol);
813 CHECK_SKB_FIELD(csum);
814 CHECK_SKB_FIELD(hash);
815 CHECK_SKB_FIELD(priority);
816 CHECK_SKB_FIELD(skb_iif);
817 CHECK_SKB_FIELD(vlan_proto);
818 CHECK_SKB_FIELD(vlan_tci);
819 CHECK_SKB_FIELD(transport_header);
820 CHECK_SKB_FIELD(network_header);
821 CHECK_SKB_FIELD(mac_header);
822 CHECK_SKB_FIELD(inner_protocol);
823 CHECK_SKB_FIELD(inner_transport_header);
824 CHECK_SKB_FIELD(inner_network_header);
825 CHECK_SKB_FIELD(inner_mac_header);
826 CHECK_SKB_FIELD(mark);
827#ifdef CONFIG_NETWORK_SECMARK
828 CHECK_SKB_FIELD(secmark);
829#endif
830#ifdef CONFIG_NET_RX_BUSY_POLL
831 CHECK_SKB_FIELD(napi_id);
832#endif
833#ifdef CONFIG_XPS
834 CHECK_SKB_FIELD(sender_cpu);
835#endif
836#ifdef CONFIG_NET_SCHED
837 CHECK_SKB_FIELD(tc_index);
838#endif
839
840}
841
842/*
843 * You should not add any new code to this function. Add it to
844 * __copy_skb_header above instead.
845 */
846static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
847{
848#define C(x) n->x = skb->x
849
850 n->next = n->prev = NULL;
851 n->sk = NULL;
852 __copy_skb_header(n, skb);
853
854 C(len);
855 C(data_len);
856 C(mac_len);
857 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
858 n->cloned = 1;
859 n->nohdr = 0;
860 n->peeked = 0;
861 n->destructor = NULL;
862 C(tail);
863 C(end);
864 C(head);
865 C(head_frag);
866 C(data);
867 C(truesize);
868 refcount_set(&n->users, 1);
869
870 atomic_inc(&(skb_shinfo(skb)->dataref));
871 skb->cloned = 1;
872
873 return n;
874#undef C
875}
876
877/**
878 * skb_morph - morph one skb into another
879 * @dst: the skb to receive the contents
880 * @src: the skb to supply the contents
881 *
882 * This is identical to skb_clone except that the target skb is
883 * supplied by the user.
884 *
885 * The target skb is returned upon exit.
886 */
887struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
888{
889 skb_release_all(dst);
890 return __skb_clone(dst, src);
891}
892EXPORT_SYMBOL_GPL(skb_morph);
893
894int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
895{
896 unsigned long max_pg, num_pg, new_pg, old_pg;
897 struct user_struct *user;
898
899 if (capable(CAP_IPC_LOCK) || !size)
900 return 0;
901
902 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
903 max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
904 user = mmp->user ? : current_user();
905
906 do {
907 old_pg = atomic_long_read(&user->locked_vm);
908 new_pg = old_pg + num_pg;
909 if (new_pg > max_pg)
910 return -ENOBUFS;
911 } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
912 old_pg);
913
914 if (!mmp->user) {
915 mmp->user = get_uid(user);
916 mmp->num_pg = num_pg;
917 } else {
918 mmp->num_pg += num_pg;
919 }
920
921 return 0;
922}
923EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
924
925void mm_unaccount_pinned_pages(struct mmpin *mmp)
926{
927 if (mmp->user) {
928 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
929 free_uid(mmp->user);
930 }
931}
932EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
933
934struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
935{
936 struct ubuf_info *uarg;
937 struct sk_buff *skb;
938
939 WARN_ON_ONCE(!in_task());
940
941 if (!sock_flag(sk, SOCK_ZEROCOPY))
942 return NULL;
943
944 skb = sock_omalloc(sk, 0, GFP_KERNEL);
945 if (!skb)
946 return NULL;
947
948 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
949 uarg = (void *)skb->cb;
950 uarg->mmp.user = NULL;
951
952 if (mm_account_pinned_pages(&uarg->mmp, size)) {
953 kfree_skb(skb);
954 return NULL;
955 }
956
957 uarg->callback = sock_zerocopy_callback;
958 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
959 uarg->len = 1;
960 uarg->bytelen = size;
961 uarg->zerocopy = 1;
962 refcount_set(&uarg->refcnt, 1);
963 sock_hold(sk);
964
965 return uarg;
966}
967EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
968
969static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
970{
971 return container_of((void *)uarg, struct sk_buff, cb);
972}
973
974struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
975 struct ubuf_info *uarg)
976{
977 if (uarg) {
978 const u32 byte_limit = 1 << 19; /* limit to a few TSO */
979 u32 bytelen, next;
980
981 /* realloc only when socket is locked (TCP, UDP cork),
982 * so uarg->len and sk_zckey access is serialized
983 */
984 if (!sock_owned_by_user(sk)) {
985 WARN_ON_ONCE(1);
986 return NULL;
987 }
988
989 bytelen = uarg->bytelen + size;
990 if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
991 /* TCP can create new skb to attach new uarg */
992 if (sk->sk_type == SOCK_STREAM)
993 goto new_alloc;
994 return NULL;
995 }
996
997 next = (u32)atomic_read(&sk->sk_zckey);
998 if ((u32)(uarg->id + uarg->len) == next) {
999 if (mm_account_pinned_pages(&uarg->mmp, size))
1000 return NULL;
1001 uarg->len++;
1002 uarg->bytelen = bytelen;
1003 atomic_set(&sk->sk_zckey, ++next);
1004 sock_zerocopy_get(uarg);
1005 return uarg;
1006 }
1007 }
1008
1009new_alloc:
1010 return sock_zerocopy_alloc(sk, size);
1011}
1012EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
1013
1014static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1015{
1016 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1017 u32 old_lo, old_hi;
1018 u64 sum_len;
1019
1020 old_lo = serr->ee.ee_info;
1021 old_hi = serr->ee.ee_data;
1022 sum_len = old_hi - old_lo + 1ULL + len;
1023
1024 if (sum_len >= (1ULL << 32))
1025 return false;
1026
1027 if (lo != old_hi + 1)
1028 return false;
1029
1030 serr->ee.ee_data += len;
1031 return true;
1032}
1033
1034void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
1035{
1036 struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1037 struct sock_exterr_skb *serr;
1038 struct sock *sk = skb->sk;
1039 struct sk_buff_head *q;
1040 unsigned long flags;
1041 u32 lo, hi;
1042 u16 len;
1043
1044 mm_unaccount_pinned_pages(&uarg->mmp);
1045
1046 /* if !len, there was only 1 call, and it was aborted
1047 * so do not queue a completion notification
1048 */
1049 if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1050 goto release;
1051
1052 len = uarg->len;
1053 lo = uarg->id;
1054 hi = uarg->id + len - 1;
1055
1056 serr = SKB_EXT_ERR(skb);
1057 memset(serr, 0, sizeof(*serr));
1058 serr->ee.ee_errno = 0;
1059 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1060 serr->ee.ee_data = hi;
1061 serr->ee.ee_info = lo;
1062 if (!success)
1063 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1064
1065 q = &sk->sk_error_queue;
1066 spin_lock_irqsave(&q->lock, flags);
1067 tail = skb_peek_tail(q);
1068 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1069 !skb_zerocopy_notify_extend(tail, lo, len)) {
1070 __skb_queue_tail(q, skb);
1071 skb = NULL;
1072 }
1073 spin_unlock_irqrestore(&q->lock, flags);
1074
1075 sk->sk_error_report(sk);
1076
1077release:
1078 consume_skb(skb);
1079 sock_put(sk);
1080}
1081EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
1082
1083void sock_zerocopy_put(struct ubuf_info *uarg)
1084{
1085 if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
1086 if (uarg->callback)
1087 uarg->callback(uarg, uarg->zerocopy);
1088 else
1089 consume_skb(skb_from_uarg(uarg));
1090 }
1091}
1092EXPORT_SYMBOL_GPL(sock_zerocopy_put);
1093
1094void sock_zerocopy_put_abort(struct ubuf_info *uarg)
1095{
1096 if (uarg) {
1097 struct sock *sk = skb_from_uarg(uarg)->sk;
1098
1099 atomic_dec(&sk->sk_zckey);
1100 uarg->len--;
1101
1102 sock_zerocopy_put(uarg);
1103 }
1104}
1105EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
1106
1107extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1108 struct iov_iter *from, size_t length);
1109
1110int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1111 struct msghdr *msg, int len,
1112 struct ubuf_info *uarg)
1113{
1114 struct ubuf_info *orig_uarg = skb_zcopy(skb);
1115 struct iov_iter orig_iter = msg->msg_iter;
1116 int err, orig_len = skb->len;
1117
1118 /* An skb can only point to one uarg. This edge case happens when
1119 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1120 */
1121 if (orig_uarg && uarg != orig_uarg)
1122 return -EEXIST;
1123
1124 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1125 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1126 struct sock *save_sk = skb->sk;
1127
1128 /* Streams do not free skb on error. Reset to prev state. */
1129 msg->msg_iter = orig_iter;
1130 skb->sk = sk;
1131 ___pskb_trim(skb, orig_len);
1132 skb->sk = save_sk;
1133 return err;
1134 }
1135
1136 skb_zcopy_set(skb, uarg);
1137 return skb->len - orig_len;
1138}
1139EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1140
1141static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1142 gfp_t gfp_mask)
1143{
1144 if (skb_zcopy(orig)) {
1145 if (skb_zcopy(nskb)) {
1146 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1147 if (!gfp_mask) {
1148 WARN_ON_ONCE(1);
1149 return -ENOMEM;
1150 }
1151 if (skb_uarg(nskb) == skb_uarg(orig))
1152 return 0;
1153 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1154 return -EIO;
1155 }
1156 skb_zcopy_set(nskb, skb_uarg(orig));
1157 }
1158 return 0;
1159}
1160
1161/**
1162 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1163 * @skb: the skb to modify
1164 * @gfp_mask: allocation priority
1165 *
1166 * This must be called on SKBTX_DEV_ZEROCOPY skb.
1167 * It will copy all frags into kernel and drop the reference
1168 * to userspace pages.
1169 *
1170 * If this function is called from an interrupt gfp_mask() must be
1171 * %GFP_ATOMIC.
1172 *
1173 * Returns 0 on success or a negative error code on failure
1174 * to allocate kernel memory to copy to.
1175 */
1176int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1177{
1178 int num_frags = skb_shinfo(skb)->nr_frags;
1179 struct page *page, *head = NULL;
1180 int i, new_frags;
1181 u32 d_off;
1182
1183 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1184 return -EINVAL;
1185
1186 if (!num_frags)
1187 goto release;
1188
1189 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1190 for (i = 0; i < new_frags; i++) {
1191 page = alloc_page(gfp_mask);
1192 if (!page) {
1193 while (head) {
1194 struct page *next = (struct page *)page_private(head);
1195 put_page(head);
1196 head = next;
1197 }
1198 return -ENOMEM;
1199 }
1200 set_page_private(page, (unsigned long)head);
1201 head = page;
1202 }
1203
1204 page = head;
1205 d_off = 0;
1206 for (i = 0; i < num_frags; i++) {
1207 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1208 u32 p_off, p_len, copied;
1209 struct page *p;
1210 u8 *vaddr;
1211
1212 skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
1213 p, p_off, p_len, copied) {
1214 u32 copy, done = 0;
1215 vaddr = kmap_atomic(p);
1216
1217 while (done < p_len) {
1218 if (d_off == PAGE_SIZE) {
1219 d_off = 0;
1220 page = (struct page *)page_private(page);
1221 }
1222 copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1223 memcpy(page_address(page) + d_off,
1224 vaddr + p_off + done, copy);
1225 done += copy;
1226 d_off += copy;
1227 }
1228 kunmap_atomic(vaddr);
1229 }
1230 }
1231
1232 /* skb frags release userspace buffers */
1233 for (i = 0; i < num_frags; i++)
1234 skb_frag_unref(skb, i);
1235
1236 /* skb frags point to kernel buffers */
1237 for (i = 0; i < new_frags - 1; i++) {
1238 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1239 head = (struct page *)page_private(head);
1240 }
1241 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1242 skb_shinfo(skb)->nr_frags = new_frags;
1243
1244release:
1245 skb_zcopy_clear(skb, false);
1246 return 0;
1247}
1248EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1249
1250/**
1251 * skb_clone - duplicate an sk_buff
1252 * @skb: buffer to clone
1253 * @gfp_mask: allocation priority
1254 *
1255 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
1256 * copies share the same packet data but not structure. The new
1257 * buffer has a reference count of 1. If the allocation fails the
1258 * function returns %NULL otherwise the new buffer is returned.
1259 *
1260 * If this function is called from an interrupt gfp_mask() must be
1261 * %GFP_ATOMIC.
1262 */
1263
1264struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1265{
1266 struct sk_buff_fclones *fclones = container_of(skb,
1267 struct sk_buff_fclones,
1268 skb1);
1269 struct sk_buff *n;
1270
1271 if (skb_orphan_frags(skb, gfp_mask))
1272 return NULL;
1273
1274 if (skb->fclone == SKB_FCLONE_ORIG &&
1275 refcount_read(&fclones->fclone_ref) == 1) {
1276 n = &fclones->skb2;
1277 refcount_set(&fclones->fclone_ref, 2);
1278 } else {
1279 if (skb_pfmemalloc(skb))
1280 gfp_mask |= __GFP_MEMALLOC;
1281
1282 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1283 if (!n)
1284 return NULL;
1285
1286 n->fclone = SKB_FCLONE_UNAVAILABLE;
1287 }
1288
1289 return __skb_clone(n, skb);
1290}
1291EXPORT_SYMBOL(skb_clone);
1292
1293static void skb_headers_offset_update(struct sk_buff *skb, int off)
1294{
1295 /* Only adjust this if it actually is csum_start rather than csum */
1296 if (skb->ip_summed == CHECKSUM_PARTIAL)
1297 skb->csum_start += off;
1298 /* {transport,network,mac}_header and tail are relative to skb->head */
1299 skb->transport_header += off;
1300 skb->network_header += off;
1301 if (skb_mac_header_was_set(skb))
1302 skb->mac_header += off;
1303 skb->inner_transport_header += off;
1304 skb->inner_network_header += off;
1305 skb->inner_mac_header += off;
1306}
1307
1308static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1309{
1310 __copy_skb_header(new, old);
1311
1312 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1313 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1314 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1315}
1316
1317static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1318{
1319 if (skb_pfmemalloc(skb))
1320 return SKB_ALLOC_RX;
1321 return 0;
1322}
1323
1324/**
1325 * skb_copy - create private copy of an sk_buff
1326 * @skb: buffer to copy
1327 * @gfp_mask: allocation priority
1328 *
1329 * Make a copy of both an &sk_buff and its data. This is used when the
1330 * caller wishes to modify the data and needs a private copy of the
1331 * data to alter. Returns %NULL on failure or the pointer to the buffer
1332 * on success. The returned buffer has a reference count of 1.
1333 *
1334 * As by-product this function converts non-linear &sk_buff to linear
1335 * one, so that &sk_buff becomes completely private and caller is allowed
1336 * to modify all the data of returned buffer. This means that this
1337 * function is not recommended for use in circumstances when only
1338 * header is going to be modified. Use pskb_copy() instead.
1339 */
1340
1341struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1342{
1343 int headerlen = skb_headroom(skb);
1344 unsigned int size = skb_end_offset(skb) + skb->data_len;
1345 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1346 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1347
1348 if (!n)
1349 return NULL;
1350
1351 /* Set the data pointer */
1352 skb_reserve(n, headerlen);
1353 /* Set the tail pointer and length */
1354 skb_put(n, skb->len);
1355
1356 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1357
1358 copy_skb_header(n, skb);
1359 return n;
1360}
1361EXPORT_SYMBOL(skb_copy);
1362
1363/**
1364 * __pskb_copy_fclone - create copy of an sk_buff with private head.
1365 * @skb: buffer to copy
1366 * @headroom: headroom of new skb
1367 * @gfp_mask: allocation priority
1368 * @fclone: if true allocate the copy of the skb from the fclone
1369 * cache instead of the head cache; it is recommended to set this
1370 * to true for the cases where the copy will likely be cloned
1371 *
1372 * Make a copy of both an &sk_buff and part of its data, located
1373 * in header. Fragmented data remain shared. This is used when
1374 * the caller wishes to modify only header of &sk_buff and needs
1375 * private copy of the header to alter. Returns %NULL on failure
1376 * or the pointer to the buffer on success.
1377 * The returned buffer has a reference count of 1.
1378 */
1379
1380struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1381 gfp_t gfp_mask, bool fclone)
1382{
1383 unsigned int size = skb_headlen(skb) + headroom;
1384 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1385 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1386
1387 if (!n)
1388 goto out;
1389
1390 /* Set the data pointer */
1391 skb_reserve(n, headroom);
1392 /* Set the tail pointer and length */
1393 skb_put(n, skb_headlen(skb));
1394 /* Copy the bytes */
1395 skb_copy_from_linear_data(skb, n->data, n->len);
1396
1397 n->truesize += skb->data_len;
1398 n->data_len = skb->data_len;
1399 n->len = skb->len;
1400
1401 if (skb_shinfo(skb)->nr_frags) {
1402 int i;
1403
1404 if (skb_orphan_frags(skb, gfp_mask) ||
1405 skb_zerocopy_clone(n, skb, gfp_mask)) {
1406 kfree_skb(n);
1407 n = NULL;
1408 goto out;
1409 }
1410 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1411 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1412 skb_frag_ref(skb, i);
1413 }
1414 skb_shinfo(n)->nr_frags = i;
1415 }
1416
1417 if (skb_has_frag_list(skb)) {
1418 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1419 skb_clone_fraglist(n);
1420 }
1421
1422 copy_skb_header(n, skb);
1423out:
1424 return n;
1425}
1426EXPORT_SYMBOL(__pskb_copy_fclone);
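
/* Usage sketch (illustrative only): when just the header has to be rewritten,
 * pskb_copy() (declared in <linux/skbuff.h> and backed by this function)
 * copies only the linear header and keeps the paged data shared.
 * my_rewrite_hdr(), new_hdr and hdrlen are hypothetical; hdrlen is assumed
 * to be no larger than skb_headlen(skb).
 *
 *	static struct sk_buff *my_rewrite_hdr(struct sk_buff *skb,
 *					      const u8 *new_hdr, int hdrlen)
 *	{
 *		struct sk_buff *nskb = pskb_copy(skb, GFP_ATOMIC);
 *
 *		if (!nskb)
 *			return NULL;
 *		memcpy(nskb->data, new_hdr, hdrlen);
 *		consume_skb(skb);
 *		return nskb;
 *	}
 */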
1427
1428/**
1429 * pskb_expand_head - reallocate header of &sk_buff
1430 * @skb: buffer to reallocate
1431 * @nhead: room to add at head
1432 * @ntail: room to add at tail
1433 * @gfp_mask: allocation priority
1434 *
1435 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
1436 * the header of @skb. The &sk_buff itself is not changed and MUST have
1437 * a reference count of 1. Returns zero on success or a negative error
1438 * code if expansion failed; in the latter case, the &sk_buff is not changed.
1439 *
1440 * All pointers pointing into the skb header may change and must be
1441 * reloaded after a call to this function.
1442 */
1443
1444int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1445 gfp_t gfp_mask)
1446{
1447 int i, osize = skb_end_offset(skb);
1448 int size = osize + nhead + ntail;
1449 long off;
1450 u8 *data;
1451
1452 BUG_ON(nhead < 0);
1453
1454 BUG_ON(skb_shared(skb));
1455
1456 size = SKB_DATA_ALIGN(size);
1457
1458 if (skb_pfmemalloc(skb))
1459 gfp_mask |= __GFP_MEMALLOC;
1460 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1461 gfp_mask, NUMA_NO_NODE, NULL);
1462 if (!data)
1463 goto nodata;
1464 size = SKB_WITH_OVERHEAD(ksize(data));
1465
1466 /* Copy only real data... and, alas, header. This should be
1467 * optimized for the cases when header is void.
1468 */
1469 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1470
1471 memcpy((struct skb_shared_info *)(data + size),
1472 skb_shinfo(skb),
1473 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1474
1475 /*
1476 * if shinfo is shared we must drop the old head gracefully, but if it
1477 * is not we can just drop the old head and let the existing refcount
1478 * be since all we did is relocate the values
1479 */
1480 if (skb_cloned(skb)) {
1481 if (skb_orphan_frags(skb, gfp_mask))
1482 goto nofrags;
1483 if (skb_zcopy(skb))
1484 refcount_inc(&skb_uarg(skb)->refcnt);
1485 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1486 skb_frag_ref(skb, i);
1487
1488 if (skb_has_frag_list(skb))
1489 skb_clone_fraglist(skb);
1490
1491 skb_release_data(skb);
1492 } else {
1493 skb_free_head(skb);
1494 }
1495 off = (data + nhead) - skb->head;
1496
1497 skb->head = data;
1498 skb->head_frag = 0;
1499 skb->data += off;
1500#ifdef NET_SKBUFF_DATA_USES_OFFSET
1501 skb->end = size;
1502 off = nhead;
1503#else
1504 skb->end = skb->head + size;
1505#endif
1506 skb->tail += off;
1507 skb_headers_offset_update(skb, nhead);
1508 skb->cloned = 0;
1509 skb->hdr_len = 0;
1510 skb->nohdr = 0;
1511 atomic_set(&skb_shinfo(skb)->dataref, 1);
1512
1513 skb_metadata_clear(skb);
1514
1515 /* It is not generally safe to change skb->truesize.
1516	 * For the moment, we only really care about the rx path, or
1517	 * when the skb is orphaned (not attached to a socket).
1518 */
1519 if (!skb->sk || skb->destructor == sock_edemux)
1520 skb->truesize += size - osize;
1521
1522 return 0;
1523
1524nofrags:
1525 kfree(data);
1526nodata:
1527 return -ENOMEM;
1528}
1529EXPORT_SYMBOL(pskb_expand_head);
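
/* Usage sketch (illustrative only): grow the headroom in place before
 * prepending an encapsulation header; every cached pointer into the old
 * header is stale afterwards. MY_TUNNEL_HLEN, my_build_tunnel_hdr() and the
 * drop label are hypothetical.
 *
 *	if (skb_headroom(skb) < MY_TUNNEL_HLEN &&
 *	    pskb_expand_head(skb, MY_TUNNEL_HLEN - skb_headroom(skb), 0,
 *			     GFP_ATOMIC))
 *		goto drop;
 *	my_build_tunnel_hdr(skb_push(skb, MY_TUNNEL_HLEN));
 */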
1530
1531/* Make private copy of skb with writable head and some headroom */
1532
1533struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1534{
1535 struct sk_buff *skb2;
1536 int delta = headroom - skb_headroom(skb);
1537
1538 if (delta <= 0)
1539 skb2 = pskb_copy(skb, GFP_ATOMIC);
1540 else {
1541 skb2 = skb_clone(skb, GFP_ATOMIC);
1542 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1543 GFP_ATOMIC)) {
1544 kfree_skb(skb2);
1545 skb2 = NULL;
1546 }
1547 }
1548 return skb2;
1549}
1550EXPORT_SYMBOL(skb_realloc_headroom);
1551
1552/**
1553 * skb_copy_expand - copy and expand sk_buff
1554 * @skb: buffer to copy
1555 * @newheadroom: new free bytes at head
1556 * @newtailroom: new free bytes at tail
1557 * @gfp_mask: allocation priority
1558 *
1559 * Make a copy of both an &sk_buff and its data and while doing so
1560 * allocate additional space.
1561 *
1562 * This is used when the caller wishes to modify the data and needs a
1563 * private copy of the data to alter as well as more space for new fields.
1564 * Returns %NULL on failure or the pointer to the buffer
1565 * on success. The returned buffer has a reference count of 1.
1566 *
1567 * You must pass %GFP_ATOMIC as the allocation priority if this function
1568 * is called from an interrupt.
1569 */
1570struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1571 int newheadroom, int newtailroom,
1572 gfp_t gfp_mask)
1573{
1574 /*
1575 * Allocate the copy buffer
1576 */
1577 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1578 gfp_mask, skb_alloc_rx_flag(skb),
1579 NUMA_NO_NODE);
1580 int oldheadroom = skb_headroom(skb);
1581 int head_copy_len, head_copy_off;
1582
1583 if (!n)
1584 return NULL;
1585
1586 skb_reserve(n, newheadroom);
1587
1588 /* Set the tail pointer and length */
1589 skb_put(n, skb->len);
1590
1591 head_copy_len = oldheadroom;
1592 head_copy_off = 0;
1593 if (newheadroom <= head_copy_len)
1594 head_copy_len = newheadroom;
1595 else
1596 head_copy_off = newheadroom - head_copy_len;
1597
1598 /* Copy the linear header and data. */
1599 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1600 skb->len + head_copy_len));
1601
1602 copy_skb_header(n, skb);
1603
1604 skb_headers_offset_update(n, newheadroom - oldheadroom);
1605
1606 return n;
1607}
1608EXPORT_SYMBOL(skb_copy_expand);
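
/* Usage sketch (illustrative only): copy a buffer while reserving room for a
 * header that will be pushed later. MY_EXTRA_HLEN is a hypothetical constant
 * and the error handling is schematic.
 *
 *	struct sk_buff *nskb;
 *
 *	nskb = skb_copy_expand(skb, skb_headroom(skb) + MY_EXTRA_HLEN,
 *			       skb_tailroom(skb), GFP_ATOMIC);
 *	if (!nskb)
 *		return -ENOMEM;
 *	skb_push(nskb, MY_EXTRA_HLEN);
 */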
1609
1610/**
1611 * __skb_pad - zero pad the tail of an skb
1612 * @skb: buffer to pad
1613 * @pad: space to pad
1614 * @free_on_error: free buffer on error
1615 *
1616 * Ensure that a buffer is followed by a padding area that is zero
1617 * filled. Used by network drivers which may DMA or transfer data
1618 * beyond the buffer end onto the wire.
1619 *
1620 * May return an error in out-of-memory cases. The skb is freed on error
1621 * if @free_on_error is true.
1622 */
1623
1624int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1625{
1626 int err;
1627 int ntail;
1628
1629	/* If the skbuff is non-linear, tailroom is always zero. */
1630 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1631 memset(skb->data+skb->len, 0, pad);
1632 return 0;
1633 }
1634
1635 ntail = skb->data_len + pad - (skb->end - skb->tail);
1636 if (likely(skb_cloned(skb) || ntail > 0)) {
1637 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1638 if (unlikely(err))
1639 goto free_skb;
1640 }
1641
1642 /* FIXME: The use of this function with non-linear skb's really needs
1643 * to be audited.
1644 */
1645 err = skb_linearize(skb);
1646 if (unlikely(err))
1647 goto free_skb;
1648
1649 memset(skb->data + skb->len, 0, pad);
1650 return 0;
1651
1652free_skb:
1653 if (free_on_error)
1654 kfree_skb(skb);
1655 return err;
1656}
1657EXPORT_SYMBOL(__skb_pad);
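
/* Usage sketch (illustrative only): a transmit path padding a short frame to
 * the minimum Ethernet length, assuming an ndo_start_xmit()-like context.
 * Because free_on_error is true, the buffer is already gone when __skb_pad()
 * fails.
 *
 *	if (skb->len < ETH_ZLEN) {
 *		unsigned int pad = ETH_ZLEN - skb->len;
 *
 *		if (__skb_pad(skb, pad, true))
 *			return NETDEV_TX_OK;
 *		__skb_put(skb, pad);
 *	}
 */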
1658
1659/**
1660 * pskb_put - add data to the tail of a potentially fragmented buffer
1661 * @skb: start of the buffer to use
1662 * @tail: tail fragment of the buffer to use
1663 * @len: amount of data to add
1664 *
1665 * This function extends the used data area of the potentially
1666 * fragmented buffer. @tail must be the last fragment of @skb -- or
1667 * @skb itself. If this would exceed the total buffer size the kernel
1668 * will panic. A pointer to the first byte of the extra data is
1669 * returned.
1670 */
1671
1672void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1673{
1674 if (tail != skb) {
1675 skb->data_len += len;
1676 skb->len += len;
1677 }
1678 return skb_put(tail, len);
1679}
1680EXPORT_SYMBOL_GPL(pskb_put);
1681
1682/**
1683 * skb_put - add data to a buffer
1684 * @skb: buffer to use
1685 * @len: amount of data to add
1686 *
1687 * This function extends the used data area of the buffer. If this would
1688 * exceed the total buffer size the kernel will panic. A pointer to the
1689 * first byte of the extra data is returned.
1690 */
1691void *skb_put(struct sk_buff *skb, unsigned int len)
1692{
1693 void *tmp = skb_tail_pointer(skb);
1694 SKB_LINEAR_ASSERT(skb);
1695 skb->tail += len;
1696 skb->len += len;
1697 if (unlikely(skb->tail > skb->end))
1698 skb_over_panic(skb, len, __builtin_return_address(0));
1699 return tmp;
1700}
1701EXPORT_SYMBOL(skb_put);
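
/* Usage sketch (illustrative only): the usual pattern for building an
 * outgoing packet is to allocate, reserve headroom for the lower-layer
 * headers and then append the payload with skb_put(). MY_HLEN, my_payload
 * and my_len are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(MY_HLEN + my_len, GFP_KERNEL);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, MY_HLEN);
 *	memcpy(skb_put(skb, my_len), my_payload, my_len);
 */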
1702
1703/**
1704 * skb_push - add data to the start of a buffer
1705 * @skb: buffer to use
1706 * @len: amount of data to add
1707 *
1708 * This function extends the used data area of the buffer at the buffer
1709 * start. If this would exceed the total buffer headroom the kernel will
1710 * panic. A pointer to the first byte of the extra data is returned.
1711 */
1712void *skb_push(struct sk_buff *skb, unsigned int len)
1713{
1714 skb->data -= len;
1715 skb->len += len;
1716	if (unlikely(skb->data < skb->head))
1717 skb_under_panic(skb, len, __builtin_return_address(0));
1718 return skb->data;
1719}
1720EXPORT_SYMBOL(skb_push);
1721
1722/**
1723 * skb_pull - remove data from the start of a buffer
1724 * @skb: buffer to use
1725 * @len: amount of data to remove
1726 *
1727 * This function removes data from the start of a buffer, returning
1728 * the memory to the headroom. A pointer to the next data in the buffer
1729 * is returned. Once the data has been pulled future pushes will overwrite
1730 * the old data.
1731 */
1732void *skb_pull(struct sk_buff *skb, unsigned int len)
1733{
1734 return skb_pull_inline(skb, len);
1735}
1736EXPORT_SYMBOL(skb_pull);
1737
1738/**
1739 * skb_trim - remove end from a buffer
1740 * @skb: buffer to alter
1741 * @len: new length
1742 *
1743 * Cut the length of a buffer down by removing data from the tail. If
1744 * the buffer is already under the length specified it is not modified.
1745 * The skb must be linear.
1746 */
1747void skb_trim(struct sk_buff *skb, unsigned int len)
1748{
1749 if (skb->len > len)
1750 __skb_trim(skb, len);
1751}
1752EXPORT_SYMBOL(skb_trim);
1753
1754/* Trims skb to length len. It can change skb pointers.
1755 */
1756
1757int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1758{
1759 struct sk_buff **fragp;
1760 struct sk_buff *frag;
1761 int offset = skb_headlen(skb);
1762 int nfrags = skb_shinfo(skb)->nr_frags;
1763 int i;
1764 int err;
1765
1766 if (skb_cloned(skb) &&
1767 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1768 return err;
1769
1770 i = 0;
1771 if (offset >= len)
1772 goto drop_pages;
1773
1774 for (; i < nfrags; i++) {
1775 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1776
1777 if (end < len) {
1778 offset = end;
1779 continue;
1780 }
1781
1782 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1783
1784drop_pages:
1785 skb_shinfo(skb)->nr_frags = i;
1786
1787 for (; i < nfrags; i++)
1788 skb_frag_unref(skb, i);
1789
1790 if (skb_has_frag_list(skb))
1791 skb_drop_fraglist(skb);
1792 goto done;
1793 }
1794
1795 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1796 fragp = &frag->next) {
1797 int end = offset + frag->len;
1798
1799 if (skb_shared(frag)) {
1800 struct sk_buff *nfrag;
1801
1802 nfrag = skb_clone(frag, GFP_ATOMIC);
1803 if (unlikely(!nfrag))
1804 return -ENOMEM;
1805
1806 nfrag->next = frag->next;
1807 consume_skb(frag);
1808 frag = nfrag;
1809 *fragp = frag;
1810 }
1811
1812 if (end < len) {
1813 offset = end;
1814 continue;
1815 }
1816
1817 if (end > len &&
1818 unlikely((err = pskb_trim(frag, len - offset))))
1819 return err;
1820
1821 if (frag->next)
1822 skb_drop_list(&frag->next);
1823 break;
1824 }
1825
1826done:
1827 if (len > skb_headlen(skb)) {
1828 skb->data_len -= skb->len - len;
1829 skb->len = len;
1830 } else {
1831 skb->len = len;
1832 skb->data_len = 0;
1833 skb_set_tail_pointer(skb, len);
1834 }
1835
1836 if (!skb->sk || skb->destructor == sock_edemux)
1837 skb_condense(skb);
1838 return 0;
1839}
1840EXPORT_SYMBOL(___pskb_trim);
1841
1842/**
1843 * __pskb_pull_tail - advance tail of skb header
1844 * @skb: buffer to reallocate
1845 * @delta: number of bytes to advance tail
1846 *
1847 * This function only makes sense on a fragmented &sk_buff; it expands
1848 * the header, moving its tail forward and copying the necessary data
1849 * from the fragmented part.
1850 *
1851 * &sk_buff MUST have reference count of 1.
1852 *
1853 * Returns %NULL (and the &sk_buff does not change) if the pull failed,
1854 * or the value of the new tail of the skb on success.
1855 *
1856 * All pointers pointing into the skb header may change and must be
1857 * reloaded after a call to this function.
1858 */
1859
1860/* Moves tail of skb head forward, copying data from fragmented part,
1861 * when it is necessary.
1862 * 1. It may fail due to malloc failure.
1863 * 2. It may change skb pointers.
1864 *
1865 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1866 */
1867void *__pskb_pull_tail(struct sk_buff *skb, int delta)
1868{
1869	/* If the skb does not have enough free space at the tail, get a new
1870	 * one plus 128 bytes for future expansions. If we have enough room
1871	 * at the tail, reallocate without expansion only if the skb is cloned.
1872	 */
1873 int i, k, eat = (skb->tail + delta) - skb->end;
1874
1875 if (eat > 0 || skb_cloned(skb)) {
1876 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1877 GFP_ATOMIC))
1878 return NULL;
1879 }
1880
1881 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
1882 skb_tail_pointer(skb), delta));
1883
1884 /* Optimization: no fragments, no reasons to preestimate
1885 * size of pulled pages. Superb.
1886 */
1887 if (!skb_has_frag_list(skb))
1888 goto pull_pages;
1889
1890 /* Estimate size of pulled pages. */
1891 eat = delta;
1892 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1893 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1894
1895 if (size >= eat)
1896 goto pull_pages;
1897 eat -= size;
1898 }
1899
1900	/* If we need to update the frag list, we are in trouble.
1901	 * Certainly, it is possible to add an offset to the skb data,
1902	 * but taking into account that pulling is expected to be a very
1903	 * rare operation, it is worth fighting against further bloating of
1904	 * the skb head and crucifying ourselves here instead.
1905	 * Pure masochism, indeed. 8)8)
1906 */
1907 if (eat) {
1908 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1909 struct sk_buff *clone = NULL;
1910 struct sk_buff *insp = NULL;
1911
1912 do {
1913 BUG_ON(!list);
1914
1915 if (list->len <= eat) {
1916 /* Eaten as whole. */
1917 eat -= list->len;
1918 list = list->next;
1919 insp = list;
1920 } else {
1921 /* Eaten partially. */
1922
1923 if (skb_shared(list)) {
1924 /* Sucks! We need to fork list. :-( */
1925 clone = skb_clone(list, GFP_ATOMIC);
1926 if (!clone)
1927 return NULL;
1928 insp = list->next;
1929 list = clone;
1930 } else {
1931 /* This may be pulled without
1932 * problems. */
1933 insp = list;
1934 }
1935 if (!pskb_pull(list, eat)) {
1936 kfree_skb(clone);
1937 return NULL;
1938 }
1939 break;
1940 }
1941 } while (eat);
1942
1943 /* Free pulled out fragments. */
1944 while ((list = skb_shinfo(skb)->frag_list) != insp) {
1945 skb_shinfo(skb)->frag_list = list->next;
1946 kfree_skb(list);
1947 }
1948 /* And insert new clone at head. */
1949 if (clone) {
1950 clone->next = list;
1951 skb_shinfo(skb)->frag_list = clone;
1952 }
1953 }
1954 /* Success! Now we may commit changes to skb data. */
1955
1956pull_pages:
1957 eat = delta;
1958 k = 0;
1959 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1960 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1961
1962 if (size <= eat) {
1963 skb_frag_unref(skb, i);
1964 eat -= size;
1965 } else {
1966 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1967 if (eat) {
1968 skb_shinfo(skb)->frags[k].page_offset += eat;
1969 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1970 if (!i)
1971 goto end;
1972 eat = 0;
1973 }
1974 k++;
1975 }
1976 }
1977 skb_shinfo(skb)->nr_frags = k;
1978
1979end:
1980 skb->tail += delta;
1981 skb->data_len -= delta;
1982
1983 if (!skb->data_len)
1984 skb_zcopy_clear(skb, false);
1985
1986 return skb_tail_pointer(skb);
1987}
1988EXPORT_SYMBOL(__pskb_pull_tail);
1989
1990/**
1991 * skb_copy_bits - copy bits from skb to kernel buffer
1992 * @skb: source skb
1993 * @offset: offset in source
1994 * @to: destination buffer
1995 * @len: number of bytes to copy
1996 *
1997 * Copy the specified number of bytes from the source skb to the
1998 * destination buffer.
1999 *
2000 * CAUTION ! :
2001 * If its prototype is ever changed,
2002 * check arch/{*}/net/{*}.S files,
2003 * since it is called from BPF assembly code.
2004 */
2005int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2006{
2007 int start = skb_headlen(skb);
2008 struct sk_buff *frag_iter;
2009 int i, copy;
2010
2011 if (offset > (int)skb->len - len)
2012 goto fault;
2013
2014 /* Copy header. */
2015 if ((copy = start - offset) > 0) {
2016 if (copy > len)
2017 copy = len;
2018 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2019 if ((len -= copy) == 0)
2020 return 0;
2021 offset += copy;
2022 to += copy;
2023 }
2024
2025 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2026 int end;
2027 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2028
2029 WARN_ON(start > offset + len);
2030
2031 end = start + skb_frag_size(f);
2032 if ((copy = end - offset) > 0) {
2033 u32 p_off, p_len, copied;
2034 struct page *p;
2035 u8 *vaddr;
2036
2037 if (copy > len)
2038 copy = len;
2039
2040 skb_frag_foreach_page(f,
2041 f->page_offset + offset - start,
2042 copy, p, p_off, p_len, copied) {
2043 vaddr = kmap_atomic(p);
2044 memcpy(to + copied, vaddr + p_off, p_len);
2045 kunmap_atomic(vaddr);
2046 }
2047
2048 if ((len -= copy) == 0)
2049 return 0;
2050 offset += copy;
2051 to += copy;
2052 }
2053 start = end;
2054 }
2055
2056 skb_walk_frags(skb, frag_iter) {
2057 int end;
2058
2059 WARN_ON(start > offset + len);
2060
2061 end = start + frag_iter->len;
2062 if ((copy = end - offset) > 0) {
2063 if (copy > len)
2064 copy = len;
2065 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2066 goto fault;
2067 if ((len -= copy) == 0)
2068 return 0;
2069 offset += copy;
2070 to += copy;
2071 }
2072 start = end;
2073 }
2074
2075 if (!len)
2076 return 0;
2077
2078fault:
2079 return -EFAULT;
2080}
2081EXPORT_SYMBOL(skb_copy_bits);
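
/* Usage sketch (illustrative only): pull a fixed-size header out of a
 * possibly non-linear skb into a stack buffer. struct my_proto_hdr and the
 * drop label are hypothetical; for headers that are usually linear,
 * skb_header_pointer() is the more common helper.
 *
 *	struct my_proto_hdr hdr;
 *
 *	if (skb_copy_bits(skb, 0, &hdr, sizeof(hdr)))
 *		goto drop;
 */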
2082
2083/*
2084 * Callback from splice_to_pipe(), if we need to release some pages
2085 * at the end of the spd in case we errored out while filling the pipe.
2086 */
2087static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2088{
2089 put_page(spd->pages[i]);
2090}
2091
2092static struct page *linear_to_page(struct page *page, unsigned int *len,
2093 unsigned int *offset,
2094 struct sock *sk)
2095{
2096 struct page_frag *pfrag = sk_page_frag(sk);
2097
2098 if (!sk_page_frag_refill(sk, pfrag))
2099 return NULL;
2100
2101 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2102
2103 memcpy(page_address(pfrag->page) + pfrag->offset,
2104 page_address(page) + *offset, *len);
2105 *offset = pfrag->offset;
2106 pfrag->offset += *len;
2107
2108 return pfrag->page;
2109}
2110
2111static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2112 struct page *page,
2113 unsigned int offset)
2114{
2115 return spd->nr_pages &&
2116 spd->pages[spd->nr_pages - 1] == page &&
2117 (spd->partial[spd->nr_pages - 1].offset +
2118 spd->partial[spd->nr_pages - 1].len == offset);
2119}
2120
2121/*
2122 * Fill page/offset/length into spd, if it can hold more pages.
2123 */
2124static bool spd_fill_page(struct splice_pipe_desc *spd,
2125 struct pipe_inode_info *pipe, struct page *page,
2126 unsigned int *len, unsigned int offset,
2127 bool linear,
2128 struct sock *sk)
2129{
2130 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2131 return true;
2132
2133 if (linear) {
2134 page = linear_to_page(page, len, &offset, sk);
2135 if (!page)
2136 return true;
2137 }
2138 if (spd_can_coalesce(spd, page, offset)) {
2139 spd->partial[spd->nr_pages - 1].len += *len;
2140 return false;
2141 }
2142 get_page(page);
2143 spd->pages[spd->nr_pages] = page;
2144 spd->partial[spd->nr_pages].len = *len;
2145 spd->partial[spd->nr_pages].offset = offset;
2146 spd->nr_pages++;
2147
2148 return false;
2149}
2150
2151static bool __splice_segment(struct page *page, unsigned int poff,
2152 unsigned int plen, unsigned int *off,
2153 unsigned int *len,
2154 struct splice_pipe_desc *spd, bool linear,
2155 struct sock *sk,
2156 struct pipe_inode_info *pipe)
2157{
2158 if (!*len)
2159 return true;
2160
2161 /* skip this segment if already processed */
2162 if (*off >= plen) {
2163 *off -= plen;
2164 return false;
2165 }
2166
2167 /* ignore any bits we already processed */
2168 poff += *off;
2169 plen -= *off;
2170 *off = 0;
2171
2172 do {
2173 unsigned int flen = min(*len, plen);
2174
2175 if (spd_fill_page(spd, pipe, page, &flen, poff,
2176 linear, sk))
2177 return true;
2178 poff += flen;
2179 plen -= flen;
2180 *len -= flen;
2181 } while (*len && plen);
2182
2183 return false;
2184}
2185
2186/*
2187 * Map linear and fragment data from the skb to spd. It reports true if the
2188 * pipe is full or if we already spliced the requested length.
2189 */
2190static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2191 unsigned int *offset, unsigned int *len,
2192 struct splice_pipe_desc *spd, struct sock *sk)
2193{
2194 int seg;
2195 struct sk_buff *iter;
2196
2197 /* map the linear part :
2198 * If skb->head_frag is set, this 'linear' part is backed by a
2199 * fragment, and if the head is not shared with any clones then
2200 * we can avoid a copy since we own the head portion of this page.
2201 */
2202 if (__splice_segment(virt_to_page(skb->data),
2203 (unsigned long) skb->data & (PAGE_SIZE - 1),
2204 skb_headlen(skb),
2205 offset, len, spd,
2206 skb_head_is_locked(skb),
2207 sk, pipe))
2208 return true;
2209
2210 /*
2211 * then map the fragments
2212 */
2213 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2214 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2215
2216 if (__splice_segment(skb_frag_page(f),
2217 f->page_offset, skb_frag_size(f),
2218 offset, len, spd, false, sk, pipe))
2219 return true;
2220 }
2221
2222 skb_walk_frags(skb, iter) {
2223 if (*offset >= iter->len) {
2224 *offset -= iter->len;
2225 continue;
2226 }
2227 /* __skb_splice_bits() only fails if the output has no room
2228 * left, so no point in going over the frag_list for the error
2229 * case.
2230 */
2231 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2232 return true;
2233 }
2234
2235 return false;
2236}
2237
2238/*
2239 * Map data from the skb to a pipe. Should handle both the linear part,
2240 * the fragments, and the frag list.
2241 */
2242int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2243 struct pipe_inode_info *pipe, unsigned int tlen,
2244 unsigned int flags)
2245{
2246 struct partial_page partial[MAX_SKB_FRAGS];
2247 struct page *pages[MAX_SKB_FRAGS];
2248 struct splice_pipe_desc spd = {
2249 .pages = pages,
2250 .partial = partial,
2251 .nr_pages_max = MAX_SKB_FRAGS,
2252 .ops = &nosteal_pipe_buf_ops,
2253 .spd_release = sock_spd_release,
2254 };
2255 int ret = 0;
2256
2257 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2258
2259 if (spd.nr_pages)
2260 ret = splice_to_pipe(pipe, &spd);
2261
2262 return ret;
2263}
2264EXPORT_SYMBOL_GPL(skb_splice_bits);
2265
2266/* Send skb data on a socket. Socket must be locked. */
2267int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2268 int len)
2269{
2270 unsigned int orig_len = len;
2271 struct sk_buff *head = skb;
2272 unsigned short fragidx;
2273 int slen, ret;
2274
2275do_frag_list:
2276
2277 /* Deal with head data */
2278 while (offset < skb_headlen(skb) && len) {
2279 struct kvec kv;
2280 struct msghdr msg;
2281
2282 slen = min_t(int, len, skb_headlen(skb) - offset);
2283 kv.iov_base = skb->data + offset;
2284 kv.iov_len = slen;
2285 memset(&msg, 0, sizeof(msg));
2286
2287 ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2288 if (ret <= 0)
2289 goto error;
2290
2291 offset += ret;
2292 len -= ret;
2293 }
2294
2295 /* All the data was skb head? */
2296 if (!len)
2297 goto out;
2298
2299 /* Make offset relative to start of frags */
2300 offset -= skb_headlen(skb);
2301
2302 /* Find where we are in frag list */
2303 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2304 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2305
2306 if (offset < frag->size)
2307 break;
2308
2309 offset -= frag->size;
2310 }
2311
2312 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2313 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2314
2315 slen = min_t(size_t, len, frag->size - offset);
2316
2317 while (slen) {
2318 ret = kernel_sendpage_locked(sk, frag->page.p,
2319 frag->page_offset + offset,
2320 slen, MSG_DONTWAIT);
2321 if (ret <= 0)
2322 goto error;
2323
2324 len -= ret;
2325 offset += ret;
2326 slen -= ret;
2327 }
2328
2329 offset = 0;
2330 }
2331
2332 if (len) {
2333 /* Process any frag lists */
2334
2335 if (skb == head) {
2336 if (skb_has_frag_list(skb)) {
2337 skb = skb_shinfo(skb)->frag_list;
2338 goto do_frag_list;
2339 }
2340 } else if (skb->next) {
2341 skb = skb->next;
2342 goto do_frag_list;
2343 }
2344 }
2345
2346out:
2347 return orig_len - len;
2348
2349error:
2350 return orig_len == len ? ret : orig_len - len;
2351}
2352EXPORT_SYMBOL_GPL(skb_send_sock_locked);
2353
2354/* Send skb data on a socket. */
2355int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
2356{
2357 int ret = 0;
2358
2359 lock_sock(sk);
2360 ret = skb_send_sock_locked(sk, skb, offset, len);
2361 release_sock(sk);
2362
2363 return ret;
2364}
2365EXPORT_SYMBOL_GPL(skb_send_sock);
2366
2367/**
2368 * skb_store_bits - store bits from kernel buffer to skb
2369 * @skb: destination buffer
2370 * @offset: offset in destination
2371 * @from: source buffer
2372 * @len: number of bytes to copy
2373 *
2374 * Copy the specified number of bytes from the source buffer to the
2375 * destination skb. This function handles all the messy bits of
2376 * traversing fragment lists and such.
2377 */
2378
2379int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2380{
2381 int start = skb_headlen(skb);
2382 struct sk_buff *frag_iter;
2383 int i, copy;
2384
2385 if (offset > (int)skb->len - len)
2386 goto fault;
2387
2388 if ((copy = start - offset) > 0) {
2389 if (copy > len)
2390 copy = len;
2391 skb_copy_to_linear_data_offset(skb, offset, from, copy);
2392 if ((len -= copy) == 0)
2393 return 0;
2394 offset += copy;
2395 from += copy;
2396 }
2397
2398 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2399 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2400 int end;
2401
2402 WARN_ON(start > offset + len);
2403
2404 end = start + skb_frag_size(frag);
2405 if ((copy = end - offset) > 0) {
2406 u32 p_off, p_len, copied;
2407 struct page *p;
2408 u8 *vaddr;
2409
2410 if (copy > len)
2411 copy = len;
2412
2413 skb_frag_foreach_page(frag,
2414 frag->page_offset + offset - start,
2415 copy, p, p_off, p_len, copied) {
2416 vaddr = kmap_atomic(p);
2417 memcpy(vaddr + p_off, from + copied, p_len);
2418 kunmap_atomic(vaddr);
2419 }
2420
2421 if ((len -= copy) == 0)
2422 return 0;
2423 offset += copy;
2424 from += copy;
2425 }
2426 start = end;
2427 }
2428
2429 skb_walk_frags(skb, frag_iter) {
2430 int end;
2431
2432 WARN_ON(start > offset + len);
2433
2434 end = start + frag_iter->len;
2435 if ((copy = end - offset) > 0) {
2436 if (copy > len)
2437 copy = len;
2438 if (skb_store_bits(frag_iter, offset - start,
2439 from, copy))
2440 goto fault;
2441 if ((len -= copy) == 0)
2442 return 0;
2443 offset += copy;
2444 from += copy;
2445 }
2446 start = end;
2447 }
2448 if (!len)
2449 return 0;
2450
2451fault:
2452 return -EFAULT;
2453}
2454EXPORT_SYMBOL(skb_store_bits);
2455
2456/* Checksum skb data. */
2457__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2458 __wsum csum, const struct skb_checksum_ops *ops)
2459{
2460 int start = skb_headlen(skb);
2461 int i, copy = start - offset;
2462 struct sk_buff *frag_iter;
2463 int pos = 0;
2464
2465 /* Checksum header. */
2466 if (copy > 0) {
2467 if (copy > len)
2468 copy = len;
2469 csum = ops->update(skb->data + offset, copy, csum);
2470 if ((len -= copy) == 0)
2471 return csum;
2472 offset += copy;
2473 pos = copy;
2474 }
2475
2476 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2477 int end;
2478 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2479
2480 WARN_ON(start > offset + len);
2481
2482 end = start + skb_frag_size(frag);
2483 if ((copy = end - offset) > 0) {
2484 u32 p_off, p_len, copied;
2485 struct page *p;
2486 __wsum csum2;
2487 u8 *vaddr;
2488
2489 if (copy > len)
2490 copy = len;
2491
2492 skb_frag_foreach_page(frag,
2493 frag->page_offset + offset - start,
2494 copy, p, p_off, p_len, copied) {
2495 vaddr = kmap_atomic(p);
2496 csum2 = ops->update(vaddr + p_off, p_len, 0);
2497 kunmap_atomic(vaddr);
2498 csum = ops->combine(csum, csum2, pos, p_len);
2499 pos += p_len;
2500 }
2501
2502 if (!(len -= copy))
2503 return csum;
2504 offset += copy;
2505 }
2506 start = end;
2507 }
2508
2509 skb_walk_frags(skb, frag_iter) {
2510 int end;
2511
2512 WARN_ON(start > offset + len);
2513
2514 end = start + frag_iter->len;
2515 if ((copy = end - offset) > 0) {
2516 __wsum csum2;
2517 if (copy > len)
2518 copy = len;
2519 csum2 = __skb_checksum(frag_iter, offset - start,
2520 copy, 0, ops);
2521 csum = ops->combine(csum, csum2, pos, copy);
2522 if ((len -= copy) == 0)
2523 return csum;
2524 offset += copy;
2525 pos += copy;
2526 }
2527 start = end;
2528 }
2529 BUG_ON(len);
2530
2531 return csum;
2532}
2533EXPORT_SYMBOL(__skb_checksum);
2534
2535__wsum skb_checksum(const struct sk_buff *skb, int offset,
2536 int len, __wsum csum)
2537{
2538 const struct skb_checksum_ops ops = {
2539 .update = csum_partial_ext,
2540 .combine = csum_block_add_ext,
2541 };
2542
2543 return __skb_checksum(skb, offset, len, csum, &ops);
2544}
2545EXPORT_SYMBOL(skb_checksum);
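
/* Usage sketch (illustrative only): checksum the transport payload of a
 * possibly fragmented skb and fold it to 16 bits; assumes the transport
 * header offset has already been set on the skb.
 *
 *	int thoff = skb_transport_offset(skb);
 *	__wsum csum;
 *	__sum16 folded;
 *
 *	csum = skb_checksum(skb, thoff, skb->len - thoff, 0);
 *	folded = csum_fold(csum);
 */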
2546
2547/* Both of above in one bottle. */
2548
2549__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2550 u8 *to, int len, __wsum csum)
2551{
2552 int start = skb_headlen(skb);
2553 int i, copy = start - offset;
2554 struct sk_buff *frag_iter;
2555 int pos = 0;
2556
2557 /* Copy header. */
2558 if (copy > 0) {
2559 if (copy > len)
2560 copy = len;
2561 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2562 copy, csum);
2563 if ((len -= copy) == 0)
2564 return csum;
2565 offset += copy;
2566 to += copy;
2567 pos = copy;
2568 }
2569
2570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571 int end;
2572
2573 WARN_ON(start > offset + len);
2574
2575 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2576 if ((copy = end - offset) > 0) {
2577 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2578 u32 p_off, p_len, copied;
2579 struct page *p;
2580 __wsum csum2;
2581 u8 *vaddr;
2582
2583 if (copy > len)
2584 copy = len;
2585
2586 skb_frag_foreach_page(frag,
2587 frag->page_offset + offset - start,
2588 copy, p, p_off, p_len, copied) {
2589 vaddr = kmap_atomic(p);
2590 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2591 to + copied,
2592 p_len, 0);
2593 kunmap_atomic(vaddr);
2594 csum = csum_block_add(csum, csum2, pos);
2595 pos += p_len;
2596 }
2597
2598 if (!(len -= copy))
2599 return csum;
2600 offset += copy;
2601 to += copy;
2602 }
2603 start = end;
2604 }
2605
2606 skb_walk_frags(skb, frag_iter) {
2607 __wsum csum2;
2608 int end;
2609
2610 WARN_ON(start > offset + len);
2611
2612 end = start + frag_iter->len;
2613 if ((copy = end - offset) > 0) {
2614 if (copy > len)
2615 copy = len;
2616 csum2 = skb_copy_and_csum_bits(frag_iter,
2617 offset - start,
2618 to, copy, 0);
2619 csum = csum_block_add(csum, csum2, pos);
2620 if ((len -= copy) == 0)
2621 return csum;
2622 offset += copy;
2623 to += copy;
2624 pos += copy;
2625 }
2626 start = end;
2627 }
2628 BUG_ON(len);
2629 return csum;
2630}
2631EXPORT_SYMBOL(skb_copy_and_csum_bits);
2632
2633static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2634{
2635 net_warn_ratelimited(
2636 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2637 __func__);
2638 return 0;
2639}
2640
2641static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2642 int offset, int len)
2643{
2644 net_warn_ratelimited(
2645 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2646 __func__);
2647 return 0;
2648}
2649
2650static const struct skb_checksum_ops default_crc32c_ops = {
2651 .update = warn_crc32c_csum_update,
2652 .combine = warn_crc32c_csum_combine,
2653};
2654
2655const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2656 &default_crc32c_ops;
2657EXPORT_SYMBOL(crc32c_csum_stub);
2658
2659/**
2660 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2661 * @from: source buffer
2662 *
2663 * Calculates the amount of linear headroom needed in the 'to' skb passed
2664 * into skb_zerocopy().
2665 */
2666unsigned int
2667skb_zerocopy_headlen(const struct sk_buff *from)
2668{
2669 unsigned int hlen = 0;
2670
2671 if (!from->head_frag ||
2672 skb_headlen(from) < L1_CACHE_BYTES ||
2673 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2674 hlen = skb_headlen(from);
2675
2676 if (skb_has_frag_list(from))
2677 hlen = from->len;
2678
2679 return hlen;
2680}
2681EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2682
2683/**
2684 * skb_zerocopy - Zero copy skb to skb
2685 * @to: destination buffer
2686 * @from: source buffer
2687 * @len: number of bytes to copy from source buffer
2688 * @hlen: size of linear headroom in destination buffer
2689 *
2690 * Copies up to `len` bytes from `from` to `to` by creating references
2691 * to the frags in the source buffer.
2692 *
2693 * The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2694 * headroom in the `to` buffer.
2695 *
2696 * Return value:
2697 * 0: everything is OK
2698 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2699 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2700 */
2701int
2702skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2703{
2704 int i, j = 0;
2705 int plen = 0; /* length of skb->head fragment */
2706 int ret;
2707 struct page *page;
2708 unsigned int offset;
2709
2710 BUG_ON(!from->head_frag && !hlen);
2711
2712	/* don't bother with small payloads */
2713 if (len <= skb_tailroom(to))
2714 return skb_copy_bits(from, 0, skb_put(to, len), len);
2715
2716 if (hlen) {
2717 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2718 if (unlikely(ret))
2719 return ret;
2720 len -= hlen;
2721 } else {
2722 plen = min_t(int, skb_headlen(from), len);
2723 if (plen) {
2724 page = virt_to_head_page(from->head);
2725 offset = from->data - (unsigned char *)page_address(page);
2726 __skb_fill_page_desc(to, 0, page, offset, plen);
2727 get_page(page);
2728 j = 1;
2729 len -= plen;
2730 }
2731 }
2732
2733 to->truesize += len + plen;
2734 to->len += len + plen;
2735 to->data_len += len + plen;
2736
2737 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2738 skb_tx_error(from);
2739 return -ENOMEM;
2740 }
2741 skb_zerocopy_clone(to, from, GFP_ATOMIC);
2742
2743 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2744 if (!len)
2745 break;
2746 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2747 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2748 len -= skb_shinfo(to)->frags[j].size;
2749 skb_frag_ref(to, j);
2750 j++;
2751 }
2752 skb_shinfo(to)->nr_frags = j;
2753
2754 return 0;
2755}
2756EXPORT_SYMBOL_GPL(skb_zerocopy);
2757
2758void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2759{
2760 __wsum csum;
2761 long csstart;
2762
2763 if (skb->ip_summed == CHECKSUM_PARTIAL)
2764 csstart = skb_checksum_start_offset(skb);
2765 else
2766 csstart = skb_headlen(skb);
2767
2768 BUG_ON(csstart > skb_headlen(skb));
2769
2770 skb_copy_from_linear_data(skb, to, csstart);
2771
2772 csum = 0;
2773 if (csstart != skb->len)
2774 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2775 skb->len - csstart, 0);
2776
2777 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2778 long csstuff = csstart + skb->csum_offset;
2779
2780 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2781 }
2782}
2783EXPORT_SYMBOL(skb_copy_and_csum_dev);
2784
2785/**
2786 * skb_dequeue - remove from the head of the queue
2787 * @list: list to dequeue from
2788 *
2789 * Remove the head of the list. The list lock is taken so the function
2790 * may be used safely with other locking list functions. The head item is
2791 * returned or %NULL if the list is empty.
2792 */
2793
2794struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2795{
2796 unsigned long flags;
2797 struct sk_buff *result;
2798
2799 spin_lock_irqsave(&list->lock, flags);
2800 result = __skb_dequeue(list);
2801 spin_unlock_irqrestore(&list->lock, flags);
2802 return result;
2803}
2804EXPORT_SYMBOL(skb_dequeue);
2805
2806/**
2807 * skb_dequeue_tail - remove from the tail of the queue
2808 * @list: list to dequeue from
2809 *
2810 * Remove the tail of the list. The list lock is taken so the function
2811 * may be used safely with other locking list functions. The tail item is
2812 * returned or %NULL if the list is empty.
2813 */
2814struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2815{
2816 unsigned long flags;
2817 struct sk_buff *result;
2818
2819 spin_lock_irqsave(&list->lock, flags);
2820 result = __skb_dequeue_tail(list);
2821 spin_unlock_irqrestore(&list->lock, flags);
2822 return result;
2823}
2824EXPORT_SYMBOL(skb_dequeue_tail);
2825
2826/**
2827 * skb_queue_purge - empty a list
2828 * @list: list to empty
2829 *
2830 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2831 * the list and one reference dropped. This function takes the list
2832 * lock and is atomic with respect to other list locking functions.
2833 */
2834void skb_queue_purge(struct sk_buff_head *list)
2835{
2836 struct sk_buff *skb;
2837 while ((skb = skb_dequeue(list)) != NULL)
2838 kfree_skb(skb);
2839}
2840EXPORT_SYMBOL(skb_queue_purge);
2841
2842/**
2843 * skb_rbtree_purge - empty a skb rbtree
2844 * @root: root of the rbtree to empty
2845 *
2846 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
2847 * the list and one reference dropped. This function does not take
2848 * any lock. Synchronization should be handled by the caller (e.g., TCP
2849 * out-of-order queue is protected by the socket lock).
2850 */
2851void skb_rbtree_purge(struct rb_root *root)
2852{
2853 struct rb_node *p = rb_first(root);
2854
2855 while (p) {
2856 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
2857
2858 p = rb_next(p);
2859 rb_erase(&skb->rbnode, root);
2860 kfree_skb(skb);
2861 }
2862}
2863
2864/**
2865 * skb_queue_head - queue a buffer at the list head
2866 * @list: list to use
2867 * @newsk: buffer to queue
2868 *
2869 * Queue a buffer at the start of the list. This function takes the
2870 * list lock and can be used safely with other locking &sk_buff
2871 * functions.
2872 *
2873 * A buffer cannot be placed on two lists at the same time.
2874 */
2875void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2876{
2877 unsigned long flags;
2878
2879 spin_lock_irqsave(&list->lock, flags);
2880 __skb_queue_head(list, newsk);
2881 spin_unlock_irqrestore(&list->lock, flags);
2882}
2883EXPORT_SYMBOL(skb_queue_head);
2884
2885/**
2886 * skb_queue_tail - queue a buffer at the list tail
2887 * @list: list to use
2888 * @newsk: buffer to queue
2889 *
2890 * Queue a buffer at the tail of the list. This function takes the
2891 * list lock and can be used safely with other locking &sk_buff
2892 * functions.
2893 *
2894 * A buffer cannot be placed on two lists at the same time.
2895 */
2896void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2897{
2898 unsigned long flags;
2899
2900 spin_lock_irqsave(&list->lock, flags);
2901 __skb_queue_tail(list, newsk);
2902 spin_unlock_irqrestore(&list->lock, flags);
2903}
2904EXPORT_SYMBOL(skb_queue_tail);
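
/* Usage sketch (illustrative only): the locked queue helpers pair up as a
 * simple producer/consumer. my_queue is a hypothetical &sk_buff_head that was
 * set up once with skb_queue_head_init(), and my_process() is a hypothetical
 * handler.
 *
 *	producer:	skb_queue_tail(&my_queue, skb);
 *
 *	consumer:	while ((skb = skb_dequeue(&my_queue)) != NULL)
 *				my_process(skb);
 */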
2905
2906/**
2907 * skb_unlink - remove a buffer from a list
2908 * @skb: buffer to remove
2909 * @list: list to use
2910 *
2911 * Remove a packet from a list. The list locks are taken and this
2912 * function is atomic with respect to other list locked calls.
2913 *
2914 * You must know what list the SKB is on.
2915 */
2916void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2917{
2918 unsigned long flags;
2919
2920 spin_lock_irqsave(&list->lock, flags);
2921 __skb_unlink(skb, list);
2922 spin_unlock_irqrestore(&list->lock, flags);
2923}
2924EXPORT_SYMBOL(skb_unlink);
2925
2926/**
2927 * skb_append - append a buffer
2928 * @old: buffer to insert after
2929 * @newsk: buffer to insert
2930 * @list: list to use
2931 *
2932 * Place a packet after a given packet in a list. The list locks are taken
2933 * and this function is atomic with respect to other list locked calls.
2934 * A buffer cannot be placed on two lists at the same time.
2935 */
2936void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2937{
2938 unsigned long flags;
2939
2940 spin_lock_irqsave(&list->lock, flags);
2941 __skb_queue_after(list, old, newsk);
2942 spin_unlock_irqrestore(&list->lock, flags);
2943}
2944EXPORT_SYMBOL(skb_append);
2945
2946/**
2947 * skb_insert - insert a buffer
2948 * @old: buffer to insert before
2949 * @newsk: buffer to insert
2950 * @list: list to use
2951 *
2952 * Place a packet before a given packet in a list. The list locks are
2953 * taken and this function is atomic with respect to other list locked
2954 * calls.
2955 *
2956 * A buffer cannot be placed on two lists at the same time.
2957 */
2958void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2959{
2960 unsigned long flags;
2961
2962 spin_lock_irqsave(&list->lock, flags);
2963 __skb_insert(newsk, old->prev, old, list);
2964 spin_unlock_irqrestore(&list->lock, flags);
2965}
2966EXPORT_SYMBOL(skb_insert);
2967
2968static inline void skb_split_inside_header(struct sk_buff *skb,
2969 struct sk_buff* skb1,
2970 const u32 len, const int pos)
2971{
2972 int i;
2973
2974 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2975 pos - len);
2976 /* And move data appendix as is. */
2977 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2978 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2979
2980 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2981 skb_shinfo(skb)->nr_frags = 0;
2982 skb1->data_len = skb->data_len;
2983 skb1->len += skb1->data_len;
2984 skb->data_len = 0;
2985 skb->len = len;
2986 skb_set_tail_pointer(skb, len);
2987}
2988
2989static inline void skb_split_no_header(struct sk_buff *skb,
2990 struct sk_buff* skb1,
2991 const u32 len, int pos)
2992{
2993 int i, k = 0;
2994 const int nfrags = skb_shinfo(skb)->nr_frags;
2995
2996 skb_shinfo(skb)->nr_frags = 0;
2997 skb1->len = skb1->data_len = skb->len - len;
2998 skb->len = len;
2999 skb->data_len = len - pos;
3000
3001 for (i = 0; i < nfrags; i++) {
3002 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3003
3004 if (pos + size > len) {
3005 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3006
3007 if (pos < len) {
3008				/* Split frag.
3009				 * We have two variants in this case:
3010				 * 1. Move the whole frag to the second
3011				 *    part, if possible. F.e. this approach
3012				 *    is mandatory for TUX, where splitting
3013				 *    is expensive.
3014				 * 2. Split it accurately; this is what we do here.
3015 */
3016 skb_frag_ref(skb, i);
3017 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
3018 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3019 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3020 skb_shinfo(skb)->nr_frags++;
3021 }
3022 k++;
3023 } else
3024 skb_shinfo(skb)->nr_frags++;
3025 pos += size;
3026 }
3027 skb_shinfo(skb1)->nr_frags = k;
3028}
3029
3030/**
3031 * skb_split - Split fragmented skb to two parts at length len.
3032 * @skb: the buffer to split
3033 * @skb1: the buffer to receive the second part
3034 * @len: new length for skb
3035 */
3036void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3037{
3038 int pos = skb_headlen(skb);
3039
3040 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3041 SKBTX_SHARED_FRAG;
3042 skb_zerocopy_clone(skb1, skb, 0);
3043 if (len < pos) /* Split line is inside header. */
3044 skb_split_inside_header(skb, skb1, len, pos);
3045 else /* Second chunk has no header, nothing to copy. */
3046 skb_split_no_header(skb, skb1, len, pos);
3047}
3048EXPORT_SYMBOL(skb_split);
3049
3050/* Shifting from/to a cloned skb is a no-go.
3051 *
3052 * Caller cannot keep skb_shinfo related pointers past calling here!
3053 */
3054static int skb_prepare_for_shift(struct sk_buff *skb)
3055{
3056 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3057}
3058
3059/**
3060 * skb_shift - Shifts paged data partially from skb to another
3061 * @tgt: buffer into which tail data gets added
3062 * @skb: buffer from which the paged data comes from
3063 * @shiftlen: shift up to this many bytes
3064 *
3065 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3066 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3067 * It's up to the caller to free skb if everything was shifted.
3068 *
3069 * If @tgt runs out of frags, the whole operation is aborted.
3070 *
3071 * The skb cannot include anything but paged data, while tgt is allowed
3072 * to have non-paged data as well.
3073 *
3074 * TODO: full sized shift could be optimized but that would need
3075 * specialized skb free'er to handle frags without up-to-date nr_frags.
3076 */
3077int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3078{
3079 int from, to, merge, todo;
3080 struct skb_frag_struct *fragfrom, *fragto;
3081
3082 BUG_ON(shiftlen > skb->len);
3083
3084 if (skb_headlen(skb))
3085 return 0;
3086 if (skb_zcopy(tgt) || skb_zcopy(skb))
3087 return 0;
3088
3089 todo = shiftlen;
3090 from = 0;
3091 to = skb_shinfo(tgt)->nr_frags;
3092 fragfrom = &skb_shinfo(skb)->frags[from];
3093
3094 /* Actual merge is delayed until the point when we know we can
3095 * commit all, so that we don't have to undo partial changes
3096 */
3097 if (!to ||
3098 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3099 fragfrom->page_offset)) {
3100 merge = -1;
3101 } else {
3102 merge = to - 1;
3103
3104 todo -= skb_frag_size(fragfrom);
3105 if (todo < 0) {
3106 if (skb_prepare_for_shift(skb) ||
3107 skb_prepare_for_shift(tgt))
3108 return 0;
3109
3110 /* All previous frag pointers might be stale! */
3111 fragfrom = &skb_shinfo(skb)->frags[from];
3112 fragto = &skb_shinfo(tgt)->frags[merge];
3113
3114 skb_frag_size_add(fragto, shiftlen);
3115 skb_frag_size_sub(fragfrom, shiftlen);
3116 fragfrom->page_offset += shiftlen;
3117
3118 goto onlymerged;
3119 }
3120
3121 from++;
3122 }
3123
3124 /* Skip full, not-fitting skb to avoid expensive operations */
3125 if ((shiftlen == skb->len) &&
3126 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3127 return 0;
3128
3129 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3130 return 0;
3131
3132 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3133 if (to == MAX_SKB_FRAGS)
3134 return 0;
3135
3136 fragfrom = &skb_shinfo(skb)->frags[from];
3137 fragto = &skb_shinfo(tgt)->frags[to];
3138
3139 if (todo >= skb_frag_size(fragfrom)) {
3140 *fragto = *fragfrom;
3141 todo -= skb_frag_size(fragfrom);
3142 from++;
3143 to++;
3144
3145 } else {
3146 __skb_frag_ref(fragfrom);
3147 fragto->page = fragfrom->page;
3148 fragto->page_offset = fragfrom->page_offset;
3149 skb_frag_size_set(fragto, todo);
3150
3151 fragfrom->page_offset += todo;
3152 skb_frag_size_sub(fragfrom, todo);
3153 todo = 0;
3154
3155 to++;
3156 break;
3157 }
3158 }
3159
3160 /* Ready to "commit" this state change to tgt */
3161 skb_shinfo(tgt)->nr_frags = to;
3162
3163 if (merge >= 0) {
3164 fragfrom = &skb_shinfo(skb)->frags[0];
3165 fragto = &skb_shinfo(tgt)->frags[merge];
3166
3167 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3168 __skb_frag_unref(fragfrom);
3169 }
3170
3171 /* Reposition in the original skb */
3172 to = 0;
3173 while (from < skb_shinfo(skb)->nr_frags)
3174 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3175 skb_shinfo(skb)->nr_frags = to;
3176
3177 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3178
3179onlymerged:
3180 /* Most likely the tgt won't ever need its checksum anymore, skb on
3181 * the other hand might need it if it needs to be resent
3182 */
3183 tgt->ip_summed = CHECKSUM_PARTIAL;
3184 skb->ip_summed = CHECKSUM_PARTIAL;
3185
3186 /* Yak, is it really working this way? Some helper please? */
3187 skb->len -= shiftlen;
3188 skb->data_len -= shiftlen;
3189 skb->truesize -= shiftlen;
3190 tgt->len += shiftlen;
3191 tgt->data_len += shiftlen;
3192 tgt->truesize += shiftlen;
3193
3194 return shiftlen;
3195}
3196
3197/**
3198 * skb_prepare_seq_read - Prepare a sequential read of skb data
3199 * @skb: the buffer to read
3200 * @from: lower offset of data to be read
3201 * @to: upper offset of data to be read
3202 * @st: state variable
3203 *
3204 * Initializes the specified state variable. Must be called before
3205 * invoking skb_seq_read() for the first time.
3206 */
3207void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3208 unsigned int to, struct skb_seq_state *st)
3209{
3210 st->lower_offset = from;
3211 st->upper_offset = to;
3212 st->root_skb = st->cur_skb = skb;
3213 st->frag_idx = st->stepped_offset = 0;
3214 st->frag_data = NULL;
3215}
3216EXPORT_SYMBOL(skb_prepare_seq_read);
3217
3218/**
3219 * skb_seq_read - Sequentially read skb data
3220 * @consumed: number of bytes consumed by the caller so far
3221 * @data: destination pointer for data to be returned
3222 * @st: state variable
3223 *
3224 * Reads a block of skb data at @consumed relative to the
3225 * lower offset specified to skb_prepare_seq_read(). Assigns
3226 * the head of the data block to @data and returns the length
3227 * of the block or 0 if the end of the skb data or the upper
3228 * offset has been reached.
3229 *
3230 * The caller is not required to consume all of the data
3231 * returned, i.e. @consumed is typically set to the number
3232 * of bytes already consumed and the next call to
3233 * skb_seq_read() will return the remaining part of the block.
3234 *
3235 * Note 1: The size of each block of data returned can be arbitrary;
3236 * this limitation is the cost of zerocopy sequential
3237 * reads of potentially non-linear data.
3238 *
3239 * Note 2: Fragment lists within fragments are not implemented
3240 * at the moment, state->root_skb could be replaced with
3241 * a stack for this purpose.
3242 */
3243unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3244 struct skb_seq_state *st)
3245{
3246 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3247 skb_frag_t *frag;
3248
3249 if (unlikely(abs_offset >= st->upper_offset)) {
3250 if (st->frag_data) {
3251 kunmap_atomic(st->frag_data);
3252 st->frag_data = NULL;
3253 }
3254 return 0;
3255 }
3256
3257next_skb:
3258 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3259
3260 if (abs_offset < block_limit && !st->frag_data) {
3261 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3262 return block_limit - abs_offset;
3263 }
3264
3265 if (st->frag_idx == 0 && !st->frag_data)
3266 st->stepped_offset += skb_headlen(st->cur_skb);
3267
3268 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3269 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3270 block_limit = skb_frag_size(frag) + st->stepped_offset;
3271
3272 if (abs_offset < block_limit) {
3273 if (!st->frag_data)
3274 st->frag_data = kmap_atomic(skb_frag_page(frag));
3275
3276 *data = (u8 *) st->frag_data + frag->page_offset +
3277 (abs_offset - st->stepped_offset);
3278
3279 return block_limit - abs_offset;
3280 }
3281
3282 if (st->frag_data) {
3283 kunmap_atomic(st->frag_data);
3284 st->frag_data = NULL;
3285 }
3286
3287 st->frag_idx++;
3288 st->stepped_offset += skb_frag_size(frag);
3289 }
3290
3291 if (st->frag_data) {
3292 kunmap_atomic(st->frag_data);
3293 st->frag_data = NULL;
3294 }
3295
3296 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3297 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3298 st->frag_idx = 0;
3299 goto next_skb;
3300 } else if (st->cur_skb->next) {
3301 st->cur_skb = st->cur_skb->next;
3302 st->frag_idx = 0;
3303 goto next_skb;
3304 }
3305
3306 return 0;
3307}
3308EXPORT_SYMBOL(skb_seq_read);
3309
3310/**
3311 * skb_abort_seq_read - Abort a sequential read of skb data
3312 * @st: state variable
3313 *
3314 * Must be called if skb_seq_read() was not called until it
3315 * returned 0.
3316 */
3317void skb_abort_seq_read(struct skb_seq_state *st)
3318{
3319 if (st->frag_data)
3320 kunmap_atomic(st->frag_data);
3321}
3322EXPORT_SYMBOL(skb_abort_seq_read);
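
/* Usage sketch (illustrative only): walk every byte of an skb with the
 * sequential read API; my_consume() is a hypothetical callback. Since the
 * loop runs until skb_seq_read() returns 0, skb_abort_seq_read() is not
 * needed; it would be required only if the walk stopped early.
 *
 *	struct skb_seq_state st;
 *	unsigned int consumed = 0, len;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		my_consume(data, len);
 *		consumed += len;
 *	}
 */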
3323
3324#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
3325
3326static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3327 struct ts_config *conf,
3328 struct ts_state *state)
3329{
3330 return skb_seq_read(offset, text, TS_SKB_CB(state));
3331}
3332
3333static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3334{
3335 skb_abort_seq_read(TS_SKB_CB(state));
3336}
3337
3338/**
3339 * skb_find_text - Find a text pattern in skb data
3340 * @skb: the buffer to look in
3341 * @from: search offset
3342 * @to: search limit
3343 * @config: textsearch configuration
3344 *
3345 * Finds a pattern in the skb data according to the specified
3346 * textsearch configuration. Use textsearch_next() to retrieve
3347 * subsequent occurrences of the pattern. Returns the offset
3348 * to the first occurrence or UINT_MAX if no match was found.
3349 */
3350unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3351 unsigned int to, struct ts_config *config)
3352{
3353 struct ts_state state;
3354 unsigned int ret;
3355
3356 config->get_next_block = skb_ts_get_next_block;
3357 config->finish = skb_ts_finish;
3358
3359 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3360
3361 ret = textsearch_find(config, &state);
3362 return (ret <= to - from ? ret : UINT_MAX);
3363}
3364EXPORT_SYMBOL(skb_find_text);
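
/* Usage sketch (illustrative only): search an skb for a fixed pattern,
 * assuming the textsearch API from <linux/textsearch.h> ("kmp" is one of the
 * available algorithms).
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "needle", 6, GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf);
 *	if (pos != UINT_MAX)
 *		pr_debug("pattern found at offset %u\n", pos);
 *	textsearch_destroy(conf);
 */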
3365
3366/**
3367 * skb_append_datato_frags - append the user data to a skb
3368 * @sk: sock structure
3369 * @skb: skb structure to be appended with user data.
3370 * @getfrag: call back function to be used for getting the user data
3371 * @from: pointer to user message iov
3372 * @length: length of the iov message
3373 *
3374 * Description: This procedure appends the user data to the fragment part
3375 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
3376 */
3377int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
3378 int (*getfrag)(void *from, char *to, int offset,
3379 int len, int odd, struct sk_buff *skb),
3380 void *from, int length)
3381{
3382 int frg_cnt = skb_shinfo(skb)->nr_frags;
3383 int copy;
3384 int offset = 0;
3385 int ret;
3386	struct page_frag *pfrag = &current->task_frag;
3387
3388 do {
3389 /* Return error if we don't have space for new frag */
3390 if (frg_cnt >= MAX_SKB_FRAGS)
3391 return -EMSGSIZE;
3392
3393 if (!sk_page_frag_refill(sk, pfrag))
3394 return -ENOMEM;
3395
3396 /* copy the user data to page */
3397 copy = min_t(int, length, pfrag->size - pfrag->offset);
3398
3399 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
3400 offset, copy, 0, skb);
3401 if (ret < 0)
3402 return -EFAULT;
3403
3404 /* copy was successful so update the size parameters */
3405 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
3406 copy);
3407 frg_cnt++;
3408 pfrag->offset += copy;
3409 get_page(pfrag->page);
3410
3411 skb->truesize += copy;
3412 refcount_add(copy, &sk->sk_wmem_alloc);
3413 skb->len += copy;
3414 skb->data_len += copy;
3415 offset += copy;
3416 length -= copy;
3417
3418 } while (length > 0);
3419
3420 return 0;
3421}
3422EXPORT_SYMBOL(skb_append_datato_frags);
3423
3424int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3425 int offset, size_t size)
3426{
3427 int i = skb_shinfo(skb)->nr_frags;
3428
3429 if (skb_can_coalesce(skb, i, page, offset)) {
3430 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3431 } else if (i < MAX_SKB_FRAGS) {
3432 get_page(page);
3433 skb_fill_page_desc(skb, i, page, offset, size);
3434 } else {
3435 return -EMSGSIZE;
3436 }
3437
3438 return 0;
3439}
3440EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3441
3442/**
3443 * skb_pull_rcsum - pull skb and update receive checksum
3444 * @skb: buffer to update
3445 * @len: length of data pulled
3446 *
3447 * This function performs an skb_pull on the packet and updates
3448 * the CHECKSUM_COMPLETE checksum. It should be used on
3449 * receive path processing instead of skb_pull unless you know
3450 * that the checksum difference is zero (e.g., a valid IP header)
3451 * or you are setting ip_summed to CHECKSUM_NONE.
3452 */
3453void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3454{
3455 unsigned char *data = skb->data;
3456
3457 BUG_ON(len > skb->len);
3458 __skb_pull(skb, len);
3459 skb_postpull_rcsum(skb, data, len);
3460 return skb->data;
3461}
3462EXPORT_SYMBOL_GPL(skb_pull_rcsum);
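
/*
 * Illustrative sketch (assumed caller, not part of this file): a tunnel
 * receive path that has already validated "hdr_len" bytes with
 * pskb_may_pull() strips its encapsulation header like this, keeping a
 * CHECKSUM_COMPLETE value consistent:
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		goto drop;
 *	skb_pull_rcsum(skb, hdr_len);
 */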
3463
3464static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3465{
3466 skb_frag_t head_frag;
3467 struct page *page;
3468
3469 page = virt_to_head_page(frag_skb->head);
3470 head_frag.page.p = page;
3471 head_frag.page_offset = frag_skb->data -
3472 (unsigned char *)page_address(page);
3473 head_frag.size = skb_headlen(frag_skb);
3474 return head_frag;
3475}
3476
3477/**
3478 * skb_segment - Perform protocol segmentation on skb.
3479 * @head_skb: buffer to segment
3480 * @features: features for the output path (see dev->features)
3481 *
3482 * This function performs segmentation on the given skb. It returns
3483 * a pointer to the first in a list of new skbs for the segments.
3484 * In case of error it returns ERR_PTR(err).
3485 */
3486struct sk_buff *skb_segment(struct sk_buff *head_skb,
3487 netdev_features_t features)
3488{
3489 struct sk_buff *segs = NULL;
3490 struct sk_buff *tail = NULL;
3491 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3492 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3493 unsigned int mss = skb_shinfo(head_skb)->gso_size;
3494 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3495 struct sk_buff *frag_skb = head_skb;
3496 unsigned int offset = doffset;
3497 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3498 unsigned int partial_segs = 0;
3499 unsigned int headroom;
3500 unsigned int len = head_skb->len;
3501 __be16 proto;
3502 bool csum, sg;
3503 int nfrags = skb_shinfo(head_skb)->nr_frags;
3504 int err = -ENOMEM;
3505 int i = 0;
3506 int pos;
3507 int dummy;
3508
3509 __skb_push(head_skb, doffset);
3510 proto = skb_network_protocol(head_skb, &dummy);
3511 if (unlikely(!proto))
3512 return ERR_PTR(-EINVAL);
3513
3514 sg = !!(features & NETIF_F_SG);
3515 csum = !!can_checksum_protocol(features, proto);
3516
3517 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3518 if (!(features & NETIF_F_GSO_PARTIAL)) {
3519 struct sk_buff *iter;
3520 unsigned int frag_len;
3521
3522 if (!list_skb ||
3523 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3524 goto normal;
3525
3526 /* If we get here then all the required
3527 * GSO features except frag_list are supported.
3528		 * Try to split the SKB into multiple GSO SKBs
3529 * with no frag_list.
3530 * Currently we can do that only when the buffers don't
3531 * have a linear part and all the buffers except
3532 * the last are of the same length.
3533 */
3534 frag_len = list_skb->len;
3535 skb_walk_frags(head_skb, iter) {
3536 if (frag_len != iter->len && iter->next)
3537 goto normal;
3538 if (skb_headlen(iter) && !iter->head_frag)
3539 goto normal;
3540
3541 len -= iter->len;
3542 }
3543
3544 if (len != frag_len)
3545 goto normal;
3546 }
3547
3548 /* GSO partial only requires that we trim off any excess that
3549 * doesn't fit into an MSS sized block, so take care of that
3550 * now.
3551 */
3552 partial_segs = len / mss;
3553 if (partial_segs > 1)
3554 mss *= partial_segs;
3555 else
3556 partial_segs = 0;
3557 }
3558
3559normal:
3560 headroom = skb_headroom(head_skb);
3561 pos = skb_headlen(head_skb);
3562
3563 do {
3564 struct sk_buff *nskb;
3565 skb_frag_t *nskb_frag;
3566 int hsize;
3567 int size;
3568
3569 if (unlikely(mss == GSO_BY_FRAGS)) {
3570 len = list_skb->len;
3571 } else {
3572 len = head_skb->len - offset;
3573 if (len > mss)
3574 len = mss;
3575 }
3576
3577 hsize = skb_headlen(head_skb) - offset;
3578 if (hsize < 0)
3579 hsize = 0;
3580 if (hsize > len || !sg)
3581 hsize = len;
3582
3583 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3584 (skb_headlen(list_skb) == len || sg)) {
3585 BUG_ON(skb_headlen(list_skb) > len);
3586
3587 i = 0;
3588 nfrags = skb_shinfo(list_skb)->nr_frags;
3589 frag = skb_shinfo(list_skb)->frags;
3590 frag_skb = list_skb;
3591 pos += skb_headlen(list_skb);
3592
3593 while (pos < offset + len) {
3594 BUG_ON(i >= nfrags);
3595
3596 size = skb_frag_size(frag);
3597 if (pos + size > offset + len)
3598 break;
3599
3600 i++;
3601 pos += size;
3602 frag++;
3603 }
3604
3605 nskb = skb_clone(list_skb, GFP_ATOMIC);
3606 list_skb = list_skb->next;
3607
3608 if (unlikely(!nskb))
3609 goto err;
3610
3611 if (unlikely(pskb_trim(nskb, len))) {
3612 kfree_skb(nskb);
3613 goto err;
3614 }
3615
3616 hsize = skb_end_offset(nskb);
3617 if (skb_cow_head(nskb, doffset + headroom)) {
3618 kfree_skb(nskb);
3619 goto err;
3620 }
3621
3622 nskb->truesize += skb_end_offset(nskb) - hsize;
3623 skb_release_head_state(nskb);
3624 __skb_push(nskb, doffset);
3625 } else {
3626 nskb = __alloc_skb(hsize + doffset + headroom,
3627 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3628 NUMA_NO_NODE);
3629
3630 if (unlikely(!nskb))
3631 goto err;
3632
3633 skb_reserve(nskb, headroom);
3634 __skb_put(nskb, doffset);
3635 }
3636
3637 if (segs)
3638 tail->next = nskb;
3639 else
3640 segs = nskb;
3641 tail = nskb;
3642
3643 __copy_skb_header(nskb, head_skb);
3644
3645 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3646 skb_reset_mac_len(nskb);
3647
3648 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3649 nskb->data - tnl_hlen,
3650 doffset + tnl_hlen);
3651
3652 if (nskb->len == len + doffset)
3653 goto perform_csum_check;
3654
3655 if (!sg) {
3656 if (!nskb->remcsum_offload)
3657 nskb->ip_summed = CHECKSUM_NONE;
3658 SKB_GSO_CB(nskb)->csum =
3659 skb_copy_and_csum_bits(head_skb, offset,
3660 skb_put(nskb, len),
3661 len, 0);
3662 SKB_GSO_CB(nskb)->csum_start =
3663 skb_headroom(nskb) + doffset;
3664 continue;
3665 }
3666
3667 nskb_frag = skb_shinfo(nskb)->frags;
3668
3669 skb_copy_from_linear_data_offset(head_skb, offset,
3670 skb_put(nskb, hsize), hsize);
3671
3672 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3673 SKBTX_SHARED_FRAG;
3674
3675 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3676 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3677 goto err;
3678
3679 while (pos < offset + len) {
3680 if (i >= nfrags) {
3681 i = 0;
3682 nfrags = skb_shinfo(list_skb)->nr_frags;
3683 frag = skb_shinfo(list_skb)->frags;
3684 frag_skb = list_skb;
3685 if (!skb_headlen(list_skb)) {
3686 BUG_ON(!nfrags);
3687 } else {
3688 BUG_ON(!list_skb->head_frag);
3689
3690 /* to make room for head_frag. */
3691 i--;
3692 frag--;
3693 }
3694 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3695 skb_zerocopy_clone(nskb, frag_skb,
3696 GFP_ATOMIC))
3697 goto err;
3698
3699 list_skb = list_skb->next;
3700 }
3701
3702 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3703 MAX_SKB_FRAGS)) {
3704 net_warn_ratelimited(
3705 "skb_segment: too many frags: %u %u\n",
3706 pos, mss);
3707 goto err;
3708 }
3709
3710 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
3711 __skb_frag_ref(nskb_frag);
3712 size = skb_frag_size(nskb_frag);
3713
3714 if (pos < offset) {
3715 nskb_frag->page_offset += offset - pos;
3716 skb_frag_size_sub(nskb_frag, offset - pos);
3717 }
3718
3719 skb_shinfo(nskb)->nr_frags++;
3720
3721 if (pos + size <= offset + len) {
3722 i++;
3723 frag++;
3724 pos += size;
3725 } else {
3726 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3727 goto skip_fraglist;
3728 }
3729
3730 nskb_frag++;
3731 }
3732
3733skip_fraglist:
3734 nskb->data_len = len - hsize;
3735 nskb->len += nskb->data_len;
3736 nskb->truesize += nskb->data_len;
3737
3738perform_csum_check:
3739 if (!csum) {
3740 if (skb_has_shared_frag(nskb)) {
3741 err = __skb_linearize(nskb);
3742 if (err)
3743 goto err;
3744 }
3745 if (!nskb->remcsum_offload)
3746 nskb->ip_summed = CHECKSUM_NONE;
3747 SKB_GSO_CB(nskb)->csum =
3748 skb_checksum(nskb, doffset,
3749 nskb->len - doffset, 0);
3750 SKB_GSO_CB(nskb)->csum_start =
3751 skb_headroom(nskb) + doffset;
3752 }
3753 } while ((offset += len) < head_skb->len);
3754
3755 /* Some callers want to get the end of the list.
3756 * Put it in segs->prev to avoid walking the list.
3757 * (see validate_xmit_skb_list() for example)
3758 */
3759 segs->prev = tail;
3760
3761 if (partial_segs) {
3762 struct sk_buff *iter;
3763 int type = skb_shinfo(head_skb)->gso_type;
3764 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3765
3766 /* Update type to add partial and then remove dodgy if set */
3767 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3768 type &= ~SKB_GSO_DODGY;
3769
3770 /* Update GSO info and prepare to start updating headers on
3771 * our way back down the stack of protocols.
3772 */
3773 for (iter = segs; iter; iter = iter->next) {
3774 skb_shinfo(iter)->gso_size = gso_size;
3775 skb_shinfo(iter)->gso_segs = partial_segs;
3776 skb_shinfo(iter)->gso_type = type;
3777 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
3778 }
3779
3780 if (tail->len - doffset <= gso_size)
3781 skb_shinfo(tail)->gso_size = 0;
3782 else if (tail != segs)
3783 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3784 }
3785
3786 /* Following permits correct backpressure, for protocols
3787 * using skb_set_owner_w().
3788	 * Idea is to transfer ownership from head_skb to the last segment.
3789 */
3790 if (head_skb->destructor == sock_wfree) {
3791 swap(tail->truesize, head_skb->truesize);
3792 swap(tail->destructor, head_skb->destructor);
3793 swap(tail->sk, head_skb->sk);
3794 }
3795 return segs;
3796
3797err:
3798 kfree_skb_list(segs);
3799 return ERR_PTR(err);
3800}
3801EXPORT_SYMBOL_GPL(skb_segment);
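
/*
 * Usage sketch (hypothetical protocol, not part of this file): a
 * gso_segment() callback typically strips its own header and then lets
 * skb_segment() build the list of MSS-sized segments.  "foohdr" and the
 * callback name are placeholders.
 *
 *	static struct sk_buff *foo_gso_segment(struct sk_buff *skb,
 *					       netdev_features_t features)
 *	{
 *		if (!pskb_may_pull(skb, sizeof(struct foohdr)))
 *			return ERR_PTR(-EINVAL);
 *		__skb_pull(skb, sizeof(struct foohdr));
 *		return skb_segment(skb, features);
 *	}
 */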
3802
3803int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3804{
3805 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3806 unsigned int offset = skb_gro_offset(skb);
3807 unsigned int headlen = skb_headlen(skb);
3808 unsigned int len = skb_gro_len(skb);
3809 struct sk_buff *lp, *p = *head;
3810 unsigned int delta_truesize;
3811
3812 if (unlikely(p->len + len >= 65536))
3813 return -E2BIG;
3814
3815 lp = NAPI_GRO_CB(p)->last;
3816 pinfo = skb_shinfo(lp);
3817
3818 if (headlen <= offset) {
3819 skb_frag_t *frag;
3820 skb_frag_t *frag2;
3821 int i = skbinfo->nr_frags;
3822 int nr_frags = pinfo->nr_frags + i;
3823
3824 if (nr_frags > MAX_SKB_FRAGS)
3825 goto merge;
3826
3827 offset -= headlen;
3828 pinfo->nr_frags = nr_frags;
3829 skbinfo->nr_frags = 0;
3830
3831 frag = pinfo->frags + nr_frags;
3832 frag2 = skbinfo->frags + i;
3833 do {
3834 *--frag = *--frag2;
3835 } while (--i);
3836
3837 frag->page_offset += offset;
3838 skb_frag_size_sub(frag, offset);
3839
3840		/* all fragments' truesize: remove (head size + sk_buff) */
3841 delta_truesize = skb->truesize -
3842 SKB_TRUESIZE(skb_end_offset(skb));
3843
3844 skb->truesize -= skb->data_len;
3845 skb->len -= skb->data_len;
3846 skb->data_len = 0;
3847
3848 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3849 goto done;
3850 } else if (skb->head_frag) {
3851 int nr_frags = pinfo->nr_frags;
3852 skb_frag_t *frag = pinfo->frags + nr_frags;
3853 struct page *page = virt_to_head_page(skb->head);
3854 unsigned int first_size = headlen - offset;
3855 unsigned int first_offset;
3856
3857 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3858 goto merge;
3859
3860 first_offset = skb->data -
3861 (unsigned char *)page_address(page) +
3862 offset;
3863
3864 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3865
3866 frag->page.p = page;
3867 frag->page_offset = first_offset;
3868 skb_frag_size_set(frag, first_size);
3869
3870 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3871	/* We don't need to clear skbinfo->nr_frags here */
3872
3873 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3874 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3875 goto done;
3876 }
3877
3878merge:
3879 delta_truesize = skb->truesize;
3880 if (offset > headlen) {
3881 unsigned int eat = offset - headlen;
3882
3883 skbinfo->frags[0].page_offset += eat;
3884 skb_frag_size_sub(&skbinfo->frags[0], eat);
3885 skb->data_len -= eat;
3886 skb->len -= eat;
3887 offset = headlen;
3888 }
3889
3890 __skb_pull(skb, offset);
3891
3892 if (NAPI_GRO_CB(p)->last == p)
3893 skb_shinfo(p)->frag_list = skb;
3894 else
3895 NAPI_GRO_CB(p)->last->next = skb;
3896 NAPI_GRO_CB(p)->last = skb;
3897 __skb_header_release(skb);
3898 lp = p;
3899
3900done:
3901 NAPI_GRO_CB(p)->count++;
3902 p->data_len += len;
3903 p->truesize += delta_truesize;
3904 p->len += len;
3905 if (lp != p) {
3906 lp->data_len += len;
3907 lp->truesize += delta_truesize;
3908 lp->len += len;
3909 }
3910 NAPI_GRO_CB(skb)->same_flow = 1;
3911 return 0;
3912}
3913EXPORT_SYMBOL_GPL(skb_gro_receive);
3914
3915void __init skb_init(void)
3916{
3917 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
3918 sizeof(struct sk_buff),
3919 0,
3920 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3921 offsetof(struct sk_buff, cb),
3922 sizeof_field(struct sk_buff, cb),
3923 NULL);
3924 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3925 sizeof(struct sk_buff_fclones),
3926 0,
3927 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3928 NULL);
3929}
3930
3931static int
3932__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
3933 unsigned int recursion_level)
3934{
3935 int start = skb_headlen(skb);
3936 int i, copy = start - offset;
3937 struct sk_buff *frag_iter;
3938 int elt = 0;
3939
3940 if (unlikely(recursion_level >= 24))
3941 return -EMSGSIZE;
3942
3943 if (copy > 0) {
3944 if (copy > len)
3945 copy = len;
3946 sg_set_buf(sg, skb->data + offset, copy);
3947 elt++;
3948 if ((len -= copy) == 0)
3949 return elt;
3950 offset += copy;
3951 }
3952
3953 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3954 int end;
3955
3956 WARN_ON(start > offset + len);
3957
3958 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3959 if ((copy = end - offset) > 0) {
3960 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3961 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3962 return -EMSGSIZE;
3963
3964 if (copy > len)
3965 copy = len;
3966 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3967 frag->page_offset+offset-start);
3968 elt++;
3969 if (!(len -= copy))
3970 return elt;
3971 offset += copy;
3972 }
3973 start = end;
3974 }
3975
3976 skb_walk_frags(skb, frag_iter) {
3977 int end, ret;
3978
3979 WARN_ON(start > offset + len);
3980
3981 end = start + frag_iter->len;
3982 if ((copy = end - offset) > 0) {
3983 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3984 return -EMSGSIZE;
3985
3986 if (copy > len)
3987 copy = len;
3988 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3989 copy, recursion_level + 1);
3990 if (unlikely(ret < 0))
3991 return ret;
3992 elt += ret;
3993 if ((len -= copy) == 0)
3994 return elt;
3995 offset += copy;
3996 }
3997 start = end;
3998 }
3999 BUG_ON(len);
4000 return elt;
4001}
4002
4003/**
4004 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4005 * @skb: Socket buffer containing the buffers to be mapped
4006 * @sg: The scatter-gather list to map into
4007 * @offset: The offset into the buffer's contents to start mapping
4008 * @len: Length of buffer space to be mapped
4009 *
4010 * Fill the specified scatter-gather list with mappings/pointers into a
4011 * region of the buffer space attached to a socket buffer. Returns either
4012 * the number of scatterlist items used, or -EMSGSIZE if the contents
4013 * could not fit.
4014 */
4015int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4016{
4017 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4018
4019 if (nsg <= 0)
4020 return nsg;
4021
4022 sg_mark_end(&sg[nsg - 1]);
4023
4024 return nsg;
4025}
4026EXPORT_SYMBOL_GPL(skb_to_sgvec);
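
/*
 * Illustrative sketch (not part of this file): mapping a whole skb into a
 * scatterlist, e.g. before handing it to a crypto or DMA engine.  The
 * fixed-size "sg" array is an example assumption; buffers with a frag_list
 * may need more entries.
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 */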
4027
4028/* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4029 * given sglist without marking the sg entry holding the last skb data as the
4030 * end. The caller can thus manipulate the sg list at will when adding new
4031 * data after the first call, without calling sg_unmark_end to extend it.
4032 *
4033 * Scenario to use skb_to_sgvec_nomark:
4034 * 1. sg_init_table
4035 * 2. skb_to_sgvec_nomark(payload1)
4036 * 3. skb_to_sgvec_nomark(payload2)
4037 *
4038 * This is equivalent to:
4039 * 1. sg_init_table
4040 * 2. skb_to_sgvec(payload1)
4041 * 3. sg_unmark_end
4042 * 4. skb_to_sgvec(payload2)
4043 *
4044 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4045 * is preferable.
4046 */
4047int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4048 int offset, int len)
4049{
4050 return __skb_to_sgvec(skb, sg, offset, len, 0);
4051}
4052EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4053
4054
4055
4056/**
4057 * skb_cow_data - Check that a socket buffer's data buffers are writable
4058 * @skb: The socket buffer to check.
4059 * @tailbits: Amount of trailing space to be added
4060 * @trailer: Returned pointer to the skb where the @tailbits space begins
4061 *
4062 * Make sure that the data buffers attached to a socket buffer are
4063 * writable. If they are not, private copies are made of the data buffers
4064 * and the socket buffer is set to use these instead.
4065 *
4066 * If @tailbits is given, make sure that there is space to write @tailbits
4067 * bytes of data beyond current end of socket buffer. @trailer will be
4068 * set to point to the skb in which this space begins.
4069 *
4070 * The number of scatterlist elements required to completely map the
4071 * COW'd and extended socket buffer will be returned.
4072 */
4073int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4074{
4075 int copyflag;
4076 int elt;
4077 struct sk_buff *skb1, **skb_p;
4078
4079 /* If skb is cloned or its head is paged, reallocate
4080 * head pulling out all the pages (pages are considered not writable
4081 * at the moment even if they are anonymous).
4082 */
4083 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4084 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4085 return -ENOMEM;
4086
4087 /* Easy case. Most of packets will go this way. */
4088 if (!skb_has_frag_list(skb)) {
4089		/* A little trouble: not enough space for the trailer.
4090		 * This should not happen when the stack is tuned to generate
4091		 * good frames. OK, on a miss we reallocate and reserve even
4092		 * more space; 128 bytes is fair. */
4093
4094 if (skb_tailroom(skb) < tailbits &&
4095 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4096 return -ENOMEM;
4097
4098 /* Voila! */
4099 *trailer = skb;
4100 return 1;
4101 }
4102
4103	/* Misery. We are in trouble, going to mince fragments... */
4104
4105 elt = 1;
4106 skb_p = &skb_shinfo(skb)->frag_list;
4107 copyflag = 0;
4108
4109 while ((skb1 = *skb_p) != NULL) {
4110 int ntail = 0;
4111
4112 /* The fragment is partially pulled by someone,
4113 * this can happen on input. Copy it and everything
4114 * after it. */
4115
4116 if (skb_shared(skb1))
4117 copyflag = 1;
4118
4119 /* If the skb is the last, worry about trailer. */
4120
4121 if (skb1->next == NULL && tailbits) {
4122 if (skb_shinfo(skb1)->nr_frags ||
4123 skb_has_frag_list(skb1) ||
4124 skb_tailroom(skb1) < tailbits)
4125 ntail = tailbits + 128;
4126 }
4127
4128 if (copyflag ||
4129 skb_cloned(skb1) ||
4130 ntail ||
4131 skb_shinfo(skb1)->nr_frags ||
4132 skb_has_frag_list(skb1)) {
4133 struct sk_buff *skb2;
4134
4135 /* Fuck, we are miserable poor guys... */
4136 if (ntail == 0)
4137 skb2 = skb_copy(skb1, GFP_ATOMIC);
4138 else
4139 skb2 = skb_copy_expand(skb1,
4140 skb_headroom(skb1),
4141 ntail,
4142 GFP_ATOMIC);
4143 if (unlikely(skb2 == NULL))
4144 return -ENOMEM;
4145
4146 if (skb1->sk)
4147 skb_set_owner_w(skb2, skb1->sk);
4148
4149 /* Looking around. Are we still alive?
4150 * OK, link new skb, drop old one */
4151
4152 skb2->next = skb1->next;
4153 *skb_p = skb2;
4154 kfree_skb(skb1);
4155 skb1 = skb2;
4156 }
4157 elt++;
4158 *trailer = skb1;
4159 skb_p = &skb1->next;
4160 }
4161
4162 return elt;
4163}
4164EXPORT_SYMBOL_GPL(skb_cow_data);
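
/*
 * Illustrative sketch (not part of this file): an ESP-like transform that
 * needs "tailroom" extra writable bytes after the payload would do
 * something like the following; the returned count is the number of
 * scatterlist entries needed to map the whole (COW'd) buffer:
 *
 *	struct sk_buff *trailer;
 *	int nsg;
 *
 *	nsg = skb_cow_data(skb, tailroom, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *
 * The extra bytes then live in the tailroom of "trailer".
 */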
4165
4166static void sock_rmem_free(struct sk_buff *skb)
4167{
4168 struct sock *sk = skb->sk;
4169
4170 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4171}
4172
4173static void skb_set_err_queue(struct sk_buff *skb)
4174{
4175 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4176 * So, it is safe to (mis)use it to mark skbs on the error queue.
4177 */
4178 skb->pkt_type = PACKET_OUTGOING;
4179 BUILD_BUG_ON(PACKET_OUTGOING == 0);
4180}
4181
4182/*
4183 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
4184 */
4185int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4186{
4187 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4188 (unsigned int)sk->sk_rcvbuf)
4189 return -ENOMEM;
4190
4191 skb_orphan(skb);
4192 skb->sk = sk;
4193 skb->destructor = sock_rmem_free;
4194 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4195 skb_set_err_queue(skb);
4196
4197 /* before exiting rcu section, make sure dst is refcounted */
4198 skb_dst_force(skb);
4199
4200 skb_queue_tail(&sk->sk_error_queue, skb);
4201 if (!sock_flag(sk, SOCK_DEAD))
4202 sk->sk_error_report(sk);
4203 return 0;
4204}
4205EXPORT_SYMBOL(sock_queue_err_skb);
4206
4207static bool is_icmp_err_skb(const struct sk_buff *skb)
4208{
4209 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4210 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4211}
4212
4213struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4214{
4215 struct sk_buff_head *q = &sk->sk_error_queue;
4216 struct sk_buff *skb, *skb_next = NULL;
4217 bool icmp_next = false;
4218 unsigned long flags;
4219
4220 spin_lock_irqsave(&q->lock, flags);
4221 skb = __skb_dequeue(q);
4222 if (skb && (skb_next = skb_peek(q))) {
4223 icmp_next = is_icmp_err_skb(skb_next);
4224 if (icmp_next)
4225 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4226 }
4227 spin_unlock_irqrestore(&q->lock, flags);
4228
4229 if (is_icmp_err_skb(skb) && !icmp_next)
4230 sk->sk_err = 0;
4231
4232 if (skb_next)
4233 sk->sk_error_report(sk);
4234
4235 return skb;
4236}
4237EXPORT_SYMBOL(sock_dequeue_err_skb);
4238
4239/**
4240 * skb_clone_sk - create clone of skb, and take reference to socket
4241 * @skb: the skb to clone
4242 *
4243 * This function creates a clone of a buffer that holds a reference on
4244 * sk_refcnt. Buffers created via this function are meant to be
4245 * returned using sock_queue_err_skb, or freed via kfree_skb.
4246 *
4247 * When passing buffers allocated with this function to sock_queue_err_skb
4248 * it is necessary to wrap the call with sock_hold/sock_put in order to
4249 * prevent the socket from being released prior to being enqueued on
4250 * the sk_error_queue.
4251 */
4252struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4253{
4254 struct sock *sk = skb->sk;
4255 struct sk_buff *clone;
4256
4257 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4258 return NULL;
4259
4260 clone = skb_clone(skb, GFP_ATOMIC);
4261 if (!clone) {
4262 sock_put(sk);
4263 return NULL;
4264 }
4265
4266 clone->sk = sk;
4267 clone->destructor = sock_efree;
4268
4269 return clone;
4270}
4271EXPORT_SYMBOL(skb_clone_sk);
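
/*
 * Illustrative sketch (not part of this file): a driver completing TX
 * timestamps from interrupt context would typically clone the skb at
 * transmit time and fill in the hardware timestamp later.  "hwtstamps" is
 * an assumed struct skb_shared_hwtstamps filled in by the driver.
 *
 *	struct sk_buff *clone = skb_clone_sk(skb);
 *
 *	if (clone)
 *		skb_complete_tx_timestamp(clone, &hwtstamps);
 *
 * skb_complete_tx_timestamp() takes care of queueing the clone on the
 * socket's error queue, or of freeing it on failure.
 */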
4272
4273static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4274 struct sock *sk,
4275 int tstype,
4276 bool opt_stats)
4277{
4278 struct sock_exterr_skb *serr;
4279 int err;
4280
4281 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4282
4283 serr = SKB_EXT_ERR(skb);
4284 memset(serr, 0, sizeof(*serr));
4285 serr->ee.ee_errno = ENOMSG;
4286 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4287 serr->ee.ee_info = tstype;
4288 serr->opt_stats = opt_stats;
4289 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4290 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4291 serr->ee.ee_data = skb_shinfo(skb)->tskey;
4292 if (sk->sk_protocol == IPPROTO_TCP &&
4293 sk->sk_type == SOCK_STREAM)
4294 serr->ee.ee_data -= sk->sk_tskey;
4295 }
4296
4297 err = sock_queue_err_skb(sk, skb);
4298
4299 if (err)
4300 kfree_skb(skb);
4301}
4302
4303static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4304{
4305 bool ret;
4306
4307 if (likely(sysctl_tstamp_allow_data || tsonly))
4308 return true;
4309
4310 read_lock_bh(&sk->sk_callback_lock);
4311 ret = sk->sk_socket && sk->sk_socket->file &&
4312 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4313 read_unlock_bh(&sk->sk_callback_lock);
4314 return ret;
4315}
4316
4317void skb_complete_tx_timestamp(struct sk_buff *skb,
4318 struct skb_shared_hwtstamps *hwtstamps)
4319{
4320 struct sock *sk = skb->sk;
4321
4322 if (!skb_may_tx_timestamp(sk, false))
4323 goto err;
4324
4325 /* Take a reference to prevent skb_orphan() from freeing the socket,
4326 * but only if the socket refcount is not zero.
4327 */
4328 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4329 *skb_hwtstamps(skb) = *hwtstamps;
4330 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4331 sock_put(sk);
4332 return;
4333 }
4334
4335err:
4336 kfree_skb(skb);
4337}
4338EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4339
4340void __skb_tstamp_tx(struct sk_buff *orig_skb,
4341 struct skb_shared_hwtstamps *hwtstamps,
4342 struct sock *sk, int tstype)
4343{
4344 struct sk_buff *skb;
4345 bool tsonly, opt_stats = false;
4346
4347 if (!sk)
4348 return;
4349
4350 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4351 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4352 return;
4353
4354 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4355 if (!skb_may_tx_timestamp(sk, tsonly))
4356 return;
4357
4358 if (tsonly) {
4359#ifdef CONFIG_INET
4360 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4361 sk->sk_protocol == IPPROTO_TCP &&
4362 sk->sk_type == SOCK_STREAM) {
4363 skb = tcp_get_timestamping_opt_stats(sk);
4364 opt_stats = true;
4365 } else
4366#endif
4367 skb = alloc_skb(0, GFP_ATOMIC);
4368 } else {
4369 skb = skb_clone(orig_skb, GFP_ATOMIC);
4370 }
4371 if (!skb)
4372 return;
4373
4374 if (tsonly) {
4375 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4376 SKBTX_ANY_TSTAMP;
4377 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4378 }
4379
4380 if (hwtstamps)
4381 *skb_hwtstamps(skb) = *hwtstamps;
4382 else
4383 skb->tstamp = ktime_get_real();
4384
4385 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4386}
4387EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4388
4389void skb_tstamp_tx(struct sk_buff *orig_skb,
4390 struct skb_shared_hwtstamps *hwtstamps)
4391{
4392 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4393 SCM_TSTAMP_SND);
4394}
4395EXPORT_SYMBOL_GPL(skb_tstamp_tx);
4396
4397void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4398{
4399 struct sock *sk = skb->sk;
4400 struct sock_exterr_skb *serr;
4401 int err = 1;
4402
4403 skb->wifi_acked_valid = 1;
4404 skb->wifi_acked = acked;
4405
4406 serr = SKB_EXT_ERR(skb);
4407 memset(serr, 0, sizeof(*serr));
4408 serr->ee.ee_errno = ENOMSG;
4409 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4410
4411 /* Take a reference to prevent skb_orphan() from freeing the socket,
4412 * but only if the socket refcount is not zero.
4413 */
4414 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4415 err = sock_queue_err_skb(sk, skb);
4416 sock_put(sk);
4417 }
4418 if (err)
4419 kfree_skb(skb);
4420}
4421EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4422
4423/**
4424 * skb_partial_csum_set - set up and verify partial csum values for packet
4425 * @skb: the skb to set
4426 * @start: the number of bytes after skb->data to start checksumming.
4427 * @off: the offset from start to place the checksum.
4428 *
4429 * For untrusted partially-checksummed packets, we need to make sure the values
4430 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4431 *
4432 * This function checks and sets those values and skb->ip_summed: if this
4433 * returns false you should drop the packet.
4434 */
4435bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4436{
4437 if (unlikely(start > skb_headlen(skb)) ||
4438 unlikely((int)start + off > skb_headlen(skb) - 2)) {
4439 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
4440 start, off, skb_headlen(skb));
4441 return false;
4442 }
4443 skb->ip_summed = CHECKSUM_PARTIAL;
4444 skb->csum_start = skb_headroom(skb) + start;
4445 skb->csum_offset = off;
4446 skb_set_transport_header(skb, start);
4447 return true;
4448}
4449EXPORT_SYMBOL_GPL(skb_partial_csum_set);
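
/*
 * Illustrative sketch (not part of this file): a virtio-style receive path
 * that trusts csum_start/csum_offset from a guest-supplied header must
 * validate them before use; "hdr" is a hypothetical metadata structure.
 *
 *	if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
 *		goto drop;
 */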
4450
4451static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4452 unsigned int max)
4453{
4454 if (skb_headlen(skb) >= len)
4455 return 0;
4456
4457 /* If we need to pullup then pullup to the max, so we
4458 * won't need to do it again.
4459 */
4460 if (max > skb->len)
4461 max = skb->len;
4462
4463 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4464 return -ENOMEM;
4465
4466 if (skb_headlen(skb) < len)
4467 return -EPROTO;
4468
4469 return 0;
4470}
4471
4472#define MAX_TCP_HDR_LEN (15 * 4)
4473
4474static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4475 typeof(IPPROTO_IP) proto,
4476 unsigned int off)
4477{
4478 switch (proto) {
4479 int err;
4480
4481 case IPPROTO_TCP:
4482 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4483 off + MAX_TCP_HDR_LEN);
4484 if (!err && !skb_partial_csum_set(skb, off,
4485 offsetof(struct tcphdr,
4486 check)))
4487 err = -EPROTO;
4488 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4489
4490 case IPPROTO_UDP:
4491 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4492 off + sizeof(struct udphdr));
4493 if (!err && !skb_partial_csum_set(skb, off,
4494 offsetof(struct udphdr,
4495 check)))
4496 err = -EPROTO;
4497 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4498 }
4499
4500 return ERR_PTR(-EPROTO);
4501}
4502
4503/* This value should be large enough to cover a tagged ethernet header plus
4504 * maximally sized IP and TCP or UDP headers.
4505 */
4506#define MAX_IP_HDR_LEN 128
4507
4508static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4509{
4510 unsigned int off;
4511 bool fragment;
4512 __sum16 *csum;
4513 int err;
4514
4515 fragment = false;
4516
4517 err = skb_maybe_pull_tail(skb,
4518 sizeof(struct iphdr),
4519 MAX_IP_HDR_LEN);
4520 if (err < 0)
4521 goto out;
4522
4523 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4524 fragment = true;
4525
4526 off = ip_hdrlen(skb);
4527
4528 err = -EPROTO;
4529
4530 if (fragment)
4531 goto out;
4532
4533 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4534 if (IS_ERR(csum))
4535 return PTR_ERR(csum);
4536
4537 if (recalculate)
4538 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4539 ip_hdr(skb)->daddr,
4540 skb->len - off,
4541 ip_hdr(skb)->protocol, 0);
4542 err = 0;
4543
4544out:
4545 return err;
4546}
4547
4548/* This value should be large enough to cover a tagged ethernet header plus
4549 * an IPv6 header, all options, and a maximal TCP or UDP header.
4550 */
4551#define MAX_IPV6_HDR_LEN 256
4552
4553#define OPT_HDR(type, skb, off) \
4554 (type *)(skb_network_header(skb) + (off))
4555
4556static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4557{
4558 int err;
4559 u8 nexthdr;
4560 unsigned int off;
4561 unsigned int len;
4562 bool fragment;
4563 bool done;
4564 __sum16 *csum;
4565
4566 fragment = false;
4567 done = false;
4568
4569 off = sizeof(struct ipv6hdr);
4570
4571 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4572 if (err < 0)
4573 goto out;
4574
4575 nexthdr = ipv6_hdr(skb)->nexthdr;
4576
4577 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4578 while (off <= len && !done) {
4579 switch (nexthdr) {
4580 case IPPROTO_DSTOPTS:
4581 case IPPROTO_HOPOPTS:
4582 case IPPROTO_ROUTING: {
4583 struct ipv6_opt_hdr *hp;
4584
4585 err = skb_maybe_pull_tail(skb,
4586 off +
4587 sizeof(struct ipv6_opt_hdr),
4588 MAX_IPV6_HDR_LEN);
4589 if (err < 0)
4590 goto out;
4591
4592 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4593 nexthdr = hp->nexthdr;
4594 off += ipv6_optlen(hp);
4595 break;
4596 }
4597 case IPPROTO_AH: {
4598 struct ip_auth_hdr *hp;
4599
4600 err = skb_maybe_pull_tail(skb,
4601 off +
4602 sizeof(struct ip_auth_hdr),
4603 MAX_IPV6_HDR_LEN);
4604 if (err < 0)
4605 goto out;
4606
4607 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4608 nexthdr = hp->nexthdr;
4609 off += ipv6_authlen(hp);
4610 break;
4611 }
4612 case IPPROTO_FRAGMENT: {
4613 struct frag_hdr *hp;
4614
4615 err = skb_maybe_pull_tail(skb,
4616 off +
4617 sizeof(struct frag_hdr),
4618 MAX_IPV6_HDR_LEN);
4619 if (err < 0)
4620 goto out;
4621
4622 hp = OPT_HDR(struct frag_hdr, skb, off);
4623
4624 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4625 fragment = true;
4626
4627 nexthdr = hp->nexthdr;
4628 off += sizeof(struct frag_hdr);
4629 break;
4630 }
4631 default:
4632 done = true;
4633 break;
4634 }
4635 }
4636
4637 err = -EPROTO;
4638
4639 if (!done || fragment)
4640 goto out;
4641
4642 csum = skb_checksum_setup_ip(skb, nexthdr, off);
4643 if (IS_ERR(csum))
4644 return PTR_ERR(csum);
4645
4646 if (recalculate)
4647 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4648 &ipv6_hdr(skb)->daddr,
4649 skb->len - off, nexthdr, 0);
4650 err = 0;
4651
4652out:
4653 return err;
4654}
4655
4656/**
4657 * skb_checksum_setup - set up partial checksum offset
4658 * @skb: the skb to set up
4659 * @recalculate: if true the pseudo-header checksum will be recalculated
4660 */
4661int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4662{
4663 int err;
4664
4665 switch (skb->protocol) {
4666 case htons(ETH_P_IP):
4667 err = skb_checksum_setup_ipv4(skb, recalculate);
4668 break;
4669
4670 case htons(ETH_P_IPV6):
4671 err = skb_checksum_setup_ipv6(skb, recalculate);
4672 break;
4673
4674 default:
4675 err = -EPROTO;
4676 break;
4677 }
4678
4679 return err;
4680}
4681EXPORT_SYMBOL(skb_checksum_setup);
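
/*
 * Illustrative sketch (not part of this file): a backend driver receiving
 * partially-checksummed packets from an untrusted frontend would call:
 *
 *	err = skb_checksum_setup(skb, true);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 */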
4682
4683/**
4684 * skb_checksum_maybe_trim - maybe trims the given skb
4685 * @skb: the skb to check
4686 * @transport_len: the data length beyond the network header
4687 *
4688 * Checks whether the given skb has data beyond the given transport length.
4689 * If so, returns a cloned skb trimmed to this transport length.
4690 * Otherwise returns the provided skb. Returns NULL in error cases
4691 * (e.g. transport_len exceeds skb length or out-of-memory).
4692 *
4693 * Caller needs to set the skb transport header and free any returned skb if it
4694 * differs from the provided skb.
4695 */
4696static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4697 unsigned int transport_len)
4698{
4699 struct sk_buff *skb_chk;
4700 unsigned int len = skb_transport_offset(skb) + transport_len;
4701 int ret;
4702
4703 if (skb->len < len)
4704 return NULL;
4705 else if (skb->len == len)
4706 return skb;
4707
4708 skb_chk = skb_clone(skb, GFP_ATOMIC);
4709 if (!skb_chk)
4710 return NULL;
4711
4712 ret = pskb_trim_rcsum(skb_chk, len);
4713 if (ret) {
4714 kfree_skb(skb_chk);
4715 return NULL;
4716 }
4717
4718 return skb_chk;
4719}
4720
4721/**
4722 * skb_checksum_trimmed - validate checksum of an skb
4723 * @skb: the skb to check
4724 * @transport_len: the data length beyond the network header
4725 * @skb_chkf: checksum function to use
4726 *
4727 * Applies the given checksum function skb_chkf to the provided skb.
4728 * Returns a checked and maybe trimmed skb. Returns NULL on error.
4729 *
4730 * If the skb has data beyond the given transport length, then a
4731 * trimmed & cloned skb is checked and returned.
4732 *
4733 * Caller needs to set the skb transport header and free any returned skb if it
4734 * differs from the provided skb.
4735 */
4736struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4737 unsigned int transport_len,
4738 __sum16(*skb_chkf)(struct sk_buff *skb))
4739{
4740 struct sk_buff *skb_chk;
4741 unsigned int offset = skb_transport_offset(skb);
4742 __sum16 ret;
4743
4744 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4745 if (!skb_chk)
4746 goto err;
4747
4748 if (!pskb_may_pull(skb_chk, offset))
4749 goto err;
4750
4751 skb_pull_rcsum(skb_chk, offset);
4752 ret = skb_chkf(skb_chk);
4753 skb_push_rcsum(skb_chk, offset);
4754
4755 if (ret)
4756 goto err;
4757
4758 return skb_chk;
4759
4760err:
4761 if (skb_chk && skb_chk != skb)
4762 kfree_skb(skb_chk);
4763
4764 return NULL;
4765
4766}
4767EXPORT_SYMBOL(skb_checksum_trimmed);
4768
4769void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4770{
4771 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4772 skb->dev->name);
4773}
4774EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4775
4776void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4777{
4778 if (head_stolen) {
4779 skb_release_head_state(skb);
4780 kmem_cache_free(skbuff_head_cache, skb);
4781 } else {
4782 __kfree_skb(skb);
4783 }
4784}
4785EXPORT_SYMBOL(kfree_skb_partial);
4786
4787/**
4788 * skb_try_coalesce - try to merge skb to prior one
4789 * @to: prior buffer
4790 * @from: buffer to add
4791 * @fragstolen: pointer to boolean
4792 * @delta_truesize: how much more was allocated than was requested
4793 */
4794bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4795 bool *fragstolen, int *delta_truesize)
4796{
4797 struct skb_shared_info *to_shinfo, *from_shinfo;
4798 int i, delta, len = from->len;
4799
4800 *fragstolen = false;
4801
4802 if (skb_cloned(to))
4803 return false;
4804
4805 if (len <= skb_tailroom(to)) {
4806 if (len)
4807 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4808 *delta_truesize = 0;
4809 return true;
4810 }
4811
4812 to_shinfo = skb_shinfo(to);
4813 from_shinfo = skb_shinfo(from);
4814 if (to_shinfo->frag_list || from_shinfo->frag_list)
4815 return false;
4816 if (skb_zcopy(to) || skb_zcopy(from))
4817 return false;
4818
4819 if (skb_headlen(from) != 0) {
4820 struct page *page;
4821 unsigned int offset;
4822
4823 if (to_shinfo->nr_frags +
4824 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
4825 return false;
4826
4827 if (skb_head_is_locked(from))
4828 return false;
4829
4830 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4831
4832 page = virt_to_head_page(from->head);
4833 offset = from->data - (unsigned char *)page_address(page);
4834
4835 skb_fill_page_desc(to, to_shinfo->nr_frags,
4836 page, offset, skb_headlen(from));
4837 *fragstolen = true;
4838 } else {
4839 if (to_shinfo->nr_frags +
4840 from_shinfo->nr_frags > MAX_SKB_FRAGS)
4841 return false;
4842
4843 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4844 }
4845
4846 WARN_ON_ONCE(delta < len);
4847
4848 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
4849 from_shinfo->frags,
4850 from_shinfo->nr_frags * sizeof(skb_frag_t));
4851 to_shinfo->nr_frags += from_shinfo->nr_frags;
4852
4853 if (!skb_cloned(from))
4854 from_shinfo->nr_frags = 0;
4855
4856 /* if the skb is not cloned this does nothing
4857 * since we set nr_frags to 0.
4858 */
4859 for (i = 0; i < from_shinfo->nr_frags; i++)
4860 __skb_frag_ref(&from_shinfo->frags[i]);
4861
4862 to->truesize += delta;
4863 to->len += len;
4864 to->data_len += len;
4865
4866 *delta_truesize = delta;
4867 return true;
4868}
4869EXPORT_SYMBOL(skb_try_coalesce);
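
/*
 * Illustrative sketch (not part of this file): a receive queue that wants
 * to merge small packets would try to coalesce and, on success, release
 * the source buffer with kfree_skb_partial().  Accounting "delta" against
 * the owning socket is left to the caller.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(to, from, &fragstolen, &delta))
 *		kfree_skb_partial(from, fragstolen);
 */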
4870
4871/**
4872 * skb_scrub_packet - scrub an skb
4873 *
4874 * @skb: buffer to clean
4875 * @xnet: packet is crossing netns
4876 *
4877 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4878 * into/from a tunnel. Some information has to be cleared during these
4879 * operations.
4880 * skb_scrub_packet can also be used to clean a skb before injecting it in
4881 * another namespace (@xnet == true). We have to clear all information in the
4882 * skb that could impact namespace isolation.
4883 */
4884void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4885{
4886 skb->tstamp = 0;
4887 skb->pkt_type = PACKET_HOST;
4888 skb->skb_iif = 0;
4889 skb->ignore_df = 0;
4890 skb_dst_drop(skb);
4891 secpath_reset(skb);
4892 nf_reset(skb);
4893 nf_reset_trace(skb);
4894
4895 if (!xnet)
4896 return;
4897
4898 ipvs_reset(skb);
4899 skb_orphan(skb);
4900 skb->mark = 0;
4901}
4902EXPORT_SYMBOL_GPL(skb_scrub_packet);
4903
4904/**
4905 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4906 *
4907 * @skb: GSO skb
4908 *
4909 * skb_gso_transport_seglen is used to determine the real size of the
4910 * individual segments, including Layer4 headers (TCP/UDP).
4911 *
4912 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4913 */
4914static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4915{
4916 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4917 unsigned int thlen = 0;
4918
4919 if (skb->encapsulation) {
4920 thlen = skb_inner_transport_header(skb) -
4921 skb_transport_header(skb);
4922
4923 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4924 thlen += inner_tcp_hdrlen(skb);
4925 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4926 thlen = tcp_hdrlen(skb);
4927 } else if (unlikely(skb_is_gso_sctp(skb))) {
4928 thlen = sizeof(struct sctphdr);
4929 }
4930 /* UFO sets gso_size to the size of the fragmentation
4931 * payload, i.e. the size of the L4 (UDP) header is already
4932 * accounted for.
4933 */
4934 return thlen + shinfo->gso_size;
4935}
4936
4937/**
4938 * skb_gso_network_seglen - Return length of individual segments of a gso packet
4939 *
4940 * @skb: GSO skb
4941 *
4942 * skb_gso_network_seglen is used to determine the real size of the
4943 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4944 *
4945 * The MAC/L2 header is not accounted for.
4946 */
4947static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4948{
4949 unsigned int hdr_len = skb_transport_header(skb) -
4950 skb_network_header(skb);
4951
4952 return hdr_len + skb_gso_transport_seglen(skb);
4953}
4954
4955/**
4956 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4957 *
4958 * @skb: GSO skb
4959 *
4960 * skb_gso_mac_seglen is used to determine the real size of the
4961 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4962 * headers (TCP/UDP).
4963 */
4964static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4965{
4966 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4967
4968 return hdr_len + skb_gso_transport_seglen(skb);
4969}
4970
4971/**
4972 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
4973 *
4974 * There are a couple of instances where we have a GSO skb, and we
4975 * want to determine what size it would be after it is segmented.
4976 *
4977 * We might want to check:
4978 * - L3+L4+payload size (e.g. IP forwarding)
4979 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
4980 *
4981 * This is a helper to do that correctly considering GSO_BY_FRAGS.
4982 *
4983 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
4984 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
4985 *
4986 * @max_len: The maximum permissible length.
4987 *
4988 * Returns true if the segmented length <= max length.
4989 */
4990static inline bool skb_gso_size_check(const struct sk_buff *skb,
4991 unsigned int seg_len,
4992 unsigned int max_len) {
4993 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4994 const struct sk_buff *iter;
4995
4996 if (shinfo->gso_size != GSO_BY_FRAGS)
4997 return seg_len <= max_len;
4998
4999 /* Undo this so we can re-use header sizes */
5000 seg_len -= GSO_BY_FRAGS;
5001
5002 skb_walk_frags(skb, iter) {
5003 if (seg_len + skb_headlen(iter) > max_len)
5004 return false;
5005 }
5006
5007 return true;
5008}
5009
5010/**
5011 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5012 *
5013 * @skb: GSO skb
5014 * @mtu: MTU to validate against
5015 *
5016 * skb_gso_validate_network_len validates if a given skb will fit a
5017 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5018 * payload.
5019 */
5020bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5021{
5022 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5023}
5024EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
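
/*
 * Illustrative sketch (not part of this file): an IP forwarding path can
 * use this to decide whether a GSO packet will fit the outgoing MTU once
 * segmented, instead of comparing skb->len directly:
 *
 *	if (skb_is_gso(skb) &&
 *	    !skb_gso_validate_network_len(skb, mtu))
 *		goto send_frag_needed;
 */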
5025
5026/**
5027 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5028 *
5029 * @skb: GSO skb
5030 * @len: length to validate against
5031 *
5032 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5033 * length once split, including L2, L3 and L4 headers and the payload.
5034 */
5035bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5036{
5037 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5038}
5039EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5040
5041static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5042{
5043 int mac_len;
5044
5045 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5046 kfree_skb(skb);
5047 return NULL;
5048 }
5049
5050 mac_len = skb->data - skb_mac_header(skb);
5051 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5052 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5053 mac_len - VLAN_HLEN - ETH_TLEN);
5054 }
5055 skb->mac_header += VLAN_HLEN;
5056 return skb;
5057}
5058
5059struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5060{
5061 struct vlan_hdr *vhdr;
5062 u16 vlan_tci;
5063
5064 if (unlikely(skb_vlan_tag_present(skb))) {
5065 /* vlan_tci is already set-up so leave this for another time */
5066 return skb;
5067 }
5068
5069 skb = skb_share_check(skb, GFP_ATOMIC);
5070 if (unlikely(!skb))
5071 goto err_free;
5072
5073 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
5074 goto err_free;
5075
5076 vhdr = (struct vlan_hdr *)skb->data;
5077 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5078 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5079
5080 skb_pull_rcsum(skb, VLAN_HLEN);
5081 vlan_set_encap_proto(skb, vhdr);
5082
5083 skb = skb_reorder_vlan_header(skb);
5084 if (unlikely(!skb))
5085 goto err_free;
5086
5087 skb_reset_network_header(skb);
5088 skb_reset_transport_header(skb);
5089 skb_reset_mac_len(skb);
5090
5091 return skb;
5092
5093err_free:
5094 kfree_skb(skb);
5095 return NULL;
5096}
5097EXPORT_SYMBOL(skb_vlan_untag);
5098
5099int skb_ensure_writable(struct sk_buff *skb, int write_len)
5100{
5101 if (!pskb_may_pull(skb, write_len))
5102 return -ENOMEM;
5103
5104 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5105 return 0;
5106
5107 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5108}
5109EXPORT_SYMBOL(skb_ensure_writable);
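
/*
 * Illustrative sketch (not part of this file): before rewriting the first
 * bytes of a packet (here the Ethernet destination), make sure the area is
 * private and present in the linear part.  "new_dst" is an assumed caller
 * variable.
 *
 *	err = skb_ensure_writable(skb, ETH_HLEN);
 *	if (err)
 *		return err;
 *	ether_addr_copy(eth_hdr(skb)->h_dest, new_dst);
 */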
5110
5111/* remove VLAN header from packet and update csum accordingly.
5112 * expects a non skb_vlan_tag_present skb with a vlan tag payload
5113 */
5114int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5115{
5116 struct vlan_hdr *vhdr;
5117 int offset = skb->data - skb_mac_header(skb);
5118 int err;
5119
5120 if (WARN_ONCE(offset,
5121 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5122 offset)) {
5123 return -EINVAL;
5124 }
5125
5126 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5127 if (unlikely(err))
5128 return err;
5129
5130 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5131
5132 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5133 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5134
5135 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5136 __skb_pull(skb, VLAN_HLEN);
5137
5138 vlan_set_encap_proto(skb, vhdr);
5139 skb->mac_header += VLAN_HLEN;
5140
5141 if (skb_network_offset(skb) < ETH_HLEN)
5142 skb_set_network_header(skb, ETH_HLEN);
5143
5144 skb_reset_mac_len(skb);
5145
5146 return err;
5147}
5148EXPORT_SYMBOL(__skb_vlan_pop);
5149
5150/* Pop a vlan tag either from hwaccel or from payload.
5151 * Expects skb->data at mac header.
5152 */
5153int skb_vlan_pop(struct sk_buff *skb)
5154{
5155 u16 vlan_tci;
5156 __be16 vlan_proto;
5157 int err;
5158
5159 if (likely(skb_vlan_tag_present(skb))) {
5160 skb->vlan_tci = 0;
5161 } else {
5162 if (unlikely(!eth_type_vlan(skb->protocol)))
5163 return 0;
5164
5165 err = __skb_vlan_pop(skb, &vlan_tci);
5166 if (err)
5167 return err;
5168 }
5169 /* move next vlan tag to hw accel tag */
5170 if (likely(!eth_type_vlan(skb->protocol)))
5171 return 0;
5172
5173 vlan_proto = skb->protocol;
5174 err = __skb_vlan_pop(skb, &vlan_tci);
5175 if (unlikely(err))
5176 return err;
5177
5178 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5179 return 0;
5180}
5181EXPORT_SYMBOL(skb_vlan_pop);
5182
5183/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5184 * Expects skb->data at mac header.
5185 */
5186int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5187{
5188 if (skb_vlan_tag_present(skb)) {
5189 int offset = skb->data - skb_mac_header(skb);
5190 int err;
5191
5192 if (WARN_ONCE(offset,
5193 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5194 offset)) {
5195 return -EINVAL;
5196 }
5197
5198 err = __vlan_insert_tag(skb, skb->vlan_proto,
5199 skb_vlan_tag_get(skb));
5200 if (err)
5201 return err;
5202
5203 skb->protocol = skb->vlan_proto;
5204 skb->mac_len += VLAN_HLEN;
5205
5206 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5207 }
5208 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5209 return 0;
5210}
5211EXPORT_SYMBOL(skb_vlan_push);
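
/*
 * Illustrative sketch (not part of this file): a TC-like action that
 * rewrites a VLAN tag can combine the two helpers above.  skb->data is
 * assumed to sit at the mac header and "new_tci" is a caller variable.
 *
 *	err = skb_vlan_pop(skb);
 *	if (!err)
 *		err = skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
 */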
5212
5213/**
5214 * alloc_skb_with_frags - allocate skb with page frags
5215 *
5216 * @header_len: size of linear part
5217 * @data_len: needed length in frags
5218 * @max_page_order: max page order desired.
5219 * @errcode: pointer to error code if any
5220 * @gfp_mask: allocation mask
5221 *
5222 * This can be used to allocate a paged skb, given a maximal order for frags.
5223 */
5224struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5225 unsigned long data_len,
5226 int max_page_order,
5227 int *errcode,
5228 gfp_t gfp_mask)
5229{
5230 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5231 unsigned long chunk;
5232 struct sk_buff *skb;
5233 struct page *page;
5234 gfp_t gfp_head;
5235 int i;
5236
5237 *errcode = -EMSGSIZE;
5238	/* Note this test could be relaxed, if we succeed in allocating
5239 * high order pages...
5240 */
5241 if (npages > MAX_SKB_FRAGS)
5242 return NULL;
5243
5244 gfp_head = gfp_mask;
5245 if (gfp_head & __GFP_DIRECT_RECLAIM)
5246 gfp_head |= __GFP_RETRY_MAYFAIL;
5247
5248 *errcode = -ENOBUFS;
5249 skb = alloc_skb(header_len, gfp_head);
5250 if (!skb)
5251 return NULL;
5252
5253 skb->truesize += npages << PAGE_SHIFT;
5254
5255 for (i = 0; npages > 0; i++) {
5256 int order = max_page_order;
5257
5258 while (order) {
5259 if (npages >= 1 << order) {
5260 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5261 __GFP_COMP |
5262 __GFP_NOWARN |
5263 __GFP_NORETRY,
5264 order);
5265 if (page)
5266 goto fill_page;
5267 /* Do not retry other high order allocations */
5268 order = 1;
5269 max_page_order = 0;
5270 }
5271 order--;
5272 }
5273 page = alloc_page(gfp_mask);
5274 if (!page)
5275 goto failure;
5276fill_page:
5277 chunk = min_t(unsigned long, data_len,
5278 PAGE_SIZE << order);
5279 skb_fill_page_desc(skb, i, page, 0, chunk);
5280 data_len -= chunk;
5281 npages -= 1 << order;
5282 }
5283 return skb;
5284
5285failure:
5286 kfree_skb(skb);
5287 return NULL;
5288}
5289EXPORT_SYMBOL(alloc_skb_with_frags);
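
/*
 * Illustrative sketch (not part of this file): allocating a packet with a
 * small linear header and the bulk of the payload in page frags, as a
 * sendmsg() implementation might do.  "header_len", "data_len" and
 * "reserve_headroom" are assumed caller variables.
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(header_len, data_len,
 *				   PAGE_ALLOC_COSTLY_ORDER,
 *				   &errcode, GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(errcode);
 *	skb_reserve(skb, reserve_headroom);
 */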
5290
5291/* carve out the first off bytes from skb when off < headlen */
5292static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5293 const int headlen, gfp_t gfp_mask)
5294{
5295 int i;
5296 int size = skb_end_offset(skb);
5297 int new_hlen = headlen - off;
5298 u8 *data;
5299
5300 size = SKB_DATA_ALIGN(size);
5301
5302 if (skb_pfmemalloc(skb))
5303 gfp_mask |= __GFP_MEMALLOC;
5304 data = kmalloc_reserve(size +
5305 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5306 gfp_mask, NUMA_NO_NODE, NULL);
5307 if (!data)
5308 return -ENOMEM;
5309
5310 size = SKB_WITH_OVERHEAD(ksize(data));
5311
5312 /* Copy real data, and all frags */
5313 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5314 skb->len -= off;
5315
5316 memcpy((struct skb_shared_info *)(data + size),
5317 skb_shinfo(skb),
5318 offsetof(struct skb_shared_info,
5319 frags[skb_shinfo(skb)->nr_frags]));
5320 if (skb_cloned(skb)) {
5321 /* drop the old head gracefully */
5322 if (skb_orphan_frags(skb, gfp_mask)) {
5323 kfree(data);
5324 return -ENOMEM;
5325 }
5326 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5327 skb_frag_ref(skb, i);
5328 if (skb_has_frag_list(skb))
5329 skb_clone_fraglist(skb);
5330 skb_release_data(skb);
5331 } else {
5332		/* we can reuse the existing refcount - all we did was
5333 * relocate values
5334 */
5335 skb_free_head(skb);
5336 }
5337
5338 skb->head = data;
5339 skb->data = data;
5340 skb->head_frag = 0;
5341#ifdef NET_SKBUFF_DATA_USES_OFFSET
5342 skb->end = size;
5343#else
5344 skb->end = skb->head + size;
5345#endif
5346 skb_set_tail_pointer(skb, skb_headlen(skb));
5347 skb_headers_offset_update(skb, 0);
5348 skb->cloned = 0;
5349 skb->hdr_len = 0;
5350 skb->nohdr = 0;
5351 atomic_set(&skb_shinfo(skb)->dataref, 1);
5352
5353 return 0;
5354}
5355
5356static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5357
5358/* carve out the first eat bytes from skb's frag_list. May recurse into
5359 * pskb_carve()
5360 */
5361static int pskb_carve_frag_list(struct sk_buff *skb,
5362 struct skb_shared_info *shinfo, int eat,
5363 gfp_t gfp_mask)
5364{
5365 struct sk_buff *list = shinfo->frag_list;
5366 struct sk_buff *clone = NULL;
5367 struct sk_buff *insp = NULL;
5368
5369 do {
5370 if (!list) {
5371 pr_err("Not enough bytes to eat. Want %d\n", eat);
5372 return -EFAULT;
5373 }
5374 if (list->len <= eat) {
5375 /* Eaten as whole. */
5376 eat -= list->len;
5377 list = list->next;
5378 insp = list;
5379 } else {
5380 /* Eaten partially. */
5381 if (skb_shared(list)) {
5382 clone = skb_clone(list, gfp_mask);
5383 if (!clone)
5384 return -ENOMEM;
5385 insp = list->next;
5386 list = clone;
5387 } else {
5388 /* This may be pulled without problems. */
5389 insp = list;
5390 }
5391 if (pskb_carve(list, eat, gfp_mask) < 0) {
5392 kfree_skb(clone);
5393 return -ENOMEM;
5394 }
5395 break;
5396 }
5397 } while (eat);
5398
5399 /* Free pulled out fragments. */
5400 while ((list = shinfo->frag_list) != insp) {
5401 shinfo->frag_list = list->next;
5402 kfree_skb(list);
5403 }
5404 /* And insert new clone at head. */
5405 if (clone) {
5406 clone->next = list;
5407 shinfo->frag_list = clone;
5408 }
5409 return 0;
5410}
5411
5412/* carve off first len bytes from skb. Split line (off) is in the
5413 * non-linear part of skb
5414 */
5415static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5416 int pos, gfp_t gfp_mask)
5417{
5418 int i, k = 0;
5419 int size = skb_end_offset(skb);
5420 u8 *data;
5421 const int nfrags = skb_shinfo(skb)->nr_frags;
5422 struct skb_shared_info *shinfo;
5423
5424 size = SKB_DATA_ALIGN(size);
5425
5426 if (skb_pfmemalloc(skb))
5427 gfp_mask |= __GFP_MEMALLOC;
5428 data = kmalloc_reserve(size +
5429 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5430 gfp_mask, NUMA_NO_NODE, NULL);
5431 if (!data)
5432 return -ENOMEM;
5433
5434 size = SKB_WITH_OVERHEAD(ksize(data));
5435
5436 memcpy((struct skb_shared_info *)(data + size),
5437 skb_shinfo(skb), offsetof(struct skb_shared_info,
5438 frags[skb_shinfo(skb)->nr_frags]));
5439 if (skb_orphan_frags(skb, gfp_mask)) {
5440 kfree(data);
5441 return -ENOMEM;
5442 }
5443 shinfo = (struct skb_shared_info *)(data + size);
5444 for (i = 0; i < nfrags; i++) {
5445 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5446
5447 if (pos + fsize > off) {
5448 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5449
5450 if (pos < off) {
5451				/* Split frag.
5452				 * We have two options in this case:
5453				 * 1. Move the whole frag to the second
5454				 * part, if possible. E.g. this approach
5455				 * is mandatory for TUX, where splitting
5456				 * is expensive.
5457				 * 2. Split accurately. This is what we do.
5458				 */
5459 shinfo->frags[0].page_offset += off - pos;
5460 skb_frag_size_sub(&shinfo->frags[0], off - pos);
5461 }
5462 skb_frag_ref(skb, i);
5463 k++;
5464 }
5465 pos += fsize;
5466 }
5467 shinfo->nr_frags = k;
5468 if (skb_has_frag_list(skb))
5469 skb_clone_fraglist(skb);
5470
5471 if (k == 0) {
5472 /* split line is in frag list */
5473 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
5474 }
5475 skb_release_data(skb);
5476
5477 skb->head = data;
5478 skb->head_frag = 0;
5479 skb->data = data;
5480#ifdef NET_SKBUFF_DATA_USES_OFFSET
5481 skb->end = size;
5482#else
5483 skb->end = skb->head + size;
5484#endif
5485 skb_reset_tail_pointer(skb);
5486 skb_headers_offset_update(skb, 0);
5487 skb->cloned = 0;
5488 skb->hdr_len = 0;
5489 skb->nohdr = 0;
5490 skb->len -= off;
5491 skb->data_len = skb->len;
5492 atomic_set(&skb_shinfo(skb)->dataref, 1);
5493 return 0;
5494}
5495
5496/* remove len bytes from the beginning of the skb */
5497static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
5498{
5499 int headlen = skb_headlen(skb);
5500
5501 if (len < headlen)
5502 return pskb_carve_inside_header(skb, len, headlen, gfp);
5503 else
5504 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
5505}
5506
5507/* Extract to_copy bytes starting at off from skb, and return this in
5508 * a new skb
5509 */
5510struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
5511 int to_copy, gfp_t gfp)
5512{
5513 struct sk_buff *clone = skb_clone(skb, gfp);
5514
5515 if (!clone)
5516 return NULL;
5517
5518 if (pskb_carve(clone, off, gfp) < 0 ||
5519 pskb_trim(clone, to_copy)) {
5520 kfree_skb(clone);
5521 return NULL;
5522 }
5523 return clone;
5524}
5525EXPORT_SYMBOL(pskb_extract);
5526
5527/**
5528 * skb_condense - try to get rid of fragments/frag_list if possible
5529 * @skb: buffer
5530 *
5531 * Can be used to save memory before skb is added to a busy queue.
5532 * If packet has bytes in frags and enough tail room in skb->head,
5533 * pull all of them, so that we can free the frags right now and adjust
5534 * truesize.
5535 * Notes:
5536 * We do not reallocate skb->head, thus this cannot fail.
5537 * Caller must re-evaluate skb->truesize if needed.
5538 */
5539void skb_condense(struct sk_buff *skb)
5540{
5541 if (skb->data_len) {
5542 if (skb->data_len > skb->end - skb->tail ||
5543 skb_cloned(skb))
5544 return;
5545
5546 /* Nice, we can free page frag(s) right now */
5547 __pskb_pull_tail(skb, skb->data_len);
5548 }
5549	/* At this point, skb->truesize might be overestimated,
5550	 * because the skb had a fragment, and fragments do not tell
5551	 * their truesize.
5552	 * When we pulled its content into skb->head, the fragment
5553 * was freed, but __pskb_pull_tail() could not possibly
5554 * adjust skb->truesize, not knowing the frag truesize.
5555 */
5556 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5557}