/*
 * Routines having to do with the 'struct sk_buff' memory handlers.
 *
 * Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
 *		Florian La Roche <rzsfl@rz.uni-sb.de>
 *
 * Fixes:
 *	Alan Cox	:	Fixed the worst of the load
 *				balancer bugs.
 *	Dave Platt	:	Interrupt stacking fix.
 *	Richard Kooijman :	Timestamp fixes.
 *	Alan Cox	:	Changed buffer format.
 *	Alan Cox	:	destructor hook for AF_UNIX etc.
 *	Linus Torvalds	:	Better skb_clone.
 *	Alan Cox	:	Added skb_copy.
 *	Alan Cox	:	Added all the changed routines Linus
 *				only put in the headers
 *	Ray VanTassle	:	Fixed --skb->lock in free
 *	Alan Cox	:	skb_copy copy arp field
 *	Andi Kleen	:	slabified it.
 *	Robert Olsson	:	Removed skb_head_pool
 *
 * NOTE:
 *	The __skb_ routines should be called with interrupts
 * disabled, or you better be *real* sure that the operation is atomic
 * with respect to whatever list is being frobbed (e.g. via lock_sock()
 * or via disabling bottom half handlers, etc).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * The functions in this file will not compile correctly with gcc 2.4.x
 */
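/*
 * Example (an illustrative sketch, not part of the original file) of the
 * NOTE above in practice: skb_queue_tail() takes the queue lock with IRQs
 * disabled internally, while the __skb_queue_tail() variant relies on the
 * caller to provide that exclusion.
 *
 *	unsigned long flags;
 *
 *	skb_queue_tail(&queue, skb);		// locked variant, safe as-is
 *
 *	spin_lock_irqsave(&queue.lock, flags);	// caller provides atomicity
 *	__skb_queue_tail(&queue, skb);
 *	spin_unlock_irqrestore(&queue.lock, flags);
 *
 * Here 'queue' is assumed to be a struct sk_buff_head initialised with
 * skb_queue_head_init().
 */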

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/kmemcheck.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/netdevice.h>
#ifdef CONFIG_NET_CLS_ACT
#include <net/pkt_sched.h>
#endif
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/cache.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/scatterlist.h>
#include <linux/errqueue.h>
#include <linux/prefetch.h>

#include <net/protocol.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/xfrm.h>

#include <asm/uaccess.h>
#include <trace/events/skb.h>
#include <linux/highmem.h>

struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;

/**
 * skb_panic - private function for out-of-line support
 * @skb: buffer
 * @sz: size
 * @addr: address
 * @msg: skb_over_panic or skb_under_panic
 *
 * Out-of-line support for skb_put() and skb_push().
 * Called via the wrapper skb_over_panic() or skb_under_panic().
 * Keep out of line to prevent kernel bloat.
 * __builtin_return_address is not used because it is not always reliable.
 */
static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
                      const char msg[])
{
        pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
                 msg, addr, skb->len, sz, skb->head, skb->data,
                 (unsigned long)skb->tail, (unsigned long)skb->end,
                 skb->dev ? skb->dev->name : "<NULL>");
        BUG();
}

static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
{
        skb_panic(skb, sz, addr, __func__);
}

/*
 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 * the caller if emergency pfmemalloc reserves are being used. If it is and
 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 * may be used. Otherwise, the packet data may be discarded until enough
 * memory is free.
 */
#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
        __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)

static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
                               unsigned long ip, bool *pfmemalloc)
{
        void *obj;
        bool ret_pfmemalloc = false;

        /*
         * Try a regular allocation; when that fails and we're not entitled
         * to the reserves, fail.
         */
        obj = kmalloc_node_track_caller(size,
                                        flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
                                        node);
        if (obj || !(gfp_pfmemalloc_allowed(flags)))
                goto out;

        /* Try again but now we are using pfmemalloc reserves */
        ret_pfmemalloc = true;
        obj = kmalloc_node_track_caller(size, flags, node);

out:
        if (pfmemalloc)
                *pfmemalloc = ret_pfmemalloc;

        return obj;
}
148
149/* Allocate a new skbuff. We do this ourselves so we can fill in a few
150 * 'private' fields and also do memory statistics to find all the
151 * [BEEP] leaks.
152 *
153 */
154
155struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
156{
157 struct sk_buff *skb;
158
159 /* Get the HEAD */
160 skb = kmem_cache_alloc_node(skbuff_head_cache,
161 gfp_mask & ~__GFP_DMA, node);
162 if (!skb)
163 goto out;
164
165 /*
166 * Only clear those fields we need to clear, not those that we will
167 * actually initialise below. Hence, don't put any more fields after
168 * the tail pointer in struct sk_buff!
169 */
170 memset(skb, 0, offsetof(struct sk_buff, tail));
171 skb->head = NULL;
172 skb->truesize = sizeof(struct sk_buff);
173 atomic_set(&skb->users, 1);
174
175 skb->mac_header = (typeof(skb->mac_header))~0U;
176out:
177 return skb;
178}
179
180/**
181 * __alloc_skb - allocate a network buffer
182 * @size: size to allocate
183 * @gfp_mask: allocation mask
184 * @flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
185 * instead of head cache and allocate a cloned (child) skb.
186 * If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
187 * allocations in case the data is required for writeback
188 * @node: numa node to allocate memory on
189 *
190 * Allocate a new &sk_buff. The returned buffer has no headroom and a
191 * tail room of at least size bytes. The object has a reference count
192 * of one. The return is the buffer. On a failure the return is %NULL.
193 *
194 * Buffers may only be allocated from interrupts using a @gfp_mask of
195 * %GFP_ATOMIC.
196 */
197struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
198 int flags, int node)
199{
200 struct kmem_cache *cache;
201 struct skb_shared_info *shinfo;
202 struct sk_buff *skb;
203 u8 *data;
204 bool pfmemalloc;
205
206 cache = (flags & SKB_ALLOC_FCLONE)
207 ? skbuff_fclone_cache : skbuff_head_cache;
208
209 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
210 gfp_mask |= __GFP_MEMALLOC;
211
212 /* Get the HEAD */
213 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
214 if (!skb)
215 goto out;
216 prefetchw(skb);
217
218 /* We do our best to align skb_shared_info on a separate cache
219 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
220 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
221 * Both skb->head and skb_shared_info are cache line aligned.
222 */
223 size = SKB_DATA_ALIGN(size);
224 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
225 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
226 if (!data)
227 goto nodata;
228 /* kmalloc(size) might give us more room than requested.
229 * Put skb_shared_info exactly at the end of allocated zone,
230 * to allow max possible filling before reallocation.
231 */
232 size = SKB_WITH_OVERHEAD(ksize(data));
233 prefetchw(data + size);
234
235 /*
236 * Only clear those fields we need to clear, not those that we will
237 * actually initialise below. Hence, don't put any more fields after
238 * the tail pointer in struct sk_buff!
239 */
240 memset(skb, 0, offsetof(struct sk_buff, tail));
241 /* Account for allocated memory : skb + skb->head */
242 skb->truesize = SKB_TRUESIZE(size);
243 skb->pfmemalloc = pfmemalloc;
244 atomic_set(&skb->users, 1);
245 skb->head = data;
246 skb->data = data;
247 skb_reset_tail_pointer(skb);
248 skb->end = skb->tail + size;
249 skb->mac_header = (typeof(skb->mac_header))~0U;
250 skb->transport_header = (typeof(skb->transport_header))~0U;
251
252 /* make sure we initialize shinfo sequentially */
253 shinfo = skb_shinfo(skb);
254 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
255 atomic_set(&shinfo->dataref, 1);
256 kmemcheck_annotate_variable(shinfo->destructor_arg);
257
258 if (flags & SKB_ALLOC_FCLONE) {
259 struct sk_buff *child = skb + 1;
260 atomic_t *fclone_ref = (atomic_t *) (child + 1);
261
262 kmemcheck_annotate_bitfield(child, flags1);
263 kmemcheck_annotate_bitfield(child, flags2);
264 skb->fclone = SKB_FCLONE_ORIG;
265 atomic_set(fclone_ref, 1);
266
267 child->fclone = SKB_FCLONE_UNAVAILABLE;
268 child->pfmemalloc = pfmemalloc;
269 }
270out:
271 return skb;
272nodata:
273 kmem_cache_free(cache, skb);
274 skb = NULL;
275 goto out;
276}
277EXPORT_SYMBOL(__alloc_skb);
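/*
 * Illustrative sketch (not part of the original file): typical use of the
 * allocator above through the alloc_skb() wrapper. HEADROOM, PAYLOAD_LEN
 * and build_payload() are hypothetical.
 *
 *	struct sk_buff *skb = alloc_skb(HEADROOM + PAYLOAD_LEN, GFP_ATOMIC);
 *
 *	if (!skb)
 *		return -ENOMEM;
 *	skb_reserve(skb, HEADROOM);			// carve out headroom
 *	build_payload(skb_put(skb, PAYLOAD_LEN));	// extend tail, fill data
 *
 * The returned buffer starts with no headroom and at least 'size' bytes of
 * tailroom, so callers create their own headroom with skb_reserve() before
 * writing data with skb_put()/skb_push().
 */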
278
/**
 * build_skb - build a network buffer
 * @data: data buffer provided by caller
 * @frag_size: size of fragment, or 0 if head was kmalloced
 *
 * Allocate a new &sk_buff. Caller provides space holding head and
 * skb_shared_info. @data must have been allocated by kmalloc() only if
 * @frag_size is 0, otherwise data should come from the page allocator.
 * The return is the new skb buffer.
 * On a failure the return is %NULL, and @data is not freed.
 * Notes :
 *  Before IO, the driver allocates only the data buffer where the NIC puts
 *  the incoming frame. The driver should add room at head (NET_SKB_PAD) and
 *  MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 *  After IO, the driver calls build_skb() to allocate an sk_buff and
 *  populate it before giving the packet to the stack.
 *  RX rings only contain data buffers, not full skbs.
 */
297struct sk_buff *build_skb(void *data, unsigned int frag_size)
298{
299 struct skb_shared_info *shinfo;
300 struct sk_buff *skb;
301 unsigned int size = frag_size ? : ksize(data);
302
303 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
304 if (!skb)
305 return NULL;
306
307 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
308
309 memset(skb, 0, offsetof(struct sk_buff, tail));
310 skb->truesize = SKB_TRUESIZE(size);
311 skb->head_frag = frag_size != 0;
312 atomic_set(&skb->users, 1);
313 skb->head = data;
314 skb->data = data;
315 skb_reset_tail_pointer(skb);
316 skb->end = skb->tail + size;
317 skb->mac_header = (typeof(skb->mac_header))~0U;
318 skb->transport_header = (typeof(skb->transport_header))~0U;
319
320 /* make sure we initialize shinfo sequentially */
321 shinfo = skb_shinfo(skb);
322 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
323 atomic_set(&shinfo->dataref, 1);
324 kmemcheck_annotate_variable(shinfo->destructor_arg);
325
326 return skb;
327}
328EXPORT_SYMBOL(build_skb);
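/*
 * Illustrative sketch (not part of the original file) of the driver pattern
 * described in the build_skb() notes above: reserve NET_SKB_PAD of headroom
 * and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) of tailroom in the raw
 * buffer, then wrap it with build_skb() after DMA completes. 'rx_buf_len'
 * is a hypothetical receive buffer length.
 *
 *	unsigned int fragsz = SKB_DATA_ALIGN(NET_SKB_PAD + rx_buf_len) +
 *			      SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *data = netdev_alloc_frag(fragsz);	// before IO: data buffer only
 *	...
 *	skb = build_skb(data, fragsz);		// after IO: wrap it in an skb
 *	if (unlikely(!skb)) {
 *		put_page(virt_to_head_page(data));	// @data is not freed for us
 *		return NULL;
 *	}
 *	skb_reserve(skb, NET_SKB_PAD);
 *
 * __netdev_alloc_skb() further down in this file follows exactly this
 * sequence.
 */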
329
330struct netdev_alloc_cache {
331 struct page_frag frag;
332 /* we maintain a pagecount bias, so that we dont dirty cache line
333 * containing page->_count every time we allocate a fragment.
334 */
335 unsigned int pagecnt_bias;
336};
337static DEFINE_PER_CPU(struct netdev_alloc_cache, netdev_alloc_cache);
338
339static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
340{
341 struct netdev_alloc_cache *nc;
342 void *data = NULL;
343 int order;
344 unsigned long flags;
345
346 local_irq_save(flags);
347 nc = &__get_cpu_var(netdev_alloc_cache);
348 if (unlikely(!nc->frag.page)) {
349refill:
350 for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
351 gfp_t gfp = gfp_mask;
352
353 if (order)
354 gfp |= __GFP_COMP | __GFP_NOWARN;
355 nc->frag.page = alloc_pages(gfp, order);
356 if (likely(nc->frag.page))
357 break;
358 if (--order < 0)
359 goto end;
360 }
361 nc->frag.size = PAGE_SIZE << order;
362recycle:
363 atomic_set(&nc->frag.page->_count, NETDEV_PAGECNT_MAX_BIAS);
364 nc->pagecnt_bias = NETDEV_PAGECNT_MAX_BIAS;
365 nc->frag.offset = 0;
366 }
367
368 if (nc->frag.offset + fragsz > nc->frag.size) {
369 /* avoid unnecessary locked operations if possible */
370 if ((atomic_read(&nc->frag.page->_count) == nc->pagecnt_bias) ||
371 atomic_sub_and_test(nc->pagecnt_bias, &nc->frag.page->_count))
372 goto recycle;
373 goto refill;
374 }
375
376 data = page_address(nc->frag.page) + nc->frag.offset;
377 nc->frag.offset += fragsz;
378 nc->pagecnt_bias--;
379end:
380 local_irq_restore(flags);
381 return data;
382}
383
384/**
385 * netdev_alloc_frag - allocate a page fragment
386 * @fragsz: fragment size
387 *
388 * Allocates a frag from a page for receive buffer.
389 * Uses GFP_ATOMIC allocations.
390 */
391void *netdev_alloc_frag(unsigned int fragsz)
392{
393 return __netdev_alloc_frag(fragsz, GFP_ATOMIC | __GFP_COLD);
394}
395EXPORT_SYMBOL(netdev_alloc_frag);
396
397/**
398 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
399 * @dev: network device to receive on
400 * @length: length to allocate
401 * @gfp_mask: get_free_pages mask, passed to alloc_skb
402 *
403 * Allocate a new &sk_buff and assign it a usage count of one. The
404 * buffer has unspecified headroom built in. Users should allocate
405 * the headroom they think they need without accounting for the
406 * built in space. The built in space is used for optimisations.
407 *
408 * %NULL is returned if there is no free memory.
409 */
410struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
411 unsigned int length, gfp_t gfp_mask)
412{
413 struct sk_buff *skb = NULL;
414 unsigned int fragsz = SKB_DATA_ALIGN(length + NET_SKB_PAD) +
415 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
416
417 if (fragsz <= PAGE_SIZE && !(gfp_mask & (__GFP_WAIT | GFP_DMA))) {
418 void *data;
419
420 if (sk_memalloc_socks())
421 gfp_mask |= __GFP_MEMALLOC;
422
423 data = __netdev_alloc_frag(fragsz, gfp_mask);
424
425 if (likely(data)) {
426 skb = build_skb(data, fragsz);
427 if (unlikely(!skb))
428 put_page(virt_to_head_page(data));
429 }
430 } else {
431 skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask,
432 SKB_ALLOC_RX, NUMA_NO_NODE);
433 }
434 if (likely(skb)) {
435 skb_reserve(skb, NET_SKB_PAD);
436 skb->dev = dev;
437 }
438 return skb;
439}
440EXPORT_SYMBOL(__netdev_alloc_skb);
441
442void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
443 int size, unsigned int truesize)
444{
445 skb_fill_page_desc(skb, i, page, off, size);
446 skb->len += size;
447 skb->data_len += size;
448 skb->truesize += truesize;
449}
450EXPORT_SYMBOL(skb_add_rx_frag);
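/*
 * Illustrative sketch (not part of the original file): a driver attaching a
 * received page fragment to an skb with skb_add_rx_frag(). 'page', 'offset'
 * and 'frag_len' are hypothetical driver-owned values.
 *
 *	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, offset,
 *			frag_len, PAGE_SIZE);
 *
 * Note that 'truesize' (PAGE_SIZE here) accounts for the memory actually
 * consumed, not just the bytes used, so socket memory accounting stays
 * honest.
 */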
451
452void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
453 unsigned int truesize)
454{
455 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
456
457 skb_frag_size_add(frag, size);
458 skb->len += size;
459 skb->data_len += size;
460 skb->truesize += truesize;
461}
462EXPORT_SYMBOL(skb_coalesce_rx_frag);
463
464static void skb_drop_list(struct sk_buff **listp)
465{
466 kfree_skb_list(*listp);
467 *listp = NULL;
468}
469
470static inline void skb_drop_fraglist(struct sk_buff *skb)
471{
472 skb_drop_list(&skb_shinfo(skb)->frag_list);
473}
474
475static void skb_clone_fraglist(struct sk_buff *skb)
476{
477 struct sk_buff *list;
478
479 skb_walk_frags(skb, list)
480 skb_get(list);
481}
482
483static void skb_free_head(struct sk_buff *skb)
484{
485 if (skb->head_frag)
486 put_page(virt_to_head_page(skb->head));
487 else
488 kfree(skb->head);
489}
490
491static void skb_release_data(struct sk_buff *skb)
492{
493 if (!skb->cloned ||
494 !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
495 &skb_shinfo(skb)->dataref)) {
496 if (skb_shinfo(skb)->nr_frags) {
497 int i;
498 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
499 skb_frag_unref(skb, i);
500 }
501
502 /*
503 * If skb buf is from userspace, we need to notify the caller
504 * the lower device DMA has done;
505 */
506 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
507 struct ubuf_info *uarg;
508
509 uarg = skb_shinfo(skb)->destructor_arg;
510 if (uarg->callback)
511 uarg->callback(uarg, true);
512 }
513
514 if (skb_has_frag_list(skb))
515 skb_drop_fraglist(skb);
516
517 skb_free_head(skb);
518 }
519}
520
521/*
522 * Free an skbuff by memory without cleaning the state.
523 */
524static void kfree_skbmem(struct sk_buff *skb)
525{
526 struct sk_buff *other;
527 atomic_t *fclone_ref;
528
529 switch (skb->fclone) {
530 case SKB_FCLONE_UNAVAILABLE:
531 kmem_cache_free(skbuff_head_cache, skb);
532 break;
533
534 case SKB_FCLONE_ORIG:
535 fclone_ref = (atomic_t *) (skb + 2);
536 if (atomic_dec_and_test(fclone_ref))
537 kmem_cache_free(skbuff_fclone_cache, skb);
538 break;
539
540 case SKB_FCLONE_CLONE:
541 fclone_ref = (atomic_t *) (skb + 1);
542 other = skb - 1;
543
544 /* The clone portion is available for
545 * fast-cloning again.
546 */
547 skb->fclone = SKB_FCLONE_UNAVAILABLE;
548
549 if (atomic_dec_and_test(fclone_ref))
550 kmem_cache_free(skbuff_fclone_cache, other);
551 break;
552 }
553}
554
555static void skb_release_head_state(struct sk_buff *skb)
556{
557 skb_dst_drop(skb);
558#ifdef CONFIG_XFRM
559 secpath_put(skb->sp);
560#endif
561 if (skb->destructor) {
562 WARN_ON(in_irq());
563 skb->destructor(skb);
564 }
565#if IS_ENABLED(CONFIG_NF_CONNTRACK)
566 nf_conntrack_put(skb->nfct);
567#endif
568#ifdef CONFIG_BRIDGE_NETFILTER
569 nf_bridge_put(skb->nf_bridge);
570#endif
571/* XXX: IS this still necessary? - JHS */
572#ifdef CONFIG_NET_SCHED
573 skb->tc_index = 0;
574#ifdef CONFIG_NET_CLS_ACT
575 skb->tc_verd = 0;
576#endif
577#endif
578}
579
580/* Free everything but the sk_buff shell. */
581static void skb_release_all(struct sk_buff *skb)
582{
583 skb_release_head_state(skb);
584 if (likely(skb->head))
585 skb_release_data(skb);
586}
587
588/**
589 * __kfree_skb - private function
590 * @skb: buffer
591 *
592 * Free an sk_buff. Release anything attached to the buffer.
593 * Clean the state. This is an internal helper function. Users should
594 * always call kfree_skb
595 */
596
597void __kfree_skb(struct sk_buff *skb)
598{
599 skb_release_all(skb);
600 kfree_skbmem(skb);
601}
602EXPORT_SYMBOL(__kfree_skb);
603
604/**
605 * kfree_skb - free an sk_buff
606 * @skb: buffer to free
607 *
608 * Drop a reference to the buffer and free it if the usage count has
609 * hit zero.
610 */
611void kfree_skb(struct sk_buff *skb)
612{
613 if (unlikely(!skb))
614 return;
615 if (likely(atomic_read(&skb->users) == 1))
616 smp_rmb();
617 else if (likely(!atomic_dec_and_test(&skb->users)))
618 return;
619 trace_kfree_skb(skb, __builtin_return_address(0));
620 __kfree_skb(skb);
621}
622EXPORT_SYMBOL(kfree_skb);
623
624void kfree_skb_list(struct sk_buff *segs)
625{
626 while (segs) {
627 struct sk_buff *next = segs->next;
628
629 kfree_skb(segs);
630 segs = next;
631 }
632}
633EXPORT_SYMBOL(kfree_skb_list);
634
635/**
636 * skb_tx_error - report an sk_buff xmit error
637 * @skb: buffer that triggered an error
638 *
639 * Report xmit error if a device callback is tracking this skb.
640 * skb must be freed afterwards.
641 */
642void skb_tx_error(struct sk_buff *skb)
643{
644 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
645 struct ubuf_info *uarg;
646
647 uarg = skb_shinfo(skb)->destructor_arg;
648 if (uarg->callback)
649 uarg->callback(uarg, false);
650 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
651 }
652}
653EXPORT_SYMBOL(skb_tx_error);
654
/**
 * consume_skb - free an skbuff
 * @skb: buffer to free
 *
 * Drop a ref to the buffer and free it if the usage count has hit zero.
 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
 * is being dropped after a failure and notes that via its drop tracepoint,
 * whereas consume_skb traces a normal, successful consumption.
 */
663void consume_skb(struct sk_buff *skb)
664{
665 if (unlikely(!skb))
666 return;
667 if (likely(atomic_read(&skb->users) == 1))
668 smp_rmb();
669 else if (likely(!atomic_dec_and_test(&skb->users)))
670 return;
671 trace_consume_skb(skb);
672 __kfree_skb(skb);
673}
674EXPORT_SYMBOL(consume_skb);
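/*
 * Illustrative sketch (not part of the original file): choosing between the
 * two free helpers above in a transmit completion path. 'tx_ok' is a
 * hypothetical status flag.
 *
 *	if (tx_ok)
 *		consume_skb(skb);	// normal completion, not a drop
 *	else
 *		kfree_skb(skb);		// failure path, fires the drop tracepoint
 *
 * Both end up in __kfree_skb(); the split only exists so that packet-drop
 * monitoring built on the kfree_skb tracepoint is not polluted by ordinary,
 * successful consumption.
 */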
675
676static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
677{
678 new->tstamp = old->tstamp;
679 new->dev = old->dev;
680 new->transport_header = old->transport_header;
681 new->network_header = old->network_header;
682 new->mac_header = old->mac_header;
683 new->inner_protocol = old->inner_protocol;
684 new->inner_transport_header = old->inner_transport_header;
685 new->inner_network_header = old->inner_network_header;
686 new->inner_mac_header = old->inner_mac_header;
687 skb_dst_copy(new, old);
688 skb_copy_hash(new, old);
689 new->ooo_okay = old->ooo_okay;
690 new->no_fcs = old->no_fcs;
691 new->encapsulation = old->encapsulation;
692#ifdef CONFIG_XFRM
693 new->sp = secpath_get(old->sp);
694#endif
695 memcpy(new->cb, old->cb, sizeof(old->cb));
696 new->csum = old->csum;
697 new->local_df = old->local_df;
698 new->pkt_type = old->pkt_type;
699 new->ip_summed = old->ip_summed;
700 skb_copy_queue_mapping(new, old);
701 new->priority = old->priority;
702#if IS_ENABLED(CONFIG_IP_VS)
703 new->ipvs_property = old->ipvs_property;
704#endif
705 new->pfmemalloc = old->pfmemalloc;
706 new->protocol = old->protocol;
707 new->mark = old->mark;
708 new->skb_iif = old->skb_iif;
709 __nf_copy(new, old);
710#ifdef CONFIG_NET_SCHED
711 new->tc_index = old->tc_index;
712#ifdef CONFIG_NET_CLS_ACT
713 new->tc_verd = old->tc_verd;
714#endif
715#endif
716 new->vlan_proto = old->vlan_proto;
717 new->vlan_tci = old->vlan_tci;
718
719 skb_copy_secmark(new, old);
720
721#ifdef CONFIG_NET_RX_BUSY_POLL
722 new->napi_id = old->napi_id;
723#endif
724}
725
726/*
727 * You should not add any new code to this function. Add it to
728 * __copy_skb_header above instead.
729 */
730static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
731{
732#define C(x) n->x = skb->x
733
734 n->next = n->prev = NULL;
735 n->sk = NULL;
736 __copy_skb_header(n, skb);
737
738 C(len);
739 C(data_len);
740 C(mac_len);
741 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
742 n->cloned = 1;
743 n->nohdr = 0;
744 n->destructor = NULL;
745 C(tail);
746 C(end);
747 C(head);
748 C(head_frag);
749 C(data);
750 C(truesize);
751 atomic_set(&n->users, 1);
752
753 atomic_inc(&(skb_shinfo(skb)->dataref));
754 skb->cloned = 1;
755
756 return n;
757#undef C
758}
759
760/**
761 * skb_morph - morph one skb into another
762 * @dst: the skb to receive the contents
763 * @src: the skb to supply the contents
764 *
765 * This is identical to skb_clone except that the target skb is
766 * supplied by the user.
767 *
768 * The target skb is returned upon exit.
769 */
770struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
771{
772 skb_release_all(dst);
773 return __skb_clone(dst, src);
774}
775EXPORT_SYMBOL_GPL(skb_morph);
776
777/**
778 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
779 * @skb: the skb to modify
780 * @gfp_mask: allocation priority
781 *
782 * This must be called on SKBTX_DEV_ZEROCOPY skb.
783 * It will copy all frags into kernel and drop the reference
784 * to userspace pages.
785 *
786 * If this function is called from an interrupt gfp_mask() must be
787 * %GFP_ATOMIC.
788 *
789 * Returns 0 on success or a negative error code on failure
790 * to allocate kernel memory to copy to.
791 */
792int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
793{
794 int i;
795 int num_frags = skb_shinfo(skb)->nr_frags;
796 struct page *page, *head = NULL;
797 struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
798
799 for (i = 0; i < num_frags; i++) {
800 u8 *vaddr;
801 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
802
803 page = alloc_page(gfp_mask);
804 if (!page) {
805 while (head) {
806 struct page *next = (struct page *)page_private(head);
807 put_page(head);
808 head = next;
809 }
810 return -ENOMEM;
811 }
812 vaddr = kmap_atomic(skb_frag_page(f));
813 memcpy(page_address(page),
814 vaddr + f->page_offset, skb_frag_size(f));
815 kunmap_atomic(vaddr);
816 set_page_private(page, (unsigned long)head);
817 head = page;
818 }
819
820 /* skb frags release userspace buffers */
821 for (i = 0; i < num_frags; i++)
822 skb_frag_unref(skb, i);
823
824 uarg->callback(uarg, false);
825
826 /* skb frags point to kernel buffers */
827 for (i = num_frags - 1; i >= 0; i--) {
828 __skb_fill_page_desc(skb, i, head, 0,
829 skb_shinfo(skb)->frags[i].size);
830 head = (struct page *)page_private(head);
831 }
832
833 skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
834 return 0;
835}
836EXPORT_SYMBOL_GPL(skb_copy_ubufs);
837
838/**
839 * skb_clone - duplicate an sk_buff
840 * @skb: buffer to clone
841 * @gfp_mask: allocation priority
842 *
843 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
844 * copies share the same packet data but not structure. The new
845 * buffer has a reference count of 1. If the allocation fails the
846 * function returns %NULL otherwise the new buffer is returned.
847 *
848 * If this function is called from an interrupt gfp_mask() must be
849 * %GFP_ATOMIC.
850 */
851
852struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
853{
854 struct sk_buff *n;
855
856 if (skb_orphan_frags(skb, gfp_mask))
857 return NULL;
858
859 n = skb + 1;
860 if (skb->fclone == SKB_FCLONE_ORIG &&
861 n->fclone == SKB_FCLONE_UNAVAILABLE) {
862 atomic_t *fclone_ref = (atomic_t *) (n + 1);
863 n->fclone = SKB_FCLONE_CLONE;
864 atomic_inc(fclone_ref);
865 } else {
866 if (skb_pfmemalloc(skb))
867 gfp_mask |= __GFP_MEMALLOC;
868
869 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
870 if (!n)
871 return NULL;
872
873 kmemcheck_annotate_bitfield(n, flags1);
874 kmemcheck_annotate_bitfield(n, flags2);
875 n->fclone = SKB_FCLONE_UNAVAILABLE;
876 }
877
878 return __skb_clone(n, skb);
879}
880EXPORT_SYMBOL(skb_clone);
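/*
 * Illustrative sketch (not part of the original file): the classic use of
 * skb_clone() is to hand one reference to the device while keeping the
 * original queued (e.g. for possible retransmission). The clone shares the
 * packet data, so it must not be written to. 'hypothetical_xmit' is, as the
 * name says, hypothetical.
 *
 *	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (!clone)
 *		return -ENOBUFS;
 *	rc = hypothetical_xmit(clone);	// device consumes the clone
 *	// 'skb' stays on our queue untouched
 *
 * A buffer allocated with SKB_ALLOC_FCLONE takes the fast branch above, so
 * the clone comes from the companion fclone slot rather than a fresh
 * kmem_cache allocation.
 */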
881
882static void skb_headers_offset_update(struct sk_buff *skb, int off)
883{
884 /* Only adjust this if it actually is csum_start rather than csum */
885 if (skb->ip_summed == CHECKSUM_PARTIAL)
886 skb->csum_start += off;
887 /* {transport,network,mac}_header and tail are relative to skb->head */
888 skb->transport_header += off;
889 skb->network_header += off;
890 if (skb_mac_header_was_set(skb))
891 skb->mac_header += off;
892 skb->inner_transport_header += off;
893 skb->inner_network_header += off;
894 skb->inner_mac_header += off;
895}
896
897static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
898{
899 __copy_skb_header(new, old);
900
901 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
902 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
903 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
904}
905
906static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
907{
908 if (skb_pfmemalloc(skb))
909 return SKB_ALLOC_RX;
910 return 0;
911}
912
913/**
914 * skb_copy - create private copy of an sk_buff
915 * @skb: buffer to copy
916 * @gfp_mask: allocation priority
917 *
918 * Make a copy of both an &sk_buff and its data. This is used when the
919 * caller wishes to modify the data and needs a private copy of the
920 * data to alter. Returns %NULL on failure or the pointer to the buffer
921 * on success. The returned buffer has a reference count of 1.
922 *
923 * As by-product this function converts non-linear &sk_buff to linear
924 * one, so that &sk_buff becomes completely private and caller is allowed
925 * to modify all the data of returned buffer. This means that this
926 * function is not recommended for use in circumstances when only
927 * header is going to be modified. Use pskb_copy() instead.
928 */
929
930struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
931{
932 int headerlen = skb_headroom(skb);
933 unsigned int size = skb_end_offset(skb) + skb->data_len;
934 struct sk_buff *n = __alloc_skb(size, gfp_mask,
935 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
936
937 if (!n)
938 return NULL;
939
940 /* Set the data pointer */
941 skb_reserve(n, headerlen);
942 /* Set the tail pointer and length */
943 skb_put(n, skb->len);
944
945 if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
946 BUG();
947
948 copy_skb_header(n, skb);
949 return n;
950}
951EXPORT_SYMBOL(skb_copy);
952
953/**
954 * __pskb_copy - create copy of an sk_buff with private head.
955 * @skb: buffer to copy
956 * @headroom: headroom of new skb
957 * @gfp_mask: allocation priority
958 *
959 * Make a copy of both an &sk_buff and part of its data, located
960 * in header. Fragmented data remain shared. This is used when
961 * the caller wishes to modify only header of &sk_buff and needs
962 * private copy of the header to alter. Returns %NULL on failure
963 * or the pointer to the buffer on success.
964 * The returned buffer has a reference count of 1.
965 */
966
967struct sk_buff *__pskb_copy(struct sk_buff *skb, int headroom, gfp_t gfp_mask)
968{
969 unsigned int size = skb_headlen(skb) + headroom;
970 struct sk_buff *n = __alloc_skb(size, gfp_mask,
971 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
972
973 if (!n)
974 goto out;
975
976 /* Set the data pointer */
977 skb_reserve(n, headroom);
978 /* Set the tail pointer and length */
979 skb_put(n, skb_headlen(skb));
980 /* Copy the bytes */
981 skb_copy_from_linear_data(skb, n->data, n->len);
982
983 n->truesize += skb->data_len;
984 n->data_len = skb->data_len;
985 n->len = skb->len;
986
987 if (skb_shinfo(skb)->nr_frags) {
988 int i;
989
990 if (skb_orphan_frags(skb, gfp_mask)) {
991 kfree_skb(n);
992 n = NULL;
993 goto out;
994 }
995 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
996 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
997 skb_frag_ref(skb, i);
998 }
999 skb_shinfo(n)->nr_frags = i;
1000 }
1001
1002 if (skb_has_frag_list(skb)) {
1003 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1004 skb_clone_fraglist(n);
1005 }
1006
1007 copy_skb_header(n, skb);
1008out:
1009 return n;
1010}
1011EXPORT_SYMBOL(__pskb_copy);
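/*
 * Illustrative sketch (not part of the original file): picking between the
 * two copies above. Rewriting only headers is served by the cheaper
 * pskb_copy(); rewriting payload bytes needs the fully linearised
 * skb_copy().
 *
 *	struct sk_buff *hdr_copy  = pskb_copy(skb, GFP_ATOMIC);	// private head,
 *								// shared frags
 *	struct sk_buff *full_copy = skb_copy(skb, GFP_ATOMIC);	// private everything
 *
 * pskb_copy() is the headroom-preserving wrapper around __pskb_copy()
 * provided by <linux/skbuff.h>; it is also used by skb_realloc_headroom()
 * below.
 */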
1012
/**
 * pskb_expand_head - reallocate header of &sk_buff
 * @skb: buffer to reallocate
 * @nhead: room to add at head
 * @ntail: room to add at tail
 * @gfp_mask: allocation priority
 *
 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
 * the header of @skb. The &sk_buff itself is not changed and MUST have
 * a reference count of 1. Returns zero on success or a negative error
 * code if expansion failed; in that case, the &sk_buff is not changed.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */
1028
1029int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1030 gfp_t gfp_mask)
1031{
1032 int i;
1033 u8 *data;
1034 int size = nhead + skb_end_offset(skb) + ntail;
1035 long off;
1036
1037 BUG_ON(nhead < 0);
1038
1039 if (skb_shared(skb))
1040 BUG();
1041
1042 size = SKB_DATA_ALIGN(size);
1043
1044 if (skb_pfmemalloc(skb))
1045 gfp_mask |= __GFP_MEMALLOC;
1046 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1047 gfp_mask, NUMA_NO_NODE, NULL);
1048 if (!data)
1049 goto nodata;
1050 size = SKB_WITH_OVERHEAD(ksize(data));
1051
1052 /* Copy only real data... and, alas, header. This should be
1053 * optimized for the cases when header is void.
1054 */
1055 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1056
1057 memcpy((struct skb_shared_info *)(data + size),
1058 skb_shinfo(skb),
1059 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1060
1061 /*
1062 * if shinfo is shared we must drop the old head gracefully, but if it
1063 * is not we can just drop the old head and let the existing refcount
1064 * be since all we did is relocate the values
1065 */
1066 if (skb_cloned(skb)) {
1067 /* copy this zero copy skb frags */
1068 if (skb_orphan_frags(skb, gfp_mask))
1069 goto nofrags;
1070 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1071 skb_frag_ref(skb, i);
1072
1073 if (skb_has_frag_list(skb))
1074 skb_clone_fraglist(skb);
1075
1076 skb_release_data(skb);
1077 } else {
1078 skb_free_head(skb);
1079 }
1080 off = (data + nhead) - skb->head;
1081
1082 skb->head = data;
1083 skb->head_frag = 0;
1084 skb->data += off;
1085#ifdef NET_SKBUFF_DATA_USES_OFFSET
1086 skb->end = size;
1087 off = nhead;
1088#else
1089 skb->end = skb->head + size;
1090#endif
1091 skb->tail += off;
1092 skb_headers_offset_update(skb, nhead);
1093 skb->cloned = 0;
1094 skb->hdr_len = 0;
1095 skb->nohdr = 0;
1096 atomic_set(&skb_shinfo(skb)->dataref, 1);
1097 return 0;
1098
1099nofrags:
1100 kfree(data);
1101nodata:
1102 return -ENOMEM;
1103}
1104EXPORT_SYMBOL(pskb_expand_head);
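/*
 * Illustrative sketch (not part of the original file): making sure there is
 * room to push a new header in front of the packet. 'encap_len' is a
 * hypothetical header size.
 *
 *	if (skb_headroom(skb) < encap_len &&
 *	    pskb_expand_head(skb, encap_len - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;
 *	hdr = skb_push(skb, encap_len);	// now guaranteed to fit
 *
 * Remember that every cached pointer into the old header area must be
 * re-derived after a successful pskb_expand_head().
 */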
1105
1106/* Make private copy of skb with writable head and some headroom */
1107
1108struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1109{
1110 struct sk_buff *skb2;
1111 int delta = headroom - skb_headroom(skb);
1112
1113 if (delta <= 0)
1114 skb2 = pskb_copy(skb, GFP_ATOMIC);
1115 else {
1116 skb2 = skb_clone(skb, GFP_ATOMIC);
1117 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1118 GFP_ATOMIC)) {
1119 kfree_skb(skb2);
1120 skb2 = NULL;
1121 }
1122 }
1123 return skb2;
1124}
1125EXPORT_SYMBOL(skb_realloc_headroom);
1126
1127/**
1128 * skb_copy_expand - copy and expand sk_buff
1129 * @skb: buffer to copy
1130 * @newheadroom: new free bytes at head
1131 * @newtailroom: new free bytes at tail
1132 * @gfp_mask: allocation priority
1133 *
1134 * Make a copy of both an &sk_buff and its data and while doing so
1135 * allocate additional space.
1136 *
1137 * This is used when the caller wishes to modify the data and needs a
1138 * private copy of the data to alter as well as more space for new fields.
1139 * Returns %NULL on failure or the pointer to the buffer
1140 * on success. The returned buffer has a reference count of 1.
1141 *
1142 * You must pass %GFP_ATOMIC as the allocation priority if this function
1143 * is called from an interrupt.
1144 */
1145struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1146 int newheadroom, int newtailroom,
1147 gfp_t gfp_mask)
1148{
1149 /*
1150 * Allocate the copy buffer
1151 */
1152 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1153 gfp_mask, skb_alloc_rx_flag(skb),
1154 NUMA_NO_NODE);
1155 int oldheadroom = skb_headroom(skb);
1156 int head_copy_len, head_copy_off;
1157
1158 if (!n)
1159 return NULL;
1160
1161 skb_reserve(n, newheadroom);
1162
1163 /* Set the tail pointer and length */
1164 skb_put(n, skb->len);
1165
1166 head_copy_len = oldheadroom;
1167 head_copy_off = 0;
1168 if (newheadroom <= head_copy_len)
1169 head_copy_len = newheadroom;
1170 else
1171 head_copy_off = newheadroom - head_copy_len;
1172
1173 /* Copy the linear header and data. */
1174 if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1175 skb->len + head_copy_len))
1176 BUG();
1177
1178 copy_skb_header(n, skb);
1179
1180 skb_headers_offset_update(n, newheadroom - oldheadroom);
1181
1182 return n;
1183}
1184EXPORT_SYMBOL(skb_copy_expand);
1185
1186/**
1187 * skb_pad - zero pad the tail of an skb
1188 * @skb: buffer to pad
1189 * @pad: space to pad
1190 *
1191 * Ensure that a buffer is followed by a padding area that is zero
1192 * filled. Used by network drivers which may DMA or transfer data
1193 * beyond the buffer end onto the wire.
1194 *
1195 * May return error in out of memory cases. The skb is freed on error.
1196 */
1197
1198int skb_pad(struct sk_buff *skb, int pad)
1199{
1200 int err;
1201 int ntail;
1202
1203 /* If the skbuff is non linear tailroom is always zero.. */
1204 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1205 memset(skb->data+skb->len, 0, pad);
1206 return 0;
1207 }
1208
1209 ntail = skb->data_len + pad - (skb->end - skb->tail);
1210 if (likely(skb_cloned(skb) || ntail > 0)) {
1211 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1212 if (unlikely(err))
1213 goto free_skb;
1214 }
1215
1216 /* FIXME: The use of this function with non-linear skb's really needs
1217 * to be audited.
1218 */
1219 err = skb_linearize(skb);
1220 if (unlikely(err))
1221 goto free_skb;
1222
1223 memset(skb->data + skb->len, 0, pad);
1224 return 0;
1225
1226free_skb:
1227 kfree_skb(skb);
1228 return err;
1229}
1230EXPORT_SYMBOL(skb_pad);
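/*
 * Illustrative sketch (not part of the original file): a driver padding a
 * runt frame before handing it to hardware that transmits a fixed minimum.
 *
 *	if (skb->len < ETH_ZLEN) {
 *		if (skb_pad(skb, ETH_ZLEN - skb->len))
 *			return NETDEV_TX_OK;	// skb already freed on error
 *	}
 *
 * Note that skb_pad() only zero-fills the area past skb->len; it does not
 * grow skb->len itself, which is why drivers typically program the hardware
 * to send max(skb->len, ETH_ZLEN) bytes.
 */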
1231
1232/**
1233 * pskb_put - add data to the tail of a potentially fragmented buffer
1234 * @skb: start of the buffer to use
1235 * @tail: tail fragment of the buffer to use
1236 * @len: amount of data to add
1237 *
1238 * This function extends the used data area of the potentially
1239 * fragmented buffer. @tail must be the last fragment of @skb -- or
1240 * @skb itself. If this would exceed the total buffer size the kernel
1241 * will panic. A pointer to the first byte of the extra data is
1242 * returned.
1243 */
1244
1245unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1246{
1247 if (tail != skb) {
1248 skb->data_len += len;
1249 skb->len += len;
1250 }
1251 return skb_put(tail, len);
1252}
1253EXPORT_SYMBOL_GPL(pskb_put);
1254
1255/**
1256 * skb_put - add data to a buffer
1257 * @skb: buffer to use
1258 * @len: amount of data to add
1259 *
1260 * This function extends the used data area of the buffer. If this would
1261 * exceed the total buffer size the kernel will panic. A pointer to the
1262 * first byte of the extra data is returned.
1263 */
1264unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1265{
1266 unsigned char *tmp = skb_tail_pointer(skb);
1267 SKB_LINEAR_ASSERT(skb);
1268 skb->tail += len;
1269 skb->len += len;
1270 if (unlikely(skb->tail > skb->end))
1271 skb_over_panic(skb, len, __builtin_return_address(0));
1272 return tmp;
1273}
1274EXPORT_SYMBOL(skb_put);
1275
1276/**
1277 * skb_push - add data to the start of a buffer
1278 * @skb: buffer to use
1279 * @len: amount of data to add
1280 *
1281 * This function extends the used data area of the buffer at the buffer
1282 * start. If this would exceed the total buffer headroom the kernel will
1283 * panic. A pointer to the first byte of the extra data is returned.
1284 */
1285unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1286{
1287 skb->data -= len;
1288 skb->len += len;
1289 if (unlikely(skb->data<skb->head))
1290 skb_under_panic(skb, len, __builtin_return_address(0));
1291 return skb->data;
1292}
1293EXPORT_SYMBOL(skb_push);
1294
1295/**
1296 * skb_pull - remove data from the start of a buffer
1297 * @skb: buffer to use
1298 * @len: amount of data to remove
1299 *
1300 * This function removes data from the start of a buffer, returning
1301 * the memory to the headroom. A pointer to the next data in the buffer
1302 * is returned. Once the data has been pulled future pushes will overwrite
1303 * the old data.
1304 */
1305unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1306{
1307 return skb_pull_inline(skb, len);
1308}
1309EXPORT_SYMBOL(skb_pull);
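/*
 * Illustrative sketch (not part of the original file): how the basic pointer
 * operations above fit together when building and then parsing a frame.
 * 'payload' and 'payload_len' are hypothetical.
 *
 *	skb_reserve(skb, ETH_HLEN);			// reserve headroom
 *	memcpy(skb_put(skb, payload_len), payload, payload_len);
 *	eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);	// prepend header
 *	...
 *	skb_pull(skb, ETH_HLEN);			// consume header on RX
 *
 * skb_push() and skb_put() call the panic helpers at the top of this file
 * if the available head or tail room is exceeded.
 */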
1310
1311/**
1312 * skb_trim - remove end from a buffer
1313 * @skb: buffer to alter
1314 * @len: new length
1315 *
1316 * Cut the length of a buffer down by removing data from the tail. If
1317 * the buffer is already under the length specified it is not modified.
1318 * The skb must be linear.
1319 */
1320void skb_trim(struct sk_buff *skb, unsigned int len)
1321{
1322 if (skb->len > len)
1323 __skb_trim(skb, len);
1324}
1325EXPORT_SYMBOL(skb_trim);
1326
1327/* Trims skb to length len. It can change skb pointers.
1328 */
1329
1330int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1331{
1332 struct sk_buff **fragp;
1333 struct sk_buff *frag;
1334 int offset = skb_headlen(skb);
1335 int nfrags = skb_shinfo(skb)->nr_frags;
1336 int i;
1337 int err;
1338
1339 if (skb_cloned(skb) &&
1340 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1341 return err;
1342
1343 i = 0;
1344 if (offset >= len)
1345 goto drop_pages;
1346
1347 for (; i < nfrags; i++) {
1348 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1349
1350 if (end < len) {
1351 offset = end;
1352 continue;
1353 }
1354
1355 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1356
1357drop_pages:
1358 skb_shinfo(skb)->nr_frags = i;
1359
1360 for (; i < nfrags; i++)
1361 skb_frag_unref(skb, i);
1362
1363 if (skb_has_frag_list(skb))
1364 skb_drop_fraglist(skb);
1365 goto done;
1366 }
1367
1368 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1369 fragp = &frag->next) {
1370 int end = offset + frag->len;
1371
1372 if (skb_shared(frag)) {
1373 struct sk_buff *nfrag;
1374
1375 nfrag = skb_clone(frag, GFP_ATOMIC);
1376 if (unlikely(!nfrag))
1377 return -ENOMEM;
1378
1379 nfrag->next = frag->next;
1380 consume_skb(frag);
1381 frag = nfrag;
1382 *fragp = frag;
1383 }
1384
1385 if (end < len) {
1386 offset = end;
1387 continue;
1388 }
1389
1390 if (end > len &&
1391 unlikely((err = pskb_trim(frag, len - offset))))
1392 return err;
1393
1394 if (frag->next)
1395 skb_drop_list(&frag->next);
1396 break;
1397 }
1398
1399done:
1400 if (len > skb_headlen(skb)) {
1401 skb->data_len -= skb->len - len;
1402 skb->len = len;
1403 } else {
1404 skb->len = len;
1405 skb->data_len = 0;
1406 skb_set_tail_pointer(skb, len);
1407 }
1408
1409 return 0;
1410}
1411EXPORT_SYMBOL(___pskb_trim);
1412
/**
 * __pskb_pull_tail - advance tail of skb header
 * @skb: buffer to reallocate
 * @delta: number of bytes to advance tail
 *
 * The function only makes sense on a fragmented &sk_buff: it expands
 * the header, moving its tail forward and copying the necessary data
 * from the fragmented part.
 *
 * &sk_buff MUST have reference count of 1.
 *
 * Returns %NULL (and &sk_buff does not change) if the pull failed,
 * or the value of the new tail of the skb on success.
 *
 * All the pointers pointing into skb header may change and must be
 * reloaded after call to this function.
 */

/* Moves the tail of the skb head forward, copying data from the fragmented
 * part when necessary.
 * 1. It may fail due to malloc failure.
 * 2. It may change skb pointers.
 *
 * It is pretty complicated. Luckily, it is called only in exceptional cases.
 */
1438unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1439{
1440 /* If skb has not enough free space at tail, get new one
1441 * plus 128 bytes for future expansions. If we have enough
1442 * room at tail, reallocate without expansion only if skb is cloned.
1443 */
1444 int i, k, eat = (skb->tail + delta) - skb->end;
1445
1446 if (eat > 0 || skb_cloned(skb)) {
1447 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1448 GFP_ATOMIC))
1449 return NULL;
1450 }
1451
1452 if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1453 BUG();
1454
1455 /* Optimization: no fragments, no reasons to preestimate
1456 * size of pulled pages. Superb.
1457 */
1458 if (!skb_has_frag_list(skb))
1459 goto pull_pages;
1460
1461 /* Estimate size of pulled pages. */
1462 eat = delta;
1463 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1464 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1465
1466 if (size >= eat)
1467 goto pull_pages;
1468 eat -= size;
1469 }
1470
        /* If we need to update the frag list, we are in trouble.
         * Certainly, it is possible to add an offset to the skb data,
         * but, taking into account that pulling is expected to
         * be a very rare operation, it is worth fighting against
         * further bloating of the skb head and crucifying ourselves
         * here instead. Pure masochism, indeed. 8)8)
         */
1478 if (eat) {
1479 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1480 struct sk_buff *clone = NULL;
1481 struct sk_buff *insp = NULL;
1482
1483 do {
1484 BUG_ON(!list);
1485
1486 if (list->len <= eat) {
1487 /* Eaten as whole. */
1488 eat -= list->len;
1489 list = list->next;
1490 insp = list;
1491 } else {
1492 /* Eaten partially. */
1493
1494 if (skb_shared(list)) {
1495 /* Sucks! We need to fork list. :-( */
1496 clone = skb_clone(list, GFP_ATOMIC);
1497 if (!clone)
1498 return NULL;
1499 insp = list->next;
1500 list = clone;
1501 } else {
1502 /* This may be pulled without
1503 * problems. */
1504 insp = list;
1505 }
1506 if (!pskb_pull(list, eat)) {
1507 kfree_skb(clone);
1508 return NULL;
1509 }
1510 break;
1511 }
1512 } while (eat);
1513
1514 /* Free pulled out fragments. */
1515 while ((list = skb_shinfo(skb)->frag_list) != insp) {
1516 skb_shinfo(skb)->frag_list = list->next;
1517 kfree_skb(list);
1518 }
1519 /* And insert new clone at head. */
1520 if (clone) {
1521 clone->next = list;
1522 skb_shinfo(skb)->frag_list = clone;
1523 }
1524 }
1525 /* Success! Now we may commit changes to skb data. */
1526
1527pull_pages:
1528 eat = delta;
1529 k = 0;
1530 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1531 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1532
1533 if (size <= eat) {
1534 skb_frag_unref(skb, i);
1535 eat -= size;
1536 } else {
1537 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1538 if (eat) {
1539 skb_shinfo(skb)->frags[k].page_offset += eat;
1540 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1541 eat = 0;
1542 }
1543 k++;
1544 }
1545 }
1546 skb_shinfo(skb)->nr_frags = k;
1547
1548 skb->tail += delta;
1549 skb->data_len -= delta;
1550
1551 return skb_tail_pointer(skb);
1552}
1553EXPORT_SYMBOL(__pskb_pull_tail);
1554
1555/**
1556 * skb_copy_bits - copy bits from skb to kernel buffer
1557 * @skb: source skb
1558 * @offset: offset in source
1559 * @to: destination buffer
1560 * @len: number of bytes to copy
1561 *
1562 * Copy the specified number of bytes from the source skb to the
1563 * destination buffer.
1564 *
1565 * CAUTION ! :
1566 * If its prototype is ever changed,
1567 * check arch/{*}/net/{*}.S files,
1568 * since it is called from BPF assembly code.
1569 */
1570int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1571{
1572 int start = skb_headlen(skb);
1573 struct sk_buff *frag_iter;
1574 int i, copy;
1575
1576 if (offset > (int)skb->len - len)
1577 goto fault;
1578
1579 /* Copy header. */
1580 if ((copy = start - offset) > 0) {
1581 if (copy > len)
1582 copy = len;
1583 skb_copy_from_linear_data_offset(skb, offset, to, copy);
1584 if ((len -= copy) == 0)
1585 return 0;
1586 offset += copy;
1587 to += copy;
1588 }
1589
1590 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1591 int end;
1592 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1593
1594 WARN_ON(start > offset + len);
1595
1596 end = start + skb_frag_size(f);
1597 if ((copy = end - offset) > 0) {
1598 u8 *vaddr;
1599
1600 if (copy > len)
1601 copy = len;
1602
1603 vaddr = kmap_atomic(skb_frag_page(f));
1604 memcpy(to,
1605 vaddr + f->page_offset + offset - start,
1606 copy);
1607 kunmap_atomic(vaddr);
1608
1609 if ((len -= copy) == 0)
1610 return 0;
1611 offset += copy;
1612 to += copy;
1613 }
1614 start = end;
1615 }
1616
1617 skb_walk_frags(skb, frag_iter) {
1618 int end;
1619
1620 WARN_ON(start > offset + len);
1621
1622 end = start + frag_iter->len;
1623 if ((copy = end - offset) > 0) {
1624 if (copy > len)
1625 copy = len;
1626 if (skb_copy_bits(frag_iter, offset - start, to, copy))
1627 goto fault;
1628 if ((len -= copy) == 0)
1629 return 0;
1630 offset += copy;
1631 to += copy;
1632 }
1633 start = end;
1634 }
1635
1636 if (!len)
1637 return 0;
1638
1639fault:
1640 return -EFAULT;
1641}
1642EXPORT_SYMBOL(skb_copy_bits);
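/*
 * Illustrative sketch (not part of the original file): pulling a possibly
 * non-linear header into a local buffer with skb_copy_bits(), the same
 * pattern that skb_header_pointer() wraps. 'thoff' is a hypothetical
 * transport header offset.
 *
 *	struct udphdr _uh, *uh;
 *
 *	if (skb_copy_bits(skb, thoff, &_uh, sizeof(_uh)) < 0)
 *		goto drop;
 *	uh = &_uh;	// safe to read even if the header spanned fragments
 */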
1643
1644/*
1645 * Callback from splice_to_pipe(), if we need to release some pages
1646 * at the end of the spd in case we error'ed out in filling the pipe.
1647 */
1648static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1649{
1650 put_page(spd->pages[i]);
1651}
1652
1653static struct page *linear_to_page(struct page *page, unsigned int *len,
1654 unsigned int *offset,
1655 struct sock *sk)
1656{
1657 struct page_frag *pfrag = sk_page_frag(sk);
1658
1659 if (!sk_page_frag_refill(sk, pfrag))
1660 return NULL;
1661
1662 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
1663
1664 memcpy(page_address(pfrag->page) + pfrag->offset,
1665 page_address(page) + *offset, *len);
1666 *offset = pfrag->offset;
1667 pfrag->offset += *len;
1668
1669 return pfrag->page;
1670}
1671
1672static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
1673 struct page *page,
1674 unsigned int offset)
1675{
1676 return spd->nr_pages &&
1677 spd->pages[spd->nr_pages - 1] == page &&
1678 (spd->partial[spd->nr_pages - 1].offset +
1679 spd->partial[spd->nr_pages - 1].len == offset);
1680}
1681
1682/*
1683 * Fill page/offset/length into spd, if it can hold more pages.
1684 */
1685static bool spd_fill_page(struct splice_pipe_desc *spd,
1686 struct pipe_inode_info *pipe, struct page *page,
1687 unsigned int *len, unsigned int offset,
1688 bool linear,
1689 struct sock *sk)
1690{
1691 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
1692 return true;
1693
1694 if (linear) {
1695 page = linear_to_page(page, len, &offset, sk);
1696 if (!page)
1697 return true;
1698 }
1699 if (spd_can_coalesce(spd, page, offset)) {
1700 spd->partial[spd->nr_pages - 1].len += *len;
1701 return false;
1702 }
1703 get_page(page);
1704 spd->pages[spd->nr_pages] = page;
1705 spd->partial[spd->nr_pages].len = *len;
1706 spd->partial[spd->nr_pages].offset = offset;
1707 spd->nr_pages++;
1708
1709 return false;
1710}
1711
1712static bool __splice_segment(struct page *page, unsigned int poff,
1713 unsigned int plen, unsigned int *off,
1714 unsigned int *len,
1715 struct splice_pipe_desc *spd, bool linear,
1716 struct sock *sk,
1717 struct pipe_inode_info *pipe)
1718{
1719 if (!*len)
1720 return true;
1721
1722 /* skip this segment if already processed */
1723 if (*off >= plen) {
1724 *off -= plen;
1725 return false;
1726 }
1727
1728 /* ignore any bits we already processed */
1729 poff += *off;
1730 plen -= *off;
1731 *off = 0;
1732
1733 do {
1734 unsigned int flen = min(*len, plen);
1735
1736 if (spd_fill_page(spd, pipe, page, &flen, poff,
1737 linear, sk))
1738 return true;
1739 poff += flen;
1740 plen -= flen;
1741 *len -= flen;
1742 } while (*len && plen);
1743
1744 return false;
1745}
1746
1747/*
1748 * Map linear and fragment data from the skb to spd. It reports true if the
1749 * pipe is full or if we already spliced the requested length.
1750 */
1751static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1752 unsigned int *offset, unsigned int *len,
1753 struct splice_pipe_desc *spd, struct sock *sk)
1754{
1755 int seg;
1756
1757 /* map the linear part :
1758 * If skb->head_frag is set, this 'linear' part is backed by a
1759 * fragment, and if the head is not shared with any clones then
1760 * we can avoid a copy since we own the head portion of this page.
1761 */
1762 if (__splice_segment(virt_to_page(skb->data),
1763 (unsigned long) skb->data & (PAGE_SIZE - 1),
1764 skb_headlen(skb),
1765 offset, len, spd,
1766 skb_head_is_locked(skb),
1767 sk, pipe))
1768 return true;
1769
1770 /*
1771 * then map the fragments
1772 */
1773 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1774 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1775
1776 if (__splice_segment(skb_frag_page(f),
1777 f->page_offset, skb_frag_size(f),
1778 offset, len, spd, false, sk, pipe))
1779 return true;
1780 }
1781
1782 return false;
1783}
1784
1785/*
1786 * Map data from the skb to a pipe. Should handle both the linear part,
1787 * the fragments, and the frag list. It does NOT handle frag lists within
1788 * the frag list, if such a thing exists. We'd probably need to recurse to
1789 * handle that cleanly.
1790 */
1791int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1792 struct pipe_inode_info *pipe, unsigned int tlen,
1793 unsigned int flags)
1794{
1795 struct partial_page partial[MAX_SKB_FRAGS];
1796 struct page *pages[MAX_SKB_FRAGS];
1797 struct splice_pipe_desc spd = {
1798 .pages = pages,
1799 .partial = partial,
1800 .nr_pages_max = MAX_SKB_FRAGS,
1801 .flags = flags,
1802 .ops = &nosteal_pipe_buf_ops,
1803 .spd_release = sock_spd_release,
1804 };
1805 struct sk_buff *frag_iter;
1806 struct sock *sk = skb->sk;
1807 int ret = 0;
1808
1809 /*
1810 * __skb_splice_bits() only fails if the output has no room left,
1811 * so no point in going over the frag_list for the error case.
1812 */
1813 if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1814 goto done;
1815 else if (!tlen)
1816 goto done;
1817
1818 /*
1819 * now see if we have a frag_list to map
1820 */
1821 skb_walk_frags(skb, frag_iter) {
1822 if (!tlen)
1823 break;
1824 if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1825 break;
1826 }
1827
1828done:
1829 if (spd.nr_pages) {
1830 /*
1831 * Drop the socket lock, otherwise we have reverse
1832 * locking dependencies between sk_lock and i_mutex
1833 * here as compared to sendfile(). We enter here
1834 * with the socket lock held, and splice_to_pipe() will
1835 * grab the pipe inode lock. For sendfile() emulation,
1836 * we call into ->sendpage() with the i_mutex lock held
1837 * and networking will grab the socket lock.
1838 */
1839 release_sock(sk);
1840 ret = splice_to_pipe(pipe, &spd);
1841 lock_sock(sk);
1842 }
1843
1844 return ret;
1845}
1846
1847/**
1848 * skb_store_bits - store bits from kernel buffer to skb
1849 * @skb: destination buffer
1850 * @offset: offset in destination
1851 * @from: source buffer
1852 * @len: number of bytes to copy
1853 *
1854 * Copy the specified number of bytes from the source buffer to the
1855 * destination skb. This function handles all the messy bits of
1856 * traversing fragment lists and such.
1857 */
1858
1859int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1860{
1861 int start = skb_headlen(skb);
1862 struct sk_buff *frag_iter;
1863 int i, copy;
1864
1865 if (offset > (int)skb->len - len)
1866 goto fault;
1867
1868 if ((copy = start - offset) > 0) {
1869 if (copy > len)
1870 copy = len;
1871 skb_copy_to_linear_data_offset(skb, offset, from, copy);
1872 if ((len -= copy) == 0)
1873 return 0;
1874 offset += copy;
1875 from += copy;
1876 }
1877
1878 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1879 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1880 int end;
1881
1882 WARN_ON(start > offset + len);
1883
1884 end = start + skb_frag_size(frag);
1885 if ((copy = end - offset) > 0) {
1886 u8 *vaddr;
1887
1888 if (copy > len)
1889 copy = len;
1890
1891 vaddr = kmap_atomic(skb_frag_page(frag));
1892 memcpy(vaddr + frag->page_offset + offset - start,
1893 from, copy);
1894 kunmap_atomic(vaddr);
1895
1896 if ((len -= copy) == 0)
1897 return 0;
1898 offset += copy;
1899 from += copy;
1900 }
1901 start = end;
1902 }
1903
1904 skb_walk_frags(skb, frag_iter) {
1905 int end;
1906
1907 WARN_ON(start > offset + len);
1908
1909 end = start + frag_iter->len;
1910 if ((copy = end - offset) > 0) {
1911 if (copy > len)
1912 copy = len;
1913 if (skb_store_bits(frag_iter, offset - start,
1914 from, copy))
1915 goto fault;
1916 if ((len -= copy) == 0)
1917 return 0;
1918 offset += copy;
1919 from += copy;
1920 }
1921 start = end;
1922 }
1923 if (!len)
1924 return 0;
1925
1926fault:
1927 return -EFAULT;
1928}
1929EXPORT_SYMBOL(skb_store_bits);
1930
1931/* Checksum skb data. */
1932__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
1933 __wsum csum, const struct skb_checksum_ops *ops)
1934{
1935 int start = skb_headlen(skb);
1936 int i, copy = start - offset;
1937 struct sk_buff *frag_iter;
1938 int pos = 0;
1939
1940 /* Checksum header. */
1941 if (copy > 0) {
1942 if (copy > len)
1943 copy = len;
1944 csum = ops->update(skb->data + offset, copy, csum);
1945 if ((len -= copy) == 0)
1946 return csum;
1947 offset += copy;
1948 pos = copy;
1949 }
1950
1951 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1952 int end;
1953 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1954
1955 WARN_ON(start > offset + len);
1956
1957 end = start + skb_frag_size(frag);
1958 if ((copy = end - offset) > 0) {
1959 __wsum csum2;
1960 u8 *vaddr;
1961
1962 if (copy > len)
1963 copy = len;
1964 vaddr = kmap_atomic(skb_frag_page(frag));
1965 csum2 = ops->update(vaddr + frag->page_offset +
1966 offset - start, copy, 0);
1967 kunmap_atomic(vaddr);
1968 csum = ops->combine(csum, csum2, pos, copy);
1969 if (!(len -= copy))
1970 return csum;
1971 offset += copy;
1972 pos += copy;
1973 }
1974 start = end;
1975 }
1976
1977 skb_walk_frags(skb, frag_iter) {
1978 int end;
1979
1980 WARN_ON(start > offset + len);
1981
1982 end = start + frag_iter->len;
1983 if ((copy = end - offset) > 0) {
1984 __wsum csum2;
1985 if (copy > len)
1986 copy = len;
1987 csum2 = __skb_checksum(frag_iter, offset - start,
1988 copy, 0, ops);
1989 csum = ops->combine(csum, csum2, pos, copy);
1990 if ((len -= copy) == 0)
1991 return csum;
1992 offset += copy;
1993 pos += copy;
1994 }
1995 start = end;
1996 }
1997 BUG_ON(len);
1998
1999 return csum;
2000}
2001EXPORT_SYMBOL(__skb_checksum);
2002
2003__wsum skb_checksum(const struct sk_buff *skb, int offset,
2004 int len, __wsum csum)
2005{
2006 const struct skb_checksum_ops ops = {
2007 .update = csum_partial_ext,
2008 .combine = csum_block_add_ext,
2009 };
2010
2011 return __skb_checksum(skb, offset, len, csum, &ops);
2012}
2013EXPORT_SYMBOL(skb_checksum);
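/*
 * Illustrative sketch (not part of the original file): verifying a received
 * checksum by folding the 32-bit partial sum produced above. A zero fold
 * over a range that includes the checksum field means the data is intact.
 *
 *	__sum16 sum = csum_fold(skb_checksum(skb, 0, skb->len, 0));
 *
 *	if (sum)
 *		goto csum_error;	// non-zero fold means corruption
 *
 * The ops-based __skb_checksum() exists so that other combiners (e.g. the
 * CRC32c used by SCTP) can walk the frags with the same code.
 */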
2014
2015/* Both of above in one bottle. */
2016
2017__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2018 u8 *to, int len, __wsum csum)
2019{
2020 int start = skb_headlen(skb);
2021 int i, copy = start - offset;
2022 struct sk_buff *frag_iter;
2023 int pos = 0;
2024
2025 /* Copy header. */
2026 if (copy > 0) {
2027 if (copy > len)
2028 copy = len;
2029 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2030 copy, csum);
2031 if ((len -= copy) == 0)
2032 return csum;
2033 offset += copy;
2034 to += copy;
2035 pos = copy;
2036 }
2037
2038 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2039 int end;
2040
2041 WARN_ON(start > offset + len);
2042
2043 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2044 if ((copy = end - offset) > 0) {
2045 __wsum csum2;
2046 u8 *vaddr;
2047 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2048
2049 if (copy > len)
2050 copy = len;
2051 vaddr = kmap_atomic(skb_frag_page(frag));
2052 csum2 = csum_partial_copy_nocheck(vaddr +
2053 frag->page_offset +
2054 offset - start, to,
2055 copy, 0);
2056 kunmap_atomic(vaddr);
2057 csum = csum_block_add(csum, csum2, pos);
2058 if (!(len -= copy))
2059 return csum;
2060 offset += copy;
2061 to += copy;
2062 pos += copy;
2063 }
2064 start = end;
2065 }
2066
2067 skb_walk_frags(skb, frag_iter) {
2068 __wsum csum2;
2069 int end;
2070
2071 WARN_ON(start > offset + len);
2072
2073 end = start + frag_iter->len;
2074 if ((copy = end - offset) > 0) {
2075 if (copy > len)
2076 copy = len;
2077 csum2 = skb_copy_and_csum_bits(frag_iter,
2078 offset - start,
2079 to, copy, 0);
2080 csum = csum_block_add(csum, csum2, pos);
2081 if ((len -= copy) == 0)
2082 return csum;
2083 offset += copy;
2084 to += copy;
2085 pos += copy;
2086 }
2087 start = end;
2088 }
2089 BUG_ON(len);
2090 return csum;
2091}
2092EXPORT_SYMBOL(skb_copy_and_csum_bits);
2093
2094/**
2095 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2096 * @from: source buffer
2097 *
2098 * Calculates the amount of linear headroom needed in the 'to' skb passed
2099 * into skb_zerocopy().
2100 */
2101unsigned int
2102skb_zerocopy_headlen(const struct sk_buff *from)
2103{
2104 unsigned int hlen = 0;
2105
2106 if (!from->head_frag ||
2107 skb_headlen(from) < L1_CACHE_BYTES ||
2108 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2109 hlen = skb_headlen(from);
2110
2111 if (skb_has_frag_list(from))
2112 hlen = from->len;
2113
2114 return hlen;
2115}
2116EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2117
2118/**
2119 * skb_zerocopy - Zero copy skb to skb
2120 * @to: destination buffer
2121 * @from: source buffer
2122 * @len: number of bytes to copy from source buffer
2123 * @hlen: size of linear headroom in destination buffer
2124 *
2125 * Copies up to @len bytes from @from to @to by creating references
2126 * to the frags in the source buffer.
2127 *
2128 * The @hlen as calculated by skb_zerocopy_headlen() specifies the
2129 * headroom in the @to buffer.
2130 *
2131 * Return value:
2132 * 0: everything is OK
2133 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2134 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2135 */
2136int
2137skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2138{
2139 int i, j = 0;
2140 int plen = 0; /* length of skb->head fragment */
2141 int ret;
2142 struct page *page;
2143 unsigned int offset;
2144
2145 BUG_ON(!from->head_frag && !hlen);
2146
2147 /* don't bother with small payloads */
2148 if (len <= skb_tailroom(to))
2149 return skb_copy_bits(from, 0, skb_put(to, len), len);
2150
2151 if (hlen) {
2152 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2153 if (unlikely(ret))
2154 return ret;
2155 len -= hlen;
2156 } else {
2157 plen = min_t(int, skb_headlen(from), len);
2158 if (plen) {
2159 page = virt_to_head_page(from->head);
2160 offset = from->data - (unsigned char *)page_address(page);
2161 __skb_fill_page_desc(to, 0, page, offset, plen);
2162 get_page(page);
2163 j = 1;
2164 len -= plen;
2165 }
2166 }
2167
2168 to->truesize += len + plen;
2169 to->len += len + plen;
2170 to->data_len += len + plen;
2171
2172 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2173 skb_tx_error(from);
2174 return -ENOMEM;
2175 }
2176
2177 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2178 if (!len)
2179 break;
2180 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2181 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2182 len -= skb_shinfo(to)->frags[j].size;
2183 skb_frag_ref(to, j);
2184 j++;
2185 }
2186 skb_shinfo(to)->nr_frags = j;
2187
2188 return 0;
2189}
2190EXPORT_SYMBOL_GPL(skb_zerocopy);
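
/* Editor's illustrative sketch (not part of the original source): a hedged
 * example of pairing skb_zerocopy_headlen() with skb_zerocopy(); "orig" and
 * the GFP flags are assumptions, and header/headroom reservation is omitted.
 *
 *	unsigned int hlen = skb_zerocopy_headlen(orig);
 *	struct sk_buff *copy;
 *
 *	copy = alloc_skb(hlen, GFP_ATOMIC);
 *	if (!copy)
 *		return -ENOMEM;
 *	if (skb_zerocopy(copy, orig, orig->len, hlen) < 0)
 *		kfree_skb(copy);	// -ENOMEM or -EFAULT from the copy
 */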
2191
2192void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2193{
2194 __wsum csum;
2195 long csstart;
2196
2197 if (skb->ip_summed == CHECKSUM_PARTIAL)
2198 csstart = skb_checksum_start_offset(skb);
2199 else
2200 csstart = skb_headlen(skb);
2201
2202 BUG_ON(csstart > skb_headlen(skb));
2203
2204 skb_copy_from_linear_data(skb, to, csstart);
2205
2206 csum = 0;
2207 if (csstart != skb->len)
2208 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2209 skb->len - csstart, 0);
2210
2211 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2212 long csstuff = csstart + skb->csum_offset;
2213
2214 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2215 }
2216}
2217EXPORT_SYMBOL(skb_copy_and_csum_dev);
2218
2219/**
2220 * skb_dequeue - remove from the head of the queue
2221 * @list: list to dequeue from
2222 *
2223 * Remove the head of the list. The list lock is taken so the function
2224 * may be used safely with other locking list functions. The head item is
2225 * returned or %NULL if the list is empty.
2226 */
2227
2228struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2229{
2230 unsigned long flags;
2231 struct sk_buff *result;
2232
2233 spin_lock_irqsave(&list->lock, flags);
2234 result = __skb_dequeue(list);
2235 spin_unlock_irqrestore(&list->lock, flags);
2236 return result;
2237}
2238EXPORT_SYMBOL(skb_dequeue);
2239
2240/**
2241 * skb_dequeue_tail - remove from the tail of the queue
2242 * @list: list to dequeue from
2243 *
2244 * Remove the tail of the list. The list lock is taken so the function
2245 * may be used safely with other locking list functions. The tail item is
2246 * returned or %NULL if the list is empty.
2247 */
2248struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2249{
2250 unsigned long flags;
2251 struct sk_buff *result;
2252
2253 spin_lock_irqsave(&list->lock, flags);
2254 result = __skb_dequeue_tail(list);
2255 spin_unlock_irqrestore(&list->lock, flags);
2256 return result;
2257}
2258EXPORT_SYMBOL(skb_dequeue_tail);
2259
2260/**
2261 * skb_queue_purge - empty a list
2262 * @list: list to empty
2263 *
2264 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2265 * the list and one reference dropped. This function takes the list
2266 * lock and is atomic with respect to other list locking functions.
2267 */
2268void skb_queue_purge(struct sk_buff_head *list)
2269{
2270 struct sk_buff *skb;
2271 while ((skb = skb_dequeue(list)) != NULL)
2272 kfree_skb(skb);
2273}
2274EXPORT_SYMBOL(skb_queue_purge);
2275
2276/**
2277 * skb_queue_head - queue a buffer at the list head
2278 * @list: list to use
2279 * @newsk: buffer to queue
2280 *
2281 * Queue a buffer at the start of the list. This function takes the
2282 * list lock and can be used safely with other locking &sk_buff
2283 * functions.
2284 *
2285 * A buffer cannot be placed on two lists at the same time.
2286 */
2287void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2288{
2289 unsigned long flags;
2290
2291 spin_lock_irqsave(&list->lock, flags);
2292 __skb_queue_head(list, newsk);
2293 spin_unlock_irqrestore(&list->lock, flags);
2294}
2295EXPORT_SYMBOL(skb_queue_head);
2296
2297/**
2298 * skb_queue_tail - queue a buffer at the list tail
2299 * @list: list to use
2300 * @newsk: buffer to queue
2301 *
2302 * Queue a buffer at the tail of the list. This function takes the
2303 * list lock and can be used safely with other locking &sk_buff
2304 * functions.
2305 *
2306 * A buffer cannot be placed on two lists at the same time.
2307 */
2308void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2309{
2310 unsigned long flags;
2311
2312 spin_lock_irqsave(&list->lock, flags);
2313 __skb_queue_tail(list, newsk);
2314 spin_unlock_irqrestore(&list->lock, flags);
2315}
2316EXPORT_SYMBOL(skb_queue_tail);
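
/* Editor's illustrative sketch (not part of the original source): the locked
 * queue helpers above are typically paired like this; "rxq" and process_skb()
 * are hypothetical names.
 *
 *	static struct sk_buff_head rxq;
 *
 *	skb_queue_head_init(&rxq);
 *
 *	skb_queue_tail(&rxq, skb);			// producer side
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)	// consumer side
 *		process_skb(skb);			// hypothetical handler
 */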
2317
2318/**
2319 * skb_unlink - remove a buffer from a list
2320 * @skb: buffer to remove
2321 * @list: list to use
2322 *
2323 * Remove a packet from a list. The list locks are taken and this
2324 * function is atomic with respect to other list locked calls.
2325 *
2326 * You must know what list the SKB is on.
2327 */
2328void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2329{
2330 unsigned long flags;
2331
2332 spin_lock_irqsave(&list->lock, flags);
2333 __skb_unlink(skb, list);
2334 spin_unlock_irqrestore(&list->lock, flags);
2335}
2336EXPORT_SYMBOL(skb_unlink);
2337
2338/**
2339 * skb_append - append a buffer
2340 * @old: buffer to insert after
2341 * @newsk: buffer to insert
2342 * @list: list to use
2343 *
2344 * Place a packet after a given packet in a list. The list locks are taken
2345 * and this function is atomic with respect to other list locked calls.
2346 * A buffer cannot be placed on two lists at the same time.
2347 */
2348void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2349{
2350 unsigned long flags;
2351
2352 spin_lock_irqsave(&list->lock, flags);
2353 __skb_queue_after(list, old, newsk);
2354 spin_unlock_irqrestore(&list->lock, flags);
2355}
2356EXPORT_SYMBOL(skb_append);
2357
2358/**
2359 * skb_insert - insert a buffer
2360 * @old: buffer to insert before
2361 * @newsk: buffer to insert
2362 * @list: list to use
2363 *
2364 * Place a packet before a given packet in a list. The list locks are
2365 * taken and this function is atomic with respect to other list locked
2366 * calls.
2367 *
2368 * A buffer cannot be placed on two lists at the same time.
2369 */
2370void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2371{
2372 unsigned long flags;
2373
2374 spin_lock_irqsave(&list->lock, flags);
2375 __skb_insert(newsk, old->prev, old, list);
2376 spin_unlock_irqrestore(&list->lock, flags);
2377}
2378EXPORT_SYMBOL(skb_insert);
2379
2380static inline void skb_split_inside_header(struct sk_buff *skb,
2381 struct sk_buff* skb1,
2382 const u32 len, const int pos)
2383{
2384 int i;
2385
2386 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2387 pos - len);
2388 /* And move data appendix as is. */
2389 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2390 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2391
2392 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2393 skb_shinfo(skb)->nr_frags = 0;
2394 skb1->data_len = skb->data_len;
2395 skb1->len += skb1->data_len;
2396 skb->data_len = 0;
2397 skb->len = len;
2398 skb_set_tail_pointer(skb, len);
2399}
2400
2401static inline void skb_split_no_header(struct sk_buff *skb,
2402 struct sk_buff* skb1,
2403 const u32 len, int pos)
2404{
2405 int i, k = 0;
2406 const int nfrags = skb_shinfo(skb)->nr_frags;
2407
2408 skb_shinfo(skb)->nr_frags = 0;
2409 skb1->len = skb1->data_len = skb->len - len;
2410 skb->len = len;
2411 skb->data_len = len - pos;
2412
2413 for (i = 0; i < nfrags; i++) {
2414 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
2415
2416 if (pos + size > len) {
2417 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2418
2419 if (pos < len) {
2420 /* Split frag.
2421 * We have two variants in this case:
2422 * 1. Move all the frag to the second
2423 * part, if it is possible. F.e.
2424 * this approach is mandatory for TUX,
2425 * where splitting is expensive.
2426 * 2. Split the frag accurately. This is what we do here.
2427 */
2428 skb_frag_ref(skb, i);
2429 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2430 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
2431 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
2432 skb_shinfo(skb)->nr_frags++;
2433 }
2434 k++;
2435 } else
2436 skb_shinfo(skb)->nr_frags++;
2437 pos += size;
2438 }
2439 skb_shinfo(skb1)->nr_frags = k;
2440}
2441
2442/**
2443 * skb_split - Split fragmented skb to two parts at length len.
2444 * @skb: the buffer to split
2445 * @skb1: the buffer to receive the second part
2446 * @len: new length for skb
2447 */
2448void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2449{
2450 int pos = skb_headlen(skb);
2451
2452 skb_shinfo(skb1)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
2453 if (len < pos) /* Split line is inside header. */
2454 skb_split_inside_header(skb, skb1, len, pos);
2455 else /* Second chunk has no header, nothing to copy. */
2456 skb_split_no_header(skb, skb1, len, pos);
2457}
2458EXPORT_SYMBOL(skb_split);
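
/* Editor's illustrative sketch (not part of the original source): a hedged
 * caller of skb_split(), splitting a queued segment at "mss" bytes. Real
 * users (e.g. TCP retransmit) also reserve header space in skb1, which is
 * omitted here; the allocation size merely guarantees enough tailroom for a
 * split that falls inside the linear header.
 *
 *	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (skb1) {
 *		skb_split(skb, skb1, mss);	// skb keeps the first mss bytes
 *		// skb1 now holds the remaining skb->len - mss bytes
 *	}
 */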
2459
2460/* Shifting from/to a cloned skb is a no-go.
2461 *
2462 * Caller cannot keep skb_shinfo related pointers past calling here!
2463 */
2464static int skb_prepare_for_shift(struct sk_buff *skb)
2465{
2466 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2467}
2468
2469/**
2470 * skb_shift - Shifts paged data partially from skb to another
2471 * @tgt: buffer into which tail data gets added
2472 * @skb: buffer from which the paged data comes from
2473 * @shiftlen: shift up to this many bytes
2474 *
2475 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2476 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
2477 * It's up to caller to free skb if everything was shifted.
2478 *
2479 * If @tgt runs out of frags, the whole operation is aborted.
2480 *
2481 * The skb must contain nothing but paged data, while tgt may also
2482 * contain non-paged data.
2483 *
2484 * TODO: full sized shift could be optimized but that would need
2485 * specialized skb free'er to handle frags without up-to-date nr_frags.
2486 */
2487int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2488{
2489 int from, to, merge, todo;
2490 struct skb_frag_struct *fragfrom, *fragto;
2491
2492 BUG_ON(shiftlen > skb->len);
2493 BUG_ON(skb_headlen(skb)); /* Would corrupt stream */
2494
2495 todo = shiftlen;
2496 from = 0;
2497 to = skb_shinfo(tgt)->nr_frags;
2498 fragfrom = &skb_shinfo(skb)->frags[from];
2499
2500 /* Actual merge is delayed until the point when we know we can
2501 * commit all, so that we don't have to undo partial changes
2502 */
2503 if (!to ||
2504 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
2505 fragfrom->page_offset)) {
2506 merge = -1;
2507 } else {
2508 merge = to - 1;
2509
2510 todo -= skb_frag_size(fragfrom);
2511 if (todo < 0) {
2512 if (skb_prepare_for_shift(skb) ||
2513 skb_prepare_for_shift(tgt))
2514 return 0;
2515
2516 /* All previous frag pointers might be stale! */
2517 fragfrom = &skb_shinfo(skb)->frags[from];
2518 fragto = &skb_shinfo(tgt)->frags[merge];
2519
2520 skb_frag_size_add(fragto, shiftlen);
2521 skb_frag_size_sub(fragfrom, shiftlen);
2522 fragfrom->page_offset += shiftlen;
2523
2524 goto onlymerged;
2525 }
2526
2527 from++;
2528 }
2529
2530 /* Skip full, not-fitting skb to avoid expensive operations */
2531 if ((shiftlen == skb->len) &&
2532 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2533 return 0;
2534
2535 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2536 return 0;
2537
2538 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2539 if (to == MAX_SKB_FRAGS)
2540 return 0;
2541
2542 fragfrom = &skb_shinfo(skb)->frags[from];
2543 fragto = &skb_shinfo(tgt)->frags[to];
2544
2545 if (todo >= skb_frag_size(fragfrom)) {
2546 *fragto = *fragfrom;
2547 todo -= skb_frag_size(fragfrom);
2548 from++;
2549 to++;
2550
2551 } else {
2552 __skb_frag_ref(fragfrom);
2553 fragto->page = fragfrom->page;
2554 fragto->page_offset = fragfrom->page_offset;
2555 skb_frag_size_set(fragto, todo);
2556
2557 fragfrom->page_offset += todo;
2558 skb_frag_size_sub(fragfrom, todo);
2559 todo = 0;
2560
2561 to++;
2562 break;
2563 }
2564 }
2565
2566 /* Ready to "commit" this state change to tgt */
2567 skb_shinfo(tgt)->nr_frags = to;
2568
2569 if (merge >= 0) {
2570 fragfrom = &skb_shinfo(skb)->frags[0];
2571 fragto = &skb_shinfo(tgt)->frags[merge];
2572
2573 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
2574 __skb_frag_unref(fragfrom);
2575 }
2576
2577 /* Reposition in the original skb */
2578 to = 0;
2579 while (from < skb_shinfo(skb)->nr_frags)
2580 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2581 skb_shinfo(skb)->nr_frags = to;
2582
2583 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2584
2585onlymerged:
2586 /* Most likely the tgt won't ever need its checksum anymore, skb on
2587 * the other hand might need it if it needs to be resent
2588 */
2589 tgt->ip_summed = CHECKSUM_PARTIAL;
2590 skb->ip_summed = CHECKSUM_PARTIAL;
2591
2592 /* Yak, is it really working this way? Some helper please? */
2593 skb->len -= shiftlen;
2594 skb->data_len -= shiftlen;
2595 skb->truesize -= shiftlen;
2596 tgt->len += shiftlen;
2597 tgt->data_len += shiftlen;
2598 tgt->truesize += shiftlen;
2599
2600 return shiftlen;
2601}
2602
2603/**
2604 * skb_prepare_seq_read - Prepare a sequential read of skb data
2605 * @skb: the buffer to read
2606 * @from: lower offset of data to be read
2607 * @to: upper offset of data to be read
2608 * @st: state variable
2609 *
2610 * Initializes the specified state variable. Must be called before
2611 * invoking skb_seq_read() for the first time.
2612 */
2613void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2614 unsigned int to, struct skb_seq_state *st)
2615{
2616 st->lower_offset = from;
2617 st->upper_offset = to;
2618 st->root_skb = st->cur_skb = skb;
2619 st->frag_idx = st->stepped_offset = 0;
2620 st->frag_data = NULL;
2621}
2622EXPORT_SYMBOL(skb_prepare_seq_read);
2623
2624/**
2625 * skb_seq_read - Sequentially read skb data
2626 * @consumed: number of bytes consumed by the caller so far
2627 * @data: destination pointer for data to be returned
2628 * @st: state variable
2629 *
2630 * Reads a block of skb data at @consumed relative to the
2631 * lower offset specified to skb_prepare_seq_read(). Assigns
2632 * the head of the data block to @data and returns the length
2633 * of the block or 0 if the end of the skb data or the upper
2634 * offset has been reached.
2635 *
2636 * The caller is not required to consume all of the data
2637 * returned, i.e. @consumed is typically set to the number
2638 * of bytes already consumed and the next call to
2639 * skb_seq_read() will return the remaining part of the block.
2640 *
2641 * Note 1: The size of each block of data returned can be arbitrary;
2642 * this limitation is the cost of zerocopy sequential
2643 * reads of potentially non-linear data.
2644 *
2645 * Note 2: Fragment lists within fragments are not implemented
2646 * at the moment, state->root_skb could be replaced with
2647 * a stack for this purpose.
2648 */
2649unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2650 struct skb_seq_state *st)
2651{
2652 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2653 skb_frag_t *frag;
2654
2655 if (unlikely(abs_offset >= st->upper_offset)) {
2656 if (st->frag_data) {
2657 kunmap_atomic(st->frag_data);
2658 st->frag_data = NULL;
2659 }
2660 return 0;
2661 }
2662
2663next_skb:
2664 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2665
2666 if (abs_offset < block_limit && !st->frag_data) {
2667 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2668 return block_limit - abs_offset;
2669 }
2670
2671 if (st->frag_idx == 0 && !st->frag_data)
2672 st->stepped_offset += skb_headlen(st->cur_skb);
2673
2674 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2675 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2676 block_limit = skb_frag_size(frag) + st->stepped_offset;
2677
2678 if (abs_offset < block_limit) {
2679 if (!st->frag_data)
2680 st->frag_data = kmap_atomic(skb_frag_page(frag));
2681
2682 *data = (u8 *) st->frag_data + frag->page_offset +
2683 (abs_offset - st->stepped_offset);
2684
2685 return block_limit - abs_offset;
2686 }
2687
2688 if (st->frag_data) {
2689 kunmap_atomic(st->frag_data);
2690 st->frag_data = NULL;
2691 }
2692
2693 st->frag_idx++;
2694 st->stepped_offset += skb_frag_size(frag);
2695 }
2696
2697 if (st->frag_data) {
2698 kunmap_atomic(st->frag_data);
2699 st->frag_data = NULL;
2700 }
2701
2702 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2703 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2704 st->frag_idx = 0;
2705 goto next_skb;
2706 } else if (st->cur_skb->next) {
2707 st->cur_skb = st->cur_skb->next;
2708 st->frag_idx = 0;
2709 goto next_skb;
2710 }
2711
2712 return 0;
2713}
2714EXPORT_SYMBOL(skb_seq_read);
2715
2716/**
2717 * skb_abort_seq_read - Abort a sequential read of skb data
2718 * @st: state variable
2719 *
2720 * Must be called if the read is abandoned before skb_seq_read()
2721 * has returned 0.
2722 */
2723void skb_abort_seq_read(struct skb_seq_state *st)
2724{
2725 if (st->frag_data)
2726 kunmap_atomic(st->frag_data);
2727}
2728EXPORT_SYMBOL(skb_abort_seq_read);
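
/* Editor's illustrative sketch (not part of the original source): the usual
 * shape of a sequential read over a possibly non-linear skb using the three
 * helpers above; consume_block() is a hypothetical sink.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, avail;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((avail = skb_seq_read(consumed, &data, &st)) != 0) {
 *		consume_block(data, avail);	// hypothetical handler
 *		consumed += avail;
 *	}
 *	// no skb_abort_seq_read() needed here: skb_seq_read() returned 0
 */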
2729
2730#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
2731
2732static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2733 struct ts_config *conf,
2734 struct ts_state *state)
2735{
2736 return skb_seq_read(offset, text, TS_SKB_CB(state));
2737}
2738
2739static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2740{
2741 skb_abort_seq_read(TS_SKB_CB(state));
2742}
2743
2744/**
2745 * skb_find_text - Find a text pattern in skb data
2746 * @skb: the buffer to look in
2747 * @from: search offset
2748 * @to: search limit
2749 * @config: textsearch configuration
2750 * @state: uninitialized textsearch state variable
2751 *
2752 * Finds a pattern in the skb data according to the specified
2753 * textsearch configuration. Use textsearch_next() to retrieve
2754 * subsequent occurrences of the pattern. Returns the offset
2755 * to the first occurrence or UINT_MAX if no match was found.
2756 */
2757unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2758 unsigned int to, struct ts_config *config,
2759 struct ts_state *state)
2760{
2761 unsigned int ret;
2762
2763 config->get_next_block = skb_ts_get_next_block;
2764 config->finish = skb_ts_finish;
2765
2766 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2767
2768 ret = textsearch_find(config, state);
2769 return (ret <= to - from ? ret : UINT_MAX);
2770}
2771EXPORT_SYMBOL(skb_find_text);
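
/* Editor's illustrative sketch (not part of the original source): driving
 * skb_find_text() with a textsearch configuration; the algorithm name and
 * pattern are assumptions.
 *
 *	struct ts_config *conf;
 *	struct ts_state state;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	if (!IS_ERR(conf)) {
 *		pos = skb_find_text(skb, 0, skb->len, conf, &state);
 *		if (pos != UINT_MAX)
 *			;	// pattern found at offset pos
 *		textsearch_destroy(conf);
 *	}
 */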
2772
2773/**
2774 * skb_append_datato_frags - append the user data to a skb
2775 * @sk: sock structure
2776 * @skb: skb structure to which the user data is to be appended
2777 * @getfrag: callback function to be used for getting the user data
2778 * @from: pointer to user message iov
2779 * @length: length of the iov message
2780 *
2781 * Description: This procedure appends the user data to the fragment part
2782 * of the skb. If any page allocation fails, -ENOMEM is returned.
2783 */
2784int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2785 int (*getfrag)(void *from, char *to, int offset,
2786 int len, int odd, struct sk_buff *skb),
2787 void *from, int length)
2788{
2789 int frg_cnt = skb_shinfo(skb)->nr_frags;
2790 int copy;
2791 int offset = 0;
2792 int ret;
2793 struct page_frag *pfrag = &current->task_frag;
2794
2795 do {
2796 /* Return error if we don't have space for new frag */
2797 if (frg_cnt >= MAX_SKB_FRAGS)
2798 return -EMSGSIZE;
2799
2800 if (!sk_page_frag_refill(sk, pfrag))
2801 return -ENOMEM;
2802
2803 /* copy the user data to page */
2804 copy = min_t(int, length, pfrag->size - pfrag->offset);
2805
2806 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
2807 offset, copy, 0, skb);
2808 if (ret < 0)
2809 return -EFAULT;
2810
2811 /* copy was successful so update the size parameters */
2812 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
2813 copy);
2814 frg_cnt++;
2815 pfrag->offset += copy;
2816 get_page(pfrag->page);
2817
2818 skb->truesize += copy;
2819 atomic_add(copy, &sk->sk_wmem_alloc);
2820 skb->len += copy;
2821 skb->data_len += copy;
2822 offset += copy;
2823 length -= copy;
2824
2825 } while (length > 0);
2826
2827 return 0;
2828}
2829EXPORT_SYMBOL(skb_append_datato_frags);
2830
2831/**
2832 * skb_pull_rcsum - pull skb and update receive checksum
2833 * @skb: buffer to update
2834 * @len: length of data pulled
2835 *
2836 * This function performs an skb_pull on the packet and updates
2837 * the CHECKSUM_COMPLETE checksum. It should be used on
2838 * receive path processing instead of skb_pull unless you know
2839 * that the checksum difference is zero (e.g., a valid IP header)
2840 * or you are setting ip_summed to CHECKSUM_NONE.
2841 */
2842unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2843{
2844 BUG_ON(len > skb->len);
2845 skb->len -= len;
2846 BUG_ON(skb->len < skb->data_len);
2847 skb_postpull_rcsum(skb, skb->data, len);
2848 return skb->data += len;
2849}
2850EXPORT_SYMBOL_GPL(skb_pull_rcsum);
2851
2852/**
2853 * skb_segment - Perform protocol segmentation on skb.
2854 * @head_skb: buffer to segment
2855 * @features: features for the output path (see dev->features)
2856 *
2857 * This function performs segmentation on the given skb. It returns
2858 * a pointer to the first in a list of new skbs for the segments.
2859 * In case of error it returns ERR_PTR(err).
2860 */
2861struct sk_buff *skb_segment(struct sk_buff *head_skb,
2862 netdev_features_t features)
2863{
2864 struct sk_buff *segs = NULL;
2865 struct sk_buff *tail = NULL;
2866 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
2867 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
2868 unsigned int mss = skb_shinfo(head_skb)->gso_size;
2869 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
2870 struct sk_buff *frag_skb = head_skb;
2871 unsigned int offset = doffset;
2872 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
2873 unsigned int headroom;
2874 unsigned int len;
2875 __be16 proto;
2876 bool csum;
2877 int sg = !!(features & NETIF_F_SG);
2878 int nfrags = skb_shinfo(head_skb)->nr_frags;
2879 int err = -ENOMEM;
2880 int i = 0;
2881 int pos;
2882 int dummy;
2883
2884 proto = skb_network_protocol(head_skb, &dummy);
2885 if (unlikely(!proto))
2886 return ERR_PTR(-EINVAL);
2887
2888 csum = !!can_checksum_protocol(features, proto);
2889 __skb_push(head_skb, doffset);
2890 headroom = skb_headroom(head_skb);
2891 pos = skb_headlen(head_skb);
2892
2893 do {
2894 struct sk_buff *nskb;
2895 skb_frag_t *nskb_frag;
2896 int hsize;
2897 int size;
2898
2899 len = head_skb->len - offset;
2900 if (len > mss)
2901 len = mss;
2902
2903 hsize = skb_headlen(head_skb) - offset;
2904 if (hsize < 0)
2905 hsize = 0;
2906 if (hsize > len || !sg)
2907 hsize = len;
2908
2909 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
2910 (skb_headlen(list_skb) == len || sg)) {
2911 BUG_ON(skb_headlen(list_skb) > len);
2912
2913 i = 0;
2914 nfrags = skb_shinfo(list_skb)->nr_frags;
2915 frag = skb_shinfo(list_skb)->frags;
2916 frag_skb = list_skb;
2917 pos += skb_headlen(list_skb);
2918
2919 while (pos < offset + len) {
2920 BUG_ON(i >= nfrags);
2921
2922 size = skb_frag_size(frag);
2923 if (pos + size > offset + len)
2924 break;
2925
2926 i++;
2927 pos += size;
2928 frag++;
2929 }
2930
2931 nskb = skb_clone(list_skb, GFP_ATOMIC);
2932 list_skb = list_skb->next;
2933
2934 if (unlikely(!nskb))
2935 goto err;
2936
2937 if (unlikely(pskb_trim(nskb, len))) {
2938 kfree_skb(nskb);
2939 goto err;
2940 }
2941
2942 hsize = skb_end_offset(nskb);
2943 if (skb_cow_head(nskb, doffset + headroom)) {
2944 kfree_skb(nskb);
2945 goto err;
2946 }
2947
2948 nskb->truesize += skb_end_offset(nskb) - hsize;
2949 skb_release_head_state(nskb);
2950 __skb_push(nskb, doffset);
2951 } else {
2952 nskb = __alloc_skb(hsize + doffset + headroom,
2953 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
2954 NUMA_NO_NODE);
2955
2956 if (unlikely(!nskb))
2957 goto err;
2958
2959 skb_reserve(nskb, headroom);
2960 __skb_put(nskb, doffset);
2961 }
2962
2963 if (segs)
2964 tail->next = nskb;
2965 else
2966 segs = nskb;
2967 tail = nskb;
2968
2969 __copy_skb_header(nskb, head_skb);
2970 nskb->mac_len = head_skb->mac_len;
2971
2972 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
2973
2974 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
2975 nskb->data - tnl_hlen,
2976 doffset + tnl_hlen);
2977
2978 if (nskb->len == len + doffset)
2979 goto perform_csum_check;
2980
2981 if (!sg) {
2982 nskb->ip_summed = CHECKSUM_NONE;
2983 nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
2984 skb_put(nskb, len),
2985 len, 0);
2986 continue;
2987 }
2988
2989 nskb_frag = skb_shinfo(nskb)->frags;
2990
2991 skb_copy_from_linear_data_offset(head_skb, offset,
2992 skb_put(nskb, hsize), hsize);
2993
2994 skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
2995 SKBTX_SHARED_FRAG;
2996
2997 while (pos < offset + len) {
2998 if (i >= nfrags) {
2999 BUG_ON(skb_headlen(list_skb));
3000
3001 i = 0;
3002 nfrags = skb_shinfo(list_skb)->nr_frags;
3003 frag = skb_shinfo(list_skb)->frags;
3004 frag_skb = list_skb;
3005
3006 BUG_ON(!nfrags);
3007
3008 list_skb = list_skb->next;
3009 }
3010
3011 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3012 MAX_SKB_FRAGS)) {
3013 net_warn_ratelimited(
3014 "skb_segment: too many frags: %u %u\n",
3015 pos, mss);
3016 goto err;
3017 }
3018
3019 if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
3020 goto err;
3021
3022 *nskb_frag = *frag;
3023 __skb_frag_ref(nskb_frag);
3024 size = skb_frag_size(nskb_frag);
3025
3026 if (pos < offset) {
3027 nskb_frag->page_offset += offset - pos;
3028 skb_frag_size_sub(nskb_frag, offset - pos);
3029 }
3030
3031 skb_shinfo(nskb)->nr_frags++;
3032
3033 if (pos + size <= offset + len) {
3034 i++;
3035 frag++;
3036 pos += size;
3037 } else {
3038 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3039 goto skip_fraglist;
3040 }
3041
3042 nskb_frag++;
3043 }
3044
3045skip_fraglist:
3046 nskb->data_len = len - hsize;
3047 nskb->len += nskb->data_len;
3048 nskb->truesize += nskb->data_len;
3049
3050perform_csum_check:
3051 if (!csum) {
3052 nskb->csum = skb_checksum(nskb, doffset,
3053 nskb->len - doffset, 0);
3054 nskb->ip_summed = CHECKSUM_NONE;
3055 }
3056 } while ((offset += len) < head_skb->len);
3057
3058 return segs;
3059
3060err:
3061 kfree_skb_list(segs);
3062 return ERR_PTR(err);
3063}
3064EXPORT_SYMBOL_GPL(skb_segment);
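
/* Editor's illustrative sketch (not part of the original source): callers of
 * skb_segment() (e.g. the software GSO path) walk the returned singly linked
 * list roughly like this; xmit_one() is hypothetical and "features" is the
 * caller's netdev_features_t.
 *
 *	struct sk_buff *segs, *nskb;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *
 *	while (segs) {
 *		nskb = segs;
 *		segs = segs->next;
 *		nskb->next = NULL;
 *		xmit_one(nskb);		// hypothetical transmit
 *	}
 *	consume_skb(skb);		// original skb no longer needed
 */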
3065
3066int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3067{
3068 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3069 unsigned int offset = skb_gro_offset(skb);
3070 unsigned int headlen = skb_headlen(skb);
3071 struct sk_buff *nskb, *lp, *p = *head;
3072 unsigned int len = skb_gro_len(skb);
3073 unsigned int delta_truesize;
3074 unsigned int headroom;
3075
3076 if (unlikely(p->len + len >= 65536))
3077 return -E2BIG;
3078
3079 lp = NAPI_GRO_CB(p)->last;
3080 pinfo = skb_shinfo(lp);
3081
3082 if (headlen <= offset) {
3083 skb_frag_t *frag;
3084 skb_frag_t *frag2;
3085 int i = skbinfo->nr_frags;
3086 int nr_frags = pinfo->nr_frags + i;
3087
3088 if (nr_frags > MAX_SKB_FRAGS)
3089 goto merge;
3090
3091 offset -= headlen;
3092 pinfo->nr_frags = nr_frags;
3093 skbinfo->nr_frags = 0;
3094
3095 frag = pinfo->frags + nr_frags;
3096 frag2 = skbinfo->frags + i;
3097 do {
3098 *--frag = *--frag2;
3099 } while (--i);
3100
3101 frag->page_offset += offset;
3102 skb_frag_size_sub(frag, offset);
3103
3104 /* all fragments truesize : remove (head size + sk_buff) */
3105 delta_truesize = skb->truesize -
3106 SKB_TRUESIZE(skb_end_offset(skb));
3107
3108 skb->truesize -= skb->data_len;
3109 skb->len -= skb->data_len;
3110 skb->data_len = 0;
3111
3112 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3113 goto done;
3114 } else if (skb->head_frag) {
3115 int nr_frags = pinfo->nr_frags;
3116 skb_frag_t *frag = pinfo->frags + nr_frags;
3117 struct page *page = virt_to_head_page(skb->head);
3118 unsigned int first_size = headlen - offset;
3119 unsigned int first_offset;
3120
3121 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3122 goto merge;
3123
3124 first_offset = skb->data -
3125 (unsigned char *)page_address(page) +
3126 offset;
3127
3128 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3129
3130 frag->page.p = page;
3131 frag->page_offset = first_offset;
3132 skb_frag_size_set(frag, first_size);
3133
3134 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3135 /* We don't need to clear skbinfo->nr_frags here */
3136
3137 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3138 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3139 goto done;
3140 }
3141 if (pinfo->frag_list)
3142 goto merge;
3143 if (skb_gro_len(p) != pinfo->gso_size)
3144 return -E2BIG;
3145
3146 headroom = skb_headroom(p);
3147 nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
3148 if (unlikely(!nskb))
3149 return -ENOMEM;
3150
3151 __copy_skb_header(nskb, p);
3152 nskb->mac_len = p->mac_len;
3153
3154 skb_reserve(nskb, headroom);
3155 __skb_put(nskb, skb_gro_offset(p));
3156
3157 skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
3158 skb_set_network_header(nskb, skb_network_offset(p));
3159 skb_set_transport_header(nskb, skb_transport_offset(p));
3160
3161 __skb_pull(p, skb_gro_offset(p));
3162 memcpy(skb_mac_header(nskb), skb_mac_header(p),
3163 p->data - skb_mac_header(p));
3164
3165 skb_shinfo(nskb)->frag_list = p;
3166 skb_shinfo(nskb)->gso_size = pinfo->gso_size;
3167 pinfo->gso_size = 0;
3168 skb_header_release(p);
3169 NAPI_GRO_CB(nskb)->last = p;
3170
3171 nskb->data_len += p->len;
3172 nskb->truesize += p->truesize;
3173 nskb->len += p->len;
3174
3175 *head = nskb;
3176 nskb->next = p->next;
3177 p->next = NULL;
3178
3179 p = nskb;
3180
3181merge:
3182 delta_truesize = skb->truesize;
3183 if (offset > headlen) {
3184 unsigned int eat = offset - headlen;
3185
3186 skbinfo->frags[0].page_offset += eat;
3187 skb_frag_size_sub(&skbinfo->frags[0], eat);
3188 skb->data_len -= eat;
3189 skb->len -= eat;
3190 offset = headlen;
3191 }
3192
3193 __skb_pull(skb, offset);
3194
3195 if (NAPI_GRO_CB(p)->last == p)
3196 skb_shinfo(p)->frag_list = skb;
3197 else
3198 NAPI_GRO_CB(p)->last->next = skb;
3199 NAPI_GRO_CB(p)->last = skb;
3200 skb_header_release(skb);
3201 lp = p;
3202
3203done:
3204 NAPI_GRO_CB(p)->count++;
3205 p->data_len += len;
3206 p->truesize += delta_truesize;
3207 p->len += len;
3208 if (lp != p) {
3209 lp->data_len += len;
3210 lp->truesize += delta_truesize;
3211 lp->len += len;
3212 }
3213 NAPI_GRO_CB(skb)->same_flow = 1;
3214 return 0;
3215}
3216EXPORT_SYMBOL_GPL(skb_gro_receive);
3217
3218void __init skb_init(void)
3219{
3220 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
3221 sizeof(struct sk_buff),
3222 0,
3223 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3224 NULL);
3225 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3226 (2*sizeof(struct sk_buff)) +
3227 sizeof(atomic_t),
3228 0,
3229 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3230 NULL);
3231}
3232
3233/**
3234 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
3235 * @skb: Socket buffer containing the buffers to be mapped
3236 * @sg: The scatter-gather list to map into
3237 * @offset: The offset into the buffer's contents to start mapping
3238 * @len: Length of buffer space to be mapped
3239 *
3240 * Fill the specified scatter-gather list with mappings/pointers into a
3241 * region of the buffer space attached to a socket buffer.
3242 */
3243static int
3244__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3245{
3246 int start = skb_headlen(skb);
3247 int i, copy = start - offset;
3248 struct sk_buff *frag_iter;
3249 int elt = 0;
3250
3251 if (copy > 0) {
3252 if (copy > len)
3253 copy = len;
3254 sg_set_buf(sg, skb->data + offset, copy);
3255 elt++;
3256 if ((len -= copy) == 0)
3257 return elt;
3258 offset += copy;
3259 }
3260
3261 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3262 int end;
3263
3264 WARN_ON(start > offset + len);
3265
3266 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3267 if ((copy = end - offset) > 0) {
3268 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3269
3270 if (copy > len)
3271 copy = len;
3272 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3273 frag->page_offset+offset-start);
3274 elt++;
3275 if (!(len -= copy))
3276 return elt;
3277 offset += copy;
3278 }
3279 start = end;
3280 }
3281
3282 skb_walk_frags(skb, frag_iter) {
3283 int end;
3284
3285 WARN_ON(start > offset + len);
3286
3287 end = start + frag_iter->len;
3288 if ((copy = end - offset) > 0) {
3289 if (copy > len)
3290 copy = len;
3291 elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3292 copy);
3293 if ((len -= copy) == 0)
3294 return elt;
3295 offset += copy;
3296 }
3297 start = end;
3298 }
3299 BUG_ON(len);
3300 return elt;
3301}
3302
3303/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
3304 * given sglist, without marking the sg entry that contains the last skb data
3305 * as the end. So the caller can manipulate the sg list at will when adding new
3306 * data after the first call, without calling sg_unmark_end to expand the sg list.
3307 *
3308 * Scenario to use skb_to_sgvec_nomark:
3309 * 1. sg_init_table
3310 * 2. skb_to_sgvec_nomark(payload1)
3311 * 3. skb_to_sgvec_nomark(payload2)
3312 *
3313 * This is equivalent to:
3314 * 1. sg_init_table
3315 * 2. skb_to_sgvec(payload1)
3316 * 3. sg_unmark_end
3317 * 4. skb_to_sgvec(payload2)
3318 *
3319 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
3320 * is preferable.
3321 */
3322int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
3323 int offset, int len)
3324{
3325 return __skb_to_sgvec(skb, sg, offset, len);
3326}
3327EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
3328
3329int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
3330{
3331 int nsg = __skb_to_sgvec(skb, sg, offset, len);
3332
3333 sg_mark_end(&sg[nsg - 1]);
3334
3335 return nsg;
3336}
3337EXPORT_SYMBOL_GPL(skb_to_sgvec);
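
/* Editor's illustrative sketch (not part of the original source): mapping a
 * whole skb for a crypto operation, assuming the skb has no frag list so
 * MAX_SKB_FRAGS + 1 scatterlist entries are enough.
 *
 *	struct scatterlist sg[MAX_SKB_FRAGS + 1];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	// sg[0..nsg-1] now describe the skb data, last entry marked as end
 */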
3338
3339/**
3340 * skb_cow_data - Check that a socket buffer's data buffers are writable
3341 * @skb: The socket buffer to check.
3342 * @tailbits: Amount of trailing space to be added
3343 * @trailer: Returned pointer to the skb where the @tailbits space begins
3344 *
3345 * Make sure that the data buffers attached to a socket buffer are
3346 * writable. If they are not, private copies are made of the data buffers
3347 * and the socket buffer is set to use these instead.
3348 *
3349 * If @tailbits is given, make sure that there is space to write @tailbits
3350 * bytes of data beyond current end of socket buffer. @trailer will be
3351 * set to point to the skb in which this space begins.
3352 *
3353 * The number of scatterlist elements required to completely map the
3354 * COW'd and extended socket buffer will be returned.
3355 */
3356int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
3357{
3358 int copyflag;
3359 int elt;
3360 struct sk_buff *skb1, **skb_p;
3361
3362 /* If skb is cloned or its head is paged, reallocate
3363 * head pulling out all the pages (pages are considered not writable
3364 * at the moment even if they are anonymous).
3365 */
3366 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3367 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3368 return -ENOMEM;
3369
3370 /* Easy case. Most of packets will go this way. */
3371 if (!skb_has_frag_list(skb)) {
3372 /* A little trouble: not enough space for the trailer.
3373 * This should not happen when the stack is tuned to generate
3374 * good frames. OK, on a miss we reallocate and reserve even more
3375 * space; 128 bytes is fair. */
3376
3377 if (skb_tailroom(skb) < tailbits &&
3378 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3379 return -ENOMEM;
3380
3381 /* Voila! */
3382 *trailer = skb;
3383 return 1;
3384 }
3385
3386 /* Misery. We are in trouble, going to mince fragments... */
3387
3388 elt = 1;
3389 skb_p = &skb_shinfo(skb)->frag_list;
3390 copyflag = 0;
3391
3392 while ((skb1 = *skb_p) != NULL) {
3393 int ntail = 0;
3394
3395 /* The fragment is partially pulled by someone,
3396 * this can happen on input. Copy it and everything
3397 * after it. */
3398
3399 if (skb_shared(skb1))
3400 copyflag = 1;
3401
3402 /* If the skb is the last, worry about trailer. */
3403
3404 if (skb1->next == NULL && tailbits) {
3405 if (skb_shinfo(skb1)->nr_frags ||
3406 skb_has_frag_list(skb1) ||
3407 skb_tailroom(skb1) < tailbits)
3408 ntail = tailbits + 128;
3409 }
3410
3411 if (copyflag ||
3412 skb_cloned(skb1) ||
3413 ntail ||
3414 skb_shinfo(skb1)->nr_frags ||
3415 skb_has_frag_list(skb1)) {
3416 struct sk_buff *skb2;
3417
3418 /* Fuck, we are miserable poor guys... */
3419 if (ntail == 0)
3420 skb2 = skb_copy(skb1, GFP_ATOMIC);
3421 else
3422 skb2 = skb_copy_expand(skb1,
3423 skb_headroom(skb1),
3424 ntail,
3425 GFP_ATOMIC);
3426 if (unlikely(skb2 == NULL))
3427 return -ENOMEM;
3428
3429 if (skb1->sk)
3430 skb_set_owner_w(skb2, skb1->sk);
3431
3432 /* Looking around. Are we still alive?
3433 * OK, link new skb, drop old one */
3434
3435 skb2->next = skb1->next;
3436 *skb_p = skb2;
3437 kfree_skb(skb1);
3438 skb1 = skb2;
3439 }
3440 elt++;
3441 *trailer = skb1;
3442 skb_p = &skb1->next;
3443 }
3444
3445 return elt;
3446}
3447EXPORT_SYMBOL_GPL(skb_cow_data);
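
/* Editor's illustrative sketch (not part of the original source): IPsec-style
 * users typically combine skb_cow_data() with skb_to_sgvec(); "padlen" is an
 * assumed amount of trailer space the caller needs.
 *
 *	struct sk_buff *trailer;
 *	int nsg;
 *
 *	nsg = skb_cow_data(skb, padlen, &trailer);
 *	if (nsg < 0)
 *		return nsg;
 *	// skb data is now writable; trailer has at least padlen of tailroom,
 *	// and nsg scatterlist entries suffice to map the whole buffer
 */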
3448
3449static void sock_rmem_free(struct sk_buff *skb)
3450{
3451 struct sock *sk = skb->sk;
3452
3453 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3454}
3455
3456/*
3457 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
3458 */
3459int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3460{
3461 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3462 (unsigned int)sk->sk_rcvbuf)
3463 return -ENOMEM;
3464
3465 skb_orphan(skb);
3466 skb->sk = sk;
3467 skb->destructor = sock_rmem_free;
3468 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
3469
3470 /* before exiting rcu section, make sure dst is refcounted */
3471 skb_dst_force(skb);
3472
3473 skb_queue_tail(&sk->sk_error_queue, skb);
3474 if (!sock_flag(sk, SOCK_DEAD))
3475 sk->sk_data_ready(sk);
3476 return 0;
3477}
3478EXPORT_SYMBOL(sock_queue_err_skb);
3479
3480void skb_tstamp_tx(struct sk_buff *orig_skb,
3481 struct skb_shared_hwtstamps *hwtstamps)
3482{
3483 struct sock *sk = orig_skb->sk;
3484 struct sock_exterr_skb *serr;
3485 struct sk_buff *skb;
3486 int err;
3487
3488 if (!sk)
3489 return;
3490
3491 if (hwtstamps) {
3492 *skb_hwtstamps(orig_skb) =
3493 *hwtstamps;
3494 } else {
3495 /*
3496 * no hardware time stamps available,
3497 * so keep the shared tx_flags and only
3498 * store software time stamp
3499 */
3500 orig_skb->tstamp = ktime_get_real();
3501 }
3502
3503 skb = skb_clone(orig_skb, GFP_ATOMIC);
3504 if (!skb)
3505 return;
3506
3507 serr = SKB_EXT_ERR(skb);
3508 memset(serr, 0, sizeof(*serr));
3509 serr->ee.ee_errno = ENOMSG;
3510 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3511
3512 err = sock_queue_err_skb(sk, skb);
3513
3514 if (err)
3515 kfree_skb(skb);
3516}
3517EXPORT_SYMBOL_GPL(skb_tstamp_tx);
3518
3519void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
3520{
3521 struct sock *sk = skb->sk;
3522 struct sock_exterr_skb *serr;
3523 int err;
3524
3525 skb->wifi_acked_valid = 1;
3526 skb->wifi_acked = acked;
3527
3528 serr = SKB_EXT_ERR(skb);
3529 memset(serr, 0, sizeof(*serr));
3530 serr->ee.ee_errno = ENOMSG;
3531 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
3532
3533 err = sock_queue_err_skb(sk, skb);
3534 if (err)
3535 kfree_skb(skb);
3536}
3537EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
3538
3539
3540/**
3541 * skb_partial_csum_set - set up and verify partial csum values for packet
3542 * @skb: the skb to set
3543 * @start: the number of bytes after skb->data to start checksumming.
3544 * @off: the offset from start to place the checksum.
3545 *
3546 * For untrusted partially-checksummed packets, we need to make sure the values
3547 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3548 *
3549 * This function checks and sets those values and skb->ip_summed: if this
3550 * returns false you should drop the packet.
3551 */
3552bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3553{
3554 if (unlikely(start > skb_headlen(skb)) ||
3555 unlikely((int)start + off > skb_headlen(skb) - 2)) {
3556 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
3557 start, off, skb_headlen(skb));
3558 return false;
3559 }
3560 skb->ip_summed = CHECKSUM_PARTIAL;
3561 skb->csum_start = skb_headroom(skb) + start;
3562 skb->csum_offset = off;
3563 skb_set_transport_header(skb, start);
3564 return true;
3565}
3566EXPORT_SYMBOL_GPL(skb_partial_csum_set);
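
/* Editor's illustrative sketch (not part of the original source): how a
 * virtio-net style receive path might apply an untrusted, device-supplied
 * (start, offset) pair; csum_start and csum_offset are assumed to come from
 * the device header.
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);		// bogus offsets, drop the packet
 *		return -EINVAL;
 *	}
 *	// skb->ip_summed is now CHECKSUM_PARTIAL with validated offsets
 */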
3567
3568static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
3569 unsigned int max)
3570{
3571 if (skb_headlen(skb) >= len)
3572 return 0;
3573
3574 /* If we need to pullup then pullup to the max, so we
3575 * won't need to do it again.
3576 */
3577 if (max > skb->len)
3578 max = skb->len;
3579
3580 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
3581 return -ENOMEM;
3582
3583 if (skb_headlen(skb) < len)
3584 return -EPROTO;
3585
3586 return 0;
3587}
3588
3589#define MAX_TCP_HDR_LEN (15 * 4)
3590
3591static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
3592 typeof(IPPROTO_IP) proto,
3593 unsigned int off)
3594{
3595 switch (proto) {
3596 int err;
3597
3598 case IPPROTO_TCP:
3599 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
3600 off + MAX_TCP_HDR_LEN);
3601 if (!err && !skb_partial_csum_set(skb, off,
3602 offsetof(struct tcphdr,
3603 check)))
3604 err = -EPROTO;
3605 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
3606
3607 case IPPROTO_UDP:
3608 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
3609 off + sizeof(struct udphdr));
3610 if (!err && !skb_partial_csum_set(skb, off,
3611 offsetof(struct udphdr,
3612 check)))
3613 err = -EPROTO;
3614 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
3615 }
3616
3617 return ERR_PTR(-EPROTO);
3618}
3619
3620/* This value should be large enough to cover a tagged ethernet header plus
3621 * maximally sized IP and TCP or UDP headers.
3622 */
3623#define MAX_IP_HDR_LEN 128
3624
3625static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
3626{
3627 unsigned int off;
3628 bool fragment;
3629 __sum16 *csum;
3630 int err;
3631
3632 fragment = false;
3633
3634 err = skb_maybe_pull_tail(skb,
3635 sizeof(struct iphdr),
3636 MAX_IP_HDR_LEN);
3637 if (err < 0)
3638 goto out;
3639
3640 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
3641 fragment = true;
3642
3643 off = ip_hdrlen(skb);
3644
3645 err = -EPROTO;
3646
3647 if (fragment)
3648 goto out;
3649
3650 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
3651 if (IS_ERR(csum))
3652 return PTR_ERR(csum);
3653
3654 if (recalculate)
3655 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3656 ip_hdr(skb)->daddr,
3657 skb->len - off,
3658 ip_hdr(skb)->protocol, 0);
3659 err = 0;
3660
3661out:
3662 return err;
3663}
3664
3665/* This value should be large enough to cover a tagged ethernet header plus
3666 * an IPv6 header, all options, and a maximal TCP or UDP header.
3667 */
3668#define MAX_IPV6_HDR_LEN 256
3669
3670#define OPT_HDR(type, skb, off) \
3671 (type *)(skb_network_header(skb) + (off))
3672
3673static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
3674{
3675 int err;
3676 u8 nexthdr;
3677 unsigned int off;
3678 unsigned int len;
3679 bool fragment;
3680 bool done;
3681 __sum16 *csum;
3682
3683 fragment = false;
3684 done = false;
3685
3686 off = sizeof(struct ipv6hdr);
3687
3688 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
3689 if (err < 0)
3690 goto out;
3691
3692 nexthdr = ipv6_hdr(skb)->nexthdr;
3693
3694 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
3695 while (off <= len && !done) {
3696 switch (nexthdr) {
3697 case IPPROTO_DSTOPTS:
3698 case IPPROTO_HOPOPTS:
3699 case IPPROTO_ROUTING: {
3700 struct ipv6_opt_hdr *hp;
3701
3702 err = skb_maybe_pull_tail(skb,
3703 off +
3704 sizeof(struct ipv6_opt_hdr),
3705 MAX_IPV6_HDR_LEN);
3706 if (err < 0)
3707 goto out;
3708
3709 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
3710 nexthdr = hp->nexthdr;
3711 off += ipv6_optlen(hp);
3712 break;
3713 }
3714 case IPPROTO_AH: {
3715 struct ip_auth_hdr *hp;
3716
3717 err = skb_maybe_pull_tail(skb,
3718 off +
3719 sizeof(struct ip_auth_hdr),
3720 MAX_IPV6_HDR_LEN);
3721 if (err < 0)
3722 goto out;
3723
3724 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
3725 nexthdr = hp->nexthdr;
3726 off += ipv6_authlen(hp);
3727 break;
3728 }
3729 case IPPROTO_FRAGMENT: {
3730 struct frag_hdr *hp;
3731
3732 err = skb_maybe_pull_tail(skb,
3733 off +
3734 sizeof(struct frag_hdr),
3735 MAX_IPV6_HDR_LEN);
3736 if (err < 0)
3737 goto out;
3738
3739 hp = OPT_HDR(struct frag_hdr, skb, off);
3740
3741 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
3742 fragment = true;
3743
3744 nexthdr = hp->nexthdr;
3745 off += sizeof(struct frag_hdr);
3746 break;
3747 }
3748 default:
3749 done = true;
3750 break;
3751 }
3752 }
3753
3754 err = -EPROTO;
3755
3756 if (!done || fragment)
3757 goto out;
3758
3759 csum = skb_checksum_setup_ip(skb, nexthdr, off);
3760 if (IS_ERR(csum))
3761 return PTR_ERR(csum);
3762
3763 if (recalculate)
3764 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3765 &ipv6_hdr(skb)->daddr,
3766 skb->len - off, nexthdr, 0);
3767 err = 0;
3768
3769out:
3770 return err;
3771}
3772
3773/**
3774 * skb_checksum_setup - set up partial checksum offset
3775 * @skb: the skb to set up
3776 * @recalculate: if true the pseudo-header checksum will be recalculated
3777 */
3778int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
3779{
3780 int err;
3781
3782 switch (skb->protocol) {
3783 case htons(ETH_P_IP):
3784 err = skb_checksum_setup_ipv4(skb, recalculate);
3785 break;
3786
3787 case htons(ETH_P_IPV6):
3788 err = skb_checksum_setup_ipv6(skb, recalculate);
3789 break;
3790
3791 default:
3792 err = -EPROTO;
3793 break;
3794 }
3795
3796 return err;
3797}
3798EXPORT_SYMBOL(skb_checksum_setup);
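
/* Editor's illustrative sketch (not part of the original source): a backend
 * driver receiving packets with only a partial checksum (e.g. a xen-netback
 * style path) might call this before handing the skb to the stack.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    skb_checksum_setup(skb, true)) {
 *		kfree_skb(skb);		// could not locate the checksum field
 *		return;
 *	}
 */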
3799
3800void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3801{
3802 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
3803 skb->dev->name);
3804}
3805EXPORT_SYMBOL(__skb_warn_lro_forwarding);
3806
3807void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
3808{
3809 if (head_stolen) {
3810 skb_release_head_state(skb);
3811 kmem_cache_free(skbuff_head_cache, skb);
3812 } else {
3813 __kfree_skb(skb);
3814 }
3815}
3816EXPORT_SYMBOL(kfree_skb_partial);
3817
3818/**
3819 * skb_try_coalesce - try to merge skb to prior one
3820 * @to: prior buffer
3821 * @from: buffer to add
3822 * @fragstolen: pointer to boolean
3823 * @delta_truesize: how much more was allocated than was requested
3824 */
3825bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3826 bool *fragstolen, int *delta_truesize)
3827{
3828 int i, delta, len = from->len;
3829
3830 *fragstolen = false;
3831
3832 if (skb_cloned(to))
3833 return false;
3834
3835 if (len <= skb_tailroom(to)) {
3836 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
3837 *delta_truesize = 0;
3838 return true;
3839 }
3840
3841 if (skb_has_frag_list(to) || skb_has_frag_list(from))
3842 return false;
3843
3844 if (skb_headlen(from) != 0) {
3845 struct page *page;
3846 unsigned int offset;
3847
3848 if (skb_shinfo(to)->nr_frags +
3849 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
3850 return false;
3851
3852 if (skb_head_is_locked(from))
3853 return false;
3854
3855 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3856
3857 page = virt_to_head_page(from->head);
3858 offset = from->data - (unsigned char *)page_address(page);
3859
3860 skb_fill_page_desc(to, skb_shinfo(to)->nr_frags,
3861 page, offset, skb_headlen(from));
3862 *fragstolen = true;
3863 } else {
3864 if (skb_shinfo(to)->nr_frags +
3865 skb_shinfo(from)->nr_frags > MAX_SKB_FRAGS)
3866 return false;
3867
3868 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
3869 }
3870
3871 WARN_ON_ONCE(delta < len);
3872
3873 memcpy(skb_shinfo(to)->frags + skb_shinfo(to)->nr_frags,
3874 skb_shinfo(from)->frags,
3875 skb_shinfo(from)->nr_frags * sizeof(skb_frag_t));
3876 skb_shinfo(to)->nr_frags += skb_shinfo(from)->nr_frags;
3877
3878 if (!skb_cloned(from))
3879 skb_shinfo(from)->nr_frags = 0;
3880
3881 /* if the skb is not cloned this does nothing
3882 * since we set nr_frags to 0.
3883 */
3884 for (i = 0; i < skb_shinfo(from)->nr_frags; i++)
3885 skb_frag_ref(from, i);
3886
3887 to->truesize += delta;
3888 to->len += len;
3889 to->data_len += len;
3890
3891 *delta_truesize = delta;
3892 return true;
3893}
3894EXPORT_SYMBOL(skb_try_coalesce);
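
/* Editor's illustrative sketch (not part of the original source): receive
 * queues (e.g. TCP's out-of-order queue) use skb_try_coalesce() roughly like
 * this; "tail" is the previously queued skb and "queue" is an assumption.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (tail && skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);	// data now lives in tail
 *		// account delta bytes of extra truesize against the socket
 *	} else {
 *		__skb_queue_tail(&queue, skb);
 *	}
 */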
3895
3896/**
3897 * skb_scrub_packet - scrub an skb
3898 *
3899 * @skb: buffer to clean
3900 * @xnet: packet is crossing netns
3901 *
3902 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
3903 * into/from a tunnel. Some information has to be cleared during these
3904 * operations.
3905 * skb_scrub_packet can also be used to clean a skb before injecting it in
3906 * another namespace (@xnet == true). We have to clear all information in the
3907 * skb that could impact namespace isolation.
3908 */
3909void skb_scrub_packet(struct sk_buff *skb, bool xnet)
3910{
3911 if (xnet)
3912 skb_orphan(skb);
3913 skb->tstamp.tv64 = 0;
3914 skb->pkt_type = PACKET_HOST;
3915 skb->skb_iif = 0;
3916 skb->local_df = 0;
3917 skb_dst_drop(skb);
3918 skb->mark = 0;
3919 secpath_reset(skb);
3920 nf_reset(skb);
3921 nf_reset_trace(skb);
3922}
3923EXPORT_SYMBOL_GPL(skb_scrub_packet);
3924
3925/**
3926 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
3927 *
3928 * @skb: GSO skb
3929 *
3930 * skb_gso_transport_seglen is used to determine the real size of the
3931 * individual segments, including Layer4 headers (TCP/UDP).
3932 *
3933 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
3934 */
3935unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
3936{
3937 const struct skb_shared_info *shinfo = skb_shinfo(skb);
3938
3939 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
3940 return tcp_hdrlen(skb) + shinfo->gso_size;
3941
3942 /* UFO sets gso_size to the size of the fragmentation
3943 * payload, i.e. the size of the L4 (UDP) header is already
3944 * accounted for.
3945 */
3946 return shinfo->gso_size;
3947}
3948EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
173 *
174 * Buffers may only be allocated from interrupts using a @gfp_mask of
175 * %GFP_ATOMIC.
176 */
177struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
178 int flags, int node)
179{
180 struct kmem_cache *cache;
181 struct skb_shared_info *shinfo;
182 struct sk_buff *skb;
183 u8 *data;
184 bool pfmemalloc;
185
186 cache = (flags & SKB_ALLOC_FCLONE)
187 ? skbuff_fclone_cache : skbuff_head_cache;
188
189 if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
190 gfp_mask |= __GFP_MEMALLOC;
191
192 /* Get the HEAD */
193 skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
194 if (!skb)
195 goto out;
196 prefetchw(skb);
197
198 /* We do our best to align skb_shared_info on a separate cache
199 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
200 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
201 * Both skb->head and skb_shared_info are cache line aligned.
202 */
203 size = SKB_DATA_ALIGN(size);
204 size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
205 data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
206 if (!data)
207 goto nodata;
208 /* kmalloc(size) might give us more room than requested.
209 * Put skb_shared_info exactly at the end of allocated zone,
210 * to allow max possible filling before reallocation.
211 */
212 size = SKB_WITH_OVERHEAD(ksize(data));
213 prefetchw(data + size);
214
215 /*
216 * Only clear those fields we need to clear, not those that we will
217 * actually initialise below. Hence, don't put any more fields after
218 * the tail pointer in struct sk_buff!
219 */
220 memset(skb, 0, offsetof(struct sk_buff, tail));
221 /* Account for allocated memory : skb + skb->head */
222 skb->truesize = SKB_TRUESIZE(size);
223 skb->pfmemalloc = pfmemalloc;
224 refcount_set(&skb->users, 1);
225 skb->head = data;
226 skb->data = data;
227 skb_reset_tail_pointer(skb);
228 skb->end = skb->tail + size;
229 skb->mac_header = (typeof(skb->mac_header))~0U;
230 skb->transport_header = (typeof(skb->transport_header))~0U;
231
232 /* make sure we initialize shinfo sequentially */
233 shinfo = skb_shinfo(skb);
234 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
235 atomic_set(&shinfo->dataref, 1);
236
237 if (flags & SKB_ALLOC_FCLONE) {
238 struct sk_buff_fclones *fclones;
239
240 fclones = container_of(skb, struct sk_buff_fclones, skb1);
241
242 skb->fclone = SKB_FCLONE_ORIG;
243 refcount_set(&fclones->fclone_ref, 1);
244
245 fclones->skb2.fclone = SKB_FCLONE_CLONE;
246 }
247out:
248 return skb;
249nodata:
250 kmem_cache_free(cache, skb);
251 skb = NULL;
252 goto out;
253}
254EXPORT_SYMBOL(__alloc_skb);
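/* Illustrative sketch (an assumption, not part of the original file): typical
 * use through the alloc_skb() wrapper, which calls __alloc_skb() with the
 * head cache. The 128-byte payload size is a placeholder.
 *
 *	struct sk_buff *skb = alloc_skb(NET_SKB_PAD + 128, GFP_ATOMIC);
 *
 *	if (skb) {
 *		skb_reserve(skb, NET_SKB_PAD);	// headroom for lower layers
 *		skb_put(skb, 128);		// claim 128 bytes of payload
 *	}
 */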
255
256/**
257 * __build_skb - build a network buffer
258 * @data: data buffer provided by caller
259 * @frag_size: size of data, or 0 if head was kmalloced
260 *
261 * Allocate a new &sk_buff. Caller provides space holding head and
262 * skb_shared_info. @data must have been allocated by kmalloc() only if
263 * @frag_size is 0, otherwise data should come from the page allocator
264 * or vmalloc()
265 * The return is the new skb buffer.
266 * On a failure the return is %NULL, and @data is not freed.
267 * Notes :
268 * Before IO, driver allocates only data buffer where NIC put incoming frame
269 * Driver should add room at head (NET_SKB_PAD) and
270 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
271 * After IO, driver calls build_skb(), to allocate sk_buff and populate it
272 * before giving packet to stack.
273 * RX rings only contains data buffers, not full skbs.
274 */
275struct sk_buff *__build_skb(void *data, unsigned int frag_size)
276{
277 struct skb_shared_info *shinfo;
278 struct sk_buff *skb;
279 unsigned int size = frag_size ? : ksize(data);
280
281 skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
282 if (!skb)
283 return NULL;
284
285 size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
286
287 memset(skb, 0, offsetof(struct sk_buff, tail));
288 skb->truesize = SKB_TRUESIZE(size);
289 refcount_set(&skb->users, 1);
290 skb->head = data;
291 skb->data = data;
292 skb_reset_tail_pointer(skb);
293 skb->end = skb->tail + size;
294 skb->mac_header = (typeof(skb->mac_header))~0U;
295 skb->transport_header = (typeof(skb->transport_header))~0U;
296
297 /* make sure we initialize shinfo sequentially */
298 shinfo = skb_shinfo(skb);
299 memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
300 atomic_set(&shinfo->dataref, 1);
301
302 return skb;
303}
304
305/* build_skb() is a wrapper over __build_skb() that specifically
306 * takes care of skb->head_frag and skb->pfmemalloc.
307 * This means that if @frag_size is not zero, then @data must be backed
308 * by a page fragment, not kmalloc() or vmalloc().
309 */
310struct sk_buff *build_skb(void *data, unsigned int frag_size)
311{
312 struct sk_buff *skb = __build_skb(data, frag_size);
313
314 if (skb && frag_size) {
315 skb->head_frag = 1;
316 if (page_is_pfmemalloc(virt_to_head_page(data)))
317 skb->pfmemalloc = 1;
318 }
319 return skb;
320}
321EXPORT_SYMBOL(build_skb);
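/* Illustrative sketch (assumption): a driver RX path pairing a page-fragment
 * buffer with build_skb(), per the notes on __build_skb() above. The
 * 1536-byte frame size is a placeholder.
 *
 *	unsigned int sz = SKB_DATA_ALIGN(NET_SKB_PAD + 1536) +
 *			  SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *	void *buf = netdev_alloc_frag(sz);
 *	struct sk_buff *skb = buf ? build_skb(buf, sz) : NULL;
 *
 *	if (skb)
 *		skb_reserve(skb, NET_SKB_PAD);	// headroom added by the driver
 */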
322
323#define NAPI_SKB_CACHE_SIZE 64
324
325struct napi_alloc_cache {
326 struct page_frag_cache page;
327 unsigned int skb_count;
328 void *skb_cache[NAPI_SKB_CACHE_SIZE];
329};
330
331static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
332static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
333
334static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
335{
336 struct page_frag_cache *nc;
337 unsigned long flags;
338 void *data;
339
340 local_irq_save(flags);
341 nc = this_cpu_ptr(&netdev_alloc_cache);
342 data = page_frag_alloc(nc, fragsz, gfp_mask);
343 local_irq_restore(flags);
344 return data;
345}
346
347/**
348 * netdev_alloc_frag - allocate a page fragment
349 * @fragsz: fragment size
350 *
351 * Allocates a frag from a page for receive buffer.
352 * Uses GFP_ATOMIC allocations.
353 */
354void *netdev_alloc_frag(unsigned int fragsz)
355{
356 return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
357}
358EXPORT_SYMBOL(netdev_alloc_frag);
359
360static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
361{
362 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
363
364 return page_frag_alloc(&nc->page, fragsz, gfp_mask);
365}
366
367void *napi_alloc_frag(unsigned int fragsz)
368{
369 return __napi_alloc_frag(fragsz, GFP_ATOMIC);
370}
371EXPORT_SYMBOL(napi_alloc_frag);
372
373/**
374 * __netdev_alloc_skb - allocate an skbuff for rx on a specific device
375 * @dev: network device to receive on
376 * @len: length to allocate
377 * @gfp_mask: get_free_pages mask, passed to alloc_skb
378 *
379 * Allocate a new &sk_buff and assign it a usage count of one. The
380 * buffer has NET_SKB_PAD headroom built in. Users should allocate
381 * the headroom they think they need without accounting for the
382 * built in space. The built in space is used for optimisations.
383 *
384 * %NULL is returned if there is no free memory.
385 */
386struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
387 gfp_t gfp_mask)
388{
389 struct page_frag_cache *nc;
390 unsigned long flags;
391 struct sk_buff *skb;
392 bool pfmemalloc;
393 void *data;
394
395 len += NET_SKB_PAD;
396
397 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
398 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
399 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
400 if (!skb)
401 goto skb_fail;
402 goto skb_success;
403 }
404
405 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
406 len = SKB_DATA_ALIGN(len);
407
408 if (sk_memalloc_socks())
409 gfp_mask |= __GFP_MEMALLOC;
410
411 local_irq_save(flags);
412
413 nc = this_cpu_ptr(&netdev_alloc_cache);
414 data = page_frag_alloc(nc, len, gfp_mask);
415 pfmemalloc = nc->pfmemalloc;
416
417 local_irq_restore(flags);
418
419 if (unlikely(!data))
420 return NULL;
421
422 skb = __build_skb(data, len);
423 if (unlikely(!skb)) {
424 skb_free_frag(data);
425 return NULL;
426 }
427
428 /* use OR instead of assignment to avoid clearing of bits in mask */
429 if (pfmemalloc)
430 skb->pfmemalloc = 1;
431 skb->head_frag = 1;
432
433skb_success:
434 skb_reserve(skb, NET_SKB_PAD);
435 skb->dev = dev;
436
437skb_fail:
438 return skb;
439}
440EXPORT_SYMBOL(__netdev_alloc_skb);
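/* Illustrative sketch (assumption): most drivers reach this through the
 * netdev_alloc_skb() wrapper, which supplies %GFP_ATOMIC. The 1500-byte
 * length is a placeholder; NET_SKB_PAD headroom is already reserved.
 *
 *	struct sk_buff *skb = netdev_alloc_skb(dev, 1500);
 *
 *	if (skb)
 *		skb_put(skb, frame_len);	// frame_len is hypothetical
 */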
441
442/**
443 * __napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
444 * @napi: napi instance this buffer was allocated for
445 * @len: length to allocate
446 * @gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
447 *
448 * Allocate a new sk_buff for use in NAPI receive. The head is allocated
449 * from a special reserved region used only for NAPI Rx allocation.
450 * By doing this we can save several CPU cycles by avoiding having to
451 * disable and re-enable IRQs.
452 *
453 * %NULL is returned if there is no free memory.
454 */
455struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
456 gfp_t gfp_mask)
457{
458 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
459 struct sk_buff *skb;
460 void *data;
461
462 len += NET_SKB_PAD + NET_IP_ALIGN;
463
464 if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
465 (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
466 skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
467 if (!skb)
468 goto skb_fail;
469 goto skb_success;
470 }
471
472 len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
473 len = SKB_DATA_ALIGN(len);
474
475 if (sk_memalloc_socks())
476 gfp_mask |= __GFP_MEMALLOC;
477
478 data = page_frag_alloc(&nc->page, len, gfp_mask);
479 if (unlikely(!data))
480 return NULL;
481
482 skb = __build_skb(data, len);
483 if (unlikely(!skb)) {
484 skb_free_frag(data);
485 return NULL;
486 }
487
488 /* use OR instead of assignment to avoid clearing of bits in mask */
489 if (nc->page.pfmemalloc)
490 skb->pfmemalloc = 1;
491 skb->head_frag = 1;
492
493skb_success:
494 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
495 skb->dev = napi->dev;
496
497skb_fail:
498 return skb;
499}
500EXPORT_SYMBOL(__napi_alloc_skb);
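/* Illustrative sketch (assumption): allocation from inside a NAPI poll
 * handler via the napi_alloc_skb() wrapper, which supplies %GFP_ATOMIC and
 * avoids the IRQ save/restore needed by __netdev_alloc_skb().
 *
 *	struct sk_buff *skb = napi_alloc_skb(napi, frame_len);	// frame_len is hypothetical
 *
 *	if (!skb)
 *		return;			// hypothetical: drop and retry later
 */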
501
502void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
503 int size, unsigned int truesize)
504{
505 skb_fill_page_desc(skb, i, page, off, size);
506 skb->len += size;
507 skb->data_len += size;
508 skb->truesize += truesize;
509}
510EXPORT_SYMBOL(skb_add_rx_frag);
511
512void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
513 unsigned int truesize)
514{
515 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
516
517 skb_frag_size_add(frag, size);
518 skb->len += size;
519 skb->data_len += size;
520 skb->truesize += truesize;
521}
522EXPORT_SYMBOL(skb_coalesce_rx_frag);
523
524static void skb_drop_list(struct sk_buff **listp)
525{
526 kfree_skb_list(*listp);
527 *listp = NULL;
528}
529
530static inline void skb_drop_fraglist(struct sk_buff *skb)
531{
532 skb_drop_list(&skb_shinfo(skb)->frag_list);
533}
534
535static void skb_clone_fraglist(struct sk_buff *skb)
536{
537 struct sk_buff *list;
538
539 skb_walk_frags(skb, list)
540 skb_get(list);
541}
542
543static void skb_free_head(struct sk_buff *skb)
544{
545 unsigned char *head = skb->head;
546
547 if (skb->head_frag)
548 skb_free_frag(head);
549 else
550 kfree(head);
551}
552
553static void skb_release_data(struct sk_buff *skb)
554{
555 struct skb_shared_info *shinfo = skb_shinfo(skb);
556 int i;
557
558 if (skb->cloned &&
559 atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
560 &shinfo->dataref))
561 return;
562
563 for (i = 0; i < shinfo->nr_frags; i++)
564 __skb_frag_unref(&shinfo->frags[i]);
565
566 if (shinfo->frag_list)
567 kfree_skb_list(shinfo->frag_list);
568
569 skb_zcopy_clear(skb, true);
570 skb_free_head(skb);
571}
572
573/*
574 * Free an skbuff by memory without cleaning the state.
575 */
576static void kfree_skbmem(struct sk_buff *skb)
577{
578 struct sk_buff_fclones *fclones;
579
580 switch (skb->fclone) {
581 case SKB_FCLONE_UNAVAILABLE:
582 kmem_cache_free(skbuff_head_cache, skb);
583 return;
584
585 case SKB_FCLONE_ORIG:
586 fclones = container_of(skb, struct sk_buff_fclones, skb1);
587
588 /* We usually free the clone (TX completion) before the original skb.
589 * This test would have no chance to be true for the clone,
590 * while here, branch prediction will be good.
591 */
592 if (refcount_read(&fclones->fclone_ref) == 1)
593 goto fastpath;
594 break;
595
596 default: /* SKB_FCLONE_CLONE */
597 fclones = container_of(skb, struct sk_buff_fclones, skb2);
598 break;
599 }
600 if (!refcount_dec_and_test(&fclones->fclone_ref))
601 return;
602fastpath:
603 kmem_cache_free(skbuff_fclone_cache, fclones);
604}
605
606void skb_release_head_state(struct sk_buff *skb)
607{
608 skb_dst_drop(skb);
609 secpath_reset(skb);
610 if (skb->destructor) {
611 WARN_ON(in_irq());
612 skb->destructor(skb);
613 }
614#if IS_ENABLED(CONFIG_NF_CONNTRACK)
615 nf_conntrack_put(skb_nfct(skb));
616#endif
617#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
618 nf_bridge_put(skb->nf_bridge);
619#endif
620}
621
622/* Free everything but the sk_buff shell. */
623static void skb_release_all(struct sk_buff *skb)
624{
625 skb_release_head_state(skb);
626 if (likely(skb->head))
627 skb_release_data(skb);
628}
629
630/**
631 * __kfree_skb - private function
632 * @skb: buffer
633 *
634 * Free an sk_buff. Release anything attached to the buffer.
635 * Clean the state. This is an internal helper function. Users should
636 * always call kfree_skb
637 */
638
639void __kfree_skb(struct sk_buff *skb)
640{
641 skb_release_all(skb);
642 kfree_skbmem(skb);
643}
644EXPORT_SYMBOL(__kfree_skb);
645
646/**
647 * kfree_skb - free an sk_buff
648 * @skb: buffer to free
649 *
650 * Drop a reference to the buffer and free it if the usage count has
651 * hit zero.
652 */
653void kfree_skb(struct sk_buff *skb)
654{
655 if (!skb_unref(skb))
656 return;
657
658 trace_kfree_skb(skb, __builtin_return_address(0));
659 __kfree_skb(skb);
660}
661EXPORT_SYMBOL(kfree_skb);
662
663void kfree_skb_list(struct sk_buff *segs)
664{
665 while (segs) {
666 struct sk_buff *next = segs->next;
667
668 kfree_skb(segs);
669 segs = next;
670 }
671}
672EXPORT_SYMBOL(kfree_skb_list);
673
674/**
675 * skb_tx_error - report an sk_buff xmit error
676 * @skb: buffer that triggered an error
677 *
678 * Report xmit error if a device callback is tracking this skb.
679 * skb must be freed afterwards.
680 */
681void skb_tx_error(struct sk_buff *skb)
682{
683 skb_zcopy_clear(skb, true);
684}
685EXPORT_SYMBOL(skb_tx_error);
686
687/**
688 * consume_skb - free an skbuff
689 * @skb: buffer to free
690 *
691 * Drop a ref to the buffer and free it if the usage count has hit zero.
692 * Functions identically to kfree_skb, but kfree_skb assumes that the frame
693 * is being dropped after a failure and notes that.
694 */
695void consume_skb(struct sk_buff *skb)
696{
697 if (!skb_unref(skb))
698 return;
699
700 trace_consume_skb(skb);
701 __kfree_skb(skb);
702}
703EXPORT_SYMBOL(consume_skb);
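/* Illustrative sketch (assumption): picking the right free helper so drop
 * monitoring stays meaningful. The error condition is a placeholder.
 *
 *	if (some_error)
 *		kfree_skb(skb);		// counted/traced as a packet drop
 *	else
 *		consume_skb(skb);	// normal end of life, not a drop
 */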
704
705/**
706 * __consume_stateless_skb - free an skbuff, assuming it is stateless
707 * @skb: buffer to free
708 *
709 * Like consume_skb(), but this variant assumes that this is the last
710 * skb reference and all the head states have already been dropped
711 */
712void __consume_stateless_skb(struct sk_buff *skb)
713{
714 trace_consume_skb(skb);
715 skb_release_data(skb);
716 kfree_skbmem(skb);
717}
718
719void __kfree_skb_flush(void)
720{
721 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
722
723 /* flush skb_cache if containing objects */
724 if (nc->skb_count) {
725 kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
726 nc->skb_cache);
727 nc->skb_count = 0;
728 }
729}
730
731static inline void _kfree_skb_defer(struct sk_buff *skb)
732{
733 struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
734
735 /* drop skb->head and call any destructors for packet */
736 skb_release_all(skb);
737
738 /* record skb to CPU local list */
739 nc->skb_cache[nc->skb_count++] = skb;
740
741#ifdef CONFIG_SLUB
742 /* SLUB writes into objects when freeing */
743 prefetchw(skb);
744#endif
745
746 /* flush skb_cache if it is filled */
747 if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
748 kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
749 nc->skb_cache);
750 nc->skb_count = 0;
751 }
752}
753void __kfree_skb_defer(struct sk_buff *skb)
754{
755 _kfree_skb_defer(skb);
756}
757
758void napi_consume_skb(struct sk_buff *skb, int budget)
759{
760 if (unlikely(!skb))
761 return;
762
763 /* Zero budget indicates a non-NAPI context called us, like netpoll */
764 if (unlikely(!budget)) {
765 dev_consume_skb_any(skb);
766 return;
767 }
768
769 if (!skb_unref(skb))
770 return;
771
772 /* if reaching here SKB is ready to free */
773 trace_consume_skb(skb);
774
775 /* if SKB is a clone, don't handle this case */
776 if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
777 __kfree_skb(skb);
778 return;
779 }
780
781 _kfree_skb_defer(skb);
782}
783EXPORT_SYMBOL(napi_consume_skb);
784
785/* Make sure a field is enclosed inside headers_start/headers_end section */
786#define CHECK_SKB_FIELD(field) \
787 BUILD_BUG_ON(offsetof(struct sk_buff, field) < \
788 offsetof(struct sk_buff, headers_start)); \
789 BUILD_BUG_ON(offsetof(struct sk_buff, field) > \
790 offsetof(struct sk_buff, headers_end)); \
791
792static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
793{
794 new->tstamp = old->tstamp;
795 /* We do not copy old->sk */
796 new->dev = old->dev;
797 memcpy(new->cb, old->cb, sizeof(old->cb));
798 skb_dst_copy(new, old);
799#ifdef CONFIG_XFRM
800 new->sp = secpath_get(old->sp);
801#endif
802 __nf_copy(new, old, false);
803
804 /* Note : this field could be in headers_start/headers_end section
805 * It is not yet because we do not want to have a 16 bit hole
806 */
807 new->queue_mapping = old->queue_mapping;
808
809 memcpy(&new->headers_start, &old->headers_start,
810 offsetof(struct sk_buff, headers_end) -
811 offsetof(struct sk_buff, headers_start));
812 CHECK_SKB_FIELD(protocol);
813 CHECK_SKB_FIELD(csum);
814 CHECK_SKB_FIELD(hash);
815 CHECK_SKB_FIELD(priority);
816 CHECK_SKB_FIELD(skb_iif);
817 CHECK_SKB_FIELD(vlan_proto);
818 CHECK_SKB_FIELD(vlan_tci);
819 CHECK_SKB_FIELD(transport_header);
820 CHECK_SKB_FIELD(network_header);
821 CHECK_SKB_FIELD(mac_header);
822 CHECK_SKB_FIELD(inner_protocol);
823 CHECK_SKB_FIELD(inner_transport_header);
824 CHECK_SKB_FIELD(inner_network_header);
825 CHECK_SKB_FIELD(inner_mac_header);
826 CHECK_SKB_FIELD(mark);
827#ifdef CONFIG_NETWORK_SECMARK
828 CHECK_SKB_FIELD(secmark);
829#endif
830#ifdef CONFIG_NET_RX_BUSY_POLL
831 CHECK_SKB_FIELD(napi_id);
832#endif
833#ifdef CONFIG_XPS
834 CHECK_SKB_FIELD(sender_cpu);
835#endif
836#ifdef CONFIG_NET_SCHED
837 CHECK_SKB_FIELD(tc_index);
838#endif
839
840}
841
842/*
843 * You should not add any new code to this function. Add it to
844 * __copy_skb_header above instead.
845 */
846static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
847{
848#define C(x) n->x = skb->x
849
850 n->next = n->prev = NULL;
851 n->sk = NULL;
852 __copy_skb_header(n, skb);
853
854 C(len);
855 C(data_len);
856 C(mac_len);
857 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
858 n->cloned = 1;
859 n->nohdr = 0;
860 n->peeked = 0;
861 n->destructor = NULL;
862 C(tail);
863 C(end);
864 C(head);
865 C(head_frag);
866 C(data);
867 C(truesize);
868 refcount_set(&n->users, 1);
869
870 atomic_inc(&(skb_shinfo(skb)->dataref));
871 skb->cloned = 1;
872
873 return n;
874#undef C
875}
876
877/**
878 * skb_morph - morph one skb into another
879 * @dst: the skb to receive the contents
880 * @src: the skb to supply the contents
881 *
882 * This is identical to skb_clone except that the target skb is
883 * supplied by the user.
884 *
885 * The target skb is returned upon exit.
886 */
887struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
888{
889 skb_release_all(dst);
890 return __skb_clone(dst, src);
891}
892EXPORT_SYMBOL_GPL(skb_morph);
893
894int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
895{
896 unsigned long max_pg, num_pg, new_pg, old_pg;
897 struct user_struct *user;
898
899 if (capable(CAP_IPC_LOCK) || !size)
900 return 0;
901
902 num_pg = (size >> PAGE_SHIFT) + 2; /* worst case */
903 max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
904 user = mmp->user ? : current_user();
905
906 do {
907 old_pg = atomic_long_read(&user->locked_vm);
908 new_pg = old_pg + num_pg;
909 if (new_pg > max_pg)
910 return -ENOBUFS;
911 } while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
912 old_pg);
913
914 if (!mmp->user) {
915 mmp->user = get_uid(user);
916 mmp->num_pg = num_pg;
917 } else {
918 mmp->num_pg += num_pg;
919 }
920
921 return 0;
922}
923EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
924
925void mm_unaccount_pinned_pages(struct mmpin *mmp)
926{
927 if (mmp->user) {
928 atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
929 free_uid(mmp->user);
930 }
931}
932EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
933
934struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
935{
936 struct ubuf_info *uarg;
937 struct sk_buff *skb;
938
939 WARN_ON_ONCE(!in_task());
940
941 if (!sock_flag(sk, SOCK_ZEROCOPY))
942 return NULL;
943
944 skb = sock_omalloc(sk, 0, GFP_KERNEL);
945 if (!skb)
946 return NULL;
947
948 BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
949 uarg = (void *)skb->cb;
950 uarg->mmp.user = NULL;
951
952 if (mm_account_pinned_pages(&uarg->mmp, size)) {
953 kfree_skb(skb);
954 return NULL;
955 }
956
957 uarg->callback = sock_zerocopy_callback;
958 uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
959 uarg->len = 1;
960 uarg->bytelen = size;
961 uarg->zerocopy = 1;
962 refcount_set(&uarg->refcnt, 1);
963 sock_hold(sk);
964
965 return uarg;
966}
967EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
968
969static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
970{
971 return container_of((void *)uarg, struct sk_buff, cb);
972}
973
974struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
975 struct ubuf_info *uarg)
976{
977 if (uarg) {
978 const u32 byte_limit = 1 << 19; /* limit to a few TSO */
979 u32 bytelen, next;
980
981 /* realloc only when socket is locked (TCP, UDP cork),
982 * so uarg->len and sk_zckey access is serialized
983 */
984 if (!sock_owned_by_user(sk)) {
985 WARN_ON_ONCE(1);
986 return NULL;
987 }
988
989 bytelen = uarg->bytelen + size;
990 if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
991 /* TCP can create new skb to attach new uarg */
992 if (sk->sk_type == SOCK_STREAM)
993 goto new_alloc;
994 return NULL;
995 }
996
997 next = (u32)atomic_read(&sk->sk_zckey);
998 if ((u32)(uarg->id + uarg->len) == next) {
999 if (mm_account_pinned_pages(&uarg->mmp, size))
1000 return NULL;
1001 uarg->len++;
1002 uarg->bytelen = bytelen;
1003 atomic_set(&sk->sk_zckey, ++next);
1004 sock_zerocopy_get(uarg);
1005 return uarg;
1006 }
1007 }
1008
1009new_alloc:
1010 return sock_zerocopy_alloc(sk, size);
1011}
1012EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
1013
1014static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1015{
1016 struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1017 u32 old_lo, old_hi;
1018 u64 sum_len;
1019
1020 old_lo = serr->ee.ee_info;
1021 old_hi = serr->ee.ee_data;
1022 sum_len = old_hi - old_lo + 1ULL + len;
1023
1024 if (sum_len >= (1ULL << 32))
1025 return false;
1026
1027 if (lo != old_hi + 1)
1028 return false;
1029
1030 serr->ee.ee_data += len;
1031 return true;
1032}
1033
1034void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
1035{
1036 struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1037 struct sock_exterr_skb *serr;
1038 struct sock *sk = skb->sk;
1039 struct sk_buff_head *q;
1040 unsigned long flags;
1041 u32 lo, hi;
1042 u16 len;
1043
1044 mm_unaccount_pinned_pages(&uarg->mmp);
1045
1046 /* if !len, there was only 1 call, and it was aborted
1047 * so do not queue a completion notification
1048 */
1049 if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1050 goto release;
1051
1052 len = uarg->len;
1053 lo = uarg->id;
1054 hi = uarg->id + len - 1;
1055
1056 serr = SKB_EXT_ERR(skb);
1057 memset(serr, 0, sizeof(*serr));
1058 serr->ee.ee_errno = 0;
1059 serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1060 serr->ee.ee_data = hi;
1061 serr->ee.ee_info = lo;
1062 if (!success)
1063 serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1064
1065 q = &sk->sk_error_queue;
1066 spin_lock_irqsave(&q->lock, flags);
1067 tail = skb_peek_tail(q);
1068 if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1069 !skb_zerocopy_notify_extend(tail, lo, len)) {
1070 __skb_queue_tail(q, skb);
1071 skb = NULL;
1072 }
1073 spin_unlock_irqrestore(&q->lock, flags);
1074
1075 sk->sk_error_report(sk);
1076
1077release:
1078 consume_skb(skb);
1079 sock_put(sk);
1080}
1081EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
1082
1083void sock_zerocopy_put(struct ubuf_info *uarg)
1084{
1085 if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
1086 if (uarg->callback)
1087 uarg->callback(uarg, uarg->zerocopy);
1088 else
1089 consume_skb(skb_from_uarg(uarg));
1090 }
1091}
1092EXPORT_SYMBOL_GPL(sock_zerocopy_put);
1093
1094void sock_zerocopy_put_abort(struct ubuf_info *uarg)
1095{
1096 if (uarg) {
1097 struct sock *sk = skb_from_uarg(uarg)->sk;
1098
1099 atomic_dec(&sk->sk_zckey);
1100 uarg->len--;
1101
1102 sock_zerocopy_put(uarg);
1103 }
1104}
1105EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
1106
1107extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1108 struct iov_iter *from, size_t length);
1109
1110int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1111 struct msghdr *msg, int len,
1112 struct ubuf_info *uarg)
1113{
1114 struct ubuf_info *orig_uarg = skb_zcopy(skb);
1115 struct iov_iter orig_iter = msg->msg_iter;
1116 int err, orig_len = skb->len;
1117
1118 /* An skb can only point to one uarg. This edge case happens when
1119 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1120 */
1121 if (orig_uarg && uarg != orig_uarg)
1122 return -EEXIST;
1123
1124 err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1125 if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1126 struct sock *save_sk = skb->sk;
1127
1128 /* Streams do not free skb on error. Reset to prev state. */
1129 msg->msg_iter = orig_iter;
1130 skb->sk = sk;
1131 ___pskb_trim(skb, orig_len);
1132 skb->sk = save_sk;
1133 return err;
1134 }
1135
1136 skb_zcopy_set(skb, uarg);
1137 return skb->len - orig_len;
1138}
1139EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
1140
1141static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1142 gfp_t gfp_mask)
1143{
1144 if (skb_zcopy(orig)) {
1145 if (skb_zcopy(nskb)) {
1146 /* !gfp_mask callers are verified to !skb_zcopy(nskb) */
1147 if (!gfp_mask) {
1148 WARN_ON_ONCE(1);
1149 return -ENOMEM;
1150 }
1151 if (skb_uarg(nskb) == skb_uarg(orig))
1152 return 0;
1153 if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1154 return -EIO;
1155 }
1156 skb_zcopy_set(nskb, skb_uarg(orig));
1157 }
1158 return 0;
1159}
1160
1161/**
1162 * skb_copy_ubufs - copy userspace skb frags buffers to kernel
1163 * @skb: the skb to modify
1164 * @gfp_mask: allocation priority
1165 *
1166 * This must be called on SKBTX_DEV_ZEROCOPY skb.
1167 * It will copy all frags into kernel and drop the reference
1168 * to userspace pages.
1169 *
1170 * If this function is called from an interrupt, @gfp_mask must be
1171 * %GFP_ATOMIC.
1172 *
1173 * Returns 0 on success or a negative error code on failure
1174 * to allocate kernel memory to copy to.
1175 */
1176int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1177{
1178 int num_frags = skb_shinfo(skb)->nr_frags;
1179 struct page *page, *head = NULL;
1180 int i, new_frags;
1181 u32 d_off;
1182
1183 if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1184 return -EINVAL;
1185
1186 if (!num_frags)
1187 goto release;
1188
1189 new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1190 for (i = 0; i < new_frags; i++) {
1191 page = alloc_page(gfp_mask);
1192 if (!page) {
1193 while (head) {
1194 struct page *next = (struct page *)page_private(head);
1195 put_page(head);
1196 head = next;
1197 }
1198 return -ENOMEM;
1199 }
1200 set_page_private(page, (unsigned long)head);
1201 head = page;
1202 }
1203
1204 page = head;
1205 d_off = 0;
1206 for (i = 0; i < num_frags; i++) {
1207 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1208 u32 p_off, p_len, copied;
1209 struct page *p;
1210 u8 *vaddr;
1211
1212 skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
1213 p, p_off, p_len, copied) {
1214 u32 copy, done = 0;
1215 vaddr = kmap_atomic(p);
1216
1217 while (done < p_len) {
1218 if (d_off == PAGE_SIZE) {
1219 d_off = 0;
1220 page = (struct page *)page_private(page);
1221 }
1222 copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1223 memcpy(page_address(page) + d_off,
1224 vaddr + p_off + done, copy);
1225 done += copy;
1226 d_off += copy;
1227 }
1228 kunmap_atomic(vaddr);
1229 }
1230 }
1231
1232 /* skb frags release userspace buffers */
1233 for (i = 0; i < num_frags; i++)
1234 skb_frag_unref(skb, i);
1235
1236 /* skb frags point to kernel buffers */
1237 for (i = 0; i < new_frags - 1; i++) {
1238 __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1239 head = (struct page *)page_private(head);
1240 }
1241 __skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1242 skb_shinfo(skb)->nr_frags = new_frags;
1243
1244release:
1245 skb_zcopy_clear(skb, false);
1246 return 0;
1247}
1248EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1249
1250/**
1251 * skb_clone - duplicate an sk_buff
1252 * @skb: buffer to clone
1253 * @gfp_mask: allocation priority
1254 *
1255 * Duplicate an &sk_buff. The new one is not owned by a socket. Both
1256 * copies share the same packet data but not structure. The new
1257 * buffer has a reference count of 1. If the allocation fails the
1258 * function returns %NULL otherwise the new buffer is returned.
1259 *
1260 * If this function is called from an interrupt, @gfp_mask must be
1261 * %GFP_ATOMIC.
1262 */
1263
1264struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1265{
1266 struct sk_buff_fclones *fclones = container_of(skb,
1267 struct sk_buff_fclones,
1268 skb1);
1269 struct sk_buff *n;
1270
1271 if (skb_orphan_frags(skb, gfp_mask))
1272 return NULL;
1273
1274 if (skb->fclone == SKB_FCLONE_ORIG &&
1275 refcount_read(&fclones->fclone_ref) == 1) {
1276 n = &fclones->skb2;
1277 refcount_set(&fclones->fclone_ref, 2);
1278 } else {
1279 if (skb_pfmemalloc(skb))
1280 gfp_mask |= __GFP_MEMALLOC;
1281
1282 n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1283 if (!n)
1284 return NULL;
1285
1286 n->fclone = SKB_FCLONE_UNAVAILABLE;
1287 }
1288
1289 return __skb_clone(n, skb);
1290}
1291EXPORT_SYMBOL(skb_clone);
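/* Illustrative sketch (assumption): handing a reference to a second consumer.
 * Clones share the payload, so neither side may modify the data without
 * first making it private (e.g. with pskb_expand_head() or skb_copy()).
 *
 *	struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
 *
 *	if (nskb)
 *		deliver_to_tap(nskb);	// deliver_to_tap is hypothetical
 */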
1292
1293static void skb_headers_offset_update(struct sk_buff *skb, int off)
1294{
1295 /* Only adjust this if it actually is csum_start rather than csum */
1296 if (skb->ip_summed == CHECKSUM_PARTIAL)
1297 skb->csum_start += off;
1298 /* {transport,network,mac}_header and tail are relative to skb->head */
1299 skb->transport_header += off;
1300 skb->network_header += off;
1301 if (skb_mac_header_was_set(skb))
1302 skb->mac_header += off;
1303 skb->inner_transport_header += off;
1304 skb->inner_network_header += off;
1305 skb->inner_mac_header += off;
1306}
1307
1308static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1309{
1310 __copy_skb_header(new, old);
1311
1312 skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1313 skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1314 skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1315}
1316
1317static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1318{
1319 if (skb_pfmemalloc(skb))
1320 return SKB_ALLOC_RX;
1321 return 0;
1322}
1323
1324/**
1325 * skb_copy - create private copy of an sk_buff
1326 * @skb: buffer to copy
1327 * @gfp_mask: allocation priority
1328 *
1329 * Make a copy of both an &sk_buff and its data. This is used when the
1330 * caller wishes to modify the data and needs a private copy of the
1331 * data to alter. Returns %NULL on failure or the pointer to the buffer
1332 * on success. The returned buffer has a reference count of 1.
1333 *
1334 * As a by-product this function converts a non-linear &sk_buff to a linear
1335 * one, so that the &sk_buff becomes completely private and the caller is
1336 * allowed to modify all the data of the returned buffer. This means that
1337 * this function is not recommended for use in circumstances when only the
1338 * header is going to be modified. Use pskb_copy() instead.
1339 */
1340
1341struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1342{
1343 int headerlen = skb_headroom(skb);
1344 unsigned int size = skb_end_offset(skb) + skb->data_len;
1345 struct sk_buff *n = __alloc_skb(size, gfp_mask,
1346 skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1347
1348 if (!n)
1349 return NULL;
1350
1351 /* Set the data pointer */
1352 skb_reserve(n, headerlen);
1353 /* Set the tail pointer and length */
1354 skb_put(n, skb->len);
1355
1356 BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1357
1358 copy_skb_header(n, skb);
1359 return n;
1360}
1361EXPORT_SYMBOL(skb_copy);
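/* Illustrative sketch (assumption): taking a fully private, linear copy
 * before rewriting packet contents in place. The TTL rewrite is only an
 * example of a modification the copy makes safe.
 *
 *	struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC);
 *
 *	if (nskb) {
 *		struct iphdr *iph = ip_hdr(nskb);	// data is now private
 *
 *		iph->ttl--;
 *		ip_send_check(iph);			// refresh the checksum
 *	}
 */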
1362
1363/**
1364 * __pskb_copy_fclone - create copy of an sk_buff with private head.
1365 * @skb: buffer to copy
1366 * @headroom: headroom of new skb
1367 * @gfp_mask: allocation priority
1368 * @fclone: if true allocate the copy of the skb from the fclone
1369 * cache instead of the head cache; it is recommended to set this
1370 * to true for the cases where the copy will likely be cloned
1371 *
1372 * Make a copy of both an &sk_buff and part of its data, located
1373 * in the header. Fragmented data remains shared. This is used when
1374 * the caller wishes to modify only the header of the &sk_buff and needs a
1375 * private copy of the header to alter. Returns %NULL on failure
1376 * or the pointer to the buffer on success.
1377 * The returned buffer has a reference count of 1.
1378 */
1379
1380struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1381 gfp_t gfp_mask, bool fclone)
1382{
1383 unsigned int size = skb_headlen(skb) + headroom;
1384 int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1385 struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1386
1387 if (!n)
1388 goto out;
1389
1390 /* Set the data pointer */
1391 skb_reserve(n, headroom);
1392 /* Set the tail pointer and length */
1393 skb_put(n, skb_headlen(skb));
1394 /* Copy the bytes */
1395 skb_copy_from_linear_data(skb, n->data, n->len);
1396
1397 n->truesize += skb->data_len;
1398 n->data_len = skb->data_len;
1399 n->len = skb->len;
1400
1401 if (skb_shinfo(skb)->nr_frags) {
1402 int i;
1403
1404 if (skb_orphan_frags(skb, gfp_mask) ||
1405 skb_zerocopy_clone(n, skb, gfp_mask)) {
1406 kfree_skb(n);
1407 n = NULL;
1408 goto out;
1409 }
1410 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1411 skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1412 skb_frag_ref(skb, i);
1413 }
1414 skb_shinfo(n)->nr_frags = i;
1415 }
1416
1417 if (skb_has_frag_list(skb)) {
1418 skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1419 skb_clone_fraglist(n);
1420 }
1421
1422 copy_skb_header(n, skb);
1423out:
1424 return n;
1425}
1426EXPORT_SYMBOL(__pskb_copy_fclone);
1427
1428/**
1429 * pskb_expand_head - reallocate header of &sk_buff
1430 * @skb: buffer to reallocate
1431 * @nhead: room to add at head
1432 * @ntail: room to add at tail
1433 * @gfp_mask: allocation priority
1434 *
1435 * Expands (or creates an identical copy, if @nhead and @ntail are zero)
1436 * the header of @skb. The &sk_buff itself is not changed. The &sk_buff MUST
1437 * have a reference count of 1. Returns zero on success or a negative error
1438 * code if expansion failed; in the latter case the &sk_buff is not changed.
1439 *
1440 * All the pointers pointing into skb header may change and must be
1441 * reloaded after call to this function.
1442 */
1443
1444int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1445 gfp_t gfp_mask)
1446{
1447 int i, osize = skb_end_offset(skb);
1448 int size = osize + nhead + ntail;
1449 long off;
1450 u8 *data;
1451
1452 BUG_ON(nhead < 0);
1453
1454 BUG_ON(skb_shared(skb));
1455
1456 size = SKB_DATA_ALIGN(size);
1457
1458 if (skb_pfmemalloc(skb))
1459 gfp_mask |= __GFP_MEMALLOC;
1460 data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1461 gfp_mask, NUMA_NO_NODE, NULL);
1462 if (!data)
1463 goto nodata;
1464 size = SKB_WITH_OVERHEAD(ksize(data));
1465
1466 /* Copy only real data... and, alas, header. This should be
1467 * optimized for the cases when header is void.
1468 */
1469 memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1470
1471 memcpy((struct skb_shared_info *)(data + size),
1472 skb_shinfo(skb),
1473 offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1474
1475 /*
1476 * if shinfo is shared we must drop the old head gracefully, but if it
1477 * is not we can just drop the old head and let the existing refcount
1478 * be since all we did is relocate the values
1479 */
1480 if (skb_cloned(skb)) {
1481 if (skb_orphan_frags(skb, gfp_mask))
1482 goto nofrags;
1483 if (skb_zcopy(skb))
1484 refcount_inc(&skb_uarg(skb)->refcnt);
1485 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1486 skb_frag_ref(skb, i);
1487
1488 if (skb_has_frag_list(skb))
1489 skb_clone_fraglist(skb);
1490
1491 skb_release_data(skb);
1492 } else {
1493 skb_free_head(skb);
1494 }
1495 off = (data + nhead) - skb->head;
1496
1497 skb->head = data;
1498 skb->head_frag = 0;
1499 skb->data += off;
1500#ifdef NET_SKBUFF_DATA_USES_OFFSET
1501 skb->end = size;
1502 off = nhead;
1503#else
1504 skb->end = skb->head + size;
1505#endif
1506 skb->tail += off;
1507 skb_headers_offset_update(skb, nhead);
1508 skb->cloned = 0;
1509 skb->hdr_len = 0;
1510 skb->nohdr = 0;
1511 atomic_set(&skb_shinfo(skb)->dataref, 1);
1512
1513 skb_metadata_clear(skb);
1514
1515 /* It is not generally safe to change skb->truesize.
1516 * For the moment, we really care only about the rx path, or
1517 * when the skb is orphaned (not attached to a socket).
1518 */
1519 if (!skb->sk || skb->destructor == sock_edemux)
1520 skb->truesize += size - osize;
1521
1522 return 0;
1523
1524nofrags:
1525 kfree(data);
1526nodata:
1527 return -ENOMEM;
1528}
1529EXPORT_SYMBOL(pskb_expand_head);
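/* Illustrative sketch (assumption): growing headroom before pushing an
 * encapsulation header. As documented above, every pointer into the old
 * header must be reloaded afterwards; "needed" and "hdr" are placeholders.
 *
 *	if (skb_headroom(skb) < needed &&
 *	    pskb_expand_head(skb, needed - skb_headroom(skb), 0, GFP_ATOMIC))
 *		goto drop;			// hypothetical error path
 *
 *	hdr = skb_push(skb, needed);		// guaranteed to fit now
 */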
1530
1531/* Make private copy of skb with writable head and some headroom */
1532
1533struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1534{
1535 struct sk_buff *skb2;
1536 int delta = headroom - skb_headroom(skb);
1537
1538 if (delta <= 0)
1539 skb2 = pskb_copy(skb, GFP_ATOMIC);
1540 else {
1541 skb2 = skb_clone(skb, GFP_ATOMIC);
1542 if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1543 GFP_ATOMIC)) {
1544 kfree_skb(skb2);
1545 skb2 = NULL;
1546 }
1547 }
1548 return skb2;
1549}
1550EXPORT_SYMBOL(skb_realloc_headroom);
1551
1552/**
1553 * skb_copy_expand - copy and expand sk_buff
1554 * @skb: buffer to copy
1555 * @newheadroom: new free bytes at head
1556 * @newtailroom: new free bytes at tail
1557 * @gfp_mask: allocation priority
1558 *
1559 * Make a copy of both an &sk_buff and its data and while doing so
1560 * allocate additional space.
1561 *
1562 * This is used when the caller wishes to modify the data and needs a
1563 * private copy of the data to alter as well as more space for new fields.
1564 * Returns %NULL on failure or the pointer to the buffer
1565 * on success. The returned buffer has a reference count of 1.
1566 *
1567 * You must pass %GFP_ATOMIC as the allocation priority if this function
1568 * is called from an interrupt.
1569 */
1570struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1571 int newheadroom, int newtailroom,
1572 gfp_t gfp_mask)
1573{
1574 /*
1575 * Allocate the copy buffer
1576 */
1577 struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1578 gfp_mask, skb_alloc_rx_flag(skb),
1579 NUMA_NO_NODE);
1580 int oldheadroom = skb_headroom(skb);
1581 int head_copy_len, head_copy_off;
1582
1583 if (!n)
1584 return NULL;
1585
1586 skb_reserve(n, newheadroom);
1587
1588 /* Set the tail pointer and length */
1589 skb_put(n, skb->len);
1590
1591 head_copy_len = oldheadroom;
1592 head_copy_off = 0;
1593 if (newheadroom <= head_copy_len)
1594 head_copy_len = newheadroom;
1595 else
1596 head_copy_off = newheadroom - head_copy_len;
1597
1598 /* Copy the linear header and data. */
1599 BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1600 skb->len + head_copy_len));
1601
1602 copy_skb_header(n, skb);
1603
1604 skb_headers_offset_update(n, newheadroom - oldheadroom);
1605
1606 return n;
1607}
1608EXPORT_SYMBOL(skb_copy_expand);
1609
1610/**
1611 * __skb_pad - zero pad the tail of an skb
1612 * @skb: buffer to pad
1613 * @pad: space to pad
1614 * @free_on_error: free buffer on error
1615 *
1616 * Ensure that a buffer is followed by a padding area that is zero
1617 * filled. Used by network drivers which may DMA or transfer data
1618 * beyond the buffer end onto the wire.
1619 *
1620 * May return error in out of memory cases. The skb is freed on error
1621 * if @free_on_error is true.
1622 */
1623
1624int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1625{
1626 int err;
1627 int ntail;
1628
1629 /* If the skbuff is non-linear, tailroom is always zero. */
1630 if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1631 memset(skb->data+skb->len, 0, pad);
1632 return 0;
1633 }
1634
1635 ntail = skb->data_len + pad - (skb->end - skb->tail);
1636 if (likely(skb_cloned(skb) || ntail > 0)) {
1637 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1638 if (unlikely(err))
1639 goto free_skb;
1640 }
1641
1642 /* FIXME: The use of this function with non-linear skb's really needs
1643 * to be audited.
1644 */
1645 err = skb_linearize(skb);
1646 if (unlikely(err))
1647 goto free_skb;
1648
1649 memset(skb->data + skb->len, 0, pad);
1650 return 0;
1651
1652free_skb:
1653 if (free_on_error)
1654 kfree_skb(skb);
1655 return err;
1656}
1657EXPORT_SYMBOL(__skb_pad);
1658
1659/**
1660 * pskb_put - add data to the tail of a potentially fragmented buffer
1661 * @skb: start of the buffer to use
1662 * @tail: tail fragment of the buffer to use
1663 * @len: amount of data to add
1664 *
1665 * This function extends the used data area of the potentially
1666 * fragmented buffer. @tail must be the last fragment of @skb -- or
1667 * @skb itself. If this would exceed the total buffer size the kernel
1668 * will panic. A pointer to the first byte of the extra data is
1669 * returned.
1670 */
1671
1672void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1673{
1674 if (tail != skb) {
1675 skb->data_len += len;
1676 skb->len += len;
1677 }
1678 return skb_put(tail, len);
1679}
1680EXPORT_SYMBOL_GPL(pskb_put);
1681
1682/**
1683 * skb_put - add data to a buffer
1684 * @skb: buffer to use
1685 * @len: amount of data to add
1686 *
1687 * This function extends the used data area of the buffer. If this would
1688 * exceed the total buffer size the kernel will panic. A pointer to the
1689 * first byte of the extra data is returned.
1690 */
1691void *skb_put(struct sk_buff *skb, unsigned int len)
1692{
1693 void *tmp = skb_tail_pointer(skb);
1694 SKB_LINEAR_ASSERT(skb);
1695 skb->tail += len;
1696 skb->len += len;
1697 if (unlikely(skb->tail > skb->end))
1698 skb_over_panic(skb, len, __builtin_return_address(0));
1699 return tmp;
1700}
1701EXPORT_SYMBOL(skb_put);
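/* Illustrative sketch (assumption): filling a freshly allocated, linear
 * buffer. payload and payload_len are placeholders; the caller must have
 * reserved enough tailroom at allocation time.
 *
 *	void *p = skb_put(skb, payload_len);	// extend the data area
 *
 *	memcpy(p, payload, payload_len);	// then fill it in
 */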
1702
1703/**
1704 * skb_push - add data to the start of a buffer
1705 * @skb: buffer to use
1706 * @len: amount of data to add
1707 *
1708 * This function extends the used data area of the buffer at the buffer
1709 * start. If this would exceed the total buffer headroom the kernel will
1710 * panic. A pointer to the first byte of the extra data is returned.
1711 */
1712void *skb_push(struct sk_buff *skb, unsigned int len)
1713{
1714 skb->data -= len;
1715 skb->len += len;
1716 if (unlikely(skb->data<skb->head))
1717 skb_under_panic(skb, len, __builtin_return_address(0));
1718 return skb->data;
1719}
1720EXPORT_SYMBOL(skb_push);
1721
1722/**
1723 * skb_pull - remove data from the start of a buffer
1724 * @skb: buffer to use
1725 * @len: amount of data to remove
1726 *
1727 * This function removes data from the start of a buffer, returning
1728 * the memory to the headroom. A pointer to the next data in the buffer
1729 * is returned. Once the data has been pulled future pushes will overwrite
1730 * the old data.
1731 */
1732void *skb_pull(struct sk_buff *skb, unsigned int len)
1733{
1734 return skb_pull_inline(skb, len);
1735}
1736EXPORT_SYMBOL(skb_pull);
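/* Illustrative sketch (assumption): stepping past the link-layer header while
 * parsing a received frame; the pulled bytes become headroom again.
 *
 *	struct ethhdr *eth = (struct ethhdr *)skb->data;
 *
 *	skb_pull(skb, ETH_HLEN);	// skb->data now points at the payload
 *	// eth remains valid; it simply points into the new headroom
 */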
1737
1738/**
1739 * skb_trim - remove end from a buffer
1740 * @skb: buffer to alter
1741 * @len: new length
1742 *
1743 * Cut the length of a buffer down by removing data from the tail. If
1744 * the buffer is already under the length specified it is not modified.
1745 * The skb must be linear.
1746 */
1747void skb_trim(struct sk_buff *skb, unsigned int len)
1748{
1749 if (skb->len > len)
1750 __skb_trim(skb, len);
1751}
1752EXPORT_SYMBOL(skb_trim);
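/* Illustrative sketch (assumption): removing trailing bytes, e.g. a hardware
 * FCS, from a linear frame.
 *
 *	if (skb->len >= ETH_FCS_LEN)
 *		skb_trim(skb, skb->len - ETH_FCS_LEN);
 */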
1753
1754/* Trims skb to length len. It can change skb pointers.
1755 */
1756
1757int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1758{
1759 struct sk_buff **fragp;
1760 struct sk_buff *frag;
1761 int offset = skb_headlen(skb);
1762 int nfrags = skb_shinfo(skb)->nr_frags;
1763 int i;
1764 int err;
1765
1766 if (skb_cloned(skb) &&
1767 unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1768 return err;
1769
1770 i = 0;
1771 if (offset >= len)
1772 goto drop_pages;
1773
1774 for (; i < nfrags; i++) {
1775 int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1776
1777 if (end < len) {
1778 offset = end;
1779 continue;
1780 }
1781
1782 skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1783
1784drop_pages:
1785 skb_shinfo(skb)->nr_frags = i;
1786
1787 for (; i < nfrags; i++)
1788 skb_frag_unref(skb, i);
1789
1790 if (skb_has_frag_list(skb))
1791 skb_drop_fraglist(skb);
1792 goto done;
1793 }
1794
1795 for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1796 fragp = &frag->next) {
1797 int end = offset + frag->len;
1798
1799 if (skb_shared(frag)) {
1800 struct sk_buff *nfrag;
1801
1802 nfrag = skb_clone(frag, GFP_ATOMIC);
1803 if (unlikely(!nfrag))
1804 return -ENOMEM;
1805
1806 nfrag->next = frag->next;
1807 consume_skb(frag);
1808 frag = nfrag;
1809 *fragp = frag;
1810 }
1811
1812 if (end < len) {
1813 offset = end;
1814 continue;
1815 }
1816
1817 if (end > len &&
1818 unlikely((err = pskb_trim(frag, len - offset))))
1819 return err;
1820
1821 if (frag->next)
1822 skb_drop_list(&frag->next);
1823 break;
1824 }
1825
1826done:
1827 if (len > skb_headlen(skb)) {
1828 skb->data_len -= skb->len - len;
1829 skb->len = len;
1830 } else {
1831 skb->len = len;
1832 skb->data_len = 0;
1833 skb_set_tail_pointer(skb, len);
1834 }
1835
1836 if (!skb->sk || skb->destructor == sock_edemux)
1837 skb_condense(skb);
1838 return 0;
1839}
1840EXPORT_SYMBOL(___pskb_trim);
1841
1842/**
1843 * __pskb_pull_tail - advance tail of skb header
1844 * @skb: buffer to reallocate
1845 * @delta: number of bytes to advance tail
1846 *
1847 * The function makes sense only on a fragmented &sk_buff:
1848 * it expands the header, moving its tail forward and copying the necessary
1849 * data from the fragmented part.
1850 *
1851 * &sk_buff MUST have reference count of 1.
1852 *
1853 * Returns %NULL (and &sk_buff does not change) if pull failed
1854 * or value of new tail of skb in the case of success.
1855 *
1856 * All the pointers pointing into skb header may change and must be
1857 * reloaded after call to this function.
1858 */
1859
1860/* Moves tail of skb head forward, copying data from fragmented part,
1861 * when it is necessary.
1862 * 1. It may fail due to malloc failure.
1863 * 2. It may change skb pointers.
1864 *
1865 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1866 */
1867void *__pskb_pull_tail(struct sk_buff *skb, int delta)
1868{
1869 /* If the skb does not have enough free space at the tail, get a new one
1870 * plus 128 bytes for future expansions. If we have enough
1871 * room at the tail, reallocate without expansion only if the skb is cloned.
1872 */
1873 int i, k, eat = (skb->tail + delta) - skb->end;
1874
1875 if (eat > 0 || skb_cloned(skb)) {
1876 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1877 GFP_ATOMIC))
1878 return NULL;
1879 }
1880
1881 BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
1882 skb_tail_pointer(skb), delta));
1883
1884 /* Optimization: no fragments, no reason to preestimate
1885 * the size of pulled pages. Superb.
1886 */
1887 if (!skb_has_frag_list(skb))
1888 goto pull_pages;
1889
1890 /* Estimate size of pulled pages. */
1891 eat = delta;
1892 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1893 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1894
1895 if (size >= eat)
1896 goto pull_pages;
1897 eat -= size;
1898 }
1899
1900 /* If we need to update the frag list, we are in trouble.
1901 * Certainly, it is possible to add an offset to skb data,
1902 * but taking into account that pulling is expected to
1903 * be a very rare operation, it is worth fighting against
1904 * further bloating of the skb head and crucifying ourselves here instead.
1905 * Pure masochism, indeed. 8)8)
1906 */
1907 if (eat) {
1908 struct sk_buff *list = skb_shinfo(skb)->frag_list;
1909 struct sk_buff *clone = NULL;
1910 struct sk_buff *insp = NULL;
1911
1912 do {
1913 BUG_ON(!list);
1914
1915 if (list->len <= eat) {
1916 /* Eaten as whole. */
1917 eat -= list->len;
1918 list = list->next;
1919 insp = list;
1920 } else {
1921 /* Eaten partially. */
1922
1923 if (skb_shared(list)) {
1924 /* Sucks! We need to fork list. :-( */
1925 clone = skb_clone(list, GFP_ATOMIC);
1926 if (!clone)
1927 return NULL;
1928 insp = list->next;
1929 list = clone;
1930 } else {
1931 /* This may be pulled without
1932 * problems. */
1933 insp = list;
1934 }
1935 if (!pskb_pull(list, eat)) {
1936 kfree_skb(clone);
1937 return NULL;
1938 }
1939 break;
1940 }
1941 } while (eat);
1942
1943 /* Free pulled out fragments. */
1944 while ((list = skb_shinfo(skb)->frag_list) != insp) {
1945 skb_shinfo(skb)->frag_list = list->next;
1946 kfree_skb(list);
1947 }
1948 /* And insert new clone at head. */
1949 if (clone) {
1950 clone->next = list;
1951 skb_shinfo(skb)->frag_list = clone;
1952 }
1953 }
1954 /* Success! Now we may commit changes to skb data. */
1955
1956pull_pages:
1957 eat = delta;
1958 k = 0;
1959 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1960 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1961
1962 if (size <= eat) {
1963 skb_frag_unref(skb, i);
1964 eat -= size;
1965 } else {
1966 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1967 if (eat) {
1968 skb_shinfo(skb)->frags[k].page_offset += eat;
1969 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1970 if (!i)
1971 goto end;
1972 eat = 0;
1973 }
1974 k++;
1975 }
1976 }
1977 skb_shinfo(skb)->nr_frags = k;
1978
1979end:
1980 skb->tail += delta;
1981 skb->data_len -= delta;
1982
1983 if (!skb->data_len)
1984 skb_zcopy_clear(skb, false);
1985
1986 return skb_tail_pointer(skb);
1987}
1988EXPORT_SYMBOL(__pskb_pull_tail);
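/* Illustrative sketch (assumption): callers normally reach __pskb_pull_tail()
 * through pskb_may_pull() before touching header bytes that may still live in
 * fragments; "iph" and the error path are placeholders.
 *
 *	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
 *		goto drop;		// hypothetical: header not available
 *
 *	iph = ip_hdr(skb);		// linear data now covers the header
 */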
1989
1990/**
1991 * skb_copy_bits - copy bits from skb to kernel buffer
1992 * @skb: source skb
1993 * @offset: offset in source
1994 * @to: destination buffer
1995 * @len: number of bytes to copy
1996 *
1997 * Copy the specified number of bytes from the source skb to the
1998 * destination buffer.
1999 *
2000 * CAUTION ! :
2001 * If its prototype is ever changed,
2002 * check arch/{*}/net/{*}.S files,
2003 * since it is called from BPF assembly code.
2004 */
2005int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2006{
2007 int start = skb_headlen(skb);
2008 struct sk_buff *frag_iter;
2009 int i, copy;
2010
2011 if (offset > (int)skb->len - len)
2012 goto fault;
2013
2014 /* Copy header. */
2015 if ((copy = start - offset) > 0) {
2016 if (copy > len)
2017 copy = len;
2018 skb_copy_from_linear_data_offset(skb, offset, to, copy);
2019 if ((len -= copy) == 0)
2020 return 0;
2021 offset += copy;
2022 to += copy;
2023 }
2024
2025 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2026 int end;
2027 skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2028
2029 WARN_ON(start > offset + len);
2030
2031 end = start + skb_frag_size(f);
2032 if ((copy = end - offset) > 0) {
2033 u32 p_off, p_len, copied;
2034 struct page *p;
2035 u8 *vaddr;
2036
2037 if (copy > len)
2038 copy = len;
2039
2040 skb_frag_foreach_page(f,
2041 f->page_offset + offset - start,
2042 copy, p, p_off, p_len, copied) {
2043 vaddr = kmap_atomic(p);
2044 memcpy(to + copied, vaddr + p_off, p_len);
2045 kunmap_atomic(vaddr);
2046 }
2047
2048 if ((len -= copy) == 0)
2049 return 0;
2050 offset += copy;
2051 to += copy;
2052 }
2053 start = end;
2054 }
2055
2056 skb_walk_frags(skb, frag_iter) {
2057 int end;
2058
2059 WARN_ON(start > offset + len);
2060
2061 end = start + frag_iter->len;
2062 if ((copy = end - offset) > 0) {
2063 if (copy > len)
2064 copy = len;
2065 if (skb_copy_bits(frag_iter, offset - start, to, copy))
2066 goto fault;
2067 if ((len -= copy) == 0)
2068 return 0;
2069 offset += copy;
2070 to += copy;
2071 }
2072 start = end;
2073 }
2074
2075 if (!len)
2076 return 0;
2077
2078fault:
2079 return -EFAULT;
2080}
2081EXPORT_SYMBOL(skb_copy_bits);
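/* Usage sketch (illustrative only, an editorial example rather than part of
 * this file): pull the network and transport headers of a possibly
 * non-linear skb into a local buffer. The "hdrs" buffer and the fixed
 * IPv4 + TCP header sizes are assumptions made for the example.
 *
 *	u8 hdrs[sizeof(struct iphdr) + sizeof(struct tcphdr)];
 *
 *	if (skb_copy_bits(skb, skb_network_offset(skb), hdrs, sizeof(hdrs)))
 *		return -EFAULT;
 *
 * skb_copy_bits() walks the linear area, the page frags and the frag_list,
 * so the caller does not need to linearize the skb first.
 */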
2082
2083/*
2084 * Callback from splice_to_pipe(), if we need to release some pages
2085 * at the end of the spd in case we error'ed out in filling the pipe.
2086 */
2087static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2088{
2089 put_page(spd->pages[i]);
2090}
2091
2092static struct page *linear_to_page(struct page *page, unsigned int *len,
2093 unsigned int *offset,
2094 struct sock *sk)
2095{
2096 struct page_frag *pfrag = sk_page_frag(sk);
2097
2098 if (!sk_page_frag_refill(sk, pfrag))
2099 return NULL;
2100
2101 *len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2102
2103 memcpy(page_address(pfrag->page) + pfrag->offset,
2104 page_address(page) + *offset, *len);
2105 *offset = pfrag->offset;
2106 pfrag->offset += *len;
2107
2108 return pfrag->page;
2109}
2110
2111static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2112 struct page *page,
2113 unsigned int offset)
2114{
2115 return spd->nr_pages &&
2116 spd->pages[spd->nr_pages - 1] == page &&
2117 (spd->partial[spd->nr_pages - 1].offset +
2118 spd->partial[spd->nr_pages - 1].len == offset);
2119}
2120
2121/*
2122 * Fill page/offset/length into spd, if it can hold more pages.
2123 */
2124static bool spd_fill_page(struct splice_pipe_desc *spd,
2125 struct pipe_inode_info *pipe, struct page *page,
2126 unsigned int *len, unsigned int offset,
2127 bool linear,
2128 struct sock *sk)
2129{
2130 if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2131 return true;
2132
2133 if (linear) {
2134 page = linear_to_page(page, len, &offset, sk);
2135 if (!page)
2136 return true;
2137 }
2138 if (spd_can_coalesce(spd, page, offset)) {
2139 spd->partial[spd->nr_pages - 1].len += *len;
2140 return false;
2141 }
2142 get_page(page);
2143 spd->pages[spd->nr_pages] = page;
2144 spd->partial[spd->nr_pages].len = *len;
2145 spd->partial[spd->nr_pages].offset = offset;
2146 spd->nr_pages++;
2147
2148 return false;
2149}
2150
2151static bool __splice_segment(struct page *page, unsigned int poff,
2152 unsigned int plen, unsigned int *off,
2153 unsigned int *len,
2154 struct splice_pipe_desc *spd, bool linear,
2155 struct sock *sk,
2156 struct pipe_inode_info *pipe)
2157{
2158 if (!*len)
2159 return true;
2160
2161 /* skip this segment if already processed */
2162 if (*off >= plen) {
2163 *off -= plen;
2164 return false;
2165 }
2166
2167 /* ignore any bits we already processed */
2168 poff += *off;
2169 plen -= *off;
2170 *off = 0;
2171
2172 do {
2173 unsigned int flen = min(*len, plen);
2174
2175 if (spd_fill_page(spd, pipe, page, &flen, poff,
2176 linear, sk))
2177 return true;
2178 poff += flen;
2179 plen -= flen;
2180 *len -= flen;
2181 } while (*len && plen);
2182
2183 return false;
2184}
2185
2186/*
2187 * Map linear and fragment data from the skb to spd. It reports true if the
2188 * pipe is full or if we already spliced the requested length.
2189 */
2190static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2191 unsigned int *offset, unsigned int *len,
2192 struct splice_pipe_desc *spd, struct sock *sk)
2193{
2194 int seg;
2195 struct sk_buff *iter;
2196
2197 /* map the linear part :
2198 * If skb->head_frag is set, this 'linear' part is backed by a
2199 * fragment, and if the head is not shared with any clones then
2200 * we can avoid a copy since we own the head portion of this page.
2201 */
2202 if (__splice_segment(virt_to_page(skb->data),
2203 (unsigned long) skb->data & (PAGE_SIZE - 1),
2204 skb_headlen(skb),
2205 offset, len, spd,
2206 skb_head_is_locked(skb),
2207 sk, pipe))
2208 return true;
2209
2210 /*
2211 * then map the fragments
2212 */
2213 for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2214 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2215
2216 if (__splice_segment(skb_frag_page(f),
2217 f->page_offset, skb_frag_size(f),
2218 offset, len, spd, false, sk, pipe))
2219 return true;
2220 }
2221
2222 skb_walk_frags(skb, iter) {
2223 if (*offset >= iter->len) {
2224 *offset -= iter->len;
2225 continue;
2226 }
2227 /* __skb_splice_bits() only fails if the output has no room
2228 * left, so no point in going over the frag_list for the error
2229 * case.
2230 */
2231 if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2232 return true;
2233 }
2234
2235 return false;
2236}
2237
2238/*
2239 * Map data from the skb to a pipe. Should handle both the linear part,
2240 * the fragments, and the frag list.
2241 */
2242int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2243 struct pipe_inode_info *pipe, unsigned int tlen,
2244 unsigned int flags)
2245{
2246 struct partial_page partial[MAX_SKB_FRAGS];
2247 struct page *pages[MAX_SKB_FRAGS];
2248 struct splice_pipe_desc spd = {
2249 .pages = pages,
2250 .partial = partial,
2251 .nr_pages_max = MAX_SKB_FRAGS,
2252 .ops = &nosteal_pipe_buf_ops,
2253 .spd_release = sock_spd_release,
2254 };
2255 int ret = 0;
2256
2257 __skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2258
2259 if (spd.nr_pages)
2260 ret = splice_to_pipe(pipe, &spd);
2261
2262 return ret;
2263}
2264EXPORT_SYMBOL_GPL(skb_splice_bits);
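/* Usage sketch (illustrative only): splice up to "want" bytes of an skb's
 * payload into a pipe, roughly what tcp_splice_read() does per skb. The
 * "want", "offset", "pipe" and "flags" variables are assumptions for the
 * example.
 *
 *	int spliced = skb_splice_bits(skb, sk, offset, pipe, want, flags);
 *
 *	if (spliced > 0) {
 *		offset += spliced;
 *		want -= spliced;
 *	}
 */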
2265
2266/* Send skb data on a socket. Socket must be locked. */
2267int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2268 int len)
2269{
2270 unsigned int orig_len = len;
2271 struct sk_buff *head = skb;
2272 unsigned short fragidx;
2273 int slen, ret;
2274
2275do_frag_list:
2276
2277 /* Deal with head data */
2278 while (offset < skb_headlen(skb) && len) {
2279 struct kvec kv;
2280 struct msghdr msg;
2281
2282 slen = min_t(int, len, skb_headlen(skb) - offset);
2283 kv.iov_base = skb->data + offset;
2284 kv.iov_len = slen;
2285 memset(&msg, 0, sizeof(msg));
2286
2287 ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2288 if (ret <= 0)
2289 goto error;
2290
2291 offset += ret;
2292 len -= ret;
2293 }
2294
2295 /* All the data was skb head? */
2296 if (!len)
2297 goto out;
2298
2299 /* Make offset relative to start of frags */
2300 offset -= skb_headlen(skb);
2301
2302 /* Find where we are in frag list */
2303 for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2304 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2305
2306 if (offset < frag->size)
2307 break;
2308
2309 offset -= frag->size;
2310 }
2311
2312 for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2313 skb_frag_t *frag = &skb_shinfo(skb)->frags[fragidx];
2314
2315 slen = min_t(size_t, len, frag->size - offset);
2316
2317 while (slen) {
2318 ret = kernel_sendpage_locked(sk, frag->page.p,
2319 frag->page_offset + offset,
2320 slen, MSG_DONTWAIT);
2321 if (ret <= 0)
2322 goto error;
2323
2324 len -= ret;
2325 offset += ret;
2326 slen -= ret;
2327 }
2328
2329 offset = 0;
2330 }
2331
2332 if (len) {
2333 /* Process any frag lists */
2334
2335 if (skb == head) {
2336 if (skb_has_frag_list(skb)) {
2337 skb = skb_shinfo(skb)->frag_list;
2338 goto do_frag_list;
2339 }
2340 } else if (skb->next) {
2341 skb = skb->next;
2342 goto do_frag_list;
2343 }
2344 }
2345
2346out:
2347 return orig_len - len;
2348
2349error:
2350 return orig_len == len ? ret : orig_len - len;
2351}
2352EXPORT_SYMBOL_GPL(skb_send_sock_locked);
2353
2354/* Send skb data on a socket. */
2355int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
2356{
2357 int ret = 0;
2358
2359 lock_sock(sk);
2360 ret = skb_send_sock_locked(sk, skb, offset, len);
2361 release_sock(sk);
2362
2363 return ret;
2364}
2365EXPORT_SYMBOL_GPL(skb_send_sock);
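/* Usage sketch (illustrative only): forward the payload of an skb onto
 * another, currently unlocked socket. "other_sk" is an assumed socket
 * pointer; skb_send_sock_locked() is the variant to use when the caller
 * already holds the socket lock.
 *
 *	int sent = skb_send_sock(other_sk, skb, 0, skb->len);
 *
 *	if (sent < 0)
 *		pr_debug("forward failed: %d\n", sent);
 */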
2366
2367/**
2368 * skb_store_bits - store bits from kernel buffer to skb
2369 * @skb: destination buffer
2370 * @offset: offset in destination
2371 * @from: source buffer
2372 * @len: number of bytes to copy
2373 *
2374 * Copy the specified number of bytes from the source buffer to the
2375 * destination skb. This function handles all the messy bits of
2376 * traversing fragment lists and such.
2377 */
2378
2379int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2380{
2381 int start = skb_headlen(skb);
2382 struct sk_buff *frag_iter;
2383 int i, copy;
2384
2385 if (offset > (int)skb->len - len)
2386 goto fault;
2387
2388 if ((copy = start - offset) > 0) {
2389 if (copy > len)
2390 copy = len;
2391 skb_copy_to_linear_data_offset(skb, offset, from, copy);
2392 if ((len -= copy) == 0)
2393 return 0;
2394 offset += copy;
2395 from += copy;
2396 }
2397
2398 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2399 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2400 int end;
2401
2402 WARN_ON(start > offset + len);
2403
2404 end = start + skb_frag_size(frag);
2405 if ((copy = end - offset) > 0) {
2406 u32 p_off, p_len, copied;
2407 struct page *p;
2408 u8 *vaddr;
2409
2410 if (copy > len)
2411 copy = len;
2412
2413 skb_frag_foreach_page(frag,
2414 frag->page_offset + offset - start,
2415 copy, p, p_off, p_len, copied) {
2416 vaddr = kmap_atomic(p);
2417 memcpy(vaddr + p_off, from + copied, p_len);
2418 kunmap_atomic(vaddr);
2419 }
2420
2421 if ((len -= copy) == 0)
2422 return 0;
2423 offset += copy;
2424 from += copy;
2425 }
2426 start = end;
2427 }
2428
2429 skb_walk_frags(skb, frag_iter) {
2430 int end;
2431
2432 WARN_ON(start > offset + len);
2433
2434 end = start + frag_iter->len;
2435 if ((copy = end - offset) > 0) {
2436 if (copy > len)
2437 copy = len;
2438 if (skb_store_bits(frag_iter, offset - start,
2439 from, copy))
2440 goto fault;
2441 if ((len -= copy) == 0)
2442 return 0;
2443 offset += copy;
2444 from += copy;
2445 }
2446 start = end;
2447 }
2448 if (!len)
2449 return 0;
2450
2451fault:
2452 return -EFAULT;
2453}
2454EXPORT_SYMBOL(skb_store_bits);
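/* Usage sketch (illustrative only): overwrite bytes at a known offset in an
 * skb whose data may already live in page fragments. "patch" and "off" are
 * assumptions for the example; the skb must have been made writable first
 * (e.g. via skb_ensure_writable() or pskb_expand_head()).
 *
 *	if (skb_store_bits(skb, off, patch, sizeof(patch)))
 *		return -EFAULT;
 */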
2455
2456/* Checksum skb data. */
2457__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2458 __wsum csum, const struct skb_checksum_ops *ops)
2459{
2460 int start = skb_headlen(skb);
2461 int i, copy = start - offset;
2462 struct sk_buff *frag_iter;
2463 int pos = 0;
2464
2465 /* Checksum header. */
2466 if (copy > 0) {
2467 if (copy > len)
2468 copy = len;
2469 csum = ops->update(skb->data + offset, copy, csum);
2470 if ((len -= copy) == 0)
2471 return csum;
2472 offset += copy;
2473 pos = copy;
2474 }
2475
2476 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2477 int end;
2478 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2479
2480 WARN_ON(start > offset + len);
2481
2482 end = start + skb_frag_size(frag);
2483 if ((copy = end - offset) > 0) {
2484 u32 p_off, p_len, copied;
2485 struct page *p;
2486 __wsum csum2;
2487 u8 *vaddr;
2488
2489 if (copy > len)
2490 copy = len;
2491
2492 skb_frag_foreach_page(frag,
2493 frag->page_offset + offset - start,
2494 copy, p, p_off, p_len, copied) {
2495 vaddr = kmap_atomic(p);
2496 csum2 = ops->update(vaddr + p_off, p_len, 0);
2497 kunmap_atomic(vaddr);
2498 csum = ops->combine(csum, csum2, pos, p_len);
2499 pos += p_len;
2500 }
2501
2502 if (!(len -= copy))
2503 return csum;
2504 offset += copy;
2505 }
2506 start = end;
2507 }
2508
2509 skb_walk_frags(skb, frag_iter) {
2510 int end;
2511
2512 WARN_ON(start > offset + len);
2513
2514 end = start + frag_iter->len;
2515 if ((copy = end - offset) > 0) {
2516 __wsum csum2;
2517 if (copy > len)
2518 copy = len;
2519 csum2 = __skb_checksum(frag_iter, offset - start,
2520 copy, 0, ops);
2521 csum = ops->combine(csum, csum2, pos, copy);
2522 if ((len -= copy) == 0)
2523 return csum;
2524 offset += copy;
2525 pos += copy;
2526 }
2527 start = end;
2528 }
2529 BUG_ON(len);
2530
2531 return csum;
2532}
2533EXPORT_SYMBOL(__skb_checksum);
2534
2535__wsum skb_checksum(const struct sk_buff *skb, int offset,
2536 int len, __wsum csum)
2537{
2538 const struct skb_checksum_ops ops = {
2539 .update = csum_partial_ext,
2540 .combine = csum_block_add_ext,
2541 };
2542
2543 return __skb_checksum(skb, offset, len, csum, &ops);
2544}
2545EXPORT_SYMBOL(skb_checksum);
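/* Usage sketch (illustrative only): checksum an skb's payload starting at
 * the transport header and fold it to 16 bits, as receive paths do when no
 * hardware checksum is available.
 *
 *	__wsum csum = skb_checksum(skb, skb_transport_offset(skb),
 *				   skb->len - skb_transport_offset(skb), 0);
 *	__sum16 folded = csum_fold(csum);
 *
 * Callers needing crc32c can pass crc32c_csum_stub (declared below) as the
 * ops argument of __skb_checksum() instead of the default ops used here.
 */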
2546
2547/* Both of above in one bottle. */
2548
2549__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2550 u8 *to, int len, __wsum csum)
2551{
2552 int start = skb_headlen(skb);
2553 int i, copy = start - offset;
2554 struct sk_buff *frag_iter;
2555 int pos = 0;
2556
2557 /* Copy header. */
2558 if (copy > 0) {
2559 if (copy > len)
2560 copy = len;
2561 csum = csum_partial_copy_nocheck(skb->data + offset, to,
2562 copy, csum);
2563 if ((len -= copy) == 0)
2564 return csum;
2565 offset += copy;
2566 to += copy;
2567 pos = copy;
2568 }
2569
2570 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571 int end;
2572
2573 WARN_ON(start > offset + len);
2574
2575 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2576 if ((copy = end - offset) > 0) {
2577 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2578 u32 p_off, p_len, copied;
2579 struct page *p;
2580 __wsum csum2;
2581 u8 *vaddr;
2582
2583 if (copy > len)
2584 copy = len;
2585
2586 skb_frag_foreach_page(frag,
2587 frag->page_offset + offset - start,
2588 copy, p, p_off, p_len, copied) {
2589 vaddr = kmap_atomic(p);
2590 csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2591 to + copied,
2592 p_len, 0);
2593 kunmap_atomic(vaddr);
2594 csum = csum_block_add(csum, csum2, pos);
2595 pos += p_len;
2596 }
2597
2598 if (!(len -= copy))
2599 return csum;
2600 offset += copy;
2601 to += copy;
2602 }
2603 start = end;
2604 }
2605
2606 skb_walk_frags(skb, frag_iter) {
2607 __wsum csum2;
2608 int end;
2609
2610 WARN_ON(start > offset + len);
2611
2612 end = start + frag_iter->len;
2613 if ((copy = end - offset) > 0) {
2614 if (copy > len)
2615 copy = len;
2616 csum2 = skb_copy_and_csum_bits(frag_iter,
2617 offset - start,
2618 to, copy, 0);
2619 csum = csum_block_add(csum, csum2, pos);
2620 if ((len -= copy) == 0)
2621 return csum;
2622 offset += copy;
2623 to += copy;
2624 pos += copy;
2625 }
2626 start = end;
2627 }
2628 BUG_ON(len);
2629 return csum;
2630}
2631EXPORT_SYMBOL(skb_copy_and_csum_bits);
2632
2633static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2634{
2635 net_warn_ratelimited(
2636 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2637 __func__);
2638 return 0;
2639}
2640
2641static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2642 int offset, int len)
2643{
2644 net_warn_ratelimited(
2645 "%s: attempt to compute crc32c without libcrc32c.ko\n",
2646 __func__);
2647 return 0;
2648}
2649
2650static const struct skb_checksum_ops default_crc32c_ops = {
2651 .update = warn_crc32c_csum_update,
2652 .combine = warn_crc32c_csum_combine,
2653};
2654
2655const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2656 &default_crc32c_ops;
2657EXPORT_SYMBOL(crc32c_csum_stub);
2658
2659 /**
2660 * skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2661 * @from: source buffer
2662 *
2663 * Calculates the amount of linear headroom needed in the 'to' skb passed
2664 * into skb_zerocopy().
2665 */
2666unsigned int
2667skb_zerocopy_headlen(const struct sk_buff *from)
2668{
2669 unsigned int hlen = 0;
2670
2671 if (!from->head_frag ||
2672 skb_headlen(from) < L1_CACHE_BYTES ||
2673 skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2674 hlen = skb_headlen(from);
2675
2676 if (skb_has_frag_list(from))
2677 hlen = from->len;
2678
2679 return hlen;
2680}
2681EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2682
2683/**
2684 * skb_zerocopy - Zero copy skb to skb
2685 * @to: destination buffer
2686 * @from: source buffer
2687 * @len: number of bytes to copy from source buffer
2688 * @hlen: size of linear headroom in destination buffer
2689 *
2690 * Copies up to @len bytes from @from to @to by creating references
2691 * to the frags in the source buffer.
2692 *
2693 * The @hlen as calculated by skb_zerocopy_headlen() specifies the
2694 * headroom in the @to buffer.
2695 *
2696 * Return value:
2697 * 0: everything is OK
2698 * -ENOMEM: couldn't orphan frags of @from due to lack of memory
2699 * -EFAULT: skb_copy_bits() found some problem with skb geometry
2700 */
2701int
2702skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2703{
2704 int i, j = 0;
2705 int plen = 0; /* length of skb->head fragment */
2706 int ret;
2707 struct page *page;
2708 unsigned int offset;
2709
2710 BUG_ON(!from->head_frag && !hlen);
2711
2712 /* don't bother with small payloads */
2713 if (len <= skb_tailroom(to))
2714 return skb_copy_bits(from, 0, skb_put(to, len), len);
2715
2716 if (hlen) {
2717 ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2718 if (unlikely(ret))
2719 return ret;
2720 len -= hlen;
2721 } else {
2722 plen = min_t(int, skb_headlen(from), len);
2723 if (plen) {
2724 page = virt_to_head_page(from->head);
2725 offset = from->data - (unsigned char *)page_address(page);
2726 __skb_fill_page_desc(to, 0, page, offset, plen);
2727 get_page(page);
2728 j = 1;
2729 len -= plen;
2730 }
2731 }
2732
2733 to->truesize += len + plen;
2734 to->len += len + plen;
2735 to->data_len += len + plen;
2736
2737 if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2738 skb_tx_error(from);
2739 return -ENOMEM;
2740 }
2741 skb_zerocopy_clone(to, from, GFP_ATOMIC);
2742
2743 for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2744 if (!len)
2745 break;
2746 skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2747 skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2748 len -= skb_shinfo(to)->frags[j].size;
2749 skb_frag_ref(to, j);
2750 j++;
2751 }
2752 skb_shinfo(to)->nr_frags = j;
2753
2754 return 0;
2755}
2756EXPORT_SYMBOL_GPL(skb_zerocopy);
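/* Usage sketch (illustrative only): build a new skb that references the
 * frags of "from" instead of copying them. Error handling is reduced to
 * the minimum; all variable names are assumptions for the example.
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen, GFP_ATOMIC);
 *	int err;
 *
 *	if (!to)
 *		return -ENOMEM;
 *	err = skb_zerocopy(to, from, from->len, hlen);
 *	if (err) {
 *		kfree_skb(to);
 *		return err;
 *	}
 */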
2757
2758void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2759{
2760 __wsum csum;
2761 long csstart;
2762
2763 if (skb->ip_summed == CHECKSUM_PARTIAL)
2764 csstart = skb_checksum_start_offset(skb);
2765 else
2766 csstart = skb_headlen(skb);
2767
2768 BUG_ON(csstart > skb_headlen(skb));
2769
2770 skb_copy_from_linear_data(skb, to, csstart);
2771
2772 csum = 0;
2773 if (csstart != skb->len)
2774 csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2775 skb->len - csstart, 0);
2776
2777 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2778 long csstuff = csstart + skb->csum_offset;
2779
2780 *((__sum16 *)(to + csstuff)) = csum_fold(csum);
2781 }
2782}
2783EXPORT_SYMBOL(skb_copy_and_csum_dev);
2784
2785/**
2786 * skb_dequeue - remove from the head of the queue
2787 * @list: list to dequeue from
2788 *
2789 * Remove the head of the list. The list lock is taken so the function
2790 * may be used safely with other locking list functions. The head item is
2791 * returned or %NULL if the list is empty.
2792 */
2793
2794struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2795{
2796 unsigned long flags;
2797 struct sk_buff *result;
2798
2799 spin_lock_irqsave(&list->lock, flags);
2800 result = __skb_dequeue(list);
2801 spin_unlock_irqrestore(&list->lock, flags);
2802 return result;
2803}
2804EXPORT_SYMBOL(skb_dequeue);
2805
2806/**
2807 * skb_dequeue_tail - remove from the tail of the queue
2808 * @list: list to dequeue from
2809 *
2810 * Remove the tail of the list. The list lock is taken so the function
2811 * may be used safely with other locking list functions. The tail item is
2812 * returned or %NULL if the list is empty.
2813 */
2814struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2815{
2816 unsigned long flags;
2817 struct sk_buff *result;
2818
2819 spin_lock_irqsave(&list->lock, flags);
2820 result = __skb_dequeue_tail(list);
2821 spin_unlock_irqrestore(&list->lock, flags);
2822 return result;
2823}
2824EXPORT_SYMBOL(skb_dequeue_tail);
2825
2826/**
2827 * skb_queue_purge - empty a list
2828 * @list: list to empty
2829 *
2830 * Delete all buffers on an &sk_buff list. Each buffer is removed from
2831 * the list and one reference dropped. This function takes the list
2832 * lock and is atomic with respect to other list locking functions.
2833 */
2834void skb_queue_purge(struct sk_buff_head *list)
2835{
2836 struct sk_buff *skb;
2837 while ((skb = skb_dequeue(list)) != NULL)
2838 kfree_skb(skb);
2839}
2840EXPORT_SYMBOL(skb_queue_purge);
2841
2842/**
2843 * skb_rbtree_purge - empty a skb rbtree
2844 * @root: root of the rbtree to empty
2845 *
2846 * Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
2847 * the list and one reference dropped. This function does not take
2848 * any lock. Synchronization should be handled by the caller (e.g., TCP
2849 * out-of-order queue is protected by the socket lock).
2850 */
2851void skb_rbtree_purge(struct rb_root *root)
2852{
2853 struct rb_node *p = rb_first(root);
2854
2855 while (p) {
2856 struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
2857
2858 p = rb_next(p);
2859 rb_erase(&skb->rbnode, root);
2860 kfree_skb(skb);
2861 }
2862}
2863
2864/**
2865 * skb_queue_head - queue a buffer at the list head
2866 * @list: list to use
2867 * @newsk: buffer to queue
2868 *
2869 * Queue a buffer at the start of the list. This function takes the
2870 * list lock and can be used safely with other locking &sk_buff
2871 * functions.
2872 *
2873 * A buffer cannot be placed on two lists at the same time.
2874 */
2875void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2876{
2877 unsigned long flags;
2878
2879 spin_lock_irqsave(&list->lock, flags);
2880 __skb_queue_head(list, newsk);
2881 spin_unlock_irqrestore(&list->lock, flags);
2882}
2883EXPORT_SYMBOL(skb_queue_head);
2884
2885/**
2886 * skb_queue_tail - queue a buffer at the list tail
2887 * @list: list to use
2888 * @newsk: buffer to queue
2889 *
2890 * Queue a buffer at the tail of the list. This function takes the
2891 * list lock and can be used safely with other locking &sk_buff
2892 * functions.
2893 *
2894 * A buffer cannot be placed on two lists at the same time.
2895 */
2896void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2897{
2898 unsigned long flags;
2899
2900 spin_lock_irqsave(&list->lock, flags);
2901 __skb_queue_tail(list, newsk);
2902 spin_unlock_irqrestore(&list->lock, flags);
2903}
2904EXPORT_SYMBOL(skb_queue_tail);
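/* Usage sketch (illustrative only): a minimal producer/consumer pattern on
 * an sk_buff_head. Both helpers take the queue lock with IRQs disabled, so
 * they can be mixed between process and interrupt context. "rxq" is an
 * assumed queue initialized elsewhere with skb_queue_head_init().
 *
 * Producer:
 *	skb_queue_tail(&rxq, skb);
 *
 * Consumer:
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		consume_skb(skb);
 */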
2905
2906/**
2907 * skb_unlink - remove a buffer from a list
2908 * @skb: buffer to remove
2909 * @list: list to use
2910 *
2911 * Remove a packet from a list. The list locks are taken and this
2912 * function is atomic with respect to other list locked calls.
2913 *
2914 * You must know what list the SKB is on.
2915 */
2916void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2917{
2918 unsigned long flags;
2919
2920 spin_lock_irqsave(&list->lock, flags);
2921 __skb_unlink(skb, list);
2922 spin_unlock_irqrestore(&list->lock, flags);
2923}
2924EXPORT_SYMBOL(skb_unlink);
2925
2926/**
2927 * skb_append - append a buffer
2928 * @old: buffer to insert after
2929 * @newsk: buffer to insert
2930 * @list: list to use
2931 *
2932 * Place a packet after a given packet in a list. The list locks are taken
2933 * and this function is atomic with respect to other list locked calls.
2934 * A buffer cannot be placed on two lists at the same time.
2935 */
2936void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2937{
2938 unsigned long flags;
2939
2940 spin_lock_irqsave(&list->lock, flags);
2941 __skb_queue_after(list, old, newsk);
2942 spin_unlock_irqrestore(&list->lock, flags);
2943}
2944EXPORT_SYMBOL(skb_append);
2945
2946/**
2947 * skb_insert - insert a buffer
2948 * @old: buffer to insert before
2949 * @newsk: buffer to insert
2950 * @list: list to use
2951 *
2952 * Place a packet before a given packet in a list. The list locks are
2953 * taken and this function is atomic with respect to other list locked
2954 * calls.
2955 *
2956 * A buffer cannot be placed on two lists at the same time.
2957 */
2958void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2959{
2960 unsigned long flags;
2961
2962 spin_lock_irqsave(&list->lock, flags);
2963 __skb_insert(newsk, old->prev, old, list);
2964 spin_unlock_irqrestore(&list->lock, flags);
2965}
2966EXPORT_SYMBOL(skb_insert);
2967
2968static inline void skb_split_inside_header(struct sk_buff *skb,
2969 struct sk_buff* skb1,
2970 const u32 len, const int pos)
2971{
2972 int i;
2973
2974 skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2975 pos - len);
2976 /* And move data appendix as is. */
2977 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2978 skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2979
2980 skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2981 skb_shinfo(skb)->nr_frags = 0;
2982 skb1->data_len = skb->data_len;
2983 skb1->len += skb1->data_len;
2984 skb->data_len = 0;
2985 skb->len = len;
2986 skb_set_tail_pointer(skb, len);
2987}
2988
2989static inline void skb_split_no_header(struct sk_buff *skb,
2990 struct sk_buff* skb1,
2991 const u32 len, int pos)
2992{
2993 int i, k = 0;
2994 const int nfrags = skb_shinfo(skb)->nr_frags;
2995
2996 skb_shinfo(skb)->nr_frags = 0;
2997 skb1->len = skb1->data_len = skb->len - len;
2998 skb->len = len;
2999 skb->data_len = len - pos;
3000
3001 for (i = 0; i < nfrags; i++) {
3002 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3003
3004 if (pos + size > len) {
3005 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3006
3007 if (pos < len) {
3008 /* Split frag.
3009 * We have two options in this case:
3010 * 1. Move the whole frag to the second
3011 * part, if that is possible (e.g. this
3012 * approach is mandatory for TUX, where
3013 * splitting is expensive).
3014 * 2. Split the frag accurately; this is what we do here.
3015 */
3016 skb_frag_ref(skb, i);
3017 skb_shinfo(skb1)->frags[0].page_offset += len - pos;
3018 skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3019 skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3020 skb_shinfo(skb)->nr_frags++;
3021 }
3022 k++;
3023 } else
3024 skb_shinfo(skb)->nr_frags++;
3025 pos += size;
3026 }
3027 skb_shinfo(skb1)->nr_frags = k;
3028}
3029
3030/**
3031 * skb_split - Split fragmented skb to two parts at length len.
3032 * @skb: the buffer to split
3033 * @skb1: the buffer to receive the second part
3034 * @len: new length for skb
3035 */
3036void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3037{
3038 int pos = skb_headlen(skb);
3039
3040 skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3041 SKBTX_SHARED_FRAG;
3042 skb_zerocopy_clone(skb1, skb, 0);
3043 if (len < pos) /* Split line is inside header. */
3044 skb_split_inside_header(skb, skb1, len, pos);
3045 else /* Second chunk has no header, nothing to copy. */
3046 skb_split_no_header(skb, skb1, len, pos);
3047}
3048EXPORT_SYMBOL(skb_split);
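/* Usage sketch (illustrative only): split the first "mss" bytes off an skb,
 * the way TCP segmentation helpers do. "mss" and the MAX_TCP_HEADER-sized
 * headroom are assumptions for the example; skb1 needs enough linear space
 * for whatever part of the split point falls inside the header.
 *
 *	struct sk_buff *skb1 = alloc_skb(skb_headlen(skb) + MAX_TCP_HEADER,
 *					 GFP_ATOMIC);
 *
 *	if (!skb1)
 *		return -ENOMEM;
 *	skb_reserve(skb1, MAX_TCP_HEADER);
 *	skb_split(skb, skb1, mss);
 */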
3049
3050/* Shifting from/to a cloned skb is a no-go.
3051 *
3052 * Caller cannot keep skb_shinfo related pointers past calling here!
3053 */
3054static int skb_prepare_for_shift(struct sk_buff *skb)
3055{
3056 return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3057}
3058
3059/**
3060 * skb_shift - Shifts paged data partially from skb to another
3061 * @tgt: buffer into which tail data gets added
3062 * @skb: buffer from which the paged data comes from
3063 * @shiftlen: shift up to this many bytes
3064 *
3065 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3066 * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
3067 * It's up to the caller to free skb if everything was shifted.
3068 *
3069 * If @tgt runs out of frags, the whole operation is aborted.
3070 *
3071 * The skb may contain nothing but paged data, while tgt is allowed
3072 * to have non-paged data as well.
3073 *
3074 * TODO: a full-sized shift could be optimized, but that would need a
3075 * specialized skb freeing routine to handle frags without an up-to-date nr_frags.
3076 */
3077int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3078{
3079 int from, to, merge, todo;
3080 struct skb_frag_struct *fragfrom, *fragto;
3081
3082 BUG_ON(shiftlen > skb->len);
3083
3084 if (skb_headlen(skb))
3085 return 0;
3086 if (skb_zcopy(tgt) || skb_zcopy(skb))
3087 return 0;
3088
3089 todo = shiftlen;
3090 from = 0;
3091 to = skb_shinfo(tgt)->nr_frags;
3092 fragfrom = &skb_shinfo(skb)->frags[from];
3093
3094 /* Actual merge is delayed until the point when we know we can
3095 * commit all, so that we don't have to undo partial changes
3096 */
3097 if (!to ||
3098 !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3099 fragfrom->page_offset)) {
3100 merge = -1;
3101 } else {
3102 merge = to - 1;
3103
3104 todo -= skb_frag_size(fragfrom);
3105 if (todo < 0) {
3106 if (skb_prepare_for_shift(skb) ||
3107 skb_prepare_for_shift(tgt))
3108 return 0;
3109
3110 /* All previous frag pointers might be stale! */
3111 fragfrom = &skb_shinfo(skb)->frags[from];
3112 fragto = &skb_shinfo(tgt)->frags[merge];
3113
3114 skb_frag_size_add(fragto, shiftlen);
3115 skb_frag_size_sub(fragfrom, shiftlen);
3116 fragfrom->page_offset += shiftlen;
3117
3118 goto onlymerged;
3119 }
3120
3121 from++;
3122 }
3123
3124 /* Skip full, not-fitting skb to avoid expensive operations */
3125 if ((shiftlen == skb->len) &&
3126 (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3127 return 0;
3128
3129 if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3130 return 0;
3131
3132 while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3133 if (to == MAX_SKB_FRAGS)
3134 return 0;
3135
3136 fragfrom = &skb_shinfo(skb)->frags[from];
3137 fragto = &skb_shinfo(tgt)->frags[to];
3138
3139 if (todo >= skb_frag_size(fragfrom)) {
3140 *fragto = *fragfrom;
3141 todo -= skb_frag_size(fragfrom);
3142 from++;
3143 to++;
3144
3145 } else {
3146 __skb_frag_ref(fragfrom);
3147 fragto->page = fragfrom->page;
3148 fragto->page_offset = fragfrom->page_offset;
3149 skb_frag_size_set(fragto, todo);
3150
3151 fragfrom->page_offset += todo;
3152 skb_frag_size_sub(fragfrom, todo);
3153 todo = 0;
3154
3155 to++;
3156 break;
3157 }
3158 }
3159
3160 /* Ready to "commit" this state change to tgt */
3161 skb_shinfo(tgt)->nr_frags = to;
3162
3163 if (merge >= 0) {
3164 fragfrom = &skb_shinfo(skb)->frags[0];
3165 fragto = &skb_shinfo(tgt)->frags[merge];
3166
3167 skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3168 __skb_frag_unref(fragfrom);
3169 }
3170
3171 /* Reposition in the original skb */
3172 to = 0;
3173 while (from < skb_shinfo(skb)->nr_frags)
3174 skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3175 skb_shinfo(skb)->nr_frags = to;
3176
3177 BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3178
3179onlymerged:
3180 /* Most likely the tgt won't ever need its checksum anymore; the skb,
3181 * on the other hand, might need it if it has to be retransmitted.
3182 */
3183 tgt->ip_summed = CHECKSUM_PARTIAL;
3184 skb->ip_summed = CHECKSUM_PARTIAL;
3185
3186 /* Yak, is it really working this way? Some helper please? */
3187 skb->len -= shiftlen;
3188 skb->data_len -= shiftlen;
3189 skb->truesize -= shiftlen;
3190 tgt->len += shiftlen;
3191 tgt->data_len += shiftlen;
3192 tgt->truesize += shiftlen;
3193
3194 return shiftlen;
3195}
3196
3197/**
3198 * skb_prepare_seq_read - Prepare a sequential read of skb data
3199 * @skb: the buffer to read
3200 * @from: lower offset of data to be read
3201 * @to: upper offset of data to be read
3202 * @st: state variable
3203 *
3204 * Initializes the specified state variable. Must be called before
3205 * invoking skb_seq_read() for the first time.
3206 */
3207void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3208 unsigned int to, struct skb_seq_state *st)
3209{
3210 st->lower_offset = from;
3211 st->upper_offset = to;
3212 st->root_skb = st->cur_skb = skb;
3213 st->frag_idx = st->stepped_offset = 0;
3214 st->frag_data = NULL;
3215}
3216EXPORT_SYMBOL(skb_prepare_seq_read);
3217
3218/**
3219 * skb_seq_read - Sequentially read skb data
3220 * @consumed: number of bytes consumed by the caller so far
3221 * @data: destination pointer for data to be returned
3222 * @st: state variable
3223 *
3224 * Reads a block of skb data at @consumed relative to the
3225 * lower offset specified to skb_prepare_seq_read(). Assigns
3226 * the head of the data block to @data and returns the length
3227 * of the block or 0 if the end of the skb data or the upper
3228 * offset has been reached.
3229 *
3230 * The caller is not required to consume all of the data
3231 * returned, i.e. @consumed is typically set to the number
3232 * of bytes already consumed and the next call to
3233 * skb_seq_read() will return the remaining part of the block.
3234 *
3235 * Note 1: The size of each block of data returned can be arbitrary;
3236 * this limitation is the cost of zerocopy sequential
3237 * reads of potentially non-linear data.
3238 *
3239 * Note 2: Fragment lists within fragments are not implemented
3240 * at the moment, state->root_skb could be replaced with
3241 * a stack for this purpose.
3242 */
3243unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3244 struct skb_seq_state *st)
3245{
3246 unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3247 skb_frag_t *frag;
3248
3249 if (unlikely(abs_offset >= st->upper_offset)) {
3250 if (st->frag_data) {
3251 kunmap_atomic(st->frag_data);
3252 st->frag_data = NULL;
3253 }
3254 return 0;
3255 }
3256
3257next_skb:
3258 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3259
3260 if (abs_offset < block_limit && !st->frag_data) {
3261 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3262 return block_limit - abs_offset;
3263 }
3264
3265 if (st->frag_idx == 0 && !st->frag_data)
3266 st->stepped_offset += skb_headlen(st->cur_skb);
3267
3268 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3269 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3270 block_limit = skb_frag_size(frag) + st->stepped_offset;
3271
3272 if (abs_offset < block_limit) {
3273 if (!st->frag_data)
3274 st->frag_data = kmap_atomic(skb_frag_page(frag));
3275
3276 *data = (u8 *) st->frag_data + frag->page_offset +
3277 (abs_offset - st->stepped_offset);
3278
3279 return block_limit - abs_offset;
3280 }
3281
3282 if (st->frag_data) {
3283 kunmap_atomic(st->frag_data);
3284 st->frag_data = NULL;
3285 }
3286
3287 st->frag_idx++;
3288 st->stepped_offset += skb_frag_size(frag);
3289 }
3290
3291 if (st->frag_data) {
3292 kunmap_atomic(st->frag_data);
3293 st->frag_data = NULL;
3294 }
3295
3296 if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3297 st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3298 st->frag_idx = 0;
3299 goto next_skb;
3300 } else if (st->cur_skb->next) {
3301 st->cur_skb = st->cur_skb->next;
3302 st->frag_idx = 0;
3303 goto next_skb;
3304 }
3305
3306 return 0;
3307}
3308EXPORT_SYMBOL(skb_seq_read);
3309
3310/**
3311 * skb_abort_seq_read - Abort a sequential read of skb data
3312 * @st: state variable
3313 *
3314 * Must be called if skb_seq_read() was not called until it
3315 * returned 0.
3316 */
3317void skb_abort_seq_read(struct skb_seq_state *st)
3318{
3319 if (st->frag_data)
3320 kunmap_atomic(st->frag_data);
3321}
3322EXPORT_SYMBOL(skb_abort_seq_read);
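/* Usage sketch (illustrative only): walk all bytes of an skb without copying
 * them, using the sequential read API above. "scan_block()" is an assumed
 * callback that may stop the walk early by returning non-zero.
 *
 *	struct skb_seq_state st;
 *	const u8 *data;
 *	unsigned int consumed = 0, len;
 *	int stop = 0;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while (!stop && (len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		stop = scan_block(data, len);
 *		consumed += len;
 *	}
 *	if (stop)
 *		skb_abort_seq_read(&st);
 */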
3323
3324#define TS_SKB_CB(state) ((struct skb_seq_state *) &((state)->cb))
3325
3326static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3327 struct ts_config *conf,
3328 struct ts_state *state)
3329{
3330 return skb_seq_read(offset, text, TS_SKB_CB(state));
3331}
3332
3333static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3334{
3335 skb_abort_seq_read(TS_SKB_CB(state));
3336}
3337
3338/**
3339 * skb_find_text - Find a text pattern in skb data
3340 * @skb: the buffer to look in
3341 * @from: search offset
3342 * @to: search limit
3343 * @config: textsearch configuration
3344 *
3345 * Finds a pattern in the skb data according to the specified
3346 * textsearch configuration. Use textsearch_next() to retrieve
3347 * subsequent occurrences of the pattern. Returns the offset
3348 * to the first occurrence or UINT_MAX if no match was found.
3349 */
3350unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3351 unsigned int to, struct ts_config *config)
3352{
3353 struct ts_state state;
3354 unsigned int ret;
3355
3356 config->get_next_block = skb_ts_get_next_block;
3357 config->finish = skb_ts_finish;
3358
3359 skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3360
3361 ret = textsearch_find(config, &state);
3362 return (ret <= to - from ? ret : UINT_MAX);
3363}
3364EXPORT_SYMBOL(skb_find_text);
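/* Usage sketch (illustrative only): search an skb for a fixed byte pattern,
 * roughly as the xt_string netfilter match does. The "needle" pattern and
 * the choice of the "kmp" algorithm are assumptions for the example.
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("kmp", needle, strlen(needle),
 *				  GFP_KERNEL, TS_AUTOLOAD);
 *	if (IS_ERR(conf))
 *		return PTR_ERR(conf);
 *	pos = skb_find_text(skb, 0, skb->len, conf);
 *	textsearch_destroy(conf);
 *	return pos != UINT_MAX;
 */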
3365
3366/**
3367 * skb_append_datato_frags - append the user data to a skb
3368 * @sk: sock structure
3369 * @skb: skb structure to be appended with user data.
3370 * @getfrag: call back function to be used for getting the user data
3371 * @from: pointer to user message iov
3372 * @length: length of the iov message
3373 *
3374 * Description: This procedure appends the user data to the fragment part
3375 * of the skb. If any page allocation fails, it returns -ENOMEM.
3376 */
3377int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
3378 int (*getfrag)(void *from, char *to, int offset,
3379 int len, int odd, struct sk_buff *skb),
3380 void *from, int length)
3381{
3382 int frg_cnt = skb_shinfo(skb)->nr_frags;
3383 int copy;
3384 int offset = 0;
3385 int ret;
3386 struct page_frag *pfrag = &current->task_frag;
3387
3388 do {
3389 /* Return error if we don't have space for new frag */
3390 if (frg_cnt >= MAX_SKB_FRAGS)
3391 return -EMSGSIZE;
3392
3393 if (!sk_page_frag_refill(sk, pfrag))
3394 return -ENOMEM;
3395
3396 /* copy the user data to page */
3397 copy = min_t(int, length, pfrag->size - pfrag->offset);
3398
3399 ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
3400 offset, copy, 0, skb);
3401 if (ret < 0)
3402 return -EFAULT;
3403
3404 /* copy was successful so update the size parameters */
3405 skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
3406 copy);
3407 frg_cnt++;
3408 pfrag->offset += copy;
3409 get_page(pfrag->page);
3410
3411 skb->truesize += copy;
3412 refcount_add(copy, &sk->sk_wmem_alloc);
3413 skb->len += copy;
3414 skb->data_len += copy;
3415 offset += copy;
3416 length -= copy;
3417
3418 } while (length > 0);
3419
3420 return 0;
3421}
3422EXPORT_SYMBOL(skb_append_datato_frags);
3423
3424int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3425 int offset, size_t size)
3426{
3427 int i = skb_shinfo(skb)->nr_frags;
3428
3429 if (skb_can_coalesce(skb, i, page, offset)) {
3430 skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3431 } else if (i < MAX_SKB_FRAGS) {
3432 get_page(page);
3433 skb_fill_page_desc(skb, i, page, offset, size);
3434 } else {
3435 return -EMSGSIZE;
3436 }
3437
3438 return 0;
3439}
3440EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3441
3442/**
3443 * skb_pull_rcsum - pull skb and update receive checksum
3444 * @skb: buffer to update
3445 * @len: length of data pulled
3446 *
3447 * This function performs an skb_pull on the packet and updates
3448 * the CHECKSUM_COMPLETE checksum. It should be used on
3449 * receive path processing instead of skb_pull unless you know
3450 * that the checksum difference is zero (e.g., a valid IP header)
3451 * or you are setting ip_summed to CHECKSUM_NONE.
3452 */
3453void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3454{
3455 unsigned char *data = skb->data;
3456
3457 BUG_ON(len > skb->len);
3458 __skb_pull(skb, len);
3459 skb_postpull_rcsum(skb, data, len);
3460 return skb->data;
3461}
3462EXPORT_SYMBOL_GPL(skb_pull_rcsum);
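/* Usage sketch (illustrative only): strip an encapsulation header of
 * "hdr_len" bytes on the receive path while keeping a CHECKSUM_COMPLETE
 * value correct. "hdr_len" and the "drop" label are assumptions for the
 * example.
 *
 *	if (!pskb_may_pull(skb, hdr_len))
 *		goto drop;
 *	skb_pull_rcsum(skb, hdr_len);
 *
 * A plain skb_pull() here would leave skb->csum still covering the
 * removed bytes.
 */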
3463
3464static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3465{
3466 skb_frag_t head_frag;
3467 struct page *page;
3468
3469 page = virt_to_head_page(frag_skb->head);
3470 head_frag.page.p = page;
3471 head_frag.page_offset = frag_skb->data -
3472 (unsigned char *)page_address(page);
3473 head_frag.size = skb_headlen(frag_skb);
3474 return head_frag;
3475}
3476
3477/**
3478 * skb_segment - Perform protocol segmentation on skb.
3479 * @head_skb: buffer to segment
3480 * @features: features for the output path (see dev->features)
3481 *
3482 * This function performs segmentation on the given skb. It returns
3483 * a pointer to the first in a list of new skbs for the segments.
3484 * In case of error it returns ERR_PTR(err).
3485 */
3486struct sk_buff *skb_segment(struct sk_buff *head_skb,
3487 netdev_features_t features)
3488{
3489 struct sk_buff *segs = NULL;
3490 struct sk_buff *tail = NULL;
3491 struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3492 skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3493 unsigned int mss = skb_shinfo(head_skb)->gso_size;
3494 unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3495 struct sk_buff *frag_skb = head_skb;
3496 unsigned int offset = doffset;
3497 unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3498 unsigned int partial_segs = 0;
3499 unsigned int headroom;
3500 unsigned int len = head_skb->len;
3501 __be16 proto;
3502 bool csum, sg;
3503 int nfrags = skb_shinfo(head_skb)->nr_frags;
3504 int err = -ENOMEM;
3505 int i = 0;
3506 int pos;
3507 int dummy;
3508
3509 __skb_push(head_skb, doffset);
3510 proto = skb_network_protocol(head_skb, &dummy);
3511 if (unlikely(!proto))
3512 return ERR_PTR(-EINVAL);
3513
3514 sg = !!(features & NETIF_F_SG);
3515 csum = !!can_checksum_protocol(features, proto);
3516
3517 if (sg && csum && (mss != GSO_BY_FRAGS)) {
3518 if (!(features & NETIF_F_GSO_PARTIAL)) {
3519 struct sk_buff *iter;
3520 unsigned int frag_len;
3521
3522 if (!list_skb ||
3523 !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3524 goto normal;
3525
3526 /* If we get here then all the required
3527 * GSO features except frag_list are supported.
3528 * Try to split the SKB to multiple GSO SKBs
3529 * with no frag_list.
3530 * Currently we can do that only when the buffers don't
3531 * have a linear part and all the buffers except
3532 * the last are of the same length.
3533 */
3534 frag_len = list_skb->len;
3535 skb_walk_frags(head_skb, iter) {
3536 if (frag_len != iter->len && iter->next)
3537 goto normal;
3538 if (skb_headlen(iter) && !iter->head_frag)
3539 goto normal;
3540
3541 len -= iter->len;
3542 }
3543
3544 if (len != frag_len)
3545 goto normal;
3546 }
3547
3548 /* GSO partial only requires that we trim off any excess that
3549 * doesn't fit into an MSS sized block, so take care of that
3550 * now.
3551 */
3552 partial_segs = len / mss;
3553 if (partial_segs > 1)
3554 mss *= partial_segs;
3555 else
3556 partial_segs = 0;
3557 }
3558
3559normal:
3560 headroom = skb_headroom(head_skb);
3561 pos = skb_headlen(head_skb);
3562
3563 do {
3564 struct sk_buff *nskb;
3565 skb_frag_t *nskb_frag;
3566 int hsize;
3567 int size;
3568
3569 if (unlikely(mss == GSO_BY_FRAGS)) {
3570 len = list_skb->len;
3571 } else {
3572 len = head_skb->len - offset;
3573 if (len > mss)
3574 len = mss;
3575 }
3576
3577 hsize = skb_headlen(head_skb) - offset;
3578 if (hsize < 0)
3579 hsize = 0;
3580 if (hsize > len || !sg)
3581 hsize = len;
3582
3583 if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3584 (skb_headlen(list_skb) == len || sg)) {
3585 BUG_ON(skb_headlen(list_skb) > len);
3586
3587 i = 0;
3588 nfrags = skb_shinfo(list_skb)->nr_frags;
3589 frag = skb_shinfo(list_skb)->frags;
3590 frag_skb = list_skb;
3591 pos += skb_headlen(list_skb);
3592
3593 while (pos < offset + len) {
3594 BUG_ON(i >= nfrags);
3595
3596 size = skb_frag_size(frag);
3597 if (pos + size > offset + len)
3598 break;
3599
3600 i++;
3601 pos += size;
3602 frag++;
3603 }
3604
3605 nskb = skb_clone(list_skb, GFP_ATOMIC);
3606 list_skb = list_skb->next;
3607
3608 if (unlikely(!nskb))
3609 goto err;
3610
3611 if (unlikely(pskb_trim(nskb, len))) {
3612 kfree_skb(nskb);
3613 goto err;
3614 }
3615
3616 hsize = skb_end_offset(nskb);
3617 if (skb_cow_head(nskb, doffset + headroom)) {
3618 kfree_skb(nskb);
3619 goto err;
3620 }
3621
3622 nskb->truesize += skb_end_offset(nskb) - hsize;
3623 skb_release_head_state(nskb);
3624 __skb_push(nskb, doffset);
3625 } else {
3626 nskb = __alloc_skb(hsize + doffset + headroom,
3627 GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3628 NUMA_NO_NODE);
3629
3630 if (unlikely(!nskb))
3631 goto err;
3632
3633 skb_reserve(nskb, headroom);
3634 __skb_put(nskb, doffset);
3635 }
3636
3637 if (segs)
3638 tail->next = nskb;
3639 else
3640 segs = nskb;
3641 tail = nskb;
3642
3643 __copy_skb_header(nskb, head_skb);
3644
3645 skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3646 skb_reset_mac_len(nskb);
3647
3648 skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3649 nskb->data - tnl_hlen,
3650 doffset + tnl_hlen);
3651
3652 if (nskb->len == len + doffset)
3653 goto perform_csum_check;
3654
3655 if (!sg) {
3656 if (!nskb->remcsum_offload)
3657 nskb->ip_summed = CHECKSUM_NONE;
3658 SKB_GSO_CB(nskb)->csum =
3659 skb_copy_and_csum_bits(head_skb, offset,
3660 skb_put(nskb, len),
3661 len, 0);
3662 SKB_GSO_CB(nskb)->csum_start =
3663 skb_headroom(nskb) + doffset;
3664 continue;
3665 }
3666
3667 nskb_frag = skb_shinfo(nskb)->frags;
3668
3669 skb_copy_from_linear_data_offset(head_skb, offset,
3670 skb_put(nskb, hsize), hsize);
3671
3672 skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3673 SKBTX_SHARED_FRAG;
3674
3675 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3676 skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3677 goto err;
3678
3679 while (pos < offset + len) {
3680 if (i >= nfrags) {
3681 i = 0;
3682 nfrags = skb_shinfo(list_skb)->nr_frags;
3683 frag = skb_shinfo(list_skb)->frags;
3684 frag_skb = list_skb;
3685 if (!skb_headlen(list_skb)) {
3686 BUG_ON(!nfrags);
3687 } else {
3688 BUG_ON(!list_skb->head_frag);
3689
3690 /* to make room for head_frag. */
3691 i--;
3692 frag--;
3693 }
3694 if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3695 skb_zerocopy_clone(nskb, frag_skb,
3696 GFP_ATOMIC))
3697 goto err;
3698
3699 list_skb = list_skb->next;
3700 }
3701
3702 if (unlikely(skb_shinfo(nskb)->nr_frags >=
3703 MAX_SKB_FRAGS)) {
3704 net_warn_ratelimited(
3705 "skb_segment: too many frags: %u %u\n",
3706 pos, mss);
3707 goto err;
3708 }
3709
3710 *nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
3711 __skb_frag_ref(nskb_frag);
3712 size = skb_frag_size(nskb_frag);
3713
3714 if (pos < offset) {
3715 nskb_frag->page_offset += offset - pos;
3716 skb_frag_size_sub(nskb_frag, offset - pos);
3717 }
3718
3719 skb_shinfo(nskb)->nr_frags++;
3720
3721 if (pos + size <= offset + len) {
3722 i++;
3723 frag++;
3724 pos += size;
3725 } else {
3726 skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3727 goto skip_fraglist;
3728 }
3729
3730 nskb_frag++;
3731 }
3732
3733skip_fraglist:
3734 nskb->data_len = len - hsize;
3735 nskb->len += nskb->data_len;
3736 nskb->truesize += nskb->data_len;
3737
3738perform_csum_check:
3739 if (!csum) {
3740 if (skb_has_shared_frag(nskb)) {
3741 err = __skb_linearize(nskb);
3742 if (err)
3743 goto err;
3744 }
3745 if (!nskb->remcsum_offload)
3746 nskb->ip_summed = CHECKSUM_NONE;
3747 SKB_GSO_CB(nskb)->csum =
3748 skb_checksum(nskb, doffset,
3749 nskb->len - doffset, 0);
3750 SKB_GSO_CB(nskb)->csum_start =
3751 skb_headroom(nskb) + doffset;
3752 }
3753 } while ((offset += len) < head_skb->len);
3754
3755 /* Some callers want to get the end of the list.
3756 * Put it in segs->prev to avoid walking the list.
3757 * (see validate_xmit_skb_list() for example)
3758 */
3759 segs->prev = tail;
3760
3761 if (partial_segs) {
3762 struct sk_buff *iter;
3763 int type = skb_shinfo(head_skb)->gso_type;
3764 unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3765
3766 /* Update type to add partial and then remove dodgy if set */
3767 type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3768 type &= ~SKB_GSO_DODGY;
3769
3770 /* Update GSO info and prepare to start updating headers on
3771 * our way back down the stack of protocols.
3772 */
3773 for (iter = segs; iter; iter = iter->next) {
3774 skb_shinfo(iter)->gso_size = gso_size;
3775 skb_shinfo(iter)->gso_segs = partial_segs;
3776 skb_shinfo(iter)->gso_type = type;
3777 SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
3778 }
3779
3780 if (tail->len - doffset <= gso_size)
3781 skb_shinfo(tail)->gso_size = 0;
3782 else if (tail != segs)
3783 skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3784 }
3785
3786 /* The following permits correct backpressure for protocols
3787 * using skb_set_owner_w().
3788 * The idea is to transfer ownership from head_skb to the last segment.
3789 */
3790 if (head_skb->destructor == sock_wfree) {
3791 swap(tail->truesize, head_skb->truesize);
3792 swap(tail->destructor, head_skb->destructor);
3793 swap(tail->sk, head_skb->sk);
3794 }
3795 return segs;
3796
3797err:
3798 kfree_skb_list(segs);
3799 return ERR_PTR(err);
3800}
3801EXPORT_SYMBOL_GPL(skb_segment);
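/* Usage sketch (illustrative only): most callers reach this through
 * skb_gso_segment() and the per-protocol gso_segment callbacks rather than
 * calling it directly. A minimal direct use could look like the following,
 * assuming skb->data has already been pushed back to the mac header (as
 * skb_mac_gso_segment() arranges), "features" describes the output device,
 * and xmit_one() is an assumed transmit helper.
 *
 *	struct sk_buff *segs, *seg;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return PTR_ERR(segs);
 *	consume_skb(skb);
 *	while (segs) {
 *		seg = segs;
 *		segs = segs->next;
 *		seg->next = NULL;
 *		xmit_one(seg);
 *	}
 */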
3802
3803int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3804{
3805 struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3806 unsigned int offset = skb_gro_offset(skb);
3807 unsigned int headlen = skb_headlen(skb);
3808 unsigned int len = skb_gro_len(skb);
3809 struct sk_buff *lp, *p = *head;
3810 unsigned int delta_truesize;
3811
3812 if (unlikely(p->len + len >= 65536))
3813 return -E2BIG;
3814
3815 lp = NAPI_GRO_CB(p)->last;
3816 pinfo = skb_shinfo(lp);
3817
3818 if (headlen <= offset) {
3819 skb_frag_t *frag;
3820 skb_frag_t *frag2;
3821 int i = skbinfo->nr_frags;
3822 int nr_frags = pinfo->nr_frags + i;
3823
3824 if (nr_frags > MAX_SKB_FRAGS)
3825 goto merge;
3826
3827 offset -= headlen;
3828 pinfo->nr_frags = nr_frags;
3829 skbinfo->nr_frags = 0;
3830
3831 frag = pinfo->frags + nr_frags;
3832 frag2 = skbinfo->frags + i;
3833 do {
3834 *--frag = *--frag2;
3835 } while (--i);
3836
3837 frag->page_offset += offset;
3838 skb_frag_size_sub(frag, offset);
3839
3840 /* all fragments truesize : remove (head size + sk_buff) */
3841 delta_truesize = skb->truesize -
3842 SKB_TRUESIZE(skb_end_offset(skb));
3843
3844 skb->truesize -= skb->data_len;
3845 skb->len -= skb->data_len;
3846 skb->data_len = 0;
3847
3848 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3849 goto done;
3850 } else if (skb->head_frag) {
3851 int nr_frags = pinfo->nr_frags;
3852 skb_frag_t *frag = pinfo->frags + nr_frags;
3853 struct page *page = virt_to_head_page(skb->head);
3854 unsigned int first_size = headlen - offset;
3855 unsigned int first_offset;
3856
3857 if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3858 goto merge;
3859
3860 first_offset = skb->data -
3861 (unsigned char *)page_address(page) +
3862 offset;
3863
3864 pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3865
3866 frag->page.p = page;
3867 frag->page_offset = first_offset;
3868 skb_frag_size_set(frag, first_size);
3869
3870 memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3871 /* We don't need to clear skbinfo->nr_frags here */
3872
3873 delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3874 NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3875 goto done;
3876 }
3877
3878merge:
3879 delta_truesize = skb->truesize;
3880 if (offset > headlen) {
3881 unsigned int eat = offset - headlen;
3882
3883 skbinfo->frags[0].page_offset += eat;
3884 skb_frag_size_sub(&skbinfo->frags[0], eat);
3885 skb->data_len -= eat;
3886 skb->len -= eat;
3887 offset = headlen;
3888 }
3889
3890 __skb_pull(skb, offset);
3891
3892 if (NAPI_GRO_CB(p)->last == p)
3893 skb_shinfo(p)->frag_list = skb;
3894 else
3895 NAPI_GRO_CB(p)->last->next = skb;
3896 NAPI_GRO_CB(p)->last = skb;
3897 __skb_header_release(skb);
3898 lp = p;
3899
3900done:
3901 NAPI_GRO_CB(p)->count++;
3902 p->data_len += len;
3903 p->truesize += delta_truesize;
3904 p->len += len;
3905 if (lp != p) {
3906 lp->data_len += len;
3907 lp->truesize += delta_truesize;
3908 lp->len += len;
3909 }
3910 NAPI_GRO_CB(skb)->same_flow = 1;
3911 return 0;
3912}
3913EXPORT_SYMBOL_GPL(skb_gro_receive);
3914
3915void __init skb_init(void)
3916{
3917 skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
3918 sizeof(struct sk_buff),
3919 0,
3920 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3921 offsetof(struct sk_buff, cb),
3922 sizeof_field(struct sk_buff, cb),
3923 NULL);
3924 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3925 sizeof(struct sk_buff_fclones),
3926 0,
3927 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3928 NULL);
3929}
3930
3931static int
3932__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
3933 unsigned int recursion_level)
3934{
3935 int start = skb_headlen(skb);
3936 int i, copy = start - offset;
3937 struct sk_buff *frag_iter;
3938 int elt = 0;
3939
3940 if (unlikely(recursion_level >= 24))
3941 return -EMSGSIZE;
3942
3943 if (copy > 0) {
3944 if (copy > len)
3945 copy = len;
3946 sg_set_buf(sg, skb->data + offset, copy);
3947 elt++;
3948 if ((len -= copy) == 0)
3949 return elt;
3950 offset += copy;
3951 }
3952
3953 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3954 int end;
3955
3956 WARN_ON(start > offset + len);
3957
3958 end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3959 if ((copy = end - offset) > 0) {
3960 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3961 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3962 return -EMSGSIZE;
3963
3964 if (copy > len)
3965 copy = len;
3966 sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3967 frag->page_offset+offset-start);
3968 elt++;
3969 if (!(len -= copy))
3970 return elt;
3971 offset += copy;
3972 }
3973 start = end;
3974 }
3975
3976 skb_walk_frags(skb, frag_iter) {
3977 int end, ret;
3978
3979 WARN_ON(start > offset + len);
3980
3981 end = start + frag_iter->len;
3982 if ((copy = end - offset) > 0) {
3983 if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3984 return -EMSGSIZE;
3985
3986 if (copy > len)
3987 copy = len;
3988 ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3989 copy, recursion_level + 1);
3990 if (unlikely(ret < 0))
3991 return ret;
3992 elt += ret;
3993 if ((len -= copy) == 0)
3994 return elt;
3995 offset += copy;
3996 }
3997 start = end;
3998 }
3999 BUG_ON(len);
4000 return elt;
4001}
4002
4003/**
4004 * skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4005 * @skb: Socket buffer containing the buffers to be mapped
4006 * @sg: The scatter-gather list to map into
4007 * @offset: The offset into the buffer's contents to start mapping
4008 * @len: Length of buffer space to be mapped
4009 *
4010 * Fill the specified scatter-gather list with mappings/pointers into a
4011 * region of the buffer space attached to a socket buffer. Returns either
4012 * the number of scatterlist items used, or -EMSGSIZE if the contents
4013 * could not fit.
4014 */
4015int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4016{
4017 int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4018
4019 if (nsg <= 0)
4020 return nsg;
4021
4022 sg_mark_end(&sg[nsg - 1]);
4023
4024 return nsg;
4025}
4026EXPORT_SYMBOL_GPL(skb_to_sgvec);
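/* Usage sketch (illustrative only): map an skb's data into a scatterlist,
 * e.g. to feed it to a crypto or DMA engine. MAX_SG is an assumed upper
 * bound on the number of entries the caller can handle.
 *
 *	struct scatterlist sg[MAX_SG];
 *	int nsg;
 *
 *	sg_init_table(sg, MAX_SG);
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 */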
4027
4028 /* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4029 * given sglist without marking the sg which contains the last skb data as the end.
4030 * So the caller can manipulate the sg list at will when appending new data after
4031 * the first call, without calling sg_unmark_end to extend the sg list.
4032 *
4033 * Scenario to use skb_to_sgvec_nomark:
4034 * 1. sg_init_table
4035 * 2. skb_to_sgvec_nomark(payload1)
4036 * 3. skb_to_sgvec_nomark(payload2)
4037 *
4038 * This is equivalent to:
4039 * 1. sg_init_table
4040 * 2. skb_to_sgvec(payload1)
4041 * 3. sg_unmark_end
4042 * 4. skb_to_sgvec(payload2)
4043 *
4044 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4045 * is preferable.
4046 */
4047int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4048 int offset, int len)
4049{
4050 return __skb_to_sgvec(skb, sg, offset, len, 0);
4051}
4052EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4053
4054
4055
4056/**
4057 * skb_cow_data - Check that a socket buffer's data buffers are writable
4058 * @skb: The socket buffer to check.
4059 * @tailbits: Amount of trailing space to be added
4060 * @trailer: Returned pointer to the skb where the @tailbits space begins
4061 *
4062 * Make sure that the data buffers attached to a socket buffer are
4063 * writable. If they are not, private copies are made of the data buffers
4064 * and the socket buffer is set to use these instead.
4065 *
4066 * If @tailbits is given, make sure that there is space to write @tailbits
4067 * bytes of data beyond current end of socket buffer. @trailer will be
4068 * set to point to the skb in which this space begins.
4069 *
4070 * The number of scatterlist elements required to completely map the
4071 * COW'd and extended socket buffer will be returned.
4072 */
4073int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4074{
4075 int copyflag;
4076 int elt;
4077 struct sk_buff *skb1, **skb_p;
4078
4079 /* If skb is cloned or its head is paged, reallocate
4080 * head pulling out all the pages (pages are considered not writable
4081 * at the moment even if they are anonymous).
4082 */
4083 if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4084 __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4085 return -ENOMEM;
4086
/* Easy case. Most packets will go this way. */
4088 if (!skb_has_frag_list(skb)) {
/* A little trouble: not enough space for the trailer.
 * This should not happen when the stack is tuned to generate
 * good frames. OK, on a miss we reallocate and reserve even more
 * space; 128 bytes is fair. */
4093
4094 if (skb_tailroom(skb) < tailbits &&
4095 pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4096 return -ENOMEM;
4097
4098 /* Voila! */
4099 *trailer = skb;
4100 return 1;
4101 }
4102
/* Misery. We are in trouble; going to mince the fragments... */
4104
4105 elt = 1;
4106 skb_p = &skb_shinfo(skb)->frag_list;
4107 copyflag = 0;
4108
4109 while ((skb1 = *skb_p) != NULL) {
4110 int ntail = 0;
4111
/* The fragment is partially pulled by someone;
 * this can happen on input. Copy it and everything
 * after it. */
4115
4116 if (skb_shared(skb1))
4117 copyflag = 1;
4118
4119 /* If the skb is the last, worry about trailer. */
4120
4121 if (skb1->next == NULL && tailbits) {
4122 if (skb_shinfo(skb1)->nr_frags ||
4123 skb_has_frag_list(skb1) ||
4124 skb_tailroom(skb1) < tailbits)
4125 ntail = tailbits + 128;
4126 }
4127
4128 if (copyflag ||
4129 skb_cloned(skb1) ||
4130 ntail ||
4131 skb_shinfo(skb1)->nr_frags ||
4132 skb_has_frag_list(skb1)) {
4133 struct sk_buff *skb2;
4134
/* Worst case: we have no choice but to copy. */
4136 if (ntail == 0)
4137 skb2 = skb_copy(skb1, GFP_ATOMIC);
4138 else
4139 skb2 = skb_copy_expand(skb1,
4140 skb_headroom(skb1),
4141 ntail,
4142 GFP_ATOMIC);
4143 if (unlikely(skb2 == NULL))
4144 return -ENOMEM;
4145
4146 if (skb1->sk)
4147 skb_set_owner_w(skb2, skb1->sk);
4148
4149 /* Looking around. Are we still alive?
4150 * OK, link new skb, drop old one */
4151
4152 skb2->next = skb1->next;
4153 *skb_p = skb2;
4154 kfree_skb(skb1);
4155 skb1 = skb2;
4156 }
4157 elt++;
4158 *trailer = skb1;
4159 skb_p = &skb1->next;
4160 }
4161
4162 return elt;
4163}
4164EXPORT_SYMBOL_GPL(skb_cow_data);
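
/* Example (illustrative sketch of an IPsec-style transmit path): make the
 * buffer writable with room for a trailer, then map the whole thing. The
 * "tailbits" value and the "sg" scatterlist allocation are the caller's own:
 *
 *	struct sk_buff *trailer;
 *	int nfrags;
 *
 *	nfrags = skb_cow_data(skb, tailbits, &trailer);
 *	if (nfrags < 0)
 *		return nfrags;
 *	pskb_put(skb, trailer, tailbits);	(append the trailer bytes)
 *	sg_init_table(sg, nfrags);
 *	if (skb_to_sgvec(skb, sg, 0, skb->len) < 0)
 *		return -EMSGSIZE;
 *
 * As documented above, the skb_cow_data() return value is the number of
 * scatterlist entries needed to map the COW'd and extended buffer.
 */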
4165
4166static void sock_rmem_free(struct sk_buff *skb)
4167{
4168 struct sock *sk = skb->sk;
4169
4170 atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4171}
4172
4173static void skb_set_err_queue(struct sk_buff *skb)
4174{
4175 /* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4176 * So, it is safe to (mis)use it to mark skbs on the error queue.
4177 */
4178 skb->pkt_type = PACKET_OUTGOING;
4179 BUILD_BUG_ON(PACKET_OUTGOING == 0);
4180}
4181
4182/*
4183 * Note: We dont mem charge error packets (no sk_forward_alloc changes)
4184 */
4185int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4186{
4187 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4188 (unsigned int)sk->sk_rcvbuf)
4189 return -ENOMEM;
4190
4191 skb_orphan(skb);
4192 skb->sk = sk;
4193 skb->destructor = sock_rmem_free;
4194 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4195 skb_set_err_queue(skb);
4196
4197 /* before exiting rcu section, make sure dst is refcounted */
4198 skb_dst_force(skb);
4199
4200 skb_queue_tail(&sk->sk_error_queue, skb);
4201 if (!sock_flag(sk, SOCK_DEAD))
4202 sk->sk_error_report(sk);
4203 return 0;
4204}
4205EXPORT_SYMBOL(sock_queue_err_skb);
4206
4207static bool is_icmp_err_skb(const struct sk_buff *skb)
4208{
4209 return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4210 SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4211}
4212
4213struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4214{
4215 struct sk_buff_head *q = &sk->sk_error_queue;
4216 struct sk_buff *skb, *skb_next = NULL;
4217 bool icmp_next = false;
4218 unsigned long flags;
4219
4220 spin_lock_irqsave(&q->lock, flags);
4221 skb = __skb_dequeue(q);
4222 if (skb && (skb_next = skb_peek(q))) {
4223 icmp_next = is_icmp_err_skb(skb_next);
4224 if (icmp_next)
4225 sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4226 }
4227 spin_unlock_irqrestore(&q->lock, flags);
4228
4229 if (is_icmp_err_skb(skb) && !icmp_next)
4230 sk->sk_err = 0;
4231
4232 if (skb_next)
4233 sk->sk_error_report(sk);
4234
4235 return skb;
4236}
4237EXPORT_SYMBOL(sock_dequeue_err_skb);
4238
4239/**
4240 * skb_clone_sk - create clone of skb, and take reference to socket
4241 * @skb: the skb to clone
4242 *
4243 * This function creates a clone of a buffer that holds a reference on
4244 * sk_refcnt. Buffers created via this function are meant to be
4245 * returned using sock_queue_err_skb, or free via kfree_skb.
4246 *
4247 * When passing buffers allocated with this function to sock_queue_err_skb
4248 * it is necessary to wrap the call with sock_hold/sock_put in order to
4249 * prevent the socket from being released prior to being enqueued on
4250 * the sk_error_queue.
4251 */
4252struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4253{
4254 struct sock *sk = skb->sk;
4255 struct sk_buff *clone;
4256
4257 if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4258 return NULL;
4259
4260 clone = skb_clone(skb, GFP_ATOMIC);
4261 if (!clone) {
4262 sock_put(sk);
4263 return NULL;
4264 }
4265
4266 clone->sk = sk;
4267 clone->destructor = sock_efree;
4268
4269 return clone;
4270}
4271EXPORT_SYMBOL(skb_clone_sk);
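
/* Example (illustrative sketch of a driver TX timestamping path): clone the
 * outgoing skb while it still holds its socket, remember the clone, and
 * complete it from the TX-completion handler once the hardware timestamp is
 * known. "priv->tstamp_skb" and "ns" are assumptions of this sketch:
 *
 *	(transmit path)
 *	priv->tstamp_skb = skb_clone_sk(skb);
 *
 *	(completion path)
 *	struct skb_shared_hwtstamps hwts = { .hwtstamp = ns_to_ktime(ns) };
 *
 *	if (priv->tstamp_skb)
 *		skb_complete_tx_timestamp(priv->tstamp_skb, &hwts);
 *
 * skb_complete_tx_timestamp() either queues the clone on the socket error
 * queue or frees it; the socket reference taken by skb_clone_sk() is dropped
 * via the clone's destructor.
 */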
4272
4273static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4274 struct sock *sk,
4275 int tstype,
4276 bool opt_stats)
4277{
4278 struct sock_exterr_skb *serr;
4279 int err;
4280
4281 BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4282
4283 serr = SKB_EXT_ERR(skb);
4284 memset(serr, 0, sizeof(*serr));
4285 serr->ee.ee_errno = ENOMSG;
4286 serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4287 serr->ee.ee_info = tstype;
4288 serr->opt_stats = opt_stats;
4289 serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4290 if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4291 serr->ee.ee_data = skb_shinfo(skb)->tskey;
4292 if (sk->sk_protocol == IPPROTO_TCP &&
4293 sk->sk_type == SOCK_STREAM)
4294 serr->ee.ee_data -= sk->sk_tskey;
4295 }
4296
4297 err = sock_queue_err_skb(sk, skb);
4298
4299 if (err)
4300 kfree_skb(skb);
4301}
4302
4303static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4304{
4305 bool ret;
4306
4307 if (likely(sysctl_tstamp_allow_data || tsonly))
4308 return true;
4309
4310 read_lock_bh(&sk->sk_callback_lock);
4311 ret = sk->sk_socket && sk->sk_socket->file &&
4312 file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4313 read_unlock_bh(&sk->sk_callback_lock);
4314 return ret;
4315}
4316
4317void skb_complete_tx_timestamp(struct sk_buff *skb,
4318 struct skb_shared_hwtstamps *hwtstamps)
4319{
4320 struct sock *sk = skb->sk;
4321
4322 if (!skb_may_tx_timestamp(sk, false))
4323 goto err;
4324
4325 /* Take a reference to prevent skb_orphan() from freeing the socket,
4326 * but only if the socket refcount is not zero.
4327 */
4328 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4329 *skb_hwtstamps(skb) = *hwtstamps;
4330 __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4331 sock_put(sk);
4332 return;
4333 }
4334
4335err:
4336 kfree_skb(skb);
4337}
4338EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4339
4340void __skb_tstamp_tx(struct sk_buff *orig_skb,
4341 struct skb_shared_hwtstamps *hwtstamps,
4342 struct sock *sk, int tstype)
4343{
4344 struct sk_buff *skb;
4345 bool tsonly, opt_stats = false;
4346
4347 if (!sk)
4348 return;
4349
4350 if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4351 skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4352 return;
4353
4354 tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4355 if (!skb_may_tx_timestamp(sk, tsonly))
4356 return;
4357
4358 if (tsonly) {
4359#ifdef CONFIG_INET
4360 if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4361 sk->sk_protocol == IPPROTO_TCP &&
4362 sk->sk_type == SOCK_STREAM) {
4363 skb = tcp_get_timestamping_opt_stats(sk);
4364 opt_stats = true;
4365 } else
4366#endif
4367 skb = alloc_skb(0, GFP_ATOMIC);
4368 } else {
4369 skb = skb_clone(orig_skb, GFP_ATOMIC);
4370 }
4371 if (!skb)
4372 return;
4373
4374 if (tsonly) {
4375 skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4376 SKBTX_ANY_TSTAMP;
4377 skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4378 }
4379
4380 if (hwtstamps)
4381 *skb_hwtstamps(skb) = *hwtstamps;
4382 else
4383 skb->tstamp = ktime_get_real();
4384
4385 __skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4386}
4387EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4388
4389void skb_tstamp_tx(struct sk_buff *orig_skb,
4390 struct skb_shared_hwtstamps *hwtstamps)
4391{
4392 return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4393 SCM_TSTAMP_SND);
4394}
4395EXPORT_SYMBOL_GPL(skb_tstamp_tx);
4396
4397void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4398{
4399 struct sock *sk = skb->sk;
4400 struct sock_exterr_skb *serr;
4401 int err = 1;
4402
4403 skb->wifi_acked_valid = 1;
4404 skb->wifi_acked = acked;
4405
4406 serr = SKB_EXT_ERR(skb);
4407 memset(serr, 0, sizeof(*serr));
4408 serr->ee.ee_errno = ENOMSG;
4409 serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4410
4411 /* Take a reference to prevent skb_orphan() from freeing the socket,
4412 * but only if the socket refcount is not zero.
4413 */
4414 if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4415 err = sock_queue_err_skb(sk, skb);
4416 sock_put(sk);
4417 }
4418 if (err)
4419 kfree_skb(skb);
4420}
4421EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4422
4423/**
4424 * skb_partial_csum_set - set up and verify partial csum values for packet
4425 * @skb: the skb to set
4426 * @start: the number of bytes after skb->data to start checksumming.
4427 * @off: the offset from start to place the checksum.
4428 *
4429 * For untrusted partially-checksummed packets, we need to make sure the values
4430 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4431 *
4432 * This function checks and sets those values and skb->ip_summed: if this
4433 * returns false you should drop the packet.
4434 */
4435bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4436{
4437 if (unlikely(start > skb_headlen(skb)) ||
4438 unlikely((int)start + off > skb_headlen(skb) - 2)) {
4439 net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
4440 start, off, skb_headlen(skb));
4441 return false;
4442 }
4443 skb->ip_summed = CHECKSUM_PARTIAL;
4444 skb->csum_start = skb_headroom(skb) + start;
4445 skb->csum_offset = off;
4446 skb_set_transport_header(skb, start);
4447 return true;
4448}
4449EXPORT_SYMBOL_GPL(skb_partial_csum_set);
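
/* Example (illustrative sketch of a virtio-net style receiver): validate an
 * untrusted CHECKSUM_PARTIAL hint before handing the packet to the stack.
 * "csum_start" and "csum_offset" are assumed to come from the untrusted
 * descriptor/header:
 *
 *	if (!skb_partial_csum_set(skb, csum_start, csum_offset)) {
 *		kfree_skb(skb);
 *		return -EINVAL;
 *	}
 */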
4450
4451static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4452 unsigned int max)
4453{
4454 if (skb_headlen(skb) >= len)
4455 return 0;
4456
/* If we need to pull up then pull up to the max, so we
 * won't need to do it again.
 */
4460 if (max > skb->len)
4461 max = skb->len;
4462
4463 if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4464 return -ENOMEM;
4465
4466 if (skb_headlen(skb) < len)
4467 return -EPROTO;
4468
4469 return 0;
4470}
4471
4472#define MAX_TCP_HDR_LEN (15 * 4)
4473
4474static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4475 typeof(IPPROTO_IP) proto,
4476 unsigned int off)
4477{
4478 switch (proto) {
4479 int err;
4480
4481 case IPPROTO_TCP:
4482 err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4483 off + MAX_TCP_HDR_LEN);
4484 if (!err && !skb_partial_csum_set(skb, off,
4485 offsetof(struct tcphdr,
4486 check)))
4487 err = -EPROTO;
4488 return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4489
4490 case IPPROTO_UDP:
4491 err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4492 off + sizeof(struct udphdr));
4493 if (!err && !skb_partial_csum_set(skb, off,
4494 offsetof(struct udphdr,
4495 check)))
4496 err = -EPROTO;
4497 return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4498 }
4499
4500 return ERR_PTR(-EPROTO);
4501}
4502
4503/* This value should be large enough to cover a tagged ethernet header plus
4504 * maximally sized IP and TCP or UDP headers.
4505 */
4506#define MAX_IP_HDR_LEN 128
4507
4508static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4509{
4510 unsigned int off;
4511 bool fragment;
4512 __sum16 *csum;
4513 int err;
4514
4515 fragment = false;
4516
4517 err = skb_maybe_pull_tail(skb,
4518 sizeof(struct iphdr),
4519 MAX_IP_HDR_LEN);
4520 if (err < 0)
4521 goto out;
4522
4523 if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4524 fragment = true;
4525
4526 off = ip_hdrlen(skb);
4527
4528 err = -EPROTO;
4529
4530 if (fragment)
4531 goto out;
4532
4533 csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4534 if (IS_ERR(csum))
4535 return PTR_ERR(csum);
4536
4537 if (recalculate)
4538 *csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4539 ip_hdr(skb)->daddr,
4540 skb->len - off,
4541 ip_hdr(skb)->protocol, 0);
4542 err = 0;
4543
4544out:
4545 return err;
4546}
4547
4548/* This value should be large enough to cover a tagged ethernet header plus
4549 * an IPv6 header, all options, and a maximal TCP or UDP header.
4550 */
4551#define MAX_IPV6_HDR_LEN 256
4552
4553#define OPT_HDR(type, skb, off) \
4554 (type *)(skb_network_header(skb) + (off))
4555
4556static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4557{
4558 int err;
4559 u8 nexthdr;
4560 unsigned int off;
4561 unsigned int len;
4562 bool fragment;
4563 bool done;
4564 __sum16 *csum;
4565
4566 fragment = false;
4567 done = false;
4568
4569 off = sizeof(struct ipv6hdr);
4570
4571 err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4572 if (err < 0)
4573 goto out;
4574
4575 nexthdr = ipv6_hdr(skb)->nexthdr;
4576
4577 len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4578 while (off <= len && !done) {
4579 switch (nexthdr) {
4580 case IPPROTO_DSTOPTS:
4581 case IPPROTO_HOPOPTS:
4582 case IPPROTO_ROUTING: {
4583 struct ipv6_opt_hdr *hp;
4584
4585 err = skb_maybe_pull_tail(skb,
4586 off +
4587 sizeof(struct ipv6_opt_hdr),
4588 MAX_IPV6_HDR_LEN);
4589 if (err < 0)
4590 goto out;
4591
4592 hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4593 nexthdr = hp->nexthdr;
4594 off += ipv6_optlen(hp);
4595 break;
4596 }
4597 case IPPROTO_AH: {
4598 struct ip_auth_hdr *hp;
4599
4600 err = skb_maybe_pull_tail(skb,
4601 off +
4602 sizeof(struct ip_auth_hdr),
4603 MAX_IPV6_HDR_LEN);
4604 if (err < 0)
4605 goto out;
4606
4607 hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4608 nexthdr = hp->nexthdr;
4609 off += ipv6_authlen(hp);
4610 break;
4611 }
4612 case IPPROTO_FRAGMENT: {
4613 struct frag_hdr *hp;
4614
4615 err = skb_maybe_pull_tail(skb,
4616 off +
4617 sizeof(struct frag_hdr),
4618 MAX_IPV6_HDR_LEN);
4619 if (err < 0)
4620 goto out;
4621
4622 hp = OPT_HDR(struct frag_hdr, skb, off);
4623
4624 if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4625 fragment = true;
4626
4627 nexthdr = hp->nexthdr;
4628 off += sizeof(struct frag_hdr);
4629 break;
4630 }
4631 default:
4632 done = true;
4633 break;
4634 }
4635 }
4636
4637 err = -EPROTO;
4638
4639 if (!done || fragment)
4640 goto out;
4641
4642 csum = skb_checksum_setup_ip(skb, nexthdr, off);
4643 if (IS_ERR(csum))
4644 return PTR_ERR(csum);
4645
4646 if (recalculate)
4647 *csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4648 &ipv6_hdr(skb)->daddr,
4649 skb->len - off, nexthdr, 0);
4650 err = 0;
4651
4652out:
4653 return err;
4654}
4655
4656/**
4657 * skb_checksum_setup - set up partial checksum offset
4658 * @skb: the skb to set up
4659 * @recalculate: if true the pseudo-header checksum will be recalculated
4660 */
4661int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4662{
4663 int err;
4664
4665 switch (skb->protocol) {
4666 case htons(ETH_P_IP):
4667 err = skb_checksum_setup_ipv4(skb, recalculate);
4668 break;
4669
4670 case htons(ETH_P_IPV6):
4671 err = skb_checksum_setup_ipv6(skb, recalculate);
4672 break;
4673
4674 default:
4675 err = -EPROTO;
4676 break;
4677 }
4678
4679 return err;
4680}
4681EXPORT_SYMBOL(skb_checksum_setup);
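
/* Example (illustrative sketch): a backend driver that receives frames with
 * partial checksums from an untrusted peer might do:
 *
 *	err = skb_checksum_setup(skb, true);
 *	if (err) {
 *		kfree_skb(skb);
 *		return err;
 *	}
 *
 * With @recalculate true the pseudo-header checksum is written into the
 * TCP/UDP check field, so a later CHECKSUM_PARTIAL completion only has to
 * fold in the payload.
 */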
4682
4683/**
4684 * skb_checksum_maybe_trim - maybe trims the given skb
4685 * @skb: the skb to check
4686 * @transport_len: the data length beyond the network header
4687 *
4688 * Checks whether the given skb has data beyond the given transport length.
4689 * If so, returns a cloned skb trimmed to this transport length.
4690 * Otherwise returns the provided skb. Returns NULL in error cases
4691 * (e.g. transport_len exceeds skb length or out-of-memory).
4692 *
4693 * Caller needs to set the skb transport header and free any returned skb if it
4694 * differs from the provided skb.
4695 */
4696static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4697 unsigned int transport_len)
4698{
4699 struct sk_buff *skb_chk;
4700 unsigned int len = skb_transport_offset(skb) + transport_len;
4701 int ret;
4702
4703 if (skb->len < len)
4704 return NULL;
4705 else if (skb->len == len)
4706 return skb;
4707
4708 skb_chk = skb_clone(skb, GFP_ATOMIC);
4709 if (!skb_chk)
4710 return NULL;
4711
4712 ret = pskb_trim_rcsum(skb_chk, len);
4713 if (ret) {
4714 kfree_skb(skb_chk);
4715 return NULL;
4716 }
4717
4718 return skb_chk;
4719}
4720
4721/**
4722 * skb_checksum_trimmed - validate checksum of an skb
4723 * @skb: the skb to check
4724 * @transport_len: the data length beyond the network header
4725 * @skb_chkf: checksum function to use
4726 *
4727 * Applies the given checksum function skb_chkf to the provided skb.
4728 * Returns a checked and maybe trimmed skb. Returns NULL on error.
4729 *
4730 * If the skb has data beyond the given transport length, then a
4731 * trimmed & cloned skb is checked and returned.
4732 *
4733 * Caller needs to set the skb transport header and free any returned skb if it
4734 * differs from the provided skb.
4735 */
4736struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4737 unsigned int transport_len,
4738 __sum16(*skb_chkf)(struct sk_buff *skb))
4739{
4740 struct sk_buff *skb_chk;
4741 unsigned int offset = skb_transport_offset(skb);
4742 __sum16 ret;
4743
4744 skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4745 if (!skb_chk)
4746 goto err;
4747
4748 if (!pskb_may_pull(skb_chk, offset))
4749 goto err;
4750
4751 skb_pull_rcsum(skb_chk, offset);
4752 ret = skb_chkf(skb_chk);
4753 skb_push_rcsum(skb_chk, offset);
4754
4755 if (ret)
4756 goto err;
4757
4758 return skb_chk;
4759
4760err:
4761 if (skb_chk && skb_chk != skb)
4762 kfree_skb(skb_chk);
4763
4764 return NULL;
4765
4766}
4767EXPORT_SYMBOL(skb_checksum_trimmed);
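
/* Example (illustrative sketch): validate a transport checksum over exactly
 * @transport_len bytes. "my_validate_csum" is a hypothetical function with
 * the __sum16 (*)(struct sk_buff *) signature expected for @skb_chkf, and the
 * transport header is assumed to have been set already:
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_validate_csum);
 *	if (!skb_chk)
 *		goto drop;
 *
 *	(... parse skb_chk ...)
 *
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);
 */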
4768
4769void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4770{
4771 net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4772 skb->dev->name);
4773}
4774EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4775
4776void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4777{
4778 if (head_stolen) {
4779 skb_release_head_state(skb);
4780 kmem_cache_free(skbuff_head_cache, skb);
4781 } else {
4782 __kfree_skb(skb);
4783 }
4784}
4785EXPORT_SYMBOL(kfree_skb_partial);
4786
4787/**
4788 * skb_try_coalesce - try to merge skb to prior one
4789 * @to: prior buffer
4790 * @from: buffer to add
4791 * @fragstolen: pointer to boolean
4792 * @delta_truesize: how much more was allocated than was requested
4793 */
4794bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4795 bool *fragstolen, int *delta_truesize)
4796{
4797 struct skb_shared_info *to_shinfo, *from_shinfo;
4798 int i, delta, len = from->len;
4799
4800 *fragstolen = false;
4801
4802 if (skb_cloned(to))
4803 return false;
4804
4805 if (len <= skb_tailroom(to)) {
4806 if (len)
4807 BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4808 *delta_truesize = 0;
4809 return true;
4810 }
4811
4812 to_shinfo = skb_shinfo(to);
4813 from_shinfo = skb_shinfo(from);
4814 if (to_shinfo->frag_list || from_shinfo->frag_list)
4815 return false;
4816 if (skb_zcopy(to) || skb_zcopy(from))
4817 return false;
4818
4819 if (skb_headlen(from) != 0) {
4820 struct page *page;
4821 unsigned int offset;
4822
4823 if (to_shinfo->nr_frags +
4824 from_shinfo->nr_frags >= MAX_SKB_FRAGS)
4825 return false;
4826
4827 if (skb_head_is_locked(from))
4828 return false;
4829
4830 delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4831
4832 page = virt_to_head_page(from->head);
4833 offset = from->data - (unsigned char *)page_address(page);
4834
4835 skb_fill_page_desc(to, to_shinfo->nr_frags,
4836 page, offset, skb_headlen(from));
4837 *fragstolen = true;
4838 } else {
4839 if (to_shinfo->nr_frags +
4840 from_shinfo->nr_frags > MAX_SKB_FRAGS)
4841 return false;
4842
4843 delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4844 }
4845
4846 WARN_ON_ONCE(delta < len);
4847
4848 memcpy(to_shinfo->frags + to_shinfo->nr_frags,
4849 from_shinfo->frags,
4850 from_shinfo->nr_frags * sizeof(skb_frag_t));
4851 to_shinfo->nr_frags += from_shinfo->nr_frags;
4852
4853 if (!skb_cloned(from))
4854 from_shinfo->nr_frags = 0;
4855
4856 /* if the skb is not cloned this does nothing
4857 * since we set nr_frags to 0.
4858 */
4859 for (i = 0; i < from_shinfo->nr_frags; i++)
4860 __skb_frag_ref(&from_shinfo->frags[i]);
4861
4862 to->truesize += delta;
4863 to->len += len;
4864 to->data_len += len;
4865
4866 *delta_truesize = delta;
4867 return true;
4868}
4869EXPORT_SYMBOL(skb_try_coalesce);
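
/* Example (illustrative sketch of a receive-queue style caller): try to glue
 * "from" onto the previous buffer and account only the truesize delta. "sk"
 * and the way the delta is charged are assumptions of this sketch:
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(to, from, &fragstolen, &delta)) {
 *		kfree_skb_partial(from, fragstolen);
 *		atomic_add(delta, &sk->sk_rmem_alloc);
 *	}
 *
 * kfree_skb_partial() (above) releases what is left of "from": just the
 * sk_buff head when its data was stolen into "to", otherwise the whole skb.
 */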
4870
4871/**
4872 * skb_scrub_packet - scrub an skb
4873 *
4874 * @skb: buffer to clean
4875 * @xnet: packet is crossing netns
4876 *
 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
 * into/from a tunnel. Some information has to be cleared during these
 * operations.
 * skb_scrub_packet can also be used to clean an skb before injecting it into
 * another namespace (@xnet == true). We have to clear all information in the
 * skb that could impact namespace isolation.
4883 */
4884void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4885{
4886 skb->tstamp = 0;
4887 skb->pkt_type = PACKET_HOST;
4888 skb->skb_iif = 0;
4889 skb->ignore_df = 0;
4890 skb_dst_drop(skb);
4891 secpath_reset(skb);
4892 nf_reset(skb);
4893 nf_reset_trace(skb);
4894
4895 if (!xnet)
4896 return;
4897
4898 ipvs_reset(skb);
4899 skb_orphan(skb);
4900 skb->mark = 0;
4901}
4902EXPORT_SYMBOL_GPL(skb_scrub_packet);
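
/* Example (illustrative sketch of a tunnel receive path): scrub the inner
 * packet after decapsulation, treating it as crossing namespaces only when
 * the underlying device lives in a different netns ("net" is assumed to be
 * the tunnel's own namespace):
 *
 *	skb_scrub_packet(skb, !net_eq(net, dev_net(skb->dev)));
 */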
4903
4904/**
4905 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4906 *
4907 * @skb: GSO skb
4908 *
4909 * skb_gso_transport_seglen is used to determine the real size of the
4910 * individual segments, including Layer4 headers (TCP/UDP).
4911 *
4912 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4913 */
4914static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4915{
4916 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4917 unsigned int thlen = 0;
4918
4919 if (skb->encapsulation) {
4920 thlen = skb_inner_transport_header(skb) -
4921 skb_transport_header(skb);
4922
4923 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4924 thlen += inner_tcp_hdrlen(skb);
4925 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4926 thlen = tcp_hdrlen(skb);
4927 } else if (unlikely(skb_is_gso_sctp(skb))) {
4928 thlen = sizeof(struct sctphdr);
4929 }
4930 /* UFO sets gso_size to the size of the fragmentation
4931 * payload, i.e. the size of the L4 (UDP) header is already
4932 * accounted for.
4933 */
4934 return thlen + shinfo->gso_size;
4935}
4936
4937/**
4938 * skb_gso_network_seglen - Return length of individual segments of a gso packet
4939 *
4940 * @skb: GSO skb
4941 *
4942 * skb_gso_network_seglen is used to determine the real size of the
4943 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4944 *
4945 * The MAC/L2 header is not accounted for.
4946 */
4947static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4948{
4949 unsigned int hdr_len = skb_transport_header(skb) -
4950 skb_network_header(skb);
4951
4952 return hdr_len + skb_gso_transport_seglen(skb);
4953}
4954
4955/**
4956 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4957 *
4958 * @skb: GSO skb
4959 *
4960 * skb_gso_mac_seglen is used to determine the real size of the
4961 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4962 * headers (TCP/UDP).
4963 */
4964static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4965{
4966 unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4967
4968 return hdr_len + skb_gso_transport_seglen(skb);
4969}
4970
4971/**
4972 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
4973 *
4974 * There are a couple of instances where we have a GSO skb, and we
4975 * want to determine what size it would be after it is segmented.
4976 *
4977 * We might want to check:
4978 * - L3+L4+payload size (e.g. IP forwarding)
4979 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
4980 *
4981 * This is a helper to do that correctly considering GSO_BY_FRAGS.
4982 *
4983 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
4984 * GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
4985 *
4986 * @max_len: The maximum permissible length.
4987 *
4988 * Returns true if the segmented length <= max length.
4989 */
4990static inline bool skb_gso_size_check(const struct sk_buff *skb,
4991 unsigned int seg_len,
4992 unsigned int max_len) {
4993 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4994 const struct sk_buff *iter;
4995
4996 if (shinfo->gso_size != GSO_BY_FRAGS)
4997 return seg_len <= max_len;
4998
4999 /* Undo this so we can re-use header sizes */
5000 seg_len -= GSO_BY_FRAGS;
5001
5002 skb_walk_frags(skb, iter) {
5003 if (seg_len + skb_headlen(iter) > max_len)
5004 return false;
5005 }
5006
5007 return true;
5008}
5009
5010/**
5011 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5012 *
5013 * @skb: GSO skb
5014 * @mtu: MTU to validate against
5015 *
5016 * skb_gso_validate_network_len validates if a given skb will fit a
5017 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5018 * payload.
5019 */
5020bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5021{
5022 return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5023}
5024EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
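
/* Example (illustrative sketch of a forwarding-style MTU check, "mtu" being
 * the route MTU): an oversized GSO skb is still acceptable as long as every
 * segment it will be split into fits:
 *
 *	if (skb->len > mtu &&
 *	    !(skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu)))
 *		goto send_frag_needed;	(e.g. send ICMP fragmentation needed)
 */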
5025
5026/**
5027 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5028 *
5029 * @skb: GSO skb
5030 * @len: length to validate against
5031 *
5032 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5033 * length once split, including L2, L3 and L4 headers and the payload.
5034 */
5035bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5036{
5037 return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5038}
5039EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
5040
5041static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5042{
5043 int mac_len;
5044
5045 if (skb_cow(skb, skb_headroom(skb)) < 0) {
5046 kfree_skb(skb);
5047 return NULL;
5048 }
5049
5050 mac_len = skb->data - skb_mac_header(skb);
5051 if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5052 memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5053 mac_len - VLAN_HLEN - ETH_TLEN);
5054 }
5055 skb->mac_header += VLAN_HLEN;
5056 return skb;
5057}
5058
5059struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5060{
5061 struct vlan_hdr *vhdr;
5062 u16 vlan_tci;
5063
5064 if (unlikely(skb_vlan_tag_present(skb))) {
/* vlan_tci is already set up, so leave this for another time */
5066 return skb;
5067 }
5068
5069 skb = skb_share_check(skb, GFP_ATOMIC);
5070 if (unlikely(!skb))
5071 goto err_free;
5072
5073 if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
5074 goto err_free;
5075
5076 vhdr = (struct vlan_hdr *)skb->data;
5077 vlan_tci = ntohs(vhdr->h_vlan_TCI);
5078 __vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5079
5080 skb_pull_rcsum(skb, VLAN_HLEN);
5081 vlan_set_encap_proto(skb, vhdr);
5082
5083 skb = skb_reorder_vlan_header(skb);
5084 if (unlikely(!skb))
5085 goto err_free;
5086
5087 skb_reset_network_header(skb);
5088 skb_reset_transport_header(skb);
5089 skb_reset_mac_len(skb);
5090
5091 return skb;
5092
5093err_free:
5094 kfree_skb(skb);
5095 return NULL;
5096}
5097EXPORT_SYMBOL(skb_vlan_untag);
5098
5099int skb_ensure_writable(struct sk_buff *skb, int write_len)
5100{
5101 if (!pskb_may_pull(skb, write_len))
5102 return -ENOMEM;
5103
5104 if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5105 return 0;
5106
5107 return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5108}
5109EXPORT_SYMBOL(skb_ensure_writable);
5110
/* Remove the VLAN header from the packet and update the csum accordingly.
 * Expects an skb without a hardware-accelerated tag (skb_vlan_tag_present
 * is false) that carries a VLAN tag in its payload.
 */
5114int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5115{
5116 struct vlan_hdr *vhdr;
5117 int offset = skb->data - skb_mac_header(skb);
5118 int err;
5119
5120 if (WARN_ONCE(offset,
5121 "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5122 offset)) {
5123 return -EINVAL;
5124 }
5125
5126 err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5127 if (unlikely(err))
5128 return err;
5129
5130 skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5131
5132 vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5133 *vlan_tci = ntohs(vhdr->h_vlan_TCI);
5134
5135 memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5136 __skb_pull(skb, VLAN_HLEN);
5137
5138 vlan_set_encap_proto(skb, vhdr);
5139 skb->mac_header += VLAN_HLEN;
5140
5141 if (skb_network_offset(skb) < ETH_HLEN)
5142 skb_set_network_header(skb, ETH_HLEN);
5143
5144 skb_reset_mac_len(skb);
5145
5146 return err;
5147}
5148EXPORT_SYMBOL(__skb_vlan_pop);
5149
5150/* Pop a vlan tag either from hwaccel or from payload.
5151 * Expects skb->data at mac header.
5152 */
5153int skb_vlan_pop(struct sk_buff *skb)
5154{
5155 u16 vlan_tci;
5156 __be16 vlan_proto;
5157 int err;
5158
5159 if (likely(skb_vlan_tag_present(skb))) {
5160 skb->vlan_tci = 0;
5161 } else {
5162 if (unlikely(!eth_type_vlan(skb->protocol)))
5163 return 0;
5164
5165 err = __skb_vlan_pop(skb, &vlan_tci);
5166 if (err)
5167 return err;
5168 }
5169 /* move next vlan tag to hw accel tag */
5170 if (likely(!eth_type_vlan(skb->protocol)))
5171 return 0;
5172
5173 vlan_proto = skb->protocol;
5174 err = __skb_vlan_pop(skb, &vlan_tci);
5175 if (unlikely(err))
5176 return err;
5177
5178 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5179 return 0;
5180}
5181EXPORT_SYMBOL(skb_vlan_pop);
5182
5183/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5184 * Expects skb->data at mac header.
5185 */
5186int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5187{
5188 if (skb_vlan_tag_present(skb)) {
5189 int offset = skb->data - skb_mac_header(skb);
5190 int err;
5191
5192 if (WARN_ONCE(offset,
5193 "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5194 offset)) {
5195 return -EINVAL;
5196 }
5197
5198 err = __vlan_insert_tag(skb, skb->vlan_proto,
5199 skb_vlan_tag_get(skb));
5200 if (err)
5201 return err;
5202
5203 skb->protocol = skb->vlan_proto;
5204 skb->mac_len += VLAN_HLEN;
5205
5206 skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5207 }
5208 __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5209 return 0;
5210}
5211EXPORT_SYMBOL(skb_vlan_push);
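
/* Example (illustrative sketch of an action-style user, e.g. a flow action):
 * both helpers expect skb->data to sit at the mac header; "tci" is an assumed
 * tag value and error handling is abbreviated:
 *
 *	err = skb_vlan_push(skb, htons(ETH_P_8021Q), tci);
 *	if (err)
 *		return err;
 *	...
 *	err = skb_vlan_pop(skb);
 *
 * Neither helper frees the skb on failure, so the caller decides whether a
 * failed push/pop means the packet must be dropped.
 */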
5212
5213/**
5214 * alloc_skb_with_frags - allocate skb with page frags
5215 *
5216 * @header_len: size of linear part
5217 * @data_len: needed length in frags
5218 * @max_page_order: max page order desired.
5219 * @errcode: pointer to error code if any
5220 * @gfp_mask: allocation mask
5221 *
5222 * This can be used to allocate a paged skb, given a maximal order for frags.
5223 */
5224struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5225 unsigned long data_len,
5226 int max_page_order,
5227 int *errcode,
5228 gfp_t gfp_mask)
5229{
5230 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5231 unsigned long chunk;
5232 struct sk_buff *skb;
5233 struct page *page;
5234 gfp_t gfp_head;
5235 int i;
5236
5237 *errcode = -EMSGSIZE;
/* Note this test could be relaxed, if we succeed in allocating
 * high order pages...
 */
5241 if (npages > MAX_SKB_FRAGS)
5242 return NULL;
5243
5244 gfp_head = gfp_mask;
5245 if (gfp_head & __GFP_DIRECT_RECLAIM)
5246 gfp_head |= __GFP_RETRY_MAYFAIL;
5247
5248 *errcode = -ENOBUFS;
5249 skb = alloc_skb(header_len, gfp_head);
5250 if (!skb)
5251 return NULL;
5252
5253 skb->truesize += npages << PAGE_SHIFT;
5254
5255 for (i = 0; npages > 0; i++) {
5256 int order = max_page_order;
5257
5258 while (order) {
5259 if (npages >= 1 << order) {
5260 page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5261 __GFP_COMP |
5262 __GFP_NOWARN |
5263 __GFP_NORETRY,
5264 order);
5265 if (page)
5266 goto fill_page;
5267 /* Do not retry other high order allocations */
5268 order = 1;
5269 max_page_order = 0;
5270 }
5271 order--;
5272 }
5273 page = alloc_page(gfp_mask);
5274 if (!page)
5275 goto failure;
5276fill_page:
5277 chunk = min_t(unsigned long, data_len,
5278 PAGE_SIZE << order);
5279 skb_fill_page_desc(skb, i, page, 0, chunk);
5280 data_len -= chunk;
5281 npages -= 1 << order;
5282 }
5283 return skb;
5284
5285failure:
5286 kfree_skb(skb);
5287 return NULL;
5288}
5289EXPORT_SYMBOL(alloc_skb_with_frags);
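
/* Example (illustrative sketch): allocate a large, mostly paged skb while
 * capping the page order so the allocator is not pressed for huge contiguous
 * areas. The order cap and GFP flags are assumptions of the caller:
 *
 *	int err;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(header_len, data_len,
 *				   PAGE_ALLOC_COSTLY_ORDER, &err, GFP_KERNEL);
 *	if (!skb)
 *		return err;	(-ENOBUFS or -EMSGSIZE)
 *
 * Note that the pages are attached as frags but no length is accounted:
 * the caller copies its data in and updates skb->len / skb->data_len itself.
 */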
5290
5291/* carve out the first off bytes from skb when off < headlen */
5292static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5293 const int headlen, gfp_t gfp_mask)
5294{
5295 int i;
5296 int size = skb_end_offset(skb);
5297 int new_hlen = headlen - off;
5298 u8 *data;
5299
5300 size = SKB_DATA_ALIGN(size);
5301
5302 if (skb_pfmemalloc(skb))
5303 gfp_mask |= __GFP_MEMALLOC;
5304 data = kmalloc_reserve(size +
5305 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5306 gfp_mask, NUMA_NO_NODE, NULL);
5307 if (!data)
5308 return -ENOMEM;
5309
5310 size = SKB_WITH_OVERHEAD(ksize(data));
5311
5312 /* Copy real data, and all frags */
5313 skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5314 skb->len -= off;
5315
5316 memcpy((struct skb_shared_info *)(data + size),
5317 skb_shinfo(skb),
5318 offsetof(struct skb_shared_info,
5319 frags[skb_shinfo(skb)->nr_frags]));
5320 if (skb_cloned(skb)) {
5321 /* drop the old head gracefully */
5322 if (skb_orphan_frags(skb, gfp_mask)) {
5323 kfree(data);
5324 return -ENOMEM;
5325 }
5326 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5327 skb_frag_ref(skb, i);
5328 if (skb_has_frag_list(skb))
5329 skb_clone_fraglist(skb);
5330 skb_release_data(skb);
5331 } else {
/* we can reuse the existing refcount - all we did was
 * relocate values
 */
5335 skb_free_head(skb);
5336 }
5337
5338 skb->head = data;
5339 skb->data = data;
5340 skb->head_frag = 0;
5341#ifdef NET_SKBUFF_DATA_USES_OFFSET
5342 skb->end = size;
5343#else
5344 skb->end = skb->head + size;
5345#endif
5346 skb_set_tail_pointer(skb, skb_headlen(skb));
5347 skb_headers_offset_update(skb, 0);
5348 skb->cloned = 0;
5349 skb->hdr_len = 0;
5350 skb->nohdr = 0;
5351 atomic_set(&skb_shinfo(skb)->dataref, 1);
5352
5353 return 0;
5354}
5355
5356static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5357
5358/* carve out the first eat bytes from skb's frag_list. May recurse into
5359 * pskb_carve()
5360 */
5361static int pskb_carve_frag_list(struct sk_buff *skb,
5362 struct skb_shared_info *shinfo, int eat,
5363 gfp_t gfp_mask)
5364{
5365 struct sk_buff *list = shinfo->frag_list;
5366 struct sk_buff *clone = NULL;
5367 struct sk_buff *insp = NULL;
5368
5369 do {
5370 if (!list) {
5371 pr_err("Not enough bytes to eat. Want %d\n", eat);
5372 return -EFAULT;
5373 }
5374 if (list->len <= eat) {
5375 /* Eaten as whole. */
5376 eat -= list->len;
5377 list = list->next;
5378 insp = list;
5379 } else {
5380 /* Eaten partially. */
5381 if (skb_shared(list)) {
5382 clone = skb_clone(list, gfp_mask);
5383 if (!clone)
5384 return -ENOMEM;
5385 insp = list->next;
5386 list = clone;
5387 } else {
5388 /* This may be pulled without problems. */
5389 insp = list;
5390 }
5391 if (pskb_carve(list, eat, gfp_mask) < 0) {
5392 kfree_skb(clone);
5393 return -ENOMEM;
5394 }
5395 break;
5396 }
5397 } while (eat);
5398
5399 /* Free pulled out fragments. */
5400 while ((list = shinfo->frag_list) != insp) {
5401 shinfo->frag_list = list->next;
5402 kfree_skb(list);
5403 }
5404 /* And insert new clone at head. */
5405 if (clone) {
5406 clone->next = list;
5407 shinfo->frag_list = clone;
5408 }
5409 return 0;
5410}
5411
5412/* carve off first len bytes from skb. Split line (off) is in the
5413 * non-linear part of skb
5414 */
5415static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5416 int pos, gfp_t gfp_mask)
5417{
5418 int i, k = 0;
5419 int size = skb_end_offset(skb);
5420 u8 *data;
5421 const int nfrags = skb_shinfo(skb)->nr_frags;
5422 struct skb_shared_info *shinfo;
5423
5424 size = SKB_DATA_ALIGN(size);
5425
5426 if (skb_pfmemalloc(skb))
5427 gfp_mask |= __GFP_MEMALLOC;
5428 data = kmalloc_reserve(size +
5429 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5430 gfp_mask, NUMA_NO_NODE, NULL);
5431 if (!data)
5432 return -ENOMEM;
5433
5434 size = SKB_WITH_OVERHEAD(ksize(data));
5435
5436 memcpy((struct skb_shared_info *)(data + size),
5437 skb_shinfo(skb), offsetof(struct skb_shared_info,
5438 frags[skb_shinfo(skb)->nr_frags]));
5439 if (skb_orphan_frags(skb, gfp_mask)) {
5440 kfree(data);
5441 return -ENOMEM;
5442 }
5443 shinfo = (struct skb_shared_info *)(data + size);
5444 for (i = 0; i < nfrags; i++) {
5445 int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5446
5447 if (pos + fsize > off) {
5448 shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5449
5450 if (pos < off) {
/* Split frag.
 * We have two variants in this case:
 * 1. Move the whole frag to the second
 * part, if it is possible. E.g.
 * this approach is mandatory for TUX,
 * where splitting is expensive.
 * 2. Split accurately. This is what
 * we do here.
 */
5459 shinfo->frags[0].page_offset += off - pos;
5460 skb_frag_size_sub(&shinfo->frags[0], off - pos);
5461 }
5462 skb_frag_ref(skb, i);
5463 k++;
5464 }
5465 pos += fsize;
5466 }
5467 shinfo->nr_frags = k;
5468 if (skb_has_frag_list(skb))
5469 skb_clone_fraglist(skb);
5470
5471 if (k == 0) {
5472 /* split line is in frag list */
5473 pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
5474 }
5475 skb_release_data(skb);
5476
5477 skb->head = data;
5478 skb->head_frag = 0;
5479 skb->data = data;
5480#ifdef NET_SKBUFF_DATA_USES_OFFSET
5481 skb->end = size;
5482#else
5483 skb->end = skb->head + size;
5484#endif
5485 skb_reset_tail_pointer(skb);
5486 skb_headers_offset_update(skb, 0);
5487 skb->cloned = 0;
5488 skb->hdr_len = 0;
5489 skb->nohdr = 0;
5490 skb->len -= off;
5491 skb->data_len = skb->len;
5492 atomic_set(&skb_shinfo(skb)->dataref, 1);
5493 return 0;
5494}
5495
5496/* remove len bytes from the beginning of the skb */
5497static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
5498{
5499 int headlen = skb_headlen(skb);
5500
5501 if (len < headlen)
5502 return pskb_carve_inside_header(skb, len, headlen, gfp);
5503 else
5504 return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
5505}
5506
5507/* Extract to_copy bytes starting at off from skb, and return this in
5508 * a new skb
5509 */
5510struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
5511 int to_copy, gfp_t gfp)
5512{
5513 struct sk_buff *clone = skb_clone(skb, gfp);
5514
5515 if (!clone)
5516 return NULL;
5517
5518 if (pskb_carve(clone, off, gfp) < 0 ||
5519 pskb_trim(clone, to_copy)) {
5520 kfree_skb(clone);
5521 return NULL;
5522 }
5523 return clone;
5524}
5525EXPORT_SYMBOL(pskb_extract);
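
/* Example (illustrative sketch): pull a sub-range of a received skb out into
 * its own buffer without copying the page frags:
 *
 *	struct sk_buff *part;
 *
 *	part = pskb_extract(skb, off, to_copy, GFP_ATOMIC);
 *	if (!part)
 *		return -ENOMEM;
 *
 * The extracted skb shares (and holds references on) the original's page
 * frags, and both buffers must still be freed independently with kfree_skb().
 */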
5526
5527/**
5528 * skb_condense - try to get rid of fragments/frag_list if possible
5529 * @skb: buffer
5530 *
5531 * Can be used to save memory before skb is added to a busy queue.
5532 * If packet has bytes in frags and enough tail room in skb->head,
5533 * pull all of them, so that we can free the frags right now and adjust
5534 * truesize.
5535 * Notes:
 * We do not reallocate skb->head, thus this cannot fail.
5537 * Caller must re-evaluate skb->truesize if needed.
5538 */
5539void skb_condense(struct sk_buff *skb)
5540{
5541 if (skb->data_len) {
5542 if (skb->data_len > skb->end - skb->tail ||
5543 skb_cloned(skb))
5544 return;
5545
5546 /* Nice, we can free page frag(s) right now */
5547 __pskb_pull_tail(skb, skb->data_len);
5548 }
/* At this point, skb->truesize might be overestimated,
 * because the skb had a fragment, and fragments do not report
 * their truesize.
 * When we pulled its content into skb->head, the fragment
 * was freed, but __pskb_pull_tail() could not possibly
 * adjust skb->truesize, not knowing the frag truesize.
5555 */
5556 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5557}