   1/*
   2 *	Routines having to do with the 'struct sk_buff' memory handlers.
   3 *
   4 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
   5 *			Florian La Roche <rzsfl@rz.uni-sb.de>
   6 *
   7 *	Fixes:
   8 *		Alan Cox	:	Fixed the worst of the load
   9 *					balancer bugs.
  10 *		Dave Platt	:	Interrupt stacking fix.
  11 *	Richard Kooijman	:	Timestamp fixes.
  12 *		Alan Cox	:	Changed buffer format.
  13 *		Alan Cox	:	destructor hook for AF_UNIX etc.
  14 *		Linus Torvalds	:	Better skb_clone.
  15 *		Alan Cox	:	Added skb_copy.
  16 *		Alan Cox	:	Added all the changed routines Linus
  17 *					only put in the headers
  18 *		Ray VanTassle	:	Fixed --skb->lock in free
  19 *		Alan Cox	:	skb_copy copy arp field
  20 *		Andi Kleen	:	slabified it.
  21 *		Robert Olsson	:	Removed skb_head_pool
  22 *
  23 *	NOTE:
  24 *		The __skb_ routines should be called with interrupts
  25 *	disabled, or you better be *real* sure that the operation is atomic
  26 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
  27 *	or via disabling bottom half handlers, etc).
  28 *
  29 *	This program is free software; you can redistribute it and/or
  30 *	modify it under the terms of the GNU General Public License
  31 *	as published by the Free Software Foundation; either version
  32 *	2 of the License, or (at your option) any later version.
  33 */
  34
  35/*
  36 *	The functions in this file will not compile correctly with gcc 2.4.x
  37 */
  38
  39#include <linux/module.h>
  40#include <linux/types.h>
  41#include <linux/kernel.h>
  42#include <linux/kmemcheck.h>
  43#include <linux/mm.h>
  44#include <linux/interrupt.h>
  45#include <linux/in.h>
  46#include <linux/inet.h>
  47#include <linux/slab.h>
  48#include <linux/netdevice.h>
  49#ifdef CONFIG_NET_CLS_ACT
  50#include <net/pkt_sched.h>
  51#endif
  52#include <linux/string.h>
  53#include <linux/skbuff.h>
  54#include <linux/splice.h>
  55#include <linux/cache.h>
  56#include <linux/rtnetlink.h>
  57#include <linux/init.h>
  58#include <linux/scatterlist.h>
  59#include <linux/errqueue.h>
  60#include <linux/prefetch.h>
 
  61
  62#include <net/protocol.h>
  63#include <net/dst.h>
  64#include <net/sock.h>
  65#include <net/checksum.h>
 
  66#include <net/xfrm.h>
  67
  68#include <asm/uaccess.h>
  69#include <asm/system.h>
  70#include <trace/events/skb.h>
  71
  72#include "kmap_skb.h"
  73
  74static struct kmem_cache *skbuff_head_cache __read_mostly;
  75static struct kmem_cache *skbuff_fclone_cache __read_mostly;
  76
  77static void sock_pipe_buf_release(struct pipe_inode_info *pipe,
  78				  struct pipe_buffer *buf)
  79{
  80	put_page(buf->page);
  81}
  82
  83static void sock_pipe_buf_get(struct pipe_inode_info *pipe,
  84				struct pipe_buffer *buf)
  85{
  86	get_page(buf->page);
  87}
  88
  89static int sock_pipe_buf_steal(struct pipe_inode_info *pipe,
  90			       struct pipe_buffer *buf)
  91{
  92	return 1;
  93}
  94
  95
  96/* Pipe buffer operations for a socket. */
  97static const struct pipe_buf_operations sock_pipe_buf_ops = {
  98	.can_merge = 0,
  99	.map = generic_pipe_buf_map,
 100	.unmap = generic_pipe_buf_unmap,
 101	.confirm = generic_pipe_buf_confirm,
 102	.release = sock_pipe_buf_release,
 103	.steal = sock_pipe_buf_steal,
 104	.get = sock_pipe_buf_get,
 105};
 106
 107/*
 108 *	Keep out-of-line to prevent kernel bloat.
 109 *	__builtin_return_address is not used because it is not always
 110 *	reliable.
 111 */
 112
 113/**
 114 *	skb_over_panic	- 	private function
 115 *	@skb: buffer
 116 *	@sz: size
 117 *	@here: address
 118 *
 119 *	Out of line support code for skb_put(). Not user callable.
 120 */
 121static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
 122{
 123	printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
 124			  "data:%p tail:%#lx end:%#lx dev:%s\n",
 125	       here, skb->len, sz, skb->head, skb->data,
 126	       (unsigned long)skb->tail, (unsigned long)skb->end,
 127	       skb->dev ? skb->dev->name : "<NULL>");
 128	BUG();
 129}
 130
 131/**
 132 *	skb_under_panic	- 	private function
 133 *	@skb: buffer
 134 *	@sz: size
 135 *	@here: address
 136 *
 137 *	Out of line support code for skb_push(). Not user callable.
 138 */
 
 139
 140static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
 141{
 142	printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
 143			  "data:%p tail:%#lx end:%#lx dev:%s\n",
 144	       here, skb->len, sz, skb->head, skb->data,
 145	       (unsigned long)skb->tail, (unsigned long)skb->end,
 146	       skb->dev ? skb->dev->name : "<NULL>");
 147	BUG();
 
 148}
 149
 150/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 151 *	'private' fields and also do memory statistics to find all the
 152 *	[BEEP] leaks.
 153 *
 154 */
 155
 156/**
 157 *	__alloc_skb	-	allocate a network buffer
 158 *	@size: size to allocate
 159 *	@gfp_mask: allocation mask
 160 *	@fclone: allocate from fclone cache instead of head cache
 161 *		and allocate a cloned (child) skb
 162 *	@node: numa node to allocate memory on
 163 *
 164 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 165 *	tail room of size bytes. The object has a reference count of one.
 166 *	The return is the buffer. On a failure the return is %NULL.
 167 *
 168 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 169 *	%GFP_ATOMIC.
 170 */
 171struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 172			    int fclone, int node)
 173{
 174	struct kmem_cache *cache;
 175	struct skb_shared_info *shinfo;
 176	struct sk_buff *skb;
 177	u8 *data;
 178
 179	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
 
 180
 181	/* Get the HEAD */
 182	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 183	if (!skb)
 184		goto out;
 185	prefetchw(skb);
 186
 187	size = SKB_DATA_ALIGN(size);
 188	data = kmalloc_node_track_caller(size + sizeof(struct skb_shared_info),
 189			gfp_mask, node);
 190	if (!data)
 191		goto nodata;
 192	prefetchw(data + size);
 193
 194	/*
 195	 * Only clear those fields we need to clear, not those that we will
 196	 * actually initialise below. Hence, don't put any more fields after
 197	 * the tail pointer in struct sk_buff!
 198	 */
 199	memset(skb, 0, offsetof(struct sk_buff, tail));
 200	skb->truesize = size + sizeof(struct sk_buff);
 201	atomic_set(&skb->users, 1);
 202	skb->head = data;
 203	skb->data = data;
 204	skb_reset_tail_pointer(skb);
 205	skb->end = skb->tail + size;
 206#ifdef NET_SKBUFF_DATA_USES_OFFSET
 207	skb->mac_header = ~0U;
 208#endif
 209
 210	/* make sure we initialize shinfo sequentially */
 211	shinfo = skb_shinfo(skb);
 212	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 213	atomic_set(&shinfo->dataref, 1);
 214	kmemcheck_annotate_variable(shinfo->destructor_arg);
 215
 216	if (fclone) {
 217		struct sk_buff *child = skb + 1;
 218		atomic_t *fclone_ref = (atomic_t *) (child + 1);
 
 219
 220		kmemcheck_annotate_bitfield(child, flags1);
 221		kmemcheck_annotate_bitfield(child, flags2);
 222		skb->fclone = SKB_FCLONE_ORIG;
 223		atomic_set(fclone_ref, 1);
 224
 225		child->fclone = SKB_FCLONE_UNAVAILABLE;
 226	}
 227out:
 228	return skb;
 229nodata:
 230	kmem_cache_free(cache, skb);
 231	skb = NULL;
 232	goto out;
 233}
 234EXPORT_SYMBOL(__alloc_skb);
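/*
 * Illustrative sketch, not part of the original file: a typical caller goes
 * through alloc_skb() (a wrapper around __alloc_skb()), reserves headroom
 * for the headers it will push later and then appends its payload. The
 * example_* name and the ETH_HLEN worth of headroom are assumptions.
 */
static struct sk_buff *example_build_skb(const void *payload, unsigned int len)
{
	struct sk_buff *skb = alloc_skb(len + ETH_HLEN, GFP_ATOMIC);

	if (!skb)
		return NULL;

	skb_reserve(skb, ETH_HLEN);			/* headroom for a link header */
	memcpy(skb_put(skb, len), payload, len);	/* tail room becomes data */
	return skb;
}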
 235
 236/**
 237 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 238 *	@dev: network device to receive on
 239 *	@length: length to allocate
 240 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 241 *
 242 *	Allocate a new &sk_buff and assign it a usage count of one. The
 243 *	buffer has unspecified headroom built in. Users should allocate
 244 *	the headroom they think they need without accounting for the
 245 *	built in space. The built in space is used for optimisations.
 246 *
 247 *	%NULL is returned if there is no free memory.
 248 */
 249struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
 250		unsigned int length, gfp_t gfp_mask)
 251{
 252	struct sk_buff *skb;
 253
 254	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, NUMA_NO_NODE);
 255	if (likely(skb)) {
 256		skb_reserve(skb, NET_SKB_PAD);
 257		skb->dev = dev;
 258	}
 259	return skb;
 260}
 261EXPORT_SYMBOL(__netdev_alloc_skb);
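/*
 * Illustrative sketch, not part of the original file: an RX path would call
 * netdev_alloc_skb() (which wraps __netdev_alloc_skb() with %GFP_ATOMIC),
 * copy or DMA the received frame into it and hand it to the stack. The
 * example_* name and the rx_buf parameter are assumptions.
 */
static int example_rx_one_frame(struct net_device *dev,
				const void *rx_buf, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), rx_buf, len);
	/* a real driver would also set skb->protocol here, typically via
	 * eth_type_trans() from <linux/etherdevice.h>
	 */
	return netif_rx(skb);
}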
 262
 263void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 264		int size)
 265{
 266	skb_fill_page_desc(skb, i, page, off, size);
 267	skb->len += size;
 268	skb->data_len += size;
 269	skb->truesize += size;
 270}
 271EXPORT_SYMBOL(skb_add_rx_frag);
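/*
 * Illustrative sketch, not part of the original file: a driver receiving
 * into pages can attach a page to the skb with skb_add_rx_frag() instead of
 * copying. Taking the extra page reference here is an assumption about how
 * the hypothetical caller manages its pages.
 */
static void example_attach_rx_page(struct sk_buff *skb, struct page *page,
				   int offset, int len)
{
	int i = skb_shinfo(skb)->nr_frags;

	get_page(page);		/* the frag keeps its own reference */
	skb_add_rx_frag(skb, i, page, offset, len);
}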
 272
 273/**
 274 *	dev_alloc_skb - allocate an skbuff for receiving
 275 *	@length: length to allocate
 276 *
 277 *	Allocate a new &sk_buff and assign it a usage count of one. The
 278 *	buffer has unspecified headroom built in. Users should allocate
 279 *	the headroom they think they need without accounting for the
 280 *	built in space. The built in space is used for optimisations.
 281 *
 282 *	%NULL is returned if there is no free memory. Although this function
 283 *	allocates memory it can be called from an interrupt.
 284 */
 285struct sk_buff *dev_alloc_skb(unsigned int length)
 286{
 287	/*
 288	 * There is more code here than it seems:
 289	 * __dev_alloc_skb is an inline
 290	 */
 291	return __dev_alloc_skb(length, GFP_ATOMIC);
 
 292}
 293EXPORT_SYMBOL(dev_alloc_skb);
 294
 295static void skb_drop_list(struct sk_buff **listp)
 296{
 297	struct sk_buff *list = *listp;
 298
 299	*listp = NULL;
 300
 301	do {
 302		struct sk_buff *this = list;
 303		list = list->next;
 304		kfree_skb(this);
 305	} while (list);
 306}
 307
 308static inline void skb_drop_fraglist(struct sk_buff *skb)
 309{
 310	skb_drop_list(&skb_shinfo(skb)->frag_list);
 311}
 312
 313static void skb_clone_fraglist(struct sk_buff *skb)
 314{
 315	struct sk_buff *list;
 316
 317	skb_walk_frags(skb, list)
 318		skb_get(list);
 319}
 320
 321static void skb_release_data(struct sk_buff *skb)
 322{
 323	if (!skb->cloned ||
 324	    !atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
 325			       &skb_shinfo(skb)->dataref)) {
 326		if (skb_shinfo(skb)->nr_frags) {
 327			int i;
 328			for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 329				put_page(skb_shinfo(skb)->frags[i].page);
 330		}
 331
 332		/*
  333		 * If the skb data buffer came from userspace, we need to notify
  334		 * the caller that the lower device DMA has finished;
 335		 */
 336		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 337			struct ubuf_info *uarg;
 338
 339			uarg = skb_shinfo(skb)->destructor_arg;
 340			if (uarg->callback)
 341				uarg->callback(uarg);
 342		}
 343
 344		if (skb_has_frag_list(skb))
 345			skb_drop_fraglist(skb);
 346
 347		kfree(skb->head);
 348	}
 349}
 350
 351/*
 352 *	Free an skbuff by memory without cleaning the state.
 353 */
 354static void kfree_skbmem(struct sk_buff *skb)
 355{
 356	struct sk_buff *other;
 357	atomic_t *fclone_ref;
 358
 359	switch (skb->fclone) {
 360	case SKB_FCLONE_UNAVAILABLE:
 361		kmem_cache_free(skbuff_head_cache, skb);
 362		break;
 363
 364	case SKB_FCLONE_ORIG:
 365		fclone_ref = (atomic_t *) (skb + 2);
 366		if (atomic_dec_and_test(fclone_ref))
 367			kmem_cache_free(skbuff_fclone_cache, skb);
 368		break;
 369
 370	case SKB_FCLONE_CLONE:
 371		fclone_ref = (atomic_t *) (skb + 1);
 372		other = skb - 1;
 373
 374		/* The clone portion is available for
 375		 * fast-cloning again.
 
 376		 */
 377		skb->fclone = SKB_FCLONE_UNAVAILABLE;
 378
 379		if (atomic_dec_and_test(fclone_ref))
 380			kmem_cache_free(skbuff_fclone_cache, other);
 381		break;
 382	}
 383}
 384
 385static void skb_release_head_state(struct sk_buff *skb)
 386{
 387	skb_dst_drop(skb);
 388#ifdef CONFIG_XFRM
 389	secpath_put(skb->sp);
 390#endif
 391	if (skb->destructor) {
 392		WARN_ON(in_irq());
 393		skb->destructor(skb);
 394	}
 395#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 396	nf_conntrack_put(skb->nfct);
 397#endif
 398#ifdef NET_SKBUFF_NF_DEFRAG_NEEDED
 399	nf_conntrack_put_reasm(skb->nfct_reasm);
 400#endif
 401#ifdef CONFIG_BRIDGE_NETFILTER
 402	nf_bridge_put(skb->nf_bridge);
 403#endif
  404/* XXX: Is this still necessary? - JHS */
 405#ifdef CONFIG_NET_SCHED
 406	skb->tc_index = 0;
 407#ifdef CONFIG_NET_CLS_ACT
 408	skb->tc_verd = 0;
 409#endif
 410#endif
 411}
 412
 413/* Free everything but the sk_buff shell. */
 414static void skb_release_all(struct sk_buff *skb)
 415{
 416	skb_release_head_state(skb);
 417	skb_release_data(skb);
 
 418}
 419
 420/**
 421 *	__kfree_skb - private function
 422 *	@skb: buffer
 423 *
 424 *	Free an sk_buff. Release anything attached to the buffer.
 425 *	Clean the state. This is an internal helper function. Users should
 426 *	always call kfree_skb
 427 */
 428
 429void __kfree_skb(struct sk_buff *skb)
 430{
 431	skb_release_all(skb);
 432	kfree_skbmem(skb);
 433}
 434EXPORT_SYMBOL(__kfree_skb);
 435
 436/**
 437 *	kfree_skb - free an sk_buff
 438 *	@skb: buffer to free
 439 *
 440 *	Drop a reference to the buffer and free it if the usage count has
 441 *	hit zero.
 442 */
 443void kfree_skb(struct sk_buff *skb)
 444{
 445	if (unlikely(!skb))
 446		return;
 447	if (likely(atomic_read(&skb->users) == 1))
 448		smp_rmb();
 449	else if (likely(!atomic_dec_and_test(&skb->users)))
 450		return;
 
 451	trace_kfree_skb(skb, __builtin_return_address(0));
 452	__kfree_skb(skb);
 453}
 454EXPORT_SYMBOL(kfree_skb);
 455
 456/**
 457 *	consume_skb - free an skbuff
 458 *	@skb: buffer to free
 459 *
  460 *	Drop a reference to the buffer and free it if the usage count has hit
  461 *	zero. Functions identically to kfree_skb(), except that kfree_skb()
  462 *	assumes the frame is being dropped after a failure and notes that.
 463 */
 464void consume_skb(struct sk_buff *skb)
 465{
 466	if (unlikely(!skb))
 467		return;
 468	if (likely(atomic_read(&skb->users) == 1))
 469		smp_rmb();
 470	else if (likely(!atomic_dec_and_test(&skb->users)))
 471		return;
 
 472	trace_consume_skb(skb);
 473	__kfree_skb(skb);
 474}
 475EXPORT_SYMBOL(consume_skb);
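/*
 * Illustrative sketch, not part of the original file: kfree_skb() and
 * consume_skb() free the buffer the same way and differ only in the
 * tracepoint they fire, so a TX completion path uses consume_skb() while an
 * error path uses kfree_skb(). The example_* name and "ok" flag are
 * assumptions.
 */
static void example_tx_complete(struct sk_buff *skb, bool ok)
{
	if (ok)
		consume_skb(skb);	/* normal completion, not a drop */
	else
		kfree_skb(skb);		/* accounted as a dropped frame */
}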
 476
 477/**
 478 *	skb_recycle_check - check if skb can be reused for receive
 479 *	@skb: buffer
 480 *	@skb_size: minimum receive buffer size
 481 *
 482 *	Checks that the skb passed in is not shared or cloned, and
 483 *	that it is linear and its head portion at least as large as
 484 *	skb_size so that it can be recycled as a receive buffer.
 485 *	If these conditions are met, this function does any necessary
 486 *	reference count dropping and cleans up the skbuff as if it
 487 *	just came from __alloc_skb().
 488 */
 489bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 490{
 491	struct skb_shared_info *shinfo;
 492
 493	if (irqs_disabled())
 494		return false;
 
 495
 496	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY)
 497		return false;
 498
 499	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
 500		return false;
 
 501
 502	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 503	if (skb_end_pointer(skb) - skb->head < skb_size)
 504		return false;
 505
 506	if (skb_shared(skb) || skb_cloned(skb))
 507		return false;
 508
 509	skb_release_head_state(skb);
 510
 511	shinfo = skb_shinfo(skb);
 512	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 513	atomic_set(&shinfo->dataref, 1);
 514
 515	memset(skb, 0, offsetof(struct sk_buff, tail));
 516	skb->data = skb->head + NET_SKB_PAD;
 517	skb_reset_tail_pointer(skb);
 
 518
 519	return true;
 520}
 521EXPORT_SYMBOL(skb_recycle_check);
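/*
 * Illustrative sketch, not part of the original file: a driver may try to
 * reuse a just-completed TX skb as an RX buffer and fall back to a fresh
 * allocation when skb_recycle_check() refuses. The helper name and the
 * single-skb "pool" are assumptions.
 */
static struct sk_buff *example_get_rx_skb(struct net_device *dev,
					  struct sk_buff *done_skb,
					  unsigned int rx_size)
{
	if (done_skb && skb_recycle_check(done_skb, rx_size))
		return done_skb;	/* reset as if freshly allocated */

	if (done_skb)
		dev_kfree_skb_any(done_skb);
	return netdev_alloc_skb(dev, rx_size);
}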
 522
 523static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 524{
 525	new->tstamp		= old->tstamp;
 
 526	new->dev		= old->dev;
 527	new->transport_header	= old->transport_header;
 528	new->network_header	= old->network_header;
 529	new->mac_header		= old->mac_header;
 530	skb_dst_copy(new, old);
 531	new->rxhash		= old->rxhash;
 532#ifdef CONFIG_XFRM
 533	new->sp			= secpath_get(old->sp);
 534#endif
 535	memcpy(new->cb, old->cb, sizeof(old->cb));
 536	new->csum		= old->csum;
 537	new->local_df		= old->local_df;
 538	new->pkt_type		= old->pkt_type;
 539	new->ip_summed		= old->ip_summed;
 540	skb_copy_queue_mapping(new, old);
 541	new->priority		= old->priority;
 542#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
 543	new->ipvs_property	= old->ipvs_property;
 544#endif
 545	new->protocol		= old->protocol;
 546	new->mark		= old->mark;
 547	new->skb_iif		= old->skb_iif;
 548	__nf_copy(new, old);
 549#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
 550    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
 551	new->nf_trace		= old->nf_trace;
 552#endif
 553#ifdef CONFIG_NET_SCHED
 554	new->tc_index		= old->tc_index;
 555#ifdef CONFIG_NET_CLS_ACT
 556	new->tc_verd		= old->tc_verd;
 557#endif
 558#endif
 559	new->vlan_tci		= old->vlan_tci;
 560
 561	skb_copy_secmark(new, old);
 562}
 563
 564/*
 565 * You should not add any new code to this function.  Add it to
 566 * __copy_skb_header above instead.
 567 */
 568static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 569{
 570#define C(x) n->x = skb->x
 571
 572	n->next = n->prev = NULL;
 573	n->sk = NULL;
 574	__copy_skb_header(n, skb);
 575
 576	C(len);
 577	C(data_len);
 578	C(mac_len);
 579	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 580	n->cloned = 1;
 581	n->nohdr = 0;
 
 582	n->destructor = NULL;
 583	C(tail);
 584	C(end);
 585	C(head);
 
 586	C(data);
 587	C(truesize);
 588	atomic_set(&n->users, 1);
 589
 590	atomic_inc(&(skb_shinfo(skb)->dataref));
 591	skb->cloned = 1;
 592
 593	return n;
 594#undef C
 595}
 596
 597/**
 598 *	skb_morph	-	morph one skb into another
 599 *	@dst: the skb to receive the contents
 600 *	@src: the skb to supply the contents
 601 *
 602 *	This is identical to skb_clone except that the target skb is
 603 *	supplied by the user.
 604 *
 605 *	The target skb is returned upon exit.
 606 */
 607struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 608{
 609	skb_release_all(dst);
 610	return __skb_clone(dst, src);
 611}
 612EXPORT_SYMBOL_GPL(skb_morph);
 613
 614/*	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
 615 *	@skb: the skb to modify
 616 *	@gfp_mask: allocation priority
 617 *
 618 *	This must be called on SKBTX_DEV_ZEROCOPY skb.
 619 *	It will copy all frags into kernel and drop the reference
 620 *	to userspace pages.
 621 *
 622 *	If this function is called from an interrupt gfp_mask() must be
 623 *	%GFP_ATOMIC.
 624 *
 625 *	Returns 0 on success or a negative error code on failure
 626 *	to allocate kernel memory to copy to.
 627 */
 628int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
 629{
 630	int i;
 631	int num_frags = skb_shinfo(skb)->nr_frags;
 632	struct page *page, *head = NULL;
 633	struct ubuf_info *uarg = skb_shinfo(skb)->destructor_arg;
 
 634
 635	for (i = 0; i < num_frags; i++) {
 636		u8 *vaddr;
 637		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
 638
 639		page = alloc_page(GFP_ATOMIC);
 640		if (!page) {
 641			while (head) {
 642				struct page *next = (struct page *)head->private;
 643				put_page(head);
 644				head = next;
 645			}
 646			return -ENOMEM;
 647		}
 648		vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
 649		memcpy(page_address(page),
 650		       vaddr + f->page_offset, f->size);
 651		kunmap_skb_frag(vaddr);
 652		page->private = (unsigned long)head;
 653		head = page;
 654	}
 655
 656	/* skb frags release userspace buffers */
 657	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 658		put_page(skb_shinfo(skb)->frags[i].page);
 659
 660	uarg->callback(uarg);
 661
 662	/* skb frags point to kernel buffers */
 663	for (i = skb_shinfo(skb)->nr_frags; i > 0; i--) {
 664		skb_shinfo(skb)->frags[i - 1].page_offset = 0;
 665		skb_shinfo(skb)->frags[i - 1].page = head;
 666		head = (struct page *)head->private;
 667	}
 668
 669	skb_shinfo(skb)->tx_flags &= ~SKBTX_DEV_ZEROCOPY;
 
 670	return 0;
 671}
 672
 673
 674/**
 675 *	skb_clone	-	duplicate an sk_buff
 676 *	@skb: buffer to clone
 677 *	@gfp_mask: allocation priority
 678 *
 679 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
 680 *	copies share the same packet data but not structure. The new
 681 *	buffer has a reference count of 1. If the allocation fails the
 682 *	function returns %NULL otherwise the new buffer is returned.
 683 *
 684 *	If this function is called from an interrupt gfp_mask() must be
 685 *	%GFP_ATOMIC.
 686 */
 687
 688struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
 689{
 690	struct sk_buff *n;
 691
 692	if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 693		if (skb_copy_ubufs(skb, gfp_mask))
 694			return NULL;
 695	}
 696
 697	n = skb + 1;
 698	if (skb->fclone == SKB_FCLONE_ORIG &&
 699	    n->fclone == SKB_FCLONE_UNAVAILABLE) {
 700		atomic_t *fclone_ref = (atomic_t *) (n + 1);
 701		n->fclone = SKB_FCLONE_CLONE;
 702		atomic_inc(fclone_ref);
 703	} else {
 704		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
 705		if (!n)
 706			return NULL;
 707
 708		kmemcheck_annotate_bitfield(n, flags1);
 709		kmemcheck_annotate_bitfield(n, flags2);
 710		n->fclone = SKB_FCLONE_UNAVAILABLE;
 711	}
 712
 713	return __skb_clone(n, skb);
 714}
 715EXPORT_SYMBOL(skb_clone);
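/*
 * Illustrative sketch, not part of the original file: a sender that must keep
 * a packet around for retransmission clones it and transmits the clone, so
 * both sk_buffs share one data area. The queue argument and example_* name
 * are assumptions.
 */
static int example_xmit_keep_original(struct sk_buff *skb,
				      struct sk_buff_head *retransmit_queue)
{
	struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

	if (!clone)
		return -ENOMEM;

	skb_queue_tail(retransmit_queue, skb);	/* keep the original */
	return dev_queue_xmit(clone);		/* hand the clone to the device */
}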
 716
 717static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 718{
 719#ifndef NET_SKBUFF_DATA_USES_OFFSET
 720	/*
 721	 *	Shift between the two data areas in bytes
 722	 */
 723	unsigned long offset = new->data - old->data;
 724#endif
 725
 726	__copy_skb_header(new, old);
 727
 728#ifndef NET_SKBUFF_DATA_USES_OFFSET
 729	/* {transport,network,mac}_header are relative to skb->head */
 730	new->transport_header += offset;
 731	new->network_header   += offset;
 732	if (skb_mac_header_was_set(new))
 733		new->mac_header	      += offset;
 734#endif
 735	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
 736	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
 737	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
 738}
 739
 740/**
 741 *	skb_copy	-	create private copy of an sk_buff
 742 *	@skb: buffer to copy
 743 *	@gfp_mask: allocation priority
 744 *
 745 *	Make a copy of both an &sk_buff and its data. This is used when the
 746 *	caller wishes to modify the data and needs a private copy of the
 747 *	data to alter. Returns %NULL on failure or the pointer to the buffer
 748 *	on success. The returned buffer has a reference count of 1.
 749 *
 750 *	As by-product this function converts non-linear &sk_buff to linear
 751 *	one, so that &sk_buff becomes completely private and caller is allowed
 752 *	to modify all the data of returned buffer. This means that this
 753 *	function is not recommended for use in circumstances when only
 754 *	header is going to be modified. Use pskb_copy() instead.
 755 */
 756
 757struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
 758{
 759	int headerlen = skb_headroom(skb);
 760	unsigned int size = (skb_end_pointer(skb) - skb->head) + skb->data_len;
 761	struct sk_buff *n = alloc_skb(size, gfp_mask);
 
 762
 763	if (!n)
 764		return NULL;
 765
 766	/* Set the data pointer */
 767	skb_reserve(n, headerlen);
 768	/* Set the tail pointer and length */
 769	skb_put(n, skb->len);
 770
 771	if (skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len))
 772		BUG();
 773
 774	copy_skb_header(n, skb);
 775	return n;
 776}
 777EXPORT_SYMBOL(skb_copy);
 778
 779/**
 780 *	pskb_copy	-	create copy of an sk_buff with private head.
 781 *	@skb: buffer to copy
 
 782 *	@gfp_mask: allocation priority
 783 *
 784 *	Make a copy of both an &sk_buff and part of its data, located
 785 *	in header. Fragmented data remain shared. This is used when
 786 *	the caller wishes to modify only header of &sk_buff and needs
 787 *	private copy of the header to alter. Returns %NULL on failure
 788 *	or the pointer to the buffer on success.
 789 *	The returned buffer has a reference count of 1.
 790 */
 791
 792struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask)
 
 793{
 794	unsigned int size = skb_end_pointer(skb) - skb->head;
 795	struct sk_buff *n = alloc_skb(size, gfp_mask);
 
 796
 797	if (!n)
 798		goto out;
 799
 800	/* Set the data pointer */
 801	skb_reserve(n, skb_headroom(skb));
 802	/* Set the tail pointer and length */
 803	skb_put(n, skb_headlen(skb));
 804	/* Copy the bytes */
 805	skb_copy_from_linear_data(skb, n->data, n->len);
 806
 807	n->truesize += skb->data_len;
 808	n->data_len  = skb->data_len;
 809	n->len	     = skb->len;
 810
 811	if (skb_shinfo(skb)->nr_frags) {
 812		int i;
 813
 814		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 815			if (skb_copy_ubufs(skb, gfp_mask)) {
 816				kfree_skb(n);
 817				n = NULL;
 818				goto out;
 819			}
 820		}
 821		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 822			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
 823			get_page(skb_shinfo(n)->frags[i].page);
 824		}
 825		skb_shinfo(n)->nr_frags = i;
 826	}
 827
 828	if (skb_has_frag_list(skb)) {
 829		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
 830		skb_clone_fraglist(n);
 831	}
 832
 833	copy_skb_header(n, skb);
 834out:
 835	return n;
 836}
 837EXPORT_SYMBOL(pskb_copy);
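/*
 * Illustrative sketch, not part of the original file: when only the linear
 * header needs to change, pskb_copy() is cheaper than skb_copy() because the
 * paged fragments stay shared. Scrubbing the first few header bytes below is
 * a stand-in for whatever edit a real caller would make.
 */
static struct sk_buff *example_edit_header(struct sk_buff *skb)
{
	struct sk_buff *n = pskb_copy(skb, GFP_ATOMIC);

	if (n)	/* the linear header is private now; frags are still shared */
		memset(n->data, 0, min_t(unsigned int, skb_headlen(n), 8));
	return n;
}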
 838
 839/**
 840 *	pskb_expand_head - reallocate header of &sk_buff
 841 *	@skb: buffer to reallocate
 842 *	@nhead: room to add at head
 843 *	@ntail: room to add at tail
 844 *	@gfp_mask: allocation priority
 845 *
  846 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
  847 *	the header of the skb. The &sk_buff itself is not changed and MUST have
  848 *	a reference count of 1. Returns zero on success or a negative error
  849 *	code if expansion failed; in that case, the &sk_buff is not changed.
 850 *
 851 *	All the pointers pointing into skb header may change and must be
 852 *	reloaded after call to this function.
 853 */
 854
 855int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
 856		     gfp_t gfp_mask)
 857{
 858	int i;
 859	u8 *data;
 860	int size = nhead + (skb_end_pointer(skb) - skb->head) + ntail;
 861	long off;
 862	bool fastpath;
 863
 864	BUG_ON(nhead < 0);
 865
 866	if (skb_shared(skb))
 867		BUG();
 868
 869	size = SKB_DATA_ALIGN(size);
 870
 871	/* Check if we can avoid taking references on fragments if we own
 872	 * the last reference on skb->head. (see skb_release_data())
 873	 */
 874	if (!skb->cloned)
 875		fastpath = true;
 876	else {
 877		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
 878		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
 879	}
 880
 881	if (fastpath &&
 882	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
 883		memmove(skb->head + size, skb_shinfo(skb),
 884			offsetof(struct skb_shared_info,
 885				 frags[skb_shinfo(skb)->nr_frags]));
 886		memmove(skb->head + nhead, skb->head,
 887			skb_tail_pointer(skb) - skb->head);
 888		off = nhead;
 889		goto adjust_others;
 890	}
 891
 892	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 893	if (!data)
 894		goto nodata;
 
 895
 896	/* Copy only real data... and, alas, header. This should be
 897	 * optimized for the cases when header is void.
 898	 */
 899	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
 900
 901	memcpy((struct skb_shared_info *)(data + size),
 902	       skb_shinfo(skb),
 903	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 904
 905	if (fastpath) {
 906		kfree(skb->head);
 907	} else {
 908		/* copy this zero copy skb frags */
 909		if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
 910			if (skb_copy_ubufs(skb, gfp_mask))
 911				goto nofrags;
 912		}
 913		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
 914			get_page(skb_shinfo(skb)->frags[i].page);
 915
 916		if (skb_has_frag_list(skb))
 917			skb_clone_fraglist(skb);
 918
 919		skb_release_data(skb);
 920	}
 921	off = (data + nhead) - skb->head;
 922
 923	skb->head     = data;
 924adjust_others:
 925	skb->data    += off;
 926#ifdef NET_SKBUFF_DATA_USES_OFFSET
 927	skb->end      = size;
 928	off           = nhead;
 929#else
 930	skb->end      = skb->head + size;
 931#endif
 932	/* {transport,network,mac}_header and tail are relative to skb->head */
 933	skb->tail	      += off;
 934	skb->transport_header += off;
 935	skb->network_header   += off;
 936	if (skb_mac_header_was_set(skb))
 937		skb->mac_header += off;
 938	/* Only adjust this if it actually is csum_start rather than csum */
 939	if (skb->ip_summed == CHECKSUM_PARTIAL)
 940		skb->csum_start += nhead;
 941	skb->cloned   = 0;
 942	skb->hdr_len  = 0;
 943	skb->nohdr    = 0;
 944	atomic_set(&skb_shinfo(skb)->dataref, 1);
 945	return 0;
 946
 947nofrags:
 948	kfree(data);
 949nodata:
 950	return -ENOMEM;
 951}
 952EXPORT_SYMBOL(pskb_expand_head);
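/*
 * Illustrative sketch, not part of the original file: an encapsulation path
 * that is about to push an outer header can grow the headroom with
 * pskb_expand_head() when the skb is cloned or too small, remembering that
 * every cached pointer into the header is stale afterwards. The example_*
 * name and the 'needed' parameter are assumptions.
 */
static int example_make_headroom(struct sk_buff *skb, unsigned int needed)
{
	int delta = needed - skb_headroom(skb);

	if (delta <= 0 && !skb_cloned(skb))
		return 0;		/* already enough private headroom */

	return pskb_expand_head(skb, delta > 0 ? SKB_DATA_ALIGN(delta) : 0,
				0, GFP_ATOMIC);
}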
 953
 954/* Make private copy of skb with writable head and some headroom */
 955
 956struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
 957{
 958	struct sk_buff *skb2;
 959	int delta = headroom - skb_headroom(skb);
 960
 961	if (delta <= 0)
 962		skb2 = pskb_copy(skb, GFP_ATOMIC);
 963	else {
 964		skb2 = skb_clone(skb, GFP_ATOMIC);
 965		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
 966					     GFP_ATOMIC)) {
 967			kfree_skb(skb2);
 968			skb2 = NULL;
 969		}
 970	}
 971	return skb2;
 972}
 973EXPORT_SYMBOL(skb_realloc_headroom);
 974
 975/**
 976 *	skb_copy_expand	-	copy and expand sk_buff
 977 *	@skb: buffer to copy
 978 *	@newheadroom: new free bytes at head
 979 *	@newtailroom: new free bytes at tail
 980 *	@gfp_mask: allocation priority
 981 *
 982 *	Make a copy of both an &sk_buff and its data and while doing so
 983 *	allocate additional space.
 984 *
 985 *	This is used when the caller wishes to modify the data and needs a
 986 *	private copy of the data to alter as well as more space for new fields.
 987 *	Returns %NULL on failure or the pointer to the buffer
 988 *	on success. The returned buffer has a reference count of 1.
 989 *
 990 *	You must pass %GFP_ATOMIC as the allocation priority if this function
 991 *	is called from an interrupt.
 992 */
 993struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
 994				int newheadroom, int newtailroom,
 995				gfp_t gfp_mask)
 996{
 997	/*
 998	 *	Allocate the copy buffer
 999	 */
1000	struct sk_buff *n = alloc_skb(newheadroom + skb->len + newtailroom,
1001				      gfp_mask);
 
1002	int oldheadroom = skb_headroom(skb);
1003	int head_copy_len, head_copy_off;
1004	int off;
1005
1006	if (!n)
1007		return NULL;
1008
1009	skb_reserve(n, newheadroom);
1010
1011	/* Set the tail pointer and length */
1012	skb_put(n, skb->len);
1013
1014	head_copy_len = oldheadroom;
1015	head_copy_off = 0;
1016	if (newheadroom <= head_copy_len)
1017		head_copy_len = newheadroom;
1018	else
1019		head_copy_off = newheadroom - head_copy_len;
1020
1021	/* Copy the linear header and data. */
1022	if (skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1023			  skb->len + head_copy_len))
1024		BUG();
1025
1026	copy_skb_header(n, skb);
1027
1028	off                  = newheadroom - oldheadroom;
1029	if (n->ip_summed == CHECKSUM_PARTIAL)
1030		n->csum_start += off;
1031#ifdef NET_SKBUFF_DATA_USES_OFFSET
1032	n->transport_header += off;
1033	n->network_header   += off;
1034	if (skb_mac_header_was_set(skb))
1035		n->mac_header += off;
1036#endif
1037
1038	return n;
1039}
1040EXPORT_SYMBOL(skb_copy_expand);
1041
1042/**
1043 *	skb_pad			-	zero pad the tail of an skb
1044 *	@skb: buffer to pad
1045 *	@pad: space to pad
 
1046 *
1047 *	Ensure that a buffer is followed by a padding area that is zero
1048 *	filled. Used by network drivers which may DMA or transfer data
1049 *	beyond the buffer end onto the wire.
1050 *
1051 *	May return error in out of memory cases. The skb is freed on error.
 
1052 */
1053
1054int skb_pad(struct sk_buff *skb, int pad)
1055{
1056	int err;
1057	int ntail;
1058
 1059	/* If the skbuff is non-linear, tailroom is always zero. */
1060	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1061		memset(skb->data+skb->len, 0, pad);
1062		return 0;
1063	}
1064
1065	ntail = skb->data_len + pad - (skb->end - skb->tail);
1066	if (likely(skb_cloned(skb) || ntail > 0)) {
1067		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1068		if (unlikely(err))
1069			goto free_skb;
1070	}
1071
1072	/* FIXME: The use of this function with non-linear skb's really needs
1073	 * to be audited.
1074	 */
1075	err = skb_linearize(skb);
1076	if (unlikely(err))
1077		goto free_skb;
1078
1079	memset(skb->data + skb->len, 0, pad);
1080	return 0;
1081
1082free_skb:
1083	kfree_skb(skb);
 
1084	return err;
1085}
1086EXPORT_SYMBOL(skb_pad);
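/*
 * Illustrative sketch, not part of the original file: an Ethernet driver that
 * cannot pad in hardware zero-fills short frames up to the 60-byte minimum
 * before handing them to DMA; note that skb_pad() frees the skb on failure,
 * so the caller must not touch it after an error.
 */
static int example_pad_short_frame(struct sk_buff *skb)
{
	if (skb->len >= ETH_ZLEN)
		return 0;

	return skb_pad(skb, ETH_ZLEN - skb->len);	/* skb is freed on error */
}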
1087
1088/**
1089 *	skb_put - add data to a buffer
1090 *	@skb: buffer to use
1091 *	@len: amount of data to add
1092 *
1093 *	This function extends the used data area of the buffer. If this would
1094 *	exceed the total buffer size the kernel will panic. A pointer to the
1095 *	first byte of the extra data is returned.
1096 */
1097unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
1098{
1099	unsigned char *tmp = skb_tail_pointer(skb);
1100	SKB_LINEAR_ASSERT(skb);
1101	skb->tail += len;
1102	skb->len  += len;
1103	if (unlikely(skb->tail > skb->end))
1104		skb_over_panic(skb, len, __builtin_return_address(0));
1105	return tmp;
1106}
1107EXPORT_SYMBOL(skb_put);
1108
1109/**
1110 *	skb_push - add data to the start of a buffer
1111 *	@skb: buffer to use
1112 *	@len: amount of data to add
1113 *
1114 *	This function extends the used data area of the buffer at the buffer
1115 *	start. If this would exceed the total buffer headroom the kernel will
1116 *	panic. A pointer to the first byte of the extra data is returned.
1117 */
1118unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
1119{
1120	skb->data -= len;
1121	skb->len  += len;
1122	if (unlikely(skb->data<skb->head))
1123		skb_under_panic(skb, len, __builtin_return_address(0));
1124	return skb->data;
1125}
1126EXPORT_SYMBOL(skb_push);
1127
1128/**
1129 *	skb_pull - remove data from the start of a buffer
1130 *	@skb: buffer to use
1131 *	@len: amount of data to remove
1132 *
1133 *	This function removes data from the start of a buffer, returning
1134 *	the memory to the headroom. A pointer to the next data in the buffer
1135 *	is returned. Once the data has been pulled future pushes will overwrite
1136 *	the old data.
1137 */
1138unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
1139{
1140	return skb_pull_inline(skb, len);
1141}
1142EXPORT_SYMBOL(skb_pull);
1143
1144/**
1145 *	skb_trim - remove end from a buffer
1146 *	@skb: buffer to alter
1147 *	@len: new length
1148 *
1149 *	Cut the length of a buffer down by removing data from the tail. If
1150 *	the buffer is already under the length specified it is not modified.
1151 *	The skb must be linear.
1152 */
1153void skb_trim(struct sk_buff *skb, unsigned int len)
1154{
1155	if (skb->len > len)
1156		__skb_trim(skb, len);
1157}
1158EXPORT_SYMBOL(skb_trim);
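/*
 * Illustrative sketch, not part of the original file: the four helpers above
 * only move skb->data, skb->tail and skb->len around the linear buffer. The
 * sequence below assumes the skb was allocated with at least ETH_HLEN bytes
 * of reserved headroom and enough tailroom for the payload.
 */
static void example_pointer_dance(struct sk_buff *skb,
				  const void *payload, unsigned int len)
{
	memcpy(skb_put(skb, len), payload, len);	/* grow at the tail */
	skb_push(skb, ETH_HLEN);			/* prepend a link header */
	skb_pull(skb, ETH_HLEN);			/* ...and strip it again */
	skb_trim(skb, len / 2);				/* drop the tail half */
}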
1159
1160/* Trims skb to length len. It can change skb pointers.
1161 */
1162
1163int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1164{
1165	struct sk_buff **fragp;
1166	struct sk_buff *frag;
1167	int offset = skb_headlen(skb);
1168	int nfrags = skb_shinfo(skb)->nr_frags;
1169	int i;
1170	int err;
1171
1172	if (skb_cloned(skb) &&
1173	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1174		return err;
1175
1176	i = 0;
1177	if (offset >= len)
1178		goto drop_pages;
1179
1180	for (; i < nfrags; i++) {
1181		int end = offset + skb_shinfo(skb)->frags[i].size;
1182
1183		if (end < len) {
1184			offset = end;
1185			continue;
1186		}
1187
1188		skb_shinfo(skb)->frags[i++].size = len - offset;
1189
1190drop_pages:
1191		skb_shinfo(skb)->nr_frags = i;
1192
1193		for (; i < nfrags; i++)
1194			put_page(skb_shinfo(skb)->frags[i].page);
1195
1196		if (skb_has_frag_list(skb))
1197			skb_drop_fraglist(skb);
1198		goto done;
1199	}
1200
1201	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1202	     fragp = &frag->next) {
1203		int end = offset + frag->len;
1204
1205		if (skb_shared(frag)) {
1206			struct sk_buff *nfrag;
1207
1208			nfrag = skb_clone(frag, GFP_ATOMIC);
1209			if (unlikely(!nfrag))
1210				return -ENOMEM;
1211
1212			nfrag->next = frag->next;
1213			kfree_skb(frag);
1214			frag = nfrag;
1215			*fragp = frag;
1216		}
1217
1218		if (end < len) {
1219			offset = end;
1220			continue;
1221		}
1222
1223		if (end > len &&
1224		    unlikely((err = pskb_trim(frag, len - offset))))
1225			return err;
1226
1227		if (frag->next)
1228			skb_drop_list(&frag->next);
1229		break;
1230	}
1231
1232done:
1233	if (len > skb_headlen(skb)) {
1234		skb->data_len -= skb->len - len;
1235		skb->len       = len;
1236	} else {
1237		skb->len       = len;
1238		skb->data_len  = 0;
1239		skb_set_tail_pointer(skb, len);
1240	}
1241
1242	return 0;
1243}
1244EXPORT_SYMBOL(___pskb_trim);
1245
1246/**
1247 *	__pskb_pull_tail - advance tail of skb header
1248 *	@skb: buffer to reallocate
1249 *	@delta: number of bytes to advance tail
1250 *
 1251 *	The function makes sense only on a fragmented &sk_buff;
 1252 *	it expands the header, moving its tail forward and copying the
 1253 *	necessary data from the fragmented part.
1254 *
1255 *	&sk_buff MUST have reference count of 1.
1256 *
1257 *	Returns %NULL (and &sk_buff does not change) if pull failed
1258 *	or value of new tail of skb in the case of success.
1259 *
1260 *	All the pointers pointing into skb header may change and must be
1261 *	reloaded after call to this function.
1262 */
1263
1264/* Moves tail of skb head forward, copying data from fragmented part,
1265 * when it is necessary.
1266 * 1. It may fail due to malloc failure.
1267 * 2. It may change skb pointers.
1268 *
1269 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1270 */
1271unsigned char *__pskb_pull_tail(struct sk_buff *skb, int delta)
1272{
 1273	/* If the skb does not have enough free space at the tail, get a new one
 1274	 * plus 128 bytes for future expansions. If we have enough room at the
 1275	 * tail, reallocate without expansion only if the skb is cloned.
1276	 */
1277	int i, k, eat = (skb->tail + delta) - skb->end;
1278
1279	if (eat > 0 || skb_cloned(skb)) {
1280		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1281				     GFP_ATOMIC))
1282			return NULL;
1283	}
1284
1285	if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta))
1286		BUG();
1287
1288	/* Optimization: no fragments, no reasons to preestimate
1289	 * size of pulled pages. Superb.
1290	 */
1291	if (!skb_has_frag_list(skb))
1292		goto pull_pages;
1293
1294	/* Estimate size of pulled pages. */
1295	eat = delta;
1296	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1297		if (skb_shinfo(skb)->frags[i].size >= eat)
1298			goto pull_pages;
1299		eat -= skb_shinfo(skb)->frags[i].size;
1300	}
1301
 1302	/* If we need to update the frag list, we are in trouble.
 1303	 * Certainly, it is possible to add an offset to the skb data,
 1304	 * but taking into account that pulling is expected to
 1305	 * be a very rare operation, it is worth fighting against
 1306	 * further bloating the skb head and crucifying ourselves here instead.
 1307	 * Pure masochism, indeed. 8)8)
1308	 */
1309	if (eat) {
1310		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1311		struct sk_buff *clone = NULL;
1312		struct sk_buff *insp = NULL;
1313
1314		do {
1315			BUG_ON(!list);
1316
1317			if (list->len <= eat) {
1318				/* Eaten as whole. */
1319				eat -= list->len;
1320				list = list->next;
1321				insp = list;
1322			} else {
1323				/* Eaten partially. */
1324
1325				if (skb_shared(list)) {
1326					/* Sucks! We need to fork list. :-( */
1327					clone = skb_clone(list, GFP_ATOMIC);
1328					if (!clone)
1329						return NULL;
1330					insp = list->next;
1331					list = clone;
1332				} else {
1333					/* This may be pulled without
1334					 * problems. */
1335					insp = list;
1336				}
1337				if (!pskb_pull(list, eat)) {
1338					kfree_skb(clone);
1339					return NULL;
1340				}
1341				break;
1342			}
1343		} while (eat);
1344
1345		/* Free pulled out fragments. */
1346		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1347			skb_shinfo(skb)->frag_list = list->next;
1348			kfree_skb(list);
1349		}
1350		/* And insert new clone at head. */
1351		if (clone) {
1352			clone->next = list;
1353			skb_shinfo(skb)->frag_list = clone;
1354		}
1355	}
1356	/* Success! Now we may commit changes to skb data. */
1357
1358pull_pages:
1359	eat = delta;
1360	k = 0;
1361	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1362		if (skb_shinfo(skb)->frags[i].size <= eat) {
1363			put_page(skb_shinfo(skb)->frags[i].page);
1364			eat -= skb_shinfo(skb)->frags[i].size;
1365		} else {
1366			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1367			if (eat) {
1368				skb_shinfo(skb)->frags[k].page_offset += eat;
1369				skb_shinfo(skb)->frags[k].size -= eat;
1370				eat = 0;
1371			}
1372			k++;
1373		}
1374	}
1375	skb_shinfo(skb)->nr_frags = k;
1376
 
1377	skb->tail     += delta;
1378	skb->data_len -= delta;
1379
1380	return skb_tail_pointer(skb);
1381}
1382EXPORT_SYMBOL(__pskb_pull_tail);
1383
1384/**
1385 *	skb_copy_bits - copy bits from skb to kernel buffer
1386 *	@skb: source skb
1387 *	@offset: offset in source
1388 *	@to: destination buffer
1389 *	@len: number of bytes to copy
1390 *
1391 *	Copy the specified number of bytes from the source skb to the
1392 *	destination buffer.
1393 *
1394 *	CAUTION ! :
1395 *		If its prototype is ever changed,
1396 *		check arch/{*}/net/{*}.S files,
1397 *		since it is called from BPF assembly code.
1398 */
1399int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
1400{
1401	int start = skb_headlen(skb);
1402	struct sk_buff *frag_iter;
1403	int i, copy;
1404
1405	if (offset > (int)skb->len - len)
1406		goto fault;
1407
1408	/* Copy header. */
1409	if ((copy = start - offset) > 0) {
1410		if (copy > len)
1411			copy = len;
1412		skb_copy_from_linear_data_offset(skb, offset, to, copy);
1413		if ((len -= copy) == 0)
1414			return 0;
1415		offset += copy;
1416		to     += copy;
1417	}
1418
1419	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1420		int end;
 
1421
1422		WARN_ON(start > offset + len);
1423
1424		end = start + skb_shinfo(skb)->frags[i].size;
1425		if ((copy = end - offset) > 0) {
1426			u8 *vaddr;
1427
1428			if (copy > len)
1429				copy = len;
1430
1431			vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
1432			memcpy(to,
1433			       vaddr + skb_shinfo(skb)->frags[i].page_offset+
1434			       offset - start, copy);
1435			kunmap_skb_frag(vaddr);
1436
1437			if ((len -= copy) == 0)
1438				return 0;
1439			offset += copy;
1440			to     += copy;
1441		}
1442		start = end;
1443	}
1444
1445	skb_walk_frags(skb, frag_iter) {
1446		int end;
1447
1448		WARN_ON(start > offset + len);
1449
1450		end = start + frag_iter->len;
1451		if ((copy = end - offset) > 0) {
1452			if (copy > len)
1453				copy = len;
1454			if (skb_copy_bits(frag_iter, offset - start, to, copy))
1455				goto fault;
1456			if ((len -= copy) == 0)
1457				return 0;
1458			offset += copy;
1459			to     += copy;
1460		}
1461		start = end;
1462	}
1463
1464	if (!len)
1465		return 0;
1466
1467fault:
1468	return -EFAULT;
1469}
1470EXPORT_SYMBOL(skb_copy_bits);
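/*
 * Illustrative sketch, not part of the original file: skb_copy_bits() is the
 * safe way to read bytes that may live in the linear area, in page frags or
 * on the frag list, so peeking at the start of a packet works regardless of
 * the skb layout. The example_* name is an assumption.
 */
static int example_peek_start(const struct sk_buff *skb, void *buf,
			      unsigned int buf_len)
{
	if (skb->len < buf_len)
		return -EINVAL;

	return skb_copy_bits(skb, 0, buf, buf_len);	/* 0 on success */
}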
1471
1472/*
1473 * Callback from splice_to_pipe(), if we need to release some pages
1474 * at the end of the spd in case we error'ed out in filling the pipe.
1475 */
1476static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
1477{
1478	put_page(spd->pages[i]);
1479}
1480
1481static inline struct page *linear_to_page(struct page *page, unsigned int *len,
1482					  unsigned int *offset,
1483					  struct sk_buff *skb, struct sock *sk)
1484{
1485	struct page *p = sk->sk_sndmsg_page;
1486	unsigned int off;
1487
1488	if (!p) {
1489new_page:
1490		p = sk->sk_sndmsg_page = alloc_pages(sk->sk_allocation, 0);
1491		if (!p)
1492			return NULL;
1493
1494		off = sk->sk_sndmsg_off = 0;
1495		/* hold one ref to this page until it's full */
1496	} else {
1497		unsigned int mlen;
1498
1499		off = sk->sk_sndmsg_off;
1500		mlen = PAGE_SIZE - off;
1501		if (mlen < 64 && mlen < *len) {
1502			put_page(p);
1503			goto new_page;
1504		}
1505
1506		*len = min_t(unsigned int, *len, mlen);
1507	}
1508
1509	memcpy(page_address(p) + off, page_address(page) + *offset, *len);
1510	sk->sk_sndmsg_off += *len;
1511	*offset = off;
1512	get_page(p);
1513
1514	return p;
1515}
1516
1517/*
1518 * Fill page/offset/length into spd, if it can hold more pages.
1519 */
1520static inline int spd_fill_page(struct splice_pipe_desc *spd,
1521				struct pipe_inode_info *pipe, struct page *page,
1522				unsigned int *len, unsigned int offset,
1523				struct sk_buff *skb, int linear,
1524				struct sock *sk)
1525{
1526	if (unlikely(spd->nr_pages == pipe->buffers))
1527		return 1;
1528
1529	if (linear) {
1530		page = linear_to_page(page, len, &offset, skb, sk);
1531		if (!page)
1532			return 1;
1533	} else
1534		get_page(page);
1535
1536	spd->pages[spd->nr_pages] = page;
1537	spd->partial[spd->nr_pages].len = *len;
1538	spd->partial[spd->nr_pages].offset = offset;
1539	spd->nr_pages++;
1540
1541	return 0;
1542}
1543
1544static inline void __segment_seek(struct page **page, unsigned int *poff,
1545				  unsigned int *plen, unsigned int off)
1546{
1547	unsigned long n;
1548
1549	*poff += off;
1550	n = *poff / PAGE_SIZE;
1551	if (n)
1552		*page = nth_page(*page, n);
1553
1554	*poff = *poff % PAGE_SIZE;
1555	*plen -= off;
1556}
1557
1558static inline int __splice_segment(struct page *page, unsigned int poff,
1559				   unsigned int plen, unsigned int *off,
1560				   unsigned int *len, struct sk_buff *skb,
1561				   struct splice_pipe_desc *spd, int linear,
1562				   struct sock *sk,
1563				   struct pipe_inode_info *pipe)
1564{
1565	if (!*len)
1566		return 1;
1567
1568	/* skip this segment if already processed */
1569	if (*off >= plen) {
1570		*off -= plen;
1571		return 0;
1572	}
1573
1574	/* ignore any bits we already processed */
1575	if (*off) {
1576		__segment_seek(&page, &poff, &plen, *off);
1577		*off = 0;
1578	}
1579
1580	do {
1581		unsigned int flen = min(*len, plen);
1582
1583		/* the linear region may spread across several pages  */
1584		flen = min_t(unsigned int, flen, PAGE_SIZE - poff);
1585
1586		if (spd_fill_page(spd, pipe, page, &flen, poff, skb, linear, sk))
1587			return 1;
1588
1589		__segment_seek(&page, &poff, &plen, flen);
1590		*len -= flen;
1591
1592	} while (*len && plen);
1593
1594	return 0;
1595}
1596
1597/*
1598 * Map linear and fragment data from the skb to spd. It reports failure if the
1599 * pipe is full or if we already spliced the requested length.
1600 */
1601static int __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
1602			     unsigned int *offset, unsigned int *len,
1603			     struct splice_pipe_desc *spd, struct sock *sk)
1604{
1605	int seg;
 
1606
1607	/*
1608	 * map the linear part
1609	 */
1610	if (__splice_segment(virt_to_page(skb->data),
1611			     (unsigned long) skb->data & (PAGE_SIZE - 1),
1612			     skb_headlen(skb),
1613			     offset, len, skb, spd, 1, sk, pipe))
1614		return 1;
1615
1616	/*
1617	 * then map the fragments
1618	 */
1619	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
1620		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
1621
1622		if (__splice_segment(f->page, f->page_offset, f->size,
1623				     offset, len, skb, spd, 0, sk, pipe))
1624			return 1;
 
1625	}
1626
1627	return 0;
1628}
1629
1630/*
1631 * Map data from the skb to a pipe. Should handle both the linear part,
1632 * the fragments, and the frag list. It does NOT handle frag lists within
1633 * the frag list, if such a thing exists. We'd probably need to recurse to
1634 * handle that cleanly.
1635 */
1636int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
1637		    struct pipe_inode_info *pipe, unsigned int tlen,
1638		    unsigned int flags)
1639{
1640	struct partial_page partial[PIPE_DEF_BUFFERS];
1641	struct page *pages[PIPE_DEF_BUFFERS];
1642	struct splice_pipe_desc spd = {
1643		.pages = pages,
1644		.partial = partial,
1645		.flags = flags,
1646		.ops = &sock_pipe_buf_ops,
1647		.spd_release = sock_spd_release,
1648	};
1649	struct sk_buff *frag_iter;
1650	struct sock *sk = skb->sk;
1651	int ret = 0;
1652
1653	if (splice_grow_spd(pipe, &spd))
1654		return -ENOMEM;
1655
1656	/*
1657	 * __skb_splice_bits() only fails if the output has no room left,
1658	 * so no point in going over the frag_list for the error case.
1659	 */
1660	if (__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk))
1661		goto done;
1662	else if (!tlen)
1663		goto done;
1664
1665	/*
1666	 * now see if we have a frag_list to map
1667	 */
1668	skb_walk_frags(skb, frag_iter) {
1669		if (!tlen)
1670			break;
1671		if (__skb_splice_bits(frag_iter, pipe, &offset, &tlen, &spd, sk))
1672			break;
1673	}
1674
1675done:
1676	if (spd.nr_pages) {
1677		/*
1678		 * Drop the socket lock, otherwise we have reverse
1679		 * locking dependencies between sk_lock and i_mutex
1680		 * here as compared to sendfile(). We enter here
1681		 * with the socket lock held, and splice_to_pipe() will
1682		 * grab the pipe inode lock. For sendfile() emulation,
1683		 * we call into ->sendpage() with the i_mutex lock held
1684		 * and networking will grab the socket lock.
1685		 */
1686		release_sock(sk);
1687		ret = splice_to_pipe(pipe, &spd);
1688		lock_sock(sk);
1689	}
1690
1691	splice_shrink_spd(pipe, &spd);
1692	return ret;
1693}
 
1694
1695/**
1696 *	skb_store_bits - store bits from kernel buffer to skb
1697 *	@skb: destination buffer
1698 *	@offset: offset in destination
1699 *	@from: source buffer
1700 *	@len: number of bytes to copy
1701 *
1702 *	Copy the specified number of bytes from the source buffer to the
1703 *	destination skb.  This function handles all the messy bits of
1704 *	traversing fragment lists and such.
1705 */
1706
1707int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
1708{
1709	int start = skb_headlen(skb);
1710	struct sk_buff *frag_iter;
1711	int i, copy;
1712
1713	if (offset > (int)skb->len - len)
1714		goto fault;
1715
1716	if ((copy = start - offset) > 0) {
1717		if (copy > len)
1718			copy = len;
1719		skb_copy_to_linear_data_offset(skb, offset, from, copy);
1720		if ((len -= copy) == 0)
1721			return 0;
1722		offset += copy;
1723		from += copy;
1724	}
1725
1726	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1727		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1728		int end;
1729
1730		WARN_ON(start > offset + len);
1731
1732		end = start + frag->size;
1733		if ((copy = end - offset) > 0) {
1734			u8 *vaddr;
1735
1736			if (copy > len)
1737				copy = len;
1738
1739			vaddr = kmap_skb_frag(frag);
1740			memcpy(vaddr + frag->page_offset + offset - start,
1741			       from, copy);
1742			kunmap_skb_frag(vaddr);
1743
1744			if ((len -= copy) == 0)
1745				return 0;
1746			offset += copy;
1747			from += copy;
1748		}
1749		start = end;
1750	}
1751
1752	skb_walk_frags(skb, frag_iter) {
1753		int end;
1754
1755		WARN_ON(start > offset + len);
1756
1757		end = start + frag_iter->len;
1758		if ((copy = end - offset) > 0) {
1759			if (copy > len)
1760				copy = len;
1761			if (skb_store_bits(frag_iter, offset - start,
1762					   from, copy))
1763				goto fault;
1764			if ((len -= copy) == 0)
1765				return 0;
1766			offset += copy;
1767			from += copy;
1768		}
1769		start = end;
1770	}
1771	if (!len)
1772		return 0;
1773
1774fault:
1775	return -EFAULT;
1776}
1777EXPORT_SYMBOL(skb_store_bits);
1778
1779/* Checksum skb data. */
1780
1781__wsum skb_checksum(const struct sk_buff *skb, int offset,
1782			  int len, __wsum csum)
1783{
1784	int start = skb_headlen(skb);
1785	int i, copy = start - offset;
1786	struct sk_buff *frag_iter;
1787	int pos = 0;
1788
1789	/* Checksum header. */
1790	if (copy > 0) {
1791		if (copy > len)
1792			copy = len;
1793		csum = csum_partial(skb->data + offset, copy, csum);
1794		if ((len -= copy) == 0)
1795			return csum;
1796		offset += copy;
1797		pos	= copy;
1798	}
1799
1800	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1801		int end;
 
1802
1803		WARN_ON(start > offset + len);
1804
1805		end = start + skb_shinfo(skb)->frags[i].size;
1806		if ((copy = end - offset) > 0) {
1807			__wsum csum2;
1808			u8 *vaddr;
1809			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1810
1811			if (copy > len)
1812				copy = len;
1813			vaddr = kmap_skb_frag(frag);
1814			csum2 = csum_partial(vaddr + frag->page_offset +
1815					     offset - start, copy, 0);
1816			kunmap_skb_frag(vaddr);
1817			csum = csum_block_add(csum, csum2, pos);
1818			if (!(len -= copy))
1819				return csum;
1820			offset += copy;
1821			pos    += copy;
1822		}
1823		start = end;
1824	}
1825
1826	skb_walk_frags(skb, frag_iter) {
1827		int end;
1828
1829		WARN_ON(start > offset + len);
1830
1831		end = start + frag_iter->len;
1832		if ((copy = end - offset) > 0) {
1833			__wsum csum2;
1834			if (copy > len)
1835				copy = len;
1836			csum2 = skb_checksum(frag_iter, offset - start,
1837					     copy, 0);
1838			csum = csum_block_add(csum, csum2, pos);
1839			if ((len -= copy) == 0)
1840				return csum;
1841			offset += copy;
1842			pos    += copy;
1843		}
1844		start = end;
1845	}
1846	BUG_ON(len);
1847
1848	return csum;
1849}
1850EXPORT_SYMBOL(skb_checksum);
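/*
 * Illustrative sketch, not part of the original file: a software checksum
 * path feeds skb_checksum() an initial sum of 0 and folds the 32-bit running
 * sum into the final 16-bit one's-complement result. Picking the offset of
 * the region to sum is left to the hypothetical caller.
 */
static __sum16 example_sum_from(const struct sk_buff *skb, int offset)
{
	__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

	return csum_fold(csum);
}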
1851
1852/* Both of above in one bottle. */
1853
1854__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
1855				    u8 *to, int len, __wsum csum)
1856{
1857	int start = skb_headlen(skb);
1858	int i, copy = start - offset;
1859	struct sk_buff *frag_iter;
1860	int pos = 0;
1861
1862	/* Copy header. */
1863	if (copy > 0) {
1864		if (copy > len)
1865			copy = len;
1866		csum = csum_partial_copy_nocheck(skb->data + offset, to,
1867						 copy, csum);
1868		if ((len -= copy) == 0)
1869			return csum;
1870		offset += copy;
1871		to     += copy;
1872		pos	= copy;
1873	}
1874
1875	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1876		int end;
1877
1878		WARN_ON(start > offset + len);
1879
1880		end = start + skb_shinfo(skb)->frags[i].size;
1881		if ((copy = end - offset) > 0) {
1882			__wsum csum2;
1883			u8 *vaddr;
1884			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1885
1886			if (copy > len)
1887				copy = len;
1888			vaddr = kmap_skb_frag(frag);
1889			csum2 = csum_partial_copy_nocheck(vaddr +
1890							  frag->page_offset +
1891							  offset - start, to,
1892							  copy, 0);
1893			kunmap_skb_frag(vaddr);
1894			csum = csum_block_add(csum, csum2, pos);
1895			if (!(len -= copy))
1896				return csum;
1897			offset += copy;
1898			to     += copy;
1899			pos    += copy;
1900		}
1901		start = end;
1902	}
1903
1904	skb_walk_frags(skb, frag_iter) {
1905		__wsum csum2;
1906		int end;
1907
1908		WARN_ON(start > offset + len);
1909
1910		end = start + frag_iter->len;
1911		if ((copy = end - offset) > 0) {
1912			if (copy > len)
1913				copy = len;
1914			csum2 = skb_copy_and_csum_bits(frag_iter,
1915						       offset - start,
1916						       to, copy, 0);
1917			csum = csum_block_add(csum, csum2, pos);
1918			if ((len -= copy) == 0)
1919				return csum;
1920			offset += copy;
1921			to     += copy;
1922			pos    += copy;
1923		}
1924		start = end;
1925	}
1926	BUG_ON(len);
1927	return csum;
1928}
1929EXPORT_SYMBOL(skb_copy_and_csum_bits);
1930
1931void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
1932{
1933	__wsum csum;
1934	long csstart;
1935
1936	if (skb->ip_summed == CHECKSUM_PARTIAL)
1937		csstart = skb_checksum_start_offset(skb);
1938	else
1939		csstart = skb_headlen(skb);
1940
1941	BUG_ON(csstart > skb_headlen(skb));
1942
1943	skb_copy_from_linear_data(skb, to, csstart);
1944
1945	csum = 0;
1946	if (csstart != skb->len)
1947		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
1948					      skb->len - csstart, 0);
1949
1950	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1951		long csstuff = csstart + skb->csum_offset;
1952
1953		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
1954	}
1955}
1956EXPORT_SYMBOL(skb_copy_and_csum_dev);
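
/*
 * Illustrative usage sketch, not part of the original skbuff.c: a driver
 * that must transmit from a flat bounce buffer can use
 * skb_copy_and_csum_dev() to linearize the frame and fill in a pending
 * CHECKSUM_PARTIAL checksum in one pass. Names below are hypothetical.
 */
static int example_copy_for_tx(const struct sk_buff *skb, void *tx_buf,
			       unsigned int tx_buf_len)
{
	if (skb->len > tx_buf_len)
		return -ENOSPC;

	/* copies the frame and, if needed, writes the folded checksum */
	skb_copy_and_csum_dev(skb, tx_buf);
	return skb->len;
}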
1957
1958/**
1959 *	skb_dequeue - remove from the head of the queue
1960 *	@list: list to dequeue from
1961 *
1962 *	Remove the head of the list. The list lock is taken so the function
1963 *	may be used safely with other locking list functions. The head item is
1964 *	returned or %NULL if the list is empty.
1965 */
1966
1967struct sk_buff *skb_dequeue(struct sk_buff_head *list)
1968{
1969	unsigned long flags;
1970	struct sk_buff *result;
1971
1972	spin_lock_irqsave(&list->lock, flags);
1973	result = __skb_dequeue(list);
1974	spin_unlock_irqrestore(&list->lock, flags);
1975	return result;
1976}
1977EXPORT_SYMBOL(skb_dequeue);
1978
1979/**
1980 *	skb_dequeue_tail - remove from the tail of the queue
1981 *	@list: list to dequeue from
1982 *
1983 *	Remove the tail of the list. The list lock is taken so the function
1984 *	may be used safely with other locking list functions. The tail item is
1985 *	returned or %NULL if the list is empty.
1986 */
1987struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
1988{
1989	unsigned long flags;
1990	struct sk_buff *result;
1991
1992	spin_lock_irqsave(&list->lock, flags);
1993	result = __skb_dequeue_tail(list);
1994	spin_unlock_irqrestore(&list->lock, flags);
1995	return result;
1996}
1997EXPORT_SYMBOL(skb_dequeue_tail);
1998
1999/**
2000 *	skb_queue_purge - empty a list
2001 *	@list: list to empty
2002 *
2003 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
2004 *	the list and one reference dropped. This function takes the list
2005 *	lock and is atomic with respect to other list locking functions.
2006 */
2007void skb_queue_purge(struct sk_buff_head *list)
2008{
2009	struct sk_buff *skb;
2010	while ((skb = skb_dequeue(list)) != NULL)
2011		kfree_skb(skb);
2012}
2013EXPORT_SYMBOL(skb_queue_purge);
2014
2015/**
2016 *	skb_queue_head - queue a buffer at the list head
2017 *	@list: list to use
2018 *	@newsk: buffer to queue
2019 *
2020 *	Queue a buffer at the start of the list. This function takes the
2021 *	list lock and can be used safely with other locking &sk_buff
2022 *	functions.
2023 *
2024 *	A buffer cannot be placed on two lists at the same time.
2025 */
2026void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2027{
2028	unsigned long flags;
2029
2030	spin_lock_irqsave(&list->lock, flags);
2031	__skb_queue_head(list, newsk);
2032	spin_unlock_irqrestore(&list->lock, flags);
2033}
2034EXPORT_SYMBOL(skb_queue_head);
2035
2036/**
2037 *	skb_queue_tail - queue a buffer at the list tail
2038 *	@list: list to use
2039 *	@newsk: buffer to queue
2040 *
2041 *	Queue a buffer at the tail of the list. This function takes the
2042 *	list lock and can be used safely with other locking &sk_buff
2043 *	functions.
2044 *
2045 *	A buffer cannot be placed on two lists at the same time.
2046 */
2047void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2048{
2049	unsigned long flags;
2050
2051	spin_lock_irqsave(&list->lock, flags);
2052	__skb_queue_tail(list, newsk);
2053	spin_unlock_irqrestore(&list->lock, flags);
2054}
2055EXPORT_SYMBOL(skb_queue_tail);
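
/*
 * Illustrative usage sketch, not part of the original skbuff.c: a private
 * queue protected by the sk_buff_head lock, using the locking helpers above.
 * Names and sizes are hypothetical.
 */
static void example_queue_roundtrip(void)
{
	struct sk_buff_head queue;
	struct sk_buff *skb;

	skb_queue_head_init(&queue);

	skb = alloc_skb(128, GFP_KERNEL);
	if (skb)
		skb_queue_tail(&queue, skb);	/* takes queue.lock internally */

	while ((skb = skb_dequeue(&queue)) != NULL)
		kfree_skb(skb);

	skb_queue_purge(&queue);		/* frees anything still queued */
}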
2056
2057/**
2058 *	skb_unlink	-	remove a buffer from a list
2059 *	@skb: buffer to remove
2060 *	@list: list to use
2061 *
2062 *	Remove a packet from a list. The list locks are taken and this
2063 *	function is atomic with respect to other list locked calls.
2064 *
2065 *	You must know what list the SKB is on.
2066 */
2067void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2068{
2069	unsigned long flags;
2070
2071	spin_lock_irqsave(&list->lock, flags);
2072	__skb_unlink(skb, list);
2073	spin_unlock_irqrestore(&list->lock, flags);
2074}
2075EXPORT_SYMBOL(skb_unlink);
2076
2077/**
2078 *	skb_append	-	append a buffer
2079 *	@old: buffer to insert after
2080 *	@newsk: buffer to insert
2081 *	@list: list to use
2082 *
2083 *	Place a packet after a given packet in a list. The list locks are taken
2084 *	and this function is atomic with respect to other list locked calls.
2085 *	A buffer cannot be placed on two lists at the same time.
2086 */
2087void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2088{
2089	unsigned long flags;
2090
2091	spin_lock_irqsave(&list->lock, flags);
2092	__skb_queue_after(list, old, newsk);
2093	spin_unlock_irqrestore(&list->lock, flags);
2094}
2095EXPORT_SYMBOL(skb_append);
2096
2097/**
2098 *	skb_insert	-	insert a buffer
2099 *	@old: buffer to insert before
2100 *	@newsk: buffer to insert
2101 *	@list: list to use
2102 *
2103 *	Place a packet before a given packet in a list. The list locks are
2104 * 	taken and this function is atomic with respect to other list locked
2105 *	calls.
2106 *
2107 *	A buffer cannot be placed on two lists at the same time.
2108 */
2109void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2110{
2111	unsigned long flags;
2112
2113	spin_lock_irqsave(&list->lock, flags);
2114	__skb_insert(newsk, old->prev, old, list);
2115	spin_unlock_irqrestore(&list->lock, flags);
2116}
2117EXPORT_SYMBOL(skb_insert);
2118
2119static inline void skb_split_inside_header(struct sk_buff *skb,
2120					   struct sk_buff* skb1,
2121					   const u32 len, const int pos)
2122{
2123	int i;
2124
2125	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2126					 pos - len);
2127	/* And move data appendix as is. */
2128	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2129		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2130
2131	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2132	skb_shinfo(skb)->nr_frags  = 0;
2133	skb1->data_len		   = skb->data_len;
2134	skb1->len		   += skb1->data_len;
2135	skb->data_len		   = 0;
2136	skb->len		   = len;
2137	skb_set_tail_pointer(skb, len);
2138}
2139
2140static inline void skb_split_no_header(struct sk_buff *skb,
2141				       struct sk_buff* skb1,
2142				       const u32 len, int pos)
2143{
2144	int i, k = 0;
2145	const int nfrags = skb_shinfo(skb)->nr_frags;
2146
2147	skb_shinfo(skb)->nr_frags = 0;
2148	skb1->len		  = skb1->data_len = skb->len - len;
2149	skb->len		  = len;
2150	skb->data_len		  = len - pos;
2151
2152	for (i = 0; i < nfrags; i++) {
2153		int size = skb_shinfo(skb)->frags[i].size;
2154
2155		if (pos + size > len) {
2156			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
2157
2158			if (pos < len) {
2159				/* Split frag.
2160				 * We have two variants in this case:
2161				 * 1. Move all the frag to the second
2162				 *    part, if it is possible, e.g.
2163				 *    this approach is mandatory for TUX,
2164				 *    where splitting is expensive.
2165				 * 2. Split accurately. This is what we do here.
2166				 */
2167				get_page(skb_shinfo(skb)->frags[i].page);
2168				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
2169				skb_shinfo(skb1)->frags[0].size -= len - pos;
2170				skb_shinfo(skb)->frags[i].size	= len - pos;
2171				skb_shinfo(skb)->nr_frags++;
2172			}
2173			k++;
2174		} else
2175			skb_shinfo(skb)->nr_frags++;
2176		pos += size;
2177	}
2178	skb_shinfo(skb1)->nr_frags = k;
2179}
2180
2181/**
2182 * skb_split - Split fragmented skb to two parts at length len.
2183 * @skb: the buffer to split
2184 * @skb1: the buffer to receive the second part
2185 * @len: new length for skb
2186 */
2187void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
2188{
2189	int pos = skb_headlen(skb);
2190
2191	if (len < pos)	/* Split line is inside header. */
2192		skb_split_inside_header(skb, skb1, len, pos);
2193	else		/* Second chunk has no header, nothing to copy. */
2194		skb_split_no_header(skb, skb1, len, pos);
2195}
2196EXPORT_SYMBOL(skb_split);
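
/*
 * Illustrative usage sketch, not part of the original skbuff.c: splitting an
 * skb at 'len' bytes, as a TSO/TCP style caller might. The second skb only
 * needs linear room for whatever part of the original header area ends up in
 * it. Names are hypothetical and error handling is minimal.
 */
static struct sk_buff *example_split_at(struct sk_buff *skb, u32 len)
{
	unsigned int nsize = 0;
	struct sk_buff *skb1;

	if (len < skb_headlen(skb))
		nsize = skb_headlen(skb) - len;

	skb1 = alloc_skb(nsize, GFP_ATOMIC);
	if (!skb1)
		return NULL;

	skb_split(skb, skb1, len);	/* skb keeps the first 'len' bytes */
	return skb1;
}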
2197
2198/* Shifting from/to a cloned skb is a no-go.
2199 *
2200 * Caller cannot keep skb_shinfo related pointers past calling here!
2201 */
2202static int skb_prepare_for_shift(struct sk_buff *skb)
2203{
2204	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2205}
2206
2207/**
2208 * skb_shift - Shifts paged data partially from skb to another
2209 * @tgt: buffer into which tail data gets added
2210 * @skb: buffer from which the paged data comes from
2211 * @shiftlen: shift up to this many bytes
2212 *
2213 * Attempts to shift up to shiftlen worth of bytes, which may be less than
2214 * the length of the skb, from skb to tgt. Returns the number of bytes
2215 * shifted. It's up to the caller to free skb if everything was shifted.
2216 *
2217 * If @tgt runs out of frags, the whole operation is aborted.
2218 *
2219 * The skb cannot include anything other than paged data, while tgt is
2220 * allowed to have non-paged data as well.
2221 *
2222 * TODO: full sized shift could be optimized but that would need
2223 * specialized skb free'er to handle frags without up-to-date nr_frags.
2224 */
2225int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
2226{
2227	int from, to, merge, todo;
2228	struct skb_frag_struct *fragfrom, *fragto;
2229
2230	BUG_ON(shiftlen > skb->len);
2231	BUG_ON(skb_headlen(skb));	/* Would corrupt stream */
2232
2233	todo = shiftlen;
2234	from = 0;
2235	to = skb_shinfo(tgt)->nr_frags;
2236	fragfrom = &skb_shinfo(skb)->frags[from];
2237
2238	/* Actual merge is delayed until the point when we know we can
2239	 * commit all, so that we don't have to undo partial changes
2240	 */
2241	if (!to ||
2242	    !skb_can_coalesce(tgt, to, fragfrom->page, fragfrom->page_offset)) {
 
2243		merge = -1;
2244	} else {
2245		merge = to - 1;
2246
2247		todo -= fragfrom->size;
2248		if (todo < 0) {
2249			if (skb_prepare_for_shift(skb) ||
2250			    skb_prepare_for_shift(tgt))
2251				return 0;
2252
2253			/* All previous frag pointers might be stale! */
2254			fragfrom = &skb_shinfo(skb)->frags[from];
2255			fragto = &skb_shinfo(tgt)->frags[merge];
2256
2257			fragto->size += shiftlen;
2258			fragfrom->size -= shiftlen;
2259			fragfrom->page_offset += shiftlen;
2260
2261			goto onlymerged;
2262		}
2263
2264		from++;
2265	}
2266
2267	/* Skip full, not-fitting skb to avoid expensive operations */
2268	if ((shiftlen == skb->len) &&
2269	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
2270		return 0;
2271
2272	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
2273		return 0;
2274
2275	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
2276		if (to == MAX_SKB_FRAGS)
2277			return 0;
2278
2279		fragfrom = &skb_shinfo(skb)->frags[from];
2280		fragto = &skb_shinfo(tgt)->frags[to];
2281
2282		if (todo >= fragfrom->size) {
2283			*fragto = *fragfrom;
2284			todo -= fragfrom->size;
2285			from++;
2286			to++;
2287
2288		} else {
2289			get_page(fragfrom->page);
2290			fragto->page = fragfrom->page;
2291			fragto->page_offset = fragfrom->page_offset;
2292			fragto->size = todo;
2293
2294			fragfrom->page_offset += todo;
2295			fragfrom->size -= todo;
2296			todo = 0;
2297
2298			to++;
2299			break;
2300		}
2301	}
2302
2303	/* Ready to "commit" this state change to tgt */
2304	skb_shinfo(tgt)->nr_frags = to;
2305
2306	if (merge >= 0) {
2307		fragfrom = &skb_shinfo(skb)->frags[0];
2308		fragto = &skb_shinfo(tgt)->frags[merge];
2309
2310		fragto->size += fragfrom->size;
2311		put_page(fragfrom->page);
2312	}
2313
2314	/* Reposition in the original skb */
2315	to = 0;
2316	while (from < skb_shinfo(skb)->nr_frags)
2317		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
2318	skb_shinfo(skb)->nr_frags = to;
2319
2320	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
2321
2322onlymerged:
2323	/* Most likely the tgt won't ever need its checksum anymore, skb on
2324	 * the other hand might need it if it needs to be resent
2325	 */
2326	tgt->ip_summed = CHECKSUM_PARTIAL;
2327	skb->ip_summed = CHECKSUM_PARTIAL;
2328
2329	/* Yak, is it really working this way? Some helper please? */
2330	skb->len -= shiftlen;
2331	skb->data_len -= shiftlen;
2332	skb->truesize -= shiftlen;
2333	tgt->len += shiftlen;
2334	tgt->data_len += shiftlen;
2335	tgt->truesize += shiftlen;
2336
2337	return shiftlen;
2338}
2339
2340/**
2341 * skb_prepare_seq_read - Prepare a sequential read of skb data
2342 * @skb: the buffer to read
2343 * @from: lower offset of data to be read
2344 * @to: upper offset of data to be read
2345 * @st: state variable
2346 *
2347 * Initializes the specified state variable. Must be called before
2348 * invoking skb_seq_read() for the first time.
2349 */
2350void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
2351			  unsigned int to, struct skb_seq_state *st)
2352{
2353	st->lower_offset = from;
2354	st->upper_offset = to;
2355	st->root_skb = st->cur_skb = skb;
2356	st->frag_idx = st->stepped_offset = 0;
2357	st->frag_data = NULL;
2358}
2359EXPORT_SYMBOL(skb_prepare_seq_read);
2360
2361/**
2362 * skb_seq_read - Sequentially read skb data
2363 * @consumed: number of bytes consumed by the caller so far
2364 * @data: destination pointer for data to be returned
2365 * @st: state variable
2366 *
2367 * Reads a block of skb data at &consumed relative to the
2368 * lower offset specified to skb_prepare_seq_read(). Assigns
2369 * the head of the data block to &data and returns the length
2370 * of the block or 0 if the end of the skb data or the upper
2371 * offset has been reached.
2372 *
2373 * The caller is not required to consume all of the data
2374 * returned, i.e. &consumed is typically set to the number
2375 * of bytes already consumed and the next call to
2376 * skb_seq_read() will return the remaining part of the block.
2377 *
2378 * Note 1: The size of each block of data returned can be arbitrary;
2379 *       this limitation is the cost of zerocopy sequential
2380 *       reads of potentially non-linear data.
2381 *
2382 * Note 2: Fragment lists within fragments are not implemented
2383 *       at the moment, state->root_skb could be replaced with
2384 *       a stack for this purpose.
2385 */
2386unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
2387			  struct skb_seq_state *st)
2388{
2389	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
2390	skb_frag_t *frag;
2391
2392	if (unlikely(abs_offset >= st->upper_offset))
2393		return 0;
 
2394
2395next_skb:
2396	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
2397
2398	if (abs_offset < block_limit && !st->frag_data) {
2399		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
2400		return block_limit - abs_offset;
2401	}
2402
2403	if (st->frag_idx == 0 && !st->frag_data)
2404		st->stepped_offset += skb_headlen(st->cur_skb);
2405
2406	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
2407		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
2408		block_limit = frag->size + st->stepped_offset;
2409
2410		if (abs_offset < block_limit) {
2411			if (!st->frag_data)
2412				st->frag_data = kmap_skb_frag(frag);
2413
2414			*data = (u8 *) st->frag_data + frag->page_offset +
2415				(abs_offset - st->stepped_offset);
2416
2417			return block_limit - abs_offset;
2418		}
2419
2420		if (st->frag_data) {
2421			kunmap_skb_frag(st->frag_data);
2422			st->frag_data = NULL;
2423		}
2424
2425		st->frag_idx++;
2426		st->stepped_offset += frag->size;
2427	}
2428
2429	if (st->frag_data) {
2430		kunmap_skb_frag(st->frag_data);
2431		st->frag_data = NULL;
2432	}
2433
2434	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
2435		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
2436		st->frag_idx = 0;
2437		goto next_skb;
2438	} else if (st->cur_skb->next) {
2439		st->cur_skb = st->cur_skb->next;
2440		st->frag_idx = 0;
2441		goto next_skb;
2442	}
2443
2444	return 0;
2445}
2446EXPORT_SYMBOL(skb_seq_read);
2447
2448/**
2449 * skb_abort_seq_read - Abort a sequential read of skb data
2450 * @st: state variable
2451 *
2452 * Must be called if the sequential read is aborted, i.e. if
2453 * skb_seq_read() was not called until it returned 0.
2454 */
2455void skb_abort_seq_read(struct skb_seq_state *st)
2456{
2457	if (st->frag_data)
2458		kunmap_skb_frag(st->frag_data);
2459}
2460EXPORT_SYMBOL(skb_abort_seq_read);
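
/*
 * Illustrative usage sketch, not part of the original skbuff.c: walking the
 * whole payload of a possibly non-linear skb with the sequential read
 * helpers above. The function name is hypothetical.
 */
static void example_walk_payload(struct sk_buff *skb)
{
	struct skb_seq_state st;
	const u8 *data;
	unsigned int consumed = 0;
	unsigned int len;

	skb_prepare_seq_read(skb, 0, skb->len, &st);

	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
		/* 'len' contiguous bytes are available at 'data' */
		consumed += len;
	}
	/* skb_seq_read() returned 0, so skb_abort_seq_read() is not needed */
}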
2461
2462#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
2463
2464static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
2465					  struct ts_config *conf,
2466					  struct ts_state *state)
2467{
2468	return skb_seq_read(offset, text, TS_SKB_CB(state));
2469}
2470
2471static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
2472{
2473	skb_abort_seq_read(TS_SKB_CB(state));
2474}
2475
2476/**
2477 * skb_find_text - Find a text pattern in skb data
2478 * @skb: the buffer to look in
2479 * @from: search offset
2480 * @to: search limit
2481 * @config: textsearch configuration
2482 * @state: uninitialized textsearch state variable
2483 *
2484 * Finds a pattern in the skb data according to the specified
2485 * textsearch configuration. Use textsearch_next() to retrieve
2486 * subsequent occurrences of the pattern. Returns the offset
2487 * to the first occurrence or UINT_MAX if no match was found.
2488 */
2489unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
2490			   unsigned int to, struct ts_config *config,
2491			   struct ts_state *state)
2492{
 
2493	unsigned int ret;
2494
2495	config->get_next_block = skb_ts_get_next_block;
2496	config->finish = skb_ts_finish;
2497
2498	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(state));
2499
2500	ret = textsearch_find(config, state);
2501	return (ret <= to - from ? ret : UINT_MAX);
2502}
2503EXPORT_SYMBOL(skb_find_text);
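
/*
 * Illustrative usage sketch, not part of the original skbuff.c: searching an
 * skb for a fixed pattern via the textsearch infrastructure. The algorithm,
 * pattern and function name are hypothetical.
 */
static unsigned int example_find_pattern(struct sk_buff *skb)
{
	struct ts_config *conf;
	struct ts_state state;
	unsigned int pos;

	conf = textsearch_prepare("kmp", "HTTP", 4, GFP_KERNEL, TS_AUTOLOAD);
	if (IS_ERR(conf))
		return UINT_MAX;

	pos = skb_find_text(skb, 0, skb->len, conf, &state);

	textsearch_destroy(conf);
	return pos;	/* offset of the match, or UINT_MAX if none */
}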
2504
2505/**
2506 * skb_append_datato_frags - append user data to an skb
2507 * @sk: sock structure
2508 * @skb: skb to which the user data is appended
2509 * @getfrag: callback function used to obtain the user data
2510 * @from: pointer to the user message iov
2511 * @length: length of the iov message
2512 *
2513 * Description: This procedure appends the user data to the fragment part
2514 * of the skb. If any page allocation fails, it returns -ENOMEM.
2515 */
2516int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
2517			int (*getfrag)(void *from, char *to, int offset,
2518					int len, int odd, struct sk_buff *skb),
2519			void *from, int length)
2520{
2521	int frg_cnt = 0;
2522	skb_frag_t *frag = NULL;
2523	struct page *page = NULL;
2524	int copy, left;
2525	int offset = 0;
2526	int ret;
 
2527
2528	do {
2529		/* Return error if we don't have space for new frag */
2530		frg_cnt = skb_shinfo(skb)->nr_frags;
2531		if (frg_cnt >= MAX_SKB_FRAGS)
2532			return -EFAULT;
2533
2534		/* allocate a new page for next frag */
2535		page = alloc_pages(sk->sk_allocation, 0);
2536
2537		/* If alloc_page fails just return failure and caller will
2538		 * free previous allocated pages by doing kfree_skb()
2539		 */
2540		if (page == NULL)
2541			return -ENOMEM;
2542
2543		/* initialize the next frag */
2544		skb_fill_page_desc(skb, frg_cnt, page, 0, 0);
2545		skb->truesize += PAGE_SIZE;
2546		atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
2547
2548		/* get the new initialized frag */
2549		frg_cnt = skb_shinfo(skb)->nr_frags;
2550		frag = &skb_shinfo(skb)->frags[frg_cnt - 1];
2551
2552		/* copy the user data to page */
2553		left = PAGE_SIZE - frag->page_offset;
2554		copy = (length > left)? left : length;
2555
2556		ret = getfrag(from, (page_address(frag->page) +
2557			    frag->page_offset + frag->size),
2558			    offset, copy, 0, skb);
2559		if (ret < 0)
2560			return -EFAULT;
2561
2562		/* copy was successful so update the size parameters */
2563		frag->size += copy;
2564		skb->len += copy;
2565		skb->data_len += copy;
2566		offset += copy;
2567		length -= copy;
2568
2569	} while (length > 0);
2570
2571	return 0;
2572}
2573EXPORT_SYMBOL(skb_append_datato_frags);
2574
2575/**
2576 *	skb_pull_rcsum - pull skb and update receive checksum
2577 *	@skb: buffer to update
2578 *	@len: length of data pulled
2579 *
2580 *	This function performs an skb_pull on the packet and updates
2581 *	the CHECKSUM_COMPLETE checksum.  It should be used on
2582 *	receive path processing instead of skb_pull unless you know
2583 *	that the checksum difference is zero (e.g., a valid IP header)
2584 *	or you are setting ip_summed to CHECKSUM_NONE.
2585 */
2586unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
2587{
 
 
2588	BUG_ON(len > skb->len);
2589	skb->len -= len;
2590	BUG_ON(skb->len < skb->data_len);
2591	skb_postpull_rcsum(skb, skb->data, len);
2592	return skb->data += len;
2593}
2594EXPORT_SYMBOL_GPL(skb_pull_rcsum);
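
/*
 * Illustrative usage sketch, not part of the original skbuff.c: stripping a
 * hypothetical 4 byte tag on the receive path while keeping a
 * CHECKSUM_COMPLETE value consistent. Names are hypothetical.
 */
static int example_strip_rx_tag(struct sk_buff *skb)
{
	if (!pskb_may_pull(skb, 4))
		return -EINVAL;

	/* unlike skb_pull(), this also folds the pulled bytes out of skb->csum */
	skb_pull_rcsum(skb, 4);
	return 0;
}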
2595
2596/**
2597 *	skb_segment - Perform protocol segmentation on skb.
2598 *	@skb: buffer to segment
2599 *	@features: features for the output path (see dev->features)
2600 *
2601 *	This function performs segmentation on the given skb.  It returns
2602 *	a pointer to the first in a list of new skbs for the segments.
2603 *	In case of error it returns ERR_PTR(err).
2604 */
2605struct sk_buff *skb_segment(struct sk_buff *skb, u32 features)
 
2606{
2607	struct sk_buff *segs = NULL;
2608	struct sk_buff *tail = NULL;
2609	struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
2610	unsigned int mss = skb_shinfo(skb)->gso_size;
2611	unsigned int doffset = skb->data - skb_mac_header(skb);
 
 
2612	unsigned int offset = doffset;
 
 
2613	unsigned int headroom;
2614	unsigned int len;
2615	int sg = !!(features & NETIF_F_SG);
2616	int nfrags = skb_shinfo(skb)->nr_frags;
 
2617	int err = -ENOMEM;
2618	int i = 0;
2619	int pos;
2620
2621	__skb_push(skb, doffset);
2622	headroom = skb_headroom(skb);
2623	pos = skb_headlen(skb);
2624
2625	do {
2626		struct sk_buff *nskb;
2627		skb_frag_t *frag;
2628		int hsize;
2629		int size;
2630
2631		len = skb->len - offset;
2632		if (len > mss)
2633			len = mss;
2634
2635		hsize = skb_headlen(skb) - offset;
2636		if (hsize < 0)
2637			hsize = 0;
2638		if (hsize > len || !sg)
2639			hsize = len;
2640
2641		if (!hsize && i >= nfrags) {
2642			BUG_ON(fskb->len != len);
2643
2644			pos += len;
2645			nskb = skb_clone(fskb, GFP_ATOMIC);
2646			fskb = fskb->next;
2647
2648			if (unlikely(!nskb))
2649				goto err;
2650
2651			hsize = skb_end_pointer(nskb) - nskb->head;
2652			if (skb_cow_head(nskb, doffset + headroom)) {
2653				kfree_skb(nskb);
2654				goto err;
2655			}
2656
2657			nskb->truesize += skb_end_pointer(nskb) - nskb->head -
2658					  hsize;
2659			skb_release_head_state(nskb);
2660			__skb_push(nskb, doffset);
2661		} else {
2662			nskb = alloc_skb(hsize + doffset + headroom,
2663					 GFP_ATOMIC);
 
2664
2665			if (unlikely(!nskb))
2666				goto err;
2667
2668			skb_reserve(nskb, headroom);
2669			__skb_put(nskb, doffset);
2670		}
2671
2672		if (segs)
2673			tail->next = nskb;
2674		else
2675			segs = nskb;
2676		tail = nskb;
2677
2678		__copy_skb_header(nskb, skb);
2679		nskb->mac_len = skb->mac_len;
2680
2681		/* nskb and skb might have different headroom */
2682		if (nskb->ip_summed == CHECKSUM_PARTIAL)
2683			nskb->csum_start += skb_headroom(nskb) - headroom;
2684
2685		skb_reset_mac_header(nskb);
2686		skb_set_network_header(nskb, skb->mac_len);
2687		nskb->transport_header = (nskb->network_header +
2688					  skb_network_header_len(skb));
2689		skb_copy_from_linear_data(skb, nskb->data, doffset);
2690
2691		if (fskb != skb_shinfo(skb)->frag_list)
2692			continue;
2693
2694		if (!sg) {
2695			nskb->ip_summed = CHECKSUM_NONE;
2696			nskb->csum = skb_copy_and_csum_bits(skb, offset,
2697							    skb_put(nskb, len),
2698							    len, 0);
2699			continue;
2700		}
2701
2702		frag = skb_shinfo(nskb)->frags;
2703
2704		skb_copy_from_linear_data_offset(skb, offset,
2705						 skb_put(nskb, hsize), hsize);
2706
2707		while (pos < offset + len && i < nfrags) {
2708			*frag = skb_shinfo(skb)->frags[i];
2709			get_page(frag->page);
2710			size = frag->size;
2711
2712			if (pos < offset) {
2713				frag->page_offset += offset - pos;
2714				frag->size -= offset - pos;
2715			}
2716
2717			skb_shinfo(nskb)->nr_frags++;
2718
2719			if (pos + size <= offset + len) {
2720				i++;
 
2721				pos += size;
2722			} else {
2723				frag->size -= pos + size - (offset + len);
2724				goto skip_fraglist;
2725			}
2726
2727			frag++;
2728		}
2729
2730		if (pos < offset + len) {
2731			struct sk_buff *fskb2 = fskb;
 
 
2732
2733			BUG_ON(pos + fskb->len != offset + len);
2734
2735			pos += fskb->len;
2736			fskb = fskb->next;
2737
2738			if (fskb2->next) {
2739				fskb2 = skb_clone(fskb2, GFP_ATOMIC);
2740				if (!fskb2)
2741					goto err;
2742			} else
2743				skb_get(fskb2);
 
 
2744
2745			SKB_FRAG_ASSERT(nskb);
2746			skb_shinfo(nskb)->frag_list = fskb2;
2747		}
2748
2749skip_fraglist:
2750		nskb->data_len = len - hsize;
2751		nskb->len += nskb->data_len;
2752		nskb->truesize += nskb->data_len;
2753	} while ((offset += len) < skb->len);
2754
2755	return segs;
2756
2757err:
2758	while ((skb = segs)) {
2759		segs = skb->next;
2760		kfree_skb(skb);
2761	}
2762	return ERR_PTR(err);
2763}
2764EXPORT_SYMBOL_GPL(skb_segment);
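
/*
 * Illustrative usage sketch, not part of the original skbuff.c: how a
 * protocol's gso_segment handler typically hands an oversized skb to
 * skb_segment(). It assumes the caller has already pulled its own header and
 * that skb_shinfo(skb)->gso_size is valid; the name is hypothetical.
 */
static struct sk_buff *example_gso_segment(struct sk_buff *skb, u32 features)
{
	struct sk_buff *segs;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		return segs;	/* propagate ERR_PTR() to the caller */

	/* 'segs' is a ->next linked list of skbs, each carrying at most
	 * gso_size bytes of payload; the original skb is left for the caller.
	 */
	return segs;
}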
2765
2766int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2767{
2768	struct sk_buff *p = *head;
2769	struct sk_buff *nskb;
2770	struct skb_shared_info *skbinfo = skb_shinfo(skb);
2771	struct skb_shared_info *pinfo = skb_shinfo(p);
2772	unsigned int headroom;
2773	unsigned int len = skb_gro_len(skb);
2774	unsigned int offset = skb_gro_offset(skb);
2775	unsigned int headlen = skb_headlen(skb);
2776
2777	if (p->len + len >= 65536)
2778		return -E2BIG;
2779
2780	if (pinfo->frag_list)
2781		goto merge;
2782	else if (headlen <= offset) {
 
2783		skb_frag_t *frag;
2784		skb_frag_t *frag2;
2785		int i = skbinfo->nr_frags;
2786		int nr_frags = pinfo->nr_frags + i;
2787
2788		offset -= headlen;
2789
2790		if (nr_frags > MAX_SKB_FRAGS)
2791			return -E2BIG;
2792
 
2793		pinfo->nr_frags = nr_frags;
2794		skbinfo->nr_frags = 0;
2795
2796		frag = pinfo->frags + nr_frags;
2797		frag2 = skbinfo->frags + i;
2798		do {
2799			*--frag = *--frag2;
2800		} while (--i);
2801
2802		frag->page_offset += offset;
2803		frag->size -= offset;
2804
2805		skb->truesize -= skb->data_len;
2806		skb->len -= skb->data_len;
2807		skb->data_len = 0;
2808
2809		NAPI_GRO_CB(skb)->free = 1;
2810		goto done;
2811	} else if (skb_gro_len(p) != pinfo->gso_size)
2812		return -E2BIG;
2813
2814	headroom = skb_headroom(p);
2815	nskb = alloc_skb(headroom + skb_gro_offset(p), GFP_ATOMIC);
2816	if (unlikely(!nskb))
2817		return -ENOMEM;
2818
2819	__copy_skb_header(nskb, p);
2820	nskb->mac_len = p->mac_len;
2821
2822	skb_reserve(nskb, headroom);
2823	__skb_put(nskb, skb_gro_offset(p));
2824
2825	skb_set_mac_header(nskb, skb_mac_header(p) - p->data);
2826	skb_set_network_header(nskb, skb_network_offset(p));
2827	skb_set_transport_header(nskb, skb_transport_offset(p));
2828
2829	__skb_pull(p, skb_gro_offset(p));
2830	memcpy(skb_mac_header(nskb), skb_mac_header(p),
2831	       p->data - skb_mac_header(p));
2832
2833	*NAPI_GRO_CB(nskb) = *NAPI_GRO_CB(p);
2834	skb_shinfo(nskb)->frag_list = p;
2835	skb_shinfo(nskb)->gso_size = pinfo->gso_size;
2836	pinfo->gso_size = 0;
2837	skb_header_release(p);
2838	nskb->prev = p;
2839
2840	nskb->data_len += p->len;
2841	nskb->truesize += p->len;
2842	nskb->len += p->len;
2843
2844	*head = nskb;
2845	nskb->next = p->next;
2846	p->next = NULL;
2847
2848	p = nskb;
2849
2850merge:
 
2851	if (offset > headlen) {
2852		unsigned int eat = offset - headlen;
2853
2854		skbinfo->frags[0].page_offset += eat;
2855		skbinfo->frags[0].size -= eat;
2856		skb->data_len -= eat;
2857		skb->len -= eat;
2858		offset = headlen;
2859	}
2860
2861	__skb_pull(skb, offset);
2862
2863	p->prev->next = skb;
2864	p->prev = skb;
2865	skb_header_release(skb);
2866
2867done:
2868	NAPI_GRO_CB(p)->count++;
2869	p->data_len += len;
2870	p->truesize += len;
2871	p->len += len;
2872
2873	NAPI_GRO_CB(skb)->same_flow = 1;
2874	return 0;
2875}
2876EXPORT_SYMBOL_GPL(skb_gro_receive);
2877
2878void __init skb_init(void)
2879{
2880	skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
2881					      sizeof(struct sk_buff),
2882					      0,
2883					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
 
 
2884					      NULL);
2885	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
2886						(2*sizeof(struct sk_buff)) +
2887						sizeof(atomic_t),
2888						0,
2889						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
2890						NULL);
2891}
2892
2893/**
2894 *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
2895 *	@skb: Socket buffer containing the buffers to be mapped
2896 *	@sg: The scatter-gather list to map into
2897 *	@offset: The offset into the buffer's contents to start mapping
2898 *	@len: Length of buffer space to be mapped
2899 *
2900 *	Fill the specified scatter-gather list with mappings/pointers into a
2901 *	region of the buffer space attached to a socket buffer.
2902 */
2903static int
2904__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
 
2905{
2906	int start = skb_headlen(skb);
2907	int i, copy = start - offset;
2908	struct sk_buff *frag_iter;
2909	int elt = 0;
2910
2911	if (copy > 0) {
2912		if (copy > len)
2913			copy = len;
2914		sg_set_buf(sg, skb->data + offset, copy);
2915		elt++;
2916		if ((len -= copy) == 0)
2917			return elt;
2918		offset += copy;
2919	}
2920
2921	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2922		int end;
2923
2924		WARN_ON(start > offset + len);
2925
2926		end = start + skb_shinfo(skb)->frags[i].size;
2927		if ((copy = end - offset) > 0) {
2928			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 
 
2929
2930			if (copy > len)
2931				copy = len;
2932			sg_set_page(&sg[elt], frag->page, copy,
2933					frag->page_offset+offset-start);
2934			elt++;
2935			if (!(len -= copy))
2936				return elt;
2937			offset += copy;
2938		}
2939		start = end;
2940	}
2941
2942	skb_walk_frags(skb, frag_iter) {
2943		int end;
2944
2945		WARN_ON(start > offset + len);
2946
2947		end = start + frag_iter->len;
2948		if ((copy = end - offset) > 0) {
2949			if (copy > len)
2950				copy = len;
2951			elt += __skb_to_sgvec(frag_iter, sg+elt, offset - start,
2952					      copy);
2953			if ((len -= copy) == 0)
2954				return elt;
2955			offset += copy;
2956		}
2957		start = end;
2958	}
2959	BUG_ON(len);
2960	return elt;
2961}
2962
2963int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
2964{
2965	int nsg = __skb_to_sgvec(skb, sg, offset, len);
2966
2967	sg_mark_end(&sg[nsg - 1]);
2968
2969	return nsg;
2970}
2971EXPORT_SYMBOL_GPL(skb_to_sgvec);
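
/*
 * Illustrative usage sketch, not part of the original skbuff.c: mapping an
 * skb without a frag list into a scatterlist, e.g. before handing it to a
 * crypto or DMA API. The on-stack array assumes at most the head plus
 * MAX_SKB_FRAGS fragments; names are hypothetical.
 */
static int example_map_skb_to_sg(struct sk_buff *skb)
{
	struct scatterlist sg[MAX_SKB_FRAGS + 1];
	int nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = skb_to_sgvec(skb, sg, 0, skb->len);

	/* 'nents' entries of 'sg' now describe the skb's data */
	return nents;
}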
2972
2973/**
2974 *	skb_cow_data - Check that a socket buffer's data buffers are writable
2975 *	@skb: The socket buffer to check.
2976 *	@tailbits: Amount of trailing space to be added
2977 *	@trailer: Returned pointer to the skb where the @tailbits space begins
2978 *
2979 *	Make sure that the data buffers attached to a socket buffer are
2980 *	writable. If they are not, private copies are made of the data buffers
2981 *	and the socket buffer is set to use these instead.
2982 *
2983 *	If @tailbits is given, make sure that there is space to write @tailbits
2984 *	bytes of data beyond current end of socket buffer.  @trailer will be
2985 *	set to point to the skb in which this space begins.
2986 *
2987 *	The number of scatterlist elements required to completely map the
2988 *	COW'd and extended socket buffer will be returned.
2989 */
2990int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
2991{
2992	int copyflag;
2993	int elt;
2994	struct sk_buff *skb1, **skb_p;
2995
2996	/* If skb is cloned or its head is paged, reallocate
2997	 * head pulling out all the pages (pages are considered not writable
2998	 * at the moment even if they are anonymous).
2999	 */
3000	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
3001	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
3002		return -ENOMEM;
3003
3004	/* Easy case. Most of packets will go this way. */
3005	if (!skb_has_frag_list(skb)) {
3006		/* A little of trouble, not enough of space for trailer.
3007		 * This should not happen, when stack is tuned to generate
3008		 * good frames. OK, on miss we reallocate and reserve even more
3009		 * space, 128 bytes is fair. */
3010
3011		if (skb_tailroom(skb) < tailbits &&
3012		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
3013			return -ENOMEM;
3014
3015		/* Voila! */
3016		*trailer = skb;
3017		return 1;
3018	}
3019
3020	/* Misery. We are in trouble, going to mince the fragments... */
3021
3022	elt = 1;
3023	skb_p = &skb_shinfo(skb)->frag_list;
3024	copyflag = 0;
3025
3026	while ((skb1 = *skb_p) != NULL) {
3027		int ntail = 0;
3028
3029		/* The fragment is partially pulled by someone,
3030		 * this can happen on input. Copy it and everything
3031		 * after it. */
3032
3033		if (skb_shared(skb1))
3034			copyflag = 1;
3035
3036		/* If the skb is the last, worry about trailer. */
3037
3038		if (skb1->next == NULL && tailbits) {
3039			if (skb_shinfo(skb1)->nr_frags ||
3040			    skb_has_frag_list(skb1) ||
3041			    skb_tailroom(skb1) < tailbits)
3042				ntail = tailbits + 128;
3043		}
3044
3045		if (copyflag ||
3046		    skb_cloned(skb1) ||
3047		    ntail ||
3048		    skb_shinfo(skb1)->nr_frags ||
3049		    skb_has_frag_list(skb1)) {
3050			struct sk_buff *skb2;
3051
3052			/* Fuck, we are miserable poor guys... */
3053			if (ntail == 0)
3054				skb2 = skb_copy(skb1, GFP_ATOMIC);
3055			else
3056				skb2 = skb_copy_expand(skb1,
3057						       skb_headroom(skb1),
3058						       ntail,
3059						       GFP_ATOMIC);
3060			if (unlikely(skb2 == NULL))
3061				return -ENOMEM;
3062
3063			if (skb1->sk)
3064				skb_set_owner_w(skb2, skb1->sk);
3065
3066			/* Looking around. Are we still alive?
3067			 * OK, link new skb, drop old one */
3068
3069			skb2->next = skb1->next;
3070			*skb_p = skb2;
3071			kfree_skb(skb1);
3072			skb1 = skb2;
3073		}
3074		elt++;
3075		*trailer = skb1;
3076		skb_p = &skb1->next;
3077	}
3078
3079	return elt;
3080}
3081EXPORT_SYMBOL_GPL(skb_cow_data);
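
/*
 * Illustrative usage sketch, not part of the original skbuff.c: an IPsec
 * style caller making the whole buffer chain writable and reserving room for
 * a trailer before modifying the packet in place. Names are hypothetical.
 */
static int example_make_writable(struct sk_buff *skb, int trailer_len)
{
	struct sk_buff *trailer;
	int nsg;

	nsg = skb_cow_data(skb, trailer_len, &trailer);
	if (nsg < 0)
		return nsg;

	/* the data is now private; 'nsg' scatterlist entries would map it,
	 * and 'trailer' has at least 'trailer_len' bytes of tailroom.
	 */
	return nsg;
}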
3082
3083static void sock_rmem_free(struct sk_buff *skb)
3084{
3085	struct sock *sk = skb->sk;
3086
3087	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
3088}
3089
3090/*
3091 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
3092 */
3093int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
3094{
3095	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
3096	    (unsigned)sk->sk_rcvbuf)
3097		return -ENOMEM;
3098
3099	skb_orphan(skb);
3100	skb->sk = sk;
3101	skb->destructor = sock_rmem_free;
3102	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
 
3103
3104	/* before exiting rcu section, make sure dst is refcounted */
3105	skb_dst_force(skb);
3106
3107	skb_queue_tail(&sk->sk_error_queue, skb);
3108	if (!sock_flag(sk, SOCK_DEAD))
3109		sk->sk_data_ready(sk, skb->len);
3110	return 0;
3111}
3112EXPORT_SYMBOL(sock_queue_err_skb);
3113
3114void skb_tstamp_tx(struct sk_buff *orig_skb,
3115		struct skb_shared_hwtstamps *hwtstamps)
3116{
3117	struct sock *sk = orig_skb->sk;
3118	struct sock_exterr_skb *serr;
3119	struct sk_buff *skb;
3120	int err;
3121
3122	if (!sk)
3123		return;
3124
3125	skb = skb_clone(orig_skb, GFP_ATOMIC);
3126	if (!skb)
3127		return;
3128
3129	if (hwtstamps) {
3130		*skb_hwtstamps(skb) =
3131			*hwtstamps;
3132	} else {
3133		/*
3134		 * no hardware time stamps available,
3135		 * so keep the shared tx_flags and only
3136		 * store software time stamp
3137		 */
3138		skb->tstamp = ktime_get_real();
3139	}
3140
3141	serr = SKB_EXT_ERR(skb);
3142	memset(serr, 0, sizeof(*serr));
3143	serr->ee.ee_errno = ENOMSG;
3144	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
3145
3146	err = sock_queue_err_skb(sk, skb);
3147
3148	if (err)
3149		kfree_skb(skb);
3150}
3151EXPORT_SYMBOL_GPL(skb_tstamp_tx);
3152
3153
3154/**
3155 * skb_partial_csum_set - set up and verify partial csum values for packet
3156 * @skb: the skb to set
3157 * @start: the number of bytes after skb->data to start checksumming.
3158 * @off: the offset from start to place the checksum.
3159 *
3160 * For untrusted partially-checksummed packets, we need to make sure the values
3161 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
3162 *
3163 * This function checks and sets those values and skb->ip_summed: if this
3164 * returns false you should drop the packet.
3165 */
3166bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
3167{
3168	if (unlikely(start > skb_headlen(skb)) ||
3169	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
3170		if (net_ratelimit())
3171			printk(KERN_WARNING
3172			       "bad partial csum: csum=%u/%u len=%u\n",
3173			       start, off, skb_headlen(skb));
3174		return false;
3175	}
3176	skb->ip_summed = CHECKSUM_PARTIAL;
3177	skb->csum_start = skb_headroom(skb) + start;
3178	skb->csum_offset = off;
 
3179	return true;
3180}
3181EXPORT_SYMBOL_GPL(skb_partial_csum_set);
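
/*
 * Illustrative usage sketch, not part of the original skbuff.c: a virtio/tun
 * style receive path validating checksum offsets supplied by an untrusted
 * header before accepting the packet. Names are hypothetical.
 */
static int example_rx_untrusted_csum(struct sk_buff *skb, u16 csum_start,
				     u16 csum_offset, bool needs_csum)
{
	if (needs_csum && !skb_partial_csum_set(skb, csum_start, csum_offset)) {
		kfree_skb(skb);	/* offsets would run past the head: drop it */
		return -EINVAL;
	}
	return 0;
}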
3182
3183void __skb_warn_lro_forwarding(const struct sk_buff *skb)
3184{
3185	if (net_ratelimit())
3186		pr_warning("%s: received packets cannot be forwarded"
3187			   " while LRO is enabled\n", skb->dev->name);
3188}
3189EXPORT_SYMBOL(__skb_warn_lro_forwarding);
v4.17
   1/*
   2 *	Routines having to do with the 'struct sk_buff' memory handlers.
   3 *
   4 *	Authors:	Alan Cox <alan@lxorguk.ukuu.org.uk>
   5 *			Florian La Roche <rzsfl@rz.uni-sb.de>
   6 *
   7 *	Fixes:
   8 *		Alan Cox	:	Fixed the worst of the load
   9 *					balancer bugs.
  10 *		Dave Platt	:	Interrupt stacking fix.
  11 *	Richard Kooijman	:	Timestamp fixes.
  12 *		Alan Cox	:	Changed buffer format.
  13 *		Alan Cox	:	destructor hook for AF_UNIX etc.
  14 *		Linus Torvalds	:	Better skb_clone.
  15 *		Alan Cox	:	Added skb_copy.
  16 *		Alan Cox	:	Added all the changed routines Linus
  17 *					only put in the headers
  18 *		Ray VanTassle	:	Fixed --skb->lock in free
  19 *		Alan Cox	:	skb_copy copy arp field
  20 *		Andi Kleen	:	slabified it.
  21 *		Robert Olsson	:	Removed skb_head_pool
  22 *
  23 *	NOTE:
  24 *		The __skb_ routines should be called with interrupts
  25 *	disabled, or you better be *real* sure that the operation is atomic
  26 *	with respect to whatever list is being frobbed (e.g. via lock_sock()
  27 *	or via disabling bottom half handlers, etc).
  28 *
  29 *	This program is free software; you can redistribute it and/or
  30 *	modify it under the terms of the GNU General Public License
  31 *	as published by the Free Software Foundation; either version
  32 *	2 of the License, or (at your option) any later version.
  33 */
  34
  35/*
  36 *	The functions in this file will not compile correctly with gcc 2.4.x
  37 */
  38
  39#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  40
  41#include <linux/module.h>
  42#include <linux/types.h>
  43#include <linux/kernel.h>
 
  44#include <linux/mm.h>
  45#include <linux/interrupt.h>
  46#include <linux/in.h>
  47#include <linux/inet.h>
  48#include <linux/slab.h>
  49#include <linux/tcp.h>
  50#include <linux/udp.h>
  51#include <linux/sctp.h>
  52#include <linux/netdevice.h>
  53#ifdef CONFIG_NET_CLS_ACT
  54#include <net/pkt_sched.h>
  55#endif
  56#include <linux/string.h>
  57#include <linux/skbuff.h>
  58#include <linux/splice.h>
  59#include <linux/cache.h>
  60#include <linux/rtnetlink.h>
  61#include <linux/init.h>
  62#include <linux/scatterlist.h>
  63#include <linux/errqueue.h>
  64#include <linux/prefetch.h>
  65#include <linux/if_vlan.h>
  66
  67#include <net/protocol.h>
  68#include <net/dst.h>
  69#include <net/sock.h>
  70#include <net/checksum.h>
  71#include <net/ip6_checksum.h>
  72#include <net/xfrm.h>
  73
  74#include <linux/uaccess.h>
 
  75#include <trace/events/skb.h>
  76#include <linux/highmem.h>
  77#include <linux/capability.h>
  78#include <linux/user_namespace.h>
  79
  80struct kmem_cache *skbuff_head_cache __ro_after_init;
  81static struct kmem_cache *skbuff_fclone_cache __ro_after_init;
  82int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
  83EXPORT_SYMBOL(sysctl_max_skb_frags);
  84
  85/**
  86 *	skb_panic - private function for out-of-line support
  87 *	@skb:	buffer
  88 *	@sz:	size
  89 *	@addr:	address
  90 *	@msg:	skb_over_panic or skb_under_panic
  91 *
  92 *	Out-of-line support for skb_put() and skb_push().
  93 *	Called via the wrapper skb_over_panic() or skb_under_panic().
  94 *	Keep out of line to prevent kernel bloat.
  95 *	__builtin_return_address is not used because it is not always reliable.
  96 */
  97static void skb_panic(struct sk_buff *skb, unsigned int sz, void *addr,
  98		      const char msg[])
  99{
 100	pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n",
 101		 msg, addr, skb->len, sz, skb->head, skb->data,
 102		 (unsigned long)skb->tail, (unsigned long)skb->end,
 103		 skb->dev ? skb->dev->name : "<NULL>");
 104	BUG();
 105}
 106
 107static void skb_over_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 
 108{
 109	skb_panic(skb, sz, addr, __func__);
 110}
 111
 112static void skb_under_panic(struct sk_buff *skb, unsigned int sz, void *addr)
 
 113{
 114	skb_panic(skb, sz, addr, __func__);
 115}
 116
 117/*
 118 * kmalloc_reserve is a wrapper around kmalloc_node_track_caller that tells
 119 * the caller if emergency pfmemalloc reserves are being used. If it is and
 120 * the socket is later found to be SOCK_MEMALLOC then PFMEMALLOC reserves
 121 * may be used. Otherwise, the packet data may be discarded until enough
 122 * memory is free
 123 */
 124#define kmalloc_reserve(size, gfp, node, pfmemalloc) \
 125	 __kmalloc_reserve(size, gfp, node, _RET_IP_, pfmemalloc)
 126
 127static void *__kmalloc_reserve(size_t size, gfp_t flags, int node,
 128			       unsigned long ip, bool *pfmemalloc)
 129{
 130	void *obj;
 131	bool ret_pfmemalloc = false;
 132
 133	/*
 134	 * Try a regular allocation, when that fails and we're not entitled
 135	 * to the reserves, fail.
 136	 */
 137	obj = kmalloc_node_track_caller(size,
 138					flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
 139					node);
 140	if (obj || !(gfp_pfmemalloc_allowed(flags)))
 141		goto out;
 142
 143	/* Try again but now we are using pfmemalloc reserves */
 144	ret_pfmemalloc = true;
 145	obj = kmalloc_node_track_caller(size, flags, node);
 146
 147out:
 148	if (pfmemalloc)
 149		*pfmemalloc = ret_pfmemalloc;
 150
 151	return obj;
 152}
 153
 154/* 	Allocate a new skbuff. We do this ourselves so we can fill in a few
 155 *	'private' fields and also do memory statistics to find all the
 156 *	[BEEP] leaks.
 157 *
 158 */
 159
 160/**
 161 *	__alloc_skb	-	allocate a network buffer
 162 *	@size: size to allocate
 163 *	@gfp_mask: allocation mask
 164 *	@flags: If SKB_ALLOC_FCLONE is set, allocate from fclone cache
 165 *		instead of head cache and allocate a cloned (child) skb.
 166 *		If SKB_ALLOC_RX is set, __GFP_MEMALLOC will be used for
 167 *		allocations in case the data is required for writeback
 168 *	@node: numa node to allocate memory on
 169 *
 170 *	Allocate a new &sk_buff. The returned buffer has no headroom and a
 171 *	tail room of at least size bytes. The object has a reference count
 172 *	of one. The return is the buffer. On a failure the return is %NULL.
 173 *
 174 *	Buffers may only be allocated from interrupts using a @gfp_mask of
 175 *	%GFP_ATOMIC.
 176 */
 177struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
 178			    int flags, int node)
 179{
 180	struct kmem_cache *cache;
 181	struct skb_shared_info *shinfo;
 182	struct sk_buff *skb;
 183	u8 *data;
 184	bool pfmemalloc;
 185
 186	cache = (flags & SKB_ALLOC_FCLONE)
 187		? skbuff_fclone_cache : skbuff_head_cache;
 188
 189	if (sk_memalloc_socks() && (flags & SKB_ALLOC_RX))
 190		gfp_mask |= __GFP_MEMALLOC;
 191
 192	/* Get the HEAD */
 193	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
 194	if (!skb)
 195		goto out;
 196	prefetchw(skb);
 197
 198	/* We do our best to align skb_shared_info on a separate cache
 199	 * line. It usually works because kmalloc(X > SMP_CACHE_BYTES) gives
 200	 * aligned memory blocks, unless SLUB/SLAB debug is enabled.
 201	 * Both skb->head and skb_shared_info are cache line aligned.
 202	 */
 203	size = SKB_DATA_ALIGN(size);
 204	size += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 205	data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
 206	if (!data)
 207		goto nodata;
 208	/* kmalloc(size) might give us more room than requested.
 209	 * Put skb_shared_info exactly at the end of allocated zone,
 210	 * to allow max possible filling before reallocation.
 211	 */
 212	size = SKB_WITH_OVERHEAD(ksize(data));
 213	prefetchw(data + size);
 214
 215	/*
 216	 * Only clear those fields we need to clear, not those that we will
 217	 * actually initialise below. Hence, don't put any more fields after
 218	 * the tail pointer in struct sk_buff!
 219	 */
 220	memset(skb, 0, offsetof(struct sk_buff, tail));
 221	/* Account for allocated memory : skb + skb->head */
 222	skb->truesize = SKB_TRUESIZE(size);
 223	skb->pfmemalloc = pfmemalloc;
 224	refcount_set(&skb->users, 1);
 225	skb->head = data;
 226	skb->data = data;
 227	skb_reset_tail_pointer(skb);
 228	skb->end = skb->tail + size;
 229	skb->mac_header = (typeof(skb->mac_header))~0U;
 230	skb->transport_header = (typeof(skb->transport_header))~0U;
 
 231
 232	/* make sure we initialize shinfo sequentially */
 233	shinfo = skb_shinfo(skb);
 234	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 235	atomic_set(&shinfo->dataref, 1);
 
 236
 237	if (flags & SKB_ALLOC_FCLONE) {
 238		struct sk_buff_fclones *fclones;
 239
 240		fclones = container_of(skb, struct sk_buff_fclones, skb1);
 241
 
 
 242		skb->fclone = SKB_FCLONE_ORIG;
 243		refcount_set(&fclones->fclone_ref, 1);
 244
 245		fclones->skb2.fclone = SKB_FCLONE_CLONE;
 246	}
 247out:
 248	return skb;
 249nodata:
 250	kmem_cache_free(cache, skb);
 251	skb = NULL;
 252	goto out;
 253}
 254EXPORT_SYMBOL(__alloc_skb);
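
/*
 * Illustrative usage sketch, not part of the original skbuff.c: building an
 * outgoing packet with alloc_skb(), reserving headroom for lower-layer
 * headers before copying in the payload. Names are hypothetical.
 */
static struct sk_buff *example_build_packet(const void *payload,
					    unsigned int len)
{
	struct sk_buff *skb = alloc_skb(LL_MAX_HEADER + len, GFP_KERNEL);

	if (!skb)
		return NULL;

	skb_reserve(skb, LL_MAX_HEADER);	/* headroom for protocol headers */
	skb_put_data(skb, payload, len);	/* append 'len' bytes of payload */
	return skb;
}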
 255
 256/**
 257 * __build_skb - build a network buffer
 258 * @data: data buffer provided by caller
 259 * @frag_size: size of data, or 0 if head was kmalloced
 260 *
 261 * Allocate a new &sk_buff. Caller provides space holding head and
 262 * skb_shared_info. @data must have been allocated by kmalloc() only if
 263 * @frag_size is 0, otherwise data should come from the page allocator
 264 *  or vmalloc()
 265 * The return is the new skb buffer.
 266 * On a failure the return is %NULL, and @data is not freed.
 267 * Notes :
 268 *  Before IO, the driver allocates only the data buffer where the NIC
 269 *  puts the incoming frame. The driver should add room at head (NET_SKB_PAD)
 270 *  and MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info)).
 271 *  After IO, the driver calls build_skb() to allocate the sk_buff and
 272 *  populate it before giving the packet to the stack.
 273 *  RX rings only contain data buffers, not full skbs.
 274 */
 275struct sk_buff *__build_skb(void *data, unsigned int frag_size)
 276{
 277	struct skb_shared_info *shinfo;
 278	struct sk_buff *skb;
 279	unsigned int size = frag_size ? : ksize(data);
 280
 281	skb = kmem_cache_alloc(skbuff_head_cache, GFP_ATOMIC);
 282	if (!skb)
 283		return NULL;
 284
 285	size -= SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 286
 287	memset(skb, 0, offsetof(struct sk_buff, tail));
 288	skb->truesize = SKB_TRUESIZE(size);
 289	refcount_set(&skb->users, 1);
 290	skb->head = data;
 291	skb->data = data;
 292	skb_reset_tail_pointer(skb);
 293	skb->end = skb->tail + size;
 294	skb->mac_header = (typeof(skb->mac_header))~0U;
 295	skb->transport_header = (typeof(skb->transport_header))~0U;
 296
 297	/* make sure we initialize shinfo sequentially */
 298	shinfo = skb_shinfo(skb);
 299	memset(shinfo, 0, offsetof(struct skb_shared_info, dataref));
 300	atomic_set(&shinfo->dataref, 1);
 301
 302	return skb;
 303}
 304
 305/* build_skb() is a wrapper over __build_skb() that specifically
 306 * takes care of skb->head and skb->pfmemalloc.
 307 * This means that if @frag_size is not zero, then @data must be backed
 308 * by a page fragment, not kmalloc() or vmalloc()
 309 */
 310struct sk_buff *build_skb(void *data, unsigned int frag_size)
 311{
 312	struct sk_buff *skb = __build_skb(data, frag_size);
 313
 314	if (skb && frag_size) {
 315		skb->head_frag = 1;
 316		if (page_is_pfmemalloc(virt_to_head_page(data)))
 317			skb->pfmemalloc = 1;
 318	}
 319	return skb;
 320}
 321EXPORT_SYMBOL(build_skb);
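
/*
 * Illustrative usage sketch, not part of the original skbuff.c: a driver RX
 * path pairing netdev_alloc_frag() with build_skb(), as described above. The
 * copy of the hardware buffer is for illustration only; real drivers DMA
 * directly into the fragment. Names are hypothetical.
 */
static struct sk_buff *example_build_rx_skb(const void *hw_buf,
					    unsigned int hw_len)
{
	unsigned int truesize = SKB_DATA_ALIGN(NET_SKB_PAD + hw_len) +
				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	struct sk_buff *skb;
	void *data;

	data = netdev_alloc_frag(truesize);
	if (!data)
		return NULL;

	skb = build_skb(data, truesize);
	if (!skb) {
		skb_free_frag(data);
		return NULL;
	}

	skb_reserve(skb, NET_SKB_PAD);
	skb_put_data(skb, hw_buf, hw_len);
	return skb;
}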
 322
 323#define NAPI_SKB_CACHE_SIZE	64
 324
 325struct napi_alloc_cache {
 326	struct page_frag_cache page;
 327	unsigned int skb_count;
 328	void *skb_cache[NAPI_SKB_CACHE_SIZE];
 329};
 330
 331static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 332static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
 333
 334static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 335{
 336	struct page_frag_cache *nc;
 337	unsigned long flags;
 338	void *data;
 339
 340	local_irq_save(flags);
 341	nc = this_cpu_ptr(&netdev_alloc_cache);
 342	data = page_frag_alloc(nc, fragsz, gfp_mask);
 343	local_irq_restore(flags);
 344	return data;
 345}
 346
 347/**
 348 * netdev_alloc_frag - allocate a page fragment
 349 * @fragsz: fragment size
 350 *
 351 * Allocates a frag from a page for receive buffer.
 352 * Uses GFP_ATOMIC allocations.
 353 */
 354void *netdev_alloc_frag(unsigned int fragsz)
 355{
 356	return __netdev_alloc_frag(fragsz, GFP_ATOMIC);
 357}
 358EXPORT_SYMBOL(netdev_alloc_frag);
 359
 360static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 361{
 362	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 363
 364	return page_frag_alloc(&nc->page, fragsz, gfp_mask);
 365}
 366
 367void *napi_alloc_frag(unsigned int fragsz)
 368{
 369	return __napi_alloc_frag(fragsz, GFP_ATOMIC);
 370}
 371EXPORT_SYMBOL(napi_alloc_frag);
 372
 373/**
 374 *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
 375 *	@dev: network device to receive on
 376 *	@len: length to allocate
 377 *	@gfp_mask: get_free_pages mask, passed to alloc_skb
 378 *
 379 *	Allocate a new &sk_buff and assign it a usage count of one. The
 380 *	buffer has NET_SKB_PAD headroom built in. Users should allocate
 381 *	the headroom they think they need without accounting for the
 382 *	built in space. The built in space is used for optimisations.
 383 *
 384 *	%NULL is returned if there is no free memory.
 385 */
 386struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 387				   gfp_t gfp_mask)
 388{
 389	struct page_frag_cache *nc;
 390	unsigned long flags;
 391	struct sk_buff *skb;
 392	bool pfmemalloc;
 393	void *data;
 394
 395	len += NET_SKB_PAD;
 396
 397	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
 398	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 399		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 400		if (!skb)
 401			goto skb_fail;
 402		goto skb_success;
 403	}
 404
 405	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 406	len = SKB_DATA_ALIGN(len);
 407
 408	if (sk_memalloc_socks())
 409		gfp_mask |= __GFP_MEMALLOC;
 410
 411	local_irq_save(flags);
 412
 413	nc = this_cpu_ptr(&netdev_alloc_cache);
 414	data = page_frag_alloc(nc, len, gfp_mask);
 415	pfmemalloc = nc->pfmemalloc;
 416
 417	local_irq_restore(flags);
 418
 419	if (unlikely(!data))
 420		return NULL;
 421
 422	skb = __build_skb(data, len);
 423	if (unlikely(!skb)) {
 424		skb_free_frag(data);
 425		return NULL;
 426	}
 427
 428	/* use OR instead of assignment to avoid clearing of bits in mask */
 429	if (pfmemalloc)
 430		skb->pfmemalloc = 1;
 431	skb->head_frag = 1;
 432
 433skb_success:
 434	skb_reserve(skb, NET_SKB_PAD);
 435	skb->dev = dev;
 436
 437skb_fail:
 438	return skb;
 439}
 440EXPORT_SYMBOL(__netdev_alloc_skb);
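
/*
 * Illustrative usage sketch, not part of the original skbuff.c: the common
 * copybreak style RX allocation with netdev_alloc_skb(), which adds the
 * NET_SKB_PAD headroom automatically. Names are hypothetical.
 */
static struct sk_buff *example_rx_copybreak(struct net_device *dev,
					    const void *frame,
					    unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(dev, len);

	if (!skb)
		return NULL;

	skb_put_data(skb, frame, len);
	/* the caller would set skb->protocol (e.g. via eth_type_trans())
	 * before handing the skb to the stack.
	 */
	return skb;
}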
 441
 442/**
 443 *	__napi_alloc_skb - allocate skbuff for rx in a specific NAPI instance
 444 *	@napi: napi instance this buffer was allocated for
 445 *	@len: length to allocate
 446 *	@gfp_mask: get_free_pages mask, passed to alloc_skb and alloc_pages
 447 *
 448 *	Allocate a new sk_buff for use in NAPI receive.  This buffer will
 449 *	attempt to allocate the head from a special reserved region used
 450 *	only for NAPI Rx allocation.  By doing this we can save several
 451 *	CPU cycles by avoiding having to disable and re-enable IRQs.
 452 *
 453 *	%NULL is returned if there is no free memory.
 454 */
 455struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 456				 gfp_t gfp_mask)
 457{
 458	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 459	struct sk_buff *skb;
 460	void *data;
 461
 462	len += NET_SKB_PAD + NET_IP_ALIGN;
 463
 464	if ((len > SKB_WITH_OVERHEAD(PAGE_SIZE)) ||
 465	    (gfp_mask & (__GFP_DIRECT_RECLAIM | GFP_DMA))) {
 466		skb = __alloc_skb(len, gfp_mask, SKB_ALLOC_RX, NUMA_NO_NODE);
 467		if (!skb)
 468			goto skb_fail;
 469		goto skb_success;
 470	}
 471
 472	len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 473	len = SKB_DATA_ALIGN(len);
 474
 475	if (sk_memalloc_socks())
 476		gfp_mask |= __GFP_MEMALLOC;
 477
 478	data = page_frag_alloc(&nc->page, len, gfp_mask);
 479	if (unlikely(!data))
 480		return NULL;
 481
 482	skb = __build_skb(data, len);
 483	if (unlikely(!skb)) {
 484		skb_free_frag(data);
 485		return NULL;
 486	}
 487
 488	/* use OR instead of assignment to avoid clearing of bits in mask */
 489	if (nc->page.pfmemalloc)
 490		skb->pfmemalloc = 1;
 491	skb->head_frag = 1;
 492
 493skb_success:
 494	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
 495	skb->dev = napi->dev;
 496
 497skb_fail:
 498	return skb;
 499}
 500EXPORT_SYMBOL(__napi_alloc_skb);
 501
 502void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
 503		     int size, unsigned int truesize)
 504{
 505	skb_fill_page_desc(skb, i, page, off, size);
 506	skb->len += size;
 507	skb->data_len += size;
 508	skb->truesize += truesize;
 509}
 510EXPORT_SYMBOL(skb_add_rx_frag);
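
/*
 * Illustrative usage sketch, not part of the original skbuff.c: a NAPI RX
 * path keeping headers linear via napi_alloc_skb() and attaching the payload
 * page as a fragment with skb_add_rx_frag(). Names are hypothetical.
 */
static struct sk_buff *example_napi_rx_frag(struct napi_struct *napi,
					    struct page *page,
					    unsigned int len)
{
	struct sk_buff *skb = napi_alloc_skb(napi, 128);

	if (!skb)
		return NULL;

	/* the page reference is consumed by the skb; truesize covers the
	 * whole page since nothing else reuses it in this sketch.
	 */
	skb_add_rx_frag(skb, 0, page, 0, len, PAGE_SIZE);
	return skb;
}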
 511
 512void skb_coalesce_rx_frag(struct sk_buff *skb, int i, int size,
 513			  unsigned int truesize)
 514{
 515	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
 516
 517	skb_frag_size_add(frag, size);
 518	skb->len += size;
 519	skb->data_len += size;
 520	skb->truesize += truesize;
 521}
 522EXPORT_SYMBOL(skb_coalesce_rx_frag);
 523
 524static void skb_drop_list(struct sk_buff **listp)
 525{
 526	kfree_skb_list(*listp);
 527	*listp = NULL;
 528}
 529
 530static inline void skb_drop_fraglist(struct sk_buff *skb)
 531{
 532	skb_drop_list(&skb_shinfo(skb)->frag_list);
 533}
 534
 535static void skb_clone_fraglist(struct sk_buff *skb)
 536{
 537	struct sk_buff *list;
 538
 539	skb_walk_frags(skb, list)
 540		skb_get(list);
 541}
 542
 543static void skb_free_head(struct sk_buff *skb)
 544{
 545	unsigned char *head = skb->head;
 546
 547	if (skb->head_frag)
 548		skb_free_frag(head);
 549	else
 550		kfree(head);
 551}
 552
 553static void skb_release_data(struct sk_buff *skb)
 554{
 555	struct skb_shared_info *shinfo = skb_shinfo(skb);
 556	int i;
 557
 558	if (skb->cloned &&
 559	    atomic_sub_return(skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1,
 560			      &shinfo->dataref))
 561		return;
 562
 563	for (i = 0; i < shinfo->nr_frags; i++)
 564		__skb_frag_unref(&shinfo->frags[i]);
 565
 566	if (shinfo->frag_list)
 567		kfree_skb_list(shinfo->frag_list);
 568
 569	skb_zcopy_clear(skb, true);
 570	skb_free_head(skb);
 571}
 572
 573/*
 574 *	Free an skbuff's memory without cleaning the state.
 575 */
 576static void kfree_skbmem(struct sk_buff *skb)
 577{
 578	struct sk_buff_fclones *fclones;
 579
 580	switch (skb->fclone) {
 581	case SKB_FCLONE_UNAVAILABLE:
 582		kmem_cache_free(skbuff_head_cache, skb);
 583		return;
 584
 585	case SKB_FCLONE_ORIG:
 586		fclones = container_of(skb, struct sk_buff_fclones, skb1);
 587
 588		/* We usually free the clone (TX completion) before the original skb.
 589		 * This test would have no chance to be true for the clone,
 590		 * but here, branch prediction will be good.
 591		 */
 592		if (refcount_read(&fclones->fclone_ref) == 1)
 593			goto fastpath;
 594		break;
 595
 596	default: /* SKB_FCLONE_CLONE */
 597		fclones = container_of(skb, struct sk_buff_fclones, skb2);
 598		break;
 599	}
 600	if (!refcount_dec_and_test(&fclones->fclone_ref))
 601		return;
 602fastpath:
 603	kmem_cache_free(skbuff_fclone_cache, fclones);
 604}
 605
 606void skb_release_head_state(struct sk_buff *skb)
 607{
 608	skb_dst_drop(skb);
 609	secpath_reset(skb);
 610	if (skb->destructor) {
 611		WARN_ON(in_irq());
 612		skb->destructor(skb);
 613	}
 614#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 615	nf_conntrack_put(skb_nfct(skb));
 616#endif
 617#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
 618	nf_bridge_put(skb->nf_bridge);
 619#endif
 620}
 621
 622/* Free everything but the sk_buff shell. */
 623static void skb_release_all(struct sk_buff *skb)
 624{
 625	skb_release_head_state(skb);
 626	if (likely(skb->head))
 627		skb_release_data(skb);
 628}
 629
 630/**
 631 *	__kfree_skb - private function
 632 *	@skb: buffer
 633 *
 634 *	Free an sk_buff. Release anything attached to the buffer.
 635 *	Clean the state. This is an internal helper function. Users should
 636 *	always call kfree_skb().
 637 */
 638
 639void __kfree_skb(struct sk_buff *skb)
 640{
 641	skb_release_all(skb);
 642	kfree_skbmem(skb);
 643}
 644EXPORT_SYMBOL(__kfree_skb);
 645
 646/**
 647 *	kfree_skb - free an sk_buff
 648 *	@skb: buffer to free
 649 *
 650 *	Drop a reference to the buffer and free it if the usage count has
 651 *	hit zero.
 652 */
 653void kfree_skb(struct sk_buff *skb)
 654{
 655	if (!skb_unref(skb))
 656		return;
 657
 658	trace_kfree_skb(skb, __builtin_return_address(0));
 659	__kfree_skb(skb);
 660}
 661EXPORT_SYMBOL(kfree_skb);
 662
 663void kfree_skb_list(struct sk_buff *segs)
 664{
 665	while (segs) {
 666		struct sk_buff *next = segs->next;
 667
 668		kfree_skb(segs);
 669		segs = next;
 670	}
 671}
 672EXPORT_SYMBOL(kfree_skb_list);
 673
 674/**
 675 *	skb_tx_error - report an sk_buff xmit error
 676 *	@skb: buffer that triggered an error
 677 *
 678 *	Report xmit error if a device callback is tracking this skb.
 679 *	skb must be freed afterwards.
 680 */
 681void skb_tx_error(struct sk_buff *skb)
 682{
 683	skb_zcopy_clear(skb, true);
 684}
 685EXPORT_SYMBOL(skb_tx_error);
 686
 687/**
 688 *	consume_skb - free an skbuff
 689 *	@skb: buffer to free
 690 *
 691 *	Drop a ref to the buffer and free it if the usage count has hit zero.
 692 *	Functions identically to kfree_skb, but kfree_skb assumes that the frame
 693 *	is being dropped after a failure and notes that via its tracepoint.
 694 */
 695void consume_skb(struct sk_buff *skb)
 696{
 697	if (!skb_unref(skb))
 698		return;
 699
 700	trace_consume_skb(skb);
 701	__kfree_skb(skb);
 702}
 703EXPORT_SYMBOL(consume_skb);
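
/*
 * Illustrative sketch (not part of the original source): the usual split
 * between consume_skb() and kfree_skb() in a hypothetical TX completion
 * handler - consume_skb() for frames that completed normally, kfree_skb()
 * for frames that were dropped, so drop-monitoring tools only see the latter.
 * "example_tx_done" and the sent_ok flag are made up for this example.
 */
#if 0	/* illustrative example, not compiled */
static void example_tx_done(struct sk_buff *skb, bool sent_ok)
{
	if (sent_ok)
		consume_skb(skb);	/* normal completion, not a drop */
	else
		kfree_skb(skb);		/* error path, counted as a drop */
}
#endif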
 704
 705/**
 706 *	__consume_stateless_skb - free an skbuff, assuming it is stateless
 707 *	@skb: buffer to free
 708 *
 709 *	Like consume_skb(), but this variant assumes that this is the last
 710 *	skb reference and all the head states have already been dropped.
 711 */
 712void __consume_stateless_skb(struct sk_buff *skb)
 713{
 714	trace_consume_skb(skb);
 715	skb_release_data(skb);
 716	kfree_skbmem(skb);
 717}
 718
 719void __kfree_skb_flush(void)
 720{
 721	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 722
 723	/* flush skb_cache if it contains objects */
 724	if (nc->skb_count) {
 725		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 726				     nc->skb_cache);
 727		nc->skb_count = 0;
 728	}
 729}
 730
 731static inline void _kfree_skb_defer(struct sk_buff *skb)
 732{
 733	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 734
 735	/* drop skb->head and call any destructors for packet */
 736	skb_release_all(skb);
 737
 738	/* record skb to CPU local list */
 739	nc->skb_cache[nc->skb_count++] = skb;
 740
 741#ifdef CONFIG_SLUB
 742	/* SLUB writes into objects when freeing */
 743	prefetchw(skb);
 744#endif
 745
 746	/* flush skb_cache if it is filled */
 747	if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) {
 748		kmem_cache_free_bulk(skbuff_head_cache, NAPI_SKB_CACHE_SIZE,
 749				     nc->skb_cache);
 750		nc->skb_count = 0;
 751	}
 752}
 753void __kfree_skb_defer(struct sk_buff *skb)
 754{
 755	_kfree_skb_defer(skb);
 756}
 757
 758void napi_consume_skb(struct sk_buff *skb, int budget)
 759{
 760	if (unlikely(!skb))
 761		return;
 762
 763	/* Zero budget indicates a non-NAPI context called us, like netpoll */
 764	if (unlikely(!budget)) {
 765		dev_consume_skb_any(skb);
 766		return;
 767	}
 768
 769	if (!skb_unref(skb))
 770		return;
 771
 772	/* if we reach here, the SKB is ready to be freed */
 773	trace_consume_skb(skb);
 774
 775	/* if SKB is a clone, don't handle this case */
 776	if (skb->fclone != SKB_FCLONE_UNAVAILABLE) {
 777		__kfree_skb(skb);
 778		return;
 779	}
 780
 781	_kfree_skb_defer(skb);
 782}
 783EXPORT_SYMBOL(napi_consume_skb);
 784
 785/* Make sure a field is enclosed inside the headers_start/headers_end section */
 786#define CHECK_SKB_FIELD(field) \
 787	BUILD_BUG_ON(offsetof(struct sk_buff, field) <		\
 788		     offsetof(struct sk_buff, headers_start));	\
 789	BUILD_BUG_ON(offsetof(struct sk_buff, field) >		\
 790		     offsetof(struct sk_buff, headers_end));	\
 791
 792static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 793{
 794	new->tstamp		= old->tstamp;
 795	/* We do not copy old->sk */
 796	new->dev		= old->dev;
 797	memcpy(new->cb, old->cb, sizeof(old->cb));
 798	skb_dst_copy(new, old);
 799#ifdef CONFIG_XFRM
 800	new->sp			= secpath_get(old->sp);
 801#endif
 802	__nf_copy(new, old, false);
 803
 804	/* Note : this field could be in headers_start/headers_end section
 805	 * It is not yet because we do not want to have a 16 bit hole
 806	 */
 807	new->queue_mapping = old->queue_mapping;
 808
 809	memcpy(&new->headers_start, &old->headers_start,
 810	       offsetof(struct sk_buff, headers_end) -
 811	       offsetof(struct sk_buff, headers_start));
 812	CHECK_SKB_FIELD(protocol);
 813	CHECK_SKB_FIELD(csum);
 814	CHECK_SKB_FIELD(hash);
 815	CHECK_SKB_FIELD(priority);
 816	CHECK_SKB_FIELD(skb_iif);
 817	CHECK_SKB_FIELD(vlan_proto);
 818	CHECK_SKB_FIELD(vlan_tci);
 819	CHECK_SKB_FIELD(transport_header);
 820	CHECK_SKB_FIELD(network_header);
 821	CHECK_SKB_FIELD(mac_header);
 822	CHECK_SKB_FIELD(inner_protocol);
 823	CHECK_SKB_FIELD(inner_transport_header);
 824	CHECK_SKB_FIELD(inner_network_header);
 825	CHECK_SKB_FIELD(inner_mac_header);
 826	CHECK_SKB_FIELD(mark);
 827#ifdef CONFIG_NETWORK_SECMARK
 828	CHECK_SKB_FIELD(secmark);
 829#endif
 830#ifdef CONFIG_NET_RX_BUSY_POLL
 831	CHECK_SKB_FIELD(napi_id);
 832#endif
 833#ifdef CONFIG_XPS
 834	CHECK_SKB_FIELD(sender_cpu);
 835#endif
 836#ifdef CONFIG_NET_SCHED
 837	CHECK_SKB_FIELD(tc_index);
 838#endif
 839
 840}
 841
 842/*
 843 * You should not add any new code to this function.  Add it to
 844 * __copy_skb_header above instead.
 845 */
 846static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 847{
 848#define C(x) n->x = skb->x
 849
 850	n->next = n->prev = NULL;
 851	n->sk = NULL;
 852	__copy_skb_header(n, skb);
 853
 854	C(len);
 855	C(data_len);
 856	C(mac_len);
 857	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 858	n->cloned = 1;
 859	n->nohdr = 0;
 860	n->peeked = 0;
 861	n->destructor = NULL;
 862	C(tail);
 863	C(end);
 864	C(head);
 865	C(head_frag);
 866	C(data);
 867	C(truesize);
 868	refcount_set(&n->users, 1);
 869
 870	atomic_inc(&(skb_shinfo(skb)->dataref));
 871	skb->cloned = 1;
 872
 873	return n;
 874#undef C
 875}
 876
 877/**
 878 *	skb_morph	-	morph one skb into another
 879 *	@dst: the skb to receive the contents
 880 *	@src: the skb to supply the contents
 881 *
 882 *	This is identical to skb_clone except that the target skb is
 883 *	supplied by the user.
 884 *
 885 *	The target skb is returned upon exit.
 886 */
 887struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src)
 888{
 889	skb_release_all(dst);
 890	return __skb_clone(dst, src);
 891}
 892EXPORT_SYMBOL_GPL(skb_morph);
 893
 894int mm_account_pinned_pages(struct mmpin *mmp, size_t size)
 895{
 896	unsigned long max_pg, num_pg, new_pg, old_pg;
 897	struct user_struct *user;
 898
 899	if (capable(CAP_IPC_LOCK) || !size)
 900		return 0;
 901
 902	num_pg = (size >> PAGE_SHIFT) + 2;	/* worst case */
 903	max_pg = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 904	user = mmp->user ? : current_user();
 905
 906	do {
 907		old_pg = atomic_long_read(&user->locked_vm);
 908		new_pg = old_pg + num_pg;
 909		if (new_pg > max_pg)
 910			return -ENOBUFS;
 911	} while (atomic_long_cmpxchg(&user->locked_vm, old_pg, new_pg) !=
 912		 old_pg);
 913
 914	if (!mmp->user) {
 915		mmp->user = get_uid(user);
 916		mmp->num_pg = num_pg;
 917	} else {
 918		mmp->num_pg += num_pg;
 919	}
 920
 921	return 0;
 922}
 923EXPORT_SYMBOL_GPL(mm_account_pinned_pages);
 924
 925void mm_unaccount_pinned_pages(struct mmpin *mmp)
 926{
 927	if (mmp->user) {
 928		atomic_long_sub(mmp->num_pg, &mmp->user->locked_vm);
 929		free_uid(mmp->user);
 930	}
 931}
 932EXPORT_SYMBOL_GPL(mm_unaccount_pinned_pages);
 933
 934struct ubuf_info *sock_zerocopy_alloc(struct sock *sk, size_t size)
 935{
 936	struct ubuf_info *uarg;
 937	struct sk_buff *skb;
 938
 939	WARN_ON_ONCE(!in_task());
 940
 941	if (!sock_flag(sk, SOCK_ZEROCOPY))
 942		return NULL;
 943
 944	skb = sock_omalloc(sk, 0, GFP_KERNEL);
 945	if (!skb)
 946		return NULL;
 947
 948	BUILD_BUG_ON(sizeof(*uarg) > sizeof(skb->cb));
 949	uarg = (void *)skb->cb;
 950	uarg->mmp.user = NULL;
 951
 952	if (mm_account_pinned_pages(&uarg->mmp, size)) {
 953		kfree_skb(skb);
 954		return NULL;
 955	}
 956
 957	uarg->callback = sock_zerocopy_callback;
 958	uarg->id = ((u32)atomic_inc_return(&sk->sk_zckey)) - 1;
 959	uarg->len = 1;
 960	uarg->bytelen = size;
 961	uarg->zerocopy = 1;
 962	refcount_set(&uarg->refcnt, 1);
 963	sock_hold(sk);
 964
 965	return uarg;
 966}
 967EXPORT_SYMBOL_GPL(sock_zerocopy_alloc);
 968
 969static inline struct sk_buff *skb_from_uarg(struct ubuf_info *uarg)
 970{
 971	return container_of((void *)uarg, struct sk_buff, cb);
 972}
 973
 974struct ubuf_info *sock_zerocopy_realloc(struct sock *sk, size_t size,
 975					struct ubuf_info *uarg)
 976{
 977	if (uarg) {
 978		const u32 byte_limit = 1 << 19;		/* limit to a few TSO */
 979		u32 bytelen, next;
 980
 981		/* realloc only when socket is locked (TCP, UDP cork),
 982		 * so uarg->len and sk_zckey access is serialized
 983		 */
 984		if (!sock_owned_by_user(sk)) {
 985			WARN_ON_ONCE(1);
 986			return NULL;
 987		}
 988
 989		bytelen = uarg->bytelen + size;
 990		if (uarg->len == USHRT_MAX - 1 || bytelen > byte_limit) {
 991			/* TCP can create new skb to attach new uarg */
 992			if (sk->sk_type == SOCK_STREAM)
 993				goto new_alloc;
 994			return NULL;
 995		}
 996
 997		next = (u32)atomic_read(&sk->sk_zckey);
 998		if ((u32)(uarg->id + uarg->len) == next) {
 999			if (mm_account_pinned_pages(&uarg->mmp, size))
1000				return NULL;
1001			uarg->len++;
1002			uarg->bytelen = bytelen;
1003			atomic_set(&sk->sk_zckey, ++next);
1004			sock_zerocopy_get(uarg);
1005			return uarg;
1006		}
1007	}
1008
1009new_alloc:
1010	return sock_zerocopy_alloc(sk, size);
1011}
1012EXPORT_SYMBOL_GPL(sock_zerocopy_realloc);
1013
1014static bool skb_zerocopy_notify_extend(struct sk_buff *skb, u32 lo, u16 len)
1015{
1016	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);
1017	u32 old_lo, old_hi;
1018	u64 sum_len;
1019
1020	old_lo = serr->ee.ee_info;
1021	old_hi = serr->ee.ee_data;
1022	sum_len = old_hi - old_lo + 1ULL + len;
1023
1024	if (sum_len >= (1ULL << 32))
1025		return false;
1026
1027	if (lo != old_hi + 1)
1028		return false;
1029
1030	serr->ee.ee_data += len;
1031	return true;
1032}
1033
1034void sock_zerocopy_callback(struct ubuf_info *uarg, bool success)
1035{
1036	struct sk_buff *tail, *skb = skb_from_uarg(uarg);
1037	struct sock_exterr_skb *serr;
1038	struct sock *sk = skb->sk;
1039	struct sk_buff_head *q;
1040	unsigned long flags;
1041	u32 lo, hi;
1042	u16 len;
1043
1044	mm_unaccount_pinned_pages(&uarg->mmp);
1045
1046	/* if !len, there was only 1 call, and it was aborted
1047	 * so do not queue a completion notification
1048	 */
1049	if (!uarg->len || sock_flag(sk, SOCK_DEAD))
1050		goto release;
1051
1052	len = uarg->len;
1053	lo = uarg->id;
1054	hi = uarg->id + len - 1;
1055
1056	serr = SKB_EXT_ERR(skb);
1057	memset(serr, 0, sizeof(*serr));
1058	serr->ee.ee_errno = 0;
1059	serr->ee.ee_origin = SO_EE_ORIGIN_ZEROCOPY;
1060	serr->ee.ee_data = hi;
1061	serr->ee.ee_info = lo;
1062	if (!success)
1063		serr->ee.ee_code |= SO_EE_CODE_ZEROCOPY_COPIED;
1064
1065	q = &sk->sk_error_queue;
1066	spin_lock_irqsave(&q->lock, flags);
1067	tail = skb_peek_tail(q);
1068	if (!tail || SKB_EXT_ERR(tail)->ee.ee_origin != SO_EE_ORIGIN_ZEROCOPY ||
1069	    !skb_zerocopy_notify_extend(tail, lo, len)) {
1070		__skb_queue_tail(q, skb);
1071		skb = NULL;
1072	}
1073	spin_unlock_irqrestore(&q->lock, flags);
1074
1075	sk->sk_error_report(sk);
1076
1077release:
1078	consume_skb(skb);
1079	sock_put(sk);
1080}
1081EXPORT_SYMBOL_GPL(sock_zerocopy_callback);
1082
1083void sock_zerocopy_put(struct ubuf_info *uarg)
1084{
1085	if (uarg && refcount_dec_and_test(&uarg->refcnt)) {
1086		if (uarg->callback)
1087			uarg->callback(uarg, uarg->zerocopy);
1088		else
1089			consume_skb(skb_from_uarg(uarg));
1090	}
1091}
1092EXPORT_SYMBOL_GPL(sock_zerocopy_put);
1093
1094void sock_zerocopy_put_abort(struct ubuf_info *uarg)
1095{
1096	if (uarg) {
1097		struct sock *sk = skb_from_uarg(uarg)->sk;
1098
1099		atomic_dec(&sk->sk_zckey);
1100		uarg->len--;
1101
1102		sock_zerocopy_put(uarg);
1103	}
1104}
1105EXPORT_SYMBOL_GPL(sock_zerocopy_put_abort);
1106
1107extern int __zerocopy_sg_from_iter(struct sock *sk, struct sk_buff *skb,
1108				   struct iov_iter *from, size_t length);
1109
1110int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb,
1111			     struct msghdr *msg, int len,
1112			     struct ubuf_info *uarg)
1113{
1114	struct ubuf_info *orig_uarg = skb_zcopy(skb);
1115	struct iov_iter orig_iter = msg->msg_iter;
1116	int err, orig_len = skb->len;
1117
1118	/* An skb can only point to one uarg. This edge case happens when
1119	 * TCP appends to an skb, but zerocopy_realloc triggered a new alloc.
1120	 */
1121	if (orig_uarg && uarg != orig_uarg)
1122		return -EEXIST;
1123
1124	err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len);
1125	if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) {
1126		struct sock *save_sk = skb->sk;
1127
1128		/* Streams do not free skb on error. Reset to prev state. */
1129		msg->msg_iter = orig_iter;
1130		skb->sk = sk;
1131		___pskb_trim(skb, orig_len);
1132		skb->sk = save_sk;
1133		return err;
1134	}
1135
1136	skb_zcopy_set(skb, uarg);
1137	return skb->len - orig_len;
1138}
1139EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream);
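
/*
 * Illustrative sketch (not part of the original source): the rough shape of
 * a MSG_ZEROCOPY send path as a stream protocol might wire these helpers up,
 * loosely modelled on how TCP uses them.  All of the surrounding sendmsg
 * plumbing is omitted and "example_sendmsg_zerocopy" is a made-up name.
 */
#if 0	/* illustrative example, not compiled */
static int example_sendmsg_zerocopy(struct sock *sk, struct sk_buff *skb,
				    struct msghdr *msg, int copy)
{
	struct ubuf_info *uarg;
	int err;

	/* reuse the notification slot of an earlier skb when possible */
	uarg = sock_zerocopy_realloc(sk, copy, skb_zcopy(skb));
	if (!uarg)
		return -ENOBUFS;

	/* pin the user pages and attach uarg to the skb */
	err = skb_zerocopy_iter_stream(sk, skb, msg, copy, uarg);

	/* drop our reference; the skb keeps its own until completion */
	sock_zerocopy_put(uarg);
	return err;
}
#endif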
1140
1141static int skb_zerocopy_clone(struct sk_buff *nskb, struct sk_buff *orig,
1142			      gfp_t gfp_mask)
1143{
1144	if (skb_zcopy(orig)) {
1145		if (skb_zcopy(nskb)) {
1146			/* callers passing !gfp_mask are verified to not have skb_zcopy(nskb) set */
1147			if (!gfp_mask) {
1148				WARN_ON_ONCE(1);
1149				return -ENOMEM;
1150			}
1151			if (skb_uarg(nskb) == skb_uarg(orig))
1152				return 0;
1153			if (skb_copy_ubufs(nskb, GFP_ATOMIC))
1154				return -EIO;
1155		}
1156		skb_zcopy_set(nskb, skb_uarg(orig));
1157	}
1158	return 0;
1159}
1160
1161/**
1162 *	skb_copy_ubufs	-	copy userspace skb frags buffers to kernel
1163 *	@skb: the skb to modify
1164 *	@gfp_mask: allocation priority
1165 *
1166 *	This must be called on an SKBTX_DEV_ZEROCOPY skb.
1167 *	It will copy all frags into kernel memory and drop the reference
1168 *	to the userspace pages.
1169 *
1170 *	If this function is called from an interrupt, @gfp_mask must be
1171 *	%GFP_ATOMIC.
1172 *
1173 *	Returns 0 on success or a negative error code on failure
1174 *	to allocate kernel memory to copy to.
1175 */
1176int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
1177{
1178	int num_frags = skb_shinfo(skb)->nr_frags;
1179	struct page *page, *head = NULL;
1180	int i, new_frags;
1181	u32 d_off;
1182
1183	if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
1184		return -EINVAL;
1185
1186	if (!num_frags)
1187		goto release;
1188
1189	new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
1190	for (i = 0; i < new_frags; i++) {
1191		page = alloc_page(gfp_mask);
1192		if (!page) {
1193			while (head) {
1194				struct page *next = (struct page *)page_private(head);
1195				put_page(head);
1196				head = next;
1197			}
1198			return -ENOMEM;
1199		}
1200		set_page_private(page, (unsigned long)head);
1201		head = page;
1202	}
1203
1204	page = head;
1205	d_off = 0;
1206	for (i = 0; i < num_frags; i++) {
1207		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
1208		u32 p_off, p_len, copied;
1209		struct page *p;
1210		u8 *vaddr;
1211
1212		skb_frag_foreach_page(f, f->page_offset, skb_frag_size(f),
1213				      p, p_off, p_len, copied) {
1214			u32 copy, done = 0;
1215			vaddr = kmap_atomic(p);
1216
1217			while (done < p_len) {
1218				if (d_off == PAGE_SIZE) {
1219					d_off = 0;
1220					page = (struct page *)page_private(page);
1221				}
1222				copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
1223				memcpy(page_address(page) + d_off,
1224				       vaddr + p_off + done, copy);
1225				done += copy;
1226				d_off += copy;
1227			}
1228			kunmap_atomic(vaddr);
1229		}
1230	}
1231
1232	/* skb frags release userspace buffers */
1233	for (i = 0; i < num_frags; i++)
1234		skb_frag_unref(skb, i);
1235
1236	/* skb frags point to kernel buffers */
1237	for (i = 0; i < new_frags - 1; i++) {
1238		__skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
1239		head = (struct page *)page_private(head);
1240	}
1241	__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
1242	skb_shinfo(skb)->nr_frags = new_frags;
1243
1244release:
1245	skb_zcopy_clear(skb, false);
1246	return 0;
1247}
1248EXPORT_SYMBOL_GPL(skb_copy_ubufs);
1249
1250/**
1251 *	skb_clone	-	duplicate an sk_buff
1252 *	@skb: buffer to clone
1253 *	@gfp_mask: allocation priority
1254 *
1255 *	Duplicate an &sk_buff. The new one is not owned by a socket. Both
1256 *	copies share the same packet data but not the structure. The new
1257 *	buffer has a reference count of 1. If the allocation fails the
1258 *	function returns %NULL, otherwise the new buffer is returned.
1259 *
1260 *	If this function is called from an interrupt, @gfp_mask must be
1261 *	%GFP_ATOMIC.
1262 */
1263
1264struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
1265{
1266	struct sk_buff_fclones *fclones = container_of(skb,
1267						       struct sk_buff_fclones,
1268						       skb1);
1269	struct sk_buff *n;
1270
1271	if (skb_orphan_frags(skb, gfp_mask))
1272		return NULL;
1273
1274	if (skb->fclone == SKB_FCLONE_ORIG &&
1275	    refcount_read(&fclones->fclone_ref) == 1) {
1276		n = &fclones->skb2;
1277		refcount_set(&fclones->fclone_ref, 2);
1278	} else {
1279		if (skb_pfmemalloc(skb))
1280			gfp_mask |= __GFP_MEMALLOC;
1281
1282		n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
1283		if (!n)
1284			return NULL;
1285
1286		n->fclone = SKB_FCLONE_UNAVAILABLE;
1287	}
1288
1289	return __skb_clone(n, skb);
1290}
1291EXPORT_SYMBOL(skb_clone);
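
/*
 * Illustrative sketch (not part of the original source): cloning an skb so a
 * second consumer (for instance a packet tap) can hold its own reference to
 * the same packet data while the original continues down the stack.
 * "example_deliver_to_tap" and "example_tap_rx" are made-up names.
 */
#if 0	/* illustrative example, not compiled */
static void example_deliver_to_tap(struct sk_buff *skb)
{
	struct sk_buff *clone;

	clone = skb_clone(skb, GFP_ATOMIC);
	if (!clone)
		return;			/* tap delivery is best effort */

	/* the clone shares the packet data, so it must not be written to */
	example_tap_rx(clone);
}
#endif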
1292
1293static void skb_headers_offset_update(struct sk_buff *skb, int off)
1294{
1295	/* Only adjust this if it actually is csum_start rather than csum */
1296	if (skb->ip_summed == CHECKSUM_PARTIAL)
1297		skb->csum_start += off;
1298	/* {transport,network,mac}_header and tail are relative to skb->head */
1299	skb->transport_header += off;
1300	skb->network_header   += off;
1301	if (skb_mac_header_was_set(skb))
1302		skb->mac_header += off;
1303	skb->inner_transport_header += off;
1304	skb->inner_network_header += off;
1305	skb->inner_mac_header += off;
1306}
1307
1308static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
1309{
1310	__copy_skb_header(new, old);
1311
1312	skb_shinfo(new)->gso_size = skb_shinfo(old)->gso_size;
1313	skb_shinfo(new)->gso_segs = skb_shinfo(old)->gso_segs;
1314	skb_shinfo(new)->gso_type = skb_shinfo(old)->gso_type;
1315}
1316
1317static inline int skb_alloc_rx_flag(const struct sk_buff *skb)
1318{
1319	if (skb_pfmemalloc(skb))
1320		return SKB_ALLOC_RX;
1321	return 0;
1322}
1323
1324/**
1325 *	skb_copy	-	create private copy of an sk_buff
1326 *	@skb: buffer to copy
1327 *	@gfp_mask: allocation priority
1328 *
1329 *	Make a copy of both an &sk_buff and its data. This is used when the
1330 *	caller wishes to modify the data and needs a private copy of the
1331 *	data to alter. Returns %NULL on failure or the pointer to the buffer
1332 *	on success. The returned buffer has a reference count of 1.
1333 *
1334 *	As a by-product this function converts a non-linear &sk_buff to a
1335 *	linear one, so that the &sk_buff becomes completely private and the
1336 *	caller is allowed to modify all the data of the returned buffer. This
1337 *	means that this function is not recommended for use in circumstances
1338 *	where only the header is going to be modified. Use pskb_copy() instead.
1339 */
1340
1341struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask)
1342{
1343	int headerlen = skb_headroom(skb);
1344	unsigned int size = skb_end_offset(skb) + skb->data_len;
1345	struct sk_buff *n = __alloc_skb(size, gfp_mask,
1346					skb_alloc_rx_flag(skb), NUMA_NO_NODE);
1347
1348	if (!n)
1349		return NULL;
1350
1351	/* Set the data pointer */
1352	skb_reserve(n, headerlen);
1353	/* Set the tail pointer and length */
1354	skb_put(n, skb->len);
1355
1356	BUG_ON(skb_copy_bits(skb, -headerlen, n->head, headerlen + skb->len));
1357
1358	copy_skb_header(n, skb);
1359	return n;
1360}
1361EXPORT_SYMBOL(skb_copy);
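
/*
 * Illustrative sketch (not part of the original source): taking a fully
 * private, linear copy before rewriting payload bytes in place, since a
 * clone only duplicates the struct sk_buff and still shares the data.
 * "example_rewrite_payload" and "example_mangle" are made-up names.
 */
#if 0	/* illustrative example, not compiled */
static struct sk_buff *example_rewrite_payload(struct sk_buff *skb)
{
	struct sk_buff *copy;

	copy = skb_copy(skb, GFP_ATOMIC);
	if (!copy)
		return NULL;

	/* the copy is linear and unshared, so its data may be modified */
	example_mangle(copy->data, copy->len);
	return copy;
}
#endif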
1362
1363/**
1364 *	__pskb_copy_fclone	-  create copy of an sk_buff with private head.
1365 *	@skb: buffer to copy
1366 *	@headroom: headroom of new skb
1367 *	@gfp_mask: allocation priority
1368 *	@fclone: if true allocate the copy of the skb from the fclone
1369 *	cache instead of the head cache; it is recommended to set this
1370 *	to true for the cases where the copy will likely be cloned
1371 *
1372 *	Make a copy of both an &sk_buff and part of its data, located
1373 *	in its header. Fragmented data remains shared. This is used when
1374 *	the caller wishes to modify only the header of the &sk_buff and needs
1375 *	a private copy of the header to alter. Returns %NULL on failure
1376 *	or the pointer to the buffer on success.
1377 *	The returned buffer has a reference count of 1.
1378 */
1379
1380struct sk_buff *__pskb_copy_fclone(struct sk_buff *skb, int headroom,
1381				   gfp_t gfp_mask, bool fclone)
1382{
1383	unsigned int size = skb_headlen(skb) + headroom;
1384	int flags = skb_alloc_rx_flag(skb) | (fclone ? SKB_ALLOC_FCLONE : 0);
1385	struct sk_buff *n = __alloc_skb(size, gfp_mask, flags, NUMA_NO_NODE);
1386
1387	if (!n)
1388		goto out;
1389
1390	/* Set the data pointer */
1391	skb_reserve(n, headroom);
1392	/* Set the tail pointer and length */
1393	skb_put(n, skb_headlen(skb));
1394	/* Copy the bytes */
1395	skb_copy_from_linear_data(skb, n->data, n->len);
1396
1397	n->truesize += skb->data_len;
1398	n->data_len  = skb->data_len;
1399	n->len	     = skb->len;
1400
1401	if (skb_shinfo(skb)->nr_frags) {
1402		int i;
1403
1404		if (skb_orphan_frags(skb, gfp_mask) ||
1405		    skb_zerocopy_clone(n, skb, gfp_mask)) {
1406			kfree_skb(n);
1407			n = NULL;
1408			goto out;
1409		}
1410		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1411			skb_shinfo(n)->frags[i] = skb_shinfo(skb)->frags[i];
1412			skb_frag_ref(skb, i);
1413		}
1414		skb_shinfo(n)->nr_frags = i;
1415	}
1416
1417	if (skb_has_frag_list(skb)) {
1418		skb_shinfo(n)->frag_list = skb_shinfo(skb)->frag_list;
1419		skb_clone_fraglist(n);
1420	}
1421
1422	copy_skb_header(n, skb);
1423out:
1424	return n;
1425}
1426EXPORT_SYMBOL(__pskb_copy_fclone);
1427
1428/**
1429 *	pskb_expand_head - reallocate header of &sk_buff
1430 *	@skb: buffer to reallocate
1431 *	@nhead: room to add at head
1432 *	@ntail: room to add at tail
1433 *	@gfp_mask: allocation priority
1434 *
1435 *	Expands (or creates an identical copy, if @nhead and @ntail are zero)
1436 *	the header of @skb. The &sk_buff itself is not changed, and it MUST have
1437 *	a reference count of 1. Returns zero on success or a negative error
1438 *	code if expansion failed; in the latter case, the &sk_buff is not changed.
1439 *
1440 *	All the pointers pointing into skb header may change and must be
1441 *	reloaded after call to this function.
1442 */
1443
1444int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
1445		     gfp_t gfp_mask)
1446{
1447	int i, osize = skb_end_offset(skb);
1448	int size = osize + nhead + ntail;
1449	long off;
1450	u8 *data;
1451
1452	BUG_ON(nhead < 0);
1453
1454	BUG_ON(skb_shared(skb));
1455
1456	size = SKB_DATA_ALIGN(size);
1457
1458	if (skb_pfmemalloc(skb))
1459		gfp_mask |= __GFP_MEMALLOC;
1460	data = kmalloc_reserve(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
1461			       gfp_mask, NUMA_NO_NODE, NULL);
1462	if (!data)
1463		goto nodata;
1464	size = SKB_WITH_OVERHEAD(ksize(data));
1465
1466	/* Copy only real data... and, alas, header. This should be
1467	 * optimized for the cases where the header is empty.
1468	 */
1469	memcpy(data + nhead, skb->head, skb_tail_pointer(skb) - skb->head);
1470
1471	memcpy((struct skb_shared_info *)(data + size),
1472	       skb_shinfo(skb),
1473	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
1474
1475	/*
1476	 * if shinfo is shared we must drop the old head gracefully, but if it
1477	 * is not we can just drop the old head and leave the existing refcount
1478	 * alone, since all we did is relocate the values
1479	 */
1480	if (skb_cloned(skb)) {
1481		if (skb_orphan_frags(skb, gfp_mask))
1482			goto nofrags;
1483		if (skb_zcopy(skb))
1484			refcount_inc(&skb_uarg(skb)->refcnt);
1485		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1486			skb_frag_ref(skb, i);
1487
1488		if (skb_has_frag_list(skb))
1489			skb_clone_fraglist(skb);
1490
1491		skb_release_data(skb);
1492	} else {
1493		skb_free_head(skb);
1494	}
1495	off = (data + nhead) - skb->head;
1496
1497	skb->head     = data;
1498	skb->head_frag = 0;
1499	skb->data    += off;
1500#ifdef NET_SKBUFF_DATA_USES_OFFSET
1501	skb->end      = size;
1502	off           = nhead;
1503#else
1504	skb->end      = skb->head + size;
1505#endif
1506	skb->tail	      += off;
1507	skb_headers_offset_update(skb, nhead);
1508	skb->cloned   = 0;
1509	skb->hdr_len  = 0;
1510	skb->nohdr    = 0;
1511	atomic_set(&skb_shinfo(skb)->dataref, 1);
1512
1513	skb_metadata_clear(skb);
1514
1515	/* It is not generally safe to change skb->truesize.
1516	 * For the moment, we only really care about the rx path, or
1517	 * the case when the skb is orphaned (not attached to a socket).
1518	 */
1519	if (!skb->sk || skb->destructor == sock_edemux)
1520		skb->truesize += size - osize;
1521
1522	return 0;
1523
1524nofrags:
1525	kfree(data);
1526nodata:
1527	return -ENOMEM;
1528}
1529EXPORT_SYMBOL(pskb_expand_head);
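
/*
 * Illustrative sketch (not part of the original source): growing headroom
 * before prepending an encapsulation header.  Every pointer into the old
 * head is invalid after pskb_expand_head(), so skb->data is only pushed
 * afterwards.  EXAMPLE_TUN_HLEN and "example_push_tunnel_header" are
 * made-up names; real code would typically use the skb_cow_head() helper.
 */
#if 0	/* illustrative example, not compiled */
static int example_push_tunnel_header(struct sk_buff *skb)
{
	if (skb_headroom(skb) < EXAMPLE_TUN_HLEN || skb_cloned(skb)) {
		int err = pskb_expand_head(skb, EXAMPLE_TUN_HLEN, 0,
					   GFP_ATOMIC);
		if (err)
			return err;
	}

	/* headroom is now sufficient and the head is private */
	memset(skb_push(skb, EXAMPLE_TUN_HLEN), 0, EXAMPLE_TUN_HLEN);
	return 0;
}
#endif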
1530
1531/* Make private copy of skb with writable head and some headroom */
1532
1533struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom)
1534{
1535	struct sk_buff *skb2;
1536	int delta = headroom - skb_headroom(skb);
1537
1538	if (delta <= 0)
1539		skb2 = pskb_copy(skb, GFP_ATOMIC);
1540	else {
1541		skb2 = skb_clone(skb, GFP_ATOMIC);
1542		if (skb2 && pskb_expand_head(skb2, SKB_DATA_ALIGN(delta), 0,
1543					     GFP_ATOMIC)) {
1544			kfree_skb(skb2);
1545			skb2 = NULL;
1546		}
1547	}
1548	return skb2;
1549}
1550EXPORT_SYMBOL(skb_realloc_headroom);
1551
1552/**
1553 *	skb_copy_expand	-	copy and expand sk_buff
1554 *	@skb: buffer to copy
1555 *	@newheadroom: new free bytes at head
1556 *	@newtailroom: new free bytes at tail
1557 *	@gfp_mask: allocation priority
1558 *
1559 *	Make a copy of both an &sk_buff and its data and, while doing so,
1560 *	allocate additional space.
1561 *
1562 *	This is used when the caller wishes to modify the data and needs a
1563 *	private copy of the data to alter as well as more space for new fields.
1564 *	Returns %NULL on failure or the pointer to the buffer
1565 *	on success. The returned buffer has a reference count of 1.
1566 *
1567 *	You must pass %GFP_ATOMIC as the allocation priority if this function
1568 *	is called from an interrupt.
1569 */
1570struct sk_buff *skb_copy_expand(const struct sk_buff *skb,
1571				int newheadroom, int newtailroom,
1572				gfp_t gfp_mask)
1573{
1574	/*
1575	 *	Allocate the copy buffer
1576	 */
1577	struct sk_buff *n = __alloc_skb(newheadroom + skb->len + newtailroom,
1578					gfp_mask, skb_alloc_rx_flag(skb),
1579					NUMA_NO_NODE);
1580	int oldheadroom = skb_headroom(skb);
1581	int head_copy_len, head_copy_off;
1582
1583	if (!n)
1584		return NULL;
1585
1586	skb_reserve(n, newheadroom);
1587
1588	/* Set the tail pointer and length */
1589	skb_put(n, skb->len);
1590
1591	head_copy_len = oldheadroom;
1592	head_copy_off = 0;
1593	if (newheadroom <= head_copy_len)
1594		head_copy_len = newheadroom;
1595	else
1596		head_copy_off = newheadroom - head_copy_len;
1597
1598	/* Copy the linear header and data. */
1599	BUG_ON(skb_copy_bits(skb, -head_copy_len, n->head + head_copy_off,
1600			     skb->len + head_copy_len));
1601
1602	copy_skb_header(n, skb);
1603
1604	skb_headers_offset_update(n, newheadroom - oldheadroom);
1605
1606	return n;
1607}
1608EXPORT_SYMBOL(skb_copy_expand);
1609
1610/**
1611 *	__skb_pad		-	zero pad the tail of an skb
1612 *	@skb: buffer to pad
1613 *	@pad: space to pad
1614 *	@free_on_error: free buffer on error
1615 *
1616 *	Ensure that a buffer is followed by a padding area that is zero
1617 *	filled. Used by network drivers which may DMA or transfer data
1618 *	beyond the buffer end onto the wire.
1619 *
1620 *	May return an error in out-of-memory cases. The skb is freed on error
1621 *	if @free_on_error is true.
1622 */
1623
1624int __skb_pad(struct sk_buff *skb, int pad, bool free_on_error)
1625{
1626	int err;
1627	int ntail;
1628
1629	/* If the skbuff is non-linear, tailroom is always zero. */
1630	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
1631		memset(skb->data+skb->len, 0, pad);
1632		return 0;
1633	}
1634
1635	ntail = skb->data_len + pad - (skb->end - skb->tail);
1636	if (likely(skb_cloned(skb) || ntail > 0)) {
1637		err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC);
1638		if (unlikely(err))
1639			goto free_skb;
1640	}
1641
1642	/* FIXME: The use of this function with non-linear skb's really needs
1643	 * to be audited.
1644	 */
1645	err = skb_linearize(skb);
1646	if (unlikely(err))
1647		goto free_skb;
1648
1649	memset(skb->data + skb->len, 0, pad);
1650	return 0;
1651
1652free_skb:
1653	if (free_on_error)
1654		kfree_skb(skb);
1655	return err;
1656}
1657EXPORT_SYMBOL(__skb_pad);
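
/*
 * Illustrative sketch (not part of the original source): a driver padding
 * short frames to the ethernet minimum before DMA, via the skb_put_padto()
 * wrapper that ends up in __skb_pad() above and frees the skb for us on
 * failure.  "example_xmit" and "example_queue_for_dma" are made-up names.
 */
#if 0	/* illustrative example, not compiled */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	if (skb_put_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;	/* skb was already freed on error */

	/* the frame is now at least ETH_ZLEN bytes long and zero padded */
	example_queue_for_dma(dev, skb);
	return NETDEV_TX_OK;
}
#endif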
1658
1659/**
1660 *	pskb_put - add data to the tail of a potentially fragmented buffer
1661 *	@skb: start of the buffer to use
1662 *	@tail: tail fragment of the buffer to use
1663 *	@len: amount of data to add
1664 *
1665 *	This function extends the used data area of the potentially
1666 *	fragmented buffer. @tail must be the last fragment of @skb -- or
1667 *	@skb itself. If this would exceed the total buffer size the kernel
1668 *	will panic. A pointer to the first byte of the extra data is
1669 *	returned.
1670 */
1671
1672void *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len)
1673{
1674	if (tail != skb) {
1675		skb->data_len += len;
1676		skb->len += len;
1677	}
1678	return skb_put(tail, len);
1679}
1680EXPORT_SYMBOL_GPL(pskb_put);
1681
1682/**
1683 *	skb_put - add data to a buffer
1684 *	@skb: buffer to use
1685 *	@len: amount of data to add
1686 *
1687 *	This function extends the used data area of the buffer. If this would
1688 *	exceed the total buffer size the kernel will panic. A pointer to the
1689 *	first byte of the extra data is returned.
1690 */
1691void *skb_put(struct sk_buff *skb, unsigned int len)
1692{
1693	void *tmp = skb_tail_pointer(skb);
1694	SKB_LINEAR_ASSERT(skb);
1695	skb->tail += len;
1696	skb->len  += len;
1697	if (unlikely(skb->tail > skb->end))
1698		skb_over_panic(skb, len, __builtin_return_address(0));
1699	return tmp;
1700}
1701EXPORT_SYMBOL(skb_put);
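
/*
 * Illustrative sketch (not part of the original source): the usual
 * reserve/put/push pattern when building a frame from scratch with the
 * helpers above.  "example_build_frame" and EXAMPLE_HDR_LEN are made-up
 * names for this example only.
 */
#if 0	/* illustrative example, not compiled */
static struct sk_buff *example_build_frame(const void *payload,
					   unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(EXAMPLE_HDR_LEN + len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	skb_reserve(skb, EXAMPLE_HDR_LEN);		/* leave headroom */
	memcpy(skb_put(skb, len), payload, len);	/* append the payload */
	memset(skb_push(skb, EXAMPLE_HDR_LEN), 0, EXAMPLE_HDR_LEN); /* header */

	return skb;
}
#endif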
1702
1703/**
1704 *	skb_push - add data to the start of a buffer
1705 *	@skb: buffer to use
1706 *	@len: amount of data to add
1707 *
1708 *	This function extends the used data area of the buffer at the buffer
1709 *	start. If this would exceed the total buffer headroom the kernel will
1710 *	panic. A pointer to the first byte of the extra data is returned.
1711 */
1712void *skb_push(struct sk_buff *skb, unsigned int len)
1713{
1714	skb->data -= len;
1715	skb->len  += len;
1716	if (unlikely(skb->data<skb->head))
1717		skb_under_panic(skb, len, __builtin_return_address(0));
1718	return skb->data;
1719}
1720EXPORT_SYMBOL(skb_push);
1721
1722/**
1723 *	skb_pull - remove data from the start of a buffer
1724 *	@skb: buffer to use
1725 *	@len: amount of data to remove
1726 *
1727 *	This function removes data from the start of a buffer, returning
1728 *	the memory to the headroom. A pointer to the next data in the buffer
1729 *	is returned. Once the data has been pulled future pushes will overwrite
1730 *	the old data.
1731 */
1732void *skb_pull(struct sk_buff *skb, unsigned int len)
1733{
1734	return skb_pull_inline(skb, len);
1735}
1736EXPORT_SYMBOL(skb_pull);
1737
1738/**
1739 *	skb_trim - remove end from a buffer
1740 *	@skb: buffer to alter
1741 *	@len: new length
1742 *
1743 *	Cut the length of a buffer down by removing data from the tail. If
1744 *	the buffer is already under the length specified it is not modified.
1745 *	The skb must be linear.
1746 */
1747void skb_trim(struct sk_buff *skb, unsigned int len)
1748{
1749	if (skb->len > len)
1750		__skb_trim(skb, len);
1751}
1752EXPORT_SYMBOL(skb_trim);
1753
1754/* Trims skb to length len. It can change skb pointers.
1755 */
1756
1757int ___pskb_trim(struct sk_buff *skb, unsigned int len)
1758{
1759	struct sk_buff **fragp;
1760	struct sk_buff *frag;
1761	int offset = skb_headlen(skb);
1762	int nfrags = skb_shinfo(skb)->nr_frags;
1763	int i;
1764	int err;
1765
1766	if (skb_cloned(skb) &&
1767	    unlikely((err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))))
1768		return err;
1769
1770	i = 0;
1771	if (offset >= len)
1772		goto drop_pages;
1773
1774	for (; i < nfrags; i++) {
1775		int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]);
1776
1777		if (end < len) {
1778			offset = end;
1779			continue;
1780		}
1781
1782		skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset);
1783
1784drop_pages:
1785		skb_shinfo(skb)->nr_frags = i;
1786
1787		for (; i < nfrags; i++)
1788			skb_frag_unref(skb, i);
1789
1790		if (skb_has_frag_list(skb))
1791			skb_drop_fraglist(skb);
1792		goto done;
1793	}
1794
1795	for (fragp = &skb_shinfo(skb)->frag_list; (frag = *fragp);
1796	     fragp = &frag->next) {
1797		int end = offset + frag->len;
1798
1799		if (skb_shared(frag)) {
1800			struct sk_buff *nfrag;
1801
1802			nfrag = skb_clone(frag, GFP_ATOMIC);
1803			if (unlikely(!nfrag))
1804				return -ENOMEM;
1805
1806			nfrag->next = frag->next;
1807			consume_skb(frag);
1808			frag = nfrag;
1809			*fragp = frag;
1810		}
1811
1812		if (end < len) {
1813			offset = end;
1814			continue;
1815		}
1816
1817		if (end > len &&
1818		    unlikely((err = pskb_trim(frag, len - offset))))
1819			return err;
1820
1821		if (frag->next)
1822			skb_drop_list(&frag->next);
1823		break;
1824	}
1825
1826done:
1827	if (len > skb_headlen(skb)) {
1828		skb->data_len -= skb->len - len;
1829		skb->len       = len;
1830	} else {
1831		skb->len       = len;
1832		skb->data_len  = 0;
1833		skb_set_tail_pointer(skb, len);
1834	}
1835
1836	if (!skb->sk || skb->destructor == sock_edemux)
1837		skb_condense(skb);
1838	return 0;
1839}
1840EXPORT_SYMBOL(___pskb_trim);
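
/*
 * Illustrative sketch (not part of the original source): trimming trailing
 * padding off a possibly non-linear frame with pskb_trim(), which ends up
 * in ___pskb_trim() above whenever fragments are involved.
 * "example_strip_padding" is a made-up name.
 */
#if 0	/* illustrative example, not compiled */
static int example_strip_padding(struct sk_buff *skb, unsigned int real_len)
{
	if (skb->len <= real_len)
		return 0;

	/* may reallocate the head if the skb is cloned */
	return pskb_trim(skb, real_len);
}
#endif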
1841
1842/**
1843 *	__pskb_pull_tail - advance tail of skb header
1844 *	@skb: buffer to reallocate
1845 *	@delta: number of bytes to advance tail
1846 *
1847 *	The function only makes sense on a fragmented &sk_buff:
1848 *	it expands the header by moving its tail forward and copying the
1849 *	necessary data from the fragmented part.
1850 *
1851 *	&sk_buff MUST have reference count of 1.
1852 *
1853 *	Returns %NULL (and &sk_buff does not change) if pull failed
1854 *	or value of new tail of skb in the case of success.
1855 *
1856 *	All the pointers pointing into skb header may change and must be
1857 *	reloaded after call to this function.
1858 */
1859
1860/* Moves tail of skb head forward, copying data from fragmented part,
1861 * when it is necessary.
1862 * 1. It may fail due to malloc failure.
1863 * 2. It may change skb pointers.
1864 *
1865 * It is pretty complicated. Luckily, it is called only in exceptional cases.
1866 */
1867void *__pskb_pull_tail(struct sk_buff *skb, int delta)
1868{
1869	/* If the skb does not have enough free space at the tail, get a new one
1870	 * plus 128 bytes for future expansions. If we have enough
1871	 * room at the tail, reallocate without expansion only if the skb is cloned.
1872	 */
1873	int i, k, eat = (skb->tail + delta) - skb->end;
1874
1875	if (eat > 0 || skb_cloned(skb)) {
1876		if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0,
1877				     GFP_ATOMIC))
1878			return NULL;
1879	}
1880
1881	BUG_ON(skb_copy_bits(skb, skb_headlen(skb),
1882			     skb_tail_pointer(skb), delta));
1883
1884	/* Optimization: no fragments, no reason to preestimate the
1885	 * size of pulled pages. Superb.
1886	 */
1887	if (!skb_has_frag_list(skb))
1888		goto pull_pages;
1889
1890	/* Estimate size of pulled pages. */
1891	eat = delta;
1892	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1893		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1894
1895		if (size >= eat)
1896			goto pull_pages;
1897		eat -= size;
1898	}
1899
1900	/* If we need to update the frag list, we are in trouble.
1901	 * Certainly, it is possible to add an offset to the skb data,
1902	 * but taking into account that pulling is expected to
1903	 * be a very rare operation, it is worth fighting against
1904	 * further bloating of the skb head and crucifying ourselves here instead.
1905	 * Pure masochism, indeed. 8)8)
1906	 */
1907	if (eat) {
1908		struct sk_buff *list = skb_shinfo(skb)->frag_list;
1909		struct sk_buff *clone = NULL;
1910		struct sk_buff *insp = NULL;
1911
1912		do {
1913			BUG_ON(!list);
1914
1915			if (list->len <= eat) {
1916				/* Eaten as whole. */
1917				eat -= list->len;
1918				list = list->next;
1919				insp = list;
1920			} else {
1921				/* Eaten partially. */
1922
1923				if (skb_shared(list)) {
1924					/* Sucks! We need to fork the list. :-( */
1925					clone = skb_clone(list, GFP_ATOMIC);
1926					if (!clone)
1927						return NULL;
1928					insp = list->next;
1929					list = clone;
1930				} else {
1931					/* This may be pulled without
1932					 * problems. */
1933					insp = list;
1934				}
1935				if (!pskb_pull(list, eat)) {
1936					kfree_skb(clone);
1937					return NULL;
1938				}
1939				break;
1940			}
1941		} while (eat);
1942
1943		/* Free pulled out fragments. */
1944		while ((list = skb_shinfo(skb)->frag_list) != insp) {
1945			skb_shinfo(skb)->frag_list = list->next;
1946			kfree_skb(list);
1947		}
1948		/* And insert new clone at head. */
1949		if (clone) {
1950			clone->next = list;
1951			skb_shinfo(skb)->frag_list = clone;
1952		}
1953	}
1954	/* Success! Now we may commit changes to skb data. */
1955
1956pull_pages:
1957	eat = delta;
1958	k = 0;
1959	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1960		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
1961
1962		if (size <= eat) {
1963			skb_frag_unref(skb, i);
1964			eat -= size;
1965		} else {
1966			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
1967			if (eat) {
1968				skb_shinfo(skb)->frags[k].page_offset += eat;
1969				skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat);
1970				if (!i)
1971					goto end;
1972				eat = 0;
1973			}
1974			k++;
1975		}
1976	}
1977	skb_shinfo(skb)->nr_frags = k;
1978
1979end:
1980	skb->tail     += delta;
1981	skb->data_len -= delta;
1982
1983	if (!skb->data_len)
1984		skb_zcopy_clear(skb, false);
1985
1986	return skb_tail_pointer(skb);
1987}
1988EXPORT_SYMBOL(__pskb_pull_tail);
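
/*
 * Illustrative sketch (not part of the original source): a protocol parser
 * making sure its header is in the linear area before dereferencing it;
 * pskb_may_pull() falls back to __pskb_pull_tail() above when the header is
 * currently held in fragments.  "struct example_hdr" and "example_handle"
 * are made-up names.
 */
#if 0	/* illustrative example, not compiled */
static int example_parse(struct sk_buff *skb)
{
	const struct example_hdr *hdr;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		return -EINVAL;		/* runt packet */

	/* reload the pointer: the head may have been reallocated */
	hdr = (const struct example_hdr *)skb->data;
	return example_handle(skb, hdr);
}
#endif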
1989
1990/**
1991 *	skb_copy_bits - copy bits from skb to kernel buffer
1992 *	@skb: source skb
1993 *	@offset: offset in source
1994 *	@to: destination buffer
1995 *	@len: number of bytes to copy
1996 *
1997 *	Copy the specified number of bytes from the source skb to the
1998 *	destination buffer.
1999 *
2000 *	CAUTION ! :
2001 *		If its prototype is ever changed,
2002 *		check arch/{*}/net/{*}.S files,
2003 *		since it is called from BPF assembly code.
2004 */
2005int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len)
2006{
2007	int start = skb_headlen(skb);
2008	struct sk_buff *frag_iter;
2009	int i, copy;
2010
2011	if (offset > (int)skb->len - len)
2012		goto fault;
2013
2014	/* Copy header. */
2015	if ((copy = start - offset) > 0) {
2016		if (copy > len)
2017			copy = len;
2018		skb_copy_from_linear_data_offset(skb, offset, to, copy);
2019		if ((len -= copy) == 0)
2020			return 0;
2021		offset += copy;
2022		to     += copy;
2023	}
2024
2025	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2026		int end;
2027		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
2028
2029		WARN_ON(start > offset + len);
2030
2031		end = start + skb_frag_size(f);
2032		if ((copy = end - offset) > 0) {
2033			u32 p_off, p_len, copied;
2034			struct page *p;
2035			u8 *vaddr;
2036
2037			if (copy > len)
2038				copy = len;
2039
2040			skb_frag_foreach_page(f,
2041					      f->page_offset + offset - start,
2042					      copy, p, p_off, p_len, copied) {
2043				vaddr = kmap_atomic(p);
2044				memcpy(to + copied, vaddr + p_off, p_len);
2045				kunmap_atomic(vaddr);
2046			}
2047
2048			if ((len -= copy) == 0)
2049				return 0;
2050			offset += copy;
2051			to     += copy;
2052		}
2053		start = end;
2054	}
2055
2056	skb_walk_frags(skb, frag_iter) {
2057		int end;
2058
2059		WARN_ON(start > offset + len);
2060
2061		end = start + frag_iter->len;
2062		if ((copy = end - offset) > 0) {
2063			if (copy > len)
2064				copy = len;
2065			if (skb_copy_bits(frag_iter, offset - start, to, copy))
2066				goto fault;
2067			if ((len -= copy) == 0)
2068				return 0;
2069			offset += copy;
2070			to     += copy;
2071		}
2072		start = end;
2073	}
2074
2075	if (!len)
2076		return 0;
2077
2078fault:
2079	return -EFAULT;
2080}
2081EXPORT_SYMBOL(skb_copy_bits);
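
/*
 * Illustrative sketch (not part of the original source): copying a fixed
 * size header into a local buffer no matter how the skb data is scattered
 * across the linear area, page frags and frag_list - the pattern that
 * skb_header_pointer() builds on.  "example_peek_header" and
 * "struct example_hdr" are made-up names.
 */
#if 0	/* illustrative example, not compiled */
static int example_peek_header(const struct sk_buff *skb, int offset,
			       struct example_hdr *hdr)
{
	return skb_copy_bits(skb, offset, hdr, sizeof(*hdr));
}
#endif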
2082
2083/*
2084 * Callback from splice_to_pipe(), if we need to release some pages
2085 * at the end of the spd in case we errored out while filling the pipe.
2086 */
2087static void sock_spd_release(struct splice_pipe_desc *spd, unsigned int i)
2088{
2089	put_page(spd->pages[i]);
2090}
2091
2092static struct page *linear_to_page(struct page *page, unsigned int *len,
2093				   unsigned int *offset,
2094				   struct sock *sk)
2095{
2096	struct page_frag *pfrag = sk_page_frag(sk);
2097
2098	if (!sk_page_frag_refill(sk, pfrag))
2099		return NULL;
2100
2101	*len = min_t(unsigned int, *len, pfrag->size - pfrag->offset);
2102
2103	memcpy(page_address(pfrag->page) + pfrag->offset,
2104	       page_address(page) + *offset, *len);
2105	*offset = pfrag->offset;
2106	pfrag->offset += *len;
2107
2108	return pfrag->page;
2109}
2110
2111static bool spd_can_coalesce(const struct splice_pipe_desc *spd,
2112			     struct page *page,
2113			     unsigned int offset)
2114{
2115	return	spd->nr_pages &&
2116		spd->pages[spd->nr_pages - 1] == page &&
2117		(spd->partial[spd->nr_pages - 1].offset +
2118		 spd->partial[spd->nr_pages - 1].len == offset);
2119}
2120
2121/*
2122 * Fill page/offset/length into spd, if it can hold more pages.
2123 */
2124static bool spd_fill_page(struct splice_pipe_desc *spd,
2125			  struct pipe_inode_info *pipe, struct page *page,
2126			  unsigned int *len, unsigned int offset,
2127			  bool linear,
2128			  struct sock *sk)
2129{
2130	if (unlikely(spd->nr_pages == MAX_SKB_FRAGS))
2131		return true;
2132
2133	if (linear) {
2134		page = linear_to_page(page, len, &offset, sk);
2135		if (!page)
2136			return true;
2137	}
2138	if (spd_can_coalesce(spd, page, offset)) {
2139		spd->partial[spd->nr_pages - 1].len += *len;
2140		return false;
2141	}
2142	get_page(page);
2143	spd->pages[spd->nr_pages] = page;
2144	spd->partial[spd->nr_pages].len = *len;
2145	spd->partial[spd->nr_pages].offset = offset;
2146	spd->nr_pages++;
2147
2148	return false;
2149}
2150
2151static bool __splice_segment(struct page *page, unsigned int poff,
2152			     unsigned int plen, unsigned int *off,
2153			     unsigned int *len,
2154			     struct splice_pipe_desc *spd, bool linear,
2155			     struct sock *sk,
2156			     struct pipe_inode_info *pipe)
2157{
2158	if (!*len)
2159		return true;
2160
2161	/* skip this segment if already processed */
2162	if (*off >= plen) {
2163		*off -= plen;
2164		return false;
2165	}
2166
2167	/* ignore any bits we already processed */
2168	poff += *off;
2169	plen -= *off;
2170	*off = 0;
2171
2172	do {
2173		unsigned int flen = min(*len, plen);
2174
2175		if (spd_fill_page(spd, pipe, page, &flen, poff,
2176				  linear, sk))
2177			return true;
2178		poff += flen;
2179		plen -= flen;
2180		*len -= flen;
2181	} while (*len && plen);
2182
2183	return false;
2184}
2185
2186/*
2187 * Map linear and fragment data from the skb to spd. It reports true if the
2188 * pipe is full or if we already spliced the requested length.
2189 */
2190static bool __skb_splice_bits(struct sk_buff *skb, struct pipe_inode_info *pipe,
2191			      unsigned int *offset, unsigned int *len,
2192			      struct splice_pipe_desc *spd, struct sock *sk)
2193{
2194	int seg;
2195	struct sk_buff *iter;
2196
2197	/* map the linear part:
2198	 * If skb->head_frag is set, this 'linear' part is backed by a
2199	 * fragment, and if the head is not shared with any clones then
2200	 * we can avoid a copy since we own the head portion of this page.
2201	 */
2202	if (__splice_segment(virt_to_page(skb->data),
2203			     (unsigned long) skb->data & (PAGE_SIZE - 1),
2204			     skb_headlen(skb),
2205			     offset, len, spd,
2206			     skb_head_is_locked(skb),
2207			     sk, pipe))
2208		return true;
2209
2210	/*
2211	 * then map the fragments
2212	 */
2213	for (seg = 0; seg < skb_shinfo(skb)->nr_frags; seg++) {
2214		const skb_frag_t *f = &skb_shinfo(skb)->frags[seg];
2215
2216		if (__splice_segment(skb_frag_page(f),
2217				     f->page_offset, skb_frag_size(f),
2218				     offset, len, spd, false, sk, pipe))
2219			return true;
2220	}
2221
2222	skb_walk_frags(skb, iter) {
2223		if (*offset >= iter->len) {
2224			*offset -= iter->len;
2225			continue;
2226		}
2227		/* __skb_splice_bits() only fails if the output has no room
2228		 * left, so no point in going over the frag_list for the error
2229		 * case.
2230		 */
2231		if (__skb_splice_bits(iter, pipe, offset, len, spd, sk))
2232			return true;
2233	}
2234
2235	return false;
2236}
2237
2238/*
2239 * Map data from the skb to a pipe. Should handle both the linear part,
2240 * the fragments, and the frag list.
2241 */
2242int skb_splice_bits(struct sk_buff *skb, struct sock *sk, unsigned int offset,
2243		    struct pipe_inode_info *pipe, unsigned int tlen,
2244		    unsigned int flags)
2245{
2246	struct partial_page partial[MAX_SKB_FRAGS];
2247	struct page *pages[MAX_SKB_FRAGS];
2248	struct splice_pipe_desc spd = {
2249		.pages = pages,
2250		.partial = partial,
2251		.nr_pages_max = MAX_SKB_FRAGS,
2252		.ops = &nosteal_pipe_buf_ops,
2253		.spd_release = sock_spd_release,
2254	};
2255	int ret = 0;
2256
2257	__skb_splice_bits(skb, pipe, &offset, &tlen, &spd, sk);
2258
2259	if (spd.nr_pages)
2260		ret = splice_to_pipe(pipe, &spd);
2261
2262	return ret;
2263}
2264EXPORT_SYMBOL_GPL(skb_splice_bits);
2265
2266/* Send skb data on a socket. Socket must be locked. */
2267int skb_send_sock_locked(struct sock *sk, struct sk_buff *skb, int offset,
2268			 int len)
2269{
2270	unsigned int orig_len = len;
2271	struct sk_buff *head = skb;
2272	unsigned short fragidx;
2273	int slen, ret;
2274
2275do_frag_list:
2276
2277	/* Deal with head data */
2278	while (offset < skb_headlen(skb) && len) {
2279		struct kvec kv;
2280		struct msghdr msg;
2281
2282		slen = min_t(int, len, skb_headlen(skb) - offset);
2283		kv.iov_base = skb->data + offset;
2284		kv.iov_len = slen;
2285		memset(&msg, 0, sizeof(msg));
2286
2287		ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
2288		if (ret <= 0)
2289			goto error;
2290
2291		offset += ret;
2292		len -= ret;
2293	}
2294
2295	/* All the data was skb head? */
2296	if (!len)
2297		goto out;
2298
2299	/* Make offset relative to start of frags */
2300	offset -= skb_headlen(skb);
2301
2302	/* Find where we are in frag list */
2303	for (fragidx = 0; fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2304		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2305
2306		if (offset < frag->size)
2307			break;
2308
2309		offset -= frag->size;
2310	}
2311
2312	for (; len && fragidx < skb_shinfo(skb)->nr_frags; fragidx++) {
2313		skb_frag_t *frag  = &skb_shinfo(skb)->frags[fragidx];
2314
2315		slen = min_t(size_t, len, frag->size - offset);
2316
2317		while (slen) {
2318			ret = kernel_sendpage_locked(sk, frag->page.p,
2319						     frag->page_offset + offset,
2320						     slen, MSG_DONTWAIT);
2321			if (ret <= 0)
2322				goto error;
2323
2324			len -= ret;
2325			offset += ret;
2326			slen -= ret;
2327		}
2328
2329		offset = 0;
2330	}
2331
2332	if (len) {
2333		/* Process any frag lists */
2334
2335		if (skb == head) {
2336			if (skb_has_frag_list(skb)) {
2337				skb = skb_shinfo(skb)->frag_list;
2338				goto do_frag_list;
2339			}
2340		} else if (skb->next) {
2341			skb = skb->next;
2342			goto do_frag_list;
2343		}
2344	}
2345
2346out:
2347	return orig_len - len;
2348
2349error:
2350	return orig_len == len ? ret : orig_len - len;
2351}
2352EXPORT_SYMBOL_GPL(skb_send_sock_locked);
2353
2354/* Send skb data on a socket. */
2355int skb_send_sock(struct sock *sk, struct sk_buff *skb, int offset, int len)
2356{
2357	int ret = 0;
2358
2359	lock_sock(sk);
2360	ret = skb_send_sock_locked(sk, skb, offset, len);
2361	release_sock(sk);
2362
2363	return ret;
2364}
2365EXPORT_SYMBOL_GPL(skb_send_sock);
2366
2367/**
2368 *	skb_store_bits - store bits from kernel buffer to skb
2369 *	@skb: destination buffer
2370 *	@offset: offset in destination
2371 *	@from: source buffer
2372 *	@len: number of bytes to copy
2373 *
2374 *	Copy the specified number of bytes from the source buffer to the
2375 *	destination skb.  This function handles all the messy bits of
2376 *	traversing fragment lists and such.
2377 */
2378
2379int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len)
2380{
2381	int start = skb_headlen(skb);
2382	struct sk_buff *frag_iter;
2383	int i, copy;
2384
2385	if (offset > (int)skb->len - len)
2386		goto fault;
2387
2388	if ((copy = start - offset) > 0) {
2389		if (copy > len)
2390			copy = len;
2391		skb_copy_to_linear_data_offset(skb, offset, from, copy);
2392		if ((len -= copy) == 0)
2393			return 0;
2394		offset += copy;
2395		from += copy;
2396	}
2397
2398	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2399		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2400		int end;
2401
2402		WARN_ON(start > offset + len);
2403
2404		end = start + skb_frag_size(frag);
2405		if ((copy = end - offset) > 0) {
2406			u32 p_off, p_len, copied;
2407			struct page *p;
2408			u8 *vaddr;
2409
2410			if (copy > len)
2411				copy = len;
2412
2413			skb_frag_foreach_page(frag,
2414					      frag->page_offset + offset - start,
2415					      copy, p, p_off, p_len, copied) {
2416				vaddr = kmap_atomic(p);
2417				memcpy(vaddr + p_off, from + copied, p_len);
2418				kunmap_atomic(vaddr);
2419			}
2420
2421			if ((len -= copy) == 0)
2422				return 0;
2423			offset += copy;
2424			from += copy;
2425		}
2426		start = end;
2427	}
2428
2429	skb_walk_frags(skb, frag_iter) {
2430		int end;
2431
2432		WARN_ON(start > offset + len);
2433
2434		end = start + frag_iter->len;
2435		if ((copy = end - offset) > 0) {
2436			if (copy > len)
2437				copy = len;
2438			if (skb_store_bits(frag_iter, offset - start,
2439					   from, copy))
2440				goto fault;
2441			if ((len -= copy) == 0)
2442				return 0;
2443			offset += copy;
2444			from += copy;
2445		}
2446		start = end;
2447	}
2448	if (!len)
2449		return 0;
2450
2451fault:
2452	return -EFAULT;
2453}
2454EXPORT_SYMBOL(skb_store_bits);
2455
2456/* Checksum skb data. */
2457__wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
2458		      __wsum csum, const struct skb_checksum_ops *ops)
2459{
2460	int start = skb_headlen(skb);
2461	int i, copy = start - offset;
2462	struct sk_buff *frag_iter;
2463	int pos = 0;
2464
2465	/* Checksum header. */
2466	if (copy > 0) {
2467		if (copy > len)
2468			copy = len;
2469		csum = ops->update(skb->data + offset, copy, csum);
2470		if ((len -= copy) == 0)
2471			return csum;
2472		offset += copy;
2473		pos	= copy;
2474	}
2475
2476	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2477		int end;
2478		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2479
2480		WARN_ON(start > offset + len);
2481
2482		end = start + skb_frag_size(frag);
2483		if ((copy = end - offset) > 0) {
2484			u32 p_off, p_len, copied;
2485			struct page *p;
2486			__wsum csum2;
2487			u8 *vaddr;
2488
2489			if (copy > len)
2490				copy = len;
2491
2492			skb_frag_foreach_page(frag,
2493					      frag->page_offset + offset - start,
2494					      copy, p, p_off, p_len, copied) {
2495				vaddr = kmap_atomic(p);
2496				csum2 = ops->update(vaddr + p_off, p_len, 0);
2497				kunmap_atomic(vaddr);
2498				csum = ops->combine(csum, csum2, pos, p_len);
2499				pos += p_len;
2500			}
2501
2502			if (!(len -= copy))
2503				return csum;
2504			offset += copy;
2505		}
2506		start = end;
2507	}
2508
2509	skb_walk_frags(skb, frag_iter) {
2510		int end;
2511
2512		WARN_ON(start > offset + len);
2513
2514		end = start + frag_iter->len;
2515		if ((copy = end - offset) > 0) {
2516			__wsum csum2;
2517			if (copy > len)
2518				copy = len;
2519			csum2 = __skb_checksum(frag_iter, offset - start,
2520					       copy, 0, ops);
2521			csum = ops->combine(csum, csum2, pos, copy);
2522			if ((len -= copy) == 0)
2523				return csum;
2524			offset += copy;
2525			pos    += copy;
2526		}
2527		start = end;
2528	}
2529	BUG_ON(len);
2530
2531	return csum;
2532}
2533EXPORT_SYMBOL(__skb_checksum);
2534
2535__wsum skb_checksum(const struct sk_buff *skb, int offset,
2536		    int len, __wsum csum)
2537{
2538	const struct skb_checksum_ops ops = {
2539		.update  = csum_partial_ext,
2540		.combine = csum_block_add_ext,
2541	};
2542
2543	return __skb_checksum(skb, offset, len, csum, &ops);
2544}
2545EXPORT_SYMBOL(skb_checksum);
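
/*
 * A minimal usage sketch (illustrative, not part of the original source):
 * computing the checksum of everything after the transport header.  The
 * "thoff" variable is hypothetical.
 *
 *	unsigned int thoff = skb_transport_offset(skb);
 *	__wsum csum;
 *
 *	csum = skb_checksum(skb, thoff, skb->len - thoff, 0);
 */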
2546
2547/* Both of above in one bottle. */
2548
2549__wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset,
2550				    u8 *to, int len, __wsum csum)
2551{
2552	int start = skb_headlen(skb);
2553	int i, copy = start - offset;
2554	struct sk_buff *frag_iter;
2555	int pos = 0;
2556
2557	/* Copy header. */
2558	if (copy > 0) {
2559		if (copy > len)
2560			copy = len;
2561		csum = csum_partial_copy_nocheck(skb->data + offset, to,
2562						 copy, csum);
2563		if ((len -= copy) == 0)
2564			return csum;
2565		offset += copy;
2566		to     += copy;
2567		pos	= copy;
2568	}
2569
2570	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2571		int end;
2572
2573		WARN_ON(start > offset + len);
2574
2575		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
2576		if ((copy = end - offset) > 0) {
2577			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2578			u32 p_off, p_len, copied;
2579			struct page *p;
2580			__wsum csum2;
2581			u8 *vaddr;
2582
2583			if (copy > len)
2584				copy = len;
2585
2586			skb_frag_foreach_page(frag,
2587					      frag->page_offset + offset - start,
2588					      copy, p, p_off, p_len, copied) {
2589				vaddr = kmap_atomic(p);
2590				csum2 = csum_partial_copy_nocheck(vaddr + p_off,
2591								  to + copied,
2592								  p_len, 0);
2593				kunmap_atomic(vaddr);
2594				csum = csum_block_add(csum, csum2, pos);
2595				pos += p_len;
2596			}
2597
2598			if (!(len -= copy))
2599				return csum;
2600			offset += copy;
2601			to     += copy;
2602		}
2603		start = end;
2604	}
2605
2606	skb_walk_frags(skb, frag_iter) {
2607		__wsum csum2;
2608		int end;
2609
2610		WARN_ON(start > offset + len);
2611
2612		end = start + frag_iter->len;
2613		if ((copy = end - offset) > 0) {
2614			if (copy > len)
2615				copy = len;
2616			csum2 = skb_copy_and_csum_bits(frag_iter,
2617						       offset - start,
2618						       to, copy, 0);
2619			csum = csum_block_add(csum, csum2, pos);
2620			if ((len -= copy) == 0)
2621				return csum;
2622			offset += copy;
2623			to     += copy;
2624			pos    += copy;
2625		}
2626		start = end;
2627	}
2628	BUG_ON(len);
2629	return csum;
2630}
2631EXPORT_SYMBOL(skb_copy_and_csum_bits);
2632
2633static __wsum warn_crc32c_csum_update(const void *buff, int len, __wsum sum)
2634{
2635	net_warn_ratelimited(
2636		"%s: attempt to compute crc32c without libcrc32c.ko\n",
2637		__func__);
2638	return 0;
2639}
2640
2641static __wsum warn_crc32c_csum_combine(__wsum csum, __wsum csum2,
2642				       int offset, int len)
2643{
2644	net_warn_ratelimited(
2645		"%s: attempt to compute crc32c without libcrc32c.ko\n",
2646		__func__);
2647	return 0;
2648}
2649
2650static const struct skb_checksum_ops default_crc32c_ops = {
2651	.update  = warn_crc32c_csum_update,
2652	.combine = warn_crc32c_csum_combine,
2653};
2654
2655const struct skb_checksum_ops *crc32c_csum_stub __read_mostly =
2656	&default_crc32c_ops;
2657EXPORT_SYMBOL(crc32c_csum_stub);
2658
2659/**
2660 *	skb_zerocopy_headlen - Calculate headroom needed for skb_zerocopy()
2661 *	@from: source buffer
2662 *
2663 *	Calculates the amount of linear headroom needed in the 'to' skb passed
2664 *	into skb_zerocopy().
2665 */
2666unsigned int
2667skb_zerocopy_headlen(const struct sk_buff *from)
2668{
2669	unsigned int hlen = 0;
2670
2671	if (!from->head_frag ||
2672	    skb_headlen(from) < L1_CACHE_BYTES ||
2673	    skb_shinfo(from)->nr_frags >= MAX_SKB_FRAGS)
2674		hlen = skb_headlen(from);
2675
2676	if (skb_has_frag_list(from))
2677		hlen = from->len;
2678
2679	return hlen;
2680}
2681EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
2682
2683/**
2684 *	skb_zerocopy - Zero copy skb to skb
2685 *	@to: destination buffer
2686 *	@from: source buffer
2687 *	@len: number of bytes to copy from source buffer
2688 *	@hlen: size of linear headroom in destination buffer
2689 *
2690 *	Copies up to `len` bytes from `from` to `to` by creating references
2691 *	to the frags in the source buffer.
2692 *
2693 *	The `hlen` as calculated by skb_zerocopy_headlen() specifies the
2694 *	headroom in the `to` buffer.
2695 *
2696 *	Return value:
2697 *	0: everything is OK
2698 *	-ENOMEM: couldn't orphan frags of @from due to lack of memory
2699 *	-EFAULT: skb_copy_bits() found some problem with skb geometry
2700 */
2701int
2702skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
2703{
2704	int i, j = 0;
2705	int plen = 0; /* length of skb->head fragment */
2706	int ret;
2707	struct page *page;
2708	unsigned int offset;
2709
2710	BUG_ON(!from->head_frag && !hlen);
2711
2712	/* don't bother with small payloads */
2713	if (len <= skb_tailroom(to))
2714		return skb_copy_bits(from, 0, skb_put(to, len), len);
2715
2716	if (hlen) {
2717		ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
2718		if (unlikely(ret))
2719			return ret;
2720		len -= hlen;
2721	} else {
2722		plen = min_t(int, skb_headlen(from), len);
2723		if (plen) {
2724			page = virt_to_head_page(from->head);
2725			offset = from->data - (unsigned char *)page_address(page);
2726			__skb_fill_page_desc(to, 0, page, offset, plen);
2727			get_page(page);
2728			j = 1;
2729			len -= plen;
2730		}
2731	}
2732
2733	to->truesize += len + plen;
2734	to->len += len + plen;
2735	to->data_len += len + plen;
2736
2737	if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
2738		skb_tx_error(from);
2739		return -ENOMEM;
2740	}
2741	skb_zerocopy_clone(to, from, GFP_ATOMIC);
2742
2743	for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
2744		if (!len)
2745			break;
2746		skb_shinfo(to)->frags[j] = skb_shinfo(from)->frags[i];
2747		skb_shinfo(to)->frags[j].size = min_t(int, skb_shinfo(to)->frags[j].size, len);
2748		len -= skb_shinfo(to)->frags[j].size;
2749		skb_frag_ref(to, j);
2750		j++;
2751	}
2752	skb_shinfo(to)->nr_frags = j;
2753
2754	return 0;
2755}
2756EXPORT_SYMBOL_GPL(skb_zerocopy);
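
/*
 * A minimal usage sketch (illustrative, not from this file): pairing
 * skb_zerocopy_headlen() with skb_zerocopy(), roughly as a netlink-queue
 * style user might.  The allocation sizing, the hypothetical "extra"
 * headroom for the caller's own headers and the GFP flags are assumptions.
 *
 *	unsigned int hlen = skb_zerocopy_headlen(from);
 *	struct sk_buff *to = alloc_skb(hlen + extra, GFP_ATOMIC);
 *
 *	if (to && skb_zerocopy(to, from, from->len, hlen) < 0)
 *		kfree_skb(to);
 */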
2757
2758void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to)
2759{
2760	__wsum csum;
2761	long csstart;
2762
2763	if (skb->ip_summed == CHECKSUM_PARTIAL)
2764		csstart = skb_checksum_start_offset(skb);
2765	else
2766		csstart = skb_headlen(skb);
2767
2768	BUG_ON(csstart > skb_headlen(skb));
2769
2770	skb_copy_from_linear_data(skb, to, csstart);
2771
2772	csum = 0;
2773	if (csstart != skb->len)
2774		csum = skb_copy_and_csum_bits(skb, csstart, to + csstart,
2775					      skb->len - csstart, 0);
2776
2777	if (skb->ip_summed == CHECKSUM_PARTIAL) {
2778		long csstuff = csstart + skb->csum_offset;
2779
2780		*((__sum16 *)(to + csstuff)) = csum_fold(csum);
2781	}
2782}
2783EXPORT_SYMBOL(skb_copy_and_csum_dev);
2784
2785/**
2786 *	skb_dequeue - remove from the head of the queue
2787 *	@list: list to dequeue from
2788 *
2789 *	Remove the head of the list. The list lock is taken so the function
2790 *	may be used safely with other locking list functions. The head item is
2791 *	returned or %NULL if the list is empty.
2792 */
2793
2794struct sk_buff *skb_dequeue(struct sk_buff_head *list)
2795{
2796	unsigned long flags;
2797	struct sk_buff *result;
2798
2799	spin_lock_irqsave(&list->lock, flags);
2800	result = __skb_dequeue(list);
2801	spin_unlock_irqrestore(&list->lock, flags);
2802	return result;
2803}
2804EXPORT_SYMBOL(skb_dequeue);
2805
2806/**
2807 *	skb_dequeue_tail - remove from the tail of the queue
2808 *	@list: list to dequeue from
2809 *
2810 *	Remove the tail of the list. The list lock is taken so the function
2811 *	may be used safely with other locking list functions. The tail item is
2812 *	returned or %NULL if the list is empty.
2813 */
2814struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
2815{
2816	unsigned long flags;
2817	struct sk_buff *result;
2818
2819	spin_lock_irqsave(&list->lock, flags);
2820	result = __skb_dequeue_tail(list);
2821	spin_unlock_irqrestore(&list->lock, flags);
2822	return result;
2823}
2824EXPORT_SYMBOL(skb_dequeue_tail);
2825
2826/**
2827 *	skb_queue_purge - empty a list
2828 *	@list: list to empty
2829 *
2830 *	Delete all buffers on an &sk_buff list. Each buffer is removed from
2831 *	the list and one reference dropped. This function takes the list
2832 *	lock and is atomic with respect to other list locking functions.
2833 */
2834void skb_queue_purge(struct sk_buff_head *list)
2835{
2836	struct sk_buff *skb;
2837	while ((skb = skb_dequeue(list)) != NULL)
2838		kfree_skb(skb);
2839}
2840EXPORT_SYMBOL(skb_queue_purge);
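
/*
 * A minimal usage sketch (illustrative): a driver draining its queue with
 * skb_dequeue() and emptying it on teardown with skb_queue_purge().  The
 * "rxq" queue and the process_rx() handler are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	while ((skb = skb_dequeue(&rxq)) != NULL)
 *		process_rx(skb);
 *
 * and, on teardown:
 *
 *	skb_queue_purge(&rxq);
 */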
2841
2842/**
2843 *	skb_rbtree_purge - empty a skb rbtree
2844 *	@root: root of the rbtree to empty
2845 *
2846 *	Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
2847 *	the rbtree and one reference dropped. This function does not take
2848 *	any lock. Synchronization should be handled by the caller (e.g., TCP
2849 *	out-of-order queue is protected by the socket lock).
2850 */
2851void skb_rbtree_purge(struct rb_root *root)
2852{
2853	struct rb_node *p = rb_first(root);
2854
2855	while (p) {
2856		struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
2857
2858		p = rb_next(p);
2859		rb_erase(&skb->rbnode, root);
2860		kfree_skb(skb);
2861	}
2862}
2863
2864/**
2865 *	skb_queue_head - queue a buffer at the list head
2866 *	@list: list to use
2867 *	@newsk: buffer to queue
2868 *
2869 *	Queue a buffer at the start of the list. This function takes the
2870 *	list lock and can be used safely with other locking &sk_buff
2871 *	functions.
2872 *
2873 *	A buffer cannot be placed on two lists at the same time.
2874 */
2875void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
2876{
2877	unsigned long flags;
2878
2879	spin_lock_irqsave(&list->lock, flags);
2880	__skb_queue_head(list, newsk);
2881	spin_unlock_irqrestore(&list->lock, flags);
2882}
2883EXPORT_SYMBOL(skb_queue_head);
2884
2885/**
2886 *	skb_queue_tail - queue a buffer at the list tail
2887 *	@list: list to use
2888 *	@newsk: buffer to queue
2889 *
2890 *	Queue a buffer at the tail of the list. This function takes the
2891 *	list lock and can be used safely with other locking &sk_buff
2892 *	functions.
2893 *
2894 *	A buffer cannot be placed on two lists at the same time.
2895 */
2896void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
2897{
2898	unsigned long flags;
2899
2900	spin_lock_irqsave(&list->lock, flags);
2901	__skb_queue_tail(list, newsk);
2902	spin_unlock_irqrestore(&list->lock, flags);
2903}
2904EXPORT_SYMBOL(skb_queue_tail);
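
/*
 * A minimal usage sketch (illustrative): the producer side matching the
 * consumer sketched above, queueing received buffers for a worker to pick
 * up.  The "rxq" queue and "rx_wait" waitqueue are hypothetical; "rxq" is
 * assumed to have been set up with skb_queue_head_init().
 *
 *	skb_queue_tail(&rxq, skb);
 *	wake_up(&rx_wait);
 */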
2905
2906/**
2907 *	skb_unlink	-	remove a buffer from a list
2908 *	@skb: buffer to remove
2909 *	@list: list to use
2910 *
2911 *	Remove a packet from a list. The list locks are taken and this
2912 *	function is atomic with respect to other list locked calls
2913 *
2914 *	You must know what list the SKB is on.
2915 */
2916void skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
2917{
2918	unsigned long flags;
2919
2920	spin_lock_irqsave(&list->lock, flags);
2921	__skb_unlink(skb, list);
2922	spin_unlock_irqrestore(&list->lock, flags);
2923}
2924EXPORT_SYMBOL(skb_unlink);
2925
2926/**
2927 *	skb_append	-	append a buffer
2928 *	@old: buffer to insert after
2929 *	@newsk: buffer to insert
2930 *	@list: list to use
2931 *
2932 *	Place a packet after a given packet in a list. The list locks are taken
2933 *	and this function is atomic with respect to other list locked calls.
2934 *	A buffer cannot be placed on two lists at the same time.
2935 */
2936void skb_append(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2937{
2938	unsigned long flags;
2939
2940	spin_lock_irqsave(&list->lock, flags);
2941	__skb_queue_after(list, old, newsk);
2942	spin_unlock_irqrestore(&list->lock, flags);
2943}
2944EXPORT_SYMBOL(skb_append);
2945
2946/**
2947 *	skb_insert	-	insert a buffer
2948 *	@old: buffer to insert before
2949 *	@newsk: buffer to insert
2950 *	@list: list to use
2951 *
2952 *	Place a packet before a given packet in a list. The list locks are
2953 * 	taken and this function is atomic with respect to other list locked
2954 *	calls.
2955 *
2956 *	A buffer cannot be placed on two lists at the same time.
2957 */
2958void skb_insert(struct sk_buff *old, struct sk_buff *newsk, struct sk_buff_head *list)
2959{
2960	unsigned long flags;
2961
2962	spin_lock_irqsave(&list->lock, flags);
2963	__skb_insert(newsk, old->prev, old, list);
2964	spin_unlock_irqrestore(&list->lock, flags);
2965}
2966EXPORT_SYMBOL(skb_insert);
2967
2968static inline void skb_split_inside_header(struct sk_buff *skb,
2969					   struct sk_buff* skb1,
2970					   const u32 len, const int pos)
2971{
2972	int i;
2973
2974	skb_copy_from_linear_data_offset(skb, len, skb_put(skb1, pos - len),
2975					 pos - len);
2976	/* And move data appendix as is. */
2977	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
2978		skb_shinfo(skb1)->frags[i] = skb_shinfo(skb)->frags[i];
2979
2980	skb_shinfo(skb1)->nr_frags = skb_shinfo(skb)->nr_frags;
2981	skb_shinfo(skb)->nr_frags  = 0;
2982	skb1->data_len		   = skb->data_len;
2983	skb1->len		   += skb1->data_len;
2984	skb->data_len		   = 0;
2985	skb->len		   = len;
2986	skb_set_tail_pointer(skb, len);
2987}
2988
2989static inline void skb_split_no_header(struct sk_buff *skb,
2990				       struct sk_buff* skb1,
2991				       const u32 len, int pos)
2992{
2993	int i, k = 0;
2994	const int nfrags = skb_shinfo(skb)->nr_frags;
2995
2996	skb_shinfo(skb)->nr_frags = 0;
2997	skb1->len		  = skb1->data_len = skb->len - len;
2998	skb->len		  = len;
2999	skb->data_len		  = len - pos;
3000
3001	for (i = 0; i < nfrags; i++) {
3002		int size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
3003
3004		if (pos + size > len) {
3005			skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i];
3006
3007			if (pos < len) {
3008				/* Split frag.
3009				 * We have two variants in this case:
3010				 * 1. Move the whole frag to the second
3011				 *    part, if possible.  E.g. this approach
3012				 *    is mandatory for TUX, where splitting
3013				 *    is expensive.
3014				 * 2. Split the frag exactly at the boundary,
3015				 *    which is what we do here. */
3016				skb_frag_ref(skb, i);
3017				skb_shinfo(skb1)->frags[0].page_offset += len - pos;
3018				skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos);
3019				skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos);
3020				skb_shinfo(skb)->nr_frags++;
3021			}
3022			k++;
3023		} else
3024			skb_shinfo(skb)->nr_frags++;
3025		pos += size;
3026	}
3027	skb_shinfo(skb1)->nr_frags = k;
3028}
3029
3030/**
3031 * skb_split - Split fragmented skb to two parts at length len.
3032 * @skb: the buffer to split
3033 * @skb1: the buffer to receive the second part
3034 * @len: new length for skb
3035 */
3036void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len)
3037{
3038	int pos = skb_headlen(skb);
3039
3040	skb_shinfo(skb1)->tx_flags |= skb_shinfo(skb)->tx_flags &
3041				      SKBTX_SHARED_FRAG;
3042	skb_zerocopy_clone(skb1, skb, 0);
3043	if (len < pos)	/* Split line is inside header. */
3044		skb_split_inside_header(skb, skb1, len, pos);
3045	else		/* Second chunk has no header, nothing to copy. */
3046		skb_split_no_header(skb, skb1, len, pos);
3047}
3048EXPORT_SYMBOL(skb_split);
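
/*
 * A minimal usage sketch (illustrative), in the spirit of TCP's
 * tcp_fragment(): carve the first "len" bytes off "skb" into a freshly
 * allocated buffer.  The allocation size and GFP flags are assumptions;
 * the second skb must have room for any linear remainder.
 *
 *	struct sk_buff *buff = alloc_skb(skb_headlen(skb), GFP_ATOMIC);
 *
 *	if (buff)
 *		skb_split(skb, buff, len);
 */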
3049
3050/* Shifting from/to a cloned skb is a no-go.
3051 *
3052 * Caller cannot keep skb_shinfo related pointers past calling here!
3053 */
3054static int skb_prepare_for_shift(struct sk_buff *skb)
3055{
3056	return skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
3057}
3058
3059/**
3060 * skb_shift - Shifts paged data partially from skb to another
3061 * @tgt: buffer into which tail data gets added
3062 * @skb: buffer from which the paged data comes from
3063 * @shiftlen: shift up to this many bytes
3064 *
3065 * Attempts to shift up to shiftlen worth of bytes, which may be less than
3066 * the length of the skb, from skb to tgt. Returns the number of bytes
3067 * shifted. It is up to the caller to free skb if everything was shifted.
3068 *
3069 * If @tgt runs out of frags, the whole operation is aborted.
3070 *
3071 * Skb cannot include anything else but paged data while tgt is allowed
3072 * to have non-paged data as well.
3073 *
3074 * TODO: full sized shift could be optimized but that would need
3075 * specialized skb free'er to handle frags without up-to-date nr_frags.
3076 */
3077int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
3078{
3079	int from, to, merge, todo;
3080	struct skb_frag_struct *fragfrom, *fragto;
3081
3082	BUG_ON(shiftlen > skb->len);
3083
3084	if (skb_headlen(skb))
3085		return 0;
3086	if (skb_zcopy(tgt) || skb_zcopy(skb))
3087		return 0;
3088
3089	todo = shiftlen;
3090	from = 0;
3091	to = skb_shinfo(tgt)->nr_frags;
3092	fragfrom = &skb_shinfo(skb)->frags[from];
3093
3094	/* Actual merge is delayed until the point when we know we can
3095	 * commit all, so that we don't have to undo partial changes
3096	 */
3097	if (!to ||
3098	    !skb_can_coalesce(tgt, to, skb_frag_page(fragfrom),
3099			      fragfrom->page_offset)) {
3100		merge = -1;
3101	} else {
3102		merge = to - 1;
3103
3104		todo -= skb_frag_size(fragfrom);
3105		if (todo < 0) {
3106			if (skb_prepare_for_shift(skb) ||
3107			    skb_prepare_for_shift(tgt))
3108				return 0;
3109
3110			/* All previous frag pointers might be stale! */
3111			fragfrom = &skb_shinfo(skb)->frags[from];
3112			fragto = &skb_shinfo(tgt)->frags[merge];
3113
3114			skb_frag_size_add(fragto, shiftlen);
3115			skb_frag_size_sub(fragfrom, shiftlen);
3116			fragfrom->page_offset += shiftlen;
3117
3118			goto onlymerged;
3119		}
3120
3121		from++;
3122	}
3123
3124	/* Skip full, not-fitting skb to avoid expensive operations */
3125	if ((shiftlen == skb->len) &&
3126	    (skb_shinfo(skb)->nr_frags - from) > (MAX_SKB_FRAGS - to))
3127		return 0;
3128
3129	if (skb_prepare_for_shift(skb) || skb_prepare_for_shift(tgt))
3130		return 0;
3131
3132	while ((todo > 0) && (from < skb_shinfo(skb)->nr_frags)) {
3133		if (to == MAX_SKB_FRAGS)
3134			return 0;
3135
3136		fragfrom = &skb_shinfo(skb)->frags[from];
3137		fragto = &skb_shinfo(tgt)->frags[to];
3138
3139		if (todo >= skb_frag_size(fragfrom)) {
3140			*fragto = *fragfrom;
3141			todo -= skb_frag_size(fragfrom);
3142			from++;
3143			to++;
3144
3145		} else {
3146			__skb_frag_ref(fragfrom);
3147			fragto->page = fragfrom->page;
3148			fragto->page_offset = fragfrom->page_offset;
3149			skb_frag_size_set(fragto, todo);
3150
3151			fragfrom->page_offset += todo;
3152			skb_frag_size_sub(fragfrom, todo);
3153			todo = 0;
3154
3155			to++;
3156			break;
3157		}
3158	}
3159
3160	/* Ready to "commit" this state change to tgt */
3161	skb_shinfo(tgt)->nr_frags = to;
3162
3163	if (merge >= 0) {
3164		fragfrom = &skb_shinfo(skb)->frags[0];
3165		fragto = &skb_shinfo(tgt)->frags[merge];
3166
3167		skb_frag_size_add(fragto, skb_frag_size(fragfrom));
3168		__skb_frag_unref(fragfrom);
3169	}
3170
3171	/* Reposition in the original skb */
3172	to = 0;
3173	while (from < skb_shinfo(skb)->nr_frags)
3174		skb_shinfo(skb)->frags[to++] = skb_shinfo(skb)->frags[from++];
3175	skb_shinfo(skb)->nr_frags = to;
3176
3177	BUG_ON(todo > 0 && !skb_shinfo(skb)->nr_frags);
3178
3179onlymerged:
3180	/* Most likely the tgt won't ever need its checksum anymore; the skb,
3181	 * on the other hand, might need it if it has to be resent
3182	 */
3183	tgt->ip_summed = CHECKSUM_PARTIAL;
3184	skb->ip_summed = CHECKSUM_PARTIAL;
3185
3186	/* Yak, is it really working this way? Some helper please? */
3187	skb->len -= shiftlen;
3188	skb->data_len -= shiftlen;
3189	skb->truesize -= shiftlen;
3190	tgt->len += shiftlen;
3191	tgt->data_len += shiftlen;
3192	tgt->truesize += shiftlen;
3193
3194	return shiftlen;
3195}
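
/*
 * A minimal usage sketch (illustrative), in the spirit of TCP's SACK
 * shifting code: "skb" must hold purely paged data (no linear part).  If
 * everything was shifted, the caller unlinks and frees the now-empty skb.
 *
 *	int shifted = skb_shift(prev, skb, skb->len);
 *
 *	if (shifted == skb->len) {
 *		... unlink skb from its queue and kfree_skb() it ...
 *	}
 */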
3196
3197/**
3198 * skb_prepare_seq_read - Prepare a sequential read of skb data
3199 * @skb: the buffer to read
3200 * @from: lower offset of data to be read
3201 * @to: upper offset of data to be read
3202 * @st: state variable
3203 *
3204 * Initializes the specified state variable. Must be called before
3205 * invoking skb_seq_read() for the first time.
3206 */
3207void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from,
3208			  unsigned int to, struct skb_seq_state *st)
3209{
3210	st->lower_offset = from;
3211	st->upper_offset = to;
3212	st->root_skb = st->cur_skb = skb;
3213	st->frag_idx = st->stepped_offset = 0;
3214	st->frag_data = NULL;
3215}
3216EXPORT_SYMBOL(skb_prepare_seq_read);
3217
3218/**
3219 * skb_seq_read - Sequentially read skb data
3220 * @consumed: number of bytes consumed by the caller so far
3221 * @data: destination pointer for data to be returned
3222 * @st: state variable
3223 *
3224 * Reads a block of skb data at @consumed relative to the
3225 * lower offset specified to skb_prepare_seq_read(). Assigns
3226 * the head of the data block to @data and returns the length
3227 * of the block or 0 if the end of the skb data or the upper
3228 * offset has been reached.
3229 *
3230 * The caller is not required to consume all of the data
3231 * returned, i.e. @consumed is typically set to the number
3232 * of bytes already consumed and the next call to
3233 * skb_seq_read() will return the remaining part of the block.
3234 *
3235 * Note 1: The size of each block of data returned can be arbitrary;
3236 *       this limitation is the cost of zerocopy sequential
3237 *       reads of potentially non-linear data.
3238 *
3239 * Note 2: Fragment lists within fragments are not implemented
3240 *       at the moment, state->root_skb could be replaced with
3241 *       a stack for this purpose.
3242 */
3243unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
3244			  struct skb_seq_state *st)
3245{
3246	unsigned int block_limit, abs_offset = consumed + st->lower_offset;
3247	skb_frag_t *frag;
3248
3249	if (unlikely(abs_offset >= st->upper_offset)) {
3250		if (st->frag_data) {
3251			kunmap_atomic(st->frag_data);
3252			st->frag_data = NULL;
3253		}
3254		return 0;
3255	}
3256
3257next_skb:
3258	block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;
3259
3260	if (abs_offset < block_limit && !st->frag_data) {
3261		*data = st->cur_skb->data + (abs_offset - st->stepped_offset);
3262		return block_limit - abs_offset;
3263	}
3264
3265	if (st->frag_idx == 0 && !st->frag_data)
3266		st->stepped_offset += skb_headlen(st->cur_skb);
3267
3268	while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) {
3269		frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx];
3270		block_limit = skb_frag_size(frag) + st->stepped_offset;
3271
3272		if (abs_offset < block_limit) {
3273			if (!st->frag_data)
3274				st->frag_data = kmap_atomic(skb_frag_page(frag));
3275
3276			*data = (u8 *) st->frag_data + frag->page_offset +
3277				(abs_offset - st->stepped_offset);
3278
3279			return block_limit - abs_offset;
3280		}
3281
3282		if (st->frag_data) {
3283			kunmap_atomic(st->frag_data);
3284			st->frag_data = NULL;
3285		}
3286
3287		st->frag_idx++;
3288		st->stepped_offset += skb_frag_size(frag);
3289	}
3290
3291	if (st->frag_data) {
3292		kunmap_atomic(st->frag_data);
3293		st->frag_data = NULL;
3294	}
3295
3296	if (st->root_skb == st->cur_skb && skb_has_frag_list(st->root_skb)) {
3297		st->cur_skb = skb_shinfo(st->root_skb)->frag_list;
3298		st->frag_idx = 0;
3299		goto next_skb;
3300	} else if (st->cur_skb->next) {
3301		st->cur_skb = st->cur_skb->next;
3302		st->frag_idx = 0;
3303		goto next_skb;
3304	}
3305
3306	return 0;
3307}
3308EXPORT_SYMBOL(skb_seq_read);
3309
3310/**
3311 * skb_abort_seq_read - Abort a sequential read of skb data
3312 * @st: state variable
3313 *
3314 * Must be called if the sequential read was aborted, i.e. if
3315 * skb_seq_read() was not called repeatedly until it returned 0.
3316 */
3317void skb_abort_seq_read(struct skb_seq_state *st)
3318{
3319	if (st->frag_data)
3320		kunmap_atomic(st->frag_data);
3321}
3322EXPORT_SYMBOL(skb_abort_seq_read);
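
/*
 * A minimal usage sketch (illustrative) of the sequential read API above.
 * The process_block() handler is hypothetical; because the loop runs until
 * skb_seq_read() returns 0, no skb_abort_seq_read() is needed here.
 *
 *	struct skb_seq_state st;
 *	unsigned int len, consumed = 0;
 *	const u8 *data;
 *
 *	skb_prepare_seq_read(skb, 0, skb->len, &st);
 *	while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
 *		process_block(data, len);
 *		consumed += len;
 *	}
 */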
3323
3324#define TS_SKB_CB(state)	((struct skb_seq_state *) &((state)->cb))
3325
3326static unsigned int skb_ts_get_next_block(unsigned int offset, const u8 **text,
3327					  struct ts_config *conf,
3328					  struct ts_state *state)
3329{
3330	return skb_seq_read(offset, text, TS_SKB_CB(state));
3331}
3332
3333static void skb_ts_finish(struct ts_config *conf, struct ts_state *state)
3334{
3335	skb_abort_seq_read(TS_SKB_CB(state));
3336}
3337
3338/**
3339 * skb_find_text - Find a text pattern in skb data
3340 * @skb: the buffer to look in
3341 * @from: search offset
3342 * @to: search limit
3343 * @config: textsearch configuration
3344 *
3345 * Finds a pattern in the skb data according to the specified
3346 * textsearch configuration. Use textsearch_next() to retrieve
3347 * subsequent occurrences of the pattern. Returns the offset
3348 * to the first occurrence or UINT_MAX if no match was found.
3349 */
3350unsigned int skb_find_text(struct sk_buff *skb, unsigned int from,
3351			   unsigned int to, struct ts_config *config)
3352{
3353	struct ts_state state;
3354	unsigned int ret;
3355
3356	config->get_next_block = skb_ts_get_next_block;
3357	config->finish = skb_ts_finish;
3358
3359	skb_prepare_seq_read(skb, from, to, TS_SKB_CB(&state));
3360
3361	ret = textsearch_find(config, &state);
3362	return (ret <= to - from ? ret : UINT_MAX);
3363}
3364EXPORT_SYMBOL(skb_find_text);
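
/*
 * A minimal usage sketch (illustrative), similar to what the netfilter
 * string match does.  The algorithm name and pattern are assumptions, and
 * textsearch_prepare() error handling is omitted.
 *
 *	struct ts_config *conf;
 *	unsigned int pos;
 *
 *	conf = textsearch_prepare("bm", "GET ", 4, GFP_KERNEL, TS_AUTOLOAD);
 *	pos = skb_find_text(skb, 0, skb->len, conf);
 *	if (pos != UINT_MAX)
 *		... pattern found at offset pos ...
 *	textsearch_destroy(conf);
 */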
3365
3366/**
3367 * skb_append_datato_frags - append the user data to a skb
3368 * @sk: sock  structure
3369 * @skb: skb structure to be appended with user data.
3370 * @getfrag: callback function to be used for getting the user data
3371 * @from: pointer to user message iov
3372 * @length: length of the iov message
3373 *
3374 * Description: This procedure appends the user data to the fragment part
3375 * of the skb. If any page allocation fails, this procedure returns -ENOMEM.
3376 */
3377int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
3378			int (*getfrag)(void *from, char *to, int offset,
3379					int len, int odd, struct sk_buff *skb),
3380			void *from, int length)
3381{
3382	int frg_cnt = skb_shinfo(skb)->nr_frags;
3383	int copy;
3384	int offset = 0;
3385	int ret;
3386	struct page_frag *pfrag = &current->task_frag;
3387
3388	do {
3389		/* Return error if we don't have space for new frag */
3390		if (frg_cnt >= MAX_SKB_FRAGS)
3391			return -EMSGSIZE;
3392
3393		if (!sk_page_frag_refill(sk, pfrag))
3394			return -ENOMEM;
3395
3396		/* copy the user data to page */
3397		copy = min_t(int, length, pfrag->size - pfrag->offset);
3398
3399		ret = getfrag(from, page_address(pfrag->page) + pfrag->offset,
3400			      offset, copy, 0, skb);
3401		if (ret < 0)
3402			return -EFAULT;
3403
3404		/* copy was successful so update the size parameters */
3405		skb_fill_page_desc(skb, frg_cnt, pfrag->page, pfrag->offset,
3406				   copy);
3407		frg_cnt++;
3408		pfrag->offset += copy;
3409		get_page(pfrag->page);
3410
3411		skb->truesize += copy;
3412		refcount_add(copy, &sk->sk_wmem_alloc);
3413		skb->len += copy;
3414		skb->data_len += copy;
3415		offset += copy;
3416		length -= copy;
3417
3418	} while (length > 0);
3419
3420	return 0;
3421}
3422EXPORT_SYMBOL(skb_append_datato_frags);
3423
3424int skb_append_pagefrags(struct sk_buff *skb, struct page *page,
3425			 int offset, size_t size)
3426{
3427	int i = skb_shinfo(skb)->nr_frags;
3428
3429	if (skb_can_coalesce(skb, i, page, offset)) {
3430		skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], size);
3431	} else if (i < MAX_SKB_FRAGS) {
3432		get_page(page);
3433		skb_fill_page_desc(skb, i, page, offset, size);
3434	} else {
3435		return -EMSGSIZE;
3436	}
3437
3438	return 0;
3439}
3440EXPORT_SYMBOL_GPL(skb_append_pagefrags);
3441
3442/**
3443 *	skb_pull_rcsum - pull skb and update receive checksum
3444 *	@skb: buffer to update
3445 *	@len: length of data pulled
3446 *
3447 *	This function performs an skb_pull on the packet and updates
3448 *	the CHECKSUM_COMPLETE checksum.  It should be used on
3449 *	receive path processing instead of skb_pull unless you know
3450 *	that the checksum difference is zero (e.g., a valid IP header)
3451 *	or you are setting ip_summed to CHECKSUM_NONE.
3452 */
3453void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
3454{
3455	unsigned char *data = skb->data;
3456
3457	BUG_ON(len > skb->len);
3458	__skb_pull(skb, len);
3459	skb_postpull_rcsum(skb, data, len);
3460	return skb->data;
3461}
3462EXPORT_SYMBOL_GPL(skb_pull_rcsum);
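
/*
 * A minimal usage sketch (illustrative): stripping an encapsulation header
 * on the receive path without invalidating a CHECKSUM_COMPLETE value.  The
 * HDR_LEN constant is hypothetical.
 *
 *	if (!pskb_may_pull(skb, HDR_LEN))
 *		goto drop;
 *	skb_pull_rcsum(skb, HDR_LEN);
 */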
3463
3464static inline skb_frag_t skb_head_frag_to_page_desc(struct sk_buff *frag_skb)
3465{
3466	skb_frag_t head_frag;
3467	struct page *page;
3468
3469	page = virt_to_head_page(frag_skb->head);
3470	head_frag.page.p = page;
3471	head_frag.page_offset = frag_skb->data -
3472		(unsigned char *)page_address(page);
3473	head_frag.size = skb_headlen(frag_skb);
3474	return head_frag;
3475}
3476
3477/**
3478 *	skb_segment - Perform protocol segmentation on skb.
3479 *	@head_skb: buffer to segment
3480 *	@features: features for the output path (see dev->features)
3481 *
3482 *	This function performs segmentation on the given skb.  It returns
3483 *	a pointer to the first in a list of new skbs for the segments.
3484 *	In case of error it returns ERR_PTR(err).
3485 */
3486struct sk_buff *skb_segment(struct sk_buff *head_skb,
3487			    netdev_features_t features)
3488{
3489	struct sk_buff *segs = NULL;
3490	struct sk_buff *tail = NULL;
3491	struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
3492	skb_frag_t *frag = skb_shinfo(head_skb)->frags;
3493	unsigned int mss = skb_shinfo(head_skb)->gso_size;
3494	unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
3495	struct sk_buff *frag_skb = head_skb;
3496	unsigned int offset = doffset;
3497	unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
3498	unsigned int partial_segs = 0;
3499	unsigned int headroom;
3500	unsigned int len = head_skb->len;
3501	__be16 proto;
3502	bool csum, sg;
3503	int nfrags = skb_shinfo(head_skb)->nr_frags;
3504	int err = -ENOMEM;
3505	int i = 0;
3506	int pos;
3507	int dummy;
3508
3509	__skb_push(head_skb, doffset);
3510	proto = skb_network_protocol(head_skb, &dummy);
3511	if (unlikely(!proto))
3512		return ERR_PTR(-EINVAL);
3513
3514	sg = !!(features & NETIF_F_SG);
3515	csum = !!can_checksum_protocol(features, proto);
3516
3517	if (sg && csum && (mss != GSO_BY_FRAGS))  {
3518		if (!(features & NETIF_F_GSO_PARTIAL)) {
3519			struct sk_buff *iter;
3520			unsigned int frag_len;
3521
3522			if (!list_skb ||
3523			    !net_gso_ok(features, skb_shinfo(head_skb)->gso_type))
3524				goto normal;
3525
3526			/* If we get here then all the required
3527			 * GSO features except frag_list are supported.
3528			 * Try to split the SKB to multiple GSO SKBs
3529			 * with no frag_list.
3530			 * Currently we can do that only when the buffers don't
3531			 * have a linear part and all the buffers except
3532			 * the last are of the same length.
3533			 */
3534			frag_len = list_skb->len;
3535			skb_walk_frags(head_skb, iter) {
3536				if (frag_len != iter->len && iter->next)
3537					goto normal;
3538				if (skb_headlen(iter) && !iter->head_frag)
3539					goto normal;
3540
3541				len -= iter->len;
3542			}
3543
3544			if (len != frag_len)
3545				goto normal;
3546		}
3547
3548		/* GSO partial only requires that we trim off any excess that
3549		 * doesn't fit into an MSS sized block, so take care of that
3550		 * now.
3551		 */
3552		partial_segs = len / mss;
3553		if (partial_segs > 1)
3554			mss *= partial_segs;
3555		else
3556			partial_segs = 0;
3557	}
3558
3559normal:
3560	headroom = skb_headroom(head_skb);
3561	pos = skb_headlen(head_skb);
3562
3563	do {
3564		struct sk_buff *nskb;
3565		skb_frag_t *nskb_frag;
3566		int hsize;
3567		int size;
3568
3569		if (unlikely(mss == GSO_BY_FRAGS)) {
3570			len = list_skb->len;
3571		} else {
3572			len = head_skb->len - offset;
3573			if (len > mss)
3574				len = mss;
3575		}
3576
3577		hsize = skb_headlen(head_skb) - offset;
3578		if (hsize < 0)
3579			hsize = 0;
3580		if (hsize > len || !sg)
3581			hsize = len;
3582
3583		if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
3584		    (skb_headlen(list_skb) == len || sg)) {
3585			BUG_ON(skb_headlen(list_skb) > len);
3586
3587			i = 0;
3588			nfrags = skb_shinfo(list_skb)->nr_frags;
3589			frag = skb_shinfo(list_skb)->frags;
3590			frag_skb = list_skb;
3591			pos += skb_headlen(list_skb);
3592
3593			while (pos < offset + len) {
3594				BUG_ON(i >= nfrags);
3595
3596				size = skb_frag_size(frag);
3597				if (pos + size > offset + len)
3598					break;
3599
3600				i++;
3601				pos += size;
3602				frag++;
3603			}
3604
3605			nskb = skb_clone(list_skb, GFP_ATOMIC);
3606			list_skb = list_skb->next;
3607
3608			if (unlikely(!nskb))
3609				goto err;
3610
3611			if (unlikely(pskb_trim(nskb, len))) {
3612				kfree_skb(nskb);
3613				goto err;
3614			}
3615
3616			hsize = skb_end_offset(nskb);
3617			if (skb_cow_head(nskb, doffset + headroom)) {
3618				kfree_skb(nskb);
3619				goto err;
3620			}
3621
3622			nskb->truesize += skb_end_offset(nskb) - hsize;
3623			skb_release_head_state(nskb);
3624			__skb_push(nskb, doffset);
3625		} else {
3626			nskb = __alloc_skb(hsize + doffset + headroom,
3627					   GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
3628					   NUMA_NO_NODE);
3629
3630			if (unlikely(!nskb))
3631				goto err;
3632
3633			skb_reserve(nskb, headroom);
3634			__skb_put(nskb, doffset);
3635		}
3636
3637		if (segs)
3638			tail->next = nskb;
3639		else
3640			segs = nskb;
3641		tail = nskb;
3642
3643		__copy_skb_header(nskb, head_skb);
3644
3645		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
3646		skb_reset_mac_len(nskb);
3647
3648		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
3649						 nskb->data - tnl_hlen,
3650						 doffset + tnl_hlen);
3651
3652		if (nskb->len == len + doffset)
3653			goto perform_csum_check;
3654
3655		if (!sg) {
3656			if (!nskb->remcsum_offload)
3657				nskb->ip_summed = CHECKSUM_NONE;
3658			SKB_GSO_CB(nskb)->csum =
3659				skb_copy_and_csum_bits(head_skb, offset,
3660						       skb_put(nskb, len),
3661						       len, 0);
3662			SKB_GSO_CB(nskb)->csum_start =
3663				skb_headroom(nskb) + doffset;
3664			continue;
3665		}
3666
3667		nskb_frag = skb_shinfo(nskb)->frags;
3668
3669		skb_copy_from_linear_data_offset(head_skb, offset,
3670						 skb_put(nskb, hsize), hsize);
3671
3672		skb_shinfo(nskb)->tx_flags |= skb_shinfo(head_skb)->tx_flags &
3673					      SKBTX_SHARED_FRAG;
3674
3675		if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3676		    skb_zerocopy_clone(nskb, frag_skb, GFP_ATOMIC))
3677			goto err;
3678
3679		while (pos < offset + len) {
3680			if (i >= nfrags) {
3681				i = 0;
3682				nfrags = skb_shinfo(list_skb)->nr_frags;
3683				frag = skb_shinfo(list_skb)->frags;
3684				frag_skb = list_skb;
3685				if (!skb_headlen(list_skb)) {
3686					BUG_ON(!nfrags);
3687				} else {
3688					BUG_ON(!list_skb->head_frag);
3689
3690					/* to make room for head_frag. */
3691					i--;
3692					frag--;
3693				}
3694				if (skb_orphan_frags(frag_skb, GFP_ATOMIC) ||
3695				    skb_zerocopy_clone(nskb, frag_skb,
3696						       GFP_ATOMIC))
3697					goto err;
3698
3699				list_skb = list_skb->next;
3700			}
3701
3702			if (unlikely(skb_shinfo(nskb)->nr_frags >=
3703				     MAX_SKB_FRAGS)) {
3704				net_warn_ratelimited(
3705					"skb_segment: too many frags: %u %u\n",
3706					pos, mss);
3707				goto err;
3708			}
3709
3710			*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
3711			__skb_frag_ref(nskb_frag);
3712			size = skb_frag_size(nskb_frag);
3713
3714			if (pos < offset) {
3715				nskb_frag->page_offset += offset - pos;
3716				skb_frag_size_sub(nskb_frag, offset - pos);
3717			}
3718
3719			skb_shinfo(nskb)->nr_frags++;
3720
3721			if (pos + size <= offset + len) {
3722				i++;
3723				frag++;
3724				pos += size;
3725			} else {
3726				skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
3727				goto skip_fraglist;
3728			}
3729
3730			nskb_frag++;
3731		}
3732
3733skip_fraglist:
3734		nskb->data_len = len - hsize;
3735		nskb->len += nskb->data_len;
3736		nskb->truesize += nskb->data_len;
3737
3738perform_csum_check:
3739		if (!csum) {
3740			if (skb_has_shared_frag(nskb)) {
3741				err = __skb_linearize(nskb);
3742				if (err)
3743					goto err;
3744			}
3745			if (!nskb->remcsum_offload)
3746				nskb->ip_summed = CHECKSUM_NONE;
3747			SKB_GSO_CB(nskb)->csum =
3748				skb_checksum(nskb, doffset,
3749					     nskb->len - doffset, 0);
3750			SKB_GSO_CB(nskb)->csum_start =
3751				skb_headroom(nskb) + doffset;
3752		}
3753	} while ((offset += len) < head_skb->len);
3754
3755	/* Some callers want to get the end of the list.
3756	 * Put it in segs->prev to avoid walking the list.
3757	 * (see validate_xmit_skb_list() for example)
3758	 */
3759	segs->prev = tail;
3760
3761	if (partial_segs) {
3762		struct sk_buff *iter;
3763		int type = skb_shinfo(head_skb)->gso_type;
3764		unsigned short gso_size = skb_shinfo(head_skb)->gso_size;
3765
3766		/* Update type to add partial and then remove dodgy if set */
3767		type |= (features & NETIF_F_GSO_PARTIAL) / NETIF_F_GSO_PARTIAL * SKB_GSO_PARTIAL;
3768		type &= ~SKB_GSO_DODGY;
3769
3770		/* Update GSO info and prepare to start updating headers on
3771		 * our way back down the stack of protocols.
3772		 */
3773		for (iter = segs; iter; iter = iter->next) {
3774			skb_shinfo(iter)->gso_size = gso_size;
3775			skb_shinfo(iter)->gso_segs = partial_segs;
3776			skb_shinfo(iter)->gso_type = type;
3777			SKB_GSO_CB(iter)->data_offset = skb_headroom(iter) + doffset;
3778		}
3779
3780		if (tail->len - doffset <= gso_size)
3781			skb_shinfo(tail)->gso_size = 0;
3782		else if (tail != segs)
3783			skb_shinfo(tail)->gso_segs = DIV_ROUND_UP(tail->len - doffset, gso_size);
3784	}
3785
3786	/* The following permits correct backpressure for protocols
3787	 * using skb_set_owner_w().
3788	 * The idea is to transfer ownership from head_skb to the last segment.
3789	 */
3790	if (head_skb->destructor == sock_wfree) {
3791		swap(tail->truesize, head_skb->truesize);
3792		swap(tail->destructor, head_skb->destructor);
3793		swap(tail->sk, head_skb->sk);
3794	}
3795	return segs;
3796
3797err:
3798	kfree_skb_list(segs);
3799	return ERR_PTR(err);
3800}
3801EXPORT_SYMBOL_GPL(skb_segment);
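
/*
 * A minimal usage sketch (illustrative): how a GSO callback typically hands
 * an oversized skb to skb_segment() and then walks the resulting list to
 * fix up per-segment headers.  Error propagation is simplified.
 *
 *	struct sk_buff *segs, *seg;
 *
 *	segs = skb_segment(skb, features);
 *	if (IS_ERR(segs))
 *		return segs;
 *	for (seg = segs; seg; seg = seg->next)
 *		... update protocol headers in each segment ...
 */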
3802
3803int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
3804{
3805	struct skb_shared_info *pinfo, *skbinfo = skb_shinfo(skb);
3806	unsigned int offset = skb_gro_offset(skb);
3807	unsigned int headlen = skb_headlen(skb);
3808	unsigned int len = skb_gro_len(skb);
3809	struct sk_buff *lp, *p = *head;
3810	unsigned int delta_truesize;
3811
3812	if (unlikely(p->len + len >= 65536))
3813		return -E2BIG;
3814
3815	lp = NAPI_GRO_CB(p)->last;
3816	pinfo = skb_shinfo(lp);
3817
3818	if (headlen <= offset) {
3819		skb_frag_t *frag;
3820		skb_frag_t *frag2;
3821		int i = skbinfo->nr_frags;
3822		int nr_frags = pinfo->nr_frags + i;
3823
3824		if (nr_frags > MAX_SKB_FRAGS)
3825			goto merge;
3826
3827		offset -= headlen;
3828		pinfo->nr_frags = nr_frags;
3829		skbinfo->nr_frags = 0;
3830
3831		frag = pinfo->frags + nr_frags;
3832		frag2 = skbinfo->frags + i;
3833		do {
3834			*--frag = *--frag2;
3835		} while (--i);
3836
3837		frag->page_offset += offset;
3838		skb_frag_size_sub(frag, offset);
3839
3840		/* all fragments truesize : remove (head size + sk_buff) */
3841		delta_truesize = skb->truesize -
3842				 SKB_TRUESIZE(skb_end_offset(skb));
3843
3844		skb->truesize -= skb->data_len;
3845		skb->len -= skb->data_len;
3846		skb->data_len = 0;
3847
3848		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE;
3849		goto done;
3850	} else if (skb->head_frag) {
3851		int nr_frags = pinfo->nr_frags;
3852		skb_frag_t *frag = pinfo->frags + nr_frags;
3853		struct page *page = virt_to_head_page(skb->head);
3854		unsigned int first_size = headlen - offset;
3855		unsigned int first_offset;
3856
3857		if (nr_frags + 1 + skbinfo->nr_frags > MAX_SKB_FRAGS)
3858			goto merge;
3859
3860		first_offset = skb->data -
3861			       (unsigned char *)page_address(page) +
3862			       offset;
3863
3864		pinfo->nr_frags = nr_frags + 1 + skbinfo->nr_frags;
3865
3866		frag->page.p	  = page;
3867		frag->page_offset = first_offset;
3868		skb_frag_size_set(frag, first_size);
3869
3870		memcpy(frag + 1, skbinfo->frags, sizeof(*frag) * skbinfo->nr_frags);
3871		/* We don't need to clear skbinfo->nr_frags here */
3872
3873		delta_truesize = skb->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
3874		NAPI_GRO_CB(skb)->free = NAPI_GRO_FREE_STOLEN_HEAD;
3875		goto done;
3876	}
3877
3878merge:
3879	delta_truesize = skb->truesize;
3880	if (offset > headlen) {
3881		unsigned int eat = offset - headlen;
3882
3883		skbinfo->frags[0].page_offset += eat;
3884		skb_frag_size_sub(&skbinfo->frags[0], eat);
3885		skb->data_len -= eat;
3886		skb->len -= eat;
3887		offset = headlen;
3888	}
3889
3890	__skb_pull(skb, offset);
3891
3892	if (NAPI_GRO_CB(p)->last == p)
3893		skb_shinfo(p)->frag_list = skb;
3894	else
3895		NAPI_GRO_CB(p)->last->next = skb;
3896	NAPI_GRO_CB(p)->last = skb;
3897	__skb_header_release(skb);
3898	lp = p;
3899
3900done:
3901	NAPI_GRO_CB(p)->count++;
3902	p->data_len += len;
3903	p->truesize += delta_truesize;
3904	p->len += len;
3905	if (lp != p) {
3906		lp->data_len += len;
3907		lp->truesize += delta_truesize;
3908		lp->len += len;
3909	}
3910	NAPI_GRO_CB(skb)->same_flow = 1;
3911	return 0;
3912}
3913EXPORT_SYMBOL_GPL(skb_gro_receive);
3914
3915void __init skb_init(void)
3916{
3917	skbuff_head_cache = kmem_cache_create_usercopy("skbuff_head_cache",
3918					      sizeof(struct sk_buff),
3919					      0,
3920					      SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3921					      offsetof(struct sk_buff, cb),
3922					      sizeof_field(struct sk_buff, cb),
3923					      NULL);
3924	skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
3925						sizeof(struct sk_buff_fclones),
3926						0,
3927						SLAB_HWCACHE_ALIGN|SLAB_PANIC,
3928						NULL);
3929}
3930
3931static int
3932__skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len,
3933	       unsigned int recursion_level)
3934{
3935	int start = skb_headlen(skb);
3936	int i, copy = start - offset;
3937	struct sk_buff *frag_iter;
3938	int elt = 0;
3939
3940	if (unlikely(recursion_level >= 24))
3941		return -EMSGSIZE;
3942
3943	if (copy > 0) {
3944		if (copy > len)
3945			copy = len;
3946		sg_set_buf(sg, skb->data + offset, copy);
3947		elt++;
3948		if ((len -= copy) == 0)
3949			return elt;
3950		offset += copy;
3951	}
3952
3953	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3954		int end;
3955
3956		WARN_ON(start > offset + len);
3957
3958		end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]);
3959		if ((copy = end - offset) > 0) {
3960			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3961			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3962				return -EMSGSIZE;
3963
3964			if (copy > len)
3965				copy = len;
3966			sg_set_page(&sg[elt], skb_frag_page(frag), copy,
3967					frag->page_offset+offset-start);
3968			elt++;
3969			if (!(len -= copy))
3970				return elt;
3971			offset += copy;
3972		}
3973		start = end;
3974	}
3975
3976	skb_walk_frags(skb, frag_iter) {
3977		int end, ret;
3978
3979		WARN_ON(start > offset + len);
3980
3981		end = start + frag_iter->len;
3982		if ((copy = end - offset) > 0) {
3983			if (unlikely(elt && sg_is_last(&sg[elt - 1])))
3984				return -EMSGSIZE;
3985
3986			if (copy > len)
3987				copy = len;
3988			ret = __skb_to_sgvec(frag_iter, sg+elt, offset - start,
3989					      copy, recursion_level + 1);
3990			if (unlikely(ret < 0))
3991				return ret;
3992			elt += ret;
3993			if ((len -= copy) == 0)
3994				return elt;
3995			offset += copy;
3996		}
3997		start = end;
3998	}
3999	BUG_ON(len);
4000	return elt;
4001}
4002
4003/**
4004 *	skb_to_sgvec - Fill a scatter-gather list from a socket buffer
4005 *	@skb: Socket buffer containing the buffers to be mapped
4006 *	@sg: The scatter-gather list to map into
4007 *	@offset: The offset into the buffer's contents to start mapping
4008 *	@len: Length of buffer space to be mapped
4009 *
4010 *	Fill the specified scatter-gather list with mappings/pointers into a
4011 *	region of the buffer space attached to a socket buffer. Returns either
4012 *	the number of scatterlist items used, or -EMSGSIZE if the contents
4013 *	could not fit.
4014 */
4015int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
4016{
4017	int nsg = __skb_to_sgvec(skb, sg, offset, len, 0);
4018
4019	if (nsg <= 0)
4020		return nsg;
4021
4022	sg_mark_end(&sg[nsg - 1]);
4023
4024	return nsg;
4025}
4026EXPORT_SYMBOL_GPL(skb_to_sgvec);
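
/*
 * A minimal usage sketch (illustrative), in the style of IPsec/ESP users:
 * map a whole skb into a scatterlist.  MAX_SG_ENTS is a hypothetical bound
 * on the number of entries the caller has room for.
 *
 *	struct scatterlist sg[MAX_SG_ENTS];
 *	int nsg;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	nsg = skb_to_sgvec(skb, sg, 0, skb->len);
 *	if (nsg < 0)
 *		return nsg;
 */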
4027
4028/* As compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
4029 * given sglist without marking the sg entry that contains the last skb data
4030 * as the end. So the caller can manipulate the sg list at will when padding
4031 * new data after the first call, without calling sg_unmark_end to extend it.
4032 *
4033 * Scenario to use skb_to_sgvec_nomark:
4034 * 1. sg_init_table
4035 * 2. skb_to_sgvec_nomark(payload1)
4036 * 3. skb_to_sgvec_nomark(payload2)
4037 *
4038 * This is equivalent to:
4039 * 1. sg_init_table
4040 * 2. skb_to_sgvec(payload1)
4041 * 3. sg_unmark_end
4042 * 4. skb_to_sgvec(payload2)
4043 *
4044 * When mapping multiple payloads conditionally, skb_to_sgvec_nomark
4045 * is preferable.
4046 */
4047int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
4048			int offset, int len)
4049{
4050	return __skb_to_sgvec(skb, sg, offset, len, 0);
4051}
4052EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
4053
4054
4055
4056/**
4057 *	skb_cow_data - Check that a socket buffer's data buffers are writable
4058 *	@skb: The socket buffer to check.
4059 *	@tailbits: Amount of trailing space to be added
4060 *	@trailer: Returned pointer to the skb where the @tailbits space begins
4061 *
4062 *	Make sure that the data buffers attached to a socket buffer are
4063 *	writable. If they are not, private copies are made of the data buffers
4064 *	and the socket buffer is set to use these instead.
4065 *
4066 *	If @tailbits is given, make sure that there is space to write @tailbits
4067 *	bytes of data beyond current end of socket buffer.  @trailer will be
4068 *	set to point to the skb in which this space begins.
4069 *
4070 *	The number of scatterlist elements required to completely map the
4071 *	COW'd and extended socket buffer will be returned.
4072 */
4073int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
4074{
4075	int copyflag;
4076	int elt;
4077	struct sk_buff *skb1, **skb_p;
4078
4079	/* If skb is cloned or its head is paged, reallocate
4080	 * head pulling out all the pages (pages are considered not writable
4081	 * at the moment even if they are anonymous).
4082	 */
4083	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
4084	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
4085		return -ENOMEM;
4086
4087	/* Easy case. Most of packets will go this way. */
4088	if (!skb_has_frag_list(skb)) {
4089		/* A little trouble: not enough space for the trailer.
4090		 * This should not happen when the stack is tuned to generate
4091		 * good frames. OK, on a miss we reallocate and reserve even more
4092		 * space; 128 bytes is fair. */
4093
4094		if (skb_tailroom(skb) < tailbits &&
4095		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
4096			return -ENOMEM;
4097
4098		/* Voila! */
4099		*trailer = skb;
4100		return 1;
4101	}
4102
4103	/* Misery. We are in trouble, going to mince the fragments... */
4104
4105	elt = 1;
4106	skb_p = &skb_shinfo(skb)->frag_list;
4107	copyflag = 0;
4108
4109	while ((skb1 = *skb_p) != NULL) {
4110		int ntail = 0;
4111
4112		/* The fragment was partially pulled by someone;
4113		 * this can happen on input. Copy it and everything
4114		 * after it. */
4115
4116		if (skb_shared(skb1))
4117			copyflag = 1;
4118
4119		/* If the skb is the last, worry about trailer. */
4120
4121		if (skb1->next == NULL && tailbits) {
4122			if (skb_shinfo(skb1)->nr_frags ||
4123			    skb_has_frag_list(skb1) ||
4124			    skb_tailroom(skb1) < tailbits)
4125				ntail = tailbits + 128;
4126		}
4127
4128		if (copyflag ||
4129		    skb_cloned(skb1) ||
4130		    ntail ||
4131		    skb_shinfo(skb1)->nr_frags ||
4132		    skb_has_frag_list(skb1)) {
4133			struct sk_buff *skb2;
4134
4135			/* Fuck, we are miserable poor guys... */
4136			if (ntail == 0)
4137				skb2 = skb_copy(skb1, GFP_ATOMIC);
4138			else
4139				skb2 = skb_copy_expand(skb1,
4140						       skb_headroom(skb1),
4141						       ntail,
4142						       GFP_ATOMIC);
4143			if (unlikely(skb2 == NULL))
4144				return -ENOMEM;
4145
4146			if (skb1->sk)
4147				skb_set_owner_w(skb2, skb1->sk);
4148
4149			/* Looking around. Are we still alive?
4150			 * OK, link new skb, drop old one */
4151
4152			skb2->next = skb1->next;
4153			*skb_p = skb2;
4154			kfree_skb(skb1);
4155			skb1 = skb2;
4156		}
4157		elt++;
4158		*trailer = skb1;
4159		skb_p = &skb1->next;
4160	}
4161
4162	return elt;
4163}
4164EXPORT_SYMBOL_GPL(skb_cow_data);
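
/*
 * A minimal usage sketch (illustrative), in the spirit of ESP output: make
 * the packet writable with room for a trailer, then extend the tail into
 * that space.  The "trailer_len" value is hypothetical.
 *
 *	struct sk_buff *trailer;
 *	int nfrags = skb_cow_data(skb, trailer_len, &trailer);
 *
 *	if (nfrags < 0)
 *		return nfrags;
 *	pskb_put(skb, trailer, trailer_len);
 */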
4165
4166static void sock_rmem_free(struct sk_buff *skb)
4167{
4168	struct sock *sk = skb->sk;
4169
4170	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
4171}
4172
4173static void skb_set_err_queue(struct sk_buff *skb)
4174{
4175	/* pkt_type of skbs received on local sockets is never PACKET_OUTGOING.
4176	 * So, it is safe to (mis)use it to mark skbs on the error queue.
4177	 */
4178	skb->pkt_type = PACKET_OUTGOING;
4179	BUILD_BUG_ON(PACKET_OUTGOING == 0);
4180}
4181
4182/*
4183 * Note: We don't mem charge error packets (no sk_forward_alloc changes)
4184 */
4185int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
4186{
4187	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
4188	    (unsigned int)sk->sk_rcvbuf)
4189		return -ENOMEM;
4190
4191	skb_orphan(skb);
4192	skb->sk = sk;
4193	skb->destructor = sock_rmem_free;
4194	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
4195	skb_set_err_queue(skb);
4196
4197	/* before exiting rcu section, make sure dst is refcounted */
4198	skb_dst_force(skb);
4199
4200	skb_queue_tail(&sk->sk_error_queue, skb);
4201	if (!sock_flag(sk, SOCK_DEAD))
4202		sk->sk_error_report(sk);
4203	return 0;
4204}
4205EXPORT_SYMBOL(sock_queue_err_skb);
4206
4207static bool is_icmp_err_skb(const struct sk_buff *skb)
4208{
4209	return skb && (SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
4210		       SKB_EXT_ERR(skb)->ee.ee_origin == SO_EE_ORIGIN_ICMP6);
4211}
4212
4213struct sk_buff *sock_dequeue_err_skb(struct sock *sk)
4214{
4215	struct sk_buff_head *q = &sk->sk_error_queue;
4216	struct sk_buff *skb, *skb_next = NULL;
4217	bool icmp_next = false;
4218	unsigned long flags;
4219
4220	spin_lock_irqsave(&q->lock, flags);
4221	skb = __skb_dequeue(q);
4222	if (skb && (skb_next = skb_peek(q))) {
4223		icmp_next = is_icmp_err_skb(skb_next);
4224		if (icmp_next)
4225			sk->sk_err = SKB_EXT_ERR(skb_next)->ee.ee_origin;
4226	}
4227	spin_unlock_irqrestore(&q->lock, flags);
4228
4229	if (is_icmp_err_skb(skb) && !icmp_next)
4230		sk->sk_err = 0;
4231
4232	if (skb_next)
4233		sk->sk_error_report(sk);
4234
4235	return skb;
4236}
4237EXPORT_SYMBOL(sock_dequeue_err_skb);
4238
4239/**
4240 * skb_clone_sk - create clone of skb, and take reference to socket
4241 * @skb: the skb to clone
4242 *
4243 * This function creates a clone of a buffer that holds a reference on
4244 * sk_refcnt.  Buffers created via this function are meant to be
4245 * returned using sock_queue_err_skb, or free via kfree_skb.
4246 *
4247 * When passing buffers allocated with this function to sock_queue_err_skb
4248 * it is necessary to wrap the call with sock_hold/sock_put in order to
4249 * prevent the socket from being released prior to being enqueued on
4250 * the sk_error_queue.
4251 */
4252struct sk_buff *skb_clone_sk(struct sk_buff *skb)
4253{
4254	struct sock *sk = skb->sk;
4255	struct sk_buff *clone;
4256
4257	if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
4258		return NULL;
4259
4260	clone = skb_clone(skb, GFP_ATOMIC);
4261	if (!clone) {
4262		sock_put(sk);
4263		return NULL;
4264	}
4265
4266	clone->sk = sk;
4267	clone->destructor = sock_efree;
4268
4269	return clone;
4270}
4271EXPORT_SYMBOL(skb_clone_sk);
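
/*
 * A minimal usage sketch (illustrative) of the sock_hold()/sock_put()
 * wrapping that the comment above asks for when returning a clone through
 * the error queue.
 *
 *	struct sock *sk = clone->sk;
 *	int err;
 *
 *	sock_hold(sk);
 *	err = sock_queue_err_skb(sk, clone);
 *	sock_put(sk);
 *	if (err)
 *		kfree_skb(clone);
 */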
4272
4273static void __skb_complete_tx_timestamp(struct sk_buff *skb,
4274					struct sock *sk,
4275					int tstype,
4276					bool opt_stats)
4277{
4278	struct sock_exterr_skb *serr;
4279	int err;
4280
4281	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
4282
4283	serr = SKB_EXT_ERR(skb);
4284	memset(serr, 0, sizeof(*serr));
4285	serr->ee.ee_errno = ENOMSG;
4286	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
4287	serr->ee.ee_info = tstype;
4288	serr->opt_stats = opt_stats;
4289	serr->header.h4.iif = skb->dev ? skb->dev->ifindex : 0;
4290	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID) {
4291		serr->ee.ee_data = skb_shinfo(skb)->tskey;
4292		if (sk->sk_protocol == IPPROTO_TCP &&
4293		    sk->sk_type == SOCK_STREAM)
4294			serr->ee.ee_data -= sk->sk_tskey;
4295	}
4296
4297	err = sock_queue_err_skb(sk, skb);
4298
4299	if (err)
4300		kfree_skb(skb);
4301}
4302
4303static bool skb_may_tx_timestamp(struct sock *sk, bool tsonly)
4304{
4305	bool ret;
4306
4307	if (likely(sysctl_tstamp_allow_data || tsonly))
4308		return true;
4309
4310	read_lock_bh(&sk->sk_callback_lock);
4311	ret = sk->sk_socket && sk->sk_socket->file &&
4312	      file_ns_capable(sk->sk_socket->file, &init_user_ns, CAP_NET_RAW);
4313	read_unlock_bh(&sk->sk_callback_lock);
4314	return ret;
4315}
4316
4317void skb_complete_tx_timestamp(struct sk_buff *skb,
4318			       struct skb_shared_hwtstamps *hwtstamps)
4319{
4320	struct sock *sk = skb->sk;
4321
4322	if (!skb_may_tx_timestamp(sk, false))
4323		goto err;
4324
4325	/* Take a reference to prevent skb_orphan() from freeing the socket,
4326	 * but only if the socket refcount is not zero.
4327	 */
4328	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4329		*skb_hwtstamps(skb) = *hwtstamps;
4330		__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
4331		sock_put(sk);
4332		return;
4333	}
4334
4335err:
4336	kfree_skb(skb);
4337}
4338EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp);
4339
4340void __skb_tstamp_tx(struct sk_buff *orig_skb,
4341		     struct skb_shared_hwtstamps *hwtstamps,
4342		     struct sock *sk, int tstype)
4343{
4344	struct sk_buff *skb;
4345	bool tsonly, opt_stats = false;
4346
4347	if (!sk)
4348		return;
4349
4350	if (!hwtstamps && !(sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TX_SWHW) &&
4351	    skb_shinfo(orig_skb)->tx_flags & SKBTX_IN_PROGRESS)
4352		return;
4353
4354	tsonly = sk->sk_tsflags & SOF_TIMESTAMPING_OPT_TSONLY;
4355	if (!skb_may_tx_timestamp(sk, tsonly))
4356		return;
4357
4358	if (tsonly) {
4359#ifdef CONFIG_INET
4360		if ((sk->sk_tsflags & SOF_TIMESTAMPING_OPT_STATS) &&
4361		    sk->sk_protocol == IPPROTO_TCP &&
4362		    sk->sk_type == SOCK_STREAM) {
4363			skb = tcp_get_timestamping_opt_stats(sk);
4364			opt_stats = true;
4365		} else
4366#endif
4367			skb = alloc_skb(0, GFP_ATOMIC);
4368	} else {
4369		skb = skb_clone(orig_skb, GFP_ATOMIC);
4370	}
4371	if (!skb)
4372		return;
4373
4374	if (tsonly) {
4375		skb_shinfo(skb)->tx_flags |= skb_shinfo(orig_skb)->tx_flags &
4376					     SKBTX_ANY_TSTAMP;
4377		skb_shinfo(skb)->tskey = skb_shinfo(orig_skb)->tskey;
4378	}
4379
4380	if (hwtstamps)
4381		*skb_hwtstamps(skb) = *hwtstamps;
4382	else
4383		skb->tstamp = ktime_get_real();
4384
4385	__skb_complete_tx_timestamp(skb, sk, tstype, opt_stats);
4386}
4387EXPORT_SYMBOL_GPL(__skb_tstamp_tx);
4388
4389void skb_tstamp_tx(struct sk_buff *orig_skb,
4390		   struct skb_shared_hwtstamps *hwtstamps)
4391{
4392	return __skb_tstamp_tx(orig_skb, hwtstamps, orig_skb->sk,
4393			       SCM_TSTAMP_SND);
4394}
4395EXPORT_SYMBOL_GPL(skb_tstamp_tx);
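
/*
 * A minimal usage sketch (illustrative): a driver reporting a hardware TX
 * timestamp once the NIC signals completion.  How the "hw_ns" nanosecond
 * value is read from the hardware is an assumption.
 *
 *	struct skb_shared_hwtstamps hwts = { };
 *
 *	hwts.hwtstamp = ns_to_ktime(hw_ns);
 *	skb_tstamp_tx(skb, &hwts);
 */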
4396
4397void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
4398{
4399	struct sock *sk = skb->sk;
4400	struct sock_exterr_skb *serr;
4401	int err = 1;
4402
4403	skb->wifi_acked_valid = 1;
4404	skb->wifi_acked = acked;
4405
4406	serr = SKB_EXT_ERR(skb);
4407	memset(serr, 0, sizeof(*serr));
4408	serr->ee.ee_errno = ENOMSG;
4409	serr->ee.ee_origin = SO_EE_ORIGIN_TXSTATUS;
4410
4411	/* Take a reference to prevent skb_orphan() from freeing the socket,
4412	 * but only if the socket refcount is not zero.
4413	 */
4414	if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
4415		err = sock_queue_err_skb(sk, skb);
4416		sock_put(sk);
4417	}
4418	if (err)
4419		kfree_skb(skb);
4420}
4421EXPORT_SYMBOL_GPL(skb_complete_wifi_ack);
4422
4423/**
4424 * skb_partial_csum_set - set up and verify partial csum values for packet
4425 * @skb: the skb to set
4426 * @start: the number of bytes after skb->data to start checksumming.
4427 * @off: the offset from start to place the checksum.
4428 *
4429 * For untrusted partially-checksummed packets, we need to make sure the values
4430 * for skb->csum_start and skb->csum_offset are valid so we don't oops.
4431 *
4432 * This function checks and sets those values and skb->ip_summed: if this
4433 * returns false you should drop the packet.
4434 */
4435bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off)
4436{
4437	if (unlikely(start > skb_headlen(skb)) ||
4438	    unlikely((int)start + off > skb_headlen(skb) - 2)) {
4439		net_warn_ratelimited("bad partial csum: csum=%u/%u len=%u\n",
4440				     start, off, skb_headlen(skb));
4441		return false;
4442	}
4443	skb->ip_summed = CHECKSUM_PARTIAL;
4444	skb->csum_start = skb_headroom(skb) + start;
4445	skb->csum_offset = off;
4446	skb_set_transport_header(skb, start);
4447	return true;
4448}
4449EXPORT_SYMBOL_GPL(skb_partial_csum_set);
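/* Usage sketch (editor's illustration): a paravirtual NIC receiving an
 * untrusted, partially checksummed frame could validate the peer-supplied
 * offsets before injecting the packet into the stack; returning -EINVAL
 * tells the caller to drop it. The csum_start and csum_offset metadata
 * fields are hypothetical.
 *
 *	static int mydrv_setup_partial_csum(struct sk_buff *skb,
 *					    u16 csum_start, u16 csum_offset)
 *	{
 *		if (!skb_partial_csum_set(skb, csum_start, csum_offset))
 *			return -EINVAL;
 *		return 0;
 *	}
 */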
4450
4451static int skb_maybe_pull_tail(struct sk_buff *skb, unsigned int len,
4452			       unsigned int max)
4453{
4454	if (skb_headlen(skb) >= len)
4455		return 0;
4456
4457	/* If we need to pull up, then pull up to the max, so we
4458	 * won't need to do it again.
4459	 */
4460	if (max > skb->len)
4461		max = skb->len;
4462
4463	if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
4464		return -ENOMEM;
4465
4466	if (skb_headlen(skb) < len)
4467		return -EPROTO;
4468
4469	return 0;
4470}
4471
4472#define MAX_TCP_HDR_LEN (15 * 4)
4473
4474static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
4475				      typeof(IPPROTO_IP) proto,
4476				      unsigned int off)
4477{
4478	switch (proto) {
4479		int err;
4480
4481	case IPPROTO_TCP:
4482		err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
4483					  off + MAX_TCP_HDR_LEN);
4484		if (!err && !skb_partial_csum_set(skb, off,
4485						  offsetof(struct tcphdr,
4486							   check)))
4487			err = -EPROTO;
4488		return err ? ERR_PTR(err) : &tcp_hdr(skb)->check;
4489
4490	case IPPROTO_UDP:
4491		err = skb_maybe_pull_tail(skb, off + sizeof(struct udphdr),
4492					  off + sizeof(struct udphdr));
4493		if (!err && !skb_partial_csum_set(skb, off,
4494						  offsetof(struct udphdr,
4495							   check)))
4496			err = -EPROTO;
4497		return err ? ERR_PTR(err) : &udp_hdr(skb)->check;
4498	}
4499
4500	return ERR_PTR(-EPROTO);
4501}
4502
4503/* This value should be large enough to cover a tagged ethernet header plus
4504 * maximally sized IP and TCP or UDP headers.
4505 */
4506#define MAX_IP_HDR_LEN 128
4507
4508static int skb_checksum_setup_ipv4(struct sk_buff *skb, bool recalculate)
4509{
4510	unsigned int off;
4511	bool fragment;
4512	__sum16 *csum;
4513	int err;
4514
4515	fragment = false;
4516
4517	err = skb_maybe_pull_tail(skb,
4518				  sizeof(struct iphdr),
4519				  MAX_IP_HDR_LEN);
4520	if (err < 0)
4521		goto out;
4522
4523	if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
4524		fragment = true;
4525
4526	off = ip_hdrlen(skb);
4527
4528	err = -EPROTO;
4529
4530	if (fragment)
4531		goto out;
4532
4533	csum = skb_checksum_setup_ip(skb, ip_hdr(skb)->protocol, off);
4534	if (IS_ERR(csum))
4535		return PTR_ERR(csum);
4536
4537	if (recalculate)
4538		*csum = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
4539					   ip_hdr(skb)->daddr,
4540					   skb->len - off,
4541					   ip_hdr(skb)->protocol, 0);
4542	err = 0;
4543
4544out:
4545	return err;
4546}
4547
4548/* This value should be large enough to cover a tagged ethernet header plus
4549 * an IPv6 header, all options, and a maximal TCP or UDP header.
4550 */
4551#define MAX_IPV6_HDR_LEN 256
4552
4553#define OPT_HDR(type, skb, off) \
4554	(type *)(skb_network_header(skb) + (off))
4555
4556static int skb_checksum_setup_ipv6(struct sk_buff *skb, bool recalculate)
4557{
4558	int err;
4559	u8 nexthdr;
4560	unsigned int off;
4561	unsigned int len;
4562	bool fragment;
4563	bool done;
4564	__sum16 *csum;
4565
4566	fragment = false;
4567	done = false;
4568
4569	off = sizeof(struct ipv6hdr);
4570
4571	err = skb_maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
4572	if (err < 0)
4573		goto out;
4574
4575	nexthdr = ipv6_hdr(skb)->nexthdr;
4576
4577	len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
4578	while (off <= len && !done) {
4579		switch (nexthdr) {
4580		case IPPROTO_DSTOPTS:
4581		case IPPROTO_HOPOPTS:
4582		case IPPROTO_ROUTING: {
4583			struct ipv6_opt_hdr *hp;
4584
4585			err = skb_maybe_pull_tail(skb,
4586						  off +
4587						  sizeof(struct ipv6_opt_hdr),
4588						  MAX_IPV6_HDR_LEN);
4589			if (err < 0)
4590				goto out;
4591
4592			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
4593			nexthdr = hp->nexthdr;
4594			off += ipv6_optlen(hp);
4595			break;
4596		}
4597		case IPPROTO_AH: {
4598			struct ip_auth_hdr *hp;
4599
4600			err = skb_maybe_pull_tail(skb,
4601						  off +
4602						  sizeof(struct ip_auth_hdr),
4603						  MAX_IPV6_HDR_LEN);
4604			if (err < 0)
4605				goto out;
4606
4607			hp = OPT_HDR(struct ip_auth_hdr, skb, off);
4608			nexthdr = hp->nexthdr;
4609			off += ipv6_authlen(hp);
4610			break;
4611		}
4612		case IPPROTO_FRAGMENT: {
4613			struct frag_hdr *hp;
4614
4615			err = skb_maybe_pull_tail(skb,
4616						  off +
4617						  sizeof(struct frag_hdr),
4618						  MAX_IPV6_HDR_LEN);
4619			if (err < 0)
4620				goto out;
4621
4622			hp = OPT_HDR(struct frag_hdr, skb, off);
4623
4624			if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
4625				fragment = true;
4626
4627			nexthdr = hp->nexthdr;
4628			off += sizeof(struct frag_hdr);
4629			break;
4630		}
4631		default:
4632			done = true;
4633			break;
4634		}
4635	}
4636
4637	err = -EPROTO;
4638
4639	if (!done || fragment)
4640		goto out;
4641
4642	csum = skb_checksum_setup_ip(skb, nexthdr, off);
4643	if (IS_ERR(csum))
4644		return PTR_ERR(csum);
4645
4646	if (recalculate)
4647		*csum = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
4648					 &ipv6_hdr(skb)->daddr,
4649					 skb->len - off, nexthdr, 0);
4650	err = 0;
4651
4652out:
4653	return err;
4654}
4655
4656/**
4657 * skb_checksum_setup - set up partial checksum offset
4658 * @skb: the skb to set up
4659 * @recalculate: if true the pseudo-header checksum will be recalculated
4660 */
4661int skb_checksum_setup(struct sk_buff *skb, bool recalculate)
4662{
4663	int err;
4664
4665	switch (skb->protocol) {
4666	case htons(ETH_P_IP):
4667		err = skb_checksum_setup_ipv4(skb, recalculate);
4668		break;
4669
4670	case htons(ETH_P_IPV6):
4671		err = skb_checksum_setup_ipv6(skb, recalculate);
4672		break;
4673
4674	default:
4675		err = -EPROTO;
4676		break;
4677	}
4678
4679	return err;
4680}
4681EXPORT_SYMBOL(skb_checksum_setup);
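/* Usage sketch (editor's illustration): a backend driver receiving
 * CHECKSUM_PARTIAL packets from an untrusted frontend could normalise the
 * checksum state before passing the skb up, asking for the pseudo-header
 * checksum to be recalculated. mydrv_fixup_csum() is hypothetical.
 *
 *	static int mydrv_fixup_csum(struct sk_buff *skb)
 *	{
 *		if (skb->ip_summed != CHECKSUM_PARTIAL)
 *			return 0;
 *		return skb_checksum_setup(skb, true);
 *	}
 */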
4682
4683/**
4684 * skb_checksum_maybe_trim - maybe trims the given skb
4685 * @skb: the skb to check
4686 * @transport_len: the data length beyond the network header
4687 *
4688 * Checks whether the given skb has data beyond the given transport length.
4689 * If so, returns a cloned skb trimmed to this transport length.
4690 * Otherwise returns the provided skb. Returns NULL in error cases
4691 * (e.g. transport_len exceeds skb length or out-of-memory).
4692 *
4693 * Caller needs to set the skb transport header and free any returned skb if it
4694 * differs from the provided skb.
4695 */
4696static struct sk_buff *skb_checksum_maybe_trim(struct sk_buff *skb,
4697					       unsigned int transport_len)
4698{
4699	struct sk_buff *skb_chk;
4700	unsigned int len = skb_transport_offset(skb) + transport_len;
4701	int ret;
4702
4703	if (skb->len < len)
4704		return NULL;
4705	else if (skb->len == len)
4706		return skb;
4707
4708	skb_chk = skb_clone(skb, GFP_ATOMIC);
4709	if (!skb_chk)
4710		return NULL;
4711
4712	ret = pskb_trim_rcsum(skb_chk, len);
4713	if (ret) {
4714		kfree_skb(skb_chk);
4715		return NULL;
4716	}
4717
4718	return skb_chk;
4719}
4720
4721/**
4722 * skb_checksum_trimmed - validate checksum of an skb
4723 * @skb: the skb to check
4724 * @transport_len: the data length beyond the network header
4725 * @skb_chkf: checksum function to use
4726 *
4727 * Applies the given checksum function skb_chkf to the provided skb.
4728 * Returns a checked and maybe trimmed skb. Returns NULL on error.
4729 *
4730 * If the skb has data beyond the given transport length, then a
4731 * trimmed & cloned skb is checked and returned.
4732 *
4733 * Caller needs to set the skb transport header and free any returned skb if it
4734 * differs from the provided skb.
4735 */
4736struct sk_buff *skb_checksum_trimmed(struct sk_buff *skb,
4737				     unsigned int transport_len,
4738				     __sum16(*skb_chkf)(struct sk_buff *skb))
4739{
4740	struct sk_buff *skb_chk;
4741	unsigned int offset = skb_transport_offset(skb);
4742	__sum16 ret;
4743
4744	skb_chk = skb_checksum_maybe_trim(skb, transport_len);
4745	if (!skb_chk)
4746		goto err;
4747
4748	if (!pskb_may_pull(skb_chk, offset))
4749		goto err;
4750
4751	skb_pull_rcsum(skb_chk, offset);
4752	ret = skb_chkf(skb_chk);
4753	skb_push_rcsum(skb_chk, offset);
4754
4755	if (ret)
4756		goto err;
4757
4758	return skb_chk;
4759
4760err:
4761	if (skb_chk && skb_chk != skb)
4762		kfree_skb(skb_chk);
4763
4764	return NULL;
4765
4766}
4767EXPORT_SYMBOL(skb_checksum_trimmed);
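/* Usage sketch (editor's illustration): validating the checksum of a
 * transport payload that may be followed by trailing padding, after the
 * caller has set the transport header. my_validate_csum() is hypothetical;
 * skb_checksum_simple_validate() is the generic helper from skbuff.h, used
 * here only as an example callback.
 *
 *	static __sum16 my_validate_csum(struct sk_buff *skb)
 *	{
 *		return skb_checksum_simple_validate(skb);
 *	}
 *
 *	skb_chk = skb_checksum_trimmed(skb, transport_len, my_validate_csum);
 *	if (!skb_chk)
 *		return -EINVAL;
 *	if (skb_chk != skb)
 *		kfree_skb(skb_chk);
 */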
4768
4769void __skb_warn_lro_forwarding(const struct sk_buff *skb)
4770{
4771	net_warn_ratelimited("%s: received packets cannot be forwarded while LRO is enabled\n",
4772			     skb->dev->name);
4773}
4774EXPORT_SYMBOL(__skb_warn_lro_forwarding);
4775
4776void kfree_skb_partial(struct sk_buff *skb, bool head_stolen)
4777{
4778	if (head_stolen) {
4779		skb_release_head_state(skb);
4780		kmem_cache_free(skbuff_head_cache, skb);
4781	} else {
4782		__kfree_skb(skb);
4783	}
4784}
4785EXPORT_SYMBOL(kfree_skb_partial);
4786
4787/**
4788 * skb_try_coalesce - try to merge skb to prior one
4789 * @to: prior buffer
4790 * @from: buffer to add
4791 * @fragstolen: pointer to boolean
4792 * @delta_truesize: how much more was allocated than was requested
4793 */
4794bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
4795		      bool *fragstolen, int *delta_truesize)
4796{
4797	struct skb_shared_info *to_shinfo, *from_shinfo;
4798	int i, delta, len = from->len;
4799
4800	*fragstolen = false;
4801
4802	if (skb_cloned(to))
4803		return false;
4804
4805	if (len <= skb_tailroom(to)) {
4806		if (len)
4807			BUG_ON(skb_copy_bits(from, 0, skb_put(to, len), len));
4808		*delta_truesize = 0;
4809		return true;
4810	}
4811
4812	to_shinfo = skb_shinfo(to);
4813	from_shinfo = skb_shinfo(from);
4814	if (to_shinfo->frag_list || from_shinfo->frag_list)
4815		return false;
4816	if (skb_zcopy(to) || skb_zcopy(from))
4817		return false;
4818
4819	if (skb_headlen(from) != 0) {
4820		struct page *page;
4821		unsigned int offset;
4822
4823		if (to_shinfo->nr_frags +
4824		    from_shinfo->nr_frags >= MAX_SKB_FRAGS)
4825			return false;
4826
4827		if (skb_head_is_locked(from))
4828			return false;
4829
4830		delta = from->truesize - SKB_DATA_ALIGN(sizeof(struct sk_buff));
4831
4832		page = virt_to_head_page(from->head);
4833		offset = from->data - (unsigned char *)page_address(page);
4834
4835		skb_fill_page_desc(to, to_shinfo->nr_frags,
4836				   page, offset, skb_headlen(from));
4837		*fragstolen = true;
4838	} else {
4839		if (to_shinfo->nr_frags +
4840		    from_shinfo->nr_frags > MAX_SKB_FRAGS)
4841			return false;
4842
4843		delta = from->truesize - SKB_TRUESIZE(skb_end_offset(from));
4844	}
4845
4846	WARN_ON_ONCE(delta < len);
4847
4848	memcpy(to_shinfo->frags + to_shinfo->nr_frags,
4849	       from_shinfo->frags,
4850	       from_shinfo->nr_frags * sizeof(skb_frag_t));
4851	to_shinfo->nr_frags += from_shinfo->nr_frags;
4852
4853	if (!skb_cloned(from))
4854		from_shinfo->nr_frags = 0;
4855
4856	/* if the skb is not cloned this does nothing
4857	 * since we set nr_frags to 0.
4858	 */
4859	for (i = 0; i < from_shinfo->nr_frags; i++)
4860		__skb_frag_ref(&from_shinfo->frags[i]);
4861
4862	to->truesize += delta;
4863	to->len += len;
4864	to->data_len += len;
4865
4866	*delta_truesize = delta;
4867	return true;
4868}
4869EXPORT_SYMBOL(skb_try_coalesce);
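/* Usage sketch (editor's illustration): a receive path merging a freshly
 * arrived buffer into the last queued one. "tail" is the hypothetical
 * previous skb on the queue; account_truesize() and queue_separately()
 * are placeholders for the caller's own accounting and queueing.
 *
 *	bool fragstolen;
 *	int delta;
 *
 *	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
 *		kfree_skb_partial(skb, fragstolen);
 *		account_truesize(sk, delta);
 *	} else {
 *		queue_separately(sk, skb);
 *	}
 */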
4870
4871/**
4872 * skb_scrub_packet - scrub an skb
4873 *
4874 * @skb: buffer to clean
4875 * @xnet: packet is crossing netns
4876 *
4877 * skb_scrub_packet can be used after encapsulating or decapsulating a packet
4878 * into/from a tunnel. Some information has to be cleared during these
4879 * operations.
4880 * skb_scrub_packet can also be used to clean a skb before injecting it in
4881 * another namespace (@xnet == true). We have to clear all information in the
4882 * skb that could impact namespace isolation.
4883 */
4884void skb_scrub_packet(struct sk_buff *skb, bool xnet)
4885{
4886	skb->tstamp = 0;
4887	skb->pkt_type = PACKET_HOST;
4888	skb->skb_iif = 0;
4889	skb->ignore_df = 0;
4890	skb_dst_drop(skb);
4891	secpath_reset(skb);
4892	nf_reset(skb);
4893	nf_reset_trace(skb);
4894
4895	if (!xnet)
4896		return;
4897
4898	ipvs_reset(skb);
4899	skb_orphan(skb);
4900	skb->mark = 0;
4901}
4902EXPORT_SYMBOL_GPL(skb_scrub_packet);
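/* Usage sketch (editor's illustration): when handing a packet to another
 * device, scrub harder if the destination lives in a different network
 * namespace; this is roughly the pattern used by dev_forward_skb()-style
 * helpers. "dev" is the hypothetical destination device.
 *
 *	skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
 *	skb->protocol = eth_type_trans(skb, dev);
 */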
4903
4904/**
4905 * skb_gso_transport_seglen - Return length of individual segments of a gso packet
4906 *
4907 * @skb: GSO skb
4908 *
4909 * skb_gso_transport_seglen is used to determine the real size of the
4910 * individual segments, including Layer4 headers (TCP/UDP).
4911 *
4912 * The MAC/L2 or network (IP, IPv6) headers are not accounted for.
4913 */
4914static unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4915{
4916	const struct skb_shared_info *shinfo = skb_shinfo(skb);
4917	unsigned int thlen = 0;
4918
4919	if (skb->encapsulation) {
4920		thlen = skb_inner_transport_header(skb) -
4921			skb_transport_header(skb);
4922
4923		if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4924			thlen += inner_tcp_hdrlen(skb);
4925	} else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4926		thlen = tcp_hdrlen(skb);
4927	} else if (unlikely(skb_is_gso_sctp(skb))) {
4928		thlen = sizeof(struct sctphdr);
4929	}
4930	/* UFO sets gso_size to the size of the fragmentation
4931	 * payload, i.e. the size of the L4 (UDP) header is already
4932	 * accounted for.
4933	 */
4934	return thlen + shinfo->gso_size;
4935}
4936
4937/**
4938 * skb_gso_network_seglen - Return length of individual segments of a gso packet
4939 *
4940 * @skb: GSO skb
4941 *
4942 * skb_gso_network_seglen is used to determine the real size of the
4943 * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
4944 *
4945 * The MAC/L2 header is not accounted for.
4946 */
4947static unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
4948{
4949	unsigned int hdr_len = skb_transport_header(skb) -
4950			       skb_network_header(skb);
4951
4952	return hdr_len + skb_gso_transport_seglen(skb);
4953}
4954
4955/**
4956 * skb_gso_mac_seglen - Return length of individual segments of a gso packet
4957 *
4958 * @skb: GSO skb
4959 *
4960 * skb_gso_mac_seglen is used to determine the real size of the
4961 * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4
4962 * headers (TCP/UDP).
4963 */
4964static unsigned int skb_gso_mac_seglen(const struct sk_buff *skb)
4965{
4966	unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
4967
4968	return hdr_len + skb_gso_transport_seglen(skb);
4969}
4970
4971/**
4972 * skb_gso_size_check - check the skb size, considering GSO_BY_FRAGS
4973 *
4974 * There are a couple of instances where we have a GSO skb, and we
4975 * want to determine what size it would be after it is segmented.
4976 *
4977 * We might want to check:
4978 * -    L3+L4+payload size (e.g. IP forwarding)
4979 * - L2+L3+L4+payload size (e.g. sanity check before passing to driver)
4980 *
4981 * This is a helper to do that correctly considering GSO_BY_FRAGS.
4982 *
4983 * @seg_len: The segmented length (from skb_gso_*_seglen). In the
4984 *           GSO_BY_FRAGS case this will be [header sizes + GSO_BY_FRAGS].
4985 *
4986 * @max_len: The maximum permissible length.
4987 *
4988 * Returns true if the segmented length <= max length.
4989 */
4990static inline bool skb_gso_size_check(const struct sk_buff *skb,
4991				      unsigned int seg_len,
4992				      unsigned int max_len) {
4993	const struct skb_shared_info *shinfo = skb_shinfo(skb);
4994	const struct sk_buff *iter;
4995
4996	if (shinfo->gso_size != GSO_BY_FRAGS)
4997		return seg_len <= max_len;
4998
4999	/* Undo this so we can re-use header sizes */
5000	seg_len -= GSO_BY_FRAGS;
5001
5002	skb_walk_frags(skb, iter) {
5003		if (seg_len + skb_headlen(iter) > max_len)
5004			return false;
5005	}
5006
5007	return true;
5008}
5009
5010/**
5011 * skb_gso_validate_network_len - Will a split GSO skb fit into a given MTU?
5012 *
5013 * @skb: GSO skb
5014 * @mtu: MTU to validate against
5015 *
5016 * skb_gso_validate_network_len validates if a given skb will fit a
5017 * wanted MTU once split. It considers L3 headers, L4 headers, and the
5018 * payload.
5019 */
5020bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu)
5021{
5022	return skb_gso_size_check(skb, skb_gso_network_seglen(skb), mtu);
5023}
5024EXPORT_SYMBOL_GPL(skb_gso_validate_network_len);
5025
5026/**
5027 * skb_gso_validate_mac_len - Will a split GSO skb fit in a given length?
5028 *
5029 * @skb: GSO skb
5030 * @len: length to validate against
5031 *
5032 * skb_gso_validate_mac_len validates if a given skb will fit a wanted
5033 * length once split, including L2, L3 and L4 headers and the payload.
5034 */
5035bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len)
5036{
5037	return skb_gso_size_check(skb, skb_gso_mac_seglen(skb), len);
5038}
5039EXPORT_SYMBOL_GPL(skb_gso_validate_mac_len);
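/* Usage sketch (editor's illustration): before forwarding a GSO packet,
 * check that the segments produced later would fit the egress MTU; if not,
 * the caller would typically emit an ICMP "fragmentation needed" error
 * (not shown). "mtu" is the hypothetical egress MTU.
 *
 *	if (skb_is_gso(skb) &&
 *	    !skb_gso_validate_network_len(skb, mtu))
 *		goto reply_frag_needed;
 */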
5040
5041static struct sk_buff *skb_reorder_vlan_header(struct sk_buff *skb)
5042{
5043	int mac_len;
5044
5045	if (skb_cow(skb, skb_headroom(skb)) < 0) {
5046		kfree_skb(skb);
5047		return NULL;
5048	}
5049
5050	mac_len = skb->data - skb_mac_header(skb);
5051	if (likely(mac_len > VLAN_HLEN + ETH_TLEN)) {
5052		memmove(skb_mac_header(skb) + VLAN_HLEN, skb_mac_header(skb),
5053			mac_len - VLAN_HLEN - ETH_TLEN);
5054	}
5055	skb->mac_header += VLAN_HLEN;
5056	return skb;
5057}
5058
5059struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
5060{
5061	struct vlan_hdr *vhdr;
5062	u16 vlan_tci;
5063
5064	if (unlikely(skb_vlan_tag_present(skb))) {
5065		/* vlan_tci is already set-up so leave this for another time */
5066		return skb;
5067	}
5068
5069	skb = skb_share_check(skb, GFP_ATOMIC);
5070	if (unlikely(!skb))
5071		goto err_free;
5072
5073	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
5074		goto err_free;
5075
5076	vhdr = (struct vlan_hdr *)skb->data;
5077	vlan_tci = ntohs(vhdr->h_vlan_TCI);
5078	__vlan_hwaccel_put_tag(skb, skb->protocol, vlan_tci);
5079
5080	skb_pull_rcsum(skb, VLAN_HLEN);
5081	vlan_set_encap_proto(skb, vhdr);
5082
5083	skb = skb_reorder_vlan_header(skb);
5084	if (unlikely(!skb))
5085		goto err_free;
5086
5087	skb_reset_network_header(skb);
5088	skb_reset_transport_header(skb);
5089	skb_reset_mac_len(skb);
5090
5091	return skb;
5092
5093err_free:
5094	kfree_skb(skb);
5095	return NULL;
5096}
5097EXPORT_SYMBOL(skb_vlan_untag);
5098
5099int skb_ensure_writable(struct sk_buff *skb, int write_len)
5100{
5101	if (!pskb_may_pull(skb, write_len))
5102		return -ENOMEM;
5103
5104	if (!skb_cloned(skb) || skb_clone_writable(skb, write_len))
5105		return 0;
5106
5107	return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
5108}
5109EXPORT_SYMBOL(skb_ensure_writable);
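/* Usage sketch (editor's illustration): before rewriting the Ethernet
 * header in place, make sure the header bytes are private to this skb.
 * "new_dest" is a hypothetical 6-byte MAC address.
 *
 *	int err = skb_ensure_writable(skb, ETH_HLEN);
 *
 *	if (err)
 *		return err;
 *	ether_addr_copy(eth_hdr(skb)->h_dest, new_dest);
 */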
5110
5111/* remove VLAN header from packet and update csum accordingly.
5112 * expects a non skb_vlan_tag_present skb with a vlan tag payload
5113 */
5114int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci)
5115{
5116	struct vlan_hdr *vhdr;
5117	int offset = skb->data - skb_mac_header(skb);
5118	int err;
5119
5120	if (WARN_ONCE(offset,
5121		      "__skb_vlan_pop got skb with skb->data not at mac header (offset %d)\n",
5122		      offset)) {
5123		return -EINVAL;
5124	}
5125
5126	err = skb_ensure_writable(skb, VLAN_ETH_HLEN);
5127	if (unlikely(err))
5128		return err;
5129
5130	skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5131
5132	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);
5133	*vlan_tci = ntohs(vhdr->h_vlan_TCI);
5134
5135	memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
5136	__skb_pull(skb, VLAN_HLEN);
5137
5138	vlan_set_encap_proto(skb, vhdr);
5139	skb->mac_header += VLAN_HLEN;
5140
5141	if (skb_network_offset(skb) < ETH_HLEN)
5142		skb_set_network_header(skb, ETH_HLEN);
5143
5144	skb_reset_mac_len(skb);
5145
5146	return err;
5147}
5148EXPORT_SYMBOL(__skb_vlan_pop);
5149
5150/* Pop a vlan tag either from hwaccel or from payload.
5151 * Expects skb->data at mac header.
5152 */
5153int skb_vlan_pop(struct sk_buff *skb)
5154{
5155	u16 vlan_tci;
5156	__be16 vlan_proto;
5157	int err;
5158
5159	if (likely(skb_vlan_tag_present(skb))) {
5160		skb->vlan_tci = 0;
5161	} else {
5162		if (unlikely(!eth_type_vlan(skb->protocol)))
5163			return 0;
5164
5165		err = __skb_vlan_pop(skb, &vlan_tci);
5166		if (err)
5167			return err;
5168	}
5169	/* move next vlan tag to hw accel tag */
5170	if (likely(!eth_type_vlan(skb->protocol)))
5171		return 0;
5172
5173	vlan_proto = skb->protocol;
5174	err = __skb_vlan_pop(skb, &vlan_tci);
5175	if (unlikely(err))
5176		return err;
5177
5178	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5179	return 0;
5180}
5181EXPORT_SYMBOL(skb_vlan_pop);
5182
5183/* Push a vlan tag either into hwaccel or into payload (if hwaccel tag present).
5184 * Expects skb->data at mac header.
5185 */
5186int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci)
5187{
5188	if (skb_vlan_tag_present(skb)) {
5189		int offset = skb->data - skb_mac_header(skb);
5190		int err;
5191
5192		if (WARN_ONCE(offset,
5193			      "skb_vlan_push got skb with skb->data not at mac header (offset %d)\n",
5194			      offset)) {
5195			return -EINVAL;
5196		}
5197
5198		err = __vlan_insert_tag(skb, skb->vlan_proto,
5199					skb_vlan_tag_get(skb));
5200		if (err)
5201			return err;
5202
5203		skb->protocol = skb->vlan_proto;
5204		skb->mac_len += VLAN_HLEN;
5205
5206		skb_postpush_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN);
5207	}
5208	__vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci);
5209	return 0;
5210}
5211EXPORT_SYMBOL(skb_vlan_push);
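/* Usage sketch (editor's illustration): rewriting the outermost VLAN tag
 * of a frame whose skb->data sits at the MAC header, as a flow action
 * might do. "new_tci" is hypothetical.
 *
 *	int err = skb_vlan_pop(skb);
 *
 *	if (err)
 *		return err;
 *	return skb_vlan_push(skb, htons(ETH_P_8021Q), new_tci);
 */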
5212
5213/**
5214 * alloc_skb_with_frags - allocate skb with page frags
5215 *
5216 * @header_len: size of linear part
5217 * @data_len: needed length in frags
5218 * @max_page_order: max page order desired.
5219 * @errcode: pointer to error code if any
5220 * @gfp_mask: allocation mask
5221 *
5222 * This can be used to allocate a paged skb, given a maximal order for frags.
5223 */
5224struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
5225				     unsigned long data_len,
5226				     int max_page_order,
5227				     int *errcode,
5228				     gfp_t gfp_mask)
5229{
5230	int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
5231	unsigned long chunk;
5232	struct sk_buff *skb;
5233	struct page *page;
5234	gfp_t gfp_head;
5235	int i;
5236
5237	*errcode = -EMSGSIZE;
5238	/* Note this test could be relaxed, if we succeed to allocate
5239	 * high order pages...
5240	 */
5241	if (npages > MAX_SKB_FRAGS)
5242		return NULL;
5243
5244	gfp_head = gfp_mask;
5245	if (gfp_head & __GFP_DIRECT_RECLAIM)
5246		gfp_head |= __GFP_RETRY_MAYFAIL;
5247
5248	*errcode = -ENOBUFS;
5249	skb = alloc_skb(header_len, gfp_head);
5250	if (!skb)
5251		return NULL;
5252
5253	skb->truesize += npages << PAGE_SHIFT;
5254
5255	for (i = 0; npages > 0; i++) {
5256		int order = max_page_order;
5257
5258		while (order) {
5259			if (npages >= 1 << order) {
5260				page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
5261						   __GFP_COMP |
5262						   __GFP_NOWARN |
5263						   __GFP_NORETRY,
5264						   order);
5265				if (page)
5266					goto fill_page;
5267				/* Do not retry other high order allocations */
5268				order = 1;
5269				max_page_order = 0;
5270			}
5271			order--;
5272		}
5273		page = alloc_page(gfp_mask);
5274		if (!page)
5275			goto failure;
5276fill_page:
5277		chunk = min_t(unsigned long, data_len,
5278			      PAGE_SIZE << order);
5279		skb_fill_page_desc(skb, i, page, 0, chunk);
5280		data_len -= chunk;
5281		npages -= 1 << order;
5282	}
5283	return skb;
5284
5285failure:
5286	kfree_skb(skb);
5287	return NULL;
5288}
5289EXPORT_SYMBOL(alloc_skb_with_frags);
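/* Usage sketch (editor's illustration): allocating a mostly paged skb for
 * a large message, allowing frags of up to order-3 pages, roughly as the
 * socket layer does when building big datagrams. "linear_len" and
 * "payload_len" are hypothetical caller-chosen sizes.
 *
 *	int errcode;
 *	struct sk_buff *skb;
 *
 *	skb = alloc_skb_with_frags(linear_len, payload_len, 3, &errcode,
 *				   GFP_KERNEL);
 *	if (!skb)
 *		return ERR_PTR(errcode);
 */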
5290
5291/* carve out the first off bytes from skb when off < headlen */
5292static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
5293				    const int headlen, gfp_t gfp_mask)
5294{
5295	int i;
5296	int size = skb_end_offset(skb);
5297	int new_hlen = headlen - off;
5298	u8 *data;
5299
5300	size = SKB_DATA_ALIGN(size);
5301
5302	if (skb_pfmemalloc(skb))
5303		gfp_mask |= __GFP_MEMALLOC;
5304	data = kmalloc_reserve(size +
5305			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5306			       gfp_mask, NUMA_NO_NODE, NULL);
5307	if (!data)
5308		return -ENOMEM;
5309
5310	size = SKB_WITH_OVERHEAD(ksize(data));
5311
5312	/* Copy real data, and all frags */
5313	skb_copy_from_linear_data_offset(skb, off, data, new_hlen);
5314	skb->len -= off;
5315
5316	memcpy((struct skb_shared_info *)(data + size),
5317	       skb_shinfo(skb),
5318	       offsetof(struct skb_shared_info,
5319			frags[skb_shinfo(skb)->nr_frags]));
5320	if (skb_cloned(skb)) {
5321		/* drop the old head gracefully */
5322		if (skb_orphan_frags(skb, gfp_mask)) {
5323			kfree(data);
5324			return -ENOMEM;
5325		}
5326		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
5327			skb_frag_ref(skb, i);
5328		if (skb_has_frag_list(skb))
5329			skb_clone_fraglist(skb);
5330		skb_release_data(skb);
5331	} else {
5332		/* we can reuse the existing refcount - all we did was
5333		 * relocate values
5334		 */
5335		skb_free_head(skb);
5336	}
5337
5338	skb->head = data;
5339	skb->data = data;
5340	skb->head_frag = 0;
5341#ifdef NET_SKBUFF_DATA_USES_OFFSET
5342	skb->end = size;
5343#else
5344	skb->end = skb->head + size;
5345#endif
5346	skb_set_tail_pointer(skb, skb_headlen(skb));
5347	skb_headers_offset_update(skb, 0);
5348	skb->cloned = 0;
5349	skb->hdr_len = 0;
5350	skb->nohdr = 0;
5351	atomic_set(&skb_shinfo(skb)->dataref, 1);
5352
5353	return 0;
5354}
5355
5356static int pskb_carve(struct sk_buff *skb, const u32 off, gfp_t gfp);
5357
5358/* carve out the first eat bytes from skb's frag_list. May recurse into
5359 * pskb_carve()
5360 */
5361static int pskb_carve_frag_list(struct sk_buff *skb,
5362				struct skb_shared_info *shinfo, int eat,
5363				gfp_t gfp_mask)
5364{
5365	struct sk_buff *list = shinfo->frag_list;
5366	struct sk_buff *clone = NULL;
5367	struct sk_buff *insp = NULL;
5368
5369	do {
5370		if (!list) {
5371			pr_err("Not enough bytes to eat. Want %d\n", eat);
5372			return -EFAULT;
5373		}
5374		if (list->len <= eat) {
5375			/* Eaten as whole. */
5376			eat -= list->len;
5377			list = list->next;
5378			insp = list;
5379		} else {
5380			/* Eaten partially. */
5381			if (skb_shared(list)) {
5382				clone = skb_clone(list, gfp_mask);
5383				if (!clone)
5384					return -ENOMEM;
5385				insp = list->next;
5386				list = clone;
5387			} else {
5388				/* This may be pulled without problems. */
5389				insp = list;
5390			}
5391			if (pskb_carve(list, eat, gfp_mask) < 0) {
5392				kfree_skb(clone);
5393				return -ENOMEM;
5394			}
5395			break;
5396		}
5397	} while (eat);
5398
5399	/* Free pulled out fragments. */
5400	while ((list = shinfo->frag_list) != insp) {
5401		shinfo->frag_list = list->next;
5402		kfree_skb(list);
5403	}
5404	/* And insert new clone at head. */
5405	if (clone) {
5406		clone->next = list;
5407		shinfo->frag_list = clone;
5408	}
5409	return 0;
5410}
5411
5412/* carve off first len bytes from skb. Split line (off) is in the
5413 * non-linear part of skb
5414 */
5415static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
5416				       int pos, gfp_t gfp_mask)
5417{
5418	int i, k = 0;
5419	int size = skb_end_offset(skb);
5420	u8 *data;
5421	const int nfrags = skb_shinfo(skb)->nr_frags;
5422	struct skb_shared_info *shinfo;
5423
5424	size = SKB_DATA_ALIGN(size);
5425
5426	if (skb_pfmemalloc(skb))
5427		gfp_mask |= __GFP_MEMALLOC;
5428	data = kmalloc_reserve(size +
5429			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
5430			       gfp_mask, NUMA_NO_NODE, NULL);
5431	if (!data)
5432		return -ENOMEM;
5433
5434	size = SKB_WITH_OVERHEAD(ksize(data));
5435
5436	memcpy((struct skb_shared_info *)(data + size),
5437	       skb_shinfo(skb), offsetof(struct skb_shared_info,
5438					 frags[skb_shinfo(skb)->nr_frags]));
5439	if (skb_orphan_frags(skb, gfp_mask)) {
5440		kfree(data);
5441		return -ENOMEM;
5442	}
5443	shinfo = (struct skb_shared_info *)(data + size);
5444	for (i = 0; i < nfrags; i++) {
5445		int fsize = skb_frag_size(&skb_shinfo(skb)->frags[i]);
5446
5447		if (pos + fsize > off) {
5448			shinfo->frags[k] = skb_shinfo(skb)->frags[i];
5449
5450			if (pos < off) {
5451				/* Split frag.
5452				 * We have two variants in this case:
5453				 * 1. Move all the frag to the second
5454				 *    part, if it is possible. F.e.
5455				 *    this approach is mandatory for TUX,
5456				 *    where splitting is expensive.
5457				 * 2. Split accurately, which is what we do here.
5458				 */
5459				shinfo->frags[0].page_offset += off - pos;
5460				skb_frag_size_sub(&shinfo->frags[0], off - pos);
5461			}
5462			skb_frag_ref(skb, i);
5463			k++;
5464		}
5465		pos += fsize;
5466	}
5467	shinfo->nr_frags = k;
5468	if (skb_has_frag_list(skb))
5469		skb_clone_fraglist(skb);
5470
5471	if (k == 0) {
5472		/* split line is in frag list */
5473		pskb_carve_frag_list(skb, shinfo, off - pos, gfp_mask);
5474	}
5475	skb_release_data(skb);
5476
5477	skb->head = data;
5478	skb->head_frag = 0;
5479	skb->data = data;
5480#ifdef NET_SKBUFF_DATA_USES_OFFSET
5481	skb->end = size;
5482#else
5483	skb->end = skb->head + size;
5484#endif
5485	skb_reset_tail_pointer(skb);
5486	skb_headers_offset_update(skb, 0);
5487	skb->cloned   = 0;
5488	skb->hdr_len  = 0;
5489	skb->nohdr    = 0;
5490	skb->len -= off;
5491	skb->data_len = skb->len;
5492	atomic_set(&skb_shinfo(skb)->dataref, 1);
5493	return 0;
5494}
5495
5496/* remove len bytes from the beginning of the skb */
5497static int pskb_carve(struct sk_buff *skb, const u32 len, gfp_t gfp)
5498{
5499	int headlen = skb_headlen(skb);
5500
5501	if (len < headlen)
5502		return pskb_carve_inside_header(skb, len, headlen, gfp);
5503	else
5504		return pskb_carve_inside_nonlinear(skb, len, headlen, gfp);
5505}
5506
5507/* Extract to_copy bytes starting at off from skb, and return this in
5508 * a new skb
5509 */
5510struct sk_buff *pskb_extract(struct sk_buff *skb, int off,
5511			     int to_copy, gfp_t gfp)
5512{
5513	struct sk_buff  *clone = skb_clone(skb, gfp);
5514
5515	if (!clone)
5516		return NULL;
5517
5518	if (pskb_carve(clone, off, gfp) < 0 ||
5519	    pskb_trim(clone, to_copy)) {
5520		kfree_skb(clone);
5521		return NULL;
5522	}
5523	return clone;
5524}
5525EXPORT_SYMBOL(pskb_extract);
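/* Usage sketch (editor's illustration): pulling a record that starts
 * "hdr_len" bytes into a larger receive skb out into its own skb, leaving
 * the original untouched. "hdr_len" and "rec_len" are hypothetical.
 *
 *	struct sk_buff *rec;
 *
 *	rec = pskb_extract(skb, hdr_len, rec_len, GFP_ATOMIC);
 *	if (!rec)
 *		return -ENOMEM;
 */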
5526
5527/**
5528 * skb_condense - try to get rid of fragments/frag_list if possible
5529 * @skb: buffer
5530 *
5531 * Can be used to save memory before skb is added to a busy queue.
5532 * If packet has bytes in frags and enough tail room in skb->head,
5533 * pull all of them, so that we can free the frags right now and adjust
5534 * truesize.
5535 * Notes:
5536 *	We do not reallocate skb->head and thus cannot fail.
5537 *	Caller must re-evaluate skb->truesize if needed.
5538 */
5539void skb_condense(struct sk_buff *skb)
5540{
5541	if (skb->data_len) {
5542		if (skb->data_len > skb->end - skb->tail ||
5543		    skb_cloned(skb))
5544			return;
5545
5546		/* Nice, we can free page frag(s) right now */
5547		__pskb_pull_tail(skb, skb->data_len);
5548	}
5549	/* At this point, skb->truesize might be overestimated,
5550	 * because the skb had fragments, and fragments do not report
5551	 * their truesize.
5552	 * When we pulled their content into skb->head, the fragments
5553	 * were freed, but __pskb_pull_tail() could not possibly
5554	 * adjust skb->truesize, not knowing the frag truesize.
5555	 */
5556	skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
5557}
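/* Usage sketch (editor's illustration): shrinking a packet before parking
 * it on a potentially long queue, then refreshing truesize-based
 * accounting as the note above requires. The sk_rmem_alloc adjustment is a
 * hypothetical example of such accounting.
 *
 *	int old_truesize = skb->truesize;
 *
 *	skb_condense(skb);
 *	atomic_sub(old_truesize - skb->truesize, &sk->sk_rmem_alloc);
 */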