Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Feb 10-13, 2025
Register
Loading...
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#include <linux/types.h>
  50#include <linux/mm.h>
  51#include <linux/capability.h>
  52#include <linux/fcntl.h>
  53#include <linux/socket.h>
  54#include <linux/in.h>
  55#include <linux/inet.h>
  56#include <linux/netdevice.h>
  57#include <linux/if_packet.h>
  58#include <linux/wireless.h>
  59#include <linux/kernel.h>
  60#include <linux/kmod.h>
  61#include <linux/slab.h>
  62#include <linux/vmalloc.h>
  63#include <net/net_namespace.h>
  64#include <net/ip.h>
  65#include <net/protocol.h>
  66#include <linux/skbuff.h>
  67#include <net/sock.h>
  68#include <linux/errno.h>
  69#include <linux/timer.h>
  70#include <linux/uaccess.h>
  71#include <asm/ioctls.h>
  72#include <asm/page.h>
  73#include <asm/cacheflush.h>
  74#include <asm/io.h>
  75#include <linux/proc_fs.h>
  76#include <linux/seq_file.h>
  77#include <linux/poll.h>
  78#include <linux/module.h>
  79#include <linux/init.h>
  80#include <linux/mutex.h>
  81#include <linux/if_vlan.h>
  82#include <linux/virtio_net.h>
  83#include <linux/errqueue.h>
  84#include <linux/net_tstamp.h>
  85#include <linux/percpu.h>
  86#ifdef CONFIG_INET
  87#include <net/inet_common.h>
  88#endif
  89#include <linux/bpf.h>
  90#include <net/compat.h>
  91
  92#include "internal.h"
  93
  94/*
  95   Assumptions:
  96   - if device has no dev->hard_header routine, it adds and removes ll header
  97     inside itself. In this case ll header is invisible outside of device,
  98     but higher levels still should reserve dev->hard_header_len.
  99     Some devices are enough clever to reallocate skb, when header
 100     will not fit to reserved space (tunnel), another ones are silly
 101     (PPP).
 102   - packet socket receives packets with pulled ll header,
 103     so that SOCK_RAW should push it back.
 104
 105On receive:
 106-----------
 107
 108Incoming, dev->hard_header!=NULL
 109   mac_header -> ll header
 110   data       -> data
 111
 112Outgoing, dev->hard_header!=NULL
 113   mac_header -> ll header
 114   data       -> ll header
 115
 116Incoming, dev->hard_header==NULL
 117   mac_header -> UNKNOWN position. It is very likely, that it points to ll
 118		 header.  PPP makes it, that is wrong, because introduce
 119		 assymetry between rx and tx paths.
 120   data       -> data
 121
 122Outgoing, dev->hard_header==NULL
 123   mac_header -> data. ll header is still not built!
 124   data       -> data
 125
 126Resume
 127  If dev->hard_header==NULL we are unlikely to restore sensible ll header.
 128
 129
 130On transmit:
 131------------
 132
 133dev->hard_header != NULL
 134   mac_header -> ll header
 135   data       -> ll header
 136
 137dev->hard_header == NULL (ll header is added by device, we cannot control it)
 138   mac_header -> data
 139   data       -> data
 140
 141   We should set nh.raw on output to correct posistion,
 142   packet classifier depends on it.
 143 */
 144
 145/* Private packet socket structures. */
 146
 147/* identical to struct packet_mreq except it has
 148 * a longer address field.
 149 */
 150struct packet_mreq_max {
 151	int		mr_ifindex;
 152	unsigned short	mr_type;
 153	unsigned short	mr_alen;
 154	unsigned char	mr_address[MAX_ADDR_LEN];
 155};
 156
 157union tpacket_uhdr {
 158	struct tpacket_hdr  *h1;
 159	struct tpacket2_hdr *h2;
 160	struct tpacket3_hdr *h3;
 161	void *raw;
 162};
 163
 164static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 165		int closing, int tx_ring);
 166
 167#define V3_ALIGNMENT	(8)
 168
 169#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 170
 171#define BLK_PLUS_PRIV(sz_of_priv) \
 172	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 173
 174#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 175#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 176#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 177#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 178#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 179#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 180#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 181
 182struct packet_sock;
 183static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 184		       struct packet_type *pt, struct net_device *orig_dev);
 185
 186static void *packet_previous_frame(struct packet_sock *po,
 187		struct packet_ring_buffer *rb,
 188		int status);
 189static void packet_increment_head(struct packet_ring_buffer *buff);
 190static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 191static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 192			struct packet_sock *);
 193static void prb_retire_current_block(struct tpacket_kbdq_core *,
 194		struct packet_sock *, unsigned int status);
 195static int prb_queue_frozen(struct tpacket_kbdq_core *);
 196static void prb_open_block(struct tpacket_kbdq_core *,
 197		struct tpacket_block_desc *);
 198static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 199static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 200static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 201static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 202		struct tpacket3_hdr *);
 203static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 204		struct tpacket3_hdr *);
 205static void packet_flush_mclist(struct sock *sk);
 206static u16 packet_pick_tx_queue(struct sk_buff *skb);
 207
 208struct packet_skb_cb {
 209	union {
 210		struct sockaddr_pkt pkt;
 211		union {
 212			/* Trick: alias skb original length with
 213			 * ll.sll_family and ll.protocol in order
 214			 * to save room.
 215			 */
 216			unsigned int origlen;
 217			struct sockaddr_ll ll;
 218		};
 219	} sa;
 220};
 221
 222#define vio_le() virtio_legacy_is_little_endian()
 223
 224#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 225
 226#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 227#define GET_PBLOCK_DESC(x, bid)	\
 228	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 229#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 230	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 231#define GET_NEXT_PRB_BLK_NUM(x) \
 232	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 233	((x)->kactive_blk_num+1) : 0)
 234
 235static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 236static void __fanout_link(struct sock *sk, struct packet_sock *po);
 237
 238static int packet_direct_xmit(struct sk_buff *skb)
 239{
 240	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 241}
 242
 243static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 244{
 245	struct net_device *dev;
 246
 247	rcu_read_lock();
 248	dev = rcu_dereference(po->cached_dev);
 249	if (likely(dev))
 250		dev_hold(dev);
 251	rcu_read_unlock();
 252
 253	return dev;
 254}
 255
 256static void packet_cached_dev_assign(struct packet_sock *po,
 257				     struct net_device *dev)
 258{
 259	rcu_assign_pointer(po->cached_dev, dev);
 260}
 261
 262static void packet_cached_dev_reset(struct packet_sock *po)
 263{
 264	RCU_INIT_POINTER(po->cached_dev, NULL);
 265}
 266
 267static bool packet_use_direct_xmit(const struct packet_sock *po)
 268{
 269	return po->xmit == packet_direct_xmit;
 270}
 271
 272static u16 packet_pick_tx_queue(struct sk_buff *skb)
 273{
 274	struct net_device *dev = skb->dev;
 275	const struct net_device_ops *ops = dev->netdev_ops;
 276	int cpu = raw_smp_processor_id();
 277	u16 queue_index;
 278
 279#ifdef CONFIG_XPS
 280	skb->sender_cpu = cpu + 1;
 281#endif
 282	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 283	if (ops->ndo_select_queue) {
 284		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 285		queue_index = netdev_cap_txqueue(dev, queue_index);
 286	} else {
 287		queue_index = netdev_pick_tx(dev, skb, NULL);
 288	}
 289
 290	return queue_index;
 291}
 292
 293/* __register_prot_hook must be invoked through register_prot_hook
 294 * or from a context in which asynchronous accesses to the packet
 295 * socket is not possible (packet_create()).
 296 */
 297static void __register_prot_hook(struct sock *sk)
 298{
 299	struct packet_sock *po = pkt_sk(sk);
 300
 301	if (!po->running) {
 302		if (po->fanout)
 303			__fanout_link(sk, po);
 304		else
 305			dev_add_pack(&po->prot_hook);
 306
 307		sock_hold(sk);
 308		po->running = 1;
 309	}
 310}
 311
 312static void register_prot_hook(struct sock *sk)
 313{
 314	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 315	__register_prot_hook(sk);
 316}
 317
 318/* If the sync parameter is true, we will temporarily drop
 319 * the po->bind_lock and do a synchronize_net to make sure no
 320 * asynchronous packet processing paths still refer to the elements
 321 * of po->prot_hook.  If the sync parameter is false, it is the
 322 * callers responsibility to take care of this.
 323 */
 324static void __unregister_prot_hook(struct sock *sk, bool sync)
 325{
 326	struct packet_sock *po = pkt_sk(sk);
 327
 328	lockdep_assert_held_once(&po->bind_lock);
 329
 330	po->running = 0;
 331
 332	if (po->fanout)
 333		__fanout_unlink(sk, po);
 334	else
 335		__dev_remove_pack(&po->prot_hook);
 336
 337	__sock_put(sk);
 338
 339	if (sync) {
 340		spin_unlock(&po->bind_lock);
 341		synchronize_net();
 342		spin_lock(&po->bind_lock);
 343	}
 344}
 345
 346static void unregister_prot_hook(struct sock *sk, bool sync)
 347{
 348	struct packet_sock *po = pkt_sk(sk);
 349
 350	if (po->running)
 351		__unregister_prot_hook(sk, sync);
 352}
 353
 354static inline struct page * __pure pgv_to_page(void *addr)
 355{
 356	if (is_vmalloc_addr(addr))
 357		return vmalloc_to_page(addr);
 358	return virt_to_page(addr);
 359}
 360
 361static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 362{
 363	union tpacket_uhdr h;
 364
 365	h.raw = frame;
 366	switch (po->tp_version) {
 367	case TPACKET_V1:
 368		h.h1->tp_status = status;
 369		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 370		break;
 371	case TPACKET_V2:
 372		h.h2->tp_status = status;
 373		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 374		break;
 375	case TPACKET_V3:
 376		h.h3->tp_status = status;
 377		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 378		break;
 379	default:
 380		WARN(1, "TPACKET version not supported.\n");
 381		BUG();
 382	}
 383
 384	smp_wmb();
 385}
 386
 387static int __packet_get_status(const struct packet_sock *po, void *frame)
 388{
 389	union tpacket_uhdr h;
 390
 391	smp_rmb();
 392
 393	h.raw = frame;
 394	switch (po->tp_version) {
 395	case TPACKET_V1:
 396		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 397		return h.h1->tp_status;
 398	case TPACKET_V2:
 399		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 400		return h.h2->tp_status;
 401	case TPACKET_V3:
 402		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 403		return h.h3->tp_status;
 404	default:
 405		WARN(1, "TPACKET version not supported.\n");
 406		BUG();
 407		return 0;
 408	}
 409}
 410
 411static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 412				   unsigned int flags)
 413{
 414	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 415
 416	if (shhwtstamps &&
 417	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 418	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 419		return TP_STATUS_TS_RAW_HARDWARE;
 420
 421	if (ktime_to_timespec64_cond(skb->tstamp, ts))
 422		return TP_STATUS_TS_SOFTWARE;
 423
 424	return 0;
 425}
 426
 427static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 428				    struct sk_buff *skb)
 429{
 430	union tpacket_uhdr h;
 431	struct timespec64 ts;
 432	__u32 ts_status;
 433
 434	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 435		return 0;
 436
 437	h.raw = frame;
 438	/*
 439	 * versions 1 through 3 overflow the timestamps in y2106, since they
 440	 * all store the seconds in a 32-bit unsigned integer.
 441	 * If we create a version 4, that should have a 64-bit timestamp,
 442	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
 443	 * nanoseconds.
 444	 */
 445	switch (po->tp_version) {
 446	case TPACKET_V1:
 447		h.h1->tp_sec = ts.tv_sec;
 448		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 449		break;
 450	case TPACKET_V2:
 451		h.h2->tp_sec = ts.tv_sec;
 452		h.h2->tp_nsec = ts.tv_nsec;
 453		break;
 454	case TPACKET_V3:
 455		h.h3->tp_sec = ts.tv_sec;
 456		h.h3->tp_nsec = ts.tv_nsec;
 457		break;
 458	default:
 459		WARN(1, "TPACKET version not supported.\n");
 460		BUG();
 461	}
 462
 463	/* one flush is safe, as both fields always lie on the same cacheline */
 464	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 465	smp_wmb();
 466
 467	return ts_status;
 468}
 469
 470static void *packet_lookup_frame(const struct packet_sock *po,
 471				 const struct packet_ring_buffer *rb,
 472				 unsigned int position,
 473				 int status)
 474{
 475	unsigned int pg_vec_pos, frame_offset;
 476	union tpacket_uhdr h;
 477
 478	pg_vec_pos = position / rb->frames_per_block;
 479	frame_offset = position % rb->frames_per_block;
 480
 481	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 482		(frame_offset * rb->frame_size);
 483
 484	if (status != __packet_get_status(po, h.raw))
 485		return NULL;
 486
 487	return h.raw;
 488}
 489
 490static void *packet_current_frame(struct packet_sock *po,
 491		struct packet_ring_buffer *rb,
 492		int status)
 493{
 494	return packet_lookup_frame(po, rb, rb->head, status);
 495}
 496
 497static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 498{
 499	del_timer_sync(&pkc->retire_blk_timer);
 500}
 501
 502static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 503		struct sk_buff_head *rb_queue)
 504{
 505	struct tpacket_kbdq_core *pkc;
 506
 507	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 508
 509	spin_lock_bh(&rb_queue->lock);
 510	pkc->delete_blk_timer = 1;
 511	spin_unlock_bh(&rb_queue->lock);
 512
 513	prb_del_retire_blk_timer(pkc);
 514}
 515
 516static void prb_setup_retire_blk_timer(struct packet_sock *po)
 517{
 518	struct tpacket_kbdq_core *pkc;
 519
 520	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 521	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 522		    0);
 523	pkc->retire_blk_timer.expires = jiffies;
 524}
 525
 526static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 527				int blk_size_in_bytes)
 528{
 529	struct net_device *dev;
 530	unsigned int mbits, div;
 531	struct ethtool_link_ksettings ecmd;
 532	int err;
 533
 534	rtnl_lock();
 535	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 536	if (unlikely(!dev)) {
 537		rtnl_unlock();
 538		return DEFAULT_PRB_RETIRE_TOV;
 539	}
 540	err = __ethtool_get_link_ksettings(dev, &ecmd);
 541	rtnl_unlock();
 542	if (err)
 543		return DEFAULT_PRB_RETIRE_TOV;
 544
 545	/* If the link speed is so slow you don't really
 546	 * need to worry about perf anyways
 547	 */
 548	if (ecmd.base.speed < SPEED_1000 ||
 549	    ecmd.base.speed == SPEED_UNKNOWN)
 550		return DEFAULT_PRB_RETIRE_TOV;
 
 
 
 
 551
 552	div = ecmd.base.speed / 1000;
 553	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 554
 555	if (div)
 556		mbits /= div;
 557
 
 
 558	if (div)
 559		return mbits + 1;
 560	return mbits;
 561}
 562
 563static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 564			union tpacket_req_u *req_u)
 565{
 566	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 567}
 568
 569static void init_prb_bdqc(struct packet_sock *po,
 570			struct packet_ring_buffer *rb,
 571			struct pgv *pg_vec,
 572			union tpacket_req_u *req_u)
 573{
 574	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 575	struct tpacket_block_desc *pbd;
 576
 577	memset(p1, 0x0, sizeof(*p1));
 578
 579	p1->knxt_seq_num = 1;
 580	p1->pkbdq = pg_vec;
 581	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 582	p1->pkblk_start	= pg_vec[0].buffer;
 583	p1->kblk_size = req_u->req3.tp_block_size;
 584	p1->knum_blocks	= req_u->req3.tp_block_nr;
 585	p1->hdrlen = po->tp_hdrlen;
 586	p1->version = po->tp_version;
 587	p1->last_kactive_blk_num = 0;
 588	po->stats.stats3.tp_freeze_q_cnt = 0;
 589	if (req_u->req3.tp_retire_blk_tov)
 590		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 591	else
 592		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 593						req_u->req3.tp_block_size);
 594	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 595	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 596	rwlock_init(&p1->blk_fill_in_prog_lock);
 597
 598	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 599	prb_init_ft_ops(p1, req_u);
 600	prb_setup_retire_blk_timer(po);
 601	prb_open_block(p1, pbd);
 602}
 603
 604/*  Do NOT update the last_blk_num first.
 605 *  Assumes sk_buff_head lock is held.
 606 */
 607static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 608{
 609	mod_timer(&pkc->retire_blk_timer,
 610			jiffies + pkc->tov_in_jiffies);
 611	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 612}
 613
 614/*
 615 * Timer logic:
 616 * 1) We refresh the timer only when we open a block.
 617 *    By doing this we don't waste cycles refreshing the timer
 618 *	  on packet-by-packet basis.
 619 *
 620 * With a 1MB block-size, on a 1Gbps line, it will take
 621 * i) ~8 ms to fill a block + ii) memcpy etc.
 622 * In this cut we are not accounting for the memcpy time.
 623 *
 624 * So, if the user sets the 'tmo' to 10ms then the timer
 625 * will never fire while the block is still getting filled
 626 * (which is what we want). However, the user could choose
 627 * to close a block early and that's fine.
 628 *
 629 * But when the timer does fire, we check whether or not to refresh it.
 630 * Since the tmo granularity is in msecs, it is not too expensive
 631 * to refresh the timer, lets say every '8' msecs.
 632 * Either the user can set the 'tmo' or we can derive it based on
 633 * a) line-speed and b) block-size.
 634 * prb_calc_retire_blk_tmo() calculates the tmo.
 635 *
 636 */
 637static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 638{
 639	struct packet_sock *po =
 640		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 641	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 642	unsigned int frozen;
 643	struct tpacket_block_desc *pbd;
 644
 645	spin_lock(&po->sk.sk_receive_queue.lock);
 646
 647	frozen = prb_queue_frozen(pkc);
 648	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 649
 650	if (unlikely(pkc->delete_blk_timer))
 651		goto out;
 652
 653	/* We only need to plug the race when the block is partially filled.
 654	 * tpacket_rcv:
 655	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 656	 *		copy_bits() is in progress ...
 657	 *		timer fires on other cpu:
 658	 *		we can't retire the current block because copy_bits
 659	 *		is in progress.
 660	 *
 661	 */
 662	if (BLOCK_NUM_PKTS(pbd)) {
 663		/* Waiting for skb_copy_bits to finish... */
 664		write_lock(&pkc->blk_fill_in_prog_lock);
 665		write_unlock(&pkc->blk_fill_in_prog_lock);
 
 666	}
 667
 668	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 669		if (!frozen) {
 670			if (!BLOCK_NUM_PKTS(pbd)) {
 671				/* An empty block. Just refresh the timer. */
 672				goto refresh_timer;
 673			}
 674			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 675			if (!prb_dispatch_next_block(pkc, po))
 676				goto refresh_timer;
 677			else
 678				goto out;
 679		} else {
 680			/* Case 1. Queue was frozen because user-space was
 681			 *	   lagging behind.
 682			 */
 683			if (prb_curr_blk_in_use(pbd)) {
 684				/*
 685				 * Ok, user-space is still behind.
 686				 * So just refresh the timer.
 687				 */
 688				goto refresh_timer;
 689			} else {
 690			       /* Case 2. queue was frozen,user-space caught up,
 691				* now the link went idle && the timer fired.
 692				* We don't have a block to close.So we open this
 693				* block and restart the timer.
 694				* opening a block thaws the queue,restarts timer
 695				* Thawing/timer-refresh is a side effect.
 696				*/
 697				prb_open_block(pkc, pbd);
 698				goto out;
 699			}
 700		}
 701	}
 702
 703refresh_timer:
 704	_prb_refresh_rx_retire_blk_timer(pkc);
 705
 706out:
 707	spin_unlock(&po->sk.sk_receive_queue.lock);
 708}
 709
 710static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 711		struct tpacket_block_desc *pbd1, __u32 status)
 712{
 713	/* Flush everything minus the block header */
 714
 715#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 716	u8 *start, *end;
 717
 718	start = (u8 *)pbd1;
 719
 720	/* Skip the block header(we know header WILL fit in 4K) */
 721	start += PAGE_SIZE;
 722
 723	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 724	for (; start < end; start += PAGE_SIZE)
 725		flush_dcache_page(pgv_to_page(start));
 726
 727	smp_wmb();
 728#endif
 729
 730	/* Now update the block status. */
 731
 732	BLOCK_STATUS(pbd1) = status;
 733
 734	/* Flush the block header */
 735
 736#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 737	start = (u8 *)pbd1;
 738	flush_dcache_page(pgv_to_page(start));
 739
 740	smp_wmb();
 741#endif
 742}
 743
 744/*
 745 * Side effect:
 746 *
 747 * 1) flush the block
 748 * 2) Increment active_blk_num
 749 *
 750 * Note:We DONT refresh the timer on purpose.
 751 *	Because almost always the next block will be opened.
 752 */
 753static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 754		struct tpacket_block_desc *pbd1,
 755		struct packet_sock *po, unsigned int stat)
 756{
 757	__u32 status = TP_STATUS_USER | stat;
 758
 759	struct tpacket3_hdr *last_pkt;
 760	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 761	struct sock *sk = &po->sk;
 762
 763	if (atomic_read(&po->tp_drops))
 764		status |= TP_STATUS_LOSING;
 765
 766	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 767	last_pkt->tp_next_offset = 0;
 768
 769	/* Get the ts of the last pkt */
 770	if (BLOCK_NUM_PKTS(pbd1)) {
 771		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 772		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 773	} else {
 774		/* Ok, we tmo'd - so get the current time.
 775		 *
 776		 * It shouldn't really happen as we don't close empty
 777		 * blocks. See prb_retire_rx_blk_timer_expired().
 778		 */
 779		struct timespec64 ts;
 780		ktime_get_real_ts64(&ts);
 781		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 782		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 783	}
 784
 785	smp_wmb();
 786
 787	/* Flush the block */
 788	prb_flush_block(pkc1, pbd1, status);
 789
 790	sk->sk_data_ready(sk);
 791
 792	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 793}
 794
 795static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 796{
 797	pkc->reset_pending_on_curr_blk = 0;
 798}
 799
 800/*
 801 * Side effect of opening a block:
 802 *
 803 * 1) prb_queue is thawed.
 804 * 2) retire_blk_timer is refreshed.
 805 *
 806 */
 807static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 808	struct tpacket_block_desc *pbd1)
 809{
 810	struct timespec64 ts;
 811	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 812
 813	smp_rmb();
 814
 815	/* We could have just memset this but we will lose the
 816	 * flexibility of making the priv area sticky
 817	 */
 818
 819	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 820	BLOCK_NUM_PKTS(pbd1) = 0;
 821	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 822
 823	ktime_get_real_ts64(&ts);
 824
 825	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 826	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 827
 828	pkc1->pkblk_start = (char *)pbd1;
 829	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 830
 831	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 832	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 833
 834	pbd1->version = pkc1->version;
 835	pkc1->prev = pkc1->nxt_offset;
 836	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 837
 838	prb_thaw_queue(pkc1);
 839	_prb_refresh_rx_retire_blk_timer(pkc1);
 840
 841	smp_wmb();
 842}
 843
 844/*
 845 * Queue freeze logic:
 846 * 1) Assume tp_block_nr = 8 blocks.
 847 * 2) At time 't0', user opens Rx ring.
 848 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 849 * 4) user-space is either sleeping or processing block '0'.
 850 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 851 *    it will close block-7,loop around and try to fill block '0'.
 852 *    call-flow:
 853 *    __packet_lookup_frame_in_block
 854 *      prb_retire_current_block()
 855 *      prb_dispatch_next_block()
 856 *        |->(BLOCK_STATUS == USER) evaluates to true
 857 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 858 * 6) Now there are two cases:
 859 *    6.1) Link goes idle right after the queue is frozen.
 860 *         But remember, the last open_block() refreshed the timer.
 861 *         When this timer expires,it will refresh itself so that we can
 862 *         re-open block-0 in near future.
 863 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 864 *         case and __packet_lookup_frame_in_block will check if block-0
 865 *         is free and can now be re-used.
 866 */
 867static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 868				  struct packet_sock *po)
 869{
 870	pkc->reset_pending_on_curr_blk = 1;
 871	po->stats.stats3.tp_freeze_q_cnt++;
 872}
 873
 874#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 875
 876/*
 877 * If the next block is free then we will dispatch it
 878 * and return a good offset.
 879 * Else, we will freeze the queue.
 880 * So, caller must check the return value.
 881 */
 882static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 883		struct packet_sock *po)
 884{
 885	struct tpacket_block_desc *pbd;
 886
 887	smp_rmb();
 888
 889	/* 1. Get current block num */
 890	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 891
 892	/* 2. If this block is currently in_use then freeze the queue */
 893	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 894		prb_freeze_queue(pkc, po);
 895		return NULL;
 896	}
 897
 898	/*
 899	 * 3.
 900	 * open this block and return the offset where the first packet
 901	 * needs to get stored.
 902	 */
 903	prb_open_block(pkc, pbd);
 904	return (void *)pkc->nxt_offset;
 905}
 906
 907static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 908		struct packet_sock *po, unsigned int status)
 909{
 910	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 911
 912	/* retire/close the current block */
 913	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 914		/*
 915		 * Plug the case where copy_bits() is in progress on
 916		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 917		 * have space to copy the pkt in the current block and
 918		 * called prb_retire_current_block()
 919		 *
 920		 * We don't need to worry about the TMO case because
 921		 * the timer-handler already handled this case.
 922		 */
 923		if (!(status & TP_STATUS_BLK_TMO)) {
 924			/* Waiting for skb_copy_bits to finish... */
 925			write_lock(&pkc->blk_fill_in_prog_lock);
 926			write_unlock(&pkc->blk_fill_in_prog_lock);
 
 927		}
 928		prb_close_block(pkc, pbd, po, status);
 929		return;
 930	}
 931}
 932
 933static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
 934{
 935	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 936}
 937
 938static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 939{
 940	return pkc->reset_pending_on_curr_blk;
 941}
 942
 943static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 944	__releases(&pkc->blk_fill_in_prog_lock)
 945{
 946	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 947
 948	read_unlock(&pkc->blk_fill_in_prog_lock);
 949}
 950
 951static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 952			struct tpacket3_hdr *ppd)
 953{
 954	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 955}
 956
 957static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 958			struct tpacket3_hdr *ppd)
 959{
 960	ppd->hv1.tp_rxhash = 0;
 961}
 962
 963static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 964			struct tpacket3_hdr *ppd)
 965{
 966	if (skb_vlan_tag_present(pkc->skb)) {
 967		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
 968		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
 969		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
 970	} else {
 971		ppd->hv1.tp_vlan_tci = 0;
 972		ppd->hv1.tp_vlan_tpid = 0;
 973		ppd->tp_status = TP_STATUS_AVAILABLE;
 974	}
 975}
 976
 977static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 978			struct tpacket3_hdr *ppd)
 979{
 980	ppd->hv1.tp_padding = 0;
 981	prb_fill_vlan_info(pkc, ppd);
 982
 983	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 984		prb_fill_rxhash(pkc, ppd);
 985	else
 986		prb_clear_rxhash(pkc, ppd);
 987}
 988
 989static void prb_fill_curr_block(char *curr,
 990				struct tpacket_kbdq_core *pkc,
 991				struct tpacket_block_desc *pbd,
 992				unsigned int len)
 993	__acquires(&pkc->blk_fill_in_prog_lock)
 994{
 995	struct tpacket3_hdr *ppd;
 996
 997	ppd  = (struct tpacket3_hdr *)curr;
 998	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
 999	pkc->prev = curr;
1000	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1001	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1002	BLOCK_NUM_PKTS(pbd) += 1;
1003	read_lock(&pkc->blk_fill_in_prog_lock);
1004	prb_run_all_ft_ops(pkc, ppd);
1005}
1006
1007/* Assumes caller has the sk->rx_queue.lock */
1008static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1009					    struct sk_buff *skb,
1010					    unsigned int len
1011					    )
1012{
1013	struct tpacket_kbdq_core *pkc;
1014	struct tpacket_block_desc *pbd;
1015	char *curr, *end;
1016
1017	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1018	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1019
1020	/* Queue is frozen when user space is lagging behind */
1021	if (prb_queue_frozen(pkc)) {
1022		/*
1023		 * Check if that last block which caused the queue to freeze,
1024		 * is still in_use by user-space.
1025		 */
1026		if (prb_curr_blk_in_use(pbd)) {
1027			/* Can't record this packet */
1028			return NULL;
1029		} else {
1030			/*
1031			 * Ok, the block was released by user-space.
1032			 * Now let's open that block.
1033			 * opening a block also thaws the queue.
1034			 * Thawing is a side effect.
1035			 */
1036			prb_open_block(pkc, pbd);
1037		}
1038	}
1039
1040	smp_mb();
1041	curr = pkc->nxt_offset;
1042	pkc->skb = skb;
1043	end = (char *)pbd + pkc->kblk_size;
1044
1045	/* first try the current block */
1046	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1047		prb_fill_curr_block(curr, pkc, pbd, len);
1048		return (void *)curr;
1049	}
1050
1051	/* Ok, close the current block */
1052	prb_retire_current_block(pkc, po, 0);
1053
1054	/* Now, try to dispatch the next block */
1055	curr = (char *)prb_dispatch_next_block(pkc, po);
1056	if (curr) {
1057		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1058		prb_fill_curr_block(curr, pkc, pbd, len);
1059		return (void *)curr;
1060	}
1061
1062	/*
1063	 * No free blocks are available.user_space hasn't caught up yet.
1064	 * Queue was just frozen and now this packet will get dropped.
1065	 */
1066	return NULL;
1067}
1068
1069static void *packet_current_rx_frame(struct packet_sock *po,
1070					    struct sk_buff *skb,
1071					    int status, unsigned int len)
1072{
1073	char *curr = NULL;
1074	switch (po->tp_version) {
1075	case TPACKET_V1:
1076	case TPACKET_V2:
1077		curr = packet_lookup_frame(po, &po->rx_ring,
1078					po->rx_ring.head, status);
1079		return curr;
1080	case TPACKET_V3:
1081		return __packet_lookup_frame_in_block(po, skb, len);
1082	default:
1083		WARN(1, "TPACKET version not supported\n");
1084		BUG();
1085		return NULL;
1086	}
1087}
1088
1089static void *prb_lookup_block(const struct packet_sock *po,
1090			      const struct packet_ring_buffer *rb,
1091			      unsigned int idx,
1092			      int status)
1093{
1094	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1095	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1096
1097	if (status != BLOCK_STATUS(pbd))
1098		return NULL;
1099	return pbd;
1100}
1101
1102static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1103{
1104	unsigned int prev;
1105	if (rb->prb_bdqc.kactive_blk_num)
1106		prev = rb->prb_bdqc.kactive_blk_num-1;
1107	else
1108		prev = rb->prb_bdqc.knum_blocks-1;
1109	return prev;
1110}
1111
1112/* Assumes caller has held the rx_queue.lock */
1113static void *__prb_previous_block(struct packet_sock *po,
1114					 struct packet_ring_buffer *rb,
1115					 int status)
1116{
1117	unsigned int previous = prb_previous_blk_num(rb);
1118	return prb_lookup_block(po, rb, previous, status);
1119}
1120
1121static void *packet_previous_rx_frame(struct packet_sock *po,
1122					     struct packet_ring_buffer *rb,
1123					     int status)
1124{
1125	if (po->tp_version <= TPACKET_V2)
1126		return packet_previous_frame(po, rb, status);
1127
1128	return __prb_previous_block(po, rb, status);
1129}
1130
1131static void packet_increment_rx_head(struct packet_sock *po,
1132					    struct packet_ring_buffer *rb)
1133{
1134	switch (po->tp_version) {
1135	case TPACKET_V1:
1136	case TPACKET_V2:
1137		return packet_increment_head(rb);
1138	case TPACKET_V3:
1139	default:
1140		WARN(1, "TPACKET version not supported.\n");
1141		BUG();
1142		return;
1143	}
1144}
1145
1146static void *packet_previous_frame(struct packet_sock *po,
1147		struct packet_ring_buffer *rb,
1148		int status)
1149{
1150	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1151	return packet_lookup_frame(po, rb, previous, status);
1152}
1153
1154static void packet_increment_head(struct packet_ring_buffer *buff)
1155{
1156	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1157}
1158
1159static void packet_inc_pending(struct packet_ring_buffer *rb)
1160{
1161	this_cpu_inc(*rb->pending_refcnt);
1162}
1163
1164static void packet_dec_pending(struct packet_ring_buffer *rb)
1165{
1166	this_cpu_dec(*rb->pending_refcnt);
1167}
1168
1169static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1170{
1171	unsigned int refcnt = 0;
1172	int cpu;
1173
1174	/* We don't use pending refcount in rx_ring. */
1175	if (rb->pending_refcnt == NULL)
1176		return 0;
1177
1178	for_each_possible_cpu(cpu)
1179		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1180
1181	return refcnt;
1182}
1183
1184static int packet_alloc_pending(struct packet_sock *po)
1185{
1186	po->rx_ring.pending_refcnt = NULL;
1187
1188	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1189	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1190		return -ENOBUFS;
1191
1192	return 0;
1193}
1194
1195static void packet_free_pending(struct packet_sock *po)
1196{
1197	free_percpu(po->tx_ring.pending_refcnt);
1198}
1199
1200#define ROOM_POW_OFF	2
1201#define ROOM_NONE	0x0
1202#define ROOM_LOW	0x1
1203#define ROOM_NORMAL	0x2
1204
1205static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1206{
1207	int idx, len;
1208
1209	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1210	idx = READ_ONCE(po->rx_ring.head);
1211	if (pow_off)
1212		idx += len >> pow_off;
1213	if (idx >= len)
1214		idx -= len;
1215	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1216}
1217
1218static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1219{
1220	int idx, len;
1221
1222	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1223	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1224	if (pow_off)
1225		idx += len >> pow_off;
1226	if (idx >= len)
1227		idx -= len;
1228	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1229}
1230
1231static int __packet_rcv_has_room(const struct packet_sock *po,
1232				 const struct sk_buff *skb)
1233{
1234	const struct sock *sk = &po->sk;
1235	int ret = ROOM_NONE;
1236
1237	if (po->prot_hook.func != tpacket_rcv) {
1238		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1239		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1240				   - (skb ? skb->truesize : 0);
1241
1242		if (avail > (rcvbuf >> ROOM_POW_OFF))
1243			return ROOM_NORMAL;
1244		else if (avail > 0)
1245			return ROOM_LOW;
1246		else
1247			return ROOM_NONE;
1248	}
1249
1250	if (po->tp_version == TPACKET_V3) {
1251		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1252			ret = ROOM_NORMAL;
1253		else if (__tpacket_v3_has_room(po, 0))
1254			ret = ROOM_LOW;
1255	} else {
1256		if (__tpacket_has_room(po, ROOM_POW_OFF))
1257			ret = ROOM_NORMAL;
1258		else if (__tpacket_has_room(po, 0))
1259			ret = ROOM_LOW;
1260	}
1261
1262	return ret;
1263}
1264
1265static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1266{
1267	int pressure, ret;
1268
1269	ret = __packet_rcv_has_room(po, skb);
1270	pressure = ret != ROOM_NORMAL;
1271
1272	if (READ_ONCE(po->pressure) != pressure)
1273		WRITE_ONCE(po->pressure, pressure);
1274
1275	return ret;
1276}
1277
1278static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1279{
1280	if (READ_ONCE(po->pressure) &&
1281	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1282		WRITE_ONCE(po->pressure,  0);
1283}
1284
1285static void packet_sock_destruct(struct sock *sk)
1286{
1287	skb_queue_purge(&sk->sk_error_queue);
1288
1289	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1290	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1291
1292	if (!sock_flag(sk, SOCK_DEAD)) {
1293		pr_err("Attempt to release alive packet socket: %p\n", sk);
1294		return;
1295	}
1296
1297	sk_refcnt_debug_dec(sk);
1298}
1299
1300static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1301{
1302	u32 *history = po->rollover->history;
1303	u32 victim, rxhash;
1304	int i, count = 0;
1305
1306	rxhash = skb_get_hash(skb);
1307	for (i = 0; i < ROLLOVER_HLEN; i++)
1308		if (READ_ONCE(history[i]) == rxhash)
1309			count++;
1310
1311	victim = prandom_u32() % ROLLOVER_HLEN;
1312
1313	/* Avoid dirtying the cache line if possible */
1314	if (READ_ONCE(history[victim]) != rxhash)
1315		WRITE_ONCE(history[victim], rxhash);
1316
1317	return count > (ROLLOVER_HLEN >> 1);
1318}
1319
1320static unsigned int fanout_demux_hash(struct packet_fanout *f,
1321				      struct sk_buff *skb,
1322				      unsigned int num)
1323{
1324	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1325}
1326
1327static unsigned int fanout_demux_lb(struct packet_fanout *f,
1328				    struct sk_buff *skb,
1329				    unsigned int num)
1330{
1331	unsigned int val = atomic_inc_return(&f->rr_cur);
1332
1333	return val % num;
1334}
1335
1336static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1337				     struct sk_buff *skb,
1338				     unsigned int num)
1339{
1340	return smp_processor_id() % num;
1341}
1342
1343static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1344				     struct sk_buff *skb,
1345				     unsigned int num)
1346{
1347	return prandom_u32_max(num);
1348}
1349
1350static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1351					  struct sk_buff *skb,
1352					  unsigned int idx, bool try_self,
1353					  unsigned int num)
1354{
1355	struct packet_sock *po, *po_next, *po_skip = NULL;
1356	unsigned int i, j, room = ROOM_NONE;
1357
1358	po = pkt_sk(f->arr[idx]);
1359
1360	if (try_self) {
1361		room = packet_rcv_has_room(po, skb);
1362		if (room == ROOM_NORMAL ||
1363		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1364			return idx;
1365		po_skip = po;
1366	}
1367
1368	i = j = min_t(int, po->rollover->sock, num - 1);
1369	do {
1370		po_next = pkt_sk(f->arr[i]);
1371		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1372		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1373			if (i != j)
1374				po->rollover->sock = i;
1375			atomic_long_inc(&po->rollover->num);
1376			if (room == ROOM_LOW)
1377				atomic_long_inc(&po->rollover->num_huge);
1378			return i;
1379		}
1380
1381		if (++i == num)
1382			i = 0;
1383	} while (i != j);
1384
1385	atomic_long_inc(&po->rollover->num_failed);
1386	return idx;
1387}
1388
1389static unsigned int fanout_demux_qm(struct packet_fanout *f,
1390				    struct sk_buff *skb,
1391				    unsigned int num)
1392{
1393	return skb_get_queue_mapping(skb) % num;
1394}
1395
1396static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1397				     struct sk_buff *skb,
1398				     unsigned int num)
1399{
1400	struct bpf_prog *prog;
1401	unsigned int ret = 0;
1402
1403	rcu_read_lock();
1404	prog = rcu_dereference(f->bpf_prog);
1405	if (prog)
1406		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1407	rcu_read_unlock();
1408
1409	return ret;
1410}
1411
1412static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1413{
1414	return f->flags & (flag >> 8);
1415}
1416
1417static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1418			     struct packet_type *pt, struct net_device *orig_dev)
1419{
1420	struct packet_fanout *f = pt->af_packet_priv;
1421	unsigned int num = READ_ONCE(f->num_members);
1422	struct net *net = read_pnet(&f->net);
1423	struct packet_sock *po;
1424	unsigned int idx;
1425
1426	if (!net_eq(dev_net(dev), net) || !num) {
1427		kfree_skb(skb);
1428		return 0;
1429	}
1430
1431	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1432		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1433		if (!skb)
1434			return 0;
1435	}
1436	switch (f->type) {
1437	case PACKET_FANOUT_HASH:
1438	default:
1439		idx = fanout_demux_hash(f, skb, num);
1440		break;
1441	case PACKET_FANOUT_LB:
1442		idx = fanout_demux_lb(f, skb, num);
1443		break;
1444	case PACKET_FANOUT_CPU:
1445		idx = fanout_demux_cpu(f, skb, num);
1446		break;
1447	case PACKET_FANOUT_RND:
1448		idx = fanout_demux_rnd(f, skb, num);
1449		break;
1450	case PACKET_FANOUT_QM:
1451		idx = fanout_demux_qm(f, skb, num);
1452		break;
1453	case PACKET_FANOUT_ROLLOVER:
1454		idx = fanout_demux_rollover(f, skb, 0, false, num);
1455		break;
1456	case PACKET_FANOUT_CBPF:
1457	case PACKET_FANOUT_EBPF:
1458		idx = fanout_demux_bpf(f, skb, num);
1459		break;
1460	}
1461
1462	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1463		idx = fanout_demux_rollover(f, skb, idx, true, num);
1464
1465	po = pkt_sk(f->arr[idx]);
1466	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1467}
1468
1469DEFINE_MUTEX(fanout_mutex);
1470EXPORT_SYMBOL_GPL(fanout_mutex);
1471static LIST_HEAD(fanout_list);
1472static u16 fanout_next_id;
1473
1474static void __fanout_link(struct sock *sk, struct packet_sock *po)
1475{
1476	struct packet_fanout *f = po->fanout;
1477
1478	spin_lock(&f->lock);
1479	f->arr[f->num_members] = sk;
1480	smp_wmb();
1481	f->num_members++;
1482	if (f->num_members == 1)
1483		dev_add_pack(&f->prot_hook);
1484	spin_unlock(&f->lock);
1485}
1486
1487static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1488{
1489	struct packet_fanout *f = po->fanout;
1490	int i;
1491
1492	spin_lock(&f->lock);
1493	for (i = 0; i < f->num_members; i++) {
1494		if (f->arr[i] == sk)
1495			break;
1496	}
1497	BUG_ON(i >= f->num_members);
1498	f->arr[i] = f->arr[f->num_members - 1];
1499	f->num_members--;
1500	if (f->num_members == 0)
1501		__dev_remove_pack(&f->prot_hook);
1502	spin_unlock(&f->lock);
1503}
1504
1505static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1506{
1507	if (sk->sk_family != PF_PACKET)
1508		return false;
1509
1510	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1511}
1512
1513static void fanout_init_data(struct packet_fanout *f)
1514{
1515	switch (f->type) {
1516	case PACKET_FANOUT_LB:
1517		atomic_set(&f->rr_cur, 0);
1518		break;
1519	case PACKET_FANOUT_CBPF:
1520	case PACKET_FANOUT_EBPF:
1521		RCU_INIT_POINTER(f->bpf_prog, NULL);
1522		break;
1523	}
1524}
1525
1526static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1527{
1528	struct bpf_prog *old;
1529
1530	spin_lock(&f->lock);
1531	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1532	rcu_assign_pointer(f->bpf_prog, new);
1533	spin_unlock(&f->lock);
1534
1535	if (old) {
1536		synchronize_net();
1537		bpf_prog_destroy(old);
1538	}
1539}
1540
1541static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1542				unsigned int len)
1543{
1544	struct bpf_prog *new;
1545	struct sock_fprog fprog;
1546	int ret;
1547
1548	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1549		return -EPERM;
1550
1551	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1552	if (ret)
1553		return ret;
1554
1555	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1556	if (ret)
1557		return ret;
1558
1559	__fanout_set_data_bpf(po->fanout, new);
1560	return 0;
1561}
1562
1563static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1564				unsigned int len)
1565{
1566	struct bpf_prog *new;
1567	u32 fd;
1568
1569	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1570		return -EPERM;
1571	if (len != sizeof(fd))
1572		return -EINVAL;
1573	if (copy_from_sockptr(&fd, data, len))
1574		return -EFAULT;
1575
1576	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1577	if (IS_ERR(new))
1578		return PTR_ERR(new);
1579
1580	__fanout_set_data_bpf(po->fanout, new);
1581	return 0;
1582}
1583
1584static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1585			   unsigned int len)
1586{
1587	switch (po->fanout->type) {
1588	case PACKET_FANOUT_CBPF:
1589		return fanout_set_data_cbpf(po, data, len);
1590	case PACKET_FANOUT_EBPF:
1591		return fanout_set_data_ebpf(po, data, len);
1592	default:
1593		return -EINVAL;
1594	}
1595}
1596
1597static void fanout_release_data(struct packet_fanout *f)
1598{
1599	switch (f->type) {
1600	case PACKET_FANOUT_CBPF:
1601	case PACKET_FANOUT_EBPF:
1602		__fanout_set_data_bpf(f, NULL);
1603	}
1604}
1605
1606static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1607{
1608	struct packet_fanout *f;
1609
1610	list_for_each_entry(f, &fanout_list, list) {
1611		if (f->id == candidate_id &&
1612		    read_pnet(&f->net) == sock_net(sk)) {
1613			return false;
1614		}
1615	}
1616	return true;
1617}
1618
1619static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1620{
1621	u16 id = fanout_next_id;
1622
1623	do {
1624		if (__fanout_id_is_free(sk, id)) {
1625			*new_id = id;
1626			fanout_next_id = id + 1;
1627			return true;
1628		}
1629
1630		id++;
1631	} while (id != fanout_next_id);
1632
1633	return false;
1634}
1635
1636static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1637{
1638	struct packet_rollover *rollover = NULL;
1639	struct packet_sock *po = pkt_sk(sk);
1640	struct packet_fanout *f, *match;
1641	u8 type = type_flags & 0xff;
1642	u8 flags = type_flags >> 8;
1643	int err;
1644
1645	switch (type) {
1646	case PACKET_FANOUT_ROLLOVER:
1647		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1648			return -EINVAL;
1649	case PACKET_FANOUT_HASH:
1650	case PACKET_FANOUT_LB:
1651	case PACKET_FANOUT_CPU:
1652	case PACKET_FANOUT_RND:
1653	case PACKET_FANOUT_QM:
1654	case PACKET_FANOUT_CBPF:
1655	case PACKET_FANOUT_EBPF:
1656		break;
1657	default:
1658		return -EINVAL;
1659	}
1660
1661	mutex_lock(&fanout_mutex);
1662
1663	err = -EALREADY;
1664	if (po->fanout)
1665		goto out;
1666
1667	if (type == PACKET_FANOUT_ROLLOVER ||
1668	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1669		err = -ENOMEM;
1670		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1671		if (!rollover)
1672			goto out;
1673		atomic_long_set(&rollover->num, 0);
1674		atomic_long_set(&rollover->num_huge, 0);
1675		atomic_long_set(&rollover->num_failed, 0);
1676	}
1677
1678	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1679		if (id != 0) {
1680			err = -EINVAL;
1681			goto out;
1682		}
1683		if (!fanout_find_new_id(sk, &id)) {
1684			err = -ENOMEM;
1685			goto out;
1686		}
1687		/* ephemeral flag for the first socket in the group: drop it */
1688		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1689	}
1690
1691	match = NULL;
1692	list_for_each_entry(f, &fanout_list, list) {
1693		if (f->id == id &&
1694		    read_pnet(&f->net) == sock_net(sk)) {
1695			match = f;
1696			break;
1697		}
1698	}
1699	err = -EINVAL;
1700	if (match && match->flags != flags)
1701		goto out;
1702	if (!match) {
1703		err = -ENOMEM;
1704		match = kzalloc(sizeof(*match), GFP_KERNEL);
1705		if (!match)
1706			goto out;
1707		write_pnet(&match->net, sock_net(sk));
1708		match->id = id;
1709		match->type = type;
1710		match->flags = flags;
1711		INIT_LIST_HEAD(&match->list);
1712		spin_lock_init(&match->lock);
1713		refcount_set(&match->sk_ref, 0);
1714		fanout_init_data(match);
1715		match->prot_hook.type = po->prot_hook.type;
1716		match->prot_hook.dev = po->prot_hook.dev;
1717		match->prot_hook.func = packet_rcv_fanout;
1718		match->prot_hook.af_packet_priv = match;
1719		match->prot_hook.id_match = match_fanout_group;
1720		list_add(&match->list, &fanout_list);
1721	}
1722	err = -EINVAL;
1723
1724	spin_lock(&po->bind_lock);
1725	if (po->running &&
1726	    match->type == type &&
1727	    match->prot_hook.type == po->prot_hook.type &&
1728	    match->prot_hook.dev == po->prot_hook.dev) {
1729		err = -ENOSPC;
1730		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1731			__dev_remove_pack(&po->prot_hook);
1732			po->fanout = match;
1733			po->rollover = rollover;
1734			rollover = NULL;
1735			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1736			__fanout_link(sk, po);
1737			err = 0;
1738		}
1739	}
1740	spin_unlock(&po->bind_lock);
1741
1742	if (err && !refcount_read(&match->sk_ref)) {
1743		list_del(&match->list);
1744		kfree(match);
1745	}
1746
1747out:
1748	kfree(rollover);
1749	mutex_unlock(&fanout_mutex);
1750	return err;
1751}
1752
1753/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1754 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1755 * It is the responsibility of the caller to call fanout_release_data() and
1756 * free the returned packet_fanout (after synchronize_net())
1757 */
1758static struct packet_fanout *fanout_release(struct sock *sk)
1759{
1760	struct packet_sock *po = pkt_sk(sk);
1761	struct packet_fanout *f;
1762
1763	mutex_lock(&fanout_mutex);
1764	f = po->fanout;
1765	if (f) {
1766		po->fanout = NULL;
1767
1768		if (refcount_dec_and_test(&f->sk_ref))
1769			list_del(&f->list);
1770		else
1771			f = NULL;
1772	}
1773	mutex_unlock(&fanout_mutex);
1774
1775	return f;
1776}
1777
1778static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1779					  struct sk_buff *skb)
1780{
1781	/* Earlier code assumed this would be a VLAN pkt, double-check
1782	 * this now that we have the actual packet in hand. We can only
1783	 * do this check on Ethernet devices.
1784	 */
1785	if (unlikely(dev->type != ARPHRD_ETHER))
1786		return false;
1787
1788	skb_reset_mac_header(skb);
1789	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1790}
1791
1792static const struct proto_ops packet_ops;
1793
1794static const struct proto_ops packet_ops_spkt;
1795
1796static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1797			   struct packet_type *pt, struct net_device *orig_dev)
1798{
1799	struct sock *sk;
1800	struct sockaddr_pkt *spkt;
1801
1802	/*
1803	 *	When we registered the protocol we saved the socket in the data
1804	 *	field for just this event.
1805	 */
1806
1807	sk = pt->af_packet_priv;
1808
1809	/*
1810	 *	Yank back the headers [hope the device set this
1811	 *	right or kerboom...]
1812	 *
1813	 *	Incoming packets have ll header pulled,
1814	 *	push it back.
1815	 *
1816	 *	For outgoing ones skb->data == skb_mac_header(skb)
1817	 *	so that this procedure is noop.
1818	 */
1819
1820	if (skb->pkt_type == PACKET_LOOPBACK)
1821		goto out;
1822
1823	if (!net_eq(dev_net(dev), sock_net(sk)))
1824		goto out;
1825
1826	skb = skb_share_check(skb, GFP_ATOMIC);
1827	if (skb == NULL)
1828		goto oom;
1829
1830	/* drop any routing info */
1831	skb_dst_drop(skb);
1832
1833	/* drop conntrack reference */
1834	nf_reset_ct(skb);
1835
1836	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1837
1838	skb_push(skb, skb->data - skb_mac_header(skb));
1839
1840	/*
1841	 *	The SOCK_PACKET socket receives _all_ frames.
1842	 */
1843
1844	spkt->spkt_family = dev->type;
1845	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1846	spkt->spkt_protocol = skb->protocol;
1847
1848	/*
1849	 *	Charge the memory to the socket. This is done specifically
1850	 *	to prevent sockets using all the memory up.
1851	 */
1852
1853	if (sock_queue_rcv_skb(sk, skb) == 0)
1854		return 0;
1855
1856out:
1857	kfree_skb(skb);
1858oom:
1859	return 0;
1860}
1861
1862static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1863{
1864	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1865	    sock->type == SOCK_RAW) {
1866		skb_reset_mac_header(skb);
1867		skb->protocol = dev_parse_header_protocol(skb);
1868	}
1869
1870	skb_probe_transport_header(skb);
1871}
1872
1873/*
1874 *	Output a raw packet to a device layer. This bypasses all the other
1875 *	protocol layers and you must therefore supply it with a complete frame
1876 */
1877
1878static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1879			       size_t len)
1880{
1881	struct sock *sk = sock->sk;
1882	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1883	struct sk_buff *skb = NULL;
1884	struct net_device *dev;
1885	struct sockcm_cookie sockc;
1886	__be16 proto = 0;
1887	int err;
1888	int extra_len = 0;
1889
1890	/*
1891	 *	Get and verify the address.
1892	 */
1893
1894	if (saddr) {
1895		if (msg->msg_namelen < sizeof(struct sockaddr))
1896			return -EINVAL;
1897		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1898			proto = saddr->spkt_protocol;
1899	} else
1900		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1901
1902	/*
1903	 *	Find the device first to size check it
1904	 */
1905
1906	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1907retry:
1908	rcu_read_lock();
1909	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1910	err = -ENODEV;
1911	if (dev == NULL)
1912		goto out_unlock;
1913
1914	err = -ENETDOWN;
1915	if (!(dev->flags & IFF_UP))
1916		goto out_unlock;
1917
1918	/*
1919	 * You may not queue a frame bigger than the mtu. This is the lowest level
1920	 * raw protocol and you must do your own fragmentation at this level.
1921	 */
1922
1923	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1924		if (!netif_supports_nofcs(dev)) {
1925			err = -EPROTONOSUPPORT;
1926			goto out_unlock;
1927		}
1928		extra_len = 4; /* We're doing our own CRC */
1929	}
1930
1931	err = -EMSGSIZE;
1932	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1933		goto out_unlock;
1934
1935	if (!skb) {
1936		size_t reserved = LL_RESERVED_SPACE(dev);
1937		int tlen = dev->needed_tailroom;
1938		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1939
1940		rcu_read_unlock();
1941		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1942		if (skb == NULL)
1943			return -ENOBUFS;
1944		/* FIXME: Save some space for broken drivers that write a hard
1945		 * header at transmission time by themselves. PPP is the notable
1946		 * one here. This should really be fixed at the driver level.
1947		 */
1948		skb_reserve(skb, reserved);
1949		skb_reset_network_header(skb);
1950
1951		/* Try to align data part correctly */
1952		if (hhlen) {
1953			skb->data -= hhlen;
1954			skb->tail -= hhlen;
1955			if (len < hhlen)
1956				skb_reset_network_header(skb);
1957		}
1958		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1959		if (err)
1960			goto out_free;
1961		goto retry;
1962	}
1963
1964	if (!dev_validate_header(dev, skb->data, len)) {
1965		err = -EINVAL;
1966		goto out_unlock;
1967	}
1968	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1969	    !packet_extra_vlan_len_allowed(dev, skb)) {
1970		err = -EMSGSIZE;
1971		goto out_unlock;
1972	}
1973
1974	sockcm_init(&sockc, sk);
1975	if (msg->msg_controllen) {
1976		err = sock_cmsg_send(sk, msg, &sockc);
1977		if (unlikely(err))
1978			goto out_unlock;
1979	}
1980
1981	skb->protocol = proto;
1982	skb->dev = dev;
1983	skb->priority = sk->sk_priority;
1984	skb->mark = sk->sk_mark;
1985	skb->tstamp = sockc.transmit_time;
1986
1987	skb_setup_tx_timestamp(skb, sockc.tsflags);
1988
1989	if (unlikely(extra_len == 4))
1990		skb->no_fcs = 1;
1991
1992	packet_parse_headers(skb, sock);
1993
1994	dev_queue_xmit(skb);
1995	rcu_read_unlock();
1996	return len;
1997
1998out_unlock:
1999	rcu_read_unlock();
2000out_free:
2001	kfree_skb(skb);
2002	return err;
2003}
2004
2005static unsigned int run_filter(struct sk_buff *skb,
2006			       const struct sock *sk,
2007			       unsigned int res)
2008{
2009	struct sk_filter *filter;
2010
2011	rcu_read_lock();
2012	filter = rcu_dereference(sk->sk_filter);
2013	if (filter != NULL)
2014		res = bpf_prog_run_clear_cb(filter->prog, skb);
2015	rcu_read_unlock();
2016
2017	return res;
2018}
2019
2020static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2021			   size_t *len)
2022{
2023	struct virtio_net_hdr vnet_hdr;
2024
2025	if (*len < sizeof(vnet_hdr))
2026		return -EINVAL;
2027	*len -= sizeof(vnet_hdr);
2028
2029	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2030		return -EINVAL;
2031
2032	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2033}
2034
2035/*
2036 * This function makes lazy skb cloning in hope that most of packets
2037 * are discarded by BPF.
2038 *
2039 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2040 * and skb->cb are mangled. It works because (and until) packets
2041 * falling here are owned by current CPU. Output packets are cloned
2042 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2043 * sequencially, so that if we return skb to original state on exit,
2044 * we will not harm anyone.
2045 */
2046
2047static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2048		      struct packet_type *pt, struct net_device *orig_dev)
2049{
2050	struct sock *sk;
2051	struct sockaddr_ll *sll;
2052	struct packet_sock *po;
2053	u8 *skb_head = skb->data;
2054	int skb_len = skb->len;
2055	unsigned int snaplen, res;
2056	bool is_drop_n_account = false;
2057
2058	if (skb->pkt_type == PACKET_LOOPBACK)
2059		goto drop;
2060
2061	sk = pt->af_packet_priv;
2062	po = pkt_sk(sk);
2063
2064	if (!net_eq(dev_net(dev), sock_net(sk)))
2065		goto drop;
2066
2067	skb->dev = dev;
2068
2069	if (dev->header_ops) {
2070		/* The device has an explicit notion of ll header,
2071		 * exported to higher levels.
2072		 *
2073		 * Otherwise, the device hides details of its frame
2074		 * structure, so that corresponding packet head is
2075		 * never delivered to user.
2076		 */
2077		if (sk->sk_type != SOCK_DGRAM)
2078			skb_push(skb, skb->data - skb_mac_header(skb));
2079		else if (skb->pkt_type == PACKET_OUTGOING) {
2080			/* Special case: outgoing packets have ll header at head */
2081			skb_pull(skb, skb_network_offset(skb));
2082		}
2083	}
2084
2085	snaplen = skb->len;
2086
2087	res = run_filter(skb, sk, snaplen);
2088	if (!res)
2089		goto drop_n_restore;
2090	if (snaplen > res)
2091		snaplen = res;
2092
2093	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2094		goto drop_n_acct;
2095
2096	if (skb_shared(skb)) {
2097		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2098		if (nskb == NULL)
2099			goto drop_n_acct;
2100
2101		if (skb_head != skb->data) {
2102			skb->data = skb_head;
2103			skb->len = skb_len;
2104		}
2105		consume_skb(skb);
2106		skb = nskb;
2107	}
2108
2109	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2110
2111	sll = &PACKET_SKB_CB(skb)->sa.ll;
2112	sll->sll_hatype = dev->type;
2113	sll->sll_pkttype = skb->pkt_type;
2114	if (unlikely(po->origdev))
2115		sll->sll_ifindex = orig_dev->ifindex;
2116	else
2117		sll->sll_ifindex = dev->ifindex;
2118
2119	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2120
2121	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2122	 * Use their space for storing the original skb length.
2123	 */
2124	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2125
2126	if (pskb_trim(skb, snaplen))
2127		goto drop_n_acct;
2128
2129	skb_set_owner_r(skb, sk);
2130	skb->dev = NULL;
2131	skb_dst_drop(skb);
2132
2133	/* drop conntrack reference */
2134	nf_reset_ct(skb);
2135
2136	spin_lock(&sk->sk_receive_queue.lock);
2137	po->stats.stats1.tp_packets++;
2138	sock_skb_set_dropcount(sk, skb);
2139	__skb_queue_tail(&sk->sk_receive_queue, skb);
2140	spin_unlock(&sk->sk_receive_queue.lock);
2141	sk->sk_data_ready(sk);
2142	return 0;
2143
2144drop_n_acct:
2145	is_drop_n_account = true;
2146	atomic_inc(&po->tp_drops);
2147	atomic_inc(&sk->sk_drops);
2148
2149drop_n_restore:
2150	if (skb_head != skb->data && skb_shared(skb)) {
2151		skb->data = skb_head;
2152		skb->len = skb_len;
2153	}
2154drop:
2155	if (!is_drop_n_account)
2156		consume_skb(skb);
2157	else
2158		kfree_skb(skb);
2159	return 0;
2160}
2161
2162static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2163		       struct packet_type *pt, struct net_device *orig_dev)
2164{
2165	struct sock *sk;
2166	struct packet_sock *po;
2167	struct sockaddr_ll *sll;
2168	union tpacket_uhdr h;
2169	u8 *skb_head = skb->data;
2170	int skb_len = skb->len;
2171	unsigned int snaplen, res;
2172	unsigned long status = TP_STATUS_USER;
2173	unsigned short macoff, hdrlen;
2174	unsigned int netoff;
2175	struct sk_buff *copy_skb = NULL;
2176	struct timespec64 ts;
2177	__u32 ts_status;
2178	bool is_drop_n_account = false;
2179	unsigned int slot_id = 0;
2180	bool do_vnet = false;
2181
2182	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2183	 * We may add members to them until current aligned size without forcing
2184	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2185	 */
2186	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2187	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2188
2189	if (skb->pkt_type == PACKET_LOOPBACK)
2190		goto drop;
2191
2192	sk = pt->af_packet_priv;
2193	po = pkt_sk(sk);
2194
2195	if (!net_eq(dev_net(dev), sock_net(sk)))
2196		goto drop;
2197
2198	if (dev->header_ops) {
2199		if (sk->sk_type != SOCK_DGRAM)
2200			skb_push(skb, skb->data - skb_mac_header(skb));
2201		else if (skb->pkt_type == PACKET_OUTGOING) {
2202			/* Special case: outgoing packets have ll header at head */
2203			skb_pull(skb, skb_network_offset(skb));
2204		}
2205	}
2206
2207	snaplen = skb->len;
2208
2209	res = run_filter(skb, sk, snaplen);
2210	if (!res)
2211		goto drop_n_restore;
2212
2213	/* If we are flooded, just give up */
2214	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2215		atomic_inc(&po->tp_drops);
2216		goto drop_n_restore;
2217	}
2218
2219	if (skb->ip_summed == CHECKSUM_PARTIAL)
2220		status |= TP_STATUS_CSUMNOTREADY;
2221	else if (skb->pkt_type != PACKET_OUTGOING &&
2222		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2223		  skb_csum_unnecessary(skb)))
2224		status |= TP_STATUS_CSUM_VALID;
2225
2226	if (snaplen > res)
2227		snaplen = res;
2228
2229	if (sk->sk_type == SOCK_DGRAM) {
2230		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2231				  po->tp_reserve;
2232	} else {
2233		unsigned int maclen = skb_network_offset(skb);
2234		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2235				       (maclen < 16 ? 16 : maclen)) +
2236				       po->tp_reserve;
2237		if (po->has_vnet_hdr) {
2238			netoff += sizeof(struct virtio_net_hdr);
2239			do_vnet = true;
2240		}
2241		macoff = netoff - maclen;
2242	}
2243	if (netoff > USHRT_MAX) {
2244		atomic_inc(&po->tp_drops);
2245		goto drop_n_restore;
2246	}
2247	if (po->tp_version <= TPACKET_V2) {
2248		if (macoff + snaplen > po->rx_ring.frame_size) {
2249			if (po->copy_thresh &&
2250			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2251				if (skb_shared(skb)) {
2252					copy_skb = skb_clone(skb, GFP_ATOMIC);
2253				} else {
2254					copy_skb = skb_get(skb);
2255					skb_head = skb->data;
2256				}
2257				if (copy_skb)
2258					skb_set_owner_r(copy_skb, sk);
2259			}
2260			snaplen = po->rx_ring.frame_size - macoff;
2261			if ((int)snaplen < 0) {
2262				snaplen = 0;
2263				do_vnet = false;
2264			}
2265		}
2266	} else if (unlikely(macoff + snaplen >
2267			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2268		u32 nval;
2269
2270		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2271		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2272			    snaplen, nval, macoff);
2273		snaplen = nval;
2274		if (unlikely((int)snaplen < 0)) {
2275			snaplen = 0;
2276			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2277			do_vnet = false;
2278		}
2279	}
2280	spin_lock(&sk->sk_receive_queue.lock);
2281	h.raw = packet_current_rx_frame(po, skb,
2282					TP_STATUS_KERNEL, (macoff+snaplen));
2283	if (!h.raw)
2284		goto drop_n_account;
2285
2286	if (po->tp_version <= TPACKET_V2) {
2287		slot_id = po->rx_ring.head;
2288		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2289			goto drop_n_account;
2290		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2291	}
2292
2293	if (do_vnet &&
2294	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2295				    sizeof(struct virtio_net_hdr),
2296				    vio_le(), true, 0)) {
2297		if (po->tp_version == TPACKET_V3)
2298			prb_clear_blk_fill_status(&po->rx_ring);
2299		goto drop_n_account;
2300	}
2301
2302	if (po->tp_version <= TPACKET_V2) {
2303		packet_increment_rx_head(po, &po->rx_ring);
2304	/*
2305	 * LOSING will be reported till you read the stats,
2306	 * because it's COR - Clear On Read.
2307	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2308	 * at packet level.
2309	 */
2310		if (atomic_read(&po->tp_drops))
2311			status |= TP_STATUS_LOSING;
2312	}
2313
 
 
 
 
 
 
2314	po->stats.stats1.tp_packets++;
2315	if (copy_skb) {
2316		status |= TP_STATUS_COPY;
2317		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2318	}
2319	spin_unlock(&sk->sk_receive_queue.lock);
2320
2321	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2322
2323	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2324		ktime_get_real_ts64(&ts);
2325
2326	status |= ts_status;
2327
2328	switch (po->tp_version) {
2329	case TPACKET_V1:
2330		h.h1->tp_len = skb->len;
2331		h.h1->tp_snaplen = snaplen;
2332		h.h1->tp_mac = macoff;
2333		h.h1->tp_net = netoff;
2334		h.h1->tp_sec = ts.tv_sec;
2335		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2336		hdrlen = sizeof(*h.h1);
2337		break;
2338	case TPACKET_V2:
2339		h.h2->tp_len = skb->len;
2340		h.h2->tp_snaplen = snaplen;
2341		h.h2->tp_mac = macoff;
2342		h.h2->tp_net = netoff;
2343		h.h2->tp_sec = ts.tv_sec;
2344		h.h2->tp_nsec = ts.tv_nsec;
2345		if (skb_vlan_tag_present(skb)) {
2346			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2347			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2348			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2349		} else {
2350			h.h2->tp_vlan_tci = 0;
2351			h.h2->tp_vlan_tpid = 0;
2352		}
2353		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2354		hdrlen = sizeof(*h.h2);
2355		break;
2356	case TPACKET_V3:
2357		/* tp_nxt_offset,vlan are already populated above.
2358		 * So DONT clear those fields here
2359		 */
2360		h.h3->tp_status |= status;
2361		h.h3->tp_len = skb->len;
2362		h.h3->tp_snaplen = snaplen;
2363		h.h3->tp_mac = macoff;
2364		h.h3->tp_net = netoff;
2365		h.h3->tp_sec  = ts.tv_sec;
2366		h.h3->tp_nsec = ts.tv_nsec;
2367		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2368		hdrlen = sizeof(*h.h3);
2369		break;
2370	default:
2371		BUG();
2372	}
2373
2374	sll = h.raw + TPACKET_ALIGN(hdrlen);
2375	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2376	sll->sll_family = AF_PACKET;
2377	sll->sll_hatype = dev->type;
2378	sll->sll_protocol = skb->protocol;
2379	sll->sll_pkttype = skb->pkt_type;
2380	if (unlikely(po->origdev))
2381		sll->sll_ifindex = orig_dev->ifindex;
2382	else
2383		sll->sll_ifindex = dev->ifindex;
2384
2385	smp_mb();
2386
2387#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2388	if (po->tp_version <= TPACKET_V2) {
2389		u8 *start, *end;
2390
2391		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2392					macoff + snaplen);
2393
2394		for (start = h.raw; start < end; start += PAGE_SIZE)
2395			flush_dcache_page(pgv_to_page(start));
2396	}
2397	smp_wmb();
2398#endif
2399
2400	if (po->tp_version <= TPACKET_V2) {
2401		spin_lock(&sk->sk_receive_queue.lock);
2402		__packet_set_status(po, h.raw, status);
2403		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2404		spin_unlock(&sk->sk_receive_queue.lock);
2405		sk->sk_data_ready(sk);
2406	} else if (po->tp_version == TPACKET_V3) {
2407		prb_clear_blk_fill_status(&po->rx_ring);
2408	}
2409
2410drop_n_restore:
2411	if (skb_head != skb->data && skb_shared(skb)) {
2412		skb->data = skb_head;
2413		skb->len = skb_len;
2414	}
2415drop:
2416	if (!is_drop_n_account)
2417		consume_skb(skb);
2418	else
2419		kfree_skb(skb);
2420	return 0;
2421
2422drop_n_account:
2423	spin_unlock(&sk->sk_receive_queue.lock);
2424	atomic_inc(&po->tp_drops);
2425	is_drop_n_account = true;
2426
2427	sk->sk_data_ready(sk);
2428	kfree_skb(copy_skb);
2429	goto drop_n_restore;
2430}
2431
2432static void tpacket_destruct_skb(struct sk_buff *skb)
2433{
2434	struct packet_sock *po = pkt_sk(skb->sk);
2435
2436	if (likely(po->tx_ring.pg_vec)) {
2437		void *ph;
2438		__u32 ts;
2439
2440		ph = skb_zcopy_get_nouarg(skb);
2441		packet_dec_pending(&po->tx_ring);
2442
2443		ts = __packet_set_timestamp(po, ph, skb);
2444		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2445
2446		if (!packet_read_pending(&po->tx_ring))
2447			complete(&po->skb_completion);
2448	}
2449
2450	sock_wfree(skb);
2451}
2452
2453static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2454{
2455	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2456	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2457	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2458	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2459		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2460			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2461			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2462
2463	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2464		return -EINVAL;
2465
2466	return 0;
2467}
2468
2469static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2470				 struct virtio_net_hdr *vnet_hdr)
2471{
2472	if (*len < sizeof(*vnet_hdr))
2473		return -EINVAL;
2474	*len -= sizeof(*vnet_hdr);
2475
2476	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2477		return -EFAULT;
2478
2479	return __packet_snd_vnet_parse(vnet_hdr, *len);
2480}
2481
2482static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2483		void *frame, struct net_device *dev, void *data, int tp_len,
2484		__be16 proto, unsigned char *addr, int hlen, int copylen,
2485		const struct sockcm_cookie *sockc)
2486{
2487	union tpacket_uhdr ph;
2488	int to_write, offset, len, nr_frags, len_max;
2489	struct socket *sock = po->sk.sk_socket;
2490	struct page *page;
2491	int err;
2492
2493	ph.raw = frame;
2494
2495	skb->protocol = proto;
2496	skb->dev = dev;
2497	skb->priority = po->sk.sk_priority;
2498	skb->mark = po->sk.sk_mark;
2499	skb->tstamp = sockc->transmit_time;
2500	skb_setup_tx_timestamp(skb, sockc->tsflags);
2501	skb_zcopy_set_nouarg(skb, ph.raw);
2502
2503	skb_reserve(skb, hlen);
2504	skb_reset_network_header(skb);
2505
2506	to_write = tp_len;
2507
2508	if (sock->type == SOCK_DGRAM) {
2509		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2510				NULL, tp_len);
2511		if (unlikely(err < 0))
2512			return -EINVAL;
2513	} else if (copylen) {
2514		int hdrlen = min_t(int, copylen, tp_len);
2515
2516		skb_push(skb, dev->hard_header_len);
2517		skb_put(skb, copylen - dev->hard_header_len);
2518		err = skb_store_bits(skb, 0, data, hdrlen);
2519		if (unlikely(err))
2520			return err;
2521		if (!dev_validate_header(dev, skb->data, hdrlen))
2522			return -EINVAL;
2523
2524		data += hdrlen;
2525		to_write -= hdrlen;
2526	}
2527
2528	offset = offset_in_page(data);
2529	len_max = PAGE_SIZE - offset;
2530	len = ((to_write > len_max) ? len_max : to_write);
2531
2532	skb->data_len = to_write;
2533	skb->len += to_write;
2534	skb->truesize += to_write;
2535	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2536
2537	while (likely(to_write)) {
2538		nr_frags = skb_shinfo(skb)->nr_frags;
2539
2540		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2541			pr_err("Packet exceed the number of skb frags(%lu)\n",
2542			       MAX_SKB_FRAGS);
2543			return -EFAULT;
2544		}
2545
2546		page = pgv_to_page(data);
2547		data += len;
2548		flush_dcache_page(page);
2549		get_page(page);
2550		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2551		to_write -= len;
2552		offset = 0;
2553		len_max = PAGE_SIZE;
2554		len = ((to_write > len_max) ? len_max : to_write);
2555	}
2556
2557	packet_parse_headers(skb, sock);
2558
2559	return tp_len;
2560}
2561
2562static int tpacket_parse_header(struct packet_sock *po, void *frame,
2563				int size_max, void **data)
2564{
2565	union tpacket_uhdr ph;
2566	int tp_len, off;
2567
2568	ph.raw = frame;
2569
2570	switch (po->tp_version) {
2571	case TPACKET_V3:
2572		if (ph.h3->tp_next_offset != 0) {
2573			pr_warn_once("variable sized slot not supported");
2574			return -EINVAL;
2575		}
2576		tp_len = ph.h3->tp_len;
2577		break;
2578	case TPACKET_V2:
2579		tp_len = ph.h2->tp_len;
2580		break;
2581	default:
2582		tp_len = ph.h1->tp_len;
2583		break;
2584	}
2585	if (unlikely(tp_len > size_max)) {
2586		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2587		return -EMSGSIZE;
2588	}
2589
2590	if (unlikely(po->tp_tx_has_off)) {
2591		int off_min, off_max;
2592
2593		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2594		off_max = po->tx_ring.frame_size - tp_len;
2595		if (po->sk.sk_type == SOCK_DGRAM) {
2596			switch (po->tp_version) {
2597			case TPACKET_V3:
2598				off = ph.h3->tp_net;
2599				break;
2600			case TPACKET_V2:
2601				off = ph.h2->tp_net;
2602				break;
2603			default:
2604				off = ph.h1->tp_net;
2605				break;
2606			}
2607		} else {
2608			switch (po->tp_version) {
2609			case TPACKET_V3:
2610				off = ph.h3->tp_mac;
2611				break;
2612			case TPACKET_V2:
2613				off = ph.h2->tp_mac;
2614				break;
2615			default:
2616				off = ph.h1->tp_mac;
2617				break;
2618			}
2619		}
2620		if (unlikely((off < off_min) || (off_max < off)))
2621			return -EINVAL;
2622	} else {
2623		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2624	}
2625
2626	*data = frame + off;
2627	return tp_len;
2628}
2629
2630static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2631{
2632	struct sk_buff *skb = NULL;
2633	struct net_device *dev;
2634	struct virtio_net_hdr *vnet_hdr = NULL;
2635	struct sockcm_cookie sockc;
2636	__be16 proto;
2637	int err, reserve = 0;
2638	void *ph;
2639	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2640	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2641	unsigned char *addr = NULL;
2642	int tp_len, size_max;
2643	void *data;
2644	int len_sum = 0;
2645	int status = TP_STATUS_AVAILABLE;
2646	int hlen, tlen, copylen = 0;
2647	long timeo = 0;
2648
2649	mutex_lock(&po->pg_vec_lock);
2650
2651	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2652	 * we need to confirm it under protection of pg_vec_lock.
2653	 */
2654	if (unlikely(!po->tx_ring.pg_vec)) {
2655		err = -EBUSY;
2656		goto out;
2657	}
2658	if (likely(saddr == NULL)) {
2659		dev	= packet_cached_dev_get(po);
2660		proto	= po->num;
2661	} else {
2662		err = -EINVAL;
2663		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2664			goto out;
2665		if (msg->msg_namelen < (saddr->sll_halen
2666					+ offsetof(struct sockaddr_ll,
2667						sll_addr)))
2668			goto out;
2669		proto	= saddr->sll_protocol;
2670		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2671		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2672			if (dev && msg->msg_namelen < dev->addr_len +
2673				   offsetof(struct sockaddr_ll, sll_addr))
2674				goto out_put;
2675			addr = saddr->sll_addr;
2676		}
2677	}
2678
2679	err = -ENXIO;
2680	if (unlikely(dev == NULL))
2681		goto out;
2682	err = -ENETDOWN;
2683	if (unlikely(!(dev->flags & IFF_UP)))
2684		goto out_put;
2685
2686	sockcm_init(&sockc, &po->sk);
2687	if (msg->msg_controllen) {
2688		err = sock_cmsg_send(&po->sk, msg, &sockc);
2689		if (unlikely(err))
2690			goto out_put;
2691	}
2692
2693	if (po->sk.sk_socket->type == SOCK_RAW)
2694		reserve = dev->hard_header_len;
2695	size_max = po->tx_ring.frame_size
2696		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2697
2698	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2699		size_max = dev->mtu + reserve + VLAN_HLEN;
2700
2701	reinit_completion(&po->skb_completion);
2702
2703	do {
2704		ph = packet_current_frame(po, &po->tx_ring,
2705					  TP_STATUS_SEND_REQUEST);
2706		if (unlikely(ph == NULL)) {
2707			if (need_wait && skb) {
2708				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2709				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2710				if (timeo <= 0) {
2711					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2712					goto out_put;
2713				}
2714			}
2715			/* check for additional frames */
2716			continue;
2717		}
2718
2719		skb = NULL;
2720		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2721		if (tp_len < 0)
2722			goto tpacket_error;
2723
2724		status = TP_STATUS_SEND_REQUEST;
2725		hlen = LL_RESERVED_SPACE(dev);
2726		tlen = dev->needed_tailroom;
2727		if (po->has_vnet_hdr) {
2728			vnet_hdr = data;
2729			data += sizeof(*vnet_hdr);
2730			tp_len -= sizeof(*vnet_hdr);
2731			if (tp_len < 0 ||
2732			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2733				tp_len = -EINVAL;
2734				goto tpacket_error;
2735			}
2736			copylen = __virtio16_to_cpu(vio_le(),
2737						    vnet_hdr->hdr_len);
2738		}
2739		copylen = max_t(int, copylen, dev->hard_header_len);
2740		skb = sock_alloc_send_skb(&po->sk,
2741				hlen + tlen + sizeof(struct sockaddr_ll) +
2742				(copylen - dev->hard_header_len),
2743				!need_wait, &err);
2744
2745		if (unlikely(skb == NULL)) {
2746			/* we assume the socket was initially writeable ... */
2747			if (likely(len_sum > 0))
2748				err = len_sum;
2749			goto out_status;
2750		}
2751		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2752					  addr, hlen, copylen, &sockc);
2753		if (likely(tp_len >= 0) &&
2754		    tp_len > dev->mtu + reserve &&
2755		    !po->has_vnet_hdr &&
2756		    !packet_extra_vlan_len_allowed(dev, skb))
2757			tp_len = -EMSGSIZE;
2758
2759		if (unlikely(tp_len < 0)) {
2760tpacket_error:
2761			if (po->tp_loss) {
2762				__packet_set_status(po, ph,
2763						TP_STATUS_AVAILABLE);
2764				packet_increment_head(&po->tx_ring);
2765				kfree_skb(skb);
2766				continue;
2767			} else {
2768				status = TP_STATUS_WRONG_FORMAT;
2769				err = tp_len;
2770				goto out_status;
2771			}
2772		}
2773
2774		if (po->has_vnet_hdr) {
2775			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2776				tp_len = -EINVAL;
2777				goto tpacket_error;
2778			}
2779			virtio_net_hdr_set_proto(skb, vnet_hdr);
2780		}
2781
2782		skb->destructor = tpacket_destruct_skb;
2783		__packet_set_status(po, ph, TP_STATUS_SENDING);
2784		packet_inc_pending(&po->tx_ring);
2785
2786		status = TP_STATUS_SEND_REQUEST;
2787		err = po->xmit(skb);
2788		if (unlikely(err > 0)) {
2789			err = net_xmit_errno(err);
2790			if (err && __packet_get_status(po, ph) ==
2791				   TP_STATUS_AVAILABLE) {
2792				/* skb was destructed already */
2793				skb = NULL;
2794				goto out_status;
2795			}
2796			/*
2797			 * skb was dropped but not destructed yet;
2798			 * let's treat it like congestion or err < 0
2799			 */
2800			err = 0;
2801		}
2802		packet_increment_head(&po->tx_ring);
2803		len_sum += tp_len;
2804	} while (likely((ph != NULL) ||
2805		/* Note: packet_read_pending() might be slow if we have
2806		 * to call it as it's per_cpu variable, but in fast-path
2807		 * we already short-circuit the loop with the first
2808		 * condition, and luckily don't have to go that path
2809		 * anyway.
2810		 */
2811		 (need_wait && packet_read_pending(&po->tx_ring))));
2812
2813	err = len_sum;
2814	goto out_put;
2815
2816out_status:
2817	__packet_set_status(po, ph, status);
2818	kfree_skb(skb);
2819out_put:
2820	dev_put(dev);
2821out:
2822	mutex_unlock(&po->pg_vec_lock);
2823	return err;
2824}
2825
2826static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2827				        size_t reserve, size_t len,
2828				        size_t linear, int noblock,
2829				        int *err)
2830{
2831	struct sk_buff *skb;
2832
2833	/* Under a page?  Don't bother with paged skb. */
2834	if (prepad + len < PAGE_SIZE || !linear)
2835		linear = len;
2836
2837	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2838				   err, 0);
2839	if (!skb)
2840		return NULL;
2841
2842	skb_reserve(skb, reserve);
2843	skb_put(skb, linear);
2844	skb->data_len = len - linear;
2845	skb->len += len - linear;
2846
2847	return skb;
2848}
2849
2850static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2851{
2852	struct sock *sk = sock->sk;
2853	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2854	struct sk_buff *skb;
2855	struct net_device *dev;
2856	__be16 proto;
2857	unsigned char *addr = NULL;
2858	int err, reserve = 0;
2859	struct sockcm_cookie sockc;
2860	struct virtio_net_hdr vnet_hdr = { 0 };
2861	int offset = 0;
2862	struct packet_sock *po = pkt_sk(sk);
2863	bool has_vnet_hdr = false;
2864	int hlen, tlen, linear;
2865	int extra_len = 0;
2866
2867	/*
2868	 *	Get and verify the address.
2869	 */
2870
2871	if (likely(saddr == NULL)) {
2872		dev	= packet_cached_dev_get(po);
2873		proto	= po->num;
2874	} else {
2875		err = -EINVAL;
2876		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2877			goto out;
2878		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2879			goto out;
2880		proto	= saddr->sll_protocol;
2881		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2882		if (sock->type == SOCK_DGRAM) {
2883			if (dev && msg->msg_namelen < dev->addr_len +
2884				   offsetof(struct sockaddr_ll, sll_addr))
2885				goto out_unlock;
2886			addr = saddr->sll_addr;
2887		}
2888	}
2889
2890	err = -ENXIO;
2891	if (unlikely(dev == NULL))
2892		goto out_unlock;
2893	err = -ENETDOWN;
2894	if (unlikely(!(dev->flags & IFF_UP)))
2895		goto out_unlock;
2896
2897	sockcm_init(&sockc, sk);
2898	sockc.mark = sk->sk_mark;
2899	if (msg->msg_controllen) {
2900		err = sock_cmsg_send(sk, msg, &sockc);
2901		if (unlikely(err))
2902			goto out_unlock;
2903	}
2904
2905	if (sock->type == SOCK_RAW)
2906		reserve = dev->hard_header_len;
2907	if (po->has_vnet_hdr) {
2908		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2909		if (err)
2910			goto out_unlock;
2911		has_vnet_hdr = true;
2912	}
2913
2914	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2915		if (!netif_supports_nofcs(dev)) {
2916			err = -EPROTONOSUPPORT;
2917			goto out_unlock;
2918		}
2919		extra_len = 4; /* We're doing our own CRC */
2920	}
2921
2922	err = -EMSGSIZE;
2923	if (!vnet_hdr.gso_type &&
2924	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2925		goto out_unlock;
2926
2927	err = -ENOBUFS;
2928	hlen = LL_RESERVED_SPACE(dev);
2929	tlen = dev->needed_tailroom;
2930	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2931	linear = max(linear, min_t(int, len, dev->hard_header_len));
2932	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2933			       msg->msg_flags & MSG_DONTWAIT, &err);
2934	if (skb == NULL)
2935		goto out_unlock;
2936
2937	skb_reset_network_header(skb);
2938
2939	err = -EINVAL;
2940	if (sock->type == SOCK_DGRAM) {
2941		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2942		if (unlikely(offset < 0))
2943			goto out_free;
2944	} else if (reserve) {
2945		skb_reserve(skb, -reserve);
2946		if (len < reserve + sizeof(struct ipv6hdr) &&
2947		    dev->min_header_len != dev->hard_header_len)
2948			skb_reset_network_header(skb);
2949	}
2950
2951	/* Returns -EFAULT on error */
2952	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2953	if (err)
2954		goto out_free;
2955
2956	if (sock->type == SOCK_RAW &&
2957	    !dev_validate_header(dev, skb->data, len)) {
2958		err = -EINVAL;
2959		goto out_free;
2960	}
2961
2962	skb_setup_tx_timestamp(skb, sockc.tsflags);
2963
2964	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2965	    !packet_extra_vlan_len_allowed(dev, skb)) {
2966		err = -EMSGSIZE;
2967		goto out_free;
2968	}
2969
2970	skb->protocol = proto;
2971	skb->dev = dev;
2972	skb->priority = sk->sk_priority;
2973	skb->mark = sockc.mark;
2974	skb->tstamp = sockc.transmit_time;
2975
2976	if (has_vnet_hdr) {
2977		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2978		if (err)
2979			goto out_free;
2980		len += sizeof(vnet_hdr);
2981		virtio_net_hdr_set_proto(skb, &vnet_hdr);
2982	}
2983
2984	packet_parse_headers(skb, sock);
2985
2986	if (unlikely(extra_len == 4))
2987		skb->no_fcs = 1;
2988
2989	err = po->xmit(skb);
2990	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2991		goto out_unlock;
2992
2993	dev_put(dev);
2994
2995	return len;
2996
2997out_free:
2998	kfree_skb(skb);
2999out_unlock:
3000	if (dev)
3001		dev_put(dev);
3002out:
3003	return err;
3004}
3005
3006static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3007{
3008	struct sock *sk = sock->sk;
3009	struct packet_sock *po = pkt_sk(sk);
3010
3011	if (po->tx_ring.pg_vec)
3012		return tpacket_snd(po, msg);
3013	else
3014		return packet_snd(sock, msg, len);
3015}
3016
3017/*
3018 *	Close a PACKET socket. This is fairly simple. We immediately go
3019 *	to 'closed' state and remove our protocol entry in the device list.
3020 */
3021
3022static int packet_release(struct socket *sock)
3023{
3024	struct sock *sk = sock->sk;
3025	struct packet_sock *po;
3026	struct packet_fanout *f;
3027	struct net *net;
3028	union tpacket_req_u req_u;
3029
3030	if (!sk)
3031		return 0;
3032
3033	net = sock_net(sk);
3034	po = pkt_sk(sk);
3035
3036	mutex_lock(&net->packet.sklist_lock);
3037	sk_del_node_init_rcu(sk);
3038	mutex_unlock(&net->packet.sklist_lock);
3039
3040	preempt_disable();
3041	sock_prot_inuse_add(net, sk->sk_prot, -1);
3042	preempt_enable();
3043
3044	spin_lock(&po->bind_lock);
3045	unregister_prot_hook(sk, false);
3046	packet_cached_dev_reset(po);
3047
3048	if (po->prot_hook.dev) {
3049		dev_put(po->prot_hook.dev);
3050		po->prot_hook.dev = NULL;
3051	}
3052	spin_unlock(&po->bind_lock);
3053
3054	packet_flush_mclist(sk);
3055
3056	lock_sock(sk);
3057	if (po->rx_ring.pg_vec) {
3058		memset(&req_u, 0, sizeof(req_u));
3059		packet_set_ring(sk, &req_u, 1, 0);
3060	}
3061
3062	if (po->tx_ring.pg_vec) {
3063		memset(&req_u, 0, sizeof(req_u));
3064		packet_set_ring(sk, &req_u, 1, 1);
3065	}
3066	release_sock(sk);
3067
3068	f = fanout_release(sk);
3069
3070	synchronize_net();
3071
3072	kfree(po->rollover);
3073	if (f) {
3074		fanout_release_data(f);
3075		kfree(f);
3076	}
3077	/*
3078	 *	Now the socket is dead. No more input will appear.
3079	 */
3080	sock_orphan(sk);
3081	sock->sk = NULL;
3082
3083	/* Purge queues */
3084
3085	skb_queue_purge(&sk->sk_receive_queue);
3086	packet_free_pending(po);
3087	sk_refcnt_debug_release(sk);
3088
3089	sock_put(sk);
3090	return 0;
3091}
3092
3093/*
3094 *	Attach a packet hook.
3095 */
3096
3097static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3098			  __be16 proto)
3099{
3100	struct packet_sock *po = pkt_sk(sk);
3101	struct net_device *dev_curr;
3102	__be16 proto_curr;
3103	bool need_rehook;
3104	struct net_device *dev = NULL;
3105	int ret = 0;
3106	bool unlisted = false;
3107
3108	lock_sock(sk);
3109	spin_lock(&po->bind_lock);
3110	rcu_read_lock();
3111
3112	if (po->fanout) {
3113		ret = -EINVAL;
3114		goto out_unlock;
3115	}
3116
3117	if (name) {
3118		dev = dev_get_by_name_rcu(sock_net(sk), name);
3119		if (!dev) {
3120			ret = -ENODEV;
3121			goto out_unlock;
3122		}
3123	} else if (ifindex) {
3124		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3125		if (!dev) {
3126			ret = -ENODEV;
3127			goto out_unlock;
3128		}
3129	}
3130
3131	if (dev)
3132		dev_hold(dev);
3133
3134	proto_curr = po->prot_hook.type;
3135	dev_curr = po->prot_hook.dev;
3136
3137	need_rehook = proto_curr != proto || dev_curr != dev;
3138
3139	if (need_rehook) {
3140		if (po->running) {
3141			rcu_read_unlock();
3142			/* prevents packet_notifier() from calling
3143			 * register_prot_hook()
3144			 */
3145			po->num = 0;
3146			__unregister_prot_hook(sk, true);
3147			rcu_read_lock();
3148			dev_curr = po->prot_hook.dev;
3149			if (dev)
3150				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3151								 dev->ifindex);
3152		}
3153
3154		BUG_ON(po->running);
3155		po->num = proto;
3156		po->prot_hook.type = proto;
3157
3158		if (unlikely(unlisted)) {
3159			dev_put(dev);
3160			po->prot_hook.dev = NULL;
3161			po->ifindex = -1;
3162			packet_cached_dev_reset(po);
3163		} else {
3164			po->prot_hook.dev = dev;
3165			po->ifindex = dev ? dev->ifindex : 0;
3166			packet_cached_dev_assign(po, dev);
3167		}
3168	}
3169	if (dev_curr)
3170		dev_put(dev_curr);
3171
3172	if (proto == 0 || !need_rehook)
3173		goto out_unlock;
3174
3175	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3176		register_prot_hook(sk);
3177	} else {
3178		sk->sk_err = ENETDOWN;
3179		if (!sock_flag(sk, SOCK_DEAD))
3180			sk->sk_error_report(sk);
3181	}
3182
3183out_unlock:
3184	rcu_read_unlock();
3185	spin_unlock(&po->bind_lock);
3186	release_sock(sk);
3187	return ret;
3188}
3189
3190/*
3191 *	Bind a packet socket to a device
3192 */
3193
3194static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3195			    int addr_len)
3196{
3197	struct sock *sk = sock->sk;
3198	char name[sizeof(uaddr->sa_data) + 1];
3199
3200	/*
3201	 *	Check legality
3202	 */
3203
3204	if (addr_len != sizeof(struct sockaddr))
3205		return -EINVAL;
3206	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
3207	 * zero-terminated.
3208	 */
3209	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3210	name[sizeof(uaddr->sa_data)] = 0;
3211
3212	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3213}
3214
3215static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3216{
3217	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3218	struct sock *sk = sock->sk;
3219
3220	/*
3221	 *	Check legality
3222	 */
3223
3224	if (addr_len < sizeof(struct sockaddr_ll))
3225		return -EINVAL;
3226	if (sll->sll_family != AF_PACKET)
3227		return -EINVAL;
3228
3229	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3230			      sll->sll_protocol ? : pkt_sk(sk)->num);
3231}
3232
3233static struct proto packet_proto = {
3234	.name	  = "PACKET",
3235	.owner	  = THIS_MODULE,
3236	.obj_size = sizeof(struct packet_sock),
3237};
3238
3239/*
3240 *	Create a packet of type SOCK_PACKET.
3241 */
3242
3243static int packet_create(struct net *net, struct socket *sock, int protocol,
3244			 int kern)
3245{
3246	struct sock *sk;
3247	struct packet_sock *po;
3248	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3249	int err;
3250
3251	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3252		return -EPERM;
3253	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3254	    sock->type != SOCK_PACKET)
3255		return -ESOCKTNOSUPPORT;
3256
3257	sock->state = SS_UNCONNECTED;
3258
3259	err = -ENOBUFS;
3260	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3261	if (sk == NULL)
3262		goto out;
3263
3264	sock->ops = &packet_ops;
3265	if (sock->type == SOCK_PACKET)
3266		sock->ops = &packet_ops_spkt;
3267
3268	sock_init_data(sock, sk);
3269
3270	po = pkt_sk(sk);
3271	init_completion(&po->skb_completion);
3272	sk->sk_family = PF_PACKET;
3273	po->num = proto;
3274	po->xmit = dev_queue_xmit;
3275
3276	err = packet_alloc_pending(po);
3277	if (err)
3278		goto out2;
3279
3280	packet_cached_dev_reset(po);
3281
3282	sk->sk_destruct = packet_sock_destruct;
3283	sk_refcnt_debug_inc(sk);
3284
3285	/*
3286	 *	Attach a protocol block
3287	 */
3288
3289	spin_lock_init(&po->bind_lock);
3290	mutex_init(&po->pg_vec_lock);
3291	po->rollover = NULL;
3292	po->prot_hook.func = packet_rcv;
3293
3294	if (sock->type == SOCK_PACKET)
3295		po->prot_hook.func = packet_rcv_spkt;
3296
3297	po->prot_hook.af_packet_priv = sk;
3298
3299	if (proto) {
3300		po->prot_hook.type = proto;
3301		__register_prot_hook(sk);
3302	}
3303
3304	mutex_lock(&net->packet.sklist_lock);
3305	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3306	mutex_unlock(&net->packet.sklist_lock);
3307
3308	preempt_disable();
3309	sock_prot_inuse_add(net, &packet_proto, 1);
3310	preempt_enable();
3311
3312	return 0;
3313out2:
3314	sk_free(sk);
3315out:
3316	return err;
3317}
3318
3319/*
3320 *	Pull a packet from our receive queue and hand it to the user.
3321 *	If necessary we block.
3322 */
3323
3324static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3325			  int flags)
3326{
3327	struct sock *sk = sock->sk;
3328	struct sk_buff *skb;
3329	int copied, err;
3330	int vnet_hdr_len = 0;
3331	unsigned int origlen = 0;
3332
3333	err = -EINVAL;
3334	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3335		goto out;
3336
3337#if 0
3338	/* What error should we return now? EUNATTACH? */
3339	if (pkt_sk(sk)->ifindex < 0)
3340		return -ENODEV;
3341#endif
3342
3343	if (flags & MSG_ERRQUEUE) {
3344		err = sock_recv_errqueue(sk, msg, len,
3345					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3346		goto out;
3347	}
3348
3349	/*
3350	 *	Call the generic datagram receiver. This handles all sorts
3351	 *	of horrible races and re-entrancy so we can forget about it
3352	 *	in the protocol layers.
3353	 *
3354	 *	Now it will return ENETDOWN, if device have just gone down,
3355	 *	but then it will block.
3356	 */
3357
3358	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3359
3360	/*
3361	 *	An error occurred so return it. Because skb_recv_datagram()
3362	 *	handles the blocking we don't see and worry about blocking
3363	 *	retries.
3364	 */
3365
3366	if (skb == NULL)
3367		goto out;
3368
3369	packet_rcv_try_clear_pressure(pkt_sk(sk));
3370
3371	if (pkt_sk(sk)->has_vnet_hdr) {
3372		err = packet_rcv_vnet(msg, skb, &len);
3373		if (err)
3374			goto out_free;
3375		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3376	}
3377
3378	/* You lose any data beyond the buffer you gave. If it worries
3379	 * a user program they can ask the device for its MTU
3380	 * anyway.
3381	 */
3382	copied = skb->len;
3383	if (copied > len) {
3384		copied = len;
3385		msg->msg_flags |= MSG_TRUNC;
3386	}
3387
3388	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3389	if (err)
3390		goto out_free;
3391
3392	if (sock->type != SOCK_PACKET) {
3393		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3394
3395		/* Original length was stored in sockaddr_ll fields */
3396		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3397		sll->sll_family = AF_PACKET;
3398		sll->sll_protocol = skb->protocol;
3399	}
3400
3401	sock_recv_ts_and_drops(msg, sk, skb);
3402
3403	if (msg->msg_name) {
3404		int copy_len;
3405
3406		/* If the address length field is there to be filled
3407		 * in, we fill it in now.
3408		 */
3409		if (sock->type == SOCK_PACKET) {
3410			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3411			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3412			copy_len = msg->msg_namelen;
3413		} else {
3414			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3415
3416			msg->msg_namelen = sll->sll_halen +
3417				offsetof(struct sockaddr_ll, sll_addr);
3418			copy_len = msg->msg_namelen;
3419			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3420				memset(msg->msg_name +
3421				       offsetof(struct sockaddr_ll, sll_addr),
3422				       0, sizeof(sll->sll_addr));
3423				msg->msg_namelen = sizeof(struct sockaddr_ll);
3424			}
3425		}
3426		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3427	}
3428
3429	if (pkt_sk(sk)->auxdata) {
3430		struct tpacket_auxdata aux;
3431
3432		aux.tp_status = TP_STATUS_USER;
3433		if (skb->ip_summed == CHECKSUM_PARTIAL)
3434			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3435		else if (skb->pkt_type != PACKET_OUTGOING &&
3436			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3437			  skb_csum_unnecessary(skb)))
3438			aux.tp_status |= TP_STATUS_CSUM_VALID;
3439
3440		aux.tp_len = origlen;
3441		aux.tp_snaplen = skb->len;
3442		aux.tp_mac = 0;
3443		aux.tp_net = skb_network_offset(skb);
3444		if (skb_vlan_tag_present(skb)) {
3445			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3446			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3447			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3448		} else {
3449			aux.tp_vlan_tci = 0;
3450			aux.tp_vlan_tpid = 0;
3451		}
3452		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3453	}
3454
3455	/*
3456	 *	Free or return the buffer as appropriate. Again this
3457	 *	hides all the races and re-entrancy issues from us.
3458	 */
3459	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3460
3461out_free:
3462	skb_free_datagram(sk, skb);
3463out:
3464	return err;
3465}
3466
3467static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3468			       int peer)
3469{
3470	struct net_device *dev;
3471	struct sock *sk	= sock->sk;
3472
3473	if (peer)
3474		return -EOPNOTSUPP;
3475
3476	uaddr->sa_family = AF_PACKET;
3477	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3478	rcu_read_lock();
3479	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3480	if (dev)
3481		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3482	rcu_read_unlock();
3483
3484	return sizeof(*uaddr);
3485}
3486
3487static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3488			  int peer)
3489{
3490	struct net_device *dev;
3491	struct sock *sk = sock->sk;
3492	struct packet_sock *po = pkt_sk(sk);
3493	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3494
3495	if (peer)
3496		return -EOPNOTSUPP;
3497
3498	sll->sll_family = AF_PACKET;
3499	sll->sll_ifindex = po->ifindex;
3500	sll->sll_protocol = po->num;
3501	sll->sll_pkttype = 0;
3502	rcu_read_lock();
3503	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3504	if (dev) {
3505		sll->sll_hatype = dev->type;
3506		sll->sll_halen = dev->addr_len;
3507		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3508	} else {
3509		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3510		sll->sll_halen = 0;
3511	}
3512	rcu_read_unlock();
3513
3514	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3515}
3516
3517static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3518			 int what)
3519{
3520	switch (i->type) {
3521	case PACKET_MR_MULTICAST:
3522		if (i->alen != dev->addr_len)
3523			return -EINVAL;
3524		if (what > 0)
3525			return dev_mc_add(dev, i->addr);
3526		else
3527			return dev_mc_del(dev, i->addr);
3528		break;
3529	case PACKET_MR_PROMISC:
3530		return dev_set_promiscuity(dev, what);
3531	case PACKET_MR_ALLMULTI:
3532		return dev_set_allmulti(dev, what);
3533	case PACKET_MR_UNICAST:
3534		if (i->alen != dev->addr_len)
3535			return -EINVAL;
3536		if (what > 0)
3537			return dev_uc_add(dev, i->addr);
3538		else
3539			return dev_uc_del(dev, i->addr);
3540		break;
3541	default:
3542		break;
3543	}
3544	return 0;
3545}
3546
3547static void packet_dev_mclist_delete(struct net_device *dev,
3548				     struct packet_mclist **mlp)
3549{
3550	struct packet_mclist *ml;
3551
3552	while ((ml = *mlp) != NULL) {
3553		if (ml->ifindex == dev->ifindex) {
3554			packet_dev_mc(dev, ml, -1);
3555			*mlp = ml->next;
3556			kfree(ml);
3557		} else
3558			mlp = &ml->next;
3559	}
3560}
3561
3562static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3563{
3564	struct packet_sock *po = pkt_sk(sk);
3565	struct packet_mclist *ml, *i;
3566	struct net_device *dev;
3567	int err;
3568
3569	rtnl_lock();
3570
3571	err = -ENODEV;
3572	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3573	if (!dev)
3574		goto done;
3575
3576	err = -EINVAL;
3577	if (mreq->mr_alen > dev->addr_len)
3578		goto done;
3579
3580	err = -ENOBUFS;
3581	i = kmalloc(sizeof(*i), GFP_KERNEL);
3582	if (i == NULL)
3583		goto done;
3584
3585	err = 0;
3586	for (ml = po->mclist; ml; ml = ml->next) {
3587		if (ml->ifindex == mreq->mr_ifindex &&
3588		    ml->type == mreq->mr_type &&
3589		    ml->alen == mreq->mr_alen &&
3590		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3591			ml->count++;
3592			/* Free the new element ... */
3593			kfree(i);
3594			goto done;
3595		}
3596	}
3597
3598	i->type = mreq->mr_type;
3599	i->ifindex = mreq->mr_ifindex;
3600	i->alen = mreq->mr_alen;
3601	memcpy(i->addr, mreq->mr_address, i->alen);
3602	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3603	i->count = 1;
3604	i->next = po->mclist;
3605	po->mclist = i;
3606	err = packet_dev_mc(dev, i, 1);
3607	if (err) {
3608		po->mclist = i->next;
3609		kfree(i);
3610	}
3611
3612done:
3613	rtnl_unlock();
3614	return err;
3615}
3616
3617static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3618{
3619	struct packet_mclist *ml, **mlp;
3620
3621	rtnl_lock();
3622
3623	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3624		if (ml->ifindex == mreq->mr_ifindex &&
3625		    ml->type == mreq->mr_type &&
3626		    ml->alen == mreq->mr_alen &&
3627		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3628			if (--ml->count == 0) {
3629				struct net_device *dev;
3630				*mlp = ml->next;
3631				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3632				if (dev)
3633					packet_dev_mc(dev, ml, -1);
3634				kfree(ml);
3635			}
3636			break;
3637		}
3638	}
3639	rtnl_unlock();
3640	return 0;
3641}
3642
3643static void packet_flush_mclist(struct sock *sk)
3644{
3645	struct packet_sock *po = pkt_sk(sk);
3646	struct packet_mclist *ml;
3647
3648	if (!po->mclist)
3649		return;
3650
3651	rtnl_lock();
3652	while ((ml = po->mclist) != NULL) {
3653		struct net_device *dev;
3654
3655		po->mclist = ml->next;
3656		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3657		if (dev != NULL)
3658			packet_dev_mc(dev, ml, -1);
3659		kfree(ml);
3660	}
3661	rtnl_unlock();
3662}
3663
3664static int
3665packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3666		  unsigned int optlen)
3667{
3668	struct sock *sk = sock->sk;
3669	struct packet_sock *po = pkt_sk(sk);
3670	int ret;
3671
3672	if (level != SOL_PACKET)
3673		return -ENOPROTOOPT;
3674
3675	switch (optname) {
3676	case PACKET_ADD_MEMBERSHIP:
3677	case PACKET_DROP_MEMBERSHIP:
3678	{
3679		struct packet_mreq_max mreq;
3680		int len = optlen;
3681		memset(&mreq, 0, sizeof(mreq));
3682		if (len < sizeof(struct packet_mreq))
3683			return -EINVAL;
3684		if (len > sizeof(mreq))
3685			len = sizeof(mreq);
3686		if (copy_from_sockptr(&mreq, optval, len))
3687			return -EFAULT;
3688		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3689			return -EINVAL;
3690		if (optname == PACKET_ADD_MEMBERSHIP)
3691			ret = packet_mc_add(sk, &mreq);
3692		else
3693			ret = packet_mc_drop(sk, &mreq);
3694		return ret;
3695	}
3696
3697	case PACKET_RX_RING:
3698	case PACKET_TX_RING:
3699	{
3700		union tpacket_req_u req_u;
3701		int len;
3702
3703		lock_sock(sk);
3704		switch (po->tp_version) {
3705		case TPACKET_V1:
3706		case TPACKET_V2:
3707			len = sizeof(req_u.req);
3708			break;
3709		case TPACKET_V3:
3710		default:
3711			len = sizeof(req_u.req3);
3712			break;
3713		}
3714		if (optlen < len) {
3715			ret = -EINVAL;
3716		} else {
3717			if (copy_from_sockptr(&req_u.req, optval, len))
3718				ret = -EFAULT;
3719			else
3720				ret = packet_set_ring(sk, &req_u, 0,
3721						    optname == PACKET_TX_RING);
3722		}
3723		release_sock(sk);
3724		return ret;
3725	}
3726	case PACKET_COPY_THRESH:
3727	{
3728		int val;
3729
3730		if (optlen != sizeof(val))
3731			return -EINVAL;
3732		if (copy_from_sockptr(&val, optval, sizeof(val)))
3733			return -EFAULT;
3734
3735		pkt_sk(sk)->copy_thresh = val;
3736		return 0;
3737	}
3738	case PACKET_VERSION:
3739	{
3740		int val;
3741
3742		if (optlen != sizeof(val))
3743			return -EINVAL;
3744		if (copy_from_sockptr(&val, optval, sizeof(val)))
3745			return -EFAULT;
3746		switch (val) {
3747		case TPACKET_V1:
3748		case TPACKET_V2:
3749		case TPACKET_V3:
3750			break;
3751		default:
3752			return -EINVAL;
3753		}
3754		lock_sock(sk);
3755		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3756			ret = -EBUSY;
3757		} else {
3758			po->tp_version = val;
3759			ret = 0;
3760		}
3761		release_sock(sk);
3762		return ret;
3763	}
3764	case PACKET_RESERVE:
3765	{
3766		unsigned int val;
3767
3768		if (optlen != sizeof(val))
3769			return -EINVAL;
3770		if (copy_from_sockptr(&val, optval, sizeof(val)))
3771			return -EFAULT;
3772		if (val > INT_MAX)
3773			return -EINVAL;
3774		lock_sock(sk);
3775		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3776			ret = -EBUSY;
3777		} else {
3778			po->tp_reserve = val;
3779			ret = 0;
3780		}
3781		release_sock(sk);
3782		return ret;
3783	}
3784	case PACKET_LOSS:
3785	{
3786		unsigned int val;
3787
3788		if (optlen != sizeof(val))
3789			return -EINVAL;
3790		if (copy_from_sockptr(&val, optval, sizeof(val)))
3791			return -EFAULT;
3792
3793		lock_sock(sk);
3794		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3795			ret = -EBUSY;
3796		} else {
3797			po->tp_loss = !!val;
3798			ret = 0;
3799		}
3800		release_sock(sk);
3801		return ret;
3802	}
3803	case PACKET_AUXDATA:
3804	{
3805		int val;
3806
3807		if (optlen < sizeof(val))
3808			return -EINVAL;
3809		if (copy_from_sockptr(&val, optval, sizeof(val)))
3810			return -EFAULT;
3811
3812		lock_sock(sk);
3813		po->auxdata = !!val;
3814		release_sock(sk);
3815		return 0;
3816	}
3817	case PACKET_ORIGDEV:
3818	{
3819		int val;
3820
3821		if (optlen < sizeof(val))
3822			return -EINVAL;
3823		if (copy_from_sockptr(&val, optval, sizeof(val)))
3824			return -EFAULT;
3825
3826		lock_sock(sk);
3827		po->origdev = !!val;
3828		release_sock(sk);
3829		return 0;
3830	}
3831	case PACKET_VNET_HDR:
3832	{
3833		int val;
3834
3835		if (sock->type != SOCK_RAW)
3836			return -EINVAL;
3837		if (optlen < sizeof(val))
3838			return -EINVAL;
3839		if (copy_from_sockptr(&val, optval, sizeof(val)))
3840			return -EFAULT;
3841
3842		lock_sock(sk);
3843		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3844			ret = -EBUSY;
3845		} else {
3846			po->has_vnet_hdr = !!val;
3847			ret = 0;
3848		}
3849		release_sock(sk);
3850		return ret;
3851	}
3852	case PACKET_TIMESTAMP:
3853	{
3854		int val;
3855
3856		if (optlen != sizeof(val))
3857			return -EINVAL;
3858		if (copy_from_sockptr(&val, optval, sizeof(val)))
3859			return -EFAULT;
3860
3861		po->tp_tstamp = val;
3862		return 0;
3863	}
3864	case PACKET_FANOUT:
3865	{
3866		int val;
3867
3868		if (optlen != sizeof(val))
3869			return -EINVAL;
3870		if (copy_from_sockptr(&val, optval, sizeof(val)))
3871			return -EFAULT;
3872
3873		return fanout_add(sk, val & 0xffff, val >> 16);
3874	}
3875	case PACKET_FANOUT_DATA:
3876	{
3877		if (!po->fanout)
3878			return -EINVAL;
3879
3880		return fanout_set_data(po, optval, optlen);
3881	}
3882	case PACKET_IGNORE_OUTGOING:
3883	{
3884		int val;
3885
3886		if (optlen != sizeof(val))
3887			return -EINVAL;
3888		if (copy_from_sockptr(&val, optval, sizeof(val)))
3889			return -EFAULT;
3890		if (val < 0 || val > 1)
3891			return -EINVAL;
3892
3893		po->prot_hook.ignore_outgoing = !!val;
3894		return 0;
3895	}
3896	case PACKET_TX_HAS_OFF:
3897	{
3898		unsigned int val;
3899
3900		if (optlen != sizeof(val))
3901			return -EINVAL;
3902		if (copy_from_sockptr(&val, optval, sizeof(val)))
3903			return -EFAULT;
3904
3905		lock_sock(sk);
3906		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3907			ret = -EBUSY;
3908		} else {
3909			po->tp_tx_has_off = !!val;
3910			ret = 0;
3911		}
3912		release_sock(sk);
3913		return 0;
3914	}
3915	case PACKET_QDISC_BYPASS:
3916	{
3917		int val;
3918
3919		if (optlen != sizeof(val))
3920			return -EINVAL;
3921		if (copy_from_sockptr(&val, optval, sizeof(val)))
3922			return -EFAULT;
3923
3924		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3925		return 0;
3926	}
3927	default:
3928		return -ENOPROTOOPT;
3929	}
3930}
3931
3932static int packet_getsockopt(struct socket *sock, int level, int optname,
3933			     char __user *optval, int __user *optlen)
3934{
3935	int len;
3936	int val, lv = sizeof(val);
3937	struct sock *sk = sock->sk;
3938	struct packet_sock *po = pkt_sk(sk);
3939	void *data = &val;
3940	union tpacket_stats_u st;
3941	struct tpacket_rollover_stats rstats;
3942	int drops;
3943
3944	if (level != SOL_PACKET)
3945		return -ENOPROTOOPT;
3946
3947	if (get_user(len, optlen))
3948		return -EFAULT;
3949
3950	if (len < 0)
3951		return -EINVAL;
3952
3953	switch (optname) {
3954	case PACKET_STATISTICS:
3955		spin_lock_bh(&sk->sk_receive_queue.lock);
3956		memcpy(&st, &po->stats, sizeof(st));
3957		memset(&po->stats, 0, sizeof(po->stats));
3958		spin_unlock_bh(&sk->sk_receive_queue.lock);
3959		drops = atomic_xchg(&po->tp_drops, 0);
3960
3961		if (po->tp_version == TPACKET_V3) {
3962			lv = sizeof(struct tpacket_stats_v3);
3963			st.stats3.tp_drops = drops;
3964			st.stats3.tp_packets += drops;
3965			data = &st.stats3;
3966		} else {
3967			lv = sizeof(struct tpacket_stats);
3968			st.stats1.tp_drops = drops;
3969			st.stats1.tp_packets += drops;
3970			data = &st.stats1;
3971		}
3972
3973		break;
3974	case PACKET_AUXDATA:
3975		val = po->auxdata;
3976		break;
3977	case PACKET_ORIGDEV:
3978		val = po->origdev;
3979		break;
3980	case PACKET_VNET_HDR:
3981		val = po->has_vnet_hdr;
3982		break;
3983	case PACKET_VERSION:
3984		val = po->tp_version;
3985		break;
3986	case PACKET_HDRLEN:
3987		if (len > sizeof(int))
3988			len = sizeof(int);
3989		if (len < sizeof(int))
3990			return -EINVAL;
3991		if (copy_from_user(&val, optval, len))
3992			return -EFAULT;
3993		switch (val) {
3994		case TPACKET_V1:
3995			val = sizeof(struct tpacket_hdr);
3996			break;
3997		case TPACKET_V2:
3998			val = sizeof(struct tpacket2_hdr);
3999			break;
4000		case TPACKET_V3:
4001			val = sizeof(struct tpacket3_hdr);
4002			break;
4003		default:
4004			return -EINVAL;
4005		}
4006		break;
4007	case PACKET_RESERVE:
4008		val = po->tp_reserve;
4009		break;
4010	case PACKET_LOSS:
4011		val = po->tp_loss;
4012		break;
4013	case PACKET_TIMESTAMP:
4014		val = po->tp_tstamp;
4015		break;
4016	case PACKET_FANOUT:
4017		val = (po->fanout ?
4018		       ((u32)po->fanout->id |
4019			((u32)po->fanout->type << 16) |
4020			((u32)po->fanout->flags << 24)) :
4021		       0);
4022		break;
4023	case PACKET_IGNORE_OUTGOING:
4024		val = po->prot_hook.ignore_outgoing;
4025		break;
4026	case PACKET_ROLLOVER_STATS:
4027		if (!po->rollover)
4028			return -EINVAL;
4029		rstats.tp_all = atomic_long_read(&po->rollover->num);
4030		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4031		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4032		data = &rstats;
4033		lv = sizeof(rstats);
4034		break;
4035	case PACKET_TX_HAS_OFF:
4036		val = po->tp_tx_has_off;
4037		break;
4038	case PACKET_QDISC_BYPASS:
4039		val = packet_use_direct_xmit(po);
4040		break;
4041	default:
4042		return -ENOPROTOOPT;
4043	}
4044
4045	if (len > lv)
4046		len = lv;
4047	if (put_user(len, optlen))
4048		return -EFAULT;
4049	if (copy_to_user(optval, data, len))
4050		return -EFAULT;
4051	return 0;
4052}
4053
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4054static int packet_notifier(struct notifier_block *this,
4055			   unsigned long msg, void *ptr)
4056{
4057	struct sock *sk;
4058	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4059	struct net *net = dev_net(dev);
4060
4061	rcu_read_lock();
4062	sk_for_each_rcu(sk, &net->packet.sklist) {
4063		struct packet_sock *po = pkt_sk(sk);
4064
4065		switch (msg) {
4066		case NETDEV_UNREGISTER:
4067			if (po->mclist)
4068				packet_dev_mclist_delete(dev, &po->mclist);
4069			fallthrough;
4070
4071		case NETDEV_DOWN:
4072			if (dev->ifindex == po->ifindex) {
4073				spin_lock(&po->bind_lock);
4074				if (po->running) {
4075					__unregister_prot_hook(sk, false);
4076					sk->sk_err = ENETDOWN;
4077					if (!sock_flag(sk, SOCK_DEAD))
4078						sk->sk_error_report(sk);
4079				}
4080				if (msg == NETDEV_UNREGISTER) {
4081					packet_cached_dev_reset(po);
4082					po->ifindex = -1;
4083					if (po->prot_hook.dev)
4084						dev_put(po->prot_hook.dev);
4085					po->prot_hook.dev = NULL;
4086				}
4087				spin_unlock(&po->bind_lock);
4088			}
4089			break;
4090		case NETDEV_UP:
4091			if (dev->ifindex == po->ifindex) {
4092				spin_lock(&po->bind_lock);
4093				if (po->num)
4094					register_prot_hook(sk);
4095				spin_unlock(&po->bind_lock);
4096			}
4097			break;
4098		}
4099	}
4100	rcu_read_unlock();
4101	return NOTIFY_DONE;
4102}
4103
4104
4105static int packet_ioctl(struct socket *sock, unsigned int cmd,
4106			unsigned long arg)
4107{
4108	struct sock *sk = sock->sk;
4109
4110	switch (cmd) {
4111	case SIOCOUTQ:
4112	{
4113		int amount = sk_wmem_alloc_get(sk);
4114
4115		return put_user(amount, (int __user *)arg);
4116	}
4117	case SIOCINQ:
4118	{
4119		struct sk_buff *skb;
4120		int amount = 0;
4121
4122		spin_lock_bh(&sk->sk_receive_queue.lock);
4123		skb = skb_peek(&sk->sk_receive_queue);
4124		if (skb)
4125			amount = skb->len;
4126		spin_unlock_bh(&sk->sk_receive_queue.lock);
4127		return put_user(amount, (int __user *)arg);
4128	}
4129#ifdef CONFIG_INET
4130	case SIOCADDRT:
4131	case SIOCDELRT:
4132	case SIOCDARP:
4133	case SIOCGARP:
4134	case SIOCSARP:
4135	case SIOCGIFADDR:
4136	case SIOCSIFADDR:
4137	case SIOCGIFBRDADDR:
4138	case SIOCSIFBRDADDR:
4139	case SIOCGIFNETMASK:
4140	case SIOCSIFNETMASK:
4141	case SIOCGIFDSTADDR:
4142	case SIOCSIFDSTADDR:
4143	case SIOCSIFFLAGS:
4144		return inet_dgram_ops.ioctl(sock, cmd, arg);
4145#endif
4146
4147	default:
4148		return -ENOIOCTLCMD;
4149	}
4150	return 0;
4151}
4152
4153static __poll_t packet_poll(struct file *file, struct socket *sock,
4154				poll_table *wait)
4155{
4156	struct sock *sk = sock->sk;
4157	struct packet_sock *po = pkt_sk(sk);
4158	__poll_t mask = datagram_poll(file, sock, wait);
4159
4160	spin_lock_bh(&sk->sk_receive_queue.lock);
4161	if (po->rx_ring.pg_vec) {
4162		if (!packet_previous_rx_frame(po, &po->rx_ring,
4163			TP_STATUS_KERNEL))
4164			mask |= EPOLLIN | EPOLLRDNORM;
4165	}
4166	packet_rcv_try_clear_pressure(po);
4167	spin_unlock_bh(&sk->sk_receive_queue.lock);
4168	spin_lock_bh(&sk->sk_write_queue.lock);
4169	if (po->tx_ring.pg_vec) {
4170		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4171			mask |= EPOLLOUT | EPOLLWRNORM;
4172	}
4173	spin_unlock_bh(&sk->sk_write_queue.lock);
4174	return mask;
4175}
4176
4177
4178/* Dirty? Well, I still did not learn better way to account
4179 * for user mmaps.
4180 */
4181
4182static void packet_mm_open(struct vm_area_struct *vma)
4183{
4184	struct file *file = vma->vm_file;
4185	struct socket *sock = file->private_data;
4186	struct sock *sk = sock->sk;
4187
4188	if (sk)
4189		atomic_inc(&pkt_sk(sk)->mapped);
4190}
4191
4192static void packet_mm_close(struct vm_area_struct *vma)
4193{
4194	struct file *file = vma->vm_file;
4195	struct socket *sock = file->private_data;
4196	struct sock *sk = sock->sk;
4197
4198	if (sk)
4199		atomic_dec(&pkt_sk(sk)->mapped);
4200}
4201
4202static const struct vm_operations_struct packet_mmap_ops = {
4203	.open	=	packet_mm_open,
4204	.close	=	packet_mm_close,
4205};
4206
4207static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4208			unsigned int len)
4209{
4210	int i;
4211
4212	for (i = 0; i < len; i++) {
4213		if (likely(pg_vec[i].buffer)) {
4214			if (is_vmalloc_addr(pg_vec[i].buffer))
4215				vfree(pg_vec[i].buffer);
4216			else
4217				free_pages((unsigned long)pg_vec[i].buffer,
4218					   order);
4219			pg_vec[i].buffer = NULL;
4220		}
4221	}
4222	kfree(pg_vec);
4223}
4224
4225static char *alloc_one_pg_vec_page(unsigned long order)
4226{
4227	char *buffer;
4228	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4229			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4230
4231	buffer = (char *) __get_free_pages(gfp_flags, order);
4232	if (buffer)
4233		return buffer;
4234
4235	/* __get_free_pages failed, fall back to vmalloc */
4236	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4237	if (buffer)
4238		return buffer;
4239
4240	/* vmalloc failed, lets dig into swap here */
4241	gfp_flags &= ~__GFP_NORETRY;
4242	buffer = (char *) __get_free_pages(gfp_flags, order);
4243	if (buffer)
4244		return buffer;
4245
4246	/* complete and utter failure */
4247	return NULL;
4248}
4249
4250static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4251{
4252	unsigned int block_nr = req->tp_block_nr;
4253	struct pgv *pg_vec;
4254	int i;
4255
4256	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4257	if (unlikely(!pg_vec))
4258		goto out;
4259
4260	for (i = 0; i < block_nr; i++) {
4261		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4262		if (unlikely(!pg_vec[i].buffer))
4263			goto out_free_pgvec;
4264	}
4265
4266out:
4267	return pg_vec;
4268
4269out_free_pgvec:
4270	free_pg_vec(pg_vec, order, block_nr);
4271	pg_vec = NULL;
4272	goto out;
4273}
4274
4275static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4276		int closing, int tx_ring)
4277{
4278	struct pgv *pg_vec = NULL;
4279	struct packet_sock *po = pkt_sk(sk);
4280	unsigned long *rx_owner_map = NULL;
4281	int was_running, order = 0;
4282	struct packet_ring_buffer *rb;
4283	struct sk_buff_head *rb_queue;
4284	__be16 num;
4285	int err;
4286	/* Added to avoid minimal code churn */
4287	struct tpacket_req *req = &req_u->req;
4288
4289	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4290	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4291
4292	err = -EBUSY;
4293	if (!closing) {
4294		if (atomic_read(&po->mapped))
4295			goto out;
4296		if (packet_read_pending(rb))
4297			goto out;
4298	}
4299
4300	if (req->tp_block_nr) {
4301		unsigned int min_frame_size;
4302
4303		/* Sanity tests and some calculations */
4304		err = -EBUSY;
4305		if (unlikely(rb->pg_vec))
4306			goto out;
4307
4308		switch (po->tp_version) {
4309		case TPACKET_V1:
4310			po->tp_hdrlen = TPACKET_HDRLEN;
4311			break;
4312		case TPACKET_V2:
4313			po->tp_hdrlen = TPACKET2_HDRLEN;
4314			break;
4315		case TPACKET_V3:
4316			po->tp_hdrlen = TPACKET3_HDRLEN;
4317			break;
4318		}
4319
4320		err = -EINVAL;
4321		if (unlikely((int)req->tp_block_size <= 0))
4322			goto out;
4323		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4324			goto out;
4325		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4326		if (po->tp_version >= TPACKET_V3 &&
4327		    req->tp_block_size <
4328		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4329			goto out;
4330		if (unlikely(req->tp_frame_size < min_frame_size))
4331			goto out;
4332		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4333			goto out;
4334
4335		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4336		if (unlikely(rb->frames_per_block == 0))
4337			goto out;
4338		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4339			goto out;
4340		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4341					req->tp_frame_nr))
4342			goto out;
4343
4344		err = -ENOMEM;
4345		order = get_order(req->tp_block_size);
4346		pg_vec = alloc_pg_vec(req, order);
4347		if (unlikely(!pg_vec))
4348			goto out;
4349		switch (po->tp_version) {
4350		case TPACKET_V3:
4351			/* Block transmit is not supported yet */
4352			if (!tx_ring) {
4353				init_prb_bdqc(po, rb, pg_vec, req_u);
4354			} else {
4355				struct tpacket_req3 *req3 = &req_u->req3;
4356
4357				if (req3->tp_retire_blk_tov ||
4358				    req3->tp_sizeof_priv ||
4359				    req3->tp_feature_req_word) {
4360					err = -EINVAL;
4361					goto out_free_pg_vec;
4362				}
4363			}
4364			break;
4365		default:
4366			if (!tx_ring) {
4367				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4368					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4369				if (!rx_owner_map)
4370					goto out_free_pg_vec;
4371			}
4372			break;
4373		}
4374	}
4375	/* Done */
4376	else {
4377		err = -EINVAL;
4378		if (unlikely(req->tp_frame_nr))
4379			goto out;
4380	}
4381
4382
4383	/* Detach socket from network */
4384	spin_lock(&po->bind_lock);
4385	was_running = po->running;
4386	num = po->num;
4387	if (was_running) {
4388		po->num = 0;
4389		__unregister_prot_hook(sk, false);
4390	}
4391	spin_unlock(&po->bind_lock);
4392
4393	synchronize_net();
4394
4395	err = -EBUSY;
4396	mutex_lock(&po->pg_vec_lock);
4397	if (closing || atomic_read(&po->mapped) == 0) {
4398		err = 0;
4399		spin_lock_bh(&rb_queue->lock);
4400		swap(rb->pg_vec, pg_vec);
4401		if (po->tp_version <= TPACKET_V2)
4402			swap(rb->rx_owner_map, rx_owner_map);
4403		rb->frame_max = (req->tp_frame_nr - 1);
4404		rb->head = 0;
4405		rb->frame_size = req->tp_frame_size;
4406		spin_unlock_bh(&rb_queue->lock);
4407
4408		swap(rb->pg_vec_order, order);
4409		swap(rb->pg_vec_len, req->tp_block_nr);
4410
4411		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4412		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4413						tpacket_rcv : packet_rcv;
4414		skb_queue_purge(rb_queue);
4415		if (atomic_read(&po->mapped))
4416			pr_err("packet_mmap: vma is busy: %d\n",
4417			       atomic_read(&po->mapped));
4418	}
4419	mutex_unlock(&po->pg_vec_lock);
4420
4421	spin_lock(&po->bind_lock);
4422	if (was_running) {
4423		po->num = num;
4424		register_prot_hook(sk);
4425	}
4426	spin_unlock(&po->bind_lock);
4427	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4428		/* Because we don't support block-based V3 on tx-ring */
4429		if (!tx_ring)
4430			prb_shutdown_retire_blk_timer(po, rb_queue);
4431	}
4432
4433out_free_pg_vec:
4434	bitmap_free(rx_owner_map);
4435	if (pg_vec)
4436		free_pg_vec(pg_vec, order, req->tp_block_nr);
4437out:
4438	return err;
4439}
4440
4441static int packet_mmap(struct file *file, struct socket *sock,
4442		struct vm_area_struct *vma)
4443{
4444	struct sock *sk = sock->sk;
4445	struct packet_sock *po = pkt_sk(sk);
4446	unsigned long size, expected_size;
4447	struct packet_ring_buffer *rb;
4448	unsigned long start;
4449	int err = -EINVAL;
4450	int i;
4451
4452	if (vma->vm_pgoff)
4453		return -EINVAL;
4454
4455	mutex_lock(&po->pg_vec_lock);
4456
4457	expected_size = 0;
4458	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4459		if (rb->pg_vec) {
4460			expected_size += rb->pg_vec_len
4461						* rb->pg_vec_pages
4462						* PAGE_SIZE;
4463		}
4464	}
4465
4466	if (expected_size == 0)
4467		goto out;
4468
4469	size = vma->vm_end - vma->vm_start;
4470	if (size != expected_size)
4471		goto out;
4472
4473	start = vma->vm_start;
4474	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4475		if (rb->pg_vec == NULL)
4476			continue;
4477
4478		for (i = 0; i < rb->pg_vec_len; i++) {
4479			struct page *page;
4480			void *kaddr = rb->pg_vec[i].buffer;
4481			int pg_num;
4482
4483			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4484				page = pgv_to_page(kaddr);
4485				err = vm_insert_page(vma, start, page);
4486				if (unlikely(err))
4487					goto out;
4488				start += PAGE_SIZE;
4489				kaddr += PAGE_SIZE;
4490			}
4491		}
4492	}
4493
4494	atomic_inc(&po->mapped);
4495	vma->vm_ops = &packet_mmap_ops;
4496	err = 0;
4497
4498out:
4499	mutex_unlock(&po->pg_vec_lock);
4500	return err;
4501}
4502
4503static const struct proto_ops packet_ops_spkt = {
4504	.family =	PF_PACKET,
4505	.owner =	THIS_MODULE,
4506	.release =	packet_release,
4507	.bind =		packet_bind_spkt,
4508	.connect =	sock_no_connect,
4509	.socketpair =	sock_no_socketpair,
4510	.accept =	sock_no_accept,
4511	.getname =	packet_getname_spkt,
4512	.poll =		datagram_poll,
4513	.ioctl =	packet_ioctl,
4514	.gettstamp =	sock_gettstamp,
4515	.listen =	sock_no_listen,
4516	.shutdown =	sock_no_shutdown,
 
 
4517	.sendmsg =	packet_sendmsg_spkt,
4518	.recvmsg =	packet_recvmsg,
4519	.mmap =		sock_no_mmap,
4520	.sendpage =	sock_no_sendpage,
4521};
4522
4523static const struct proto_ops packet_ops = {
4524	.family =	PF_PACKET,
4525	.owner =	THIS_MODULE,
4526	.release =	packet_release,
4527	.bind =		packet_bind,
4528	.connect =	sock_no_connect,
4529	.socketpair =	sock_no_socketpair,
4530	.accept =	sock_no_accept,
4531	.getname =	packet_getname,
4532	.poll =		packet_poll,
4533	.ioctl =	packet_ioctl,
4534	.gettstamp =	sock_gettstamp,
4535	.listen =	sock_no_listen,
4536	.shutdown =	sock_no_shutdown,
4537	.setsockopt =	packet_setsockopt,
4538	.getsockopt =	packet_getsockopt,
 
 
 
4539	.sendmsg =	packet_sendmsg,
4540	.recvmsg =	packet_recvmsg,
4541	.mmap =		packet_mmap,
4542	.sendpage =	sock_no_sendpage,
4543};
4544
4545static const struct net_proto_family packet_family_ops = {
4546	.family =	PF_PACKET,
4547	.create =	packet_create,
4548	.owner	=	THIS_MODULE,
4549};
4550
4551static struct notifier_block packet_netdev_notifier = {
4552	.notifier_call =	packet_notifier,
4553};
4554
4555#ifdef CONFIG_PROC_FS
4556
4557static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4558	__acquires(RCU)
4559{
4560	struct net *net = seq_file_net(seq);
4561
4562	rcu_read_lock();
4563	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4564}
4565
4566static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4567{
4568	struct net *net = seq_file_net(seq);
4569	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4570}
4571
4572static void packet_seq_stop(struct seq_file *seq, void *v)
4573	__releases(RCU)
4574{
4575	rcu_read_unlock();
4576}
4577
4578static int packet_seq_show(struct seq_file *seq, void *v)
4579{
4580	if (v == SEQ_START_TOKEN)
4581		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4582	else {
4583		struct sock *s = sk_entry(v);
4584		const struct packet_sock *po = pkt_sk(s);
4585
4586		seq_printf(seq,
4587			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4588			   s,
4589			   refcount_read(&s->sk_refcnt),
4590			   s->sk_type,
4591			   ntohs(po->num),
4592			   po->ifindex,
4593			   po->running,
4594			   atomic_read(&s->sk_rmem_alloc),
4595			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4596			   sock_i_ino(s));
4597	}
4598
4599	return 0;
4600}
4601
4602static const struct seq_operations packet_seq_ops = {
4603	.start	= packet_seq_start,
4604	.next	= packet_seq_next,
4605	.stop	= packet_seq_stop,
4606	.show	= packet_seq_show,
4607};
4608#endif
4609
4610static int __net_init packet_net_init(struct net *net)
4611{
4612	mutex_init(&net->packet.sklist_lock);
4613	INIT_HLIST_HEAD(&net->packet.sklist);
4614
4615	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4616			sizeof(struct seq_net_private)))
4617		return -ENOMEM;
4618
4619	return 0;
4620}
4621
4622static void __net_exit packet_net_exit(struct net *net)
4623{
4624	remove_proc_entry("packet", net->proc_net);
4625	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4626}
4627
4628static struct pernet_operations packet_net_ops = {
4629	.init = packet_net_init,
4630	.exit = packet_net_exit,
4631};
4632
4633
4634static void __exit packet_exit(void)
4635{
4636	unregister_netdevice_notifier(&packet_netdev_notifier);
4637	unregister_pernet_subsys(&packet_net_ops);
4638	sock_unregister(PF_PACKET);
4639	proto_unregister(&packet_proto);
4640}
4641
4642static int __init packet_init(void)
4643{
4644	int rc;
4645
4646	rc = proto_register(&packet_proto, 0);
4647	if (rc)
4648		goto out;
4649	rc = sock_register(&packet_family_ops);
4650	if (rc)
4651		goto out_proto;
4652	rc = register_pernet_subsys(&packet_net_ops);
4653	if (rc)
4654		goto out_sock;
4655	rc = register_netdevice_notifier(&packet_netdev_notifier);
4656	if (rc)
4657		goto out_pernet;
4658
4659	return 0;
4660
4661out_pernet:
4662	unregister_pernet_subsys(&packet_net_ops);
4663out_sock:
4664	sock_unregister(PF_PACKET);
4665out_proto:
4666	proto_unregister(&packet_proto);
4667out:
4668	return rc;
4669}
4670
4671module_init(packet_init);
4672module_exit(packet_exit);
4673MODULE_LICENSE("GPL");
4674MODULE_ALIAS_NETPROTO(PF_PACKET);
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#include <linux/types.h>
  50#include <linux/mm.h>
  51#include <linux/capability.h>
  52#include <linux/fcntl.h>
  53#include <linux/socket.h>
  54#include <linux/in.h>
  55#include <linux/inet.h>
  56#include <linux/netdevice.h>
  57#include <linux/if_packet.h>
  58#include <linux/wireless.h>
  59#include <linux/kernel.h>
  60#include <linux/kmod.h>
  61#include <linux/slab.h>
  62#include <linux/vmalloc.h>
  63#include <net/net_namespace.h>
  64#include <net/ip.h>
  65#include <net/protocol.h>
  66#include <linux/skbuff.h>
  67#include <net/sock.h>
  68#include <linux/errno.h>
  69#include <linux/timer.h>
  70#include <linux/uaccess.h>
  71#include <asm/ioctls.h>
  72#include <asm/page.h>
  73#include <asm/cacheflush.h>
  74#include <asm/io.h>
  75#include <linux/proc_fs.h>
  76#include <linux/seq_file.h>
  77#include <linux/poll.h>
  78#include <linux/module.h>
  79#include <linux/init.h>
  80#include <linux/mutex.h>
  81#include <linux/if_vlan.h>
  82#include <linux/virtio_net.h>
  83#include <linux/errqueue.h>
  84#include <linux/net_tstamp.h>
  85#include <linux/percpu.h>
  86#ifdef CONFIG_INET
  87#include <net/inet_common.h>
  88#endif
  89#include <linux/bpf.h>
  90#include <net/compat.h>
  91
  92#include "internal.h"
  93
  94/*
  95   Assumptions:
  96   - if device has no dev->hard_header routine, it adds and removes ll header
  97     inside itself. In this case ll header is invisible outside of device,
  98     but higher levels still should reserve dev->hard_header_len.
  99     Some devices are enough clever to reallocate skb, when header
 100     will not fit to reserved space (tunnel), another ones are silly
 101     (PPP).
 102   - packet socket receives packets with pulled ll header,
 103     so that SOCK_RAW should push it back.
 104
 105On receive:
 106-----------
 107
 108Incoming, dev->hard_header!=NULL
 109   mac_header -> ll header
 110   data       -> data
 111
 112Outgoing, dev->hard_header!=NULL
 113   mac_header -> ll header
 114   data       -> ll header
 115
 116Incoming, dev->hard_header==NULL
 117   mac_header -> UNKNOWN position. It is very likely, that it points to ll
 118		 header.  PPP makes it, that is wrong, because introduce
 119		 assymetry between rx and tx paths.
 120   data       -> data
 121
 122Outgoing, dev->hard_header==NULL
 123   mac_header -> data. ll header is still not built!
 124   data       -> data
 125
 126Resume
 127  If dev->hard_header==NULL we are unlikely to restore sensible ll header.
 128
 129
 130On transmit:
 131------------
 132
 133dev->hard_header != NULL
 134   mac_header -> ll header
 135   data       -> ll header
 136
 137dev->hard_header == NULL (ll header is added by device, we cannot control it)
 138   mac_header -> data
 139   data       -> data
 140
 141   We should set nh.raw on output to correct posistion,
 142   packet classifier depends on it.
 143 */
 144
 145/* Private packet socket structures. */
 146
 147/* identical to struct packet_mreq except it has
 148 * a longer address field.
 149 */
 150struct packet_mreq_max {
 151	int		mr_ifindex;
 152	unsigned short	mr_type;
 153	unsigned short	mr_alen;
 154	unsigned char	mr_address[MAX_ADDR_LEN];
 155};
 156
 157union tpacket_uhdr {
 158	struct tpacket_hdr  *h1;
 159	struct tpacket2_hdr *h2;
 160	struct tpacket3_hdr *h3;
 161	void *raw;
 162};
 163
 164static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 165		int closing, int tx_ring);
 166
 167#define V3_ALIGNMENT	(8)
 168
 169#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 170
 171#define BLK_PLUS_PRIV(sz_of_priv) \
 172	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 173
 174#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 175#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 176#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 177#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 178#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 179#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 180#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 181
 182struct packet_sock;
 183static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 184		       struct packet_type *pt, struct net_device *orig_dev);
 185
 186static void *packet_previous_frame(struct packet_sock *po,
 187		struct packet_ring_buffer *rb,
 188		int status);
 189static void packet_increment_head(struct packet_ring_buffer *buff);
 190static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 191static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 192			struct packet_sock *);
 193static void prb_retire_current_block(struct tpacket_kbdq_core *,
 194		struct packet_sock *, unsigned int status);
 195static int prb_queue_frozen(struct tpacket_kbdq_core *);
 196static void prb_open_block(struct tpacket_kbdq_core *,
 197		struct tpacket_block_desc *);
 198static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 199static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 200static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 201static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 202		struct tpacket3_hdr *);
 203static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 204		struct tpacket3_hdr *);
 205static void packet_flush_mclist(struct sock *sk);
 206static u16 packet_pick_tx_queue(struct sk_buff *skb);
 207
 208struct packet_skb_cb {
 209	union {
 210		struct sockaddr_pkt pkt;
 211		union {
 212			/* Trick: alias skb original length with
 213			 * ll.sll_family and ll.protocol in order
 214			 * to save room.
 215			 */
 216			unsigned int origlen;
 217			struct sockaddr_ll ll;
 218		};
 219	} sa;
 220};
 221
 222#define vio_le() virtio_legacy_is_little_endian()
 223
 224#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 225
 226#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 227#define GET_PBLOCK_DESC(x, bid)	\
 228	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 229#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 230	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 231#define GET_NEXT_PRB_BLK_NUM(x) \
 232	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 233	((x)->kactive_blk_num+1) : 0)
 234
 235static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 236static void __fanout_link(struct sock *sk, struct packet_sock *po);
 237
 238static int packet_direct_xmit(struct sk_buff *skb)
 239{
 240	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 241}
 242
 243static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 244{
 245	struct net_device *dev;
 246
 247	rcu_read_lock();
 248	dev = rcu_dereference(po->cached_dev);
 249	if (likely(dev))
 250		dev_hold(dev);
 251	rcu_read_unlock();
 252
 253	return dev;
 254}
 255
 256static void packet_cached_dev_assign(struct packet_sock *po,
 257				     struct net_device *dev)
 258{
 259	rcu_assign_pointer(po->cached_dev, dev);
 260}
 261
 262static void packet_cached_dev_reset(struct packet_sock *po)
 263{
 264	RCU_INIT_POINTER(po->cached_dev, NULL);
 265}
 266
 267static bool packet_use_direct_xmit(const struct packet_sock *po)
 268{
 269	return po->xmit == packet_direct_xmit;
 270}
 271
 272static u16 packet_pick_tx_queue(struct sk_buff *skb)
 273{
 274	struct net_device *dev = skb->dev;
 275	const struct net_device_ops *ops = dev->netdev_ops;
 276	int cpu = raw_smp_processor_id();
 277	u16 queue_index;
 278
 279#ifdef CONFIG_XPS
 280	skb->sender_cpu = cpu + 1;
 281#endif
 282	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 283	if (ops->ndo_select_queue) {
 284		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 285		queue_index = netdev_cap_txqueue(dev, queue_index);
 286	} else {
 287		queue_index = netdev_pick_tx(dev, skb, NULL);
 288	}
 289
 290	return queue_index;
 291}
 292
 293/* __register_prot_hook must be invoked through register_prot_hook
 294 * or from a context in which asynchronous accesses to the packet
 295 * socket is not possible (packet_create()).
 296 */
 297static void __register_prot_hook(struct sock *sk)
 298{
 299	struct packet_sock *po = pkt_sk(sk);
 300
 301	if (!po->running) {
 302		if (po->fanout)
 303			__fanout_link(sk, po);
 304		else
 305			dev_add_pack(&po->prot_hook);
 306
 307		sock_hold(sk);
 308		po->running = 1;
 309	}
 310}
 311
 312static void register_prot_hook(struct sock *sk)
 313{
 314	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 315	__register_prot_hook(sk);
 316}
 317
 318/* If the sync parameter is true, we will temporarily drop
 319 * the po->bind_lock and do a synchronize_net to make sure no
 320 * asynchronous packet processing paths still refer to the elements
 321 * of po->prot_hook.  If the sync parameter is false, it is the
 322 * callers responsibility to take care of this.
 323 */
 324static void __unregister_prot_hook(struct sock *sk, bool sync)
 325{
 326	struct packet_sock *po = pkt_sk(sk);
 327
 328	lockdep_assert_held_once(&po->bind_lock);
 329
 330	po->running = 0;
 331
 332	if (po->fanout)
 333		__fanout_unlink(sk, po);
 334	else
 335		__dev_remove_pack(&po->prot_hook);
 336
 337	__sock_put(sk);
 338
 339	if (sync) {
 340		spin_unlock(&po->bind_lock);
 341		synchronize_net();
 342		spin_lock(&po->bind_lock);
 343	}
 344}
 345
 346static void unregister_prot_hook(struct sock *sk, bool sync)
 347{
 348	struct packet_sock *po = pkt_sk(sk);
 349
 350	if (po->running)
 351		__unregister_prot_hook(sk, sync);
 352}
 353
 354static inline struct page * __pure pgv_to_page(void *addr)
 355{
 356	if (is_vmalloc_addr(addr))
 357		return vmalloc_to_page(addr);
 358	return virt_to_page(addr);
 359}
 360
 361static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 362{
 363	union tpacket_uhdr h;
 364
 365	h.raw = frame;
 366	switch (po->tp_version) {
 367	case TPACKET_V1:
 368		h.h1->tp_status = status;
 369		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 370		break;
 371	case TPACKET_V2:
 372		h.h2->tp_status = status;
 373		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 374		break;
 375	case TPACKET_V3:
 376		h.h3->tp_status = status;
 377		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 378		break;
 379	default:
 380		WARN(1, "TPACKET version not supported.\n");
 381		BUG();
 382	}
 383
 384	smp_wmb();
 385}
 386
 387static int __packet_get_status(const struct packet_sock *po, void *frame)
 388{
 389	union tpacket_uhdr h;
 390
 391	smp_rmb();
 392
 393	h.raw = frame;
 394	switch (po->tp_version) {
 395	case TPACKET_V1:
 396		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 397		return h.h1->tp_status;
 398	case TPACKET_V2:
 399		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 400		return h.h2->tp_status;
 401	case TPACKET_V3:
 402		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 403		return h.h3->tp_status;
 404	default:
 405		WARN(1, "TPACKET version not supported.\n");
 406		BUG();
 407		return 0;
 408	}
 409}
 410
 411static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
 412				   unsigned int flags)
 413{
 414	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 415
 416	if (shhwtstamps &&
 417	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 418	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
 419		return TP_STATUS_TS_RAW_HARDWARE;
 420
 421	if (ktime_to_timespec_cond(skb->tstamp, ts))
 422		return TP_STATUS_TS_SOFTWARE;
 423
 424	return 0;
 425}
 426
 427static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 428				    struct sk_buff *skb)
 429{
 430	union tpacket_uhdr h;
 431	struct timespec ts;
 432	__u32 ts_status;
 433
 434	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 435		return 0;
 436
 437	h.raw = frame;
 
 
 
 
 
 
 
 438	switch (po->tp_version) {
 439	case TPACKET_V1:
 440		h.h1->tp_sec = ts.tv_sec;
 441		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 442		break;
 443	case TPACKET_V2:
 444		h.h2->tp_sec = ts.tv_sec;
 445		h.h2->tp_nsec = ts.tv_nsec;
 446		break;
 447	case TPACKET_V3:
 448		h.h3->tp_sec = ts.tv_sec;
 449		h.h3->tp_nsec = ts.tv_nsec;
 450		break;
 451	default:
 452		WARN(1, "TPACKET version not supported.\n");
 453		BUG();
 454	}
 455
 456	/* one flush is safe, as both fields always lie on the same cacheline */
 457	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 458	smp_wmb();
 459
 460	return ts_status;
 461}
 462
 463static void *packet_lookup_frame(const struct packet_sock *po,
 464				 const struct packet_ring_buffer *rb,
 465				 unsigned int position,
 466				 int status)
 467{
 468	unsigned int pg_vec_pos, frame_offset;
 469	union tpacket_uhdr h;
 470
 471	pg_vec_pos = position / rb->frames_per_block;
 472	frame_offset = position % rb->frames_per_block;
 473
 474	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 475		(frame_offset * rb->frame_size);
 476
 477	if (status != __packet_get_status(po, h.raw))
 478		return NULL;
 479
 480	return h.raw;
 481}
 482
 483static void *packet_current_frame(struct packet_sock *po,
 484		struct packet_ring_buffer *rb,
 485		int status)
 486{
 487	return packet_lookup_frame(po, rb, rb->head, status);
 488}
 489
 490static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 491{
 492	del_timer_sync(&pkc->retire_blk_timer);
 493}
 494
 495static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 496		struct sk_buff_head *rb_queue)
 497{
 498	struct tpacket_kbdq_core *pkc;
 499
 500	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 501
 502	spin_lock_bh(&rb_queue->lock);
 503	pkc->delete_blk_timer = 1;
 504	spin_unlock_bh(&rb_queue->lock);
 505
 506	prb_del_retire_blk_timer(pkc);
 507}
 508
 509static void prb_setup_retire_blk_timer(struct packet_sock *po)
 510{
 511	struct tpacket_kbdq_core *pkc;
 512
 513	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 514	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 515		    0);
 516	pkc->retire_blk_timer.expires = jiffies;
 517}
 518
 519static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 520				int blk_size_in_bytes)
 521{
 522	struct net_device *dev;
 523	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
 524	struct ethtool_link_ksettings ecmd;
 525	int err;
 526
 527	rtnl_lock();
 528	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 529	if (unlikely(!dev)) {
 530		rtnl_unlock();
 531		return DEFAULT_PRB_RETIRE_TOV;
 532	}
 533	err = __ethtool_get_link_ksettings(dev, &ecmd);
 534	rtnl_unlock();
 535	if (!err) {
 536		/*
 537		 * If the link speed is so slow you don't really
 538		 * need to worry about perf anyways
 539		 */
 540		if (ecmd.base.speed < SPEED_1000 ||
 541		    ecmd.base.speed == SPEED_UNKNOWN) {
 542			return DEFAULT_PRB_RETIRE_TOV;
 543		} else {
 544			msec = 1;
 545			div = ecmd.base.speed / 1000;
 546		}
 547	}
 548
 
 549	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 550
 551	if (div)
 552		mbits /= div;
 553
 554	tmo = mbits * msec;
 555
 556	if (div)
 557		return tmo+1;
 558	return tmo;
 559}
 560
 561static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 562			union tpacket_req_u *req_u)
 563{
 564	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 565}
 566
 567static void init_prb_bdqc(struct packet_sock *po,
 568			struct packet_ring_buffer *rb,
 569			struct pgv *pg_vec,
 570			union tpacket_req_u *req_u)
 571{
 572	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 573	struct tpacket_block_desc *pbd;
 574
 575	memset(p1, 0x0, sizeof(*p1));
 576
 577	p1->knxt_seq_num = 1;
 578	p1->pkbdq = pg_vec;
 579	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 580	p1->pkblk_start	= pg_vec[0].buffer;
 581	p1->kblk_size = req_u->req3.tp_block_size;
 582	p1->knum_blocks	= req_u->req3.tp_block_nr;
 583	p1->hdrlen = po->tp_hdrlen;
 584	p1->version = po->tp_version;
 585	p1->last_kactive_blk_num = 0;
 586	po->stats.stats3.tp_freeze_q_cnt = 0;
 587	if (req_u->req3.tp_retire_blk_tov)
 588		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 589	else
 590		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 591						req_u->req3.tp_block_size);
 592	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 593	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 
 594
 595	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 596	prb_init_ft_ops(p1, req_u);
 597	prb_setup_retire_blk_timer(po);
 598	prb_open_block(p1, pbd);
 599}
 600
 601/*  Do NOT update the last_blk_num first.
 602 *  Assumes sk_buff_head lock is held.
 603 */
 604static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 605{
 606	mod_timer(&pkc->retire_blk_timer,
 607			jiffies + pkc->tov_in_jiffies);
 608	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 609}
 610
 611/*
 612 * Timer logic:
 613 * 1) We refresh the timer only when we open a block.
 614 *    By doing this we don't waste cycles refreshing the timer
 615 *	  on packet-by-packet basis.
 616 *
 617 * With a 1MB block-size, on a 1Gbps line, it will take
 618 * i) ~8 ms to fill a block + ii) memcpy etc.
 619 * In this cut we are not accounting for the memcpy time.
 620 *
 621 * So, if the user sets the 'tmo' to 10ms then the timer
 622 * will never fire while the block is still getting filled
 623 * (which is what we want). However, the user could choose
 624 * to close a block early and that's fine.
 625 *
 626 * But when the timer does fire, we check whether or not to refresh it.
 627 * Since the tmo granularity is in msecs, it is not too expensive
 628 * to refresh the timer, lets say every '8' msecs.
 629 * Either the user can set the 'tmo' or we can derive it based on
 630 * a) line-speed and b) block-size.
 631 * prb_calc_retire_blk_tmo() calculates the tmo.
 632 *
 633 */
 634static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 635{
 636	struct packet_sock *po =
 637		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 638	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 639	unsigned int frozen;
 640	struct tpacket_block_desc *pbd;
 641
 642	spin_lock(&po->sk.sk_receive_queue.lock);
 643
 644	frozen = prb_queue_frozen(pkc);
 645	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 646
 647	if (unlikely(pkc->delete_blk_timer))
 648		goto out;
 649
 650	/* We only need to plug the race when the block is partially filled.
 651	 * tpacket_rcv:
 652	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 653	 *		copy_bits() is in progress ...
 654	 *		timer fires on other cpu:
 655	 *		we can't retire the current block because copy_bits
 656	 *		is in progress.
 657	 *
 658	 */
 659	if (BLOCK_NUM_PKTS(pbd)) {
 660		while (atomic_read(&pkc->blk_fill_in_prog)) {
 661			/* Waiting for skb_copy_bits to finish... */
 662			cpu_relax();
 663		}
 664	}
 665
 666	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 667		if (!frozen) {
 668			if (!BLOCK_NUM_PKTS(pbd)) {
 669				/* An empty block. Just refresh the timer. */
 670				goto refresh_timer;
 671			}
 672			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 673			if (!prb_dispatch_next_block(pkc, po))
 674				goto refresh_timer;
 675			else
 676				goto out;
 677		} else {
 678			/* Case 1. Queue was frozen because user-space was
 679			 *	   lagging behind.
 680			 */
 681			if (prb_curr_blk_in_use(pbd)) {
 682				/*
 683				 * Ok, user-space is still behind.
 684				 * So just refresh the timer.
 685				 */
 686				goto refresh_timer;
 687			} else {
 688			       /* Case 2. queue was frozen,user-space caught up,
 689				* now the link went idle && the timer fired.
 690				* We don't have a block to close.So we open this
 691				* block and restart the timer.
 692				* opening a block thaws the queue,restarts timer
 693				* Thawing/timer-refresh is a side effect.
 694				*/
 695				prb_open_block(pkc, pbd);
 696				goto out;
 697			}
 698		}
 699	}
 700
 701refresh_timer:
 702	_prb_refresh_rx_retire_blk_timer(pkc);
 703
 704out:
 705	spin_unlock(&po->sk.sk_receive_queue.lock);
 706}
 707
 708static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 709		struct tpacket_block_desc *pbd1, __u32 status)
 710{
 711	/* Flush everything minus the block header */
 712
 713#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 714	u8 *start, *end;
 715
 716	start = (u8 *)pbd1;
 717
 718	/* Skip the block header(we know header WILL fit in 4K) */
 719	start += PAGE_SIZE;
 720
 721	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 722	for (; start < end; start += PAGE_SIZE)
 723		flush_dcache_page(pgv_to_page(start));
 724
 725	smp_wmb();
 726#endif
 727
 728	/* Now update the block status. */
 729
 730	BLOCK_STATUS(pbd1) = status;
 731
 732	/* Flush the block header */
 733
 734#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 735	start = (u8 *)pbd1;
 736	flush_dcache_page(pgv_to_page(start));
 737
 738	smp_wmb();
 739#endif
 740}
 741
 742/*
 743 * Side effect:
 744 *
 745 * 1) flush the block
 746 * 2) Increment active_blk_num
 747 *
 748 * Note:We DONT refresh the timer on purpose.
 749 *	Because almost always the next block will be opened.
 750 */
 751static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 752		struct tpacket_block_desc *pbd1,
 753		struct packet_sock *po, unsigned int stat)
 754{
 755	__u32 status = TP_STATUS_USER | stat;
 756
 757	struct tpacket3_hdr *last_pkt;
 758	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 759	struct sock *sk = &po->sk;
 760
 761	if (atomic_read(&po->tp_drops))
 762		status |= TP_STATUS_LOSING;
 763
 764	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 765	last_pkt->tp_next_offset = 0;
 766
 767	/* Get the ts of the last pkt */
 768	if (BLOCK_NUM_PKTS(pbd1)) {
 769		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 770		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 771	} else {
 772		/* Ok, we tmo'd - so get the current time.
 773		 *
 774		 * It shouldn't really happen as we don't close empty
 775		 * blocks. See prb_retire_rx_blk_timer_expired().
 776		 */
 777		struct timespec ts;
 778		getnstimeofday(&ts);
 779		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 780		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 781	}
 782
 783	smp_wmb();
 784
 785	/* Flush the block */
 786	prb_flush_block(pkc1, pbd1, status);
 787
 788	sk->sk_data_ready(sk);
 789
 790	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 791}
 792
 793static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 794{
 795	pkc->reset_pending_on_curr_blk = 0;
 796}
 797
 798/*
 799 * Side effect of opening a block:
 800 *
 801 * 1) prb_queue is thawed.
 802 * 2) retire_blk_timer is refreshed.
 803 *
 804 */
 805static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 806	struct tpacket_block_desc *pbd1)
 807{
 808	struct timespec ts;
 809	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 810
 811	smp_rmb();
 812
 813	/* We could have just memset this but we will lose the
 814	 * flexibility of making the priv area sticky
 815	 */
 816
 817	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 818	BLOCK_NUM_PKTS(pbd1) = 0;
 819	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 820
 821	getnstimeofday(&ts);
 822
 823	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 824	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 825
 826	pkc1->pkblk_start = (char *)pbd1;
 827	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 828
 829	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 830	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 831
 832	pbd1->version = pkc1->version;
 833	pkc1->prev = pkc1->nxt_offset;
 834	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 835
 836	prb_thaw_queue(pkc1);
 837	_prb_refresh_rx_retire_blk_timer(pkc1);
 838
 839	smp_wmb();
 840}
 841
 842/*
 843 * Queue freeze logic:
 844 * 1) Assume tp_block_nr = 8 blocks.
 845 * 2) At time 't0', user opens Rx ring.
 846 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 847 * 4) user-space is either sleeping or processing block '0'.
 848 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 849 *    it will close block-7,loop around and try to fill block '0'.
 850 *    call-flow:
 851 *    __packet_lookup_frame_in_block
 852 *      prb_retire_current_block()
 853 *      prb_dispatch_next_block()
 854 *        |->(BLOCK_STATUS == USER) evaluates to true
 855 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 856 * 6) Now there are two cases:
 857 *    6.1) Link goes idle right after the queue is frozen.
 858 *         But remember, the last open_block() refreshed the timer.
 859 *         When this timer expires,it will refresh itself so that we can
 860 *         re-open block-0 in near future.
 861 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 862 *         case and __packet_lookup_frame_in_block will check if block-0
 863 *         is free and can now be re-used.
 864 */
 865static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 866				  struct packet_sock *po)
 867{
 868	pkc->reset_pending_on_curr_blk = 1;
 869	po->stats.stats3.tp_freeze_q_cnt++;
 870}
 871
 872#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 873
 874/*
 875 * If the next block is free then we will dispatch it
 876 * and return a good offset.
 877 * Else, we will freeze the queue.
 878 * So, caller must check the return value.
 879 */
 880static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 881		struct packet_sock *po)
 882{
 883	struct tpacket_block_desc *pbd;
 884
 885	smp_rmb();
 886
 887	/* 1. Get current block num */
 888	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 889
 890	/* 2. If this block is currently in_use then freeze the queue */
 891	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 892		prb_freeze_queue(pkc, po);
 893		return NULL;
 894	}
 895
 896	/*
 897	 * 3.
 898	 * open this block and return the offset where the first packet
 899	 * needs to get stored.
 900	 */
 901	prb_open_block(pkc, pbd);
 902	return (void *)pkc->nxt_offset;
 903}
 904
 905static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 906		struct packet_sock *po, unsigned int status)
 907{
 908	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 909
 910	/* retire/close the current block */
 911	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 912		/*
 913		 * Plug the case where copy_bits() is in progress on
 914		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 915		 * have space to copy the pkt in the current block and
 916		 * called prb_retire_current_block()
 917		 *
 918		 * We don't need to worry about the TMO case because
 919		 * the timer-handler already handled this case.
 920		 */
 921		if (!(status & TP_STATUS_BLK_TMO)) {
 922			while (atomic_read(&pkc->blk_fill_in_prog)) {
 923				/* Waiting for skb_copy_bits to finish... */
 924				cpu_relax();
 925			}
 926		}
 927		prb_close_block(pkc, pbd, po, status);
 928		return;
 929	}
 930}
 931
 932static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
 933{
 934	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 935}
 936
 937static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 938{
 939	return pkc->reset_pending_on_curr_blk;
 940}
 941
 942static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 
 943{
 944	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 945	atomic_dec(&pkc->blk_fill_in_prog);
 
 946}
 947
 948static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 949			struct tpacket3_hdr *ppd)
 950{
 951	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 952}
 953
 954static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 955			struct tpacket3_hdr *ppd)
 956{
 957	ppd->hv1.tp_rxhash = 0;
 958}
 959
 960static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 961			struct tpacket3_hdr *ppd)
 962{
 963	if (skb_vlan_tag_present(pkc->skb)) {
 964		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
 965		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
 966		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
 967	} else {
 968		ppd->hv1.tp_vlan_tci = 0;
 969		ppd->hv1.tp_vlan_tpid = 0;
 970		ppd->tp_status = TP_STATUS_AVAILABLE;
 971	}
 972}
 973
 974static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 975			struct tpacket3_hdr *ppd)
 976{
 977	ppd->hv1.tp_padding = 0;
 978	prb_fill_vlan_info(pkc, ppd);
 979
 980	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 981		prb_fill_rxhash(pkc, ppd);
 982	else
 983		prb_clear_rxhash(pkc, ppd);
 984}
 985
 986static void prb_fill_curr_block(char *curr,
 987				struct tpacket_kbdq_core *pkc,
 988				struct tpacket_block_desc *pbd,
 989				unsigned int len)
 
 990{
 991	struct tpacket3_hdr *ppd;
 992
 993	ppd  = (struct tpacket3_hdr *)curr;
 994	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
 995	pkc->prev = curr;
 996	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
 997	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
 998	BLOCK_NUM_PKTS(pbd) += 1;
 999	atomic_inc(&pkc->blk_fill_in_prog);
1000	prb_run_all_ft_ops(pkc, ppd);
1001}
1002
1003/* Assumes caller has the sk->rx_queue.lock */
1004static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1005					    struct sk_buff *skb,
1006					    unsigned int len
1007					    )
1008{
1009	struct tpacket_kbdq_core *pkc;
1010	struct tpacket_block_desc *pbd;
1011	char *curr, *end;
1012
1013	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1014	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1015
1016	/* Queue is frozen when user space is lagging behind */
1017	if (prb_queue_frozen(pkc)) {
1018		/*
1019		 * Check if that last block which caused the queue to freeze,
1020		 * is still in_use by user-space.
1021		 */
1022		if (prb_curr_blk_in_use(pbd)) {
1023			/* Can't record this packet */
1024			return NULL;
1025		} else {
1026			/*
1027			 * Ok, the block was released by user-space.
1028			 * Now let's open that block.
1029			 * opening a block also thaws the queue.
1030			 * Thawing is a side effect.
1031			 */
1032			prb_open_block(pkc, pbd);
1033		}
1034	}
1035
1036	smp_mb();
1037	curr = pkc->nxt_offset;
1038	pkc->skb = skb;
1039	end = (char *)pbd + pkc->kblk_size;
1040
1041	/* first try the current block */
1042	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1043		prb_fill_curr_block(curr, pkc, pbd, len);
1044		return (void *)curr;
1045	}
1046
1047	/* Ok, close the current block */
1048	prb_retire_current_block(pkc, po, 0);
1049
1050	/* Now, try to dispatch the next block */
1051	curr = (char *)prb_dispatch_next_block(pkc, po);
1052	if (curr) {
1053		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1054		prb_fill_curr_block(curr, pkc, pbd, len);
1055		return (void *)curr;
1056	}
1057
1058	/*
1059	 * No free blocks are available.user_space hasn't caught up yet.
1060	 * Queue was just frozen and now this packet will get dropped.
1061	 */
1062	return NULL;
1063}
1064
1065static void *packet_current_rx_frame(struct packet_sock *po,
1066					    struct sk_buff *skb,
1067					    int status, unsigned int len)
1068{
1069	char *curr = NULL;
1070	switch (po->tp_version) {
1071	case TPACKET_V1:
1072	case TPACKET_V2:
1073		curr = packet_lookup_frame(po, &po->rx_ring,
1074					po->rx_ring.head, status);
1075		return curr;
1076	case TPACKET_V3:
1077		return __packet_lookup_frame_in_block(po, skb, len);
1078	default:
1079		WARN(1, "TPACKET version not supported\n");
1080		BUG();
1081		return NULL;
1082	}
1083}
1084
1085static void *prb_lookup_block(const struct packet_sock *po,
1086			      const struct packet_ring_buffer *rb,
1087			      unsigned int idx,
1088			      int status)
1089{
1090	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1091	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1092
1093	if (status != BLOCK_STATUS(pbd))
1094		return NULL;
1095	return pbd;
1096}
1097
1098static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1099{
1100	unsigned int prev;
1101	if (rb->prb_bdqc.kactive_blk_num)
1102		prev = rb->prb_bdqc.kactive_blk_num-1;
1103	else
1104		prev = rb->prb_bdqc.knum_blocks-1;
1105	return prev;
1106}
1107
1108/* Assumes caller has held the rx_queue.lock */
1109static void *__prb_previous_block(struct packet_sock *po,
1110					 struct packet_ring_buffer *rb,
1111					 int status)
1112{
1113	unsigned int previous = prb_previous_blk_num(rb);
1114	return prb_lookup_block(po, rb, previous, status);
1115}
1116
1117static void *packet_previous_rx_frame(struct packet_sock *po,
1118					     struct packet_ring_buffer *rb,
1119					     int status)
1120{
1121	if (po->tp_version <= TPACKET_V2)
1122		return packet_previous_frame(po, rb, status);
1123
1124	return __prb_previous_block(po, rb, status);
1125}
1126
1127static void packet_increment_rx_head(struct packet_sock *po,
1128					    struct packet_ring_buffer *rb)
1129{
1130	switch (po->tp_version) {
1131	case TPACKET_V1:
1132	case TPACKET_V2:
1133		return packet_increment_head(rb);
1134	case TPACKET_V3:
1135	default:
1136		WARN(1, "TPACKET version not supported.\n");
1137		BUG();
1138		return;
1139	}
1140}
1141
1142static void *packet_previous_frame(struct packet_sock *po,
1143		struct packet_ring_buffer *rb,
1144		int status)
1145{
1146	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1147	return packet_lookup_frame(po, rb, previous, status);
1148}
1149
1150static void packet_increment_head(struct packet_ring_buffer *buff)
1151{
1152	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1153}
1154
1155static void packet_inc_pending(struct packet_ring_buffer *rb)
1156{
1157	this_cpu_inc(*rb->pending_refcnt);
1158}
1159
1160static void packet_dec_pending(struct packet_ring_buffer *rb)
1161{
1162	this_cpu_dec(*rb->pending_refcnt);
1163}
1164
1165static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1166{
1167	unsigned int refcnt = 0;
1168	int cpu;
1169
1170	/* We don't use pending refcount in rx_ring. */
1171	if (rb->pending_refcnt == NULL)
1172		return 0;
1173
1174	for_each_possible_cpu(cpu)
1175		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1176
1177	return refcnt;
1178}
1179
1180static int packet_alloc_pending(struct packet_sock *po)
1181{
1182	po->rx_ring.pending_refcnt = NULL;
1183
1184	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1185	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1186		return -ENOBUFS;
1187
1188	return 0;
1189}
1190
1191static void packet_free_pending(struct packet_sock *po)
1192{
1193	free_percpu(po->tx_ring.pending_refcnt);
1194}
1195
1196#define ROOM_POW_OFF	2
1197#define ROOM_NONE	0x0
1198#define ROOM_LOW	0x1
1199#define ROOM_NORMAL	0x2
1200
1201static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1202{
1203	int idx, len;
1204
1205	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1206	idx = READ_ONCE(po->rx_ring.head);
1207	if (pow_off)
1208		idx += len >> pow_off;
1209	if (idx >= len)
1210		idx -= len;
1211	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1212}
1213
1214static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1215{
1216	int idx, len;
1217
1218	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1219	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1220	if (pow_off)
1221		idx += len >> pow_off;
1222	if (idx >= len)
1223		idx -= len;
1224	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1225}
1226
1227static int __packet_rcv_has_room(const struct packet_sock *po,
1228				 const struct sk_buff *skb)
1229{
1230	const struct sock *sk = &po->sk;
1231	int ret = ROOM_NONE;
1232
1233	if (po->prot_hook.func != tpacket_rcv) {
1234		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1235		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1236				   - (skb ? skb->truesize : 0);
1237
1238		if (avail > (rcvbuf >> ROOM_POW_OFF))
1239			return ROOM_NORMAL;
1240		else if (avail > 0)
1241			return ROOM_LOW;
1242		else
1243			return ROOM_NONE;
1244	}
1245
1246	if (po->tp_version == TPACKET_V3) {
1247		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1248			ret = ROOM_NORMAL;
1249		else if (__tpacket_v3_has_room(po, 0))
1250			ret = ROOM_LOW;
1251	} else {
1252		if (__tpacket_has_room(po, ROOM_POW_OFF))
1253			ret = ROOM_NORMAL;
1254		else if (__tpacket_has_room(po, 0))
1255			ret = ROOM_LOW;
1256	}
1257
1258	return ret;
1259}
1260
1261static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1262{
1263	int pressure, ret;
1264
1265	ret = __packet_rcv_has_room(po, skb);
1266	pressure = ret != ROOM_NORMAL;
1267
1268	if (READ_ONCE(po->pressure) != pressure)
1269		WRITE_ONCE(po->pressure, pressure);
1270
1271	return ret;
1272}
1273
1274static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1275{
1276	if (READ_ONCE(po->pressure) &&
1277	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1278		WRITE_ONCE(po->pressure,  0);
1279}
1280
1281static void packet_sock_destruct(struct sock *sk)
1282{
1283	skb_queue_purge(&sk->sk_error_queue);
1284
1285	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1286	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1287
1288	if (!sock_flag(sk, SOCK_DEAD)) {
1289		pr_err("Attempt to release alive packet socket: %p\n", sk);
1290		return;
1291	}
1292
1293	sk_refcnt_debug_dec(sk);
1294}
1295
1296static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1297{
1298	u32 rxhash;
 
1299	int i, count = 0;
1300
1301	rxhash = skb_get_hash(skb);
1302	for (i = 0; i < ROLLOVER_HLEN; i++)
1303		if (po->rollover->history[i] == rxhash)
1304			count++;
1305
1306	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
 
 
 
 
 
1307	return count > (ROLLOVER_HLEN >> 1);
1308}
1309
1310static unsigned int fanout_demux_hash(struct packet_fanout *f,
1311				      struct sk_buff *skb,
1312				      unsigned int num)
1313{
1314	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1315}
1316
1317static unsigned int fanout_demux_lb(struct packet_fanout *f,
1318				    struct sk_buff *skb,
1319				    unsigned int num)
1320{
1321	unsigned int val = atomic_inc_return(&f->rr_cur);
1322
1323	return val % num;
1324}
1325
1326static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1327				     struct sk_buff *skb,
1328				     unsigned int num)
1329{
1330	return smp_processor_id() % num;
1331}
1332
1333static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1334				     struct sk_buff *skb,
1335				     unsigned int num)
1336{
1337	return prandom_u32_max(num);
1338}
1339
1340static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1341					  struct sk_buff *skb,
1342					  unsigned int idx, bool try_self,
1343					  unsigned int num)
1344{
1345	struct packet_sock *po, *po_next, *po_skip = NULL;
1346	unsigned int i, j, room = ROOM_NONE;
1347
1348	po = pkt_sk(f->arr[idx]);
1349
1350	if (try_self) {
1351		room = packet_rcv_has_room(po, skb);
1352		if (room == ROOM_NORMAL ||
1353		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1354			return idx;
1355		po_skip = po;
1356	}
1357
1358	i = j = min_t(int, po->rollover->sock, num - 1);
1359	do {
1360		po_next = pkt_sk(f->arr[i]);
1361		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1362		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1363			if (i != j)
1364				po->rollover->sock = i;
1365			atomic_long_inc(&po->rollover->num);
1366			if (room == ROOM_LOW)
1367				atomic_long_inc(&po->rollover->num_huge);
1368			return i;
1369		}
1370
1371		if (++i == num)
1372			i = 0;
1373	} while (i != j);
1374
1375	atomic_long_inc(&po->rollover->num_failed);
1376	return idx;
1377}
1378
1379static unsigned int fanout_demux_qm(struct packet_fanout *f,
1380				    struct sk_buff *skb,
1381				    unsigned int num)
1382{
1383	return skb_get_queue_mapping(skb) % num;
1384}
1385
1386static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1387				     struct sk_buff *skb,
1388				     unsigned int num)
1389{
1390	struct bpf_prog *prog;
1391	unsigned int ret = 0;
1392
1393	rcu_read_lock();
1394	prog = rcu_dereference(f->bpf_prog);
1395	if (prog)
1396		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1397	rcu_read_unlock();
1398
1399	return ret;
1400}
1401
1402static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1403{
1404	return f->flags & (flag >> 8);
1405}
1406
1407static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1408			     struct packet_type *pt, struct net_device *orig_dev)
1409{
1410	struct packet_fanout *f = pt->af_packet_priv;
1411	unsigned int num = READ_ONCE(f->num_members);
1412	struct net *net = read_pnet(&f->net);
1413	struct packet_sock *po;
1414	unsigned int idx;
1415
1416	if (!net_eq(dev_net(dev), net) || !num) {
1417		kfree_skb(skb);
1418		return 0;
1419	}
1420
1421	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1422		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1423		if (!skb)
1424			return 0;
1425	}
1426	switch (f->type) {
1427	case PACKET_FANOUT_HASH:
1428	default:
1429		idx = fanout_demux_hash(f, skb, num);
1430		break;
1431	case PACKET_FANOUT_LB:
1432		idx = fanout_demux_lb(f, skb, num);
1433		break;
1434	case PACKET_FANOUT_CPU:
1435		idx = fanout_demux_cpu(f, skb, num);
1436		break;
1437	case PACKET_FANOUT_RND:
1438		idx = fanout_demux_rnd(f, skb, num);
1439		break;
1440	case PACKET_FANOUT_QM:
1441		idx = fanout_demux_qm(f, skb, num);
1442		break;
1443	case PACKET_FANOUT_ROLLOVER:
1444		idx = fanout_demux_rollover(f, skb, 0, false, num);
1445		break;
1446	case PACKET_FANOUT_CBPF:
1447	case PACKET_FANOUT_EBPF:
1448		idx = fanout_demux_bpf(f, skb, num);
1449		break;
1450	}
1451
1452	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1453		idx = fanout_demux_rollover(f, skb, idx, true, num);
1454
1455	po = pkt_sk(f->arr[idx]);
1456	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1457}
1458
1459DEFINE_MUTEX(fanout_mutex);
1460EXPORT_SYMBOL_GPL(fanout_mutex);
1461static LIST_HEAD(fanout_list);
1462static u16 fanout_next_id;
1463
1464static void __fanout_link(struct sock *sk, struct packet_sock *po)
1465{
1466	struct packet_fanout *f = po->fanout;
1467
1468	spin_lock(&f->lock);
1469	f->arr[f->num_members] = sk;
1470	smp_wmb();
1471	f->num_members++;
1472	if (f->num_members == 1)
1473		dev_add_pack(&f->prot_hook);
1474	spin_unlock(&f->lock);
1475}
1476
1477static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1478{
1479	struct packet_fanout *f = po->fanout;
1480	int i;
1481
1482	spin_lock(&f->lock);
1483	for (i = 0; i < f->num_members; i++) {
1484		if (f->arr[i] == sk)
1485			break;
1486	}
1487	BUG_ON(i >= f->num_members);
1488	f->arr[i] = f->arr[f->num_members - 1];
1489	f->num_members--;
1490	if (f->num_members == 0)
1491		__dev_remove_pack(&f->prot_hook);
1492	spin_unlock(&f->lock);
1493}
1494
1495static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1496{
1497	if (sk->sk_family != PF_PACKET)
1498		return false;
1499
1500	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1501}
1502
1503static void fanout_init_data(struct packet_fanout *f)
1504{
1505	switch (f->type) {
1506	case PACKET_FANOUT_LB:
1507		atomic_set(&f->rr_cur, 0);
1508		break;
1509	case PACKET_FANOUT_CBPF:
1510	case PACKET_FANOUT_EBPF:
1511		RCU_INIT_POINTER(f->bpf_prog, NULL);
1512		break;
1513	}
1514}
1515
1516static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1517{
1518	struct bpf_prog *old;
1519
1520	spin_lock(&f->lock);
1521	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1522	rcu_assign_pointer(f->bpf_prog, new);
1523	spin_unlock(&f->lock);
1524
1525	if (old) {
1526		synchronize_net();
1527		bpf_prog_destroy(old);
1528	}
1529}
1530
1531static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1532				unsigned int len)
1533{
1534	struct bpf_prog *new;
1535	struct sock_fprog fprog;
1536	int ret;
1537
1538	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1539		return -EPERM;
1540	if (len != sizeof(fprog))
1541		return -EINVAL;
1542	if (copy_from_user(&fprog, data, len))
1543		return -EFAULT;
1544
1545	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1546	if (ret)
1547		return ret;
1548
1549	__fanout_set_data_bpf(po->fanout, new);
1550	return 0;
1551}
1552
1553static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1554				unsigned int len)
1555{
1556	struct bpf_prog *new;
1557	u32 fd;
1558
1559	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1560		return -EPERM;
1561	if (len != sizeof(fd))
1562		return -EINVAL;
1563	if (copy_from_user(&fd, data, len))
1564		return -EFAULT;
1565
1566	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1567	if (IS_ERR(new))
1568		return PTR_ERR(new);
1569
1570	__fanout_set_data_bpf(po->fanout, new);
1571	return 0;
1572}
1573
1574static int fanout_set_data(struct packet_sock *po, char __user *data,
1575			   unsigned int len)
1576{
1577	switch (po->fanout->type) {
1578	case PACKET_FANOUT_CBPF:
1579		return fanout_set_data_cbpf(po, data, len);
1580	case PACKET_FANOUT_EBPF:
1581		return fanout_set_data_ebpf(po, data, len);
1582	default:
1583		return -EINVAL;
1584	}
1585}
1586
1587static void fanout_release_data(struct packet_fanout *f)
1588{
1589	switch (f->type) {
1590	case PACKET_FANOUT_CBPF:
1591	case PACKET_FANOUT_EBPF:
1592		__fanout_set_data_bpf(f, NULL);
1593	}
1594}
1595
1596static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1597{
1598	struct packet_fanout *f;
1599
1600	list_for_each_entry(f, &fanout_list, list) {
1601		if (f->id == candidate_id &&
1602		    read_pnet(&f->net) == sock_net(sk)) {
1603			return false;
1604		}
1605	}
1606	return true;
1607}
1608
1609static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1610{
1611	u16 id = fanout_next_id;
1612
1613	do {
1614		if (__fanout_id_is_free(sk, id)) {
1615			*new_id = id;
1616			fanout_next_id = id + 1;
1617			return true;
1618		}
1619
1620		id++;
1621	} while (id != fanout_next_id);
1622
1623	return false;
1624}
1625
1626static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1627{
1628	struct packet_rollover *rollover = NULL;
1629	struct packet_sock *po = pkt_sk(sk);
1630	struct packet_fanout *f, *match;
1631	u8 type = type_flags & 0xff;
1632	u8 flags = type_flags >> 8;
1633	int err;
1634
1635	switch (type) {
1636	case PACKET_FANOUT_ROLLOVER:
1637		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1638			return -EINVAL;
1639	case PACKET_FANOUT_HASH:
1640	case PACKET_FANOUT_LB:
1641	case PACKET_FANOUT_CPU:
1642	case PACKET_FANOUT_RND:
1643	case PACKET_FANOUT_QM:
1644	case PACKET_FANOUT_CBPF:
1645	case PACKET_FANOUT_EBPF:
1646		break;
1647	default:
1648		return -EINVAL;
1649	}
1650
1651	mutex_lock(&fanout_mutex);
1652
1653	err = -EALREADY;
1654	if (po->fanout)
1655		goto out;
1656
1657	if (type == PACKET_FANOUT_ROLLOVER ||
1658	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1659		err = -ENOMEM;
1660		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1661		if (!rollover)
1662			goto out;
1663		atomic_long_set(&rollover->num, 0);
1664		atomic_long_set(&rollover->num_huge, 0);
1665		atomic_long_set(&rollover->num_failed, 0);
1666	}
1667
1668	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1669		if (id != 0) {
1670			err = -EINVAL;
1671			goto out;
1672		}
1673		if (!fanout_find_new_id(sk, &id)) {
1674			err = -ENOMEM;
1675			goto out;
1676		}
1677		/* ephemeral flag for the first socket in the group: drop it */
1678		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1679	}
1680
1681	match = NULL;
1682	list_for_each_entry(f, &fanout_list, list) {
1683		if (f->id == id &&
1684		    read_pnet(&f->net) == sock_net(sk)) {
1685			match = f;
1686			break;
1687		}
1688	}
1689	err = -EINVAL;
1690	if (match && match->flags != flags)
1691		goto out;
1692	if (!match) {
1693		err = -ENOMEM;
1694		match = kzalloc(sizeof(*match), GFP_KERNEL);
1695		if (!match)
1696			goto out;
1697		write_pnet(&match->net, sock_net(sk));
1698		match->id = id;
1699		match->type = type;
1700		match->flags = flags;
1701		INIT_LIST_HEAD(&match->list);
1702		spin_lock_init(&match->lock);
1703		refcount_set(&match->sk_ref, 0);
1704		fanout_init_data(match);
1705		match->prot_hook.type = po->prot_hook.type;
1706		match->prot_hook.dev = po->prot_hook.dev;
1707		match->prot_hook.func = packet_rcv_fanout;
1708		match->prot_hook.af_packet_priv = match;
1709		match->prot_hook.id_match = match_fanout_group;
1710		list_add(&match->list, &fanout_list);
1711	}
1712	err = -EINVAL;
1713
1714	spin_lock(&po->bind_lock);
1715	if (po->running &&
1716	    match->type == type &&
1717	    match->prot_hook.type == po->prot_hook.type &&
1718	    match->prot_hook.dev == po->prot_hook.dev) {
1719		err = -ENOSPC;
1720		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1721			__dev_remove_pack(&po->prot_hook);
1722			po->fanout = match;
1723			po->rollover = rollover;
1724			rollover = NULL;
1725			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1726			__fanout_link(sk, po);
1727			err = 0;
1728		}
1729	}
1730	spin_unlock(&po->bind_lock);
1731
1732	if (err && !refcount_read(&match->sk_ref)) {
1733		list_del(&match->list);
1734		kfree(match);
1735	}
1736
1737out:
1738	kfree(rollover);
1739	mutex_unlock(&fanout_mutex);
1740	return err;
1741}
1742
1743/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1744 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1745 * It is the responsibility of the caller to call fanout_release_data() and
1746 * free the returned packet_fanout (after synchronize_net())
1747 */
1748static struct packet_fanout *fanout_release(struct sock *sk)
1749{
1750	struct packet_sock *po = pkt_sk(sk);
1751	struct packet_fanout *f;
1752
1753	mutex_lock(&fanout_mutex);
1754	f = po->fanout;
1755	if (f) {
1756		po->fanout = NULL;
1757
1758		if (refcount_dec_and_test(&f->sk_ref))
1759			list_del(&f->list);
1760		else
1761			f = NULL;
1762	}
1763	mutex_unlock(&fanout_mutex);
1764
1765	return f;
1766}
1767
1768static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1769					  struct sk_buff *skb)
1770{
1771	/* Earlier code assumed this would be a VLAN pkt, double-check
1772	 * this now that we have the actual packet in hand. We can only
1773	 * do this check on Ethernet devices.
1774	 */
1775	if (unlikely(dev->type != ARPHRD_ETHER))
1776		return false;
1777
1778	skb_reset_mac_header(skb);
1779	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1780}
1781
1782static const struct proto_ops packet_ops;
1783
1784static const struct proto_ops packet_ops_spkt;
1785
1786static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1787			   struct packet_type *pt, struct net_device *orig_dev)
1788{
1789	struct sock *sk;
1790	struct sockaddr_pkt *spkt;
1791
1792	/*
1793	 *	When we registered the protocol we saved the socket in the data
1794	 *	field for just this event.
1795	 */
1796
1797	sk = pt->af_packet_priv;
1798
1799	/*
1800	 *	Yank back the headers [hope the device set this
1801	 *	right or kerboom...]
1802	 *
1803	 *	Incoming packets have ll header pulled,
1804	 *	push it back.
1805	 *
1806	 *	For outgoing ones skb->data == skb_mac_header(skb)
1807	 *	so that this procedure is noop.
1808	 */
1809
1810	if (skb->pkt_type == PACKET_LOOPBACK)
1811		goto out;
1812
1813	if (!net_eq(dev_net(dev), sock_net(sk)))
1814		goto out;
1815
1816	skb = skb_share_check(skb, GFP_ATOMIC);
1817	if (skb == NULL)
1818		goto oom;
1819
1820	/* drop any routing info */
1821	skb_dst_drop(skb);
1822
1823	/* drop conntrack reference */
1824	nf_reset_ct(skb);
1825
1826	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1827
1828	skb_push(skb, skb->data - skb_mac_header(skb));
1829
1830	/*
1831	 *	The SOCK_PACKET socket receives _all_ frames.
1832	 */
1833
1834	spkt->spkt_family = dev->type;
1835	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1836	spkt->spkt_protocol = skb->protocol;
1837
1838	/*
1839	 *	Charge the memory to the socket. This is done specifically
1840	 *	to prevent sockets using all the memory up.
1841	 */
1842
1843	if (sock_queue_rcv_skb(sk, skb) == 0)
1844		return 0;
1845
1846out:
1847	kfree_skb(skb);
1848oom:
1849	return 0;
1850}
1851
1852static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1853{
1854	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1855	    sock->type == SOCK_RAW) {
1856		skb_reset_mac_header(skb);
1857		skb->protocol = dev_parse_header_protocol(skb);
1858	}
1859
1860	skb_probe_transport_header(skb);
1861}
1862
1863/*
1864 *	Output a raw packet to a device layer. This bypasses all the other
1865 *	protocol layers and you must therefore supply it with a complete frame
1866 */
1867
1868static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1869			       size_t len)
1870{
1871	struct sock *sk = sock->sk;
1872	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1873	struct sk_buff *skb = NULL;
1874	struct net_device *dev;
1875	struct sockcm_cookie sockc;
1876	__be16 proto = 0;
1877	int err;
1878	int extra_len = 0;
1879
1880	/*
1881	 *	Get and verify the address.
1882	 */
1883
1884	if (saddr) {
1885		if (msg->msg_namelen < sizeof(struct sockaddr))
1886			return -EINVAL;
1887		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1888			proto = saddr->spkt_protocol;
1889	} else
1890		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1891
1892	/*
1893	 *	Find the device first to size check it
1894	 */
1895
1896	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1897retry:
1898	rcu_read_lock();
1899	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1900	err = -ENODEV;
1901	if (dev == NULL)
1902		goto out_unlock;
1903
1904	err = -ENETDOWN;
1905	if (!(dev->flags & IFF_UP))
1906		goto out_unlock;
1907
1908	/*
1909	 * You may not queue a frame bigger than the mtu. This is the lowest level
1910	 * raw protocol and you must do your own fragmentation at this level.
1911	 */
1912
1913	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1914		if (!netif_supports_nofcs(dev)) {
1915			err = -EPROTONOSUPPORT;
1916			goto out_unlock;
1917		}
1918		extra_len = 4; /* We're doing our own CRC */
1919	}
1920
1921	err = -EMSGSIZE;
1922	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1923		goto out_unlock;
1924
1925	if (!skb) {
1926		size_t reserved = LL_RESERVED_SPACE(dev);
1927		int tlen = dev->needed_tailroom;
1928		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1929
1930		rcu_read_unlock();
1931		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1932		if (skb == NULL)
1933			return -ENOBUFS;
1934		/* FIXME: Save some space for broken drivers that write a hard
1935		 * header at transmission time by themselves. PPP is the notable
1936		 * one here. This should really be fixed at the driver level.
1937		 */
1938		skb_reserve(skb, reserved);
1939		skb_reset_network_header(skb);
1940
1941		/* Try to align data part correctly */
1942		if (hhlen) {
1943			skb->data -= hhlen;
1944			skb->tail -= hhlen;
1945			if (len < hhlen)
1946				skb_reset_network_header(skb);
1947		}
1948		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1949		if (err)
1950			goto out_free;
1951		goto retry;
1952	}
1953
1954	if (!dev_validate_header(dev, skb->data, len)) {
1955		err = -EINVAL;
1956		goto out_unlock;
1957	}
1958	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1959	    !packet_extra_vlan_len_allowed(dev, skb)) {
1960		err = -EMSGSIZE;
1961		goto out_unlock;
1962	}
1963
1964	sockcm_init(&sockc, sk);
1965	if (msg->msg_controllen) {
1966		err = sock_cmsg_send(sk, msg, &sockc);
1967		if (unlikely(err))
1968			goto out_unlock;
1969	}
1970
1971	skb->protocol = proto;
1972	skb->dev = dev;
1973	skb->priority = sk->sk_priority;
1974	skb->mark = sk->sk_mark;
1975	skb->tstamp = sockc.transmit_time;
1976
1977	skb_setup_tx_timestamp(skb, sockc.tsflags);
1978
1979	if (unlikely(extra_len == 4))
1980		skb->no_fcs = 1;
1981
1982	packet_parse_headers(skb, sock);
1983
1984	dev_queue_xmit(skb);
1985	rcu_read_unlock();
1986	return len;
1987
1988out_unlock:
1989	rcu_read_unlock();
1990out_free:
1991	kfree_skb(skb);
1992	return err;
1993}
1994
1995static unsigned int run_filter(struct sk_buff *skb,
1996			       const struct sock *sk,
1997			       unsigned int res)
1998{
1999	struct sk_filter *filter;
2000
2001	rcu_read_lock();
2002	filter = rcu_dereference(sk->sk_filter);
2003	if (filter != NULL)
2004		res = bpf_prog_run_clear_cb(filter->prog, skb);
2005	rcu_read_unlock();
2006
2007	return res;
2008}
2009
2010static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2011			   size_t *len)
2012{
2013	struct virtio_net_hdr vnet_hdr;
2014
2015	if (*len < sizeof(vnet_hdr))
2016		return -EINVAL;
2017	*len -= sizeof(vnet_hdr);
2018
2019	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2020		return -EINVAL;
2021
2022	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2023}
2024
2025/*
2026 * This function makes lazy skb cloning in hope that most of packets
2027 * are discarded by BPF.
2028 *
2029 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2030 * and skb->cb are mangled. It works because (and until) packets
2031 * falling here are owned by current CPU. Output packets are cloned
2032 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2033 * sequencially, so that if we return skb to original state on exit,
2034 * we will not harm anyone.
2035 */
2036
2037static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2038		      struct packet_type *pt, struct net_device *orig_dev)
2039{
2040	struct sock *sk;
2041	struct sockaddr_ll *sll;
2042	struct packet_sock *po;
2043	u8 *skb_head = skb->data;
2044	int skb_len = skb->len;
2045	unsigned int snaplen, res;
2046	bool is_drop_n_account = false;
2047
2048	if (skb->pkt_type == PACKET_LOOPBACK)
2049		goto drop;
2050
2051	sk = pt->af_packet_priv;
2052	po = pkt_sk(sk);
2053
2054	if (!net_eq(dev_net(dev), sock_net(sk)))
2055		goto drop;
2056
2057	skb->dev = dev;
2058
2059	if (dev->header_ops) {
2060		/* The device has an explicit notion of ll header,
2061		 * exported to higher levels.
2062		 *
2063		 * Otherwise, the device hides details of its frame
2064		 * structure, so that corresponding packet head is
2065		 * never delivered to user.
2066		 */
2067		if (sk->sk_type != SOCK_DGRAM)
2068			skb_push(skb, skb->data - skb_mac_header(skb));
2069		else if (skb->pkt_type == PACKET_OUTGOING) {
2070			/* Special case: outgoing packets have ll header at head */
2071			skb_pull(skb, skb_network_offset(skb));
2072		}
2073	}
2074
2075	snaplen = skb->len;
2076
2077	res = run_filter(skb, sk, snaplen);
2078	if (!res)
2079		goto drop_n_restore;
2080	if (snaplen > res)
2081		snaplen = res;
2082
2083	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2084		goto drop_n_acct;
2085
2086	if (skb_shared(skb)) {
2087		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2088		if (nskb == NULL)
2089			goto drop_n_acct;
2090
2091		if (skb_head != skb->data) {
2092			skb->data = skb_head;
2093			skb->len = skb_len;
2094		}
2095		consume_skb(skb);
2096		skb = nskb;
2097	}
2098
2099	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2100
2101	sll = &PACKET_SKB_CB(skb)->sa.ll;
2102	sll->sll_hatype = dev->type;
2103	sll->sll_pkttype = skb->pkt_type;
2104	if (unlikely(po->origdev))
2105		sll->sll_ifindex = orig_dev->ifindex;
2106	else
2107		sll->sll_ifindex = dev->ifindex;
2108
2109	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2110
2111	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2112	 * Use their space for storing the original skb length.
2113	 */
2114	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2115
2116	if (pskb_trim(skb, snaplen))
2117		goto drop_n_acct;
2118
2119	skb_set_owner_r(skb, sk);
2120	skb->dev = NULL;
2121	skb_dst_drop(skb);
2122
2123	/* drop conntrack reference */
2124	nf_reset_ct(skb);
2125
2126	spin_lock(&sk->sk_receive_queue.lock);
2127	po->stats.stats1.tp_packets++;
2128	sock_skb_set_dropcount(sk, skb);
2129	__skb_queue_tail(&sk->sk_receive_queue, skb);
2130	spin_unlock(&sk->sk_receive_queue.lock);
2131	sk->sk_data_ready(sk);
2132	return 0;
2133
2134drop_n_acct:
2135	is_drop_n_account = true;
2136	atomic_inc(&po->tp_drops);
2137	atomic_inc(&sk->sk_drops);
2138
2139drop_n_restore:
2140	if (skb_head != skb->data && skb_shared(skb)) {
2141		skb->data = skb_head;
2142		skb->len = skb_len;
2143	}
2144drop:
2145	if (!is_drop_n_account)
2146		consume_skb(skb);
2147	else
2148		kfree_skb(skb);
2149	return 0;
2150}
2151
2152static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2153		       struct packet_type *pt, struct net_device *orig_dev)
2154{
2155	struct sock *sk;
2156	struct packet_sock *po;
2157	struct sockaddr_ll *sll;
2158	union tpacket_uhdr h;
2159	u8 *skb_head = skb->data;
2160	int skb_len = skb->len;
2161	unsigned int snaplen, res;
2162	unsigned long status = TP_STATUS_USER;
2163	unsigned short macoff, netoff, hdrlen;
 
2164	struct sk_buff *copy_skb = NULL;
2165	struct timespec ts;
2166	__u32 ts_status;
2167	bool is_drop_n_account = false;
 
2168	bool do_vnet = false;
2169
2170	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2171	 * We may add members to them until current aligned size without forcing
2172	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2173	 */
2174	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2175	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2176
2177	if (skb->pkt_type == PACKET_LOOPBACK)
2178		goto drop;
2179
2180	sk = pt->af_packet_priv;
2181	po = pkt_sk(sk);
2182
2183	if (!net_eq(dev_net(dev), sock_net(sk)))
2184		goto drop;
2185
2186	if (dev->header_ops) {
2187		if (sk->sk_type != SOCK_DGRAM)
2188			skb_push(skb, skb->data - skb_mac_header(skb));
2189		else if (skb->pkt_type == PACKET_OUTGOING) {
2190			/* Special case: outgoing packets have ll header at head */
2191			skb_pull(skb, skb_network_offset(skb));
2192		}
2193	}
2194
2195	snaplen = skb->len;
2196
2197	res = run_filter(skb, sk, snaplen);
2198	if (!res)
2199		goto drop_n_restore;
2200
2201	/* If we are flooded, just give up */
2202	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2203		atomic_inc(&po->tp_drops);
2204		goto drop_n_restore;
2205	}
2206
2207	if (skb->ip_summed == CHECKSUM_PARTIAL)
2208		status |= TP_STATUS_CSUMNOTREADY;
2209	else if (skb->pkt_type != PACKET_OUTGOING &&
2210		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2211		  skb_csum_unnecessary(skb)))
2212		status |= TP_STATUS_CSUM_VALID;
2213
2214	if (snaplen > res)
2215		snaplen = res;
2216
2217	if (sk->sk_type == SOCK_DGRAM) {
2218		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2219				  po->tp_reserve;
2220	} else {
2221		unsigned int maclen = skb_network_offset(skb);
2222		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2223				       (maclen < 16 ? 16 : maclen)) +
2224				       po->tp_reserve;
2225		if (po->has_vnet_hdr) {
2226			netoff += sizeof(struct virtio_net_hdr);
2227			do_vnet = true;
2228		}
2229		macoff = netoff - maclen;
2230	}
 
 
 
 
2231	if (po->tp_version <= TPACKET_V2) {
2232		if (macoff + snaplen > po->rx_ring.frame_size) {
2233			if (po->copy_thresh &&
2234			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2235				if (skb_shared(skb)) {
2236					copy_skb = skb_clone(skb, GFP_ATOMIC);
2237				} else {
2238					copy_skb = skb_get(skb);
2239					skb_head = skb->data;
2240				}
2241				if (copy_skb)
2242					skb_set_owner_r(copy_skb, sk);
2243			}
2244			snaplen = po->rx_ring.frame_size - macoff;
2245			if ((int)snaplen < 0) {
2246				snaplen = 0;
2247				do_vnet = false;
2248			}
2249		}
2250	} else if (unlikely(macoff + snaplen >
2251			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2252		u32 nval;
2253
2254		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2255		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2256			    snaplen, nval, macoff);
2257		snaplen = nval;
2258		if (unlikely((int)snaplen < 0)) {
2259			snaplen = 0;
2260			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2261			do_vnet = false;
2262		}
2263	}
2264	spin_lock(&sk->sk_receive_queue.lock);
2265	h.raw = packet_current_rx_frame(po, skb,
2266					TP_STATUS_KERNEL, (macoff+snaplen));
2267	if (!h.raw)
2268		goto drop_n_account;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2269	if (po->tp_version <= TPACKET_V2) {
2270		packet_increment_rx_head(po, &po->rx_ring);
2271	/*
2272	 * LOSING will be reported till you read the stats,
2273	 * because it's COR - Clear On Read.
2274	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2275	 * at packet level.
2276	 */
2277		if (atomic_read(&po->tp_drops))
2278			status |= TP_STATUS_LOSING;
2279	}
2280
2281	if (do_vnet &&
2282	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2283				    sizeof(struct virtio_net_hdr),
2284				    vio_le(), true, 0))
2285		goto drop_n_account;
2286
2287	po->stats.stats1.tp_packets++;
2288	if (copy_skb) {
2289		status |= TP_STATUS_COPY;
2290		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2291	}
2292	spin_unlock(&sk->sk_receive_queue.lock);
2293
2294	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2295
2296	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2297		getnstimeofday(&ts);
2298
2299	status |= ts_status;
2300
2301	switch (po->tp_version) {
2302	case TPACKET_V1:
2303		h.h1->tp_len = skb->len;
2304		h.h1->tp_snaplen = snaplen;
2305		h.h1->tp_mac = macoff;
2306		h.h1->tp_net = netoff;
2307		h.h1->tp_sec = ts.tv_sec;
2308		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2309		hdrlen = sizeof(*h.h1);
2310		break;
2311	case TPACKET_V2:
2312		h.h2->tp_len = skb->len;
2313		h.h2->tp_snaplen = snaplen;
2314		h.h2->tp_mac = macoff;
2315		h.h2->tp_net = netoff;
2316		h.h2->tp_sec = ts.tv_sec;
2317		h.h2->tp_nsec = ts.tv_nsec;
2318		if (skb_vlan_tag_present(skb)) {
2319			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2320			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2321			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2322		} else {
2323			h.h2->tp_vlan_tci = 0;
2324			h.h2->tp_vlan_tpid = 0;
2325		}
2326		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2327		hdrlen = sizeof(*h.h2);
2328		break;
2329	case TPACKET_V3:
2330		/* tp_nxt_offset,vlan are already populated above.
2331		 * So DONT clear those fields here
2332		 */
2333		h.h3->tp_status |= status;
2334		h.h3->tp_len = skb->len;
2335		h.h3->tp_snaplen = snaplen;
2336		h.h3->tp_mac = macoff;
2337		h.h3->tp_net = netoff;
2338		h.h3->tp_sec  = ts.tv_sec;
2339		h.h3->tp_nsec = ts.tv_nsec;
2340		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2341		hdrlen = sizeof(*h.h3);
2342		break;
2343	default:
2344		BUG();
2345	}
2346
2347	sll = h.raw + TPACKET_ALIGN(hdrlen);
2348	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2349	sll->sll_family = AF_PACKET;
2350	sll->sll_hatype = dev->type;
2351	sll->sll_protocol = skb->protocol;
2352	sll->sll_pkttype = skb->pkt_type;
2353	if (unlikely(po->origdev))
2354		sll->sll_ifindex = orig_dev->ifindex;
2355	else
2356		sll->sll_ifindex = dev->ifindex;
2357
2358	smp_mb();
2359
2360#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2361	if (po->tp_version <= TPACKET_V2) {
2362		u8 *start, *end;
2363
2364		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2365					macoff + snaplen);
2366
2367		for (start = h.raw; start < end; start += PAGE_SIZE)
2368			flush_dcache_page(pgv_to_page(start));
2369	}
2370	smp_wmb();
2371#endif
2372
2373	if (po->tp_version <= TPACKET_V2) {
 
2374		__packet_set_status(po, h.raw, status);
 
 
2375		sk->sk_data_ready(sk);
2376	} else {
2377		prb_clear_blk_fill_status(&po->rx_ring);
2378	}
2379
2380drop_n_restore:
2381	if (skb_head != skb->data && skb_shared(skb)) {
2382		skb->data = skb_head;
2383		skb->len = skb_len;
2384	}
2385drop:
2386	if (!is_drop_n_account)
2387		consume_skb(skb);
2388	else
2389		kfree_skb(skb);
2390	return 0;
2391
2392drop_n_account:
2393	spin_unlock(&sk->sk_receive_queue.lock);
2394	atomic_inc(&po->tp_drops);
2395	is_drop_n_account = true;
2396
2397	sk->sk_data_ready(sk);
2398	kfree_skb(copy_skb);
2399	goto drop_n_restore;
2400}
2401
2402static void tpacket_destruct_skb(struct sk_buff *skb)
2403{
2404	struct packet_sock *po = pkt_sk(skb->sk);
2405
2406	if (likely(po->tx_ring.pg_vec)) {
2407		void *ph;
2408		__u32 ts;
2409
2410		ph = skb_zcopy_get_nouarg(skb);
2411		packet_dec_pending(&po->tx_ring);
2412
2413		ts = __packet_set_timestamp(po, ph, skb);
2414		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2415
2416		if (!packet_read_pending(&po->tx_ring))
2417			complete(&po->skb_completion);
2418	}
2419
2420	sock_wfree(skb);
2421}
2422
2423static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2424{
2425	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2426	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2427	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2428	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2429		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2430			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2431			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2432
2433	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2434		return -EINVAL;
2435
2436	return 0;
2437}
2438
2439static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2440				 struct virtio_net_hdr *vnet_hdr)
2441{
2442	if (*len < sizeof(*vnet_hdr))
2443		return -EINVAL;
2444	*len -= sizeof(*vnet_hdr);
2445
2446	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2447		return -EFAULT;
2448
2449	return __packet_snd_vnet_parse(vnet_hdr, *len);
2450}
2451
2452static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2453		void *frame, struct net_device *dev, void *data, int tp_len,
2454		__be16 proto, unsigned char *addr, int hlen, int copylen,
2455		const struct sockcm_cookie *sockc)
2456{
2457	union tpacket_uhdr ph;
2458	int to_write, offset, len, nr_frags, len_max;
2459	struct socket *sock = po->sk.sk_socket;
2460	struct page *page;
2461	int err;
2462
2463	ph.raw = frame;
2464
2465	skb->protocol = proto;
2466	skb->dev = dev;
2467	skb->priority = po->sk.sk_priority;
2468	skb->mark = po->sk.sk_mark;
2469	skb->tstamp = sockc->transmit_time;
2470	skb_setup_tx_timestamp(skb, sockc->tsflags);
2471	skb_zcopy_set_nouarg(skb, ph.raw);
2472
2473	skb_reserve(skb, hlen);
2474	skb_reset_network_header(skb);
2475
2476	to_write = tp_len;
2477
2478	if (sock->type == SOCK_DGRAM) {
2479		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2480				NULL, tp_len);
2481		if (unlikely(err < 0))
2482			return -EINVAL;
2483	} else if (copylen) {
2484		int hdrlen = min_t(int, copylen, tp_len);
2485
2486		skb_push(skb, dev->hard_header_len);
2487		skb_put(skb, copylen - dev->hard_header_len);
2488		err = skb_store_bits(skb, 0, data, hdrlen);
2489		if (unlikely(err))
2490			return err;
2491		if (!dev_validate_header(dev, skb->data, hdrlen))
2492			return -EINVAL;
2493
2494		data += hdrlen;
2495		to_write -= hdrlen;
2496	}
2497
2498	offset = offset_in_page(data);
2499	len_max = PAGE_SIZE - offset;
2500	len = ((to_write > len_max) ? len_max : to_write);
2501
2502	skb->data_len = to_write;
2503	skb->len += to_write;
2504	skb->truesize += to_write;
2505	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2506
2507	while (likely(to_write)) {
2508		nr_frags = skb_shinfo(skb)->nr_frags;
2509
2510		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2511			pr_err("Packet exceed the number of skb frags(%lu)\n",
2512			       MAX_SKB_FRAGS);
2513			return -EFAULT;
2514		}
2515
2516		page = pgv_to_page(data);
2517		data += len;
2518		flush_dcache_page(page);
2519		get_page(page);
2520		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2521		to_write -= len;
2522		offset = 0;
2523		len_max = PAGE_SIZE;
2524		len = ((to_write > len_max) ? len_max : to_write);
2525	}
2526
2527	packet_parse_headers(skb, sock);
2528
2529	return tp_len;
2530}
2531
2532static int tpacket_parse_header(struct packet_sock *po, void *frame,
2533				int size_max, void **data)
2534{
2535	union tpacket_uhdr ph;
2536	int tp_len, off;
2537
2538	ph.raw = frame;
2539
2540	switch (po->tp_version) {
2541	case TPACKET_V3:
2542		if (ph.h3->tp_next_offset != 0) {
2543			pr_warn_once("variable sized slot not supported");
2544			return -EINVAL;
2545		}
2546		tp_len = ph.h3->tp_len;
2547		break;
2548	case TPACKET_V2:
2549		tp_len = ph.h2->tp_len;
2550		break;
2551	default:
2552		tp_len = ph.h1->tp_len;
2553		break;
2554	}
2555	if (unlikely(tp_len > size_max)) {
2556		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2557		return -EMSGSIZE;
2558	}
2559
2560	if (unlikely(po->tp_tx_has_off)) {
2561		int off_min, off_max;
2562
2563		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2564		off_max = po->tx_ring.frame_size - tp_len;
2565		if (po->sk.sk_type == SOCK_DGRAM) {
2566			switch (po->tp_version) {
2567			case TPACKET_V3:
2568				off = ph.h3->tp_net;
2569				break;
2570			case TPACKET_V2:
2571				off = ph.h2->tp_net;
2572				break;
2573			default:
2574				off = ph.h1->tp_net;
2575				break;
2576			}
2577		} else {
2578			switch (po->tp_version) {
2579			case TPACKET_V3:
2580				off = ph.h3->tp_mac;
2581				break;
2582			case TPACKET_V2:
2583				off = ph.h2->tp_mac;
2584				break;
2585			default:
2586				off = ph.h1->tp_mac;
2587				break;
2588			}
2589		}
2590		if (unlikely((off < off_min) || (off_max < off)))
2591			return -EINVAL;
2592	} else {
2593		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2594	}
2595
2596	*data = frame + off;
2597	return tp_len;
2598}
2599
2600static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2601{
2602	struct sk_buff *skb = NULL;
2603	struct net_device *dev;
2604	struct virtio_net_hdr *vnet_hdr = NULL;
2605	struct sockcm_cookie sockc;
2606	__be16 proto;
2607	int err, reserve = 0;
2608	void *ph;
2609	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2610	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2611	unsigned char *addr = NULL;
2612	int tp_len, size_max;
2613	void *data;
2614	int len_sum = 0;
2615	int status = TP_STATUS_AVAILABLE;
2616	int hlen, tlen, copylen = 0;
2617	long timeo = 0;
2618
2619	mutex_lock(&po->pg_vec_lock);
2620
2621	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2622	 * we need to confirm it under protection of pg_vec_lock.
2623	 */
2624	if (unlikely(!po->tx_ring.pg_vec)) {
2625		err = -EBUSY;
2626		goto out;
2627	}
2628	if (likely(saddr == NULL)) {
2629		dev	= packet_cached_dev_get(po);
2630		proto	= po->num;
2631	} else {
2632		err = -EINVAL;
2633		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2634			goto out;
2635		if (msg->msg_namelen < (saddr->sll_halen
2636					+ offsetof(struct sockaddr_ll,
2637						sll_addr)))
2638			goto out;
2639		proto	= saddr->sll_protocol;
2640		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2641		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2642			if (dev && msg->msg_namelen < dev->addr_len +
2643				   offsetof(struct sockaddr_ll, sll_addr))
2644				goto out_put;
2645			addr = saddr->sll_addr;
2646		}
2647	}
2648
2649	err = -ENXIO;
2650	if (unlikely(dev == NULL))
2651		goto out;
2652	err = -ENETDOWN;
2653	if (unlikely(!(dev->flags & IFF_UP)))
2654		goto out_put;
2655
2656	sockcm_init(&sockc, &po->sk);
2657	if (msg->msg_controllen) {
2658		err = sock_cmsg_send(&po->sk, msg, &sockc);
2659		if (unlikely(err))
2660			goto out_put;
2661	}
2662
2663	if (po->sk.sk_socket->type == SOCK_RAW)
2664		reserve = dev->hard_header_len;
2665	size_max = po->tx_ring.frame_size
2666		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2667
2668	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2669		size_max = dev->mtu + reserve + VLAN_HLEN;
2670
2671	reinit_completion(&po->skb_completion);
2672
2673	do {
2674		ph = packet_current_frame(po, &po->tx_ring,
2675					  TP_STATUS_SEND_REQUEST);
2676		if (unlikely(ph == NULL)) {
2677			if (need_wait && skb) {
2678				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2679				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2680				if (timeo <= 0) {
2681					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2682					goto out_put;
2683				}
2684			}
2685			/* check for additional frames */
2686			continue;
2687		}
2688
2689		skb = NULL;
2690		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2691		if (tp_len < 0)
2692			goto tpacket_error;
2693
2694		status = TP_STATUS_SEND_REQUEST;
2695		hlen = LL_RESERVED_SPACE(dev);
2696		tlen = dev->needed_tailroom;
2697		if (po->has_vnet_hdr) {
2698			vnet_hdr = data;
2699			data += sizeof(*vnet_hdr);
2700			tp_len -= sizeof(*vnet_hdr);
2701			if (tp_len < 0 ||
2702			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2703				tp_len = -EINVAL;
2704				goto tpacket_error;
2705			}
2706			copylen = __virtio16_to_cpu(vio_le(),
2707						    vnet_hdr->hdr_len);
2708		}
2709		copylen = max_t(int, copylen, dev->hard_header_len);
2710		skb = sock_alloc_send_skb(&po->sk,
2711				hlen + tlen + sizeof(struct sockaddr_ll) +
2712				(copylen - dev->hard_header_len),
2713				!need_wait, &err);
2714
2715		if (unlikely(skb == NULL)) {
2716			/* we assume the socket was initially writeable ... */
2717			if (likely(len_sum > 0))
2718				err = len_sum;
2719			goto out_status;
2720		}
2721		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2722					  addr, hlen, copylen, &sockc);
2723		if (likely(tp_len >= 0) &&
2724		    tp_len > dev->mtu + reserve &&
2725		    !po->has_vnet_hdr &&
2726		    !packet_extra_vlan_len_allowed(dev, skb))
2727			tp_len = -EMSGSIZE;
2728
2729		if (unlikely(tp_len < 0)) {
2730tpacket_error:
2731			if (po->tp_loss) {
2732				__packet_set_status(po, ph,
2733						TP_STATUS_AVAILABLE);
2734				packet_increment_head(&po->tx_ring);
2735				kfree_skb(skb);
2736				continue;
2737			} else {
2738				status = TP_STATUS_WRONG_FORMAT;
2739				err = tp_len;
2740				goto out_status;
2741			}
2742		}
2743
2744		if (po->has_vnet_hdr) {
2745			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2746				tp_len = -EINVAL;
2747				goto tpacket_error;
2748			}
2749			virtio_net_hdr_set_proto(skb, vnet_hdr);
2750		}
2751
2752		skb->destructor = tpacket_destruct_skb;
2753		__packet_set_status(po, ph, TP_STATUS_SENDING);
2754		packet_inc_pending(&po->tx_ring);
2755
2756		status = TP_STATUS_SEND_REQUEST;
2757		err = po->xmit(skb);
2758		if (unlikely(err > 0)) {
2759			err = net_xmit_errno(err);
2760			if (err && __packet_get_status(po, ph) ==
2761				   TP_STATUS_AVAILABLE) {
2762				/* skb was destructed already */
2763				skb = NULL;
2764				goto out_status;
2765			}
2766			/*
2767			 * skb was dropped but not destructed yet;
2768			 * let's treat it like congestion or err < 0
2769			 */
2770			err = 0;
2771		}
2772		packet_increment_head(&po->tx_ring);
2773		len_sum += tp_len;
2774	} while (likely((ph != NULL) ||
2775		/* Note: packet_read_pending() might be slow if we have
2776		 * to call it as it's per_cpu variable, but in fast-path
2777		 * we already short-circuit the loop with the first
2778		 * condition, and luckily don't have to go that path
2779		 * anyway.
2780		 */
2781		 (need_wait && packet_read_pending(&po->tx_ring))));
2782
2783	err = len_sum;
2784	goto out_put;
2785
2786out_status:
2787	__packet_set_status(po, ph, status);
2788	kfree_skb(skb);
2789out_put:
2790	dev_put(dev);
2791out:
2792	mutex_unlock(&po->pg_vec_lock);
2793	return err;
2794}
2795
2796static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2797				        size_t reserve, size_t len,
2798				        size_t linear, int noblock,
2799				        int *err)
2800{
2801	struct sk_buff *skb;
2802
2803	/* Under a page?  Don't bother with paged skb. */
2804	if (prepad + len < PAGE_SIZE || !linear)
2805		linear = len;
2806
2807	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2808				   err, 0);
2809	if (!skb)
2810		return NULL;
2811
2812	skb_reserve(skb, reserve);
2813	skb_put(skb, linear);
2814	skb->data_len = len - linear;
2815	skb->len += len - linear;
2816
2817	return skb;
2818}
2819
2820static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2821{
2822	struct sock *sk = sock->sk;
2823	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2824	struct sk_buff *skb;
2825	struct net_device *dev;
2826	__be16 proto;
2827	unsigned char *addr = NULL;
2828	int err, reserve = 0;
2829	struct sockcm_cookie sockc;
2830	struct virtio_net_hdr vnet_hdr = { 0 };
2831	int offset = 0;
2832	struct packet_sock *po = pkt_sk(sk);
2833	bool has_vnet_hdr = false;
2834	int hlen, tlen, linear;
2835	int extra_len = 0;
2836
2837	/*
2838	 *	Get and verify the address.
2839	 */
2840
2841	if (likely(saddr == NULL)) {
2842		dev	= packet_cached_dev_get(po);
2843		proto	= po->num;
2844	} else {
2845		err = -EINVAL;
2846		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2847			goto out;
2848		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2849			goto out;
2850		proto	= saddr->sll_protocol;
2851		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2852		if (sock->type == SOCK_DGRAM) {
2853			if (dev && msg->msg_namelen < dev->addr_len +
2854				   offsetof(struct sockaddr_ll, sll_addr))
2855				goto out_unlock;
2856			addr = saddr->sll_addr;
2857		}
2858	}
2859
2860	err = -ENXIO;
2861	if (unlikely(dev == NULL))
2862		goto out_unlock;
2863	err = -ENETDOWN;
2864	if (unlikely(!(dev->flags & IFF_UP)))
2865		goto out_unlock;
2866
2867	sockcm_init(&sockc, sk);
2868	sockc.mark = sk->sk_mark;
2869	if (msg->msg_controllen) {
2870		err = sock_cmsg_send(sk, msg, &sockc);
2871		if (unlikely(err))
2872			goto out_unlock;
2873	}
2874
2875	if (sock->type == SOCK_RAW)
2876		reserve = dev->hard_header_len;
2877	if (po->has_vnet_hdr) {
2878		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2879		if (err)
2880			goto out_unlock;
2881		has_vnet_hdr = true;
2882	}
2883
2884	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2885		if (!netif_supports_nofcs(dev)) {
2886			err = -EPROTONOSUPPORT;
2887			goto out_unlock;
2888		}
2889		extra_len = 4; /* We're doing our own CRC */
2890	}
2891
2892	err = -EMSGSIZE;
2893	if (!vnet_hdr.gso_type &&
2894	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2895		goto out_unlock;
2896
2897	err = -ENOBUFS;
2898	hlen = LL_RESERVED_SPACE(dev);
2899	tlen = dev->needed_tailroom;
2900	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2901	linear = max(linear, min_t(int, len, dev->hard_header_len));
2902	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2903			       msg->msg_flags & MSG_DONTWAIT, &err);
2904	if (skb == NULL)
2905		goto out_unlock;
2906
2907	skb_reset_network_header(skb);
2908
2909	err = -EINVAL;
2910	if (sock->type == SOCK_DGRAM) {
2911		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2912		if (unlikely(offset < 0))
2913			goto out_free;
2914	} else if (reserve) {
2915		skb_reserve(skb, -reserve);
2916		if (len < reserve + sizeof(struct ipv6hdr) &&
2917		    dev->min_header_len != dev->hard_header_len)
2918			skb_reset_network_header(skb);
2919	}
2920
2921	/* Returns -EFAULT on error */
2922	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2923	if (err)
2924		goto out_free;
2925
2926	if (sock->type == SOCK_RAW &&
2927	    !dev_validate_header(dev, skb->data, len)) {
2928		err = -EINVAL;
2929		goto out_free;
2930	}
2931
2932	skb_setup_tx_timestamp(skb, sockc.tsflags);
2933
2934	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2935	    !packet_extra_vlan_len_allowed(dev, skb)) {
2936		err = -EMSGSIZE;
2937		goto out_free;
2938	}
2939
2940	skb->protocol = proto;
2941	skb->dev = dev;
2942	skb->priority = sk->sk_priority;
2943	skb->mark = sockc.mark;
2944	skb->tstamp = sockc.transmit_time;
2945
2946	if (has_vnet_hdr) {
2947		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2948		if (err)
2949			goto out_free;
2950		len += sizeof(vnet_hdr);
2951		virtio_net_hdr_set_proto(skb, &vnet_hdr);
2952	}
2953
2954	packet_parse_headers(skb, sock);
2955
2956	if (unlikely(extra_len == 4))
2957		skb->no_fcs = 1;
2958
2959	err = po->xmit(skb);
2960	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2961		goto out_unlock;
2962
2963	dev_put(dev);
2964
2965	return len;
2966
2967out_free:
2968	kfree_skb(skb);
2969out_unlock:
2970	if (dev)
2971		dev_put(dev);
2972out:
2973	return err;
2974}
2975
2976static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2977{
2978	struct sock *sk = sock->sk;
2979	struct packet_sock *po = pkt_sk(sk);
2980
2981	if (po->tx_ring.pg_vec)
2982		return tpacket_snd(po, msg);
2983	else
2984		return packet_snd(sock, msg, len);
2985}
2986
2987/*
2988 *	Close a PACKET socket. This is fairly simple. We immediately go
2989 *	to 'closed' state and remove our protocol entry in the device list.
2990 */
2991
2992static int packet_release(struct socket *sock)
2993{
2994	struct sock *sk = sock->sk;
2995	struct packet_sock *po;
2996	struct packet_fanout *f;
2997	struct net *net;
2998	union tpacket_req_u req_u;
2999
3000	if (!sk)
3001		return 0;
3002
3003	net = sock_net(sk);
3004	po = pkt_sk(sk);
3005
3006	mutex_lock(&net->packet.sklist_lock);
3007	sk_del_node_init_rcu(sk);
3008	mutex_unlock(&net->packet.sklist_lock);
3009
3010	preempt_disable();
3011	sock_prot_inuse_add(net, sk->sk_prot, -1);
3012	preempt_enable();
3013
3014	spin_lock(&po->bind_lock);
3015	unregister_prot_hook(sk, false);
3016	packet_cached_dev_reset(po);
3017
3018	if (po->prot_hook.dev) {
3019		dev_put(po->prot_hook.dev);
3020		po->prot_hook.dev = NULL;
3021	}
3022	spin_unlock(&po->bind_lock);
3023
3024	packet_flush_mclist(sk);
3025
3026	lock_sock(sk);
3027	if (po->rx_ring.pg_vec) {
3028		memset(&req_u, 0, sizeof(req_u));
3029		packet_set_ring(sk, &req_u, 1, 0);
3030	}
3031
3032	if (po->tx_ring.pg_vec) {
3033		memset(&req_u, 0, sizeof(req_u));
3034		packet_set_ring(sk, &req_u, 1, 1);
3035	}
3036	release_sock(sk);
3037
3038	f = fanout_release(sk);
3039
3040	synchronize_net();
3041
3042	kfree(po->rollover);
3043	if (f) {
3044		fanout_release_data(f);
3045		kfree(f);
3046	}
3047	/*
3048	 *	Now the socket is dead. No more input will appear.
3049	 */
3050	sock_orphan(sk);
3051	sock->sk = NULL;
3052
3053	/* Purge queues */
3054
3055	skb_queue_purge(&sk->sk_receive_queue);
3056	packet_free_pending(po);
3057	sk_refcnt_debug_release(sk);
3058
3059	sock_put(sk);
3060	return 0;
3061}
3062
3063/*
3064 *	Attach a packet hook.
3065 */
3066
3067static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3068			  __be16 proto)
3069{
3070	struct packet_sock *po = pkt_sk(sk);
3071	struct net_device *dev_curr;
3072	__be16 proto_curr;
3073	bool need_rehook;
3074	struct net_device *dev = NULL;
3075	int ret = 0;
3076	bool unlisted = false;
3077
3078	lock_sock(sk);
3079	spin_lock(&po->bind_lock);
3080	rcu_read_lock();
3081
3082	if (po->fanout) {
3083		ret = -EINVAL;
3084		goto out_unlock;
3085	}
3086
3087	if (name) {
3088		dev = dev_get_by_name_rcu(sock_net(sk), name);
3089		if (!dev) {
3090			ret = -ENODEV;
3091			goto out_unlock;
3092		}
3093	} else if (ifindex) {
3094		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3095		if (!dev) {
3096			ret = -ENODEV;
3097			goto out_unlock;
3098		}
3099	}
3100
3101	if (dev)
3102		dev_hold(dev);
3103
3104	proto_curr = po->prot_hook.type;
3105	dev_curr = po->prot_hook.dev;
3106
3107	need_rehook = proto_curr != proto || dev_curr != dev;
3108
3109	if (need_rehook) {
3110		if (po->running) {
3111			rcu_read_unlock();
3112			/* prevents packet_notifier() from calling
3113			 * register_prot_hook()
3114			 */
3115			po->num = 0;
3116			__unregister_prot_hook(sk, true);
3117			rcu_read_lock();
3118			dev_curr = po->prot_hook.dev;
3119			if (dev)
3120				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3121								 dev->ifindex);
3122		}
3123
3124		BUG_ON(po->running);
3125		po->num = proto;
3126		po->prot_hook.type = proto;
3127
3128		if (unlikely(unlisted)) {
3129			dev_put(dev);
3130			po->prot_hook.dev = NULL;
3131			po->ifindex = -1;
3132			packet_cached_dev_reset(po);
3133		} else {
3134			po->prot_hook.dev = dev;
3135			po->ifindex = dev ? dev->ifindex : 0;
3136			packet_cached_dev_assign(po, dev);
3137		}
3138	}
3139	if (dev_curr)
3140		dev_put(dev_curr);
3141
3142	if (proto == 0 || !need_rehook)
3143		goto out_unlock;
3144
3145	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3146		register_prot_hook(sk);
3147	} else {
3148		sk->sk_err = ENETDOWN;
3149		if (!sock_flag(sk, SOCK_DEAD))
3150			sk->sk_error_report(sk);
3151	}
3152
3153out_unlock:
3154	rcu_read_unlock();
3155	spin_unlock(&po->bind_lock);
3156	release_sock(sk);
3157	return ret;
3158}
3159
3160/*
3161 *	Bind a packet socket to a device
3162 */
3163
3164static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3165			    int addr_len)
3166{
3167	struct sock *sk = sock->sk;
3168	char name[sizeof(uaddr->sa_data) + 1];
3169
3170	/*
3171	 *	Check legality
3172	 */
3173
3174	if (addr_len != sizeof(struct sockaddr))
3175		return -EINVAL;
3176	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
3177	 * zero-terminated.
3178	 */
3179	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3180	name[sizeof(uaddr->sa_data)] = 0;
3181
3182	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3183}
3184
3185static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3186{
3187	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3188	struct sock *sk = sock->sk;
3189
3190	/*
3191	 *	Check legality
3192	 */
3193
3194	if (addr_len < sizeof(struct sockaddr_ll))
3195		return -EINVAL;
3196	if (sll->sll_family != AF_PACKET)
3197		return -EINVAL;
3198
3199	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3200			      sll->sll_protocol ? : pkt_sk(sk)->num);
3201}
3202
3203static struct proto packet_proto = {
3204	.name	  = "PACKET",
3205	.owner	  = THIS_MODULE,
3206	.obj_size = sizeof(struct packet_sock),
3207};
3208
3209/*
3210 *	Create a packet of type SOCK_PACKET.
3211 */
3212
3213static int packet_create(struct net *net, struct socket *sock, int protocol,
3214			 int kern)
3215{
3216	struct sock *sk;
3217	struct packet_sock *po;
3218	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3219	int err;
3220
3221	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3222		return -EPERM;
3223	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3224	    sock->type != SOCK_PACKET)
3225		return -ESOCKTNOSUPPORT;
3226
3227	sock->state = SS_UNCONNECTED;
3228
3229	err = -ENOBUFS;
3230	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3231	if (sk == NULL)
3232		goto out;
3233
3234	sock->ops = &packet_ops;
3235	if (sock->type == SOCK_PACKET)
3236		sock->ops = &packet_ops_spkt;
3237
3238	sock_init_data(sock, sk);
3239
3240	po = pkt_sk(sk);
3241	init_completion(&po->skb_completion);
3242	sk->sk_family = PF_PACKET;
3243	po->num = proto;
3244	po->xmit = dev_queue_xmit;
3245
3246	err = packet_alloc_pending(po);
3247	if (err)
3248		goto out2;
3249
3250	packet_cached_dev_reset(po);
3251
3252	sk->sk_destruct = packet_sock_destruct;
3253	sk_refcnt_debug_inc(sk);
3254
3255	/*
3256	 *	Attach a protocol block
3257	 */
3258
3259	spin_lock_init(&po->bind_lock);
3260	mutex_init(&po->pg_vec_lock);
3261	po->rollover = NULL;
3262	po->prot_hook.func = packet_rcv;
3263
3264	if (sock->type == SOCK_PACKET)
3265		po->prot_hook.func = packet_rcv_spkt;
3266
3267	po->prot_hook.af_packet_priv = sk;
3268
3269	if (proto) {
3270		po->prot_hook.type = proto;
3271		__register_prot_hook(sk);
3272	}
3273
3274	mutex_lock(&net->packet.sklist_lock);
3275	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3276	mutex_unlock(&net->packet.sklist_lock);
3277
3278	preempt_disable();
3279	sock_prot_inuse_add(net, &packet_proto, 1);
3280	preempt_enable();
3281
3282	return 0;
3283out2:
3284	sk_free(sk);
3285out:
3286	return err;
3287}
3288
3289/*
3290 *	Pull a packet from our receive queue and hand it to the user.
3291 *	If necessary we block.
3292 */
3293
3294static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3295			  int flags)
3296{
3297	struct sock *sk = sock->sk;
3298	struct sk_buff *skb;
3299	int copied, err;
3300	int vnet_hdr_len = 0;
3301	unsigned int origlen = 0;
3302
3303	err = -EINVAL;
3304	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3305		goto out;
3306
3307#if 0
3308	/* What error should we return now? EUNATTACH? */
3309	if (pkt_sk(sk)->ifindex < 0)
3310		return -ENODEV;
3311#endif
3312
3313	if (flags & MSG_ERRQUEUE) {
3314		err = sock_recv_errqueue(sk, msg, len,
3315					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3316		goto out;
3317	}
3318
3319	/*
3320	 *	Call the generic datagram receiver. This handles all sorts
3321	 *	of horrible races and re-entrancy so we can forget about it
3322	 *	in the protocol layers.
3323	 *
3324	 *	Now it will return ENETDOWN, if device have just gone down,
3325	 *	but then it will block.
3326	 */
3327
3328	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3329
3330	/*
3331	 *	An error occurred so return it. Because skb_recv_datagram()
3332	 *	handles the blocking we don't see and worry about blocking
3333	 *	retries.
3334	 */
3335
3336	if (skb == NULL)
3337		goto out;
3338
3339	packet_rcv_try_clear_pressure(pkt_sk(sk));
3340
3341	if (pkt_sk(sk)->has_vnet_hdr) {
3342		err = packet_rcv_vnet(msg, skb, &len);
3343		if (err)
3344			goto out_free;
3345		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3346	}
3347
3348	/* You lose any data beyond the buffer you gave. If it worries
3349	 * a user program they can ask the device for its MTU
3350	 * anyway.
3351	 */
3352	copied = skb->len;
3353	if (copied > len) {
3354		copied = len;
3355		msg->msg_flags |= MSG_TRUNC;
3356	}
3357
3358	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3359	if (err)
3360		goto out_free;
3361
3362	if (sock->type != SOCK_PACKET) {
3363		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3364
3365		/* Original length was stored in sockaddr_ll fields */
3366		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3367		sll->sll_family = AF_PACKET;
3368		sll->sll_protocol = skb->protocol;
3369	}
3370
3371	sock_recv_ts_and_drops(msg, sk, skb);
3372
3373	if (msg->msg_name) {
3374		int copy_len;
3375
3376		/* If the address length field is there to be filled
3377		 * in, we fill it in now.
3378		 */
3379		if (sock->type == SOCK_PACKET) {
3380			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3381			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3382			copy_len = msg->msg_namelen;
3383		} else {
3384			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3385
3386			msg->msg_namelen = sll->sll_halen +
3387				offsetof(struct sockaddr_ll, sll_addr);
3388			copy_len = msg->msg_namelen;
3389			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3390				memset(msg->msg_name +
3391				       offsetof(struct sockaddr_ll, sll_addr),
3392				       0, sizeof(sll->sll_addr));
3393				msg->msg_namelen = sizeof(struct sockaddr_ll);
3394			}
3395		}
3396		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3397	}
3398
3399	if (pkt_sk(sk)->auxdata) {
3400		struct tpacket_auxdata aux;
3401
3402		aux.tp_status = TP_STATUS_USER;
3403		if (skb->ip_summed == CHECKSUM_PARTIAL)
3404			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3405		else if (skb->pkt_type != PACKET_OUTGOING &&
3406			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3407			  skb_csum_unnecessary(skb)))
3408			aux.tp_status |= TP_STATUS_CSUM_VALID;
3409
3410		aux.tp_len = origlen;
3411		aux.tp_snaplen = skb->len;
3412		aux.tp_mac = 0;
3413		aux.tp_net = skb_network_offset(skb);
3414		if (skb_vlan_tag_present(skb)) {
3415			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3416			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3417			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3418		} else {
3419			aux.tp_vlan_tci = 0;
3420			aux.tp_vlan_tpid = 0;
3421		}
3422		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3423	}
3424
3425	/*
3426	 *	Free or return the buffer as appropriate. Again this
3427	 *	hides all the races and re-entrancy issues from us.
3428	 */
3429	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3430
3431out_free:
3432	skb_free_datagram(sk, skb);
3433out:
3434	return err;
3435}
3436
3437static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3438			       int peer)
3439{
3440	struct net_device *dev;
3441	struct sock *sk	= sock->sk;
3442
3443	if (peer)
3444		return -EOPNOTSUPP;
3445
3446	uaddr->sa_family = AF_PACKET;
3447	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3448	rcu_read_lock();
3449	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3450	if (dev)
3451		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3452	rcu_read_unlock();
3453
3454	return sizeof(*uaddr);
3455}
3456
3457static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3458			  int peer)
3459{
3460	struct net_device *dev;
3461	struct sock *sk = sock->sk;
3462	struct packet_sock *po = pkt_sk(sk);
3463	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3464
3465	if (peer)
3466		return -EOPNOTSUPP;
3467
3468	sll->sll_family = AF_PACKET;
3469	sll->sll_ifindex = po->ifindex;
3470	sll->sll_protocol = po->num;
3471	sll->sll_pkttype = 0;
3472	rcu_read_lock();
3473	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3474	if (dev) {
3475		sll->sll_hatype = dev->type;
3476		sll->sll_halen = dev->addr_len;
3477		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3478	} else {
3479		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3480		sll->sll_halen = 0;
3481	}
3482	rcu_read_unlock();
3483
3484	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3485}
3486
3487static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3488			 int what)
3489{
3490	switch (i->type) {
3491	case PACKET_MR_MULTICAST:
3492		if (i->alen != dev->addr_len)
3493			return -EINVAL;
3494		if (what > 0)
3495			return dev_mc_add(dev, i->addr);
3496		else
3497			return dev_mc_del(dev, i->addr);
3498		break;
3499	case PACKET_MR_PROMISC:
3500		return dev_set_promiscuity(dev, what);
3501	case PACKET_MR_ALLMULTI:
3502		return dev_set_allmulti(dev, what);
3503	case PACKET_MR_UNICAST:
3504		if (i->alen != dev->addr_len)
3505			return -EINVAL;
3506		if (what > 0)
3507			return dev_uc_add(dev, i->addr);
3508		else
3509			return dev_uc_del(dev, i->addr);
3510		break;
3511	default:
3512		break;
3513	}
3514	return 0;
3515}
3516
3517static void packet_dev_mclist_delete(struct net_device *dev,
3518				     struct packet_mclist **mlp)
3519{
3520	struct packet_mclist *ml;
3521
3522	while ((ml = *mlp) != NULL) {
3523		if (ml->ifindex == dev->ifindex) {
3524			packet_dev_mc(dev, ml, -1);
3525			*mlp = ml->next;
3526			kfree(ml);
3527		} else
3528			mlp = &ml->next;
3529	}
3530}
3531
3532static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3533{
3534	struct packet_sock *po = pkt_sk(sk);
3535	struct packet_mclist *ml, *i;
3536	struct net_device *dev;
3537	int err;
3538
3539	rtnl_lock();
3540
3541	err = -ENODEV;
3542	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3543	if (!dev)
3544		goto done;
3545
3546	err = -EINVAL;
3547	if (mreq->mr_alen > dev->addr_len)
3548		goto done;
3549
3550	err = -ENOBUFS;
3551	i = kmalloc(sizeof(*i), GFP_KERNEL);
3552	if (i == NULL)
3553		goto done;
3554
3555	err = 0;
3556	for (ml = po->mclist; ml; ml = ml->next) {
3557		if (ml->ifindex == mreq->mr_ifindex &&
3558		    ml->type == mreq->mr_type &&
3559		    ml->alen == mreq->mr_alen &&
3560		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3561			ml->count++;
3562			/* Free the new element ... */
3563			kfree(i);
3564			goto done;
3565		}
3566	}
3567
3568	i->type = mreq->mr_type;
3569	i->ifindex = mreq->mr_ifindex;
3570	i->alen = mreq->mr_alen;
3571	memcpy(i->addr, mreq->mr_address, i->alen);
3572	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3573	i->count = 1;
3574	i->next = po->mclist;
3575	po->mclist = i;
3576	err = packet_dev_mc(dev, i, 1);
3577	if (err) {
3578		po->mclist = i->next;
3579		kfree(i);
3580	}
3581
3582done:
3583	rtnl_unlock();
3584	return err;
3585}
3586
3587static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3588{
3589	struct packet_mclist *ml, **mlp;
3590
3591	rtnl_lock();
3592
3593	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3594		if (ml->ifindex == mreq->mr_ifindex &&
3595		    ml->type == mreq->mr_type &&
3596		    ml->alen == mreq->mr_alen &&
3597		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3598			if (--ml->count == 0) {
3599				struct net_device *dev;
3600				*mlp = ml->next;
3601				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3602				if (dev)
3603					packet_dev_mc(dev, ml, -1);
3604				kfree(ml);
3605			}
3606			break;
3607		}
3608	}
3609	rtnl_unlock();
3610	return 0;
3611}
3612
3613static void packet_flush_mclist(struct sock *sk)
3614{
3615	struct packet_sock *po = pkt_sk(sk);
3616	struct packet_mclist *ml;
3617
3618	if (!po->mclist)
3619		return;
3620
3621	rtnl_lock();
3622	while ((ml = po->mclist) != NULL) {
3623		struct net_device *dev;
3624
3625		po->mclist = ml->next;
3626		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3627		if (dev != NULL)
3628			packet_dev_mc(dev, ml, -1);
3629		kfree(ml);
3630	}
3631	rtnl_unlock();
3632}
3633
3634static int
3635packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
 
3636{
3637	struct sock *sk = sock->sk;
3638	struct packet_sock *po = pkt_sk(sk);
3639	int ret;
3640
3641	if (level != SOL_PACKET)
3642		return -ENOPROTOOPT;
3643
3644	switch (optname) {
3645	case PACKET_ADD_MEMBERSHIP:
3646	case PACKET_DROP_MEMBERSHIP:
3647	{
3648		struct packet_mreq_max mreq;
3649		int len = optlen;
3650		memset(&mreq, 0, sizeof(mreq));
3651		if (len < sizeof(struct packet_mreq))
3652			return -EINVAL;
3653		if (len > sizeof(mreq))
3654			len = sizeof(mreq);
3655		if (copy_from_user(&mreq, optval, len))
3656			return -EFAULT;
3657		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3658			return -EINVAL;
3659		if (optname == PACKET_ADD_MEMBERSHIP)
3660			ret = packet_mc_add(sk, &mreq);
3661		else
3662			ret = packet_mc_drop(sk, &mreq);
3663		return ret;
3664	}
3665
3666	case PACKET_RX_RING:
3667	case PACKET_TX_RING:
3668	{
3669		union tpacket_req_u req_u;
3670		int len;
3671
3672		lock_sock(sk);
3673		switch (po->tp_version) {
3674		case TPACKET_V1:
3675		case TPACKET_V2:
3676			len = sizeof(req_u.req);
3677			break;
3678		case TPACKET_V3:
3679		default:
3680			len = sizeof(req_u.req3);
3681			break;
3682		}
3683		if (optlen < len) {
3684			ret = -EINVAL;
3685		} else {
3686			if (copy_from_user(&req_u.req, optval, len))
3687				ret = -EFAULT;
3688			else
3689				ret = packet_set_ring(sk, &req_u, 0,
3690						    optname == PACKET_TX_RING);
3691		}
3692		release_sock(sk);
3693		return ret;
3694	}
3695	case PACKET_COPY_THRESH:
3696	{
3697		int val;
3698
3699		if (optlen != sizeof(val))
3700			return -EINVAL;
3701		if (copy_from_user(&val, optval, sizeof(val)))
3702			return -EFAULT;
3703
3704		pkt_sk(sk)->copy_thresh = val;
3705		return 0;
3706	}
3707	case PACKET_VERSION:
3708	{
3709		int val;
3710
3711		if (optlen != sizeof(val))
3712			return -EINVAL;
3713		if (copy_from_user(&val, optval, sizeof(val)))
3714			return -EFAULT;
3715		switch (val) {
3716		case TPACKET_V1:
3717		case TPACKET_V2:
3718		case TPACKET_V3:
3719			break;
3720		default:
3721			return -EINVAL;
3722		}
3723		lock_sock(sk);
3724		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3725			ret = -EBUSY;
3726		} else {
3727			po->tp_version = val;
3728			ret = 0;
3729		}
3730		release_sock(sk);
3731		return ret;
3732	}
3733	case PACKET_RESERVE:
3734	{
3735		unsigned int val;
3736
3737		if (optlen != sizeof(val))
3738			return -EINVAL;
3739		if (copy_from_user(&val, optval, sizeof(val)))
3740			return -EFAULT;
3741		if (val > INT_MAX)
3742			return -EINVAL;
3743		lock_sock(sk);
3744		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3745			ret = -EBUSY;
3746		} else {
3747			po->tp_reserve = val;
3748			ret = 0;
3749		}
3750		release_sock(sk);
3751		return ret;
3752	}
3753	case PACKET_LOSS:
3754	{
3755		unsigned int val;
3756
3757		if (optlen != sizeof(val))
3758			return -EINVAL;
3759		if (copy_from_user(&val, optval, sizeof(val)))
3760			return -EFAULT;
3761
3762		lock_sock(sk);
3763		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3764			ret = -EBUSY;
3765		} else {
3766			po->tp_loss = !!val;
3767			ret = 0;
3768		}
3769		release_sock(sk);
3770		return ret;
3771	}
3772	case PACKET_AUXDATA:
3773	{
3774		int val;
3775
3776		if (optlen < sizeof(val))
3777			return -EINVAL;
3778		if (copy_from_user(&val, optval, sizeof(val)))
3779			return -EFAULT;
3780
3781		lock_sock(sk);
3782		po->auxdata = !!val;
3783		release_sock(sk);
3784		return 0;
3785	}
3786	case PACKET_ORIGDEV:
3787	{
3788		int val;
3789
3790		if (optlen < sizeof(val))
3791			return -EINVAL;
3792		if (copy_from_user(&val, optval, sizeof(val)))
3793			return -EFAULT;
3794
3795		lock_sock(sk);
3796		po->origdev = !!val;
3797		release_sock(sk);
3798		return 0;
3799	}
3800	case PACKET_VNET_HDR:
3801	{
3802		int val;
3803
3804		if (sock->type != SOCK_RAW)
3805			return -EINVAL;
3806		if (optlen < sizeof(val))
3807			return -EINVAL;
3808		if (copy_from_user(&val, optval, sizeof(val)))
3809			return -EFAULT;
3810
3811		lock_sock(sk);
3812		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3813			ret = -EBUSY;
3814		} else {
3815			po->has_vnet_hdr = !!val;
3816			ret = 0;
3817		}
3818		release_sock(sk);
3819		return ret;
3820	}
3821	case PACKET_TIMESTAMP:
3822	{
3823		int val;
3824
3825		if (optlen != sizeof(val))
3826			return -EINVAL;
3827		if (copy_from_user(&val, optval, sizeof(val)))
3828			return -EFAULT;
3829
3830		po->tp_tstamp = val;
3831		return 0;
3832	}
3833	case PACKET_FANOUT:
3834	{
3835		int val;
3836
3837		if (optlen != sizeof(val))
3838			return -EINVAL;
3839		if (copy_from_user(&val, optval, sizeof(val)))
3840			return -EFAULT;
3841
3842		return fanout_add(sk, val & 0xffff, val >> 16);
3843	}
3844	case PACKET_FANOUT_DATA:
3845	{
3846		if (!po->fanout)
3847			return -EINVAL;
3848
3849		return fanout_set_data(po, optval, optlen);
3850	}
3851	case PACKET_IGNORE_OUTGOING:
3852	{
3853		int val;
3854
3855		if (optlen != sizeof(val))
3856			return -EINVAL;
3857		if (copy_from_user(&val, optval, sizeof(val)))
3858			return -EFAULT;
3859		if (val < 0 || val > 1)
3860			return -EINVAL;
3861
3862		po->prot_hook.ignore_outgoing = !!val;
3863		return 0;
3864	}
3865	case PACKET_TX_HAS_OFF:
3866	{
3867		unsigned int val;
3868
3869		if (optlen != sizeof(val))
3870			return -EINVAL;
3871		if (copy_from_user(&val, optval, sizeof(val)))
3872			return -EFAULT;
3873
3874		lock_sock(sk);
3875		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3876			ret = -EBUSY;
3877		} else {
3878			po->tp_tx_has_off = !!val;
3879			ret = 0;
3880		}
3881		release_sock(sk);
3882		return 0;
3883	}
3884	case PACKET_QDISC_BYPASS:
3885	{
3886		int val;
3887
3888		if (optlen != sizeof(val))
3889			return -EINVAL;
3890		if (copy_from_user(&val, optval, sizeof(val)))
3891			return -EFAULT;
3892
3893		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3894		return 0;
3895	}
3896	default:
3897		return -ENOPROTOOPT;
3898	}
3899}
3900
3901static int packet_getsockopt(struct socket *sock, int level, int optname,
3902			     char __user *optval, int __user *optlen)
3903{
3904	int len;
3905	int val, lv = sizeof(val);
3906	struct sock *sk = sock->sk;
3907	struct packet_sock *po = pkt_sk(sk);
3908	void *data = &val;
3909	union tpacket_stats_u st;
3910	struct tpacket_rollover_stats rstats;
3911	int drops;
3912
3913	if (level != SOL_PACKET)
3914		return -ENOPROTOOPT;
3915
3916	if (get_user(len, optlen))
3917		return -EFAULT;
3918
3919	if (len < 0)
3920		return -EINVAL;
3921
3922	switch (optname) {
3923	case PACKET_STATISTICS:
3924		spin_lock_bh(&sk->sk_receive_queue.lock);
3925		memcpy(&st, &po->stats, sizeof(st));
3926		memset(&po->stats, 0, sizeof(po->stats));
3927		spin_unlock_bh(&sk->sk_receive_queue.lock);
3928		drops = atomic_xchg(&po->tp_drops, 0);
3929
3930		if (po->tp_version == TPACKET_V3) {
3931			lv = sizeof(struct tpacket_stats_v3);
3932			st.stats3.tp_drops = drops;
3933			st.stats3.tp_packets += drops;
3934			data = &st.stats3;
3935		} else {
3936			lv = sizeof(struct tpacket_stats);
3937			st.stats1.tp_drops = drops;
3938			st.stats1.tp_packets += drops;
3939			data = &st.stats1;
3940		}
3941
3942		break;
3943	case PACKET_AUXDATA:
3944		val = po->auxdata;
3945		break;
3946	case PACKET_ORIGDEV:
3947		val = po->origdev;
3948		break;
3949	case PACKET_VNET_HDR:
3950		val = po->has_vnet_hdr;
3951		break;
3952	case PACKET_VERSION:
3953		val = po->tp_version;
3954		break;
3955	case PACKET_HDRLEN:
3956		if (len > sizeof(int))
3957			len = sizeof(int);
3958		if (len < sizeof(int))
3959			return -EINVAL;
3960		if (copy_from_user(&val, optval, len))
3961			return -EFAULT;
3962		switch (val) {
3963		case TPACKET_V1:
3964			val = sizeof(struct tpacket_hdr);
3965			break;
3966		case TPACKET_V2:
3967			val = sizeof(struct tpacket2_hdr);
3968			break;
3969		case TPACKET_V3:
3970			val = sizeof(struct tpacket3_hdr);
3971			break;
3972		default:
3973			return -EINVAL;
3974		}
3975		break;
3976	case PACKET_RESERVE:
3977		val = po->tp_reserve;
3978		break;
3979	case PACKET_LOSS:
3980		val = po->tp_loss;
3981		break;
3982	case PACKET_TIMESTAMP:
3983		val = po->tp_tstamp;
3984		break;
3985	case PACKET_FANOUT:
3986		val = (po->fanout ?
3987		       ((u32)po->fanout->id |
3988			((u32)po->fanout->type << 16) |
3989			((u32)po->fanout->flags << 24)) :
3990		       0);
3991		break;
3992	case PACKET_IGNORE_OUTGOING:
3993		val = po->prot_hook.ignore_outgoing;
3994		break;
3995	case PACKET_ROLLOVER_STATS:
3996		if (!po->rollover)
3997			return -EINVAL;
3998		rstats.tp_all = atomic_long_read(&po->rollover->num);
3999		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4000		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4001		data = &rstats;
4002		lv = sizeof(rstats);
4003		break;
4004	case PACKET_TX_HAS_OFF:
4005		val = po->tp_tx_has_off;
4006		break;
4007	case PACKET_QDISC_BYPASS:
4008		val = packet_use_direct_xmit(po);
4009		break;
4010	default:
4011		return -ENOPROTOOPT;
4012	}
4013
4014	if (len > lv)
4015		len = lv;
4016	if (put_user(len, optlen))
4017		return -EFAULT;
4018	if (copy_to_user(optval, data, len))
4019		return -EFAULT;
4020	return 0;
4021}
4022
4023
4024#ifdef CONFIG_COMPAT
4025static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
4026				    char __user *optval, unsigned int optlen)
4027{
4028	struct packet_sock *po = pkt_sk(sock->sk);
4029
4030	if (level != SOL_PACKET)
4031		return -ENOPROTOOPT;
4032
4033	if (optname == PACKET_FANOUT_DATA &&
4034	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
4035		optval = (char __user *)get_compat_bpf_fprog(optval);
4036		if (!optval)
4037			return -EFAULT;
4038		optlen = sizeof(struct sock_fprog);
4039	}
4040
4041	return packet_setsockopt(sock, level, optname, optval, optlen);
4042}
4043#endif
4044
4045static int packet_notifier(struct notifier_block *this,
4046			   unsigned long msg, void *ptr)
4047{
4048	struct sock *sk;
4049	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4050	struct net *net = dev_net(dev);
4051
4052	rcu_read_lock();
4053	sk_for_each_rcu(sk, &net->packet.sklist) {
4054		struct packet_sock *po = pkt_sk(sk);
4055
4056		switch (msg) {
4057		case NETDEV_UNREGISTER:
4058			if (po->mclist)
4059				packet_dev_mclist_delete(dev, &po->mclist);
4060			/* fallthrough */
4061
4062		case NETDEV_DOWN:
4063			if (dev->ifindex == po->ifindex) {
4064				spin_lock(&po->bind_lock);
4065				if (po->running) {
4066					__unregister_prot_hook(sk, false);
4067					sk->sk_err = ENETDOWN;
4068					if (!sock_flag(sk, SOCK_DEAD))
4069						sk->sk_error_report(sk);
4070				}
4071				if (msg == NETDEV_UNREGISTER) {
4072					packet_cached_dev_reset(po);
4073					po->ifindex = -1;
4074					if (po->prot_hook.dev)
4075						dev_put(po->prot_hook.dev);
4076					po->prot_hook.dev = NULL;
4077				}
4078				spin_unlock(&po->bind_lock);
4079			}
4080			break;
4081		case NETDEV_UP:
4082			if (dev->ifindex == po->ifindex) {
4083				spin_lock(&po->bind_lock);
4084				if (po->num)
4085					register_prot_hook(sk);
4086				spin_unlock(&po->bind_lock);
4087			}
4088			break;
4089		}
4090	}
4091	rcu_read_unlock();
4092	return NOTIFY_DONE;
4093}
4094
4095
4096static int packet_ioctl(struct socket *sock, unsigned int cmd,
4097			unsigned long arg)
4098{
4099	struct sock *sk = sock->sk;
4100
4101	switch (cmd) {
4102	case SIOCOUTQ:
4103	{
4104		int amount = sk_wmem_alloc_get(sk);
4105
4106		return put_user(amount, (int __user *)arg);
4107	}
4108	case SIOCINQ:
4109	{
4110		struct sk_buff *skb;
4111		int amount = 0;
4112
4113		spin_lock_bh(&sk->sk_receive_queue.lock);
4114		skb = skb_peek(&sk->sk_receive_queue);
4115		if (skb)
4116			amount = skb->len;
4117		spin_unlock_bh(&sk->sk_receive_queue.lock);
4118		return put_user(amount, (int __user *)arg);
4119	}
4120#ifdef CONFIG_INET
4121	case SIOCADDRT:
4122	case SIOCDELRT:
4123	case SIOCDARP:
4124	case SIOCGARP:
4125	case SIOCSARP:
4126	case SIOCGIFADDR:
4127	case SIOCSIFADDR:
4128	case SIOCGIFBRDADDR:
4129	case SIOCSIFBRDADDR:
4130	case SIOCGIFNETMASK:
4131	case SIOCSIFNETMASK:
4132	case SIOCGIFDSTADDR:
4133	case SIOCSIFDSTADDR:
4134	case SIOCSIFFLAGS:
4135		return inet_dgram_ops.ioctl(sock, cmd, arg);
4136#endif
4137
4138	default:
4139		return -ENOIOCTLCMD;
4140	}
4141	return 0;
4142}
4143
4144static __poll_t packet_poll(struct file *file, struct socket *sock,
4145				poll_table *wait)
4146{
4147	struct sock *sk = sock->sk;
4148	struct packet_sock *po = pkt_sk(sk);
4149	__poll_t mask = datagram_poll(file, sock, wait);
4150
4151	spin_lock_bh(&sk->sk_receive_queue.lock);
4152	if (po->rx_ring.pg_vec) {
4153		if (!packet_previous_rx_frame(po, &po->rx_ring,
4154			TP_STATUS_KERNEL))
4155			mask |= EPOLLIN | EPOLLRDNORM;
4156	}
4157	packet_rcv_try_clear_pressure(po);
4158	spin_unlock_bh(&sk->sk_receive_queue.lock);
4159	spin_lock_bh(&sk->sk_write_queue.lock);
4160	if (po->tx_ring.pg_vec) {
4161		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4162			mask |= EPOLLOUT | EPOLLWRNORM;
4163	}
4164	spin_unlock_bh(&sk->sk_write_queue.lock);
4165	return mask;
4166}
4167
4168
4169/* Dirty? Well, I still did not learn better way to account
4170 * for user mmaps.
4171 */
4172
4173static void packet_mm_open(struct vm_area_struct *vma)
4174{
4175	struct file *file = vma->vm_file;
4176	struct socket *sock = file->private_data;
4177	struct sock *sk = sock->sk;
4178
4179	if (sk)
4180		atomic_inc(&pkt_sk(sk)->mapped);
4181}
4182
4183static void packet_mm_close(struct vm_area_struct *vma)
4184{
4185	struct file *file = vma->vm_file;
4186	struct socket *sock = file->private_data;
4187	struct sock *sk = sock->sk;
4188
4189	if (sk)
4190		atomic_dec(&pkt_sk(sk)->mapped);
4191}
4192
4193static const struct vm_operations_struct packet_mmap_ops = {
4194	.open	=	packet_mm_open,
4195	.close	=	packet_mm_close,
4196};
4197
4198static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4199			unsigned int len)
4200{
4201	int i;
4202
4203	for (i = 0; i < len; i++) {
4204		if (likely(pg_vec[i].buffer)) {
4205			if (is_vmalloc_addr(pg_vec[i].buffer))
4206				vfree(pg_vec[i].buffer);
4207			else
4208				free_pages((unsigned long)pg_vec[i].buffer,
4209					   order);
4210			pg_vec[i].buffer = NULL;
4211		}
4212	}
4213	kfree(pg_vec);
4214}
4215
4216static char *alloc_one_pg_vec_page(unsigned long order)
4217{
4218	char *buffer;
4219	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4220			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4221
4222	buffer = (char *) __get_free_pages(gfp_flags, order);
4223	if (buffer)
4224		return buffer;
4225
4226	/* __get_free_pages failed, fall back to vmalloc */
4227	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4228	if (buffer)
4229		return buffer;
4230
4231	/* vmalloc failed, lets dig into swap here */
4232	gfp_flags &= ~__GFP_NORETRY;
4233	buffer = (char *) __get_free_pages(gfp_flags, order);
4234	if (buffer)
4235		return buffer;
4236
4237	/* complete and utter failure */
4238	return NULL;
4239}
4240
4241static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4242{
4243	unsigned int block_nr = req->tp_block_nr;
4244	struct pgv *pg_vec;
4245	int i;
4246
4247	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4248	if (unlikely(!pg_vec))
4249		goto out;
4250
4251	for (i = 0; i < block_nr; i++) {
4252		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4253		if (unlikely(!pg_vec[i].buffer))
4254			goto out_free_pgvec;
4255	}
4256
4257out:
4258	return pg_vec;
4259
4260out_free_pgvec:
4261	free_pg_vec(pg_vec, order, block_nr);
4262	pg_vec = NULL;
4263	goto out;
4264}
4265
4266static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4267		int closing, int tx_ring)
4268{
4269	struct pgv *pg_vec = NULL;
4270	struct packet_sock *po = pkt_sk(sk);
 
4271	int was_running, order = 0;
4272	struct packet_ring_buffer *rb;
4273	struct sk_buff_head *rb_queue;
4274	__be16 num;
4275	int err = -EINVAL;
4276	/* Added to avoid minimal code churn */
4277	struct tpacket_req *req = &req_u->req;
4278
4279	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4280	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4281
4282	err = -EBUSY;
4283	if (!closing) {
4284		if (atomic_read(&po->mapped))
4285			goto out;
4286		if (packet_read_pending(rb))
4287			goto out;
4288	}
4289
4290	if (req->tp_block_nr) {
4291		unsigned int min_frame_size;
4292
4293		/* Sanity tests and some calculations */
4294		err = -EBUSY;
4295		if (unlikely(rb->pg_vec))
4296			goto out;
4297
4298		switch (po->tp_version) {
4299		case TPACKET_V1:
4300			po->tp_hdrlen = TPACKET_HDRLEN;
4301			break;
4302		case TPACKET_V2:
4303			po->tp_hdrlen = TPACKET2_HDRLEN;
4304			break;
4305		case TPACKET_V3:
4306			po->tp_hdrlen = TPACKET3_HDRLEN;
4307			break;
4308		}
4309
4310		err = -EINVAL;
4311		if (unlikely((int)req->tp_block_size <= 0))
4312			goto out;
4313		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4314			goto out;
4315		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4316		if (po->tp_version >= TPACKET_V3 &&
4317		    req->tp_block_size <
4318		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4319			goto out;
4320		if (unlikely(req->tp_frame_size < min_frame_size))
4321			goto out;
4322		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4323			goto out;
4324
4325		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4326		if (unlikely(rb->frames_per_block == 0))
4327			goto out;
4328		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4329			goto out;
4330		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4331					req->tp_frame_nr))
4332			goto out;
4333
4334		err = -ENOMEM;
4335		order = get_order(req->tp_block_size);
4336		pg_vec = alloc_pg_vec(req, order);
4337		if (unlikely(!pg_vec))
4338			goto out;
4339		switch (po->tp_version) {
4340		case TPACKET_V3:
4341			/* Block transmit is not supported yet */
4342			if (!tx_ring) {
4343				init_prb_bdqc(po, rb, pg_vec, req_u);
4344			} else {
4345				struct tpacket_req3 *req3 = &req_u->req3;
4346
4347				if (req3->tp_retire_blk_tov ||
4348				    req3->tp_sizeof_priv ||
4349				    req3->tp_feature_req_word) {
4350					err = -EINVAL;
4351					goto out_free_pg_vec;
4352				}
4353			}
4354			break;
4355		default:
 
 
 
 
 
 
4356			break;
4357		}
4358	}
4359	/* Done */
4360	else {
4361		err = -EINVAL;
4362		if (unlikely(req->tp_frame_nr))
4363			goto out;
4364	}
4365
4366
4367	/* Detach socket from network */
4368	spin_lock(&po->bind_lock);
4369	was_running = po->running;
4370	num = po->num;
4371	if (was_running) {
4372		po->num = 0;
4373		__unregister_prot_hook(sk, false);
4374	}
4375	spin_unlock(&po->bind_lock);
4376
4377	synchronize_net();
4378
4379	err = -EBUSY;
4380	mutex_lock(&po->pg_vec_lock);
4381	if (closing || atomic_read(&po->mapped) == 0) {
4382		err = 0;
4383		spin_lock_bh(&rb_queue->lock);
4384		swap(rb->pg_vec, pg_vec);
 
 
4385		rb->frame_max = (req->tp_frame_nr - 1);
4386		rb->head = 0;
4387		rb->frame_size = req->tp_frame_size;
4388		spin_unlock_bh(&rb_queue->lock);
4389
4390		swap(rb->pg_vec_order, order);
4391		swap(rb->pg_vec_len, req->tp_block_nr);
4392
4393		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4394		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4395						tpacket_rcv : packet_rcv;
4396		skb_queue_purge(rb_queue);
4397		if (atomic_read(&po->mapped))
4398			pr_err("packet_mmap: vma is busy: %d\n",
4399			       atomic_read(&po->mapped));
4400	}
4401	mutex_unlock(&po->pg_vec_lock);
4402
4403	spin_lock(&po->bind_lock);
4404	if (was_running) {
4405		po->num = num;
4406		register_prot_hook(sk);
4407	}
4408	spin_unlock(&po->bind_lock);
4409	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4410		/* Because we don't support block-based V3 on tx-ring */
4411		if (!tx_ring)
4412			prb_shutdown_retire_blk_timer(po, rb_queue);
4413	}
4414
4415out_free_pg_vec:
 
4416	if (pg_vec)
4417		free_pg_vec(pg_vec, order, req->tp_block_nr);
4418out:
4419	return err;
4420}
4421
4422static int packet_mmap(struct file *file, struct socket *sock,
4423		struct vm_area_struct *vma)
4424{
4425	struct sock *sk = sock->sk;
4426	struct packet_sock *po = pkt_sk(sk);
4427	unsigned long size, expected_size;
4428	struct packet_ring_buffer *rb;
4429	unsigned long start;
4430	int err = -EINVAL;
4431	int i;
4432
4433	if (vma->vm_pgoff)
4434		return -EINVAL;
4435
4436	mutex_lock(&po->pg_vec_lock);
4437
4438	expected_size = 0;
4439	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4440		if (rb->pg_vec) {
4441			expected_size += rb->pg_vec_len
4442						* rb->pg_vec_pages
4443						* PAGE_SIZE;
4444		}
4445	}
4446
4447	if (expected_size == 0)
4448		goto out;
4449
4450	size = vma->vm_end - vma->vm_start;
4451	if (size != expected_size)
4452		goto out;
4453
4454	start = vma->vm_start;
4455	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4456		if (rb->pg_vec == NULL)
4457			continue;
4458
4459		for (i = 0; i < rb->pg_vec_len; i++) {
4460			struct page *page;
4461			void *kaddr = rb->pg_vec[i].buffer;
4462			int pg_num;
4463
4464			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4465				page = pgv_to_page(kaddr);
4466				err = vm_insert_page(vma, start, page);
4467				if (unlikely(err))
4468					goto out;
4469				start += PAGE_SIZE;
4470				kaddr += PAGE_SIZE;
4471			}
4472		}
4473	}
4474
4475	atomic_inc(&po->mapped);
4476	vma->vm_ops = &packet_mmap_ops;
4477	err = 0;
4478
4479out:
4480	mutex_unlock(&po->pg_vec_lock);
4481	return err;
4482}
4483
4484static const struct proto_ops packet_ops_spkt = {
4485	.family =	PF_PACKET,
4486	.owner =	THIS_MODULE,
4487	.release =	packet_release,
4488	.bind =		packet_bind_spkt,
4489	.connect =	sock_no_connect,
4490	.socketpair =	sock_no_socketpair,
4491	.accept =	sock_no_accept,
4492	.getname =	packet_getname_spkt,
4493	.poll =		datagram_poll,
4494	.ioctl =	packet_ioctl,
4495	.gettstamp =	sock_gettstamp,
4496	.listen =	sock_no_listen,
4497	.shutdown =	sock_no_shutdown,
4498	.setsockopt =	sock_no_setsockopt,
4499	.getsockopt =	sock_no_getsockopt,
4500	.sendmsg =	packet_sendmsg_spkt,
4501	.recvmsg =	packet_recvmsg,
4502	.mmap =		sock_no_mmap,
4503	.sendpage =	sock_no_sendpage,
4504};
4505
4506static const struct proto_ops packet_ops = {
4507	.family =	PF_PACKET,
4508	.owner =	THIS_MODULE,
4509	.release =	packet_release,
4510	.bind =		packet_bind,
4511	.connect =	sock_no_connect,
4512	.socketpair =	sock_no_socketpair,
4513	.accept =	sock_no_accept,
4514	.getname =	packet_getname,
4515	.poll =		packet_poll,
4516	.ioctl =	packet_ioctl,
4517	.gettstamp =	sock_gettstamp,
4518	.listen =	sock_no_listen,
4519	.shutdown =	sock_no_shutdown,
4520	.setsockopt =	packet_setsockopt,
4521	.getsockopt =	packet_getsockopt,
4522#ifdef CONFIG_COMPAT
4523	.compat_setsockopt = compat_packet_setsockopt,
4524#endif
4525	.sendmsg =	packet_sendmsg,
4526	.recvmsg =	packet_recvmsg,
4527	.mmap =		packet_mmap,
4528	.sendpage =	sock_no_sendpage,
4529};
4530
4531static const struct net_proto_family packet_family_ops = {
4532	.family =	PF_PACKET,
4533	.create =	packet_create,
4534	.owner	=	THIS_MODULE,
4535};
4536
4537static struct notifier_block packet_netdev_notifier = {
4538	.notifier_call =	packet_notifier,
4539};
4540
4541#ifdef CONFIG_PROC_FS
4542
4543static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4544	__acquires(RCU)
4545{
4546	struct net *net = seq_file_net(seq);
4547
4548	rcu_read_lock();
4549	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4550}
4551
4552static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4553{
4554	struct net *net = seq_file_net(seq);
4555	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4556}
4557
4558static void packet_seq_stop(struct seq_file *seq, void *v)
4559	__releases(RCU)
4560{
4561	rcu_read_unlock();
4562}
4563
4564static int packet_seq_show(struct seq_file *seq, void *v)
4565{
4566	if (v == SEQ_START_TOKEN)
4567		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4568	else {
4569		struct sock *s = sk_entry(v);
4570		const struct packet_sock *po = pkt_sk(s);
4571
4572		seq_printf(seq,
4573			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4574			   s,
4575			   refcount_read(&s->sk_refcnt),
4576			   s->sk_type,
4577			   ntohs(po->num),
4578			   po->ifindex,
4579			   po->running,
4580			   atomic_read(&s->sk_rmem_alloc),
4581			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4582			   sock_i_ino(s));
4583	}
4584
4585	return 0;
4586}
4587
4588static const struct seq_operations packet_seq_ops = {
4589	.start	= packet_seq_start,
4590	.next	= packet_seq_next,
4591	.stop	= packet_seq_stop,
4592	.show	= packet_seq_show,
4593};
4594#endif
4595
4596static int __net_init packet_net_init(struct net *net)
4597{
4598	mutex_init(&net->packet.sklist_lock);
4599	INIT_HLIST_HEAD(&net->packet.sklist);
4600
4601	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4602			sizeof(struct seq_net_private)))
4603		return -ENOMEM;
4604
4605	return 0;
4606}
4607
4608static void __net_exit packet_net_exit(struct net *net)
4609{
4610	remove_proc_entry("packet", net->proc_net);
4611	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4612}
4613
4614static struct pernet_operations packet_net_ops = {
4615	.init = packet_net_init,
4616	.exit = packet_net_exit,
4617};
4618
4619
4620static void __exit packet_exit(void)
4621{
4622	unregister_netdevice_notifier(&packet_netdev_notifier);
4623	unregister_pernet_subsys(&packet_net_ops);
4624	sock_unregister(PF_PACKET);
4625	proto_unregister(&packet_proto);
4626}
4627
4628static int __init packet_init(void)
4629{
4630	int rc;
4631
4632	rc = proto_register(&packet_proto, 0);
4633	if (rc)
4634		goto out;
4635	rc = sock_register(&packet_family_ops);
4636	if (rc)
4637		goto out_proto;
4638	rc = register_pernet_subsys(&packet_net_ops);
4639	if (rc)
4640		goto out_sock;
4641	rc = register_netdevice_notifier(&packet_netdev_notifier);
4642	if (rc)
4643		goto out_pernet;
4644
4645	return 0;
4646
4647out_pernet:
4648	unregister_pernet_subsys(&packet_net_ops);
4649out_sock:
4650	sock_unregister(PF_PACKET);
4651out_proto:
4652	proto_unregister(&packet_proto);
4653out:
4654	return rc;
4655}
4656
4657module_init(packet_init);
4658module_exit(packet_exit);
4659MODULE_LICENSE("GPL");
4660MODULE_ALIAS_NETPROTO(PF_PACKET);