   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#include <linux/types.h>
  50#include <linux/mm.h>
  51#include <linux/capability.h>
  52#include <linux/fcntl.h>
  53#include <linux/socket.h>
  54#include <linux/in.h>
  55#include <linux/inet.h>
  56#include <linux/netdevice.h>
  57#include <linux/if_packet.h>
  58#include <linux/wireless.h>
  59#include <linux/kernel.h>
  60#include <linux/kmod.h>
  61#include <linux/slab.h>
  62#include <linux/vmalloc.h>
  63#include <net/net_namespace.h>
  64#include <net/ip.h>
  65#include <net/protocol.h>
  66#include <linux/skbuff.h>
  67#include <net/sock.h>
  68#include <linux/errno.h>
  69#include <linux/timer.h>
  70#include <linux/uaccess.h>
  71#include <asm/ioctls.h>
  72#include <asm/page.h>
  73#include <asm/cacheflush.h>
  74#include <asm/io.h>
  75#include <linux/proc_fs.h>
  76#include <linux/seq_file.h>
  77#include <linux/poll.h>
  78#include <linux/module.h>
  79#include <linux/init.h>
  80#include <linux/mutex.h>
  81#include <linux/if_vlan.h>
  82#include <linux/virtio_net.h>
  83#include <linux/errqueue.h>
  84#include <linux/net_tstamp.h>
  85#include <linux/percpu.h>
  86#ifdef CONFIG_INET
  87#include <net/inet_common.h>
  88#endif
  89#include <linux/bpf.h>
  90#include <net/compat.h>
  91
  92#include "internal.h"
  93
  94/*
  95   Assumptions:
  96   - if a device has no dev->hard_header routine, it adds and removes the ll
  97     header itself. In this case the ll header is invisible outside of the
  98     device, but higher levels should still reserve dev->hard_header_len.
  99     Some devices are clever enough to reallocate the skb when the header
 100     does not fit into the reserved space (tunnels); others are not
 101     (PPP).
 102   - a packet socket receives packets with the ll header already pulled,
 103     so SOCK_RAW must push it back.
 104
 105On receive:
 106-----------
 107
 108Incoming, dev->hard_header!=NULL
 109   mac_header -> ll header
 110   data       -> data
 111
 112Outgoing, dev->hard_header!=NULL
 113   mac_header -> ll header
 114   data       -> ll header
 115
 116Incoming, dev->hard_header==NULL
 117   mac_header -> UNKNOWN position. It very likely points to the ll
 118		 header.  PPP does this, which is wrong, because it introduces
 119		 asymmetry between the rx and tx paths.
 120   data       -> data
 121
 122Outgoing, dev->hard_header==NULL
 123   mac_header -> data. ll header is still not built!
 124   data       -> data
 125
 126Summary
 127  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
 128
 129
 130On transmit:
 131------------
 132
 133dev->hard_header != NULL
 134   mac_header -> ll header
 135   data       -> ll header
 136
 137dev->hard_header == NULL (ll header is added by device, we cannot control it)
 138   mac_header -> data
 139   data       -> data
 140
 141   We should set nh.raw on output to the correct position;
 142   the packet classifier depends on it.
 143 */
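/*
 * Illustrative user-space sketch (not part of this file's build): the
 * ll-header visibility rules above are exactly what a packet socket
 * observes, depending on its type.  A minimal example, assuming the
 * usual glibc/uapi headers:
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <arpa/inet.h>
 *
 *	// SOCK_RAW: frames are delivered with the link-layer header
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// SOCK_DGRAM: the ll header is removed; link-level info is
 *	// reported via the sockaddr_ll filled in by recvfrom()
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */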
 144
 145/* Private packet socket structures. */
 146
 147/* identical to struct packet_mreq except it has
 148 * a longer address field.
 149 */
 150struct packet_mreq_max {
 151	int		mr_ifindex;
 152	unsigned short	mr_type;
 153	unsigned short	mr_alen;
 154	unsigned char	mr_address[MAX_ADDR_LEN];
 155};
 156
 157union tpacket_uhdr {
 158	struct tpacket_hdr  *h1;
 159	struct tpacket2_hdr *h2;
 160	struct tpacket3_hdr *h3;
 161	void *raw;
 162};
 163
 164static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 165		int closing, int tx_ring);
 166
 167#define V3_ALIGNMENT	(8)
 168
 169#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 170
 171#define BLK_PLUS_PRIV(sz_of_priv) \
 172	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 173
 174#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 175#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 176#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 177#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 178#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 179#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 180#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 181
 182struct packet_sock;
 183static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 184		       struct packet_type *pt, struct net_device *orig_dev);
 185
 186static void *packet_previous_frame(struct packet_sock *po,
 187		struct packet_ring_buffer *rb,
 188		int status);
 189static void packet_increment_head(struct packet_ring_buffer *buff);
 190static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 191static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 192			struct packet_sock *);
 193static void prb_retire_current_block(struct tpacket_kbdq_core *,
 194		struct packet_sock *, unsigned int status);
 195static int prb_queue_frozen(struct tpacket_kbdq_core *);
 196static void prb_open_block(struct tpacket_kbdq_core *,
 197		struct tpacket_block_desc *);
 198static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 199static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 200static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 201static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 202		struct tpacket3_hdr *);
 203static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 204		struct tpacket3_hdr *);
 205static void packet_flush_mclist(struct sock *sk);
 206static u16 packet_pick_tx_queue(struct sk_buff *skb);
 207
 208struct packet_skb_cb {
 209	union {
 210		struct sockaddr_pkt pkt;
 211		union {
 212			/* Trick: alias skb original length with
 213			 * ll.sll_family and ll.protocol in order
 214			 * to save room.
 215			 */
 216			unsigned int origlen;
 217			struct sockaddr_ll ll;
 218		};
 219	} sa;
 220};
 221
 222#define vio_le() virtio_legacy_is_little_endian()
 223
 224#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 225
 226#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 227#define GET_PBLOCK_DESC(x, bid)	\
 228	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 229#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 230	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 231#define GET_NEXT_PRB_BLK_NUM(x) \
 232	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 233	((x)->kactive_blk_num+1) : 0)
 234
 235static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 236static void __fanout_link(struct sock *sk, struct packet_sock *po);
 237
 238static int packet_direct_xmit(struct sk_buff *skb)
 239{
 240	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 241}
 242
 243static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 244{
 245	struct net_device *dev;
 246
 247	rcu_read_lock();
 248	dev = rcu_dereference(po->cached_dev);
 249	if (likely(dev))
 250		dev_hold(dev);
 251	rcu_read_unlock();
 252
 253	return dev;
 254}
 255
 256static void packet_cached_dev_assign(struct packet_sock *po,
 257				     struct net_device *dev)
 258{
 259	rcu_assign_pointer(po->cached_dev, dev);
 260}
 261
 262static void packet_cached_dev_reset(struct packet_sock *po)
 263{
 264	RCU_INIT_POINTER(po->cached_dev, NULL);
 265}
 266
 267static bool packet_use_direct_xmit(const struct packet_sock *po)
 268{
 269	return po->xmit == packet_direct_xmit;
 270}
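/*
 * User-space sketch (illustrative): po->xmit points at packet_direct_xmit()
 * once the socket opts out of the qdisc layer, e.g.:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 */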
 271
 272static u16 packet_pick_tx_queue(struct sk_buff *skb)
 273{
 274	struct net_device *dev = skb->dev;
 275	const struct net_device_ops *ops = dev->netdev_ops;
 276	int cpu = raw_smp_processor_id();
 277	u16 queue_index;
 278
 279#ifdef CONFIG_XPS
 280	skb->sender_cpu = cpu + 1;
 281#endif
 282	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 283	if (ops->ndo_select_queue) {
 284		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 285		queue_index = netdev_cap_txqueue(dev, queue_index);
 286	} else {
 287		queue_index = netdev_pick_tx(dev, skb, NULL);
 288	}
 289
 290	return queue_index;
 291}
 292
 293/* __register_prot_hook must be invoked through register_prot_hook
 294 * or from a context in which asynchronous accesses to the packet
 295 * socket are not possible (packet_create()).
 296 */
 297static void __register_prot_hook(struct sock *sk)
 298{
 299	struct packet_sock *po = pkt_sk(sk);
 300
 301	if (!po->running) {
 302		if (po->fanout)
 303			__fanout_link(sk, po);
 304		else
 305			dev_add_pack(&po->prot_hook);
 306
 307		sock_hold(sk);
 308		po->running = 1;
 309	}
 310}
 311
 312static void register_prot_hook(struct sock *sk)
 313{
 314	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 315	__register_prot_hook(sk);
 316}
 317
 318/* If the sync parameter is true, we will temporarily drop
 319 * the po->bind_lock and do a synchronize_net to make sure no
 320 * asynchronous packet processing paths still refer to the elements
 321 * of po->prot_hook.  If the sync parameter is false, it is the
 322 * caller's responsibility to take care of this.
 323 */
 324static void __unregister_prot_hook(struct sock *sk, bool sync)
 325{
 326	struct packet_sock *po = pkt_sk(sk);
 327
 328	lockdep_assert_held_once(&po->bind_lock);
 329
 330	po->running = 0;
 331
 332	if (po->fanout)
 333		__fanout_unlink(sk, po);
 334	else
 335		__dev_remove_pack(&po->prot_hook);
 336
 337	__sock_put(sk);
 338
 339	if (sync) {
 340		spin_unlock(&po->bind_lock);
 341		synchronize_net();
 342		spin_lock(&po->bind_lock);
 343	}
 344}
 345
 346static void unregister_prot_hook(struct sock *sk, bool sync)
 347{
 348	struct packet_sock *po = pkt_sk(sk);
 349
 350	if (po->running)
 351		__unregister_prot_hook(sk, sync);
 352}
 353
 354static inline struct page * __pure pgv_to_page(void *addr)
 355{
 356	if (is_vmalloc_addr(addr))
 357		return vmalloc_to_page(addr);
 358	return virt_to_page(addr);
 359}
 360
 361static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 362{
 363	union tpacket_uhdr h;
 364
 365	h.raw = frame;
 366	switch (po->tp_version) {
 367	case TPACKET_V1:
 368		h.h1->tp_status = status;
 369		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 370		break;
 371	case TPACKET_V2:
 372		h.h2->tp_status = status;
 373		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 374		break;
 375	case TPACKET_V3:
 376		h.h3->tp_status = status;
 377		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 378		break;
 379	default:
 380		WARN(1, "TPACKET version not supported.\n");
 381		BUG();
 382	}
 383
 384	smp_wmb();
 385}
 386
 387static int __packet_get_status(const struct packet_sock *po, void *frame)
 388{
 389	union tpacket_uhdr h;
 390
 391	smp_rmb();
 392
 393	h.raw = frame;
 394	switch (po->tp_version) {
 395	case TPACKET_V1:
 396		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 397		return h.h1->tp_status;
 398	case TPACKET_V2:
 399		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 400		return h.h2->tp_status;
 401	case TPACKET_V3:
 402		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 403		return h.h3->tp_status;
 404	default:
 405		WARN(1, "TPACKET version not supported.\n");
 406		BUG();
 407		return 0;
 408	}
 409}
 410
 411static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 412				   unsigned int flags)
 413{
 414	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 415
 416	if (shhwtstamps &&
 417	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 418	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 419		return TP_STATUS_TS_RAW_HARDWARE;
 420
 421	if (ktime_to_timespec64_cond(skb->tstamp, ts))
 422		return TP_STATUS_TS_SOFTWARE;
 423
 424	return 0;
 425}
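/*
 * User-space sketch: the po->tp_tstamp flags tested above come from the
 * PACKET_TIMESTAMP socket option.  For example, to prefer raw hardware
 * timestamps for ring frames (software remains the fallback):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 */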
 426
 427static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 428				    struct sk_buff *skb)
 429{
 430	union tpacket_uhdr h;
 431	struct timespec64 ts;
 432	__u32 ts_status;
 433
 434	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 435		return 0;
 436
 437	h.raw = frame;
 438	/*
 439	 * versions 1 through 3 overflow the timestamps in y2106, since they
 440	 * all store the seconds in a 32-bit unsigned integer.
 441	 * If we create a version 4, that should have a 64-bit timestamp,
 442	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
 443	 * nanoseconds.
 444	 */
 445	switch (po->tp_version) {
 446	case TPACKET_V1:
 447		h.h1->tp_sec = ts.tv_sec;
 448		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 449		break;
 450	case TPACKET_V2:
 451		h.h2->tp_sec = ts.tv_sec;
 452		h.h2->tp_nsec = ts.tv_nsec;
 453		break;
 454	case TPACKET_V3:
 455		h.h3->tp_sec = ts.tv_sec;
 456		h.h3->tp_nsec = ts.tv_nsec;
 457		break;
 458	default:
 459		WARN(1, "TPACKET version not supported.\n");
 460		BUG();
 461	}
 462
 463	/* one flush is safe, as both fields always lie on the same cacheline */
 464	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 465	smp_wmb();
 466
 467	return ts_status;
 468}
 469
 470static void *packet_lookup_frame(const struct packet_sock *po,
 471				 const struct packet_ring_buffer *rb,
 472				 unsigned int position,
 473				 int status)
 474{
 475	unsigned int pg_vec_pos, frame_offset;
 476	union tpacket_uhdr h;
 477
 478	pg_vec_pos = position / rb->frames_per_block;
 479	frame_offset = position % rb->frames_per_block;
 480
 481	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 482		(frame_offset * rb->frame_size);
 483
 484	if (status != __packet_get_status(po, h.raw))
 485		return NULL;
 486
 487	return h.raw;
 488}
 489
 490static void *packet_current_frame(struct packet_sock *po,
 491		struct packet_ring_buffer *rb,
 492		int status)
 493{
 494	return packet_lookup_frame(po, rb, rb->head, status);
 495}
 496
 497static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 498{
 499	del_timer_sync(&pkc->retire_blk_timer);
 500}
 501
 502static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 503		struct sk_buff_head *rb_queue)
 504{
 505	struct tpacket_kbdq_core *pkc;
 506
 507	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 508
 509	spin_lock_bh(&rb_queue->lock);
 510	pkc->delete_blk_timer = 1;
 511	spin_unlock_bh(&rb_queue->lock);
 512
 513	prb_del_retire_blk_timer(pkc);
 514}
 515
 516static void prb_setup_retire_blk_timer(struct packet_sock *po)
 517{
 518	struct tpacket_kbdq_core *pkc;
 519
 520	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 521	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 522		    0);
 523	pkc->retire_blk_timer.expires = jiffies;
 524}
 525
 526static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 527				int blk_size_in_bytes)
 528{
 529	struct net_device *dev;
 530	unsigned int mbits, div;
 531	struct ethtool_link_ksettings ecmd;
 532	int err;
 533
 534	rtnl_lock();
 535	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 536	if (unlikely(!dev)) {
 537		rtnl_unlock();
 538		return DEFAULT_PRB_RETIRE_TOV;
 539	}
 540	err = __ethtool_get_link_ksettings(dev, &ecmd);
 541	rtnl_unlock();
 542	if (err)
 543		return DEFAULT_PRB_RETIRE_TOV;
 544
 545	/* If the link speed is so slow you don't really
 546	 * need to worry about perf anyway
 547	 */
 548	if (ecmd.base.speed < SPEED_1000 ||
 549	    ecmd.base.speed == SPEED_UNKNOWN)
 550		return DEFAULT_PRB_RETIRE_TOV;
 551
 552	div = ecmd.base.speed / 1000;
 553	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 554
 555	if (div)
 556		mbits /= div;
 557
 558	if (div)
 559		return mbits + 1;
 560	return mbits;
 561}
 562
 563static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 564			union tpacket_req_u *req_u)
 565{
 566	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 567}
 568
 569static void init_prb_bdqc(struct packet_sock *po,
 570			struct packet_ring_buffer *rb,
 571			struct pgv *pg_vec,
 572			union tpacket_req_u *req_u)
 573{
 574	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 575	struct tpacket_block_desc *pbd;
 576
 577	memset(p1, 0x0, sizeof(*p1));
 578
 579	p1->knxt_seq_num = 1;
 580	p1->pkbdq = pg_vec;
 581	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 582	p1->pkblk_start	= pg_vec[0].buffer;
 583	p1->kblk_size = req_u->req3.tp_block_size;
 584	p1->knum_blocks	= req_u->req3.tp_block_nr;
 585	p1->hdrlen = po->tp_hdrlen;
 586	p1->version = po->tp_version;
 587	p1->last_kactive_blk_num = 0;
 588	po->stats.stats3.tp_freeze_q_cnt = 0;
 589	if (req_u->req3.tp_retire_blk_tov)
 590		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 591	else
 592		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 593						req_u->req3.tp_block_size);
 594	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 595	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 596	rwlock_init(&p1->blk_fill_in_prog_lock);
 597
 598	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 599	prb_init_ft_ops(p1, req_u);
 600	prb_setup_retire_blk_timer(po);
 601	prb_open_block(p1, pbd);
 602}
 603
 604/*  Do NOT update the last_blk_num first.
 605 *  Assumes sk_buff_head lock is held.
 606 */
 607static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 608{
 609	mod_timer(&pkc->retire_blk_timer,
 610			jiffies + pkc->tov_in_jiffies);
 611	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 612}
 613
 614/*
 615 * Timer logic:
 616 * 1) We refresh the timer only when we open a block.
 617 *    By doing this we don't waste cycles refreshing the timer
 618 *    on a packet-by-packet basis.
 619 *
 620 * With a 1MB block-size, on a 1Gbps line, it will take
 621 * i) ~8 ms to fill a block + ii) memcpy etc.
 622 * In this cut we are not accounting for the memcpy time.
 623 *
 624 * So, if the user sets the 'tmo' to 10ms then the timer
 625 * will never fire while the block is still getting filled
 626 * (which is what we want). However, the user could choose
 627 * to close a block early and that's fine.
 628 *
 629 * But when the timer does fire, we check whether or not to refresh it.
 630 * Since the tmo granularity is in msecs, it is not too expensive
 631 * to refresh the timer, let's say every '8' msecs.
 632 * Either the user can set the 'tmo' or we can derive it based on
 633 * a) line-speed and b) block-size.
 634 * prb_calc_retire_blk_tmo() calculates the tmo.
 635 *
 636 */
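/*
 * Worked example of the derivation above, mirroring what
 * prb_calc_retire_blk_tmo() computes for a 1 MiB block on a 1 Gbit/s
 * link (values are illustrative):
 *
 *	mbits = (1048576 * 8) / (1024 * 1024);	// = 8 Mbit per block
 *	div   = 1000 / 1000;			// speed / 1000 = 1
 *	tmo   = mbits / div + 1;		// = 9 ms > ~8 ms fill time
 */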
 637static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 638{
 639	struct packet_sock *po =
 640		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 641	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 642	unsigned int frozen;
 643	struct tpacket_block_desc *pbd;
 644
 645	spin_lock(&po->sk.sk_receive_queue.lock);
 646
 647	frozen = prb_queue_frozen(pkc);
 648	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 649
 650	if (unlikely(pkc->delete_blk_timer))
 651		goto out;
 652
 653	/* We only need to plug the race when the block is partially filled.
 654	 * tpacket_rcv:
 655	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 656	 *		copy_bits() is in progress ...
 657	 *		timer fires on other cpu:
 658	 *		we can't retire the current block because copy_bits
 659	 *		is in progress.
 660	 *
 661	 */
 662	if (BLOCK_NUM_PKTS(pbd)) {
 663		/* Waiting for skb_copy_bits to finish... */
 664		write_lock(&pkc->blk_fill_in_prog_lock);
 665		write_unlock(&pkc->blk_fill_in_prog_lock);
 666	}
 667
 668	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 669		if (!frozen) {
 670			if (!BLOCK_NUM_PKTS(pbd)) {
 671				/* An empty block. Just refresh the timer. */
 672				goto refresh_timer;
 673			}
 674			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 675			if (!prb_dispatch_next_block(pkc, po))
 676				goto refresh_timer;
 677			else
 678				goto out;
 679		} else {
 680			/* Case 1. Queue was frozen because user-space was
 681			 *	   lagging behind.
 682			 */
 683			if (prb_curr_blk_in_use(pbd)) {
 684				/*
 685				 * Ok, user-space is still behind.
 686				 * So just refresh the timer.
 687				 */
 688				goto refresh_timer;
 689			} else {
 690			       /* Case 2. The queue was frozen, user-space caught up,
 691				* and now the link went idle && the timer fired.
 692				* We don't have a block to close, so we open this
 693				* block and restart the timer.
 694				* Opening a block thaws the queue and restarts the
 695				* timer; thawing/timer-refresh is a side effect.
 696				*/
 697				prb_open_block(pkc, pbd);
 698				goto out;
 699			}
 700		}
 701	}
 702
 703refresh_timer:
 704	_prb_refresh_rx_retire_blk_timer(pkc);
 705
 706out:
 707	spin_unlock(&po->sk.sk_receive_queue.lock);
 708}
 709
 710static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 711		struct tpacket_block_desc *pbd1, __u32 status)
 712{
 713	/* Flush everything minus the block header */
 714
 715#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 716	u8 *start, *end;
 717
 718	start = (u8 *)pbd1;
 719
 720	/* Skip the block header (we know the header WILL fit in 4K) */
 721	start += PAGE_SIZE;
 722
 723	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 724	for (; start < end; start += PAGE_SIZE)
 725		flush_dcache_page(pgv_to_page(start));
 726
 727	smp_wmb();
 728#endif
 729
 730	/* Now update the block status. */
 731
 732	BLOCK_STATUS(pbd1) = status;
 733
 734	/* Flush the block header */
 735
 736#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 737	start = (u8 *)pbd1;
 738	flush_dcache_page(pgv_to_page(start));
 739
 740	smp_wmb();
 741#endif
 742}
 743
 744/*
 745 * Side effect:
 746 *
 747 * 1) flush the block
 748 * 2) Increment active_blk_num
 749 *
 750 * Note: we deliberately do NOT refresh the timer here,
 751 *	because almost always the next block will be opened.
 752 */
 753static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 754		struct tpacket_block_desc *pbd1,
 755		struct packet_sock *po, unsigned int stat)
 756{
 757	__u32 status = TP_STATUS_USER | stat;
 758
 759	struct tpacket3_hdr *last_pkt;
 760	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 761	struct sock *sk = &po->sk;
 762
 763	if (atomic_read(&po->tp_drops))
 764		status |= TP_STATUS_LOSING;
 765
 766	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 767	last_pkt->tp_next_offset = 0;
 768
 769	/* Get the ts of the last pkt */
 770	if (BLOCK_NUM_PKTS(pbd1)) {
 771		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 772		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 773	} else {
 774		/* Ok, we tmo'd - so get the current time.
 775		 *
 776		 * It shouldn't really happen as we don't close empty
 777		 * blocks. See prb_retire_rx_blk_timer_expired().
 778		 */
 779		struct timespec64 ts;
 780		ktime_get_real_ts64(&ts);
 781		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 782		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 783	}
 784
 785	smp_wmb();
 786
 787	/* Flush the block */
 788	prb_flush_block(pkc1, pbd1, status);
 789
 790	sk->sk_data_ready(sk);
 791
 792	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 793}
 794
 795static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 796{
 797	pkc->reset_pending_on_curr_blk = 0;
 798}
 799
 800/*
 801 * Side effect of opening a block:
 802 *
 803 * 1) prb_queue is thawed.
 804 * 2) retire_blk_timer is refreshed.
 805 *
 806 */
 807static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 808	struct tpacket_block_desc *pbd1)
 809{
 810	struct timespec64 ts;
 811	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 812
 813	smp_rmb();
 814
 815	/* We could have just memset this, but we would lose the
 816	 * flexibility of making the priv area sticky.
 817	 */
 818
 819	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 820	BLOCK_NUM_PKTS(pbd1) = 0;
 821	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 822
 823	ktime_get_real_ts64(&ts);
 824
 825	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 826	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 827
 828	pkc1->pkblk_start = (char *)pbd1;
 829	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 830
 831	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 832	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 833
 834	pbd1->version = pkc1->version;
 835	pkc1->prev = pkc1->nxt_offset;
 836	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 837
 838	prb_thaw_queue(pkc1);
 839	_prb_refresh_rx_retire_blk_timer(pkc1);
 840
 841	smp_wmb();
 842}
 843
 844/*
 845 * Queue freeze logic:
 846 * 1) Assume tp_block_nr = 8 blocks.
 847 * 2) At time 't0', user opens Rx ring.
 848 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 849 * 4) user-space is either sleeping or processing block '0'.
 850 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 851 *    it will close block-7, loop around, and try to fill block '0'.
 852 *    call-flow:
 853 *    __packet_lookup_frame_in_block
 854 *      prb_retire_current_block()
 855 *      prb_dispatch_next_block()
 856 *        |->(BLOCK_STATUS == USER) evaluates to true
 857 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 858 * 6) Now there are two cases:
 859 *    6.1) Link goes idle right after the queue is frozen.
 860 *         But remember, the last open_block() refreshed the timer.
 861 *         When this timer expires, it will refresh itself so that we can
 862 *         re-open block-0 in near future.
 863 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 864 *         case and __packet_lookup_frame_in_block will check if block-0
 865 *         is free and can now be re-used.
 866 */
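/*
 * User-space sketch matching the 8-block scenario above: a TPACKET_V3
 * rx ring is configured with PACKET_VERSION + PACKET_RX_RING (sizes
 * here are illustrative; tp_frame_nr must equal
 * (tp_block_size / tp_frame_size) * tp_block_nr):
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size		= 1 << 20,
 *		.tp_block_nr		= 8,
 *		.tp_frame_size		= 2048,
 *		.tp_frame_nr		= ((1 << 20) / 2048) * 8,
 *		.tp_retire_blk_tov	= 10,	// ms; 0 = auto-derive
 *	};
 *	int ver = TPACKET_V3;
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */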
 867static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 868				  struct packet_sock *po)
 869{
 870	pkc->reset_pending_on_curr_blk = 1;
 871	po->stats.stats3.tp_freeze_q_cnt++;
 872}
 873
 874#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 875
 876/*
 877 * If the next block is free then we will dispatch it
 878 * and return a good offset.
 879 * Else, we will freeze the queue.
 880 * So, caller must check the return value.
 881 */
 882static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 883		struct packet_sock *po)
 884{
 885	struct tpacket_block_desc *pbd;
 886
 887	smp_rmb();
 888
 889	/* 1. Get current block num */
 890	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 891
 892	/* 2. If this block is currently in_use then freeze the queue */
 893	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 894		prb_freeze_queue(pkc, po);
 895		return NULL;
 896	}
 897
 898	/*
 899	 * 3.
 900	 * open this block and return the offset where the first packet
 901	 * needs to get stored.
 902	 */
 903	prb_open_block(pkc, pbd);
 904	return (void *)pkc->nxt_offset;
 905}
 906
 907static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 908		struct packet_sock *po, unsigned int status)
 909{
 910	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 911
 912	/* retire/close the current block */
 913	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 914		/*
 915		 * Plug the case where copy_bits() is in progress on
 916		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 917		 * have space to copy the pkt in the current block and
 918		 * called prb_retire_current_block()
 919		 *
 920		 * We don't need to worry about the TMO case because
 921		 * the timer-handler already handled this case.
 922		 */
 923		if (!(status & TP_STATUS_BLK_TMO)) {
 924			/* Waiting for skb_copy_bits to finish... */
 925			write_lock(&pkc->blk_fill_in_prog_lock);
 926			write_unlock(&pkc->blk_fill_in_prog_lock);
 927		}
 928		prb_close_block(pkc, pbd, po, status);
 929		return;
 930	}
 931}
 932
 933static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
 934{
 935	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 936}
 937
 938static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 939{
 940	return pkc->reset_pending_on_curr_blk;
 941}
 942
 943static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 944	__releases(&pkc->blk_fill_in_prog_lock)
 945{
 946	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 947
 948	read_unlock(&pkc->blk_fill_in_prog_lock);
 949}
 950
 951static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 952			struct tpacket3_hdr *ppd)
 953{
 954	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 955}
 956
 957static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 958			struct tpacket3_hdr *ppd)
 959{
 960	ppd->hv1.tp_rxhash = 0;
 961}
 962
 963static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 964			struct tpacket3_hdr *ppd)
 965{
 966	if (skb_vlan_tag_present(pkc->skb)) {
 967		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
 968		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
 969		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
 970	} else {
 971		ppd->hv1.tp_vlan_tci = 0;
 972		ppd->hv1.tp_vlan_tpid = 0;
 973		ppd->tp_status = TP_STATUS_AVAILABLE;
 974	}
 975}
 976
 977static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 978			struct tpacket3_hdr *ppd)
 979{
 980	ppd->hv1.tp_padding = 0;
 981	prb_fill_vlan_info(pkc, ppd);
 982
 983	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 984		prb_fill_rxhash(pkc, ppd);
 985	else
 986		prb_clear_rxhash(pkc, ppd);
 987}
 988
 989static void prb_fill_curr_block(char *curr,
 990				struct tpacket_kbdq_core *pkc,
 991				struct tpacket_block_desc *pbd,
 992				unsigned int len)
 993	__acquires(&pkc->blk_fill_in_prog_lock)
 994{
 995	struct tpacket3_hdr *ppd;
 996
 997	ppd  = (struct tpacket3_hdr *)curr;
 998	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
 999	pkc->prev = curr;
1000	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1001	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1002	BLOCK_NUM_PKTS(pbd) += 1;
1003	read_lock(&pkc->blk_fill_in_prog_lock);
1004	prb_run_all_ft_ops(pkc, ppd);
1005}
1006
1007/* Assumes caller has the sk->rx_queue.lock */
1008static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1009					    struct sk_buff *skb,
1010					    unsigned int len
1011					    )
1012{
1013	struct tpacket_kbdq_core *pkc;
1014	struct tpacket_block_desc *pbd;
1015	char *curr, *end;
1016
1017	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1018	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1019
1020	/* Queue is frozen when user space is lagging behind */
1021	if (prb_queue_frozen(pkc)) {
1022		/*
1023		 * Check if the last block, which caused the queue to freeze,
1024		 * is still in use by user-space.
1025		 */
1026		if (prb_curr_blk_in_use(pbd)) {
1027			/* Can't record this packet */
1028			return NULL;
1029		} else {
1030			/*
1031			 * Ok, the block was released by user-space.
1032			 * Now let's open that block.
1033			 * Opening a block also thaws the queue;
1034			 * thawing is a side effect.
1035			 */
1036			prb_open_block(pkc, pbd);
1037		}
1038	}
1039
1040	smp_mb();
1041	curr = pkc->nxt_offset;
1042	pkc->skb = skb;
1043	end = (char *)pbd + pkc->kblk_size;
1044
1045	/* first try the current block */
1046	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1047		prb_fill_curr_block(curr, pkc, pbd, len);
1048		return (void *)curr;
1049	}
1050
1051	/* Ok, close the current block */
1052	prb_retire_current_block(pkc, po, 0);
1053
1054	/* Now, try to dispatch the next block */
1055	curr = (char *)prb_dispatch_next_block(pkc, po);
1056	if (curr) {
1057		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1058		prb_fill_curr_block(curr, pkc, pbd, len);
1059		return (void *)curr;
1060	}
1061
1062	/*
1063	 * No free blocks are available. User-space hasn't caught up yet.
1064	 * Queue was just frozen and now this packet will get dropped.
1065	 */
1066	return NULL;
1067}
1068
1069static void *packet_current_rx_frame(struct packet_sock *po,
1070					    struct sk_buff *skb,
1071					    int status, unsigned int len)
1072{
1073	char *curr = NULL;
1074	switch (po->tp_version) {
1075	case TPACKET_V1:
1076	case TPACKET_V2:
1077		curr = packet_lookup_frame(po, &po->rx_ring,
1078					po->rx_ring.head, status);
1079		return curr;
1080	case TPACKET_V3:
1081		return __packet_lookup_frame_in_block(po, skb, len);
1082	default:
1083		WARN(1, "TPACKET version not supported\n");
1084		BUG();
1085		return NULL;
1086	}
1087}
1088
1089static void *prb_lookup_block(const struct packet_sock *po,
1090			      const struct packet_ring_buffer *rb,
1091			      unsigned int idx,
1092			      int status)
1093{
1094	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1095	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1096
1097	if (status != BLOCK_STATUS(pbd))
1098		return NULL;
1099	return pbd;
1100}
1101
1102static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1103{
1104	unsigned int prev;
1105	if (rb->prb_bdqc.kactive_blk_num)
1106		prev = rb->prb_bdqc.kactive_blk_num-1;
1107	else
1108		prev = rb->prb_bdqc.knum_blocks-1;
1109	return prev;
1110}
1111
1112/* Assumes caller has held the rx_queue.lock */
1113static void *__prb_previous_block(struct packet_sock *po,
1114					 struct packet_ring_buffer *rb,
1115					 int status)
1116{
1117	unsigned int previous = prb_previous_blk_num(rb);
1118	return prb_lookup_block(po, rb, previous, status);
1119}
1120
1121static void *packet_previous_rx_frame(struct packet_sock *po,
1122					     struct packet_ring_buffer *rb,
1123					     int status)
1124{
1125	if (po->tp_version <= TPACKET_V2)
1126		return packet_previous_frame(po, rb, status);
1127
1128	return __prb_previous_block(po, rb, status);
1129}
1130
1131static void packet_increment_rx_head(struct packet_sock *po,
1132					    struct packet_ring_buffer *rb)
1133{
1134	switch (po->tp_version) {
1135	case TPACKET_V1:
1136	case TPACKET_V2:
1137		return packet_increment_head(rb);
1138	case TPACKET_V3:
1139	default:
1140		WARN(1, "TPACKET version not supported.\n");
1141		BUG();
1142		return;
1143	}
1144}
1145
1146static void *packet_previous_frame(struct packet_sock *po,
1147		struct packet_ring_buffer *rb,
1148		int status)
1149{
1150	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1151	return packet_lookup_frame(po, rb, previous, status);
1152}
1153
1154static void packet_increment_head(struct packet_ring_buffer *buff)
1155{
1156	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1157}
1158
1159static void packet_inc_pending(struct packet_ring_buffer *rb)
1160{
1161	this_cpu_inc(*rb->pending_refcnt);
1162}
1163
1164static void packet_dec_pending(struct packet_ring_buffer *rb)
1165{
1166	this_cpu_dec(*rb->pending_refcnt);
1167}
1168
1169static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1170{
1171	unsigned int refcnt = 0;
1172	int cpu;
1173
1174	/* We don't use pending refcount in rx_ring. */
1175	if (rb->pending_refcnt == NULL)
1176		return 0;
1177
1178	for_each_possible_cpu(cpu)
1179		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1180
1181	return refcnt;
1182}
1183
1184static int packet_alloc_pending(struct packet_sock *po)
1185{
1186	po->rx_ring.pending_refcnt = NULL;
1187
1188	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1189	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1190		return -ENOBUFS;
1191
1192	return 0;
1193}
1194
1195static void packet_free_pending(struct packet_sock *po)
1196{
1197	free_percpu(po->tx_ring.pending_refcnt);
1198}
1199
1200#define ROOM_POW_OFF	2
1201#define ROOM_NONE	0x0
1202#define ROOM_LOW	0x1
1203#define ROOM_NORMAL	0x2
1204
1205static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1206{
1207	int idx, len;
1208
1209	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1210	idx = READ_ONCE(po->rx_ring.head);
1211	if (pow_off)
1212		idx += len >> pow_off;
1213	if (idx >= len)
1214		idx -= len;
1215	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1216}
1217
1218static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1219{
1220	int idx, len;
1221
1222	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1223	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1224	if (pow_off)
1225		idx += len >> pow_off;
1226	if (idx >= len)
1227		idx -= len;
1228	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1229}
1230
1231static int __packet_rcv_has_room(const struct packet_sock *po,
1232				 const struct sk_buff *skb)
1233{
1234	const struct sock *sk = &po->sk;
1235	int ret = ROOM_NONE;
1236
1237	if (po->prot_hook.func != tpacket_rcv) {
1238		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1239		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1240				   - (skb ? skb->truesize : 0);
1241
1242		if (avail > (rcvbuf >> ROOM_POW_OFF))
1243			return ROOM_NORMAL;
1244		else if (avail > 0)
1245			return ROOM_LOW;
1246		else
1247			return ROOM_NONE;
1248	}
1249
1250	if (po->tp_version == TPACKET_V3) {
1251		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1252			ret = ROOM_NORMAL;
1253		else if (__tpacket_v3_has_room(po, 0))
1254			ret = ROOM_LOW;
1255	} else {
1256		if (__tpacket_has_room(po, ROOM_POW_OFF))
1257			ret = ROOM_NORMAL;
1258		else if (__tpacket_has_room(po, 0))
1259			ret = ROOM_LOW;
1260	}
1261
1262	return ret;
1263}
1264
1265static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1266{
1267	int pressure, ret;
1268
1269	ret = __packet_rcv_has_room(po, skb);
1270	pressure = ret != ROOM_NORMAL;
1271
1272	if (READ_ONCE(po->pressure) != pressure)
1273		WRITE_ONCE(po->pressure, pressure);
1274
1275	return ret;
1276}
1277
1278static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1279{
1280	if (READ_ONCE(po->pressure) &&
1281	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1282		WRITE_ONCE(po->pressure,  0);
1283}
1284
1285static void packet_sock_destruct(struct sock *sk)
1286{
1287	skb_queue_purge(&sk->sk_error_queue);
1288
1289	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1290	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1291
1292	if (!sock_flag(sk, SOCK_DEAD)) {
1293		pr_err("Attempt to release alive packet socket: %p\n", sk);
1294		return;
1295	}
1296
1297	sk_refcnt_debug_dec(sk);
1298}
1299
1300static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1301{
1302	u32 *history = po->rollover->history;
1303	u32 victim, rxhash;
1304	int i, count = 0;
1305
1306	rxhash = skb_get_hash(skb);
1307	for (i = 0; i < ROLLOVER_HLEN; i++)
1308		if (READ_ONCE(history[i]) == rxhash)
1309			count++;
1310
1311	victim = prandom_u32() % ROLLOVER_HLEN;
1312
1313	/* Avoid dirtying the cache line if possible */
1314	if (READ_ONCE(history[victim]) != rxhash)
1315		WRITE_ONCE(history[victim], rxhash);
1316
1317	return count > (ROLLOVER_HLEN >> 1);
1318}
1319
1320static unsigned int fanout_demux_hash(struct packet_fanout *f,
1321				      struct sk_buff *skb,
1322				      unsigned int num)
1323{
1324	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1325}
1326
1327static unsigned int fanout_demux_lb(struct packet_fanout *f,
1328				    struct sk_buff *skb,
1329				    unsigned int num)
1330{
1331	unsigned int val = atomic_inc_return(&f->rr_cur);
1332
1333	return val % num;
1334}
1335
1336static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1337				     struct sk_buff *skb,
1338				     unsigned int num)
1339{
1340	return smp_processor_id() % num;
1341}
1342
1343static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1344				     struct sk_buff *skb,
1345				     unsigned int num)
1346{
1347	return prandom_u32_max(num);
1348}
1349
1350static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1351					  struct sk_buff *skb,
1352					  unsigned int idx, bool try_self,
1353					  unsigned int num)
1354{
1355	struct packet_sock *po, *po_next, *po_skip = NULL;
1356	unsigned int i, j, room = ROOM_NONE;
1357
1358	po = pkt_sk(f->arr[idx]);
1359
1360	if (try_self) {
1361		room = packet_rcv_has_room(po, skb);
1362		if (room == ROOM_NORMAL ||
1363		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1364			return idx;
1365		po_skip = po;
1366	}
1367
1368	i = j = min_t(int, po->rollover->sock, num - 1);
1369	do {
1370		po_next = pkt_sk(f->arr[i]);
1371		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1372		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1373			if (i != j)
1374				po->rollover->sock = i;
1375			atomic_long_inc(&po->rollover->num);
1376			if (room == ROOM_LOW)
1377				atomic_long_inc(&po->rollover->num_huge);
1378			return i;
1379		}
1380
1381		if (++i == num)
1382			i = 0;
1383	} while (i != j);
1384
1385	atomic_long_inc(&po->rollover->num_failed);
1386	return idx;
1387}
1388
1389static unsigned int fanout_demux_qm(struct packet_fanout *f,
1390				    struct sk_buff *skb,
1391				    unsigned int num)
1392{
1393	return skb_get_queue_mapping(skb) % num;
1394}
1395
1396static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1397				     struct sk_buff *skb,
1398				     unsigned int num)
1399{
1400	struct bpf_prog *prog;
1401	unsigned int ret = 0;
1402
1403	rcu_read_lock();
1404	prog = rcu_dereference(f->bpf_prog);
1405	if (prog)
1406		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1407	rcu_read_unlock();
1408
1409	return ret;
1410}
1411
1412static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1413{
1414	return f->flags & (flag >> 8);
1415}
1416
1417static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1418			     struct packet_type *pt, struct net_device *orig_dev)
1419{
1420	struct packet_fanout *f = pt->af_packet_priv;
1421	unsigned int num = READ_ONCE(f->num_members);
1422	struct net *net = read_pnet(&f->net);
1423	struct packet_sock *po;
1424	unsigned int idx;
1425
1426	if (!net_eq(dev_net(dev), net) || !num) {
1427		kfree_skb(skb);
1428		return 0;
1429	}
1430
1431	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1432		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1433		if (!skb)
1434			return 0;
1435	}
1436	switch (f->type) {
1437	case PACKET_FANOUT_HASH:
1438	default:
1439		idx = fanout_demux_hash(f, skb, num);
1440		break;
1441	case PACKET_FANOUT_LB:
1442		idx = fanout_demux_lb(f, skb, num);
1443		break;
1444	case PACKET_FANOUT_CPU:
1445		idx = fanout_demux_cpu(f, skb, num);
1446		break;
1447	case PACKET_FANOUT_RND:
1448		idx = fanout_demux_rnd(f, skb, num);
1449		break;
1450	case PACKET_FANOUT_QM:
1451		idx = fanout_demux_qm(f, skb, num);
1452		break;
1453	case PACKET_FANOUT_ROLLOVER:
1454		idx = fanout_demux_rollover(f, skb, 0, false, num);
1455		break;
1456	case PACKET_FANOUT_CBPF:
1457	case PACKET_FANOUT_EBPF:
1458		idx = fanout_demux_bpf(f, skb, num);
1459		break;
1460	}
1461
1462	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1463		idx = fanout_demux_rollover(f, skb, idx, true, num);
1464
1465	po = pkt_sk(f->arr[idx]);
1466	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1467}
1468
1469DEFINE_MUTEX(fanout_mutex);
1470EXPORT_SYMBOL_GPL(fanout_mutex);
1471static LIST_HEAD(fanout_list);
1472static u16 fanout_next_id;
1473
1474static void __fanout_link(struct sock *sk, struct packet_sock *po)
1475{
1476	struct packet_fanout *f = po->fanout;
1477
1478	spin_lock(&f->lock);
1479	f->arr[f->num_members] = sk;
1480	smp_wmb();
1481	f->num_members++;
1482	if (f->num_members == 1)
1483		dev_add_pack(&f->prot_hook);
1484	spin_unlock(&f->lock);
1485}
1486
1487static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1488{
1489	struct packet_fanout *f = po->fanout;
1490	int i;
1491
1492	spin_lock(&f->lock);
1493	for (i = 0; i < f->num_members; i++) {
1494		if (f->arr[i] == sk)
1495			break;
1496	}
1497	BUG_ON(i >= f->num_members);
1498	f->arr[i] = f->arr[f->num_members - 1];
1499	f->num_members--;
1500	if (f->num_members == 0)
1501		__dev_remove_pack(&f->prot_hook);
1502	spin_unlock(&f->lock);
1503}
1504
1505static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1506{
1507	if (sk->sk_family != PF_PACKET)
1508		return false;
1509
1510	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1511}
1512
1513static void fanout_init_data(struct packet_fanout *f)
1514{
1515	switch (f->type) {
1516	case PACKET_FANOUT_LB:
1517		atomic_set(&f->rr_cur, 0);
1518		break;
1519	case PACKET_FANOUT_CBPF:
1520	case PACKET_FANOUT_EBPF:
1521		RCU_INIT_POINTER(f->bpf_prog, NULL);
1522		break;
1523	}
1524}
1525
1526static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1527{
1528	struct bpf_prog *old;
1529
1530	spin_lock(&f->lock);
1531	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1532	rcu_assign_pointer(f->bpf_prog, new);
1533	spin_unlock(&f->lock);
1534
1535	if (old) {
1536		synchronize_net();
1537		bpf_prog_destroy(old);
1538	}
1539}
1540
1541static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1542				unsigned int len)
1543{
1544	struct bpf_prog *new;
1545	struct sock_fprog fprog;
1546	int ret;
1547
1548	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1549		return -EPERM;
1550
1551	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1552	if (ret)
1553		return ret;
1554
1555	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1556	if (ret)
1557		return ret;
1558
1559	__fanout_set_data_bpf(po->fanout, new);
1560	return 0;
1561}
1562
1563static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1564				unsigned int len)
1565{
1566	struct bpf_prog *new;
1567	u32 fd;
1568
1569	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1570		return -EPERM;
1571	if (len != sizeof(fd))
1572		return -EINVAL;
1573	if (copy_from_sockptr(&fd, data, len))
1574		return -EFAULT;
1575
1576	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1577	if (IS_ERR(new))
1578		return PTR_ERR(new);
1579
1580	__fanout_set_data_bpf(po->fanout, new);
1581	return 0;
1582}
1583
1584static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1585			   unsigned int len)
1586{
1587	switch (po->fanout->type) {
1588	case PACKET_FANOUT_CBPF:
1589		return fanout_set_data_cbpf(po, data, len);
1590	case PACKET_FANOUT_EBPF:
1591		return fanout_set_data_ebpf(po, data, len);
1592	default:
1593		return -EINVAL;
1594	}
1595}
1596
1597static void fanout_release_data(struct packet_fanout *f)
1598{
1599	switch (f->type) {
1600	case PACKET_FANOUT_CBPF:
1601	case PACKET_FANOUT_EBPF:
1602		__fanout_set_data_bpf(f, NULL);
1603	}
1604}
1605
1606static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1607{
1608	struct packet_fanout *f;
1609
1610	list_for_each_entry(f, &fanout_list, list) {
1611		if (f->id == candidate_id &&
1612		    read_pnet(&f->net) == sock_net(sk)) {
1613			return false;
1614		}
1615	}
1616	return true;
1617}
1618
1619static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1620{
1621	u16 id = fanout_next_id;
1622
1623	do {
1624		if (__fanout_id_is_free(sk, id)) {
1625			*new_id = id;
1626			fanout_next_id = id + 1;
1627			return true;
1628		}
1629
1630		id++;
1631	} while (id != fanout_next_id);
1632
1633	return false;
1634}
1635
1636static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1637{
1638	struct packet_rollover *rollover = NULL;
1639	struct packet_sock *po = pkt_sk(sk);
1640	struct packet_fanout *f, *match;
1641	u8 type = type_flags & 0xff;
1642	u8 flags = type_flags >> 8;
1643	int err;
1644
1645	switch (type) {
1646	case PACKET_FANOUT_ROLLOVER:
1647		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1648			return -EINVAL;
1649	case PACKET_FANOUT_HASH:
1650	case PACKET_FANOUT_LB:
1651	case PACKET_FANOUT_CPU:
1652	case PACKET_FANOUT_RND:
1653	case PACKET_FANOUT_QM:
1654	case PACKET_FANOUT_CBPF:
1655	case PACKET_FANOUT_EBPF:
1656		break;
1657	default:
1658		return -EINVAL;
1659	}
1660
1661	mutex_lock(&fanout_mutex);
1662
1663	err = -EALREADY;
1664	if (po->fanout)
1665		goto out;
1666
1667	if (type == PACKET_FANOUT_ROLLOVER ||
1668	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1669		err = -ENOMEM;
1670		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1671		if (!rollover)
1672			goto out;
1673		atomic_long_set(&rollover->num, 0);
1674		atomic_long_set(&rollover->num_huge, 0);
1675		atomic_long_set(&rollover->num_failed, 0);
1676	}
1677
1678	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1679		if (id != 0) {
1680			err = -EINVAL;
1681			goto out;
1682		}
1683		if (!fanout_find_new_id(sk, &id)) {
1684			err = -ENOMEM;
1685			goto out;
1686		}
1687		/* ephemeral flag for the first socket in the group: drop it */
1688		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1689	}
1690
1691	match = NULL;
1692	list_for_each_entry(f, &fanout_list, list) {
1693		if (f->id == id &&
1694		    read_pnet(&f->net) == sock_net(sk)) {
1695			match = f;
1696			break;
1697		}
1698	}
1699	err = -EINVAL;
1700	if (match && match->flags != flags)
1701		goto out;
1702	if (!match) {
1703		err = -ENOMEM;
1704		match = kzalloc(sizeof(*match), GFP_KERNEL);
1705		if (!match)
1706			goto out;
1707		write_pnet(&match->net, sock_net(sk));
1708		match->id = id;
1709		match->type = type;
1710		match->flags = flags;
1711		INIT_LIST_HEAD(&match->list);
1712		spin_lock_init(&match->lock);
1713		refcount_set(&match->sk_ref, 0);
1714		fanout_init_data(match);
1715		match->prot_hook.type = po->prot_hook.type;
1716		match->prot_hook.dev = po->prot_hook.dev;
1717		match->prot_hook.func = packet_rcv_fanout;
1718		match->prot_hook.af_packet_priv = match;
1719		match->prot_hook.id_match = match_fanout_group;
1720		list_add(&match->list, &fanout_list);
1721	}
1722	err = -EINVAL;
1723
1724	spin_lock(&po->bind_lock);
1725	if (po->running &&
1726	    match->type == type &&
1727	    match->prot_hook.type == po->prot_hook.type &&
1728	    match->prot_hook.dev == po->prot_hook.dev) {
1729		err = -ENOSPC;
1730		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1731			__dev_remove_pack(&po->prot_hook);
1732			po->fanout = match;
1733			po->rollover = rollover;
1734			rollover = NULL;
1735			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1736			__fanout_link(sk, po);
1737			err = 0;
1738		}
1739	}
1740	spin_unlock(&po->bind_lock);
1741
1742	if (err && !refcount_read(&match->sk_ref)) {
1743		list_del(&match->list);
1744		kfree(match);
1745	}
1746
1747out:
1748	kfree(rollover);
1749	mutex_unlock(&fanout_mutex);
1750	return err;
1751}
1752
1753/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1754 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1755 * It is the responsibility of the caller to call fanout_release_data() and
1756 * free the returned packet_fanout (after synchronize_net())
1757 */
1758static struct packet_fanout *fanout_release(struct sock *sk)
1759{
1760	struct packet_sock *po = pkt_sk(sk);
1761	struct packet_fanout *f;
1762
1763	mutex_lock(&fanout_mutex);
1764	f = po->fanout;
1765	if (f) {
1766		po->fanout = NULL;
1767
1768		if (refcount_dec_and_test(&f->sk_ref))
1769			list_del(&f->list);
1770		else
1771			f = NULL;
1772	}
1773	mutex_unlock(&fanout_mutex);
1774
1775	return f;
1776}
1777
1778static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1779					  struct sk_buff *skb)
1780{
1781	/* Earlier code assumed this would be a VLAN pkt, double-check
1782	 * this now that we have the actual packet in hand. We can only
1783	 * do this check on Ethernet devices.
1784	 */
1785	if (unlikely(dev->type != ARPHRD_ETHER))
1786		return false;
1787
1788	skb_reset_mac_header(skb);
1789	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1790}
1791
1792static const struct proto_ops packet_ops;
1793
1794static const struct proto_ops packet_ops_spkt;
1795
1796static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1797			   struct packet_type *pt, struct net_device *orig_dev)
1798{
1799	struct sock *sk;
1800	struct sockaddr_pkt *spkt;
1801
1802	/*
1803	 *	When we registered the protocol we saved the socket in the data
1804	 *	field for just this event.
1805	 */
1806
1807	sk = pt->af_packet_priv;
1808
1809	/*
1810	 *	Yank back the headers [hope the device set this
1811	 *	right or kerboom...]
1812	 *
1813	 *	Incoming packets have ll header pulled,
1814	 *	push it back.
1815	 *
1816	 *	For outgoing ones skb->data == skb_mac_header(skb)
1817	 *	so this procedure is a no-op.
1818	 */
1819
1820	if (skb->pkt_type == PACKET_LOOPBACK)
1821		goto out;
1822
1823	if (!net_eq(dev_net(dev), sock_net(sk)))
1824		goto out;
1825
1826	skb = skb_share_check(skb, GFP_ATOMIC);
1827	if (skb == NULL)
1828		goto oom;
1829
1830	/* drop any routing info */
1831	skb_dst_drop(skb);
1832
1833	/* drop conntrack reference */
1834	nf_reset_ct(skb);
1835
1836	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1837
1838	skb_push(skb, skb->data - skb_mac_header(skb));
1839
1840	/*
1841	 *	The SOCK_PACKET socket receives _all_ frames.
1842	 */
1843
1844	spkt->spkt_family = dev->type;
1845	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1846	spkt->spkt_protocol = skb->protocol;
1847
1848	/*
1849	 *	Charge the memory to the socket. This is done specifically
1850	 *	to prevent sockets from using up all the memory.
1851	 */
1852
1853	if (sock_queue_rcv_skb(sk, skb) == 0)
1854		return 0;
1855
1856out:
1857	kfree_skb(skb);
1858oom:
1859	return 0;
1860}
1861
1862static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1863{
1864	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1865	    sock->type == SOCK_RAW) {
1866		skb_reset_mac_header(skb);
1867		skb->protocol = dev_parse_header_protocol(skb);
1868	}
1869
1870	skb_probe_transport_header(skb);
1871}
1872
1873/*
1874 *	Output a raw packet to a device layer. This bypasses all the other
1875 *	protocol layers and you must therefore supply it with a complete frame
1876 */
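/*
 * Legacy user-space sketch for the SOCK_PACKET path below (obsolete
 * API, shown only to illustrate the addressing; "eth0" is a
 * placeholder).  The caller supplies the complete frame, link-layer
 * header included:
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */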
1877
1878static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1879			       size_t len)
1880{
1881	struct sock *sk = sock->sk;
1882	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1883	struct sk_buff *skb = NULL;
1884	struct net_device *dev;
1885	struct sockcm_cookie sockc;
1886	__be16 proto = 0;
1887	int err;
1888	int extra_len = 0;
1889
1890	/*
1891	 *	Get and verify the address.
1892	 */
1893
1894	if (saddr) {
1895		if (msg->msg_namelen < sizeof(struct sockaddr))
1896			return -EINVAL;
1897		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1898			proto = saddr->spkt_protocol;
1899	} else
1900		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1901
1902	/*
1903	 *	Find the device first to size check it
1904	 */
1905
1906	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1907retry:
1908	rcu_read_lock();
1909	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1910	err = -ENODEV;
1911	if (dev == NULL)
1912		goto out_unlock;
1913
1914	err = -ENETDOWN;
1915	if (!(dev->flags & IFF_UP))
1916		goto out_unlock;
1917
1918	/*
1919	 * You may not queue a frame bigger than the mtu. This is the lowest level
1920	 * raw protocol and you must do your own fragmentation at this level.
1921	 */
1922
1923	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1924		if (!netif_supports_nofcs(dev)) {
1925			err = -EPROTONOSUPPORT;
1926			goto out_unlock;
1927		}
1928		extra_len = 4; /* We're doing our own CRC */
1929	}
1930
1931	err = -EMSGSIZE;
1932	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1933		goto out_unlock;
1934
1935	if (!skb) {
1936		size_t reserved = LL_RESERVED_SPACE(dev);
1937		int tlen = dev->needed_tailroom;
1938		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1939
1940		rcu_read_unlock();
1941		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1942		if (skb == NULL)
1943			return -ENOBUFS;
1944		/* FIXME: Save some space for broken drivers that write a hard
1945		 * header at transmission time by themselves. PPP is the notable
1946		 * one here. This should really be fixed at the driver level.
1947		 */
1948		skb_reserve(skb, reserved);
1949		skb_reset_network_header(skb);
1950
1951		/* Try to align data part correctly */
1952		if (hhlen) {
1953			skb->data -= hhlen;
1954			skb->tail -= hhlen;
1955			if (len < hhlen)
1956				skb_reset_network_header(skb);
1957		}
1958		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1959		if (err)
1960			goto out_free;
1961		goto retry;
1962	}
1963
1964	if (!dev_validate_header(dev, skb->data, len)) {
1965		err = -EINVAL;
1966		goto out_unlock;
1967	}
1968	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1969	    !packet_extra_vlan_len_allowed(dev, skb)) {
1970		err = -EMSGSIZE;
1971		goto out_unlock;
1972	}
1973
1974	sockcm_init(&sockc, sk);
1975	if (msg->msg_controllen) {
1976		err = sock_cmsg_send(sk, msg, &sockc);
1977		if (unlikely(err))
1978			goto out_unlock;
1979	}
1980
1981	skb->protocol = proto;
1982	skb->dev = dev;
1983	skb->priority = sk->sk_priority;
1984	skb->mark = sk->sk_mark;
1985	skb->tstamp = sockc.transmit_time;
1986
1987	skb_setup_tx_timestamp(skb, sockc.tsflags);
1988
1989	if (unlikely(extra_len == 4))
1990		skb->no_fcs = 1;
1991
1992	packet_parse_headers(skb, sock);
1993
1994	dev_queue_xmit(skb);
1995	rcu_read_unlock();
1996	return len;
1997
1998out_unlock:
1999	rcu_read_unlock();
2000out_free:
2001	kfree_skb(skb);
2002	return err;
2003}
2004
2005static unsigned int run_filter(struct sk_buff *skb,
2006			       const struct sock *sk,
2007			       unsigned int res)
2008{
2009	struct sk_filter *filter;
2010
2011	rcu_read_lock();
2012	filter = rcu_dereference(sk->sk_filter);
2013	if (filter != NULL)
2014		res = bpf_prog_run_clear_cb(filter->prog, skb);
2015	rcu_read_unlock();
2016
2017	return res;
2018}
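/* The filter consulted by run_filter() is attached from userspace via
 * SO_ATTACH_FILTER; a minimal hedged sketch (one classic-BPF
 * instruction accepting every packet up to 0xffff bytes):
 *
 *	struct sock_filter ins[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0xffff },
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = ins };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 *
 * A return value of 0 from the program drops the packet; a smaller
 * non-zero value caps snaplen, which is what the callers implement.
 */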
2019
2020static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2021			   size_t *len)
2022{
2023	struct virtio_net_hdr vnet_hdr;
2024
2025	if (*len < sizeof(vnet_hdr))
2026		return -EINVAL;
2027	*len -= sizeof(vnet_hdr);
2028
2029	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2030		return -EINVAL;
2031
2032	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2033}
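/* With PACKET_VNET_HDR enabled, packet_rcv_vnet() prepends the virtio
 * header to every received message; a hedged userspace sketch of the
 * matching read (fd and buf are illustrative):
 *
 *	struct virtio_net_hdr vh;
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh, .iov_len = sizeof(vh) },
 *		{ .iov_base = buf, .iov_len = sizeof(buf) },
 *	};
 *	struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	recvmsg(fd, &mh, 0);
 */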
2034
2035/*
2036 * This function performs lazy skb cloning in the hope that most
2037 * packets are discarded by BPF.
2038 *
2039 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2040 * and skb->cb are mangled. It works because (and until) packets
2041 * falling here are owned by the current CPU. Output packets are cloned
2042 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2043 * sequentially, so if we return the skb to its original state on exit,
2044 * we will not harm anyone.
2045 */
2046
2047static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2048		      struct packet_type *pt, struct net_device *orig_dev)
2049{
2050	struct sock *sk;
2051	struct sockaddr_ll *sll;
2052	struct packet_sock *po;
2053	u8 *skb_head = skb->data;
2054	int skb_len = skb->len;
2055	unsigned int snaplen, res;
2056	bool is_drop_n_account = false;
2057
2058	if (skb->pkt_type == PACKET_LOOPBACK)
2059		goto drop;
2060
2061	sk = pt->af_packet_priv;
2062	po = pkt_sk(sk);
2063
2064	if (!net_eq(dev_net(dev), sock_net(sk)))
2065		goto drop;
2066
2067	skb->dev = dev;
2068
2069	if (dev->header_ops) {
2070		/* The device has an explicit notion of ll header,
2071		 * exported to higher levels.
2072		 *
2073		 * Otherwise, the device hides the details of its frame
2074		 * structure, so that the corresponding packet header is
2075		 * never delivered to the user.
2076		 */
2077		if (sk->sk_type != SOCK_DGRAM)
2078			skb_push(skb, skb->data - skb_mac_header(skb));
2079		else if (skb->pkt_type == PACKET_OUTGOING) {
2080			/* Special case: outgoing packets have ll header at head */
2081			skb_pull(skb, skb_network_offset(skb));
2082		}
2083	}
2084
2085	snaplen = skb->len;
2086
2087	res = run_filter(skb, sk, snaplen);
2088	if (!res)
2089		goto drop_n_restore;
2090	if (snaplen > res)
2091		snaplen = res;
2092
2093	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2094		goto drop_n_acct;
2095
2096	if (skb_shared(skb)) {
2097		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2098		if (nskb == NULL)
2099			goto drop_n_acct;
2100
2101		if (skb_head != skb->data) {
2102			skb->data = skb_head;
2103			skb->len = skb_len;
2104		}
2105		consume_skb(skb);
2106		skb = nskb;
2107	}
2108
2109	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2110
2111	sll = &PACKET_SKB_CB(skb)->sa.ll;
2112	sll->sll_hatype = dev->type;
2113	sll->sll_pkttype = skb->pkt_type;
2114	if (unlikely(po->origdev))
2115		sll->sll_ifindex = orig_dev->ifindex;
2116	else
2117		sll->sll_ifindex = dev->ifindex;
2118
2119	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2120
2121	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2122	 * Use their space for storing the original skb length.
2123	 */
2124	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2125
2126	if (pskb_trim(skb, snaplen))
2127		goto drop_n_acct;
2128
2129	skb_set_owner_r(skb, sk);
2130	skb->dev = NULL;
2131	skb_dst_drop(skb);
2132
2133	/* drop conntrack reference */
2134	nf_reset_ct(skb);
2135
2136	spin_lock(&sk->sk_receive_queue.lock);
2137	po->stats.stats1.tp_packets++;
2138	sock_skb_set_dropcount(sk, skb);
2139	__skb_queue_tail(&sk->sk_receive_queue, skb);
2140	spin_unlock(&sk->sk_receive_queue.lock);
2141	sk->sk_data_ready(sk);
2142	return 0;
2143
2144drop_n_acct:
2145	is_drop_n_account = true;
2146	atomic_inc(&po->tp_drops);
2147	atomic_inc(&sk->sk_drops);
2148
2149drop_n_restore:
2150	if (skb_head != skb->data && skb_shared(skb)) {
2151		skb->data = skb_head;
2152		skb->len = skb_len;
2153	}
2154drop:
2155	if (!is_drop_n_account)
2156		consume_skb(skb);
2157	else
2158		kfree_skb(skb);
2159	return 0;
2160}
2161
2162static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2163		       struct packet_type *pt, struct net_device *orig_dev)
2164{
2165	struct sock *sk;
2166	struct packet_sock *po;
2167	struct sockaddr_ll *sll;
2168	union tpacket_uhdr h;
2169	u8 *skb_head = skb->data;
2170	int skb_len = skb->len;
2171	unsigned int snaplen, res;
2172	unsigned long status = TP_STATUS_USER;
2173	unsigned short macoff, hdrlen;
2174	unsigned int netoff;
2175	struct sk_buff *copy_skb = NULL;
2176	struct timespec64 ts;
2177	__u32 ts_status;
2178	bool is_drop_n_account = false;
2179	unsigned int slot_id = 0;
2180	bool do_vnet = false;
2181
2182	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2183	 * We may add members to them up to the current aligned size without forcing
2184	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2185	 */
2186	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2187	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2188
2189	if (skb->pkt_type == PACKET_LOOPBACK)
2190		goto drop;
2191
2192	sk = pt->af_packet_priv;
2193	po = pkt_sk(sk);
2194
2195	if (!net_eq(dev_net(dev), sock_net(sk)))
2196		goto drop;
2197
2198	if (dev->header_ops) {
2199		if (sk->sk_type != SOCK_DGRAM)
2200			skb_push(skb, skb->data - skb_mac_header(skb));
2201		else if (skb->pkt_type == PACKET_OUTGOING) {
2202			/* Special case: outgoing packets have ll header at head */
2203			skb_pull(skb, skb_network_offset(skb));
2204		}
2205	}
2206
2207	snaplen = skb->len;
2208
2209	res = run_filter(skb, sk, snaplen);
2210	if (!res)
2211		goto drop_n_restore;
2212
2213	/* If we are flooded, just give up */
2214	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2215		atomic_inc(&po->tp_drops);
2216		goto drop_n_restore;
2217	}
2218
2219	if (skb->ip_summed == CHECKSUM_PARTIAL)
2220		status |= TP_STATUS_CSUMNOTREADY;
2221	else if (skb->pkt_type != PACKET_OUTGOING &&
2222		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2223		  skb_csum_unnecessary(skb)))
2224		status |= TP_STATUS_CSUM_VALID;
2225
2226	if (snaplen > res)
2227		snaplen = res;
2228
2229	if (sk->sk_type == SOCK_DGRAM) {
2230		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2231				  po->tp_reserve;
2232	} else {
2233		unsigned int maclen = skb_network_offset(skb);
2234		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2235				       (maclen < 16 ? 16 : maclen)) +
2236				       po->tp_reserve;
2237		if (po->has_vnet_hdr) {
2238			netoff += sizeof(struct virtio_net_hdr);
2239			do_vnet = true;
2240		}
2241		macoff = netoff - maclen;
2242	}
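	/* Worked example (assuming Ethernet and SOCK_RAW): maclen is 14,
	 * so the 16-byte floor above applies,
	 * netoff = TPACKET_ALIGN(tp_hdrlen + 16) + tp_reserve, and
	 * macoff = netoff - 14 places the MAC header immediately in
	 * front of the network header inside the frame slot.
	 */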
2243	if (netoff > USHRT_MAX) {
2244		atomic_inc(&po->tp_drops);
2245		goto drop_n_restore;
2246	}
2247	if (po->tp_version <= TPACKET_V2) {
2248		if (macoff + snaplen > po->rx_ring.frame_size) {
2249			if (po->copy_thresh &&
2250			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2251				if (skb_shared(skb)) {
2252					copy_skb = skb_clone(skb, GFP_ATOMIC);
2253				} else {
2254					copy_skb = skb_get(skb);
2255					skb_head = skb->data;
2256				}
2257				if (copy_skb)
2258					skb_set_owner_r(copy_skb, sk);
2259			}
2260			snaplen = po->rx_ring.frame_size - macoff;
2261			if ((int)snaplen < 0) {
2262				snaplen = 0;
2263				do_vnet = false;
2264			}
2265		}
2266	} else if (unlikely(macoff + snaplen >
2267			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2268		u32 nval;
2269
2270		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2271		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2272			    snaplen, nval, macoff);
2273		snaplen = nval;
2274		if (unlikely((int)snaplen < 0)) {
2275			snaplen = 0;
2276			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2277			do_vnet = false;
2278		}
2279	}
2280	spin_lock(&sk->sk_receive_queue.lock);
2281	h.raw = packet_current_rx_frame(po, skb,
2282					TP_STATUS_KERNEL, (macoff+snaplen));
2283	if (!h.raw)
2284		goto drop_n_account;
2285
2286	if (po->tp_version <= TPACKET_V2) {
2287		slot_id = po->rx_ring.head;
2288		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2289			goto drop_n_account;
2290		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2291	}
2292
2293	if (do_vnet &&
2294	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2295				    sizeof(struct virtio_net_hdr),
2296				    vio_le(), true, 0)) {
2297		if (po->tp_version == TPACKET_V3)
2298			prb_clear_blk_fill_status(&po->rx_ring);
2299		goto drop_n_account;
2300	}
2301
2302	if (po->tp_version <= TPACKET_V2) {
2303		packet_increment_rx_head(po, &po->rx_ring);
2304	/*
2305	 * LOSING will be reported until you read the stats,
2306	 * because it's COR - Clear On Read.
2307	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2308	 * at the packet level.
2309	 */
2310		if (atomic_read(&po->tp_drops))
2311			status |= TP_STATUS_LOSING;
2312	}
2313
2314	po->stats.stats1.tp_packets++;
2315	if (copy_skb) {
2316		status |= TP_STATUS_COPY;
2317		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2318	}
2319	spin_unlock(&sk->sk_receive_queue.lock);
2320
2321	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2322
2323	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2324		ktime_get_real_ts64(&ts);
2325
2326	status |= ts_status;
2327
2328	switch (po->tp_version) {
2329	case TPACKET_V1:
2330		h.h1->tp_len = skb->len;
2331		h.h1->tp_snaplen = snaplen;
2332		h.h1->tp_mac = macoff;
2333		h.h1->tp_net = netoff;
2334		h.h1->tp_sec = ts.tv_sec;
2335		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2336		hdrlen = sizeof(*h.h1);
2337		break;
2338	case TPACKET_V2:
2339		h.h2->tp_len = skb->len;
2340		h.h2->tp_snaplen = snaplen;
2341		h.h2->tp_mac = macoff;
2342		h.h2->tp_net = netoff;
2343		h.h2->tp_sec = ts.tv_sec;
2344		h.h2->tp_nsec = ts.tv_nsec;
2345		if (skb_vlan_tag_present(skb)) {
2346			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2347			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2348			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2349		} else {
2350			h.h2->tp_vlan_tci = 0;
2351			h.h2->tp_vlan_tpid = 0;
2352		}
2353		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2354		hdrlen = sizeof(*h.h2);
2355		break;
2356	case TPACKET_V3:
2357		/* tp_next_offset and the vlan fields are already populated
2358		 * above, so don't clear them here.
2359		 */
2360		h.h3->tp_status |= status;
2361		h.h3->tp_len = skb->len;
2362		h.h3->tp_snaplen = snaplen;
2363		h.h3->tp_mac = macoff;
2364		h.h3->tp_net = netoff;
2365		h.h3->tp_sec  = ts.tv_sec;
2366		h.h3->tp_nsec = ts.tv_nsec;
2367		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2368		hdrlen = sizeof(*h.h3);
2369		break;
2370	default:
2371		BUG();
2372	}
2373
2374	sll = h.raw + TPACKET_ALIGN(hdrlen);
2375	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2376	sll->sll_family = AF_PACKET;
2377	sll->sll_hatype = dev->type;
2378	sll->sll_protocol = skb->protocol;
2379	sll->sll_pkttype = skb->pkt_type;
2380	if (unlikely(po->origdev))
2381		sll->sll_ifindex = orig_dev->ifindex;
2382	else
2383		sll->sll_ifindex = dev->ifindex;
2384
2385	smp_mb();
2386
2387#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2388	if (po->tp_version <= TPACKET_V2) {
2389		u8 *start, *end;
2390
2391		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2392					macoff + snaplen);
2393
2394		for (start = h.raw; start < end; start += PAGE_SIZE)
2395			flush_dcache_page(pgv_to_page(start));
2396	}
2397	smp_wmb();
2398#endif
2399
2400	if (po->tp_version <= TPACKET_V2) {
2401		spin_lock(&sk->sk_receive_queue.lock);
2402		__packet_set_status(po, h.raw, status);
2403		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2404		spin_unlock(&sk->sk_receive_queue.lock);
2405		sk->sk_data_ready(sk);
2406	} else if (po->tp_version == TPACKET_V3) {
2407		prb_clear_blk_fill_status(&po->rx_ring);
2408	}
2409
2410drop_n_restore:
2411	if (skb_head != skb->data && skb_shared(skb)) {
2412		skb->data = skb_head;
2413		skb->len = skb_len;
2414	}
2415drop:
2416	if (!is_drop_n_account)
2417		consume_skb(skb);
2418	else
2419		kfree_skb(skb);
2420	return 0;
2421
2422drop_n_account:
2423	spin_unlock(&sk->sk_receive_queue.lock);
2424	atomic_inc(&po->tp_drops);
2425	is_drop_n_account = true;
2426
2427	sk->sk_data_ready(sk);
2428	kfree_skb(copy_skb);
2429	goto drop_n_restore;
2430}
2431
2432static void tpacket_destruct_skb(struct sk_buff *skb)
2433{
2434	struct packet_sock *po = pkt_sk(skb->sk);
2435
2436	if (likely(po->tx_ring.pg_vec)) {
2437		void *ph;
2438		__u32 ts;
2439
2440		ph = skb_zcopy_get_nouarg(skb);
2441		packet_dec_pending(&po->tx_ring);
2442
2443		ts = __packet_set_timestamp(po, ph, skb);
2444		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2445
2446		if (!packet_read_pending(&po->tx_ring))
2447			complete(&po->skb_completion);
2448	}
2449
2450	sock_wfree(skb);
2451}
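/* Userspace's side of the status handshake completed in
 * tpacket_destruct_skb(): a TX slot becomes reusable once neither
 * SEND_REQUEST nor SENDING is set. Hedged sketch (TP_STATUS_AVAILABLE
 * is 0, so it cannot be tested with a bitwise AND; refill_slot() is a
 * hypothetical helper):
 *
 *	if (!(hdr->tp_status &
 *	      (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
 *		refill_slot(hdr);
 */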
2452
2453static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2454{
2455	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2456	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2457	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2458	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2459		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2460			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2461			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2462
2463	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2464		return -EINVAL;
2465
2466	return 0;
2467}
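/* Worked example of the clamp in __packet_snd_vnet_parse(): TCP over
 * IPv4 on Ethernet has csum_start = 34 (14 bytes of L2 + 20 of L3)
 * and csum_offset = 16, so the checksum field ends at byte
 * 34 + 16 + 2 = 52 and hdr_len is raised to at least 52 before being
 * checked against the payload length.
 */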
2468
2469static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2470				 struct virtio_net_hdr *vnet_hdr)
2471{
2472	if (*len < sizeof(*vnet_hdr))
2473		return -EINVAL;
2474	*len -= sizeof(*vnet_hdr);
2475
2476	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2477		return -EFAULT;
2478
2479	return __packet_snd_vnet_parse(vnet_hdr, *len);
2480}
2481
2482static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2483		void *frame, struct net_device *dev, void *data, int tp_len,
2484		__be16 proto, unsigned char *addr, int hlen, int copylen,
2485		const struct sockcm_cookie *sockc)
2486{
2487	union tpacket_uhdr ph;
2488	int to_write, offset, len, nr_frags, len_max;
2489	struct socket *sock = po->sk.sk_socket;
2490	struct page *page;
2491	int err;
2492
2493	ph.raw = frame;
2494
2495	skb->protocol = proto;
2496	skb->dev = dev;
2497	skb->priority = po->sk.sk_priority;
2498	skb->mark = po->sk.sk_mark;
2499	skb->tstamp = sockc->transmit_time;
2500	skb_setup_tx_timestamp(skb, sockc->tsflags);
2501	skb_zcopy_set_nouarg(skb, ph.raw);
2502
2503	skb_reserve(skb, hlen);
2504	skb_reset_network_header(skb);
2505
2506	to_write = tp_len;
2507
2508	if (sock->type == SOCK_DGRAM) {
2509		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2510				NULL, tp_len);
2511		if (unlikely(err < 0))
2512			return -EINVAL;
2513	} else if (copylen) {
2514		int hdrlen = min_t(int, copylen, tp_len);
2515
2516		skb_push(skb, dev->hard_header_len);
2517		skb_put(skb, copylen - dev->hard_header_len);
2518		err = skb_store_bits(skb, 0, data, hdrlen);
2519		if (unlikely(err))
2520			return err;
2521		if (!dev_validate_header(dev, skb->data, hdrlen))
2522			return -EINVAL;
2523
2524		data += hdrlen;
2525		to_write -= hdrlen;
2526	}
2527
2528	offset = offset_in_page(data);
2529	len_max = PAGE_SIZE - offset;
2530	len = ((to_write > len_max) ? len_max : to_write);
2531
2532	skb->data_len = to_write;
2533	skb->len += to_write;
2534	skb->truesize += to_write;
2535	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2536
2537	while (likely(to_write)) {
2538		nr_frags = skb_shinfo(skb)->nr_frags;
2539
2540		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2541			pr_err("Packet exceed the number of skb frags(%lu)\n",
2542			       MAX_SKB_FRAGS);
2543			return -EFAULT;
2544		}
2545
2546		page = pgv_to_page(data);
2547		data += len;
2548		flush_dcache_page(page);
2549		get_page(page);
2550		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2551		to_write -= len;
2552		offset = 0;
2553		len_max = PAGE_SIZE;
2554		len = ((to_write > len_max) ? len_max : to_write);
2555	}
2556
2557	packet_parse_headers(skb, sock);
2558
2559	return tp_len;
2560}
2561
2562static int tpacket_parse_header(struct packet_sock *po, void *frame,
2563				int size_max, void **data)
2564{
2565	union tpacket_uhdr ph;
2566	int tp_len, off;
2567
2568	ph.raw = frame;
2569
2570	switch (po->tp_version) {
2571	case TPACKET_V3:
2572		if (ph.h3->tp_next_offset != 0) {
2573			pr_warn_once("variable sized slot not supported");
2574			return -EINVAL;
2575		}
2576		tp_len = ph.h3->tp_len;
2577		break;
2578	case TPACKET_V2:
2579		tp_len = ph.h2->tp_len;
2580		break;
2581	default:
2582		tp_len = ph.h1->tp_len;
2583		break;
2584	}
2585	if (unlikely(tp_len > size_max)) {
2586		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2587		return -EMSGSIZE;
2588	}
2589
2590	if (unlikely(po->tp_tx_has_off)) {
2591		int off_min, off_max;
2592
2593		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2594		off_max = po->tx_ring.frame_size - tp_len;
2595		if (po->sk.sk_type == SOCK_DGRAM) {
2596			switch (po->tp_version) {
2597			case TPACKET_V3:
2598				off = ph.h3->tp_net;
2599				break;
2600			case TPACKET_V2:
2601				off = ph.h2->tp_net;
2602				break;
2603			default:
2604				off = ph.h1->tp_net;
2605				break;
2606			}
2607		} else {
2608			switch (po->tp_version) {
2609			case TPACKET_V3:
2610				off = ph.h3->tp_mac;
2611				break;
2612			case TPACKET_V2:
2613				off = ph.h2->tp_mac;
2614				break;
2615			default:
2616				off = ph.h1->tp_mac;
2617				break;
2618			}
2619		}
2620		if (unlikely((off < off_min) || (off_max < off)))
2621			return -EINVAL;
2622	} else {
2623		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2624	}
2625
2626	*data = frame + off;
2627	return tp_len;
2628}
2629
2630static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2631{
2632	struct sk_buff *skb = NULL;
2633	struct net_device *dev;
2634	struct virtio_net_hdr *vnet_hdr = NULL;
2635	struct sockcm_cookie sockc;
2636	__be16 proto;
2637	int err, reserve = 0;
2638	void *ph;
2639	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2640	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2641	unsigned char *addr = NULL;
2642	int tp_len, size_max;
2643	void *data;
2644	int len_sum = 0;
2645	int status = TP_STATUS_AVAILABLE;
2646	int hlen, tlen, copylen = 0;
2647	long timeo = 0;
2648
2649	mutex_lock(&po->pg_vec_lock);
2650
2651	/* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
2652	 * we need to confirm it under the protection of pg_vec_lock.
2653	 */
2654	if (unlikely(!po->tx_ring.pg_vec)) {
2655		err = -EBUSY;
2656		goto out;
2657	}
2658	if (likely(saddr == NULL)) {
2659		dev	= packet_cached_dev_get(po);
2660		proto	= po->num;
2661	} else {
2662		err = -EINVAL;
2663		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2664			goto out;
2665		if (msg->msg_namelen < (saddr->sll_halen
2666					+ offsetof(struct sockaddr_ll,
2667						sll_addr)))
2668			goto out;
2669		proto	= saddr->sll_protocol;
2670		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2671		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2672			if (dev && msg->msg_namelen < dev->addr_len +
2673				   offsetof(struct sockaddr_ll, sll_addr))
2674				goto out_put;
2675			addr = saddr->sll_addr;
2676		}
2677	}
2678
2679	err = -ENXIO;
2680	if (unlikely(dev == NULL))
2681		goto out;
2682	err = -ENETDOWN;
2683	if (unlikely(!(dev->flags & IFF_UP)))
2684		goto out_put;
2685
2686	sockcm_init(&sockc, &po->sk);
2687	if (msg->msg_controllen) {
2688		err = sock_cmsg_send(&po->sk, msg, &sockc);
2689		if (unlikely(err))
2690			goto out_put;
2691	}
2692
2693	if (po->sk.sk_socket->type == SOCK_RAW)
2694		reserve = dev->hard_header_len;
2695	size_max = po->tx_ring.frame_size
2696		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2697
2698	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2699		size_max = dev->mtu + reserve + VLAN_HLEN;
2700
2701	reinit_completion(&po->skb_completion);
2702
2703	do {
2704		ph = packet_current_frame(po, &po->tx_ring,
2705					  TP_STATUS_SEND_REQUEST);
2706		if (unlikely(ph == NULL)) {
2707			if (need_wait && skb) {
2708				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2709				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2710				if (timeo <= 0) {
2711					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2712					goto out_put;
2713				}
2714			}
2715			/* check for additional frames */
2716			continue;
2717		}
2718
2719		skb = NULL;
2720		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2721		if (tp_len < 0)
2722			goto tpacket_error;
2723
2724		status = TP_STATUS_SEND_REQUEST;
2725		hlen = LL_RESERVED_SPACE(dev);
2726		tlen = dev->needed_tailroom;
2727		if (po->has_vnet_hdr) {
2728			vnet_hdr = data;
2729			data += sizeof(*vnet_hdr);
2730			tp_len -= sizeof(*vnet_hdr);
2731			if (tp_len < 0 ||
2732			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2733				tp_len = -EINVAL;
2734				goto tpacket_error;
2735			}
2736			copylen = __virtio16_to_cpu(vio_le(),
2737						    vnet_hdr->hdr_len);
2738		}
2739		copylen = max_t(int, copylen, dev->hard_header_len);
2740		skb = sock_alloc_send_skb(&po->sk,
2741				hlen + tlen + sizeof(struct sockaddr_ll) +
2742				(copylen - dev->hard_header_len),
2743				!need_wait, &err);
2744
2745		if (unlikely(skb == NULL)) {
2746			/* we assume the socket was initially writeable ... */
2747			if (likely(len_sum > 0))
2748				err = len_sum;
2749			goto out_status;
2750		}
2751		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2752					  addr, hlen, copylen, &sockc);
2753		if (likely(tp_len >= 0) &&
2754		    tp_len > dev->mtu + reserve &&
2755		    !po->has_vnet_hdr &&
2756		    !packet_extra_vlan_len_allowed(dev, skb))
2757			tp_len = -EMSGSIZE;
2758
2759		if (unlikely(tp_len < 0)) {
2760tpacket_error:
2761			if (po->tp_loss) {
2762				__packet_set_status(po, ph,
2763						TP_STATUS_AVAILABLE);
2764				packet_increment_head(&po->tx_ring);
2765				kfree_skb(skb);
2766				continue;
2767			} else {
2768				status = TP_STATUS_WRONG_FORMAT;
2769				err = tp_len;
2770				goto out_status;
2771			}
2772		}
2773
2774		if (po->has_vnet_hdr) {
2775			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2776				tp_len = -EINVAL;
2777				goto tpacket_error;
2778			}
2779			virtio_net_hdr_set_proto(skb, vnet_hdr);
2780		}
2781
2782		skb->destructor = tpacket_destruct_skb;
2783		__packet_set_status(po, ph, TP_STATUS_SENDING);
2784		packet_inc_pending(&po->tx_ring);
2785
2786		status = TP_STATUS_SEND_REQUEST;
2787		err = po->xmit(skb);
2788		if (unlikely(err > 0)) {
2789			err = net_xmit_errno(err);
2790			if (err && __packet_get_status(po, ph) ==
2791				   TP_STATUS_AVAILABLE) {
2792				/* skb was destructed already */
2793				skb = NULL;
2794				goto out_status;
2795			}
2796			/*
2797			 * skb was dropped but not destructed yet;
2798			 * let's treat it like congestion or err < 0
2799			 */
2800			err = 0;
2801		}
2802		packet_increment_head(&po->tx_ring);
2803		len_sum += tp_len;
2804	} while (likely((ph != NULL) ||
2805		/* Note: packet_read_pending() might be slow if we have
2806		 * to call it, as it's a per-cpu variable, but in the
2807		 * fast path we already short-circuit the loop with the
2808		 * first condition and luckily don't have to take that
2809		 * path anyway.
2810		 */
2811		 (need_wait && packet_read_pending(&po->tx_ring))));
2812
2813	err = len_sum;
2814	goto out_put;
2815
2816out_status:
2817	__packet_set_status(po, ph, status);
2818	kfree_skb(skb);
2819out_put:
2820	dev_put(dev);
2821out:
2822	mutex_unlock(&po->pg_vec_lock);
2823	return err;
2824}
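/* Hedged userspace counterpart to tpacket_snd(): copy a frame into the
 * next TX-ring slot, flip its status, then kick the kernel. hdr, off,
 * frame and frame_len are illustrative:
 *
 *	memcpy((char *)hdr + off, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 *
 * Passing MSG_DONTWAIT as the send() flags skips the completion wait
 * implemented above.
 */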
2825
2826static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2827				        size_t reserve, size_t len,
2828				        size_t linear, int noblock,
2829				        int *err)
2830{
2831	struct sk_buff *skb;
2832
2833	/* Under a page?  Don't bother with paged skb. */
2834	if (prepad + len < PAGE_SIZE || !linear)
2835		linear = len;
2836
2837	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2838				   err, 0);
2839	if (!skb)
2840		return NULL;
2841
2842	skb_reserve(skb, reserve);
2843	skb_put(skb, linear);
2844	skb->data_len = len - linear;
2845	skb->len += len - linear;
2846
2847	return skb;
2848}
2849
2850static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2851{
2852	struct sock *sk = sock->sk;
2853	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2854	struct sk_buff *skb;
2855	struct net_device *dev;
2856	__be16 proto;
2857	unsigned char *addr = NULL;
2858	int err, reserve = 0;
2859	struct sockcm_cookie sockc;
2860	struct virtio_net_hdr vnet_hdr = { 0 };
2861	int offset = 0;
2862	struct packet_sock *po = pkt_sk(sk);
2863	bool has_vnet_hdr = false;
2864	int hlen, tlen, linear;
2865	int extra_len = 0;
2866
2867	/*
2868	 *	Get and verify the address.
2869	 */
2870
2871	if (likely(saddr == NULL)) {
2872		dev	= packet_cached_dev_get(po);
2873		proto	= po->num;
2874	} else {
2875		err = -EINVAL;
2876		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2877			goto out;
2878		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2879			goto out;
2880		proto	= saddr->sll_protocol;
2881		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2882		if (sock->type == SOCK_DGRAM) {
2883			if (dev && msg->msg_namelen < dev->addr_len +
2884				   offsetof(struct sockaddr_ll, sll_addr))
2885				goto out_unlock;
2886			addr = saddr->sll_addr;
2887		}
2888	}
2889
2890	err = -ENXIO;
2891	if (unlikely(dev == NULL))
2892		goto out_unlock;
2893	err = -ENETDOWN;
2894	if (unlikely(!(dev->flags & IFF_UP)))
2895		goto out_unlock;
2896
2897	sockcm_init(&sockc, sk);
2898	sockc.mark = sk->sk_mark;
2899	if (msg->msg_controllen) {
2900		err = sock_cmsg_send(sk, msg, &sockc);
2901		if (unlikely(err))
2902			goto out_unlock;
2903	}
2904
2905	if (sock->type == SOCK_RAW)
2906		reserve = dev->hard_header_len;
2907	if (po->has_vnet_hdr) {
2908		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2909		if (err)
2910			goto out_unlock;
2911		has_vnet_hdr = true;
2912	}
2913
2914	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2915		if (!netif_supports_nofcs(dev)) {
2916			err = -EPROTONOSUPPORT;
2917			goto out_unlock;
2918		}
2919		extra_len = 4; /* We're doing our own CRC */
2920	}
2921
2922	err = -EMSGSIZE;
2923	if (!vnet_hdr.gso_type &&
2924	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2925		goto out_unlock;
2926
2927	err = -ENOBUFS;
2928	hlen = LL_RESERVED_SPACE(dev);
2929	tlen = dev->needed_tailroom;
2930	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2931	linear = max(linear, min_t(int, len, dev->hard_header_len));
2932	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2933			       msg->msg_flags & MSG_DONTWAIT, &err);
2934	if (skb == NULL)
2935		goto out_unlock;
2936
2937	skb_reset_network_header(skb);
2938
2939	err = -EINVAL;
2940	if (sock->type == SOCK_DGRAM) {
2941		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2942		if (unlikely(offset < 0))
2943			goto out_free;
2944	} else if (reserve) {
2945		skb_reserve(skb, -reserve);
2946		if (len < reserve + sizeof(struct ipv6hdr) &&
2947		    dev->min_header_len != dev->hard_header_len)
2948			skb_reset_network_header(skb);
2949	}
2950
2951	/* Returns -EFAULT on error */
2952	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2953	if (err)
2954		goto out_free;
2955
2956	if (sock->type == SOCK_RAW &&
2957	    !dev_validate_header(dev, skb->data, len)) {
2958		err = -EINVAL;
2959		goto out_free;
2960	}
2961
2962	skb_setup_tx_timestamp(skb, sockc.tsflags);
2963
2964	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2965	    !packet_extra_vlan_len_allowed(dev, skb)) {
2966		err = -EMSGSIZE;
2967		goto out_free;
2968	}
2969
2970	skb->protocol = proto;
2971	skb->dev = dev;
2972	skb->priority = sk->sk_priority;
2973	skb->mark = sockc.mark;
2974	skb->tstamp = sockc.transmit_time;
2975
2976	if (has_vnet_hdr) {
2977		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2978		if (err)
2979			goto out_free;
2980		len += sizeof(vnet_hdr);
2981		virtio_net_hdr_set_proto(skb, &vnet_hdr);
2982	}
2983
2984	packet_parse_headers(skb, sock);
2985
2986	if (unlikely(extra_len == 4))
2987		skb->no_fcs = 1;
2988
2989	err = po->xmit(skb);
2990	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2991		goto out_unlock;
2992
2993	dev_put(dev);
2994
2995	return len;
2996
2997out_free:
2998	kfree_skb(skb);
2999out_unlock:
3000	if (dev)
3001		dev_put(dev);
3002out:
3003	return err;
3004}
3005
3006static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3007{
3008	struct sock *sk = sock->sk;
3009	struct packet_sock *po = pkt_sk(sk);
3010
3011	if (po->tx_ring.pg_vec)
3012		return tpacket_snd(po, msg);
3013	else
3014		return packet_snd(sock, msg, len);
3015}
3016
3017/*
3018 *	Close a PACKET socket. This is fairly simple. We immediately go
3019 *	to 'closed' state and remove our protocol entry from the device list.
3020 */
3021
3022static int packet_release(struct socket *sock)
3023{
3024	struct sock *sk = sock->sk;
3025	struct packet_sock *po;
3026	struct packet_fanout *f;
3027	struct net *net;
3028	union tpacket_req_u req_u;
3029
3030	if (!sk)
3031		return 0;
3032
3033	net = sock_net(sk);
3034	po = pkt_sk(sk);
3035
3036	mutex_lock(&net->packet.sklist_lock);
3037	sk_del_node_init_rcu(sk);
3038	mutex_unlock(&net->packet.sklist_lock);
3039
3040	preempt_disable();
3041	sock_prot_inuse_add(net, sk->sk_prot, -1);
3042	preempt_enable();
3043
3044	spin_lock(&po->bind_lock);
3045	unregister_prot_hook(sk, false);
3046	packet_cached_dev_reset(po);
3047
3048	if (po->prot_hook.dev) {
3049		dev_put(po->prot_hook.dev);
3050		po->prot_hook.dev = NULL;
3051	}
3052	spin_unlock(&po->bind_lock);
3053
3054	packet_flush_mclist(sk);
3055
3056	lock_sock(sk);
3057	if (po->rx_ring.pg_vec) {
3058		memset(&req_u, 0, sizeof(req_u));
3059		packet_set_ring(sk, &req_u, 1, 0);
3060	}
3061
3062	if (po->tx_ring.pg_vec) {
3063		memset(&req_u, 0, sizeof(req_u));
3064		packet_set_ring(sk, &req_u, 1, 1);
3065	}
3066	release_sock(sk);
3067
3068	f = fanout_release(sk);
3069
3070	synchronize_net();
3071
3072	kfree(po->rollover);
3073	if (f) {
3074		fanout_release_data(f);
3075		kfree(f);
3076	}
3077	/*
3078	 *	Now the socket is dead. No more input will appear.
3079	 */
3080	sock_orphan(sk);
3081	sock->sk = NULL;
3082
3083	/* Purge queues */
3084
3085	skb_queue_purge(&sk->sk_receive_queue);
3086	packet_free_pending(po);
3087	sk_refcnt_debug_release(sk);
3088
3089	sock_put(sk);
3090	return 0;
3091}
3092
3093/*
3094 *	Attach a packet hook.
3095 */
3096
3097static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3098			  __be16 proto)
3099{
3100	struct packet_sock *po = pkt_sk(sk);
3101	struct net_device *dev_curr;
3102	__be16 proto_curr;
3103	bool need_rehook;
3104	struct net_device *dev = NULL;
3105	int ret = 0;
3106	bool unlisted = false;
3107
3108	lock_sock(sk);
3109	spin_lock(&po->bind_lock);
3110	rcu_read_lock();
3111
3112	if (po->fanout) {
3113		ret = -EINVAL;
3114		goto out_unlock;
3115	}
3116
3117	if (name) {
3118		dev = dev_get_by_name_rcu(sock_net(sk), name);
3119		if (!dev) {
3120			ret = -ENODEV;
3121			goto out_unlock;
3122		}
3123	} else if (ifindex) {
3124		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3125		if (!dev) {
3126			ret = -ENODEV;
3127			goto out_unlock;
3128		}
3129	}
3130
3131	if (dev)
3132		dev_hold(dev);
3133
3134	proto_curr = po->prot_hook.type;
3135	dev_curr = po->prot_hook.dev;
3136
3137	need_rehook = proto_curr != proto || dev_curr != dev;
3138
3139	if (need_rehook) {
3140		if (po->running) {
3141			rcu_read_unlock();
3142			/* prevents packet_notifier() from calling
3143			 * register_prot_hook()
3144			 */
3145			po->num = 0;
3146			__unregister_prot_hook(sk, true);
3147			rcu_read_lock();
3148			dev_curr = po->prot_hook.dev;
3149			if (dev)
3150				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3151								 dev->ifindex);
3152		}
3153
3154		BUG_ON(po->running);
3155		po->num = proto;
3156		po->prot_hook.type = proto;
3157
3158		if (unlikely(unlisted)) {
3159			dev_put(dev);
3160			po->prot_hook.dev = NULL;
3161			po->ifindex = -1;
3162			packet_cached_dev_reset(po);
3163		} else {
3164			po->prot_hook.dev = dev;
3165			po->ifindex = dev ? dev->ifindex : 0;
3166			packet_cached_dev_assign(po, dev);
3167		}
3168	}
3169	if (dev_curr)
3170		dev_put(dev_curr);
3171
3172	if (proto == 0 || !need_rehook)
3173		goto out_unlock;
3174
3175	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3176		register_prot_hook(sk);
3177	} else {
3178		sk->sk_err = ENETDOWN;
3179		if (!sock_flag(sk, SOCK_DEAD))
3180			sk->sk_error_report(sk);
3181	}
3182
3183out_unlock:
3184	rcu_read_unlock();
3185	spin_unlock(&po->bind_lock);
3186	release_sock(sk);
3187	return ret;
3188}
3189
3190/*
3191 *	Bind a packet socket to a device
3192 */
3193
3194static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3195			    int addr_len)
3196{
3197	struct sock *sk = sock->sk;
3198	char name[sizeof(uaddr->sa_data) + 1];
3199
3200	/*
3201	 *	Check legality
3202	 */
3203
3204	if (addr_len != sizeof(struct sockaddr))
3205		return -EINVAL;
3206	/* uaddr->sa_data comes from the userspace, it's not guaranteed to be
3207	 * zero-terminated.
3208	 */
3209	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3210	name[sizeof(uaddr->sa_data)] = 0;
3211
3212	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3213}
3214
3215static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3216{
3217	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3218	struct sock *sk = sock->sk;
3219
3220	/*
3221	 *	Check legality
3222	 */
3223
3224	if (addr_len < sizeof(struct sockaddr_ll))
3225		return -EINVAL;
3226	if (sll->sll_family != AF_PACKET)
3227		return -EINVAL;
3228
3229	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3230			      sll->sll_protocol ? : pkt_sk(sk)->num);
3231}
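/* Hedged userspace sketch of the bind path above, attaching the socket
 * to ifindex 2 for all protocols (the index is illustrative; it would
 * normally come from if_nametoindex()):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = 2,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */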
3232
3233static struct proto packet_proto = {
3234	.name	  = "PACKET",
3235	.owner	  = THIS_MODULE,
3236	.obj_size = sizeof(struct packet_sock),
3237};
3238
3239/*
3240 *	Create a packet of type SOCK_PACKET.
3241 */
3242
3243static int packet_create(struct net *net, struct socket *sock, int protocol,
3244			 int kern)
3245{
3246	struct sock *sk;
3247	struct packet_sock *po;
3248	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3249	int err;
3250
3251	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3252		return -EPERM;
3253	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3254	    sock->type != SOCK_PACKET)
3255		return -ESOCKTNOSUPPORT;
3256
3257	sock->state = SS_UNCONNECTED;
3258
3259	err = -ENOBUFS;
3260	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3261	if (sk == NULL)
3262		goto out;
3263
3264	sock->ops = &packet_ops;
3265	if (sock->type == SOCK_PACKET)
3266		sock->ops = &packet_ops_spkt;
3267
3268	sock_init_data(sock, sk);
3269
3270	po = pkt_sk(sk);
3271	init_completion(&po->skb_completion);
3272	sk->sk_family = PF_PACKET;
3273	po->num = proto;
3274	po->xmit = dev_queue_xmit;
3275
3276	err = packet_alloc_pending(po);
3277	if (err)
3278		goto out2;
3279
3280	packet_cached_dev_reset(po);
3281
3282	sk->sk_destruct = packet_sock_destruct;
3283	sk_refcnt_debug_inc(sk);
3284
3285	/*
3286	 *	Attach a protocol block
3287	 */
3288
3289	spin_lock_init(&po->bind_lock);
3290	mutex_init(&po->pg_vec_lock);
3291	po->rollover = NULL;
3292	po->prot_hook.func = packet_rcv;
3293
3294	if (sock->type == SOCK_PACKET)
3295		po->prot_hook.func = packet_rcv_spkt;
3296
3297	po->prot_hook.af_packet_priv = sk;
3298
3299	if (proto) {
3300		po->prot_hook.type = proto;
3301		__register_prot_hook(sk);
3302	}
3303
3304	mutex_lock(&net->packet.sklist_lock);
3305	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3306	mutex_unlock(&net->packet.sklist_lock);
3307
3308	preempt_disable();
3309	sock_prot_inuse_add(net, &packet_proto, 1);
3310	preempt_enable();
3311
3312	return 0;
3313out2:
3314	sk_free(sk);
3315out:
3316	return err;
3317}
3318
3319/*
3320 *	Pull a packet from our receive queue and hand it to the user.
3321 *	If necessary we block.
3322 */
3323
3324static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3325			  int flags)
3326{
3327	struct sock *sk = sock->sk;
3328	struct sk_buff *skb;
3329	int copied, err;
3330	int vnet_hdr_len = 0;
3331	unsigned int origlen = 0;
3332
3333	err = -EINVAL;
3334	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3335		goto out;
3336
3337#if 0
3338	/* What error should we return now? EUNATTACH? */
3339	if (pkt_sk(sk)->ifindex < 0)
3340		return -ENODEV;
3341#endif
3342
3343	if (flags & MSG_ERRQUEUE) {
3344		err = sock_recv_errqueue(sk, msg, len,
3345					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3346		goto out;
3347	}
3348
3349	/*
3350	 *	Call the generic datagram receiver. This handles all sorts
3351	 *	of horrible races and re-entrancy so we can forget about it
3352	 *	in the protocol layers.
3353	 *
3354	 *	Now it will return ENETDOWN if the device has just gone down,
3355	 *	but then it will block.
3356	 */
3357
3358	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3359
3360	/*
3361	 *	An error occurred, so return it. Because skb_recv_datagram()
3362	 *	handles the blocking, we don't need to see or worry about
3363	 *	blocking retries.
3364	 */
3365
3366	if (skb == NULL)
3367		goto out;
3368
3369	packet_rcv_try_clear_pressure(pkt_sk(sk));
3370
3371	if (pkt_sk(sk)->has_vnet_hdr) {
3372		err = packet_rcv_vnet(msg, skb, &len);
3373		if (err)
3374			goto out_free;
3375		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3376	}
3377
3378	/* You lose any data beyond the buffer you gave. If this worries
3379	 * a user program, it can ask the device for its MTU
3380	 * anyway.
3381	 */
3382	copied = skb->len;
3383	if (copied > len) {
3384		copied = len;
3385		msg->msg_flags |= MSG_TRUNC;
3386	}
3387
3388	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3389	if (err)
3390		goto out_free;
3391
3392	if (sock->type != SOCK_PACKET) {
3393		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3394
3395		/* Original length was stored in sockaddr_ll fields */
3396		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3397		sll->sll_family = AF_PACKET;
3398		sll->sll_protocol = skb->protocol;
3399	}
3400
3401	sock_recv_ts_and_drops(msg, sk, skb);
3402
3403	if (msg->msg_name) {
3404		int copy_len;
3405
3406		/* If the address length field is there to be filled
3407		 * in, we fill it in now.
3408		 */
3409		if (sock->type == SOCK_PACKET) {
3410			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3411			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3412			copy_len = msg->msg_namelen;
3413		} else {
3414			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3415
3416			msg->msg_namelen = sll->sll_halen +
3417				offsetof(struct sockaddr_ll, sll_addr);
3418			copy_len = msg->msg_namelen;
3419			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3420				memset(msg->msg_name +
3421				       offsetof(struct sockaddr_ll, sll_addr),
3422				       0, sizeof(sll->sll_addr));
3423				msg->msg_namelen = sizeof(struct sockaddr_ll);
3424			}
3425		}
3426		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3427	}
3428
3429	if (pkt_sk(sk)->auxdata) {
3430		struct tpacket_auxdata aux;
3431
3432		aux.tp_status = TP_STATUS_USER;
3433		if (skb->ip_summed == CHECKSUM_PARTIAL)
3434			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3435		else if (skb->pkt_type != PACKET_OUTGOING &&
3436			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3437			  skb_csum_unnecessary(skb)))
3438			aux.tp_status |= TP_STATUS_CSUM_VALID;
3439
3440		aux.tp_len = origlen;
3441		aux.tp_snaplen = skb->len;
3442		aux.tp_mac = 0;
3443		aux.tp_net = skb_network_offset(skb);
3444		if (skb_vlan_tag_present(skb)) {
3445			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3446			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3447			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3448		} else {
3449			aux.tp_vlan_tci = 0;
3450			aux.tp_vlan_tpid = 0;
3451		}
3452		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3453	}
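	/* Hedged sketch of the matching userspace cmsg walk (mh is the
	 * struct msghdr that was passed to recvmsg()):
	 *
	 *	struct cmsghdr *cm;
	 *
	 *	for (cm = CMSG_FIRSTHDR(&mh); cm; cm = CMSG_NXTHDR(&mh, cm))
	 *		if (cm->cmsg_level == SOL_PACKET &&
	 *		    cm->cmsg_type == PACKET_AUXDATA) {
	 *			struct tpacket_auxdata *aux =
	 *				(void *)CMSG_DATA(cm);
	 *			use(aux->tp_snaplen, aux->tp_vlan_tci);
	 *		}
	 *
	 * use() stands in for whatever the application does with the
	 * auxiliary data.
	 */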
3454
3455	/*
3456	 *	Free or return the buffer as appropriate. Again this
3457	 *	hides all the races and re-entrancy issues from us.
3458	 */
3459	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3460
3461out_free:
3462	skb_free_datagram(sk, skb);
3463out:
3464	return err;
3465}
3466
3467static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3468			       int peer)
3469{
3470	struct net_device *dev;
3471	struct sock *sk	= sock->sk;
3472
3473	if (peer)
3474		return -EOPNOTSUPP;
3475
3476	uaddr->sa_family = AF_PACKET;
3477	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3478	rcu_read_lock();
3479	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3480	if (dev)
3481		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3482	rcu_read_unlock();
3483
3484	return sizeof(*uaddr);
3485}
3486
3487static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3488			  int peer)
3489{
3490	struct net_device *dev;
3491	struct sock *sk = sock->sk;
3492	struct packet_sock *po = pkt_sk(sk);
3493	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3494
3495	if (peer)
3496		return -EOPNOTSUPP;
3497
3498	sll->sll_family = AF_PACKET;
3499	sll->sll_ifindex = po->ifindex;
3500	sll->sll_protocol = po->num;
3501	sll->sll_pkttype = 0;
3502	rcu_read_lock();
3503	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3504	if (dev) {
3505		sll->sll_hatype = dev->type;
3506		sll->sll_halen = dev->addr_len;
3507		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3508	} else {
3509		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3510		sll->sll_halen = 0;
3511	}
3512	rcu_read_unlock();
3513
3514	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3515}
3516
3517static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3518			 int what)
3519{
3520	switch (i->type) {
3521	case PACKET_MR_MULTICAST:
3522		if (i->alen != dev->addr_len)
3523			return -EINVAL;
3524		if (what > 0)
3525			return dev_mc_add(dev, i->addr);
3526		else
3527			return dev_mc_del(dev, i->addr);
3528		break;
3529	case PACKET_MR_PROMISC:
3530		return dev_set_promiscuity(dev, what);
3531	case PACKET_MR_ALLMULTI:
3532		return dev_set_allmulti(dev, what);
3533	case PACKET_MR_UNICAST:
3534		if (i->alen != dev->addr_len)
3535			return -EINVAL;
3536		if (what > 0)
3537			return dev_uc_add(dev, i->addr);
3538		else
3539			return dev_uc_del(dev, i->addr);
3540		break;
3541	default:
3542		break;
3543	}
3544	return 0;
3545}
3546
3547static void packet_dev_mclist_delete(struct net_device *dev,
3548				     struct packet_mclist **mlp)
3549{
3550	struct packet_mclist *ml;
3551
3552	while ((ml = *mlp) != NULL) {
3553		if (ml->ifindex == dev->ifindex) {
3554			packet_dev_mc(dev, ml, -1);
3555			*mlp = ml->next;
3556			kfree(ml);
3557		} else
3558			mlp = &ml->next;
3559	}
3560}
3561
3562static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3563{
3564	struct packet_sock *po = pkt_sk(sk);
3565	struct packet_mclist *ml, *i;
3566	struct net_device *dev;
3567	int err;
3568
3569	rtnl_lock();
3570
3571	err = -ENODEV;
3572	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3573	if (!dev)
3574		goto done;
3575
3576	err = -EINVAL;
3577	if (mreq->mr_alen > dev->addr_len)
3578		goto done;
3579
3580	err = -ENOBUFS;
3581	i = kmalloc(sizeof(*i), GFP_KERNEL);
3582	if (i == NULL)
3583		goto done;
3584
3585	err = 0;
3586	for (ml = po->mclist; ml; ml = ml->next) {
3587		if (ml->ifindex == mreq->mr_ifindex &&
3588		    ml->type == mreq->mr_type &&
3589		    ml->alen == mreq->mr_alen &&
3590		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3591			ml->count++;
3592			/* Free the new element ... */
3593			kfree(i);
3594			goto done;
3595		}
3596	}
3597
3598	i->type = mreq->mr_type;
3599	i->ifindex = mreq->mr_ifindex;
3600	i->alen = mreq->mr_alen;
3601	memcpy(i->addr, mreq->mr_address, i->alen);
3602	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3603	i->count = 1;
3604	i->next = po->mclist;
3605	po->mclist = i;
3606	err = packet_dev_mc(dev, i, 1);
3607	if (err) {
3608		po->mclist = i->next;
3609		kfree(i);
3610	}
3611
3612done:
3613	rtnl_unlock();
3614	return err;
3615}
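/* Userspace reaches packet_mc_add() through PACKET_ADD_MEMBERSHIP; a
 * hedged sketch putting ifindex 2 (illustrative) into promiscuous
 * mode:
 *
 *	struct packet_mreq mr = {
 *		.mr_ifindex = 2,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */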
3616
3617static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3618{
3619	struct packet_mclist *ml, **mlp;
3620
3621	rtnl_lock();
3622
3623	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3624		if (ml->ifindex == mreq->mr_ifindex &&
3625		    ml->type == mreq->mr_type &&
3626		    ml->alen == mreq->mr_alen &&
3627		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3628			if (--ml->count == 0) {
3629				struct net_device *dev;
3630				*mlp = ml->next;
3631				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3632				if (dev)
3633					packet_dev_mc(dev, ml, -1);
3634				kfree(ml);
3635			}
3636			break;
3637		}
3638	}
3639	rtnl_unlock();
3640	return 0;
3641}
3642
3643static void packet_flush_mclist(struct sock *sk)
3644{
3645	struct packet_sock *po = pkt_sk(sk);
3646	struct packet_mclist *ml;
3647
3648	if (!po->mclist)
3649		return;
3650
3651	rtnl_lock();
3652	while ((ml = po->mclist) != NULL) {
3653		struct net_device *dev;
3654
3655		po->mclist = ml->next;
3656		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3657		if (dev != NULL)
3658			packet_dev_mc(dev, ml, -1);
3659		kfree(ml);
3660	}
3661	rtnl_unlock();
3662}
3663
3664static int
3665packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3666		  unsigned int optlen)
3667{
3668	struct sock *sk = sock->sk;
3669	struct packet_sock *po = pkt_sk(sk);
3670	int ret;
3671
3672	if (level != SOL_PACKET)
3673		return -ENOPROTOOPT;
3674
3675	switch (optname) {
3676	case PACKET_ADD_MEMBERSHIP:
3677	case PACKET_DROP_MEMBERSHIP:
3678	{
3679		struct packet_mreq_max mreq;
3680		int len = optlen;
3681		memset(&mreq, 0, sizeof(mreq));
3682		if (len < sizeof(struct packet_mreq))
3683			return -EINVAL;
3684		if (len > sizeof(mreq))
3685			len = sizeof(mreq);
3686		if (copy_from_sockptr(&mreq, optval, len))
3687			return -EFAULT;
3688		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3689			return -EINVAL;
3690		if (optname == PACKET_ADD_MEMBERSHIP)
3691			ret = packet_mc_add(sk, &mreq);
3692		else
3693			ret = packet_mc_drop(sk, &mreq);
3694		return ret;
3695	}
3696
3697	case PACKET_RX_RING:
3698	case PACKET_TX_RING:
3699	{
3700		union tpacket_req_u req_u;
3701		int len;
3702
3703		lock_sock(sk);
3704		switch (po->tp_version) {
3705		case TPACKET_V1:
3706		case TPACKET_V2:
3707			len = sizeof(req_u.req);
3708			break;
3709		case TPACKET_V3:
3710		default:
3711			len = sizeof(req_u.req3);
3712			break;
3713		}
3714		if (optlen < len) {
3715			ret = -EINVAL;
3716		} else {
3717			if (copy_from_sockptr(&req_u.req, optval, len))
3718				ret = -EFAULT;
3719			else
3720				ret = packet_set_ring(sk, &req_u, 0,
3721						    optname == PACKET_TX_RING);
3722		}
3723		release_sock(sk);
3724		return ret;
3725	}
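	/* Hedged userspace sketch of the ring setup handled above; the
	 * geometry is illustrative but satisfies the invariant
	 * tp_frame_nr == tp_block_nr * (tp_block_size / tp_frame_size):
	 *
	 *	int ver = TPACKET_V2;
	 *	struct tpacket_req req = {
	 *		.tp_block_size = 4096,
	 *		.tp_frame_size = 2048,
	 *		.tp_block_nr   = 64,
	 *		.tp_frame_nr   = 128,
	 *	};
	 *
	 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	 */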
3726	case PACKET_COPY_THRESH:
3727	{
3728		int val;
3729
3730		if (optlen != sizeof(val))
3731			return -EINVAL;
3732		if (copy_from_sockptr(&val, optval, sizeof(val)))
3733			return -EFAULT;
3734
3735		pkt_sk(sk)->copy_thresh = val;
3736		return 0;
3737	}
3738	case PACKET_VERSION:
3739	{
3740		int val;
3741
3742		if (optlen != sizeof(val))
3743			return -EINVAL;
3744		if (copy_from_sockptr(&val, optval, sizeof(val)))
3745			return -EFAULT;
3746		switch (val) {
3747		case TPACKET_V1:
3748		case TPACKET_V2:
3749		case TPACKET_V3:
3750			break;
3751		default:
3752			return -EINVAL;
3753		}
3754		lock_sock(sk);
3755		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3756			ret = -EBUSY;
3757		} else {
3758			po->tp_version = val;
3759			ret = 0;
3760		}
3761		release_sock(sk);
3762		return ret;
3763	}
3764	case PACKET_RESERVE:
3765	{
3766		unsigned int val;
3767
3768		if (optlen != sizeof(val))
3769			return -EINVAL;
3770		if (copy_from_sockptr(&val, optval, sizeof(val)))
3771			return -EFAULT;
3772		if (val > INT_MAX)
3773			return -EINVAL;
3774		lock_sock(sk);
3775		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3776			ret = -EBUSY;
3777		} else {
3778			po->tp_reserve = val;
3779			ret = 0;
3780		}
3781		release_sock(sk);
3782		return ret;
3783	}
3784	case PACKET_LOSS:
3785	{
3786		unsigned int val;
3787
3788		if (optlen != sizeof(val))
3789			return -EINVAL;
3790		if (copy_from_sockptr(&val, optval, sizeof(val)))
3791			return -EFAULT;
3792
3793		lock_sock(sk);
3794		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3795			ret = -EBUSY;
3796		} else {
3797			po->tp_loss = !!val;
3798			ret = 0;
3799		}
3800		release_sock(sk);
3801		return ret;
3802	}
3803	case PACKET_AUXDATA:
3804	{
3805		int val;
3806
3807		if (optlen < sizeof(val))
3808			return -EINVAL;
3809		if (copy_from_sockptr(&val, optval, sizeof(val)))
3810			return -EFAULT;
3811
3812		lock_sock(sk);
3813		po->auxdata = !!val;
3814		release_sock(sk);
3815		return 0;
3816	}
3817	case PACKET_ORIGDEV:
3818	{
3819		int val;
3820
3821		if (optlen < sizeof(val))
3822			return -EINVAL;
3823		if (copy_from_sockptr(&val, optval, sizeof(val)))
3824			return -EFAULT;
3825
3826		lock_sock(sk);
3827		po->origdev = !!val;
3828		release_sock(sk);
3829		return 0;
3830	}
3831	case PACKET_VNET_HDR:
3832	{
3833		int val;
3834
3835		if (sock->type != SOCK_RAW)
3836			return -EINVAL;
3837		if (optlen < sizeof(val))
3838			return -EINVAL;
3839		if (copy_from_sockptr(&val, optval, sizeof(val)))
3840			return -EFAULT;
3841
3842		lock_sock(sk);
3843		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3844			ret = -EBUSY;
3845		} else {
3846			po->has_vnet_hdr = !!val;
3847			ret = 0;
3848		}
3849		release_sock(sk);
3850		return ret;
3851	}
3852	case PACKET_TIMESTAMP:
3853	{
3854		int val;
3855
3856		if (optlen != sizeof(val))
3857			return -EINVAL;
3858		if (copy_from_sockptr(&val, optval, sizeof(val)))
3859			return -EFAULT;
3860
3861		po->tp_tstamp = val;
3862		return 0;
3863	}
3864	case PACKET_FANOUT:
3865	{
3866		int val;
3867
3868		if (optlen != sizeof(val))
3869			return -EINVAL;
3870		if (copy_from_sockptr(&val, optval, sizeof(val)))
3871			return -EFAULT;
3872
3873		return fanout_add(sk, val & 0xffff, val >> 16);
3874	}
3875	case PACKET_FANOUT_DATA:
3876	{
3877		if (!po->fanout)
3878			return -EINVAL;
3879
3880		return fanout_set_data(po, optval, optlen);
3881	}
3882	case PACKET_IGNORE_OUTGOING:
3883	{
3884		int val;
3885
3886		if (optlen != sizeof(val))
3887			return -EINVAL;
3888		if (copy_from_sockptr(&val, optval, sizeof(val)))
3889			return -EFAULT;
3890		if (val < 0 || val > 1)
3891			return -EINVAL;
3892
3893		po->prot_hook.ignore_outgoing = !!val;
3894		return 0;
3895	}
3896	case PACKET_TX_HAS_OFF:
3897	{
3898		unsigned int val;
3899
3900		if (optlen != sizeof(val))
3901			return -EINVAL;
3902		if (copy_from_sockptr(&val, optval, sizeof(val)))
3903			return -EFAULT;
3904
3905		lock_sock(sk);
3906		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3907			ret = -EBUSY;
3908		} else {
3909			po->tp_tx_has_off = !!val;
3910			ret = 0;
3911		}
3912		release_sock(sk);
3913		return ret;
3914	}
3915	case PACKET_QDISC_BYPASS:
3916	{
3917		int val;
3918
3919		if (optlen != sizeof(val))
3920			return -EINVAL;
3921		if (copy_from_sockptr(&val, optval, sizeof(val)))
3922			return -EFAULT;
3923
3924		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3925		return 0;
3926	}
3927	default:
3928		return -ENOPROTOOPT;
3929	}
3930}
3931
3932static int packet_getsockopt(struct socket *sock, int level, int optname,
3933			     char __user *optval, int __user *optlen)
3934{
3935	int len;
3936	int val, lv = sizeof(val);
3937	struct sock *sk = sock->sk;
3938	struct packet_sock *po = pkt_sk(sk);
3939	void *data = &val;
3940	union tpacket_stats_u st;
3941	struct tpacket_rollover_stats rstats;
3942	int drops;
3943
3944	if (level != SOL_PACKET)
3945		return -ENOPROTOOPT;
3946
3947	if (get_user(len, optlen))
3948		return -EFAULT;
3949
3950	if (len < 0)
3951		return -EINVAL;
3952
3953	switch (optname) {
3954	case PACKET_STATISTICS:
3955		spin_lock_bh(&sk->sk_receive_queue.lock);
3956		memcpy(&st, &po->stats, sizeof(st));
3957		memset(&po->stats, 0, sizeof(po->stats));
3958		spin_unlock_bh(&sk->sk_receive_queue.lock);
3959		drops = atomic_xchg(&po->tp_drops, 0);
3960
3961		if (po->tp_version == TPACKET_V3) {
3962			lv = sizeof(struct tpacket_stats_v3);
3963			st.stats3.tp_drops = drops;
3964			st.stats3.tp_packets += drops;
3965			data = &st.stats3;
3966		} else {
3967			lv = sizeof(struct tpacket_stats);
3968			st.stats1.tp_drops = drops;
3969			st.stats1.tp_packets += drops;
3970			data = &st.stats1;
3971		}
3972
3973		break;
3974	case PACKET_AUXDATA:
3975		val = po->auxdata;
3976		break;
3977	case PACKET_ORIGDEV:
3978		val = po->origdev;
3979		break;
3980	case PACKET_VNET_HDR:
3981		val = po->has_vnet_hdr;
3982		break;
3983	case PACKET_VERSION:
3984		val = po->tp_version;
3985		break;
3986	case PACKET_HDRLEN:
3987		if (len > sizeof(int))
3988			len = sizeof(int);
3989		if (len < sizeof(int))
3990			return -EINVAL;
3991		if (copy_from_user(&val, optval, len))
3992			return -EFAULT;
3993		switch (val) {
3994		case TPACKET_V1:
3995			val = sizeof(struct tpacket_hdr);
3996			break;
3997		case TPACKET_V2:
3998			val = sizeof(struct tpacket2_hdr);
3999			break;
4000		case TPACKET_V3:
4001			val = sizeof(struct tpacket3_hdr);
4002			break;
4003		default:
4004			return -EINVAL;
4005		}
4006		break;
4007	case PACKET_RESERVE:
4008		val = po->tp_reserve;
4009		break;
4010	case PACKET_LOSS:
4011		val = po->tp_loss;
4012		break;
4013	case PACKET_TIMESTAMP:
4014		val = po->tp_tstamp;
4015		break;
4016	case PACKET_FANOUT:
4017		val = (po->fanout ?
4018		       ((u32)po->fanout->id |
4019			((u32)po->fanout->type << 16) |
4020			((u32)po->fanout->flags << 24)) :
4021		       0);
4022		break;
4023	case PACKET_IGNORE_OUTGOING:
4024		val = po->prot_hook.ignore_outgoing;
4025		break;
4026	case PACKET_ROLLOVER_STATS:
4027		if (!po->rollover)
4028			return -EINVAL;
4029		rstats.tp_all = atomic_long_read(&po->rollover->num);
4030		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4031		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4032		data = &rstats;
4033		lv = sizeof(rstats);
4034		break;
4035	case PACKET_TX_HAS_OFF:
4036		val = po->tp_tx_has_off;
4037		break;
4038	case PACKET_QDISC_BYPASS:
4039		val = packet_use_direct_xmit(po);
4040		break;
4041	default:
4042		return -ENOPROTOOPT;
4043	}
4044
4045	if (len > lv)
4046		len = lv;
4047	if (put_user(len, optlen))
4048		return -EFAULT;
4049	if (copy_to_user(optval, data, len))
4050		return -EFAULT;
4051	return 0;
4052}
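/*
 * Illustrative userspace sketch (not part of this file): draining the
 * PACKET_STATISTICS counters, which packet_getsockopt() zeroes on every
 * read.  A TPACKET_V3 socket would pass struct tpacket_stats_v3 instead.
 * "fd" is assumed to be an AF_PACKET socket.
 */
#if 0	/* example only -- userspace code, not built with the kernel */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

static void dump_packet_stats(int fd)
{
	struct tpacket_stats st;
	socklen_t len = sizeof(st);

	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
		printf("seen %u, dropped %u\n", st.tp_packets, st.tp_drops);
}
#endif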
4053
4054static int packet_notifier(struct notifier_block *this,
4055			   unsigned long msg, void *ptr)
4056{
4057	struct sock *sk;
4058	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4059	struct net *net = dev_net(dev);
4060
4061	rcu_read_lock();
4062	sk_for_each_rcu(sk, &net->packet.sklist) {
4063		struct packet_sock *po = pkt_sk(sk);
4064
4065		switch (msg) {
4066		case NETDEV_UNREGISTER:
4067			if (po->mclist)
4068				packet_dev_mclist_delete(dev, &po->mclist);
4069			fallthrough;
4070
4071		case NETDEV_DOWN:
4072			if (dev->ifindex == po->ifindex) {
4073				spin_lock(&po->bind_lock);
4074				if (po->running) {
4075					__unregister_prot_hook(sk, false);
4076					sk->sk_err = ENETDOWN;
4077					if (!sock_flag(sk, SOCK_DEAD))
4078						sk->sk_error_report(sk);
4079				}
4080				if (msg == NETDEV_UNREGISTER) {
4081					packet_cached_dev_reset(po);
4082					po->ifindex = -1;
4083					if (po->prot_hook.dev)
4084						dev_put(po->prot_hook.dev);
4085					po->prot_hook.dev = NULL;
4086				}
4087				spin_unlock(&po->bind_lock);
4088			}
4089			break;
4090		case NETDEV_UP:
4091			if (dev->ifindex == po->ifindex) {
4092				spin_lock(&po->bind_lock);
4093				if (po->num)
4094					register_prot_hook(sk);
4095				spin_unlock(&po->bind_lock);
4096			}
4097			break;
4098		}
4099	}
4100	rcu_read_unlock();
4101	return NOTIFY_DONE;
4102}
4103
4104
4105static int packet_ioctl(struct socket *sock, unsigned int cmd,
4106			unsigned long arg)
4107{
4108	struct sock *sk = sock->sk;
4109
4110	switch (cmd) {
4111	case SIOCOUTQ:
4112	{
4113		int amount = sk_wmem_alloc_get(sk);
4114
4115		return put_user(amount, (int __user *)arg);
4116	}
4117	case SIOCINQ:
4118	{
4119		struct sk_buff *skb;
4120		int amount = 0;
4121
4122		spin_lock_bh(&sk->sk_receive_queue.lock);
4123		skb = skb_peek(&sk->sk_receive_queue);
4124		if (skb)
4125			amount = skb->len;
4126		spin_unlock_bh(&sk->sk_receive_queue.lock);
4127		return put_user(amount, (int __user *)arg);
4128	}
4129#ifdef CONFIG_INET
4130	case SIOCADDRT:
4131	case SIOCDELRT:
4132	case SIOCDARP:
4133	case SIOCGARP:
4134	case SIOCSARP:
4135	case SIOCGIFADDR:
4136	case SIOCSIFADDR:
4137	case SIOCGIFBRDADDR:
4138	case SIOCSIFBRDADDR:
4139	case SIOCGIFNETMASK:
4140	case SIOCSIFNETMASK:
4141	case SIOCGIFDSTADDR:
4142	case SIOCSIFDSTADDR:
4143	case SIOCSIFFLAGS:
4144		return inet_dgram_ops.ioctl(sock, cmd, arg);
4145#endif
4146
4147	default:
4148		return -ENOIOCTLCMD;
4149	}
4150	return 0;
4151}
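/*
 * Illustrative userspace sketch (not part of this file): querying the
 * queue levels served by packet_ioctl() above.  SIOCINQ reports the
 * length of the skb at the head of the receive queue, SIOCOUTQ the
 * bytes not yet transmitted.
 */
#if 0	/* example only -- userspace code, not built with the kernel */
#include <sys/ioctl.h>
#include <linux/sockios.h>

static void queue_levels(int fd, int *inq, int *outq)
{
	ioctl(fd, SIOCINQ, inq);
	ioctl(fd, SIOCOUTQ, outq);
}
#endif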
4152
4153static __poll_t packet_poll(struct file *file, struct socket *sock,
4154				poll_table *wait)
4155{
4156	struct sock *sk = sock->sk;
4157	struct packet_sock *po = pkt_sk(sk);
4158	__poll_t mask = datagram_poll(file, sock, wait);
4159
4160	spin_lock_bh(&sk->sk_receive_queue.lock);
4161	if (po->rx_ring.pg_vec) {
4162		if (!packet_previous_rx_frame(po, &po->rx_ring,
4163			TP_STATUS_KERNEL))
4164			mask |= EPOLLIN | EPOLLRDNORM;
4165	}
4166	packet_rcv_try_clear_pressure(po);
4167	spin_unlock_bh(&sk->sk_receive_queue.lock);
4168	spin_lock_bh(&sk->sk_write_queue.lock);
4169	if (po->tx_ring.pg_vec) {
4170		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4171			mask |= EPOLLOUT | EPOLLWRNORM;
4172	}
4173	spin_unlock_bh(&sk->sk_write_queue.lock);
4174	return mask;
4175}
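/*
 * Illustrative userspace sketch (not part of this file): blocking until
 * packet_poll() reports a ring frame ready for user space.  POLLIN
 * matches the rx_ring test above; a tx_ring user would poll for POLLOUT.
 */
#if 0	/* example only -- userspace code, not built with the kernel */
#include <poll.h>

static int wait_for_rx_frame(int fd)
{
	struct pollfd pfd = {
		.fd     = fd,
		.events = POLLIN | POLLRDNORM | POLLERR,
	};

	return poll(&pfd, 1, -1);	/* no timeout */
}
#endif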
4176
4177
4178/* Dirty? Well, I still have not found a better way to account
4179 * for user mmaps.
4180 */
4181
4182static void packet_mm_open(struct vm_area_struct *vma)
4183{
4184	struct file *file = vma->vm_file;
4185	struct socket *sock = file->private_data;
4186	struct sock *sk = sock->sk;
4187
4188	if (sk)
4189		atomic_inc(&pkt_sk(sk)->mapped);
4190}
4191
4192static void packet_mm_close(struct vm_area_struct *vma)
4193{
4194	struct file *file = vma->vm_file;
4195	struct socket *sock = file->private_data;
4196	struct sock *sk = sock->sk;
4197
4198	if (sk)
4199		atomic_dec(&pkt_sk(sk)->mapped);
4200}
4201
4202static const struct vm_operations_struct packet_mmap_ops = {
4203	.open	=	packet_mm_open,
4204	.close	=	packet_mm_close,
4205};
4206
4207static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4208			unsigned int len)
4209{
4210	int i;
4211
4212	for (i = 0; i < len; i++) {
4213		if (likely(pg_vec[i].buffer)) {
4214			if (is_vmalloc_addr(pg_vec[i].buffer))
4215				vfree(pg_vec[i].buffer);
4216			else
4217				free_pages((unsigned long)pg_vec[i].buffer,
4218					   order);
4219			pg_vec[i].buffer = NULL;
4220		}
4221	}
4222	kfree(pg_vec);
4223}
4224
4225static char *alloc_one_pg_vec_page(unsigned long order)
4226{
4227	char *buffer;
4228	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4229			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4230
4231	buffer = (char *) __get_free_pages(gfp_flags, order);
4232	if (buffer)
4233		return buffer;
4234
4235	/* __get_free_pages failed, fall back to vmalloc */
4236	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4237	if (buffer)
4238		return buffer;
4239
4240	/* vmalloc failed, let's dig into swap here */
4241	gfp_flags &= ~__GFP_NORETRY;
4242	buffer = (char *) __get_free_pages(gfp_flags, order);
4243	if (buffer)
4244		return buffer;
4245
4246	/* complete and utter failure */
4247	return NULL;
4248}
4249
4250static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4251{
4252	unsigned int block_nr = req->tp_block_nr;
4253	struct pgv *pg_vec;
4254	int i;
4255
4256	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4257	if (unlikely(!pg_vec))
4258		goto out;
4259
4260	for (i = 0; i < block_nr; i++) {
4261		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4262		if (unlikely(!pg_vec[i].buffer))
4263			goto out_free_pgvec;
4264	}
4265
4266out:
4267	return pg_vec;
4268
4269out_free_pgvec:
4270	free_pg_vec(pg_vec, order, block_nr);
4271	pg_vec = NULL;
4272	goto out;
4273}
4274
4275static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4276		int closing, int tx_ring)
4277{
4278	struct pgv *pg_vec = NULL;
4279	struct packet_sock *po = pkt_sk(sk);
4280	unsigned long *rx_owner_map = NULL;
4281	int was_running, order = 0;
4282	struct packet_ring_buffer *rb;
4283	struct sk_buff_head *rb_queue;
4284	__be16 num;
4285	int err;
4286	/* Alias req_u->req here to minimize code churn */
4287	struct tpacket_req *req = &req_u->req;
4288
4289	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4290	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4291
4292	err = -EBUSY;
4293	if (!closing) {
4294		if (atomic_read(&po->mapped))
4295			goto out;
4296		if (packet_read_pending(rb))
4297			goto out;
4298	}
4299
4300	if (req->tp_block_nr) {
4301		unsigned int min_frame_size;
4302
4303		/* Sanity tests and some calculations */
4304		err = -EBUSY;
4305		if (unlikely(rb->pg_vec))
4306			goto out;
4307
4308		switch (po->tp_version) {
4309		case TPACKET_V1:
4310			po->tp_hdrlen = TPACKET_HDRLEN;
4311			break;
4312		case TPACKET_V2:
4313			po->tp_hdrlen = TPACKET2_HDRLEN;
4314			break;
4315		case TPACKET_V3:
4316			po->tp_hdrlen = TPACKET3_HDRLEN;
4317			break;
4318		}
4319
4320		err = -EINVAL;
4321		if (unlikely((int)req->tp_block_size <= 0))
4322			goto out;
4323		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4324			goto out;
4325		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4326		if (po->tp_version >= TPACKET_V3 &&
4327		    req->tp_block_size <
4328		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4329			goto out;
4330		if (unlikely(req->tp_frame_size < min_frame_size))
4331			goto out;
4332		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4333			goto out;
4334
4335		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4336		if (unlikely(rb->frames_per_block == 0))
4337			goto out;
4338		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4339			goto out;
4340		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4341					req->tp_frame_nr))
4342			goto out;
4343
4344		err = -ENOMEM;
4345		order = get_order(req->tp_block_size);
4346		pg_vec = alloc_pg_vec(req, order);
4347		if (unlikely(!pg_vec))
4348			goto out;
4349		switch (po->tp_version) {
4350		case TPACKET_V3:
4351			/* Block transmit is not supported yet */
4352			if (!tx_ring) {
4353				init_prb_bdqc(po, rb, pg_vec, req_u);
4354			} else {
4355				struct tpacket_req3 *req3 = &req_u->req3;
4356
4357				if (req3->tp_retire_blk_tov ||
4358				    req3->tp_sizeof_priv ||
4359				    req3->tp_feature_req_word) {
4360					err = -EINVAL;
4361					goto out_free_pg_vec;
4362				}
4363			}
4364			break;
4365		default:
4366			if (!tx_ring) {
4367				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4368					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4369				if (!rx_owner_map)
4370					goto out_free_pg_vec;
4371			}
4372			break;
4373		}
4374	}
4375	/* Done with setup; a teardown request must not specify frames */
4376	else {
4377		err = -EINVAL;
4378		if (unlikely(req->tp_frame_nr))
4379			goto out;
4380	}
4381
4382
4383	/* Detach socket from network */
4384	spin_lock(&po->bind_lock);
4385	was_running = po->running;
4386	num = po->num;
4387	if (was_running) {
4388		po->num = 0;
4389		__unregister_prot_hook(sk, false);
4390	}
4391	spin_unlock(&po->bind_lock);
4392
4393	synchronize_net();
4394
4395	err = -EBUSY;
4396	mutex_lock(&po->pg_vec_lock);
4397	if (closing || atomic_read(&po->mapped) == 0) {
4398		err = 0;
4399		spin_lock_bh(&rb_queue->lock);
4400		swap(rb->pg_vec, pg_vec);
4401		if (po->tp_version <= TPACKET_V2)
4402			swap(rb->rx_owner_map, rx_owner_map);
4403		rb->frame_max = (req->tp_frame_nr - 1);
4404		rb->head = 0;
4405		rb->frame_size = req->tp_frame_size;
4406		spin_unlock_bh(&rb_queue->lock);
4407
4408		swap(rb->pg_vec_order, order);
4409		swap(rb->pg_vec_len, req->tp_block_nr);
4410
4411		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4412		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4413						tpacket_rcv : packet_rcv;
4414		skb_queue_purge(rb_queue);
4415		if (atomic_read(&po->mapped))
4416			pr_err("packet_mmap: vma is busy: %d\n",
4417			       atomic_read(&po->mapped));
4418	}
4419	mutex_unlock(&po->pg_vec_lock);
4420
4421	spin_lock(&po->bind_lock);
4422	if (was_running) {
4423		po->num = num;
4424		register_prot_hook(sk);
4425	}
4426	spin_unlock(&po->bind_lock);
4427	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4428		/* Because we don't support block-based V3 on tx-ring */
4429		if (!tx_ring)
4430			prb_shutdown_retire_blk_timer(po, rb_queue);
4431	}
4432
4433out_free_pg_vec:
4434	bitmap_free(rx_owner_map);
4435	if (pg_vec)
4436		free_pg_vec(pg_vec, order, req->tp_block_nr);
4437out:
4438	return err;
4439}
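/*
 * Illustrative userspace sketch (not part of this file): a TPACKET_V3
 * receive-ring request that satisfies the sanity tests in
 * packet_set_ring(): page-aligned block size, frame size that is a
 * multiple of TPACKET_ALIGNMENT and at least tp_hdrlen + tp_reserve,
 * and tp_frame_nr equal to frames-per-block times tp_block_nr.  The
 * sizes are arbitrary example values.
 */
#if 0	/* example only -- userspace code, not built with the kernel */
#include <sys/socket.h>
#include <linux/if_packet.h>

static int setup_v3_rx_ring(int fd)
{
	int ver = TPACKET_V3;
	struct tpacket_req3 req = {
		.tp_block_size		= 1 << 22,	/* 4 MiB, page aligned */
		.tp_block_nr		= 64,
		.tp_frame_size		= 1 << 11,	/* 2 KiB */
		.tp_frame_nr		= ((1 << 22) / (1 << 11)) * 64,
		.tp_retire_blk_tov	= 60,	/* ms; 0 = kernel default */
	};

	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver)) < 0)
		return -1;
	return setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
}
#endif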
4440
4441static int packet_mmap(struct file *file, struct socket *sock,
4442		struct vm_area_struct *vma)
4443{
4444	struct sock *sk = sock->sk;
4445	struct packet_sock *po = pkt_sk(sk);
4446	unsigned long size, expected_size;
4447	struct packet_ring_buffer *rb;
4448	unsigned long start;
4449	int err = -EINVAL;
4450	int i;
4451
4452	if (vma->vm_pgoff)
4453		return -EINVAL;
4454
4455	mutex_lock(&po->pg_vec_lock);
4456
4457	expected_size = 0;
4458	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4459		if (rb->pg_vec) {
4460			expected_size += rb->pg_vec_len
4461						* rb->pg_vec_pages
4462						* PAGE_SIZE;
4463		}
4464	}
4465
4466	if (expected_size == 0)
4467		goto out;
4468
4469	size = vma->vm_end - vma->vm_start;
4470	if (size != expected_size)
4471		goto out;
4472
4473	start = vma->vm_start;
4474	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4475		if (rb->pg_vec == NULL)
4476			continue;
4477
4478		for (i = 0; i < rb->pg_vec_len; i++) {
4479			struct page *page;
4480			void *kaddr = rb->pg_vec[i].buffer;
4481			int pg_num;
4482
4483			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4484				page = pgv_to_page(kaddr);
4485				err = vm_insert_page(vma, start, page);
4486				if (unlikely(err))
4487					goto out;
4488				start += PAGE_SIZE;
4489				kaddr += PAGE_SIZE;
4490			}
4491		}
4492	}
4493
4494	atomic_inc(&po->mapped);
4495	vma->vm_ops = &packet_mmap_ops;
4496	err = 0;
4497
4498out:
4499	mutex_unlock(&po->pg_vec_lock);
4500	return err;
4501}
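/*
 * Illustrative userspace sketch (not part of this file): packet_mmap()
 * rejects a nonzero offset and any length other than the combined size
 * of the configured rings, so both rings are mapped with a single call.
 */
#if 0	/* example only -- userspace code, not built with the kernel */
#include <sys/mman.h>

static void *map_rings(int fd, size_t rx_bytes, size_t tx_bytes)
{
	return mmap(NULL, rx_bytes + tx_bytes, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, 0);
}
#endif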
4502
4503static const struct proto_ops packet_ops_spkt = {
4504	.family =	PF_PACKET,
4505	.owner =	THIS_MODULE,
4506	.release =	packet_release,
4507	.bind =		packet_bind_spkt,
4508	.connect =	sock_no_connect,
4509	.socketpair =	sock_no_socketpair,
4510	.accept =	sock_no_accept,
4511	.getname =	packet_getname_spkt,
4512	.poll =		datagram_poll,
4513	.ioctl =	packet_ioctl,
4514	.gettstamp =	sock_gettstamp,
4515	.listen =	sock_no_listen,
4516	.shutdown =	sock_no_shutdown,
4517	.sendmsg =	packet_sendmsg_spkt,
4518	.recvmsg =	packet_recvmsg,
4519	.mmap =		sock_no_mmap,
4520	.sendpage =	sock_no_sendpage,
4521};
4522
4523static const struct proto_ops packet_ops = {
4524	.family =	PF_PACKET,
4525	.owner =	THIS_MODULE,
4526	.release =	packet_release,
4527	.bind =		packet_bind,
4528	.connect =	sock_no_connect,
4529	.socketpair =	sock_no_socketpair,
4530	.accept =	sock_no_accept,
4531	.getname =	packet_getname,
4532	.poll =		packet_poll,
4533	.ioctl =	packet_ioctl,
4534	.gettstamp =	sock_gettstamp,
4535	.listen =	sock_no_listen,
4536	.shutdown =	sock_no_shutdown,
4537	.setsockopt =	packet_setsockopt,
4538	.getsockopt =	packet_getsockopt,
4539	.sendmsg =	packet_sendmsg,
4540	.recvmsg =	packet_recvmsg,
4541	.mmap =		packet_mmap,
4542	.sendpage =	sock_no_sendpage,
4543};
4544
4545static const struct net_proto_family packet_family_ops = {
4546	.family =	PF_PACKET,
4547	.create =	packet_create,
4548	.owner	=	THIS_MODULE,
4549};
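/*
 * Illustrative userspace sketch (not part of this file): the socket()
 * call below reaches packet_create() through packet_family_ops and
 * requires CAP_NET_RAW.  SOCK_DGRAM yields cooked (headerless) frames
 * served by packet_ops as well; legacy SOCK_PACKET sockets use the
 * packet_ops_spkt table instead.
 */
#if 0	/* example only -- userspace code, not built with the kernel */
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>

static int open_packet_socket(void)
{
	return socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
}
#endif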
4550
4551static struct notifier_block packet_netdev_notifier = {
4552	.notifier_call =	packet_notifier,
4553};
4554
4555#ifdef CONFIG_PROC_FS
4556
4557static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4558	__acquires(RCU)
4559{
4560	struct net *net = seq_file_net(seq);
4561
4562	rcu_read_lock();
4563	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4564}
4565
4566static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4567{
4568	struct net *net = seq_file_net(seq);
4569	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4570}
4571
4572static void packet_seq_stop(struct seq_file *seq, void *v)
4573	__releases(RCU)
4574{
4575	rcu_read_unlock();
4576}
4577
4578static int packet_seq_show(struct seq_file *seq, void *v)
4579{
4580	if (v == SEQ_START_TOKEN)
4581		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4582	else {
4583		struct sock *s = sk_entry(v);
4584		const struct packet_sock *po = pkt_sk(s);
4585
4586		seq_printf(seq,
4587			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4588			   s,
4589			   refcount_read(&s->sk_refcnt),
4590			   s->sk_type,
4591			   ntohs(po->num),
4592			   po->ifindex,
4593			   po->running,
4594			   atomic_read(&s->sk_rmem_alloc),
4595			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4596			   sock_i_ino(s));
4597	}
4598
4599	return 0;
4600}
4601
4602static const struct seq_operations packet_seq_ops = {
4603	.start	= packet_seq_start,
4604	.next	= packet_seq_next,
4605	.stop	= packet_seq_stop,
4606	.show	= packet_seq_show,
4607};
4608#endif
4609
4610static int __net_init packet_net_init(struct net *net)
4611{
4612	mutex_init(&net->packet.sklist_lock);
4613	INIT_HLIST_HEAD(&net->packet.sklist);
4614
4615	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4616			sizeof(struct seq_net_private)))
4617		return -ENOMEM;
4618
4619	return 0;
4620}
4621
4622static void __net_exit packet_net_exit(struct net *net)
4623{
4624	remove_proc_entry("packet", net->proc_net);
4625	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4626}
4627
4628static struct pernet_operations packet_net_ops = {
4629	.init = packet_net_init,
4630	.exit = packet_net_exit,
4631};
4632
4633
4634static void __exit packet_exit(void)
4635{
4636	unregister_netdevice_notifier(&packet_netdev_notifier);
4637	unregister_pernet_subsys(&packet_net_ops);
4638	sock_unregister(PF_PACKET);
4639	proto_unregister(&packet_proto);
4640}
4641
4642static int __init packet_init(void)
4643{
4644	int rc;
4645
4646	rc = proto_register(&packet_proto, 0);
4647	if (rc)
4648		goto out;
4649	rc = sock_register(&packet_family_ops);
4650	if (rc)
4651		goto out_proto;
4652	rc = register_pernet_subsys(&packet_net_ops);
4653	if (rc)
4654		goto out_sock;
4655	rc = register_netdevice_notifier(&packet_netdev_notifier);
4656	if (rc)
4657		goto out_pernet;
4658
4659	return 0;
4660
4661out_pernet:
4662	unregister_pernet_subsys(&packet_net_ops);
4663out_sock:
4664	sock_unregister(PF_PACKET);
4665out_proto:
4666	proto_unregister(&packet_proto);
4667out:
4668	return rc;
4669}
4670
4671module_init(packet_init);
4672module_exit(packet_exit);
4673MODULE_LICENSE("GPL");
4674MODULE_ALIAS_NETPROTO(PF_PACKET);
v3.5.6
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		PACKET - implements raw packet sockets.
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  11 *
  12 * Fixes:
  13 *		Alan Cox	:	verify_area() now used correctly
  14 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  15 *		Alan Cox	:	tidied skbuff lists.
  16 *		Alan Cox	:	Now uses generic datagram routines I
  17 *					added. Also fixed the peek/read crash
  18 *					from all old Linux datagram code.
  19 *		Alan Cox	:	Uses the improved datagram code.
  20 *		Alan Cox	:	Added NULL's for socket options.
  21 *		Alan Cox	:	Re-commented the code.
  22 *		Alan Cox	:	Use new kernel side addressing
  23 *		Rob Janssen	:	Correct MTU usage.
  24 *		Dave Platt	:	Counter leaks caused by incorrect
  25 *					interrupt locking and some slightly
  26 *					dubious gcc output. Can you read
  27 *					compiler: it said _VOLATILE_
  28 *	Richard Kooijman	:	Timestamp fixes.
  29 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  30 *		Alan Cox	:	sendmsg/recvmsg support.
  31 *		Alan Cox	:	Protocol setting support
  32 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  33 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  34 *	Michal Ostrowski        :       Module initialization cleanup.
  35 *         Ulises Alonso        :       Frame number limit removal and
  36 *                                      packet_set_ring memory leak.
  37 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  38 *					The convention is that longer addresses
  39 *					will simply extend the hardware address
  40 *					byte arrays at the end of sockaddr_ll
  41 *					and packet_mreq.
  42 *		Johann Baudy	:	Added TX RING.
  43 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  44 *					layer.
  45 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  46 *
  47 *
  48 *		This program is free software; you can redistribute it and/or
  49 *		modify it under the terms of the GNU General Public License
  50 *		as published by the Free Software Foundation; either version
  51 *		2 of the License, or (at your option) any later version.
  52 *
  53 */
  54
  55#include <linux/types.h>
  56#include <linux/mm.h>
  57#include <linux/capability.h>
  58#include <linux/fcntl.h>
  59#include <linux/socket.h>
  60#include <linux/in.h>
  61#include <linux/inet.h>
  62#include <linux/netdevice.h>
  63#include <linux/if_packet.h>
  64#include <linux/wireless.h>
  65#include <linux/kernel.h>
  66#include <linux/kmod.h>
  67#include <linux/slab.h>
  68#include <linux/vmalloc.h>
  69#include <net/net_namespace.h>
  70#include <net/ip.h>
  71#include <net/protocol.h>
  72#include <linux/skbuff.h>
  73#include <net/sock.h>
  74#include <linux/errno.h>
  75#include <linux/timer.h>
  76#include <asm/uaccess.h>
  77#include <asm/ioctls.h>
  78#include <asm/page.h>
  79#include <asm/cacheflush.h>
  80#include <asm/io.h>
  81#include <linux/proc_fs.h>
  82#include <linux/seq_file.h>
  83#include <linux/poll.h>
  84#include <linux/module.h>
  85#include <linux/init.h>
  86#include <linux/mutex.h>
  87#include <linux/if_vlan.h>
  88#include <linux/virtio_net.h>
  89#include <linux/errqueue.h>
  90#include <linux/net_tstamp.h>
  91
  92#ifdef CONFIG_INET
  93#include <net/inet_common.h>
  94#endif
 
 
 
 
  95
  96/*
  97   Assumptions:
  98   - if device has no dev->hard_header routine, it adds and removes ll header
  99     inside itself. In this case ll header is invisible outside of device,
 100     but higher levels still should reserve dev->hard_header_len.
 101     Some devices are enough clever to reallocate skb, when header
 102     will not fit to reserved space (tunnel), another ones are silly
 103     (PPP).
 104   - packet socket receives packets with pulled ll header,
 105     so that SOCK_RAW should push it back.
 106
 107On receive:
 108-----------
 109
 110Incoming, dev->hard_header!=NULL
 111   mac_header -> ll header
 112   data       -> data
 113
 114Outgoing, dev->hard_header!=NULL
 115   mac_header -> ll header
 116   data       -> ll header
 117
 118Incoming, dev->hard_header==NULL
 119   mac_header -> UNKNOWN position. It is very likely, that it points to ll
 120		 header.  PPP makes it, that is wrong, because introduce
 121		 assymetry between rx and tx paths.
 122   data       -> data
 123
 124Outgoing, dev->hard_header==NULL
 125   mac_header -> data. ll header is still not built!
 126   data       -> data
 127
 128Resume
 129  If dev->hard_header==NULL we are unlikely to restore sensible ll header.
 130
 131
 132On transmit:
 133------------
 134
 135dev->hard_header != NULL
 136   mac_header -> ll header
 137   data       -> ll header
 138
 139dev->hard_header == NULL (ll header is added by device, we cannot control it)
 140   mac_header -> data
 141   data       -> data
 142
 143   We should set nh.raw on output to correct posistion,
 144   packet classifier depends on it.
 145 */
 146
 147/* Private packet socket structures. */
 148
 149struct packet_mclist {
 150	struct packet_mclist	*next;
 151	int			ifindex;
 152	int			count;
 153	unsigned short		type;
 154	unsigned short		alen;
 155	unsigned char		addr[MAX_ADDR_LEN];
 156};
 157/* identical to struct packet_mreq except it has
 158 * a longer address field.
 159 */
 160struct packet_mreq_max {
 161	int		mr_ifindex;
 162	unsigned short	mr_type;
 163	unsigned short	mr_alen;
 164	unsigned char	mr_address[MAX_ADDR_LEN];
 165};
 166
 
 
 
 
 
 
 
 167static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 168		int closing, int tx_ring);
 169
 170
 171#define V3_ALIGNMENT	(8)
 172
 173#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 174
 175#define BLK_PLUS_PRIV(sz_of_priv) \
 176	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 177
 178/* kbdq - kernel block descriptor queue */
 179struct tpacket_kbdq_core {
 180	struct pgv	*pkbdq;
 181	unsigned int	feature_req_word;
 182	unsigned int	hdrlen;
 183	unsigned char	reset_pending_on_curr_blk;
 184	unsigned char   delete_blk_timer;
 185	unsigned short	kactive_blk_num;
 186	unsigned short	blk_sizeof_priv;
 187
 188	/* last_kactive_blk_num:
 189	 * trick to see if user-space has caught up
 190	 * in order to avoid refreshing timer when every single pkt arrives.
 191	 */
 192	unsigned short	last_kactive_blk_num;
 193
 194	char		*pkblk_start;
 195	char		*pkblk_end;
 196	int		kblk_size;
 197	unsigned int	knum_blocks;
 198	uint64_t	knxt_seq_num;
 199	char		*prev;
 200	char		*nxt_offset;
 201	struct sk_buff	*skb;
 202
 203	atomic_t	blk_fill_in_prog;
 204
 205	/* Default is set to 8ms */
 206#define DEFAULT_PRB_RETIRE_TOV	(8)
 207
 208	unsigned short  retire_blk_tov;
 209	unsigned short  version;
 210	unsigned long	tov_in_jiffies;
 211
 212	/* timer to retire an outstanding block */
 213	struct timer_list retire_blk_timer;
 214};
 215
 216#define PGV_FROM_VMALLOC 1
 217struct pgv {
 218	char *buffer;
 219};
 220
 221struct packet_ring_buffer {
 222	struct pgv		*pg_vec;
 223	unsigned int		head;
 224	unsigned int		frames_per_block;
 225	unsigned int		frame_size;
 226	unsigned int		frame_max;
 227
 228	unsigned int		pg_vec_order;
 229	unsigned int		pg_vec_pages;
 230	unsigned int		pg_vec_len;
 231
 232	struct tpacket_kbdq_core	prb_bdqc;
 233	atomic_t		pending;
 234};
 235
 236#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 237#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 238#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 239#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 240#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 241#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 242#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 243
 244struct packet_sock;
 245static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
 
 246
 247static void *packet_previous_frame(struct packet_sock *po,
 248		struct packet_ring_buffer *rb,
 249		int status);
 250static void packet_increment_head(struct packet_ring_buffer *buff);
 251static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
 252			struct tpacket_block_desc *);
 253static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 254			struct packet_sock *);
 255static void prb_retire_current_block(struct tpacket_kbdq_core *,
 256		struct packet_sock *, unsigned int status);
 257static int prb_queue_frozen(struct tpacket_kbdq_core *);
 258static void prb_open_block(struct tpacket_kbdq_core *,
 259		struct tpacket_block_desc *);
 260static void prb_retire_rx_blk_timer_expired(unsigned long);
 261static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 262static void prb_init_blk_timer(struct packet_sock *,
 263		struct tpacket_kbdq_core *,
 264		void (*func) (unsigned long));
 265static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 266static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 267		struct tpacket3_hdr *);
 268static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 269		struct tpacket3_hdr *);
 270static void packet_flush_mclist(struct sock *sk);
 271
 272struct packet_fanout;
 273struct packet_sock {
 274	/* struct sock has to be the first member of packet_sock */
 275	struct sock		sk;
 276	struct packet_fanout	*fanout;
 277	struct tpacket_stats	stats;
 278	union  tpacket_stats_u	stats_u;
 279	struct packet_ring_buffer	rx_ring;
 280	struct packet_ring_buffer	tx_ring;
 281	int			copy_thresh;
 282	spinlock_t		bind_lock;
 283	struct mutex		pg_vec_lock;
 284	unsigned int		running:1,	/* prot_hook is attached*/
 285				auxdata:1,
 286				origdev:1,
 287				has_vnet_hdr:1;
 288	int			ifindex;	/* bound device		*/
 289	__be16			num;
 290	struct packet_mclist	*mclist;
 291	atomic_t		mapped;
 292	enum tpacket_versions	tp_version;
 293	unsigned int		tp_hdrlen;
 294	unsigned int		tp_reserve;
 295	unsigned int		tp_loss:1;
 296	unsigned int		tp_tstamp;
 297	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 298};
 299
 300#define PACKET_FANOUT_MAX	256
 301
 302struct packet_fanout {
 303#ifdef CONFIG_NET_NS
 304	struct net		*net;
 305#endif
 306	unsigned int		num_members;
 307	u16			id;
 308	u8			type;
 309	u8			defrag;
 310	atomic_t		rr_cur;
 311	struct list_head	list;
 312	struct sock		*arr[PACKET_FANOUT_MAX];
 313	spinlock_t		lock;
 314	atomic_t		sk_ref;
 315	struct packet_type	prot_hook ____cacheline_aligned_in_smp;
 316};
 317
 318struct packet_skb_cb {
 319	unsigned int origlen;
 320	union {
 321		struct sockaddr_pkt pkt;
 322		struct sockaddr_ll ll;
 
 
 
 
 
 
 
 323	} sa;
 324};
 325
 
 
 326#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 327
 328#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 329#define GET_PBLOCK_DESC(x, bid)	\
 330	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 331#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 332	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 333#define GET_NEXT_PRB_BLK_NUM(x) \
 334	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 335	((x)->kactive_blk_num+1) : 0)
 336
 337static struct packet_sock *pkt_sk(struct sock *sk)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 338{
 339	return (struct packet_sock *)sk;
 340}
 341
 342static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 343static void __fanout_link(struct sock *sk, struct packet_sock *po);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 344
 345/* register_prot_hook must be invoked with the po->bind_lock held,
 346 * or from a context in which asynchronous accesses to the packet
 347 * socket is not possible (packet_create()).
 348 */
 349static void register_prot_hook(struct sock *sk)
 350{
 351	struct packet_sock *po = pkt_sk(sk);
 
 352	if (!po->running) {
 353		if (po->fanout)
 354			__fanout_link(sk, po);
 355		else
 356			dev_add_pack(&po->prot_hook);
 
 357		sock_hold(sk);
 358		po->running = 1;
 359	}
 360}
 361
 362/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 363 * held.   If the sync parameter is true, we will temporarily drop
 
 
 
 
 
 364 * the po->bind_lock and do a synchronize_net to make sure no
 365 * asynchronous packet processing paths still refer to the elements
 366 * of po->prot_hook.  If the sync parameter is false, it is the
 367 * callers responsibility to take care of this.
 368 */
 369static void __unregister_prot_hook(struct sock *sk, bool sync)
 370{
 371	struct packet_sock *po = pkt_sk(sk);
 372
 
 
 373	po->running = 0;
 
 374	if (po->fanout)
 375		__fanout_unlink(sk, po);
 376	else
 377		__dev_remove_pack(&po->prot_hook);
 
 378	__sock_put(sk);
 379
 380	if (sync) {
 381		spin_unlock(&po->bind_lock);
 382		synchronize_net();
 383		spin_lock(&po->bind_lock);
 384	}
 385}
 386
 387static void unregister_prot_hook(struct sock *sk, bool sync)
 388{
 389	struct packet_sock *po = pkt_sk(sk);
 390
 391	if (po->running)
 392		__unregister_prot_hook(sk, sync);
 393}
 394
 395static inline __pure struct page *pgv_to_page(void *addr)
 396{
 397	if (is_vmalloc_addr(addr))
 398		return vmalloc_to_page(addr);
 399	return virt_to_page(addr);
 400}
 401
 402static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 403{
 404	union {
 405		struct tpacket_hdr *h1;
 406		struct tpacket2_hdr *h2;
 407		void *raw;
 408	} h;
 409
 410	h.raw = frame;
 411	switch (po->tp_version) {
 412	case TPACKET_V1:
 413		h.h1->tp_status = status;
 414		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 415		break;
 416	case TPACKET_V2:
 417		h.h2->tp_status = status;
 418		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 419		break;
 420	case TPACKET_V3:
 
 
 
 421	default:
 422		WARN(1, "TPACKET version not supported.\n");
 423		BUG();
 424	}
 425
 426	smp_wmb();
 427}
 428
 429static int __packet_get_status(struct packet_sock *po, void *frame)
 430{
 431	union {
 432		struct tpacket_hdr *h1;
 433		struct tpacket2_hdr *h2;
 434		void *raw;
 435	} h;
 436
 437	smp_rmb();
 438
 439	h.raw = frame;
 440	switch (po->tp_version) {
 441	case TPACKET_V1:
 442		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 443		return h.h1->tp_status;
 444	case TPACKET_V2:
 445		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 446		return h.h2->tp_status;
 447	case TPACKET_V3:
 
 
 448	default:
 449		WARN(1, "TPACKET version not supported.\n");
 450		BUG();
 451		return 0;
 452	}
 453}
 454
 455static void *packet_lookup_frame(struct packet_sock *po,
 456		struct packet_ring_buffer *rb,
 457		unsigned int position,
 458		int status)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 459{
 460	unsigned int pg_vec_pos, frame_offset;
 461	union {
 462		struct tpacket_hdr *h1;
 463		struct tpacket2_hdr *h2;
 464		void *raw;
 465	} h;
 466
 467	pg_vec_pos = position / rb->frames_per_block;
 468	frame_offset = position % rb->frames_per_block;
 469
 470	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 471		(frame_offset * rb->frame_size);
 472
 473	if (status != __packet_get_status(po, h.raw))
 474		return NULL;
 475
 476	return h.raw;
 477}
 478
 479static void *packet_current_frame(struct packet_sock *po,
 480		struct packet_ring_buffer *rb,
 481		int status)
 482{
 483	return packet_lookup_frame(po, rb, rb->head, status);
 484}
 485
 486static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 487{
 488	del_timer_sync(&pkc->retire_blk_timer);
 489}
 490
 491static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 492		int tx_ring,
 493		struct sk_buff_head *rb_queue)
 494{
 495	struct tpacket_kbdq_core *pkc;
 496
 497	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
 498
 499	spin_lock(&rb_queue->lock);
 500	pkc->delete_blk_timer = 1;
 501	spin_unlock(&rb_queue->lock);
 502
 503	prb_del_retire_blk_timer(pkc);
 504}
 505
 506static void prb_init_blk_timer(struct packet_sock *po,
 507		struct tpacket_kbdq_core *pkc,
 508		void (*func) (unsigned long))
 509{
 510	init_timer(&pkc->retire_blk_timer);
 511	pkc->retire_blk_timer.data = (long)po;
 512	pkc->retire_blk_timer.function = func;
 513	pkc->retire_blk_timer.expires = jiffies;
 514}
 515
 516static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
 517{
 518	struct tpacket_kbdq_core *pkc;
 519
 520	if (tx_ring)
 521		BUG();
 522
 523	pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
 524	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
 525}
 526
 527static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 528				int blk_size_in_bytes)
 529{
 530	struct net_device *dev;
 531	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
 532	struct ethtool_cmd ecmd;
 533	int err;
 534
 535	rtnl_lock();
 536	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 537	if (unlikely(!dev)) {
 538		rtnl_unlock();
 539		return DEFAULT_PRB_RETIRE_TOV;
 540	}
 541	err = __ethtool_get_settings(dev, &ecmd);
 542	rtnl_unlock();
 543	if (!err) {
 544		switch (ecmd.speed) {
 545		case SPEED_10000:
 546			msec = 1;
 547			div = 10000/1000;
 548			break;
 549		case SPEED_1000:
 550			msec = 1;
 551			div = 1000/1000;
 552			break;
 553		/*
 554		 * If the link speed is so slow you don't really
 555		 * need to worry about perf anyways
 556		 */
 557		case SPEED_100:
 558		case SPEED_10:
 559		default:
 560			return DEFAULT_PRB_RETIRE_TOV;
 561		}
 562	}
 563
 
 564	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 565
 566	if (div)
 567		mbits /= div;
 568
 569	tmo = mbits * msec;
 570
 571	if (div)
 572		return tmo+1;
 573	return tmo;
 574}
 575
 576static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 577			union tpacket_req_u *req_u)
 578{
 579	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 580}
 581
 582static void init_prb_bdqc(struct packet_sock *po,
 583			struct packet_ring_buffer *rb,
 584			struct pgv *pg_vec,
 585			union tpacket_req_u *req_u, int tx_ring)
 586{
 587	struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
 588	struct tpacket_block_desc *pbd;
 589
 590	memset(p1, 0x0, sizeof(*p1));
 591
 592	p1->knxt_seq_num = 1;
 593	p1->pkbdq = pg_vec;
 594	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 595	p1->pkblk_start	= (char *)pg_vec[0].buffer;
 596	p1->kblk_size = req_u->req3.tp_block_size;
 597	p1->knum_blocks	= req_u->req3.tp_block_nr;
 598	p1->hdrlen = po->tp_hdrlen;
 599	p1->version = po->tp_version;
 600	p1->last_kactive_blk_num = 0;
 601	po->stats_u.stats3.tp_freeze_q_cnt = 0;
 602	if (req_u->req3.tp_retire_blk_tov)
 603		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 604	else
 605		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 606						req_u->req3.tp_block_size);
 607	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 608	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 
 609
 
 610	prb_init_ft_ops(p1, req_u);
 611	prb_setup_retire_blk_timer(po, tx_ring);
 612	prb_open_block(p1, pbd);
 613}
 614
 615/*  Do NOT update the last_blk_num first.
 616 *  Assumes sk_buff_head lock is held.
 617 */
 618static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 619{
 620	mod_timer(&pkc->retire_blk_timer,
 621			jiffies + pkc->tov_in_jiffies);
 622	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 623}
 624
 625/*
 626 * Timer logic:
 627 * 1) We refresh the timer only when we open a block.
 628 *    By doing this we don't waste cycles refreshing the timer
 629 *	  on packet-by-packet basis.
 630 *
 631 * With a 1MB block-size, on a 1Gbps line, it will take
 632 * i) ~8 ms to fill a block + ii) memcpy etc.
 633 * In this cut we are not accounting for the memcpy time.
 634 *
 635 * So, if the user sets the 'tmo' to 10ms then the timer
 636 * will never fire while the block is still getting filled
 637 * (which is what we want). However, the user could choose
 638 * to close a block early and that's fine.
 639 *
 640 * But when the timer does fire, we check whether or not to refresh it.
 641 * Since the tmo granularity is in msecs, it is not too expensive
 642 * to refresh the timer, lets say every '8' msecs.
 643 * Either the user can set the 'tmo' or we can derive it based on
 644 * a) line-speed and b) block-size.
 645 * prb_calc_retire_blk_tmo() calculates the tmo.
 646 *
 647 */
 648static void prb_retire_rx_blk_timer_expired(unsigned long data)
 649{
 650	struct packet_sock *po = (struct packet_sock *)data;
 651	struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
 
 652	unsigned int frozen;
 653	struct tpacket_block_desc *pbd;
 654
 655	spin_lock(&po->sk.sk_receive_queue.lock);
 656
 657	frozen = prb_queue_frozen(pkc);
 658	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 659
 660	if (unlikely(pkc->delete_blk_timer))
 661		goto out;
 662
 663	/* We only need to plug the race when the block is partially filled.
 664	 * tpacket_rcv:
 665	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 666	 *		copy_bits() is in progress ...
 667	 *		timer fires on other cpu:
 668	 *		we can't retire the current block because copy_bits
 669	 *		is in progress.
 670	 *
 671	 */
 672	if (BLOCK_NUM_PKTS(pbd)) {
 673		while (atomic_read(&pkc->blk_fill_in_prog)) {
 674			/* Waiting for skb_copy_bits to finish... */
 675			cpu_relax();
 676		}
 677	}
 678
 679	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 680		if (!frozen) {
 
 
 
 
 681			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 682			if (!prb_dispatch_next_block(pkc, po))
 683				goto refresh_timer;
 684			else
 685				goto out;
 686		} else {
 687			/* Case 1. Queue was frozen because user-space was
 688			 *	   lagging behind.
 689			 */
 690			if (prb_curr_blk_in_use(pkc, pbd)) {
 691				/*
 692				 * Ok, user-space is still behind.
 693				 * So just refresh the timer.
 694				 */
 695				goto refresh_timer;
 696			} else {
 697			       /* Case 2. queue was frozen,user-space caught up,
 698				* now the link went idle && the timer fired.
 699				* We don't have a block to close.So we open this
 700				* block and restart the timer.
 701				* opening a block thaws the queue,restarts timer
 702				* Thawing/timer-refresh is a side effect.
 703				*/
 704				prb_open_block(pkc, pbd);
 705				goto out;
 706			}
 707		}
 708	}
 709
 710refresh_timer:
 711	_prb_refresh_rx_retire_blk_timer(pkc);
 712
 713out:
 714	spin_unlock(&po->sk.sk_receive_queue.lock);
 715}
 716
 717static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 718		struct tpacket_block_desc *pbd1, __u32 status)
 719{
 720	/* Flush everything minus the block header */
 721
 722#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 723	u8 *start, *end;
 724
 725	start = (u8 *)pbd1;
 726
 727	/* Skip the block header(we know header WILL fit in 4K) */
 728	start += PAGE_SIZE;
 729
 730	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 731	for (; start < end; start += PAGE_SIZE)
 732		flush_dcache_page(pgv_to_page(start));
 733
 734	smp_wmb();
 735#endif
 736
 737	/* Now update the block status. */
 738
 739	BLOCK_STATUS(pbd1) = status;
 740
 741	/* Flush the block header */
 742
 743#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 744	start = (u8 *)pbd1;
 745	flush_dcache_page(pgv_to_page(start));
 746
 747	smp_wmb();
 748#endif
 749}
 750
 751/*
 752 * Side effect:
 753 *
 754 * 1) flush the block
 755 * 2) Increment active_blk_num
 756 *
 757 * Note:We DONT refresh the timer on purpose.
 758 *	Because almost always the next block will be opened.
 759 */
 760static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 761		struct tpacket_block_desc *pbd1,
 762		struct packet_sock *po, unsigned int stat)
 763{
 764	__u32 status = TP_STATUS_USER | stat;
 765
 766	struct tpacket3_hdr *last_pkt;
 767	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 
 768
 769	if (po->stats.tp_drops)
 770		status |= TP_STATUS_LOSING;
 771
 772	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 773	last_pkt->tp_next_offset = 0;
 774
 775	/* Get the ts of the last pkt */
 776	if (BLOCK_NUM_PKTS(pbd1)) {
 777		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 778		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 779	} else {
 780		/* Ok, we tmo'd - so get the current time */
 781		struct timespec ts;
 782		getnstimeofday(&ts);
 
 
 
 
 783		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 784		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 785	}
 786
 787	smp_wmb();
 788
 789	/* Flush the block */
 790	prb_flush_block(pkc1, pbd1, status);
 791
 
 
 792	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 793}
 794
 795static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 796{
 797	pkc->reset_pending_on_curr_blk = 0;
 798}
 799
 800/*
 801 * Side effect of opening a block:
 802 *
 803 * 1) prb_queue is thawed.
 804 * 2) retire_blk_timer is refreshed.
 805 *
 806 */
 807static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 808	struct tpacket_block_desc *pbd1)
 809{
 810	struct timespec ts;
 811	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 812
 813	smp_rmb();
 814
 815	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd1))) {
 
 
 
 
 
 
 
 
 
 
 
 816
 817		/* We could have just memset this but we will lose the
 818		 * flexibility of making the priv area sticky
 819		 */
 820		BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 821		BLOCK_NUM_PKTS(pbd1) = 0;
 822		BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 823		getnstimeofday(&ts);
 824		h1->ts_first_pkt.ts_sec = ts.tv_sec;
 825		h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 826		pkc1->pkblk_start = (char *)pbd1;
 827		pkc1->nxt_offset = (char *)(pkc1->pkblk_start +
 828		BLK_PLUS_PRIV(pkc1->blk_sizeof_priv));
 829		BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 830		BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 831		pbd1->version = pkc1->version;
 832		pkc1->prev = pkc1->nxt_offset;
 833		pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 834		prb_thaw_queue(pkc1);
 835		_prb_refresh_rx_retire_blk_timer(pkc1);
 836
 837		smp_wmb();
 
 
 838
 839		return;
 840	}
 841
 842	WARN(1, "ERROR block:%p is NOT FREE status:%d kactive_blk_num:%d\n",
 843		pbd1, BLOCK_STATUS(pbd1), pkc1->kactive_blk_num);
 844	dump_stack();
 845	BUG();
 846}
 847
 848/*
 849 * Queue freeze logic:
 850 * 1) Assume tp_block_nr = 8 blocks.
 851 * 2) At time 't0', user opens Rx ring.
 852 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 853 * 4) user-space is either sleeping or processing block '0'.
 854 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 855 *    it will close block-7,loop around and try to fill block '0'.
 856 *    call-flow:
 857 *    __packet_lookup_frame_in_block
 858 *      prb_retire_current_block()
 859 *      prb_dispatch_next_block()
 860 *        |->(BLOCK_STATUS == USER) evaluates to true
 861 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 862 * 6) Now there are two cases:
 863 *    6.1) Link goes idle right after the queue is frozen.
 864 *         But remember, the last open_block() refreshed the timer.
 865 *         When this timer expires,it will refresh itself so that we can
 866 *         re-open block-0 in near future.
 867 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 868 *         case and __packet_lookup_frame_in_block will check if block-0
 869 *         is free and can now be re-used.
 870 */
 871static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 872				  struct packet_sock *po)
 873{
 874	pkc->reset_pending_on_curr_blk = 1;
 875	po->stats_u.stats3.tp_freeze_q_cnt++;
 876}
 877
 878#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 879
 880/*
 881 * If the next block is free then we will dispatch it
 882 * and return a good offset.
 883 * Else, we will freeze the queue.
 884 * So, caller must check the return value.
 885 */
 886static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 887		struct packet_sock *po)
 888{
 889	struct tpacket_block_desc *pbd;
 890
 891	smp_rmb();
 892
 893	/* 1. Get current block num */
 894	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 895
 896	/* 2. If this block is currently in_use then freeze the queue */
 897	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 898		prb_freeze_queue(pkc, po);
 899		return NULL;
 900	}
 901
 902	/*
 903	 * 3.
 904	 * open this block and return the offset where the first packet
 905	 * needs to get stored.
 906	 */
 907	prb_open_block(pkc, pbd);
 908	return (void *)pkc->nxt_offset;
 909}
 910
 911static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 912		struct packet_sock *po, unsigned int status)
 913{
 914	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 915
 916	/* retire/close the current block */
 917	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 918		/*
 919		 * Plug the case where copy_bits() is in progress on
 920		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 921		 * have space to copy the pkt in the current block and
 922		 * called prb_retire_current_block()
 923		 *
 924		 * We don't need to worry about the TMO case because
 925		 * the timer-handler already handled this case.
 926		 */
 927		if (!(status & TP_STATUS_BLK_TMO)) {
 928			while (atomic_read(&pkc->blk_fill_in_prog)) {
 929				/* Waiting for skb_copy_bits to finish... */
 930				cpu_relax();
 931			}
 932		}
 933		prb_close_block(pkc, pbd, po, status);
 934		return;
 935	}
 936
 937	WARN(1, "ERROR-pbd[%d]:%p\n", pkc->kactive_blk_num, pbd);
 938	dump_stack();
 939	BUG();
 940}
 941
 942static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
 943				      struct tpacket_block_desc *pbd)
 944{
 945	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 946}
 947
 948static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 949{
 950	return pkc->reset_pending_on_curr_blk;
 951}
 952
 953static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 
 954{
 955	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 956	atomic_dec(&pkc->blk_fill_in_prog);
 
 957}
 958
 959static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 960			struct tpacket3_hdr *ppd)
 961{
 962	ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
 963}
 964
 965static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 966			struct tpacket3_hdr *ppd)
 967{
 968	ppd->hv1.tp_rxhash = 0;
 969}
 970
 971static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 972			struct tpacket3_hdr *ppd)
 973{
 974	if (vlan_tx_tag_present(pkc->skb)) {
 975		ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
 976		ppd->tp_status = TP_STATUS_VLAN_VALID;
 
 977	} else {
 978		ppd->hv1.tp_vlan_tci = ppd->tp_status = 0;
 
 
 979	}
 980}
 981
 982static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 983			struct tpacket3_hdr *ppd)
 984{
 
 985	prb_fill_vlan_info(pkc, ppd);
 986
 987	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 988		prb_fill_rxhash(pkc, ppd);
 989	else
 990		prb_clear_rxhash(pkc, ppd);
 991}
 992
 993static void prb_fill_curr_block(char *curr,
 994				struct tpacket_kbdq_core *pkc,
 995				struct tpacket_block_desc *pbd,
 996				unsigned int len)
 
 997{
 998	struct tpacket3_hdr *ppd;
 999
1000	ppd  = (struct tpacket3_hdr *)curr;
1001	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1002	pkc->prev = curr;
1003	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1004	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1005	BLOCK_NUM_PKTS(pbd) += 1;
1006	atomic_inc(&pkc->blk_fill_in_prog);
1007	prb_run_all_ft_ops(pkc, ppd);
1008}
1009
1010/* Assumes caller has the sk->rx_queue.lock */
1011static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1012					    struct sk_buff *skb,
1013						int status,
1014					    unsigned int len
1015					    )
1016{
1017	struct tpacket_kbdq_core *pkc;
1018	struct tpacket_block_desc *pbd;
1019	char *curr, *end;
1020
1021	pkc = GET_PBDQC_FROM_RB(((struct packet_ring_buffer *)&po->rx_ring));
1022	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1023
1024	/* Queue is frozen when user space is lagging behind */
1025	if (prb_queue_frozen(pkc)) {
1026		/*
1027		 * Check if that last block which caused the queue to freeze,
1028		 * is still in_use by user-space.
1029		 */
1030		if (prb_curr_blk_in_use(pkc, pbd)) {
1031			/* Can't record this packet */
1032			return NULL;
1033		} else {
1034			/*
1035			 * Ok, the block was released by user-space.
1036			 * Now let's open that block.
1037			 * opening a block also thaws the queue.
1038			 * Thawing is a side effect.
1039			 */
1040			prb_open_block(pkc, pbd);
1041		}
1042	}
1043
1044	smp_mb();
1045	curr = pkc->nxt_offset;
1046	pkc->skb = skb;
1047	end = (char *) ((char *)pbd + pkc->kblk_size);
1048
1049	/* first try the current block */
1050	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1051		prb_fill_curr_block(curr, pkc, pbd, len);
1052		return (void *)curr;
1053	}
1054
1055	/* Ok, close the current block */
1056	prb_retire_current_block(pkc, po, 0);
1057
1058	/* Now, try to dispatch the next block */
1059	curr = (char *)prb_dispatch_next_block(pkc, po);
1060	if (curr) {
1061		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1062		prb_fill_curr_block(curr, pkc, pbd, len);
1063		return (void *)curr;
1064	}
1065
1066	/*
1067	 * No free blocks are available.user_space hasn't caught up yet.
1068	 * Queue was just frozen and now this packet will get dropped.
1069	 */
1070	return NULL;
1071}
1072
1073static void *packet_current_rx_frame(struct packet_sock *po,
1074					    struct sk_buff *skb,
1075					    int status, unsigned int len)
1076{
1077	char *curr = NULL;
1078	switch (po->tp_version) {
1079	case TPACKET_V1:
1080	case TPACKET_V2:
1081		curr = packet_lookup_frame(po, &po->rx_ring,
1082					po->rx_ring.head, status);
1083		return curr;
1084	case TPACKET_V3:
1085		return __packet_lookup_frame_in_block(po, skb, status, len);
1086	default:
1087		WARN(1, "TPACKET version not supported\n");
1088		BUG();
1089		return 0;
1090	}
1091}
1092
1093static void *prb_lookup_block(struct packet_sock *po,
1094				     struct packet_ring_buffer *rb,
1095				     unsigned int previous,
1096				     int status)
1097{
1098	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1099	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, previous);
1100
1101	if (status != BLOCK_STATUS(pbd))
1102		return NULL;
1103	return pbd;
1104}
1105
1106static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1107{
1108	unsigned int prev;
1109	if (rb->prb_bdqc.kactive_blk_num)
1110		prev = rb->prb_bdqc.kactive_blk_num-1;
1111	else
1112		prev = rb->prb_bdqc.knum_blocks-1;
1113	return prev;
1114}
1115
1116/* Assumes caller has held the rx_queue.lock */
1117static void *__prb_previous_block(struct packet_sock *po,
1118					 struct packet_ring_buffer *rb,
1119					 int status)
1120{
1121	unsigned int previous = prb_previous_blk_num(rb);
1122	return prb_lookup_block(po, rb, previous, status);
1123}
1124
1125static void *packet_previous_rx_frame(struct packet_sock *po,
1126					     struct packet_ring_buffer *rb,
1127					     int status)
1128{
1129	if (po->tp_version <= TPACKET_V2)
1130		return packet_previous_frame(po, rb, status);
1131
1132	return __prb_previous_block(po, rb, status);
1133}
1134
1135static void packet_increment_rx_head(struct packet_sock *po,
1136					    struct packet_ring_buffer *rb)
1137{
1138	switch (po->tp_version) {
1139	case TPACKET_V1:
1140	case TPACKET_V2:
1141		return packet_increment_head(rb);
1142	case TPACKET_V3:
1143	default:
1144		WARN(1, "TPACKET version not supported.\n");
1145		BUG();
1146		return;
1147	}
1148}
1149
1150static void *packet_previous_frame(struct packet_sock *po,
1151		struct packet_ring_buffer *rb,
1152		int status)
1153{
1154	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1155	return packet_lookup_frame(po, rb, previous, status);
1156}
1157
1158static void packet_increment_head(struct packet_ring_buffer *buff)
1159{
1160	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1161}
1162
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1163static void packet_sock_destruct(struct sock *sk)
1164{
1165	skb_queue_purge(&sk->sk_error_queue);
1166
1167	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1168	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1169
1170	if (!sock_flag(sk, SOCK_DEAD)) {
1171		pr_err("Attempt to release alive packet socket: %p\n", sk);
1172		return;
1173	}
1174
1175	sk_refcnt_debug_dec(sk);
1176}
1177
1178static int fanout_rr_next(struct packet_fanout *f, unsigned int num)
1179{
1180	int x = atomic_read(&f->rr_cur) + 1;
 
 
 
 
 
 
 
 
 
1181
1182	if (x >= num)
1183		x = 0;
 
1184
1185	return x;
1186}
1187
1188static struct sock *fanout_demux_hash(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
 
 
1189{
1190	u32 idx, hash = skb->rxhash;
 
 
 
 
 
 
 
1191
1192	idx = ((u64)hash * num) >> 32;
 
 
 
 
 
 
 
 
1193
1194	return f->arr[idx];
 
 
 
 
1195}

static struct sock *fanout_demux_lb(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	int cur, old;

	cur = atomic_read(&f->rr_cur);
	while ((old = atomic_cmpxchg(&f->rr_cur, cur,
				     fanout_rr_next(f, num))) != cur)
		cur = old;
	return f->arr[cur];
}

static struct sock *fanout_demux_cpu(struct packet_fanout *f, struct sk_buff *skb, unsigned int num)
{
	unsigned int cpu = smp_processor_id();

	return f->arr[cpu % num];
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = f->num_members;
	struct packet_sock *po;
	struct sock *sk;

	if (!net_eq(dev_net(dev), read_pnet(&f->net)) ||
	    !num) {
		kfree_skb(skb);
		return 0;
	}

	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		if (f->defrag) {
			skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
			if (!skb)
				return 0;
		}
		skb_get_rxhash(skb);
		sk = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		sk = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		sk = fanout_demux_cpu(f, skb, num);
		break;
	}

	po = pkt_sk(sk);

	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}

static DEFINE_MUTEX(fanout_mutex);
static LIST_HEAD(fanout_list);

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	f->arr[f->num_members] = sk;
	smp_wmb();
	f->num_members++;
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (f->arr[i] == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	f->arr[i] = f->arr[f->num_members - 1];
	f->num_members--;
	spin_unlock(&f->lock);
}

bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
		return true;

	return false;
}

static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 defrag = (type_flags & PACKET_FANOUT_FLAG_DEFRAG) ? 1 : 0;
	int err;

	switch (type) {
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
		break;
	default:
		return -EINVAL;
	}

	if (!po->running)
		return -EINVAL;

	if (po->fanout)
		return -EALREADY;

	mutex_lock(&fanout_mutex);
	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match && match->defrag != defrag)
		goto out;
	if (!match) {
		err = -ENOMEM;
		match = kzalloc(sizeof(*match), GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->defrag = defrag;
		atomic_set(&match->rr_cur, 0);
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		atomic_set(&match->sk_ref, 0);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		dev_add_pack(&match->prot_hook);
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;
	if (match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			atomic_inc(&match->sk_ref);
			__fanout_link(sk, po);
			err = 0;
		}
	}
out:
	mutex_unlock(&fanout_mutex);
	return err;
}
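
/*
 * A minimal userspace sketch of joining a fanout group (illustrative;
 * the group id 42 and the already-bound socket "fd" are arbitrary).
 * The encoding mirrors fanout_add() above and the PACKET_FANOUT case
 * in packet_setsockopt(): the low 16 bits carry the group id, the
 * high 16 bits the type and flags.
 *
 *	int val = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)) < 0)
 *		perror("setsockopt(PACKET_FANOUT)");
 */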

static void fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	f = po->fanout;
	if (!f)
		return;

	po->fanout = NULL;

	mutex_lock(&fanout_mutex);
	if (atomic_dec_and_test(&f->sk_ref)) {
		list_del(&f->list);
		dev_remove_pack(&f->prot_hook);
		kfree(f);
	}
	mutex_unlock(&fanout_mutex);
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have the ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb_mac_header(skb)
	 *	so that this procedure is a noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using up all the memory.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

/*
 *	Output a raw packet to a device layer. This bypasses all the other
 *	protocol layers and you must therefore supply it with a complete frame.
 */

static int packet_sendmsg_spkt(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_pkt *saddr = (struct sockaddr_pkt *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[13] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_unlock;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_unlock;

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
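
/*
 * A minimal userspace sketch of the (obsolete) SOCK_PACKET send path
 * above; "fd", "frame" and "len" are assumed to exist.  Note that the
 * name length must be exactly sizeof(struct sockaddr_pkt) for
 * spkt_protocol to be honoured:
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, len, 0, (struct sockaddr *)&spkt, sizeof(spkt));
 */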

static unsigned int run_filter(const struct sk_buff *skb,
			       const struct sock *sk,
			       unsigned int res)
{
	struct sk_filter *filter;

	rcu_read_lock();
	filter = rcu_dereference(sk->sk_filter);
	if (filter != NULL)
		res = SK_RUN_FILTER(filter, skb);
	rcu_read_unlock();

	return res;
}
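
/*
 * A minimal userspace sketch of attaching the classic BPF filter that
 * run_filter() executes ("fd" is an assumed AF_PACKET socket; the
 * single "ret #0xffff" instruction accepts up to 64K of each packet):
 *
 *	struct sock_filter code[] = {
 *		{ 0x06, 0, 0, 0x0000ffff },	// BPF_RET | BPF_K
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */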

/*
 * This function does lazy skb cloning, in the hope that most of the
 * packets are discarded by BPF.
 *
 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
 * and skb->cb are mangled. It works because (and until) packets
 * falling here are owned by the current CPU. Output packets are cloned
 * by dev_queue_xmit_nit(), input packets are processed by net_bh
 * sequentially, so that if we return skb to its original state on exit,
 * we will not harm anyone.
 */

static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
		      struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_ll *sll;
	struct packet_sock *po;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	skb->dev = dev;

	if (dev->header_ops) {
		/* The device has an explicit notion of ll header,
		 * exported to higher levels.
		 *
		 * Otherwise, the device hides details of its frame
		 * structure, so that the corresponding packet head is
		 * never delivered to the user.
		 */
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
		goto drop_n_acct;

	if (skb_shared(skb)) {
		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
		if (nskb == NULL)
			goto drop_n_acct;

		if (skb_head != skb->data) {
			skb->data = skb_head;
			skb->len = skb_len;
		}
		consume_skb(skb);
		skb = nskb;
	}

	BUILD_BUG_ON(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8 >
		     sizeof(skb->cb));

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);

	PACKET_SKB_CB(skb)->origlen = skb->len;

	if (pskb_trim(skb, snaplen))
		goto drop_n_acct;

	skb_set_owner_r(skb, sk);
	skb->dev = NULL;
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset(skb);

	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_packets++;
	skb->dropcount = atomic_read(&sk->sk_drops);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	spin_unlock(&sk->sk_receive_queue.lock);
	sk->sk_data_ready(sk, skb->len);
	return 0;

drop_n_acct:
	spin_lock(&sk->sk_receive_queue.lock);
	po->stats.tp_drops++;
	atomic_inc(&sk->sk_drops);
	spin_unlock(&sk->sk_receive_queue.lock);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	consume_skb(skb);
	return 0;
}

static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct packet_sock *po;
	struct sockaddr_ll *sll;
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		struct tpacket3_hdr *h3;
		void *raw;
	} h;
	u8 *skb_head = skb->data;
	int skb_len = skb->len;
	unsigned int snaplen, res;
	unsigned long status = TP_STATUS_USER;
	unsigned short macoff, netoff, hdrlen;
	struct sk_buff *copy_skb = NULL;
	struct timeval tv;
	struct timespec ts;
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto drop;

	sk = pt->af_packet_priv;
	po = pkt_sk(sk);

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto drop;

	if (dev->header_ops) {
		if (sk->sk_type != SOCK_DGRAM)
			skb_push(skb, skb->data - skb_mac_header(skb));
		else if (skb->pkt_type == PACKET_OUTGOING) {
			/* Special case: outgoing packets have ll header at head */
			skb_pull(skb, skb_network_offset(skb));
		}
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		status |= TP_STATUS_CSUMNOTREADY;

	snaplen = skb->len;

	res = run_filter(skb, sk, snaplen);
	if (!res)
		goto drop_n_restore;
	if (snaplen > res)
		snaplen = res;

	if (sk->sk_type == SOCK_DGRAM) {
		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
				  po->tp_reserve;
	} else {
		unsigned int maclen = skb_network_offset(skb);
		netoff = TPACKET_ALIGN(po->tp_hdrlen +
				       (maclen < 16 ? 16 : maclen)) +
			po->tp_reserve;
		macoff = netoff - maclen;
	}
	if (po->tp_version <= TPACKET_V2) {
		if (macoff + snaplen > po->rx_ring.frame_size) {
			if (po->copy_thresh &&
			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
				if (skb_shared(skb)) {
					copy_skb = skb_clone(skb, GFP_ATOMIC);
				} else {
					copy_skb = skb_get(skb);
					skb_head = skb->data;
				}
				if (copy_skb)
					skb_set_owner_r(copy_skb, sk);
			}
			snaplen = po->rx_ring.frame_size - macoff;
			if ((int)snaplen < 0)
				snaplen = 0;
		}
	}
	spin_lock(&sk->sk_receive_queue.lock);
	h.raw = packet_current_rx_frame(po, skb,
					TP_STATUS_KERNEL, (macoff+snaplen));
	if (!h.raw)
		goto ring_is_full;
	if (po->tp_version <= TPACKET_V2) {
		packet_increment_rx_head(po, &po->rx_ring);
		/*
		 * LOSING will be reported until you read the stats,
		 * because it's COR - Clear On Read.
		 * Anyway, moving it for V1/V2 only, as V3 doesn't need this
		 * at the packet level.
		 */
		if (po->stats.tp_drops)
			status |= TP_STATUS_LOSING;
	}
	po->stats.tp_packets++;
	if (copy_skb) {
		status |= TP_STATUS_COPY;
		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
	}
	spin_unlock(&sk->sk_receive_queue.lock);

	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);

	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_len = skb->len;
		h.h1->tp_snaplen = snaplen;
		h.h1->tp_mac = macoff;
		h.h1->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			tv = ktime_to_timeval(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			tv = ktime_to_timeval(skb->tstamp);
		else
			do_gettimeofday(&tv);
		h.h1->tp_sec = tv.tv_sec;
		h.h1->tp_usec = tv.tv_usec;
		hdrlen = sizeof(*h.h1);
		break;
	case TPACKET_V2:
		h.h2->tp_len = skb->len;
		h.h2->tp_snaplen = snaplen;
		h.h2->tp_mac = macoff;
		h.h2->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		if (vlan_tx_tag_present(skb)) {
			h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
			status |= TP_STATUS_VLAN_VALID;
		} else {
			h.h2->tp_vlan_tci = 0;
		}
		h.h2->tp_padding = 0;
		hdrlen = sizeof(*h.h2);
		break;
	case TPACKET_V3:
		/* tp_nxt_offset, vlan are already populated above.
		 * So DON'T clear those fields here.
		 */
		h.h3->tp_status |= status;
		h.h3->tp_len = skb->len;
		h.h3->tp_snaplen = snaplen;
		h.h3->tp_mac = macoff;
		h.h3->tp_net = netoff;
		if ((po->tp_tstamp & SOF_TIMESTAMPING_SYS_HARDWARE)
				&& shhwtstamps->syststamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->syststamp);
		else if ((po->tp_tstamp & SOF_TIMESTAMPING_RAW_HARDWARE)
				&& shhwtstamps->hwtstamp.tv64)
			ts = ktime_to_timespec(shhwtstamps->hwtstamp);
		else if (skb->tstamp.tv64)
			ts = ktime_to_timespec(skb->tstamp);
		else
			getnstimeofday(&ts);
		h.h3->tp_sec  = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		hdrlen = sizeof(*h.h3);
		break;
	default:
		BUG();
	}

	sll = h.raw + TPACKET_ALIGN(hdrlen);
	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
	sll->sll_family = AF_PACKET;
	sll->sll_hatype = dev->type;
	sll->sll_protocol = skb->protocol;
	sll->sll_pkttype = skb->pkt_type;
	if (unlikely(po->origdev))
		sll->sll_ifindex = orig_dev->ifindex;
	else
		sll->sll_ifindex = dev->ifindex;

	smp_mb();
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	{
		u8 *start, *end;

		if (po->tp_version <= TPACKET_V2) {
			end = (u8 *)PAGE_ALIGN((unsigned long)h.raw
				+ macoff + snaplen);
			for (start = h.raw; start < end; start += PAGE_SIZE)
				flush_dcache_page(pgv_to_page(start));
		}
		smp_wmb();
	}
#endif
	if (po->tp_version <= TPACKET_V2)
		__packet_set_status(po, h.raw, status);
	else
		prb_clear_blk_fill_status(&po->rx_ring);

	sk->sk_data_ready(sk, 0);

drop_n_restore:
	if (skb_head != skb->data && skb_shared(skb)) {
		skb->data = skb_head;
		skb->len = skb_len;
	}
drop:
	kfree_skb(skb);
	return 0;

ring_is_full:
	po->stats.tp_drops++;
	spin_unlock(&sk->sk_receive_queue.lock);

	sk->sk_data_ready(sk, 0);
	kfree_skb(copy_skb);
	goto drop_n_restore;
}
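
/*
 * A minimal userspace sketch of draining a TPACKET_V2 RX ring such as
 * the one tpacket_rcv() fills ("fd", "req" and the mmap'ed "ring" are
 * assumed to have been set up via PACKET_VERSION/PACKET_RX_RING, with
 * tp_block_size a multiple of tp_frame_size; "handle_frame" is a
 * hypothetical consumer):
 *
 *	unsigned int i = 0;
 *
 *	for (;;) {
 *		struct tpacket2_hdr *hdr =
 *			(void *)(ring + i * req.tp_frame_size);
 *
 *		if (!(hdr->tp_status & TP_STATUS_USER)) {
 *			struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *			poll(&pfd, 1, -1);
 *			continue;
 *		}
 *		handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;	// hand the frame back
 *		i = (i + 1) % req.tp_frame_nr;
 *	}
 */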

static void tpacket_destruct_skb(struct sk_buff *skb)
{
	struct packet_sock *po = pkt_sk(skb->sk);
	void *ph;

	if (likely(po->tx_ring.pg_vec)) {
		ph = skb_shinfo(skb)->destructor_arg;
		BUG_ON(atomic_read(&po->tx_ring.pending) == 0);
		atomic_dec(&po->tx_ring.pending);
		__packet_set_status(po, ph, TP_STATUS_AVAILABLE);
	}

	sock_wfree(skb);
}

static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
		void *frame, struct net_device *dev, int size_max,
		__be16 proto, unsigned char *addr, int hlen)
{
	union {
		struct tpacket_hdr *h1;
		struct tpacket2_hdr *h2;
		void *raw;
	} ph;
	int to_write, offset, len, tp_len, nr_frags, len_max;
	struct socket *sock = po->sk.sk_socket;
	struct page *page;
	void *data;
	int err;

	ph.raw = frame;

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = po->sk.sk_priority;
	skb->mark = po->sk.sk_mark;
	skb_shinfo(skb)->destructor_arg = ph.raw;

	switch (po->tp_version) {
	case TPACKET_V2:
		tp_len = ph.h2->tp_len;
		break;
	default:
		tp_len = ph.h1->tp_len;
		break;
	}
	if (unlikely(tp_len > size_max)) {
		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
		return -EMSGSIZE;
	}

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);

	data = ph.raw + po->tp_hdrlen - sizeof(struct sockaddr_ll);
	to_write = tp_len;

	if (sock->type == SOCK_DGRAM) {
		err = dev_hard_header(skb, dev, ntohs(proto), addr,
				NULL, tp_len);
		if (unlikely(err < 0))
			return -EINVAL;
	} else if (dev->hard_header_len) {
		/* net device doesn't like empty head */
		if (unlikely(tp_len <= dev->hard_header_len)) {
			pr_err("packet size is too short (%d < %d)\n",
			       tp_len, dev->hard_header_len);
			return -EINVAL;
		}

		skb_push(skb, dev->hard_header_len);
		err = skb_store_bits(skb, 0, data,
				dev->hard_header_len);
		if (unlikely(err))
			return err;

		data += dev->hard_header_len;
		to_write -= dev->hard_header_len;
	}

	err = -EFAULT;
	offset = offset_in_page(data);
	len_max = PAGE_SIZE - offset;
	len = ((to_write > len_max) ? len_max : to_write);

	skb->data_len = to_write;
	skb->len += to_write;
	skb->truesize += to_write;
	atomic_add(to_write, &po->sk.sk_wmem_alloc);

	while (likely(to_write)) {
		nr_frags = skb_shinfo(skb)->nr_frags;

		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
			pr_err("Packet exceeds the number of skb frags (%lu)\n",
			       MAX_SKB_FRAGS);
			return -EFAULT;
		}

		page = pgv_to_page(data);
		data += len;
		flush_dcache_page(page);
		get_page(page);
		skb_fill_page_desc(skb, nr_frags, page, offset, len);
		to_write -= len;
		offset = 0;
		len_max = PAGE_SIZE;
		len = ((to_write > len_max) ? len_max : to_write);
	}

	return tp_len;
}

static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	int err, reserve = 0;
	void *ph;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	int tp_len, size_max;
	unsigned char *addr;
	int len_sum = 0;
	int status = 0;
	int hlen, tlen;

	mutex_lock(&po->pg_vec_lock);

	err = -EBUSY;
	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen
					+ offsetof(struct sockaddr_ll,
						sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (unlikely(dev == NULL))
		goto out;

	reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (unlikely(!(dev->flags & IFF_UP)))
		goto out_put;

	size_max = po->tx_ring.frame_size
		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));

	if (size_max > dev->mtu + reserve)
		size_max = dev->mtu + reserve;

	do {
		ph = packet_current_frame(po, &po->tx_ring,
				TP_STATUS_SEND_REQUEST);

		if (unlikely(ph == NULL)) {
			schedule();
			continue;
		}

		status = TP_STATUS_SEND_REQUEST;
		hlen = LL_RESERVED_SPACE(dev);
		tlen = dev->needed_tailroom;
		skb = sock_alloc_send_skb(&po->sk,
				hlen + tlen + sizeof(struct sockaddr_ll),
				0, &err);

		if (unlikely(skb == NULL))
			goto out_status;

		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
				addr, hlen);

		if (unlikely(tp_len < 0)) {
			if (po->tp_loss) {
				__packet_set_status(po, ph,
						TP_STATUS_AVAILABLE);
				packet_increment_head(&po->tx_ring);
				kfree_skb(skb);
				continue;
			} else {
				status = TP_STATUS_WRONG_FORMAT;
				err = tp_len;
				goto out_status;
			}
		}

		skb->destructor = tpacket_destruct_skb;
		__packet_set_status(po, ph, TP_STATUS_SENDING);
		atomic_inc(&po->tx_ring.pending);

		status = TP_STATUS_SEND_REQUEST;
		err = dev_queue_xmit(skb);
		if (unlikely(err > 0)) {
			err = net_xmit_errno(err);
			if (err && __packet_get_status(po, ph) ==
				   TP_STATUS_AVAILABLE) {
				/* skb was destructed already */
				skb = NULL;
				goto out_status;
			}
			/*
			 * skb was dropped but not destructed yet;
			 * let's treat it like congestion or err < 0
			 */
			err = 0;
		}
		packet_increment_head(&po->tx_ring);
		len_sum += tp_len;
	} while (likely((ph != NULL) ||
			((!(msg->msg_flags & MSG_DONTWAIT)) &&
			 (atomic_read(&po->tx_ring.pending))))
		);

	err = len_sum;
	goto out_put;

out_status:
	__packet_set_status(po, ph, status);
	kfree_skb(skb);
out_put:
	if (need_rls_dev)
		dev_put(dev);
out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
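
/*
 * The userspace side of tpacket_snd() in sketch form ("fd", "i", "req",
 * "frame", "len" and the mmap'ed "ring" are assumed; TPACKET_V2 framing,
 * where the data area starts TPACKET2_HDRLEN - sizeof(struct sockaddr_ll)
 * into the frame, matching tpacket_fill_skb() above):
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + i * req.tp_frame_size);
 *	char *data = (char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll);
 *
 *	while (hdr->tp_status != TP_STATUS_AVAILABLE)
 *		;				// or poll() for POLLOUT
 *	memcpy(data, frame, len);		// complete l2 frame
 *	hdr->tp_len = len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);			// kick the kernel loop above
 */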

static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
					size_t reserve, size_t len,
					size_t linear, int noblock,
					int *err)
{
	struct sk_buff *skb;

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err);
	if (!skb)
		return NULL;

	skb_reserve(skb, reserve);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static int packet_snd(struct socket *sock,
		      struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
	struct sk_buff *skb;
	struct net_device *dev;
	__be16 proto;
	bool need_rls_dev = false;
	unsigned char *addr;
	int err, reserve = 0;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int offset = 0;
	int vnet_hdr_len;
	struct packet_sock *po = pkt_sk(sk);
	unsigned short gso_type = 0;
	int hlen, tlen;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr == NULL) {
		dev = po->prot_hook.dev;
		proto	= po->num;
		addr	= NULL;
	} else {
		err = -EINVAL;
		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
			goto out;
		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
			goto out;
		proto	= saddr->sll_protocol;
		addr	= saddr->sll_addr;
		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
		need_rls_dev = true;
	}

	err = -ENXIO;
	if (dev == NULL)
		goto out_unlock;
	if (sock->type == SOCK_RAW)
		reserve = dev->hard_header_len;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	if (po->has_vnet_hdr) {
		vnet_hdr_len = sizeof(vnet_hdr);

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto out_unlock;

		len -= vnet_hdr_len;

		err = memcpy_fromiovec((void *)&vnet_hdr, msg->msg_iov,
				       vnet_hdr_len);
		if (err < 0)
			goto out_unlock;

		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    (vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
		      vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = vnet_hdr.csum_start +
						 vnet_hdr.csum_offset + 2;

		err = -EINVAL;
		if (vnet_hdr.hdr_len > len)
			goto out_unlock;

		if (vnet_hdr.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
			switch (vnet_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
			case VIRTIO_NET_HDR_GSO_TCPV4:
				gso_type = SKB_GSO_TCPV4;
				break;
			case VIRTIO_NET_HDR_GSO_TCPV6:
				gso_type = SKB_GSO_TCPV6;
				break;
			case VIRTIO_NET_HDR_GSO_UDP:
				gso_type = SKB_GSO_UDP;
				break;
			default:
				goto out_unlock;
			}

			if (vnet_hdr.gso_type & VIRTIO_NET_HDR_GSO_ECN)
				gso_type |= SKB_GSO_TCP_ECN;

			if (vnet_hdr.gso_size == 0)
				goto out_unlock;

		}
	}

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (!gso_type && (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
		goto out_unlock;

	err = -ENOBUFS;
	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, vnet_hdr.hdr_len,
			       msg->msg_flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto out_unlock;

	skb_set_network_header(skb, reserve);

	err = -EINVAL;
	if (sock->type == SOCK_DGRAM &&
	    (offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len)) < 0)
		goto out_free;

	/* Returns -EFAULT on error */
	err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
	if (err)
		goto out_free;
	err = sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
	if (err < 0)
		goto out_free;

	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
		/* Earlier code assumed this would be a VLAN pkt,
		 * double-check this now that we have the actual
		 * packet in hand.
		 */
		struct ethhdr *ehdr;
		skb_reset_mac_header(skb);
		ehdr = eth_hdr(skb);
		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
			err = -EMSGSIZE;
			goto out_free;
		}
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	if (po->has_vnet_hdr) {
		if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
			if (!skb_partial_csum_set(skb, vnet_hdr.csum_start,
						  vnet_hdr.csum_offset)) {
				err = -EINVAL;
				goto out_free;
			}
		}

		skb_shinfo(skb)->gso_size = vnet_hdr.gso_size;
		skb_shinfo(skb)->gso_type = gso_type;

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;

		len += vnet_hdr_len;
	}

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	/*
	 *	Now send it
	 */

	err = dev_queue_xmit(skb);
	if (err > 0 && (err = net_xmit_errno(err)) != 0)
		goto out_unlock;

	if (need_rls_dev)
		dev_put(dev);

	return len;

out_free:
	kfree_skb(skb);
out_unlock:
	if (dev && need_rls_dev)
		dev_put(dev);
out:
	return err;
}

static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
		struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);

	if (po->tx_ring.pg_vec)
		return tpacket_snd(po, msg);
	else
		return packet_snd(sock, msg, len);
}

/*
 *	Close a PACKET socket. This is fairly simple. We immediately go
 *	to 'closed' state and remove our protocol entry in the device list.
 */

static int packet_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po;
	struct net *net;
	union tpacket_req_u req_u;

	if (!sk)
		return 0;

	net = sock_net(sk);
	po = pkt_sk(sk);

	spin_lock_bh(&net->packet.sklist_lock);
	sk_del_node_init_rcu(sk);
	sock_prot_inuse_add(net, sk->sk_prot, -1);
	spin_unlock_bh(&net->packet.sklist_lock);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, false);
	if (po->prot_hook.dev) {
		dev_put(po->prot_hook.dev);
		po->prot_hook.dev = NULL;
	}
	spin_unlock(&po->bind_lock);

	packet_flush_mclist(sk);

	memset(&req_u, 0, sizeof(req_u));

	if (po->rx_ring.pg_vec)
		packet_set_ring(sk, &req_u, 1, 0);

	if (po->tx_ring.pg_vec)
		packet_set_ring(sk, &req_u, 1, 1);

	fanout_release(sk);

	synchronize_net();
	/*
	 *	Now the socket is dead. No more input will appear.
	 */
	sock_orphan(sk);
	sock->sk = NULL;

	/* Purge queues */

	skb_queue_purge(&sk->sk_receive_queue);
	sk_refcnt_debug_release(sk);

	sock_put(sk);
	return 0;
}

/*
 *	Attach a packet hook.
 */

static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protocol)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->fanout) {
		if (dev)
			dev_put(dev);

		return -EINVAL;
	}

	lock_sock(sk);

	spin_lock(&po->bind_lock);
	unregister_prot_hook(sk, true);
	po->num = protocol;
	po->prot_hook.type = protocol;
	if (po->prot_hook.dev)
		dev_put(po->prot_hook.dev);
	po->prot_hook.dev = dev;

	po->ifindex = dev ? dev->ifindex : 0;

	if (protocol == 0)
		goto out_unlock;

	if (!dev || (dev->flags & IFF_UP)) {
		register_prot_hook(sk);
	} else {
		sk->sk_err = ENETDOWN;
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_error_report(sk);
	}

out_unlock:
	spin_unlock(&po->bind_lock);
	release_sock(sk);
	return 0;
}

/*
 *	Bind a packet socket to a device
 */

static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
			    int addr_len)
{
	struct sock *sk = sock->sk;
	char name[15];
	struct net_device *dev;
	int err = -ENODEV;

	/*
	 *	Check legality
	 */

	if (addr_len != sizeof(struct sockaddr))
		return -EINVAL;
	strlcpy(name, uaddr->sa_data, sizeof(name));

	dev = dev_get_by_name(sock_net(sk), name);
	if (dev)
		err = packet_do_bind(sk, dev, pkt_sk(sk)->num);
	return err;
}

static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
	struct sock *sk = sock->sk;
	struct net_device *dev = NULL;
	int err;

	/*
	 *	Check legality
	 */

	if (addr_len < sizeof(struct sockaddr_ll))
		return -EINVAL;
	if (sll->sll_family != AF_PACKET)
		return -EINVAL;

	if (sll->sll_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(sock_net(sk), sll->sll_ifindex);
		if (dev == NULL)
			goto out;
	}
	err = packet_do_bind(sk, dev, sll->sll_protocol ? : pkt_sk(sk)->num);

out:
	return err;
}
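
/*
 * A minimal sketch of the userspace call that lands in packet_bind()
 * ("fd" is an assumed AF_PACKET socket):
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family	= AF_PACKET,
 *		.sll_protocol	= htons(ETH_P_ALL),
 *		.sll_ifindex	= if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */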

static struct proto packet_proto = {
	.name	  = "PACKET",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct packet_sock),
};

/*
 *	Create a packet socket (including type SOCK_PACKET).
 */

static int packet_create(struct net *net, struct socket *sock, int protocol,
			 int kern)
{
	struct sock *sk;
	struct packet_sock *po;
	__be16 proto = (__force __be16)protocol; /* weird, but documented */
	int err;

	if (!capable(CAP_NET_RAW))
		return -EPERM;
	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
	    sock->type != SOCK_PACKET)
		return -ESOCKTNOSUPPORT;

	sock->state = SS_UNCONNECTED;

	err = -ENOBUFS;
	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto);
	if (sk == NULL)
		goto out;

	sock->ops = &packet_ops;
	if (sock->type == SOCK_PACKET)
		sock->ops = &packet_ops_spkt;

	sock_init_data(sock, sk);

	po = pkt_sk(sk);
	sk->sk_family = PF_PACKET;
	po->num = proto;

	sk->sk_destruct = packet_sock_destruct;
	sk_refcnt_debug_inc(sk);

	/*
	 *	Attach a protocol block
	 */

	spin_lock_init(&po->bind_lock);
	mutex_init(&po->pg_vec_lock);
	po->prot_hook.func = packet_rcv;

	if (sock->type == SOCK_PACKET)
		po->prot_hook.func = packet_rcv_spkt;

	po->prot_hook.af_packet_priv = sk;

	if (proto) {
		po->prot_hook.type = proto;
		register_prot_hook(sk);
	}

	spin_lock_bh(&net->packet.sklist_lock);
	sk_add_node_rcu(sk, &net->packet.sklist);
	sock_prot_inuse_add(net, &packet_proto, 1);
	spin_unlock_bh(&net->packet.sklist_lock);

	return 0;
out:
	return err;
}
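
/*
 * The corresponding userspace entry point, for reference (requires
 * CAP_NET_RAW, as checked above; the protocol is passed in network
 * byte order, hence the htons()):
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *	if (fd < 0)
 *		perror("socket(AF_PACKET)");
 */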

static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	int copied, err;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);
	put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
		 sizeof(serr->ee), &serr->ee);

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else
		spin_unlock_bh(&sk->sk_error_queue.lock);

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}

/*
 *	Pull a packet from our receive queue and hand it to the user.
 *	If necessary we block.
 */

static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
			  struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;
	struct sockaddr_ll *sll;
	int vnet_hdr_len = 0;

	err = -EINVAL;
	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
		goto out;

#if 0
	/* What error should we return now? EUNATTACH? */
	if (pkt_sk(sk)->ifindex < 0)
		return -ENODEV;
#endif

	if (flags & MSG_ERRQUEUE) {
		err = packet_recv_error(sk, msg, len);
		goto out;
	}

	/*
	 *	Call the generic datagram receiver. This handles all sorts
	 *	of horrible races and re-entrancy so we can forget about it
	 *	in the protocol layers.
	 *
	 *	Now it will return ENETDOWN if the device has just gone down,
	 *	but then it will block.
	 */

	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);

	/*
	 *	An error occurred, so return it. Because skb_recv_datagram()
	 *	handles the blocking for us, we don't need to see or worry
	 *	about blocking retries.
	 */

	if (skb == NULL)
		goto out;

	if (pkt_sk(sk)->has_vnet_hdr) {
		struct virtio_net_hdr vnet_hdr = { 0 };

		err = -EINVAL;
		vnet_hdr_len = sizeof(vnet_hdr);
		if (len < vnet_hdr_len)
			goto out_free;

		len -= vnet_hdr_len;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			vnet_hdr.hdr_len = skb_headlen(skb);
			vnet_hdr.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else if (sinfo->gso_type & SKB_GSO_FCOE)
				goto out_free;
			else
				BUG();
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				vnet_hdr.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			vnet_hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			vnet_hdr.csum_start = skb_checksum_start_offset(skb);
			vnet_hdr.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			vnet_hdr.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		err = memcpy_toiovec(msg->msg_iov, (void *)&vnet_hdr,
				     vnet_hdr_len);
		if (err < 0)
			goto out_free;
	}

	/*
	 *	If the address length field is there to be filled in, we fill
	 *	it in now.
	 */

	sll = &PACKET_SKB_CB(skb)->sa.ll;
	if (sock->type == SOCK_PACKET)
		msg->msg_namelen = sizeof(struct sockaddr_pkt);
	else
		msg->msg_namelen = sll->sll_halen + offsetof(struct sockaddr_ll, sll_addr);

	/*
	 *	You lose any data beyond the buffer you gave. If it worries a
	 *	user program they can ask the device for its MTU anyway.
	 */

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free;

	sock_recv_ts_and_drops(msg, sk, skb);

	if (msg->msg_name)
		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
		       msg->msg_namelen);

	if (pkt_sk(sk)->auxdata) {
		struct tpacket_auxdata aux;

		aux.tp_status = TP_STATUS_USER;
		if (skb->ip_summed == CHECKSUM_PARTIAL)
			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
		aux.tp_len = PACKET_SKB_CB(skb)->origlen;
		aux.tp_snaplen = skb->len;
		aux.tp_mac = 0;
		aux.tp_net = skb_network_offset(skb);
		if (vlan_tx_tag_present(skb)) {
			aux.tp_vlan_tci = vlan_tx_tag_get(skb);
			aux.tp_status |= TP_STATUS_VLAN_VALID;
		} else {
			aux.tp_vlan_tci = 0;
		}
		aux.tp_padding = 0;
		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
	}

	/*
	 *	Free or return the buffer as appropriate. Again this
	 *	hides all the races and re-entrancy issues from us.
	 */
	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;
}
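
/*
 * A minimal sketch of consuming the PACKET_AUXDATA control message
 * emitted above ("msg" is an assumed struct msghdr filled by a
 * successful recvmsg() with a large enough msg_control buffer):
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *			// aux->tp_len is the original wire length
 *		}
 *	}
 */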

static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
			       int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk	= sock->sk;

	if (peer)
		return -EOPNOTSUPP;

	uaddr->sa_family = AF_PACKET;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
	if (dev)
		strncpy(uaddr->sa_data, dev->name, 14);
	else
		memset(uaddr->sa_data, 0, 14);
	rcu_read_unlock();
	*uaddr_len = sizeof(*uaddr);

	return 0;
}

static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
			  int *uaddr_len, int peer)
{
	struct net_device *dev;
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);

	if (peer)
		return -EOPNOTSUPP;

	sll->sll_family = AF_PACKET;
	sll->sll_ifindex = po->ifindex;
	sll->sll_protocol = po->num;
	sll->sll_pkttype = 0;
	rcu_read_lock();
	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
	if (dev) {
		sll->sll_hatype = dev->type;
		sll->sll_halen = dev->addr_len;
		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
	} else {
		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
		sll->sll_halen = 0;
	}
	rcu_read_unlock();
	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;

	return 0;
}

static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
			 int what)
{
	switch (i->type) {
	case PACKET_MR_MULTICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_mc_add(dev, i->addr);
		else
			return dev_mc_del(dev, i->addr);
		break;
	case PACKET_MR_PROMISC:
		return dev_set_promiscuity(dev, what);
		break;
	case PACKET_MR_ALLMULTI:
		return dev_set_allmulti(dev, what);
		break;
	case PACKET_MR_UNICAST:
		if (i->alen != dev->addr_len)
			return -EINVAL;
		if (what > 0)
			return dev_uc_add(dev, i->addr);
		else
			return dev_uc_del(dev, i->addr);
		break;
	default:
		break;
	}
	return 0;
}

static void packet_dev_mclist(struct net_device *dev, struct packet_mclist *i, int what)
{
	for ( ; i; i = i->next) {
		if (i->ifindex == dev->ifindex)
			packet_dev_mc(dev, i, what);
	}
}

static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml, *i;
	struct net_device *dev;
	int err;

	rtnl_lock();

	err = -ENODEV;
	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
	if (!dev)
		goto done;

	err = -EINVAL;
	if (mreq->mr_alen > dev->addr_len)
		goto done;

	err = -ENOBUFS;
	i = kmalloc(sizeof(*i), GFP_KERNEL);
	if (i == NULL)
		goto done;

	err = 0;
	for (ml = po->mclist; ml; ml = ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			ml->count++;
			/* Free the new element ... */
			kfree(i);
			goto done;
		}
	}

	i->type = mreq->mr_type;
	i->ifindex = mreq->mr_ifindex;
	i->alen = mreq->mr_alen;
	memcpy(i->addr, mreq->mr_address, i->alen);
	i->count = 1;
	i->next = po->mclist;
	po->mclist = i;
	err = packet_dev_mc(dev, i, 1);
	if (err) {
		po->mclist = i->next;
		kfree(i);
	}

done:
	rtnl_unlock();
	return err;
}

static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
{
	struct packet_mclist *ml, **mlp;

	rtnl_lock();

	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
		if (ml->ifindex == mreq->mr_ifindex &&
		    ml->type == mreq->mr_type &&
		    ml->alen == mreq->mr_alen &&
		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
			if (--ml->count == 0) {
				struct net_device *dev;
				*mlp = ml->next;
				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
				if (dev)
					packet_dev_mc(dev, ml, -1);
				kfree(ml);
			}
			rtnl_unlock();
			return 0;
		}
	}
	rtnl_unlock();
	return -EADDRNOTAVAIL;
}

static void packet_flush_mclist(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_mclist *ml;

	if (!po->mclist)
		return;

	rtnl_lock();
	while ((ml = po->mclist) != NULL) {
		struct net_device *dev;

		po->mclist = ml->next;
		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
		if (dev != NULL)
			packet_dev_mc(dev, ml, -1);
		kfree(ml);
	}
	rtnl_unlock();
}
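
/*
 * A minimal sketch of the membership API handled below, here putting
 * an interface into promiscuous mode ("fd" and "ifindex" are assumed):
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = ifindex,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 */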

static int
packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	int ret;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	switch (optname) {
	case PACKET_ADD_MEMBERSHIP:
	case PACKET_DROP_MEMBERSHIP:
	{
		struct packet_mreq_max mreq;
		int len = optlen;
		memset(&mreq, 0, sizeof(mreq));
		if (len < sizeof(struct packet_mreq))
			return -EINVAL;
		if (len > sizeof(mreq))
			len = sizeof(mreq);
		if (copy_from_user(&mreq, optval, len))
			return -EFAULT;
		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
			return -EINVAL;
		if (optname == PACKET_ADD_MEMBERSHIP)
			ret = packet_mc_add(sk, &mreq);
		else
			ret = packet_mc_drop(sk, &mreq);
		return ret;
	}

	case PACKET_RX_RING:
	case PACKET_TX_RING:
	{
		union tpacket_req_u req_u;
		int len;

		switch (po->tp_version) {
		case TPACKET_V1:
		case TPACKET_V2:
			len = sizeof(req_u.req);
			break;
		case TPACKET_V3:
		default:
			len = sizeof(req_u.req3);
			break;
		}
		if (optlen < len)
			return -EINVAL;
		if (pkt_sk(sk)->has_vnet_hdr)
			return -EINVAL;
		if (copy_from_user(&req_u.req, optval, len))
			return -EFAULT;
		return packet_set_ring(sk, &req_u, 0,
			optname == PACKET_TX_RING);
	}
	case PACKET_COPY_THRESH:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		pkt_sk(sk)->copy_thresh = val;
		return 0;
	}
	case PACKET_VERSION:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
		case TPACKET_V2:
		case TPACKET_V3:
			po->tp_version = val;
			return 0;
		default:
			return -EINVAL;
		}
	}
	case PACKET_RESERVE:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_reserve = val;
		return 0;
	}
	case PACKET_LOSS:
	{
		unsigned int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;
		po->tp_loss = !!val;
		return 0;
	}
	case PACKET_AUXDATA:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->auxdata = !!val;
		return 0;
	}
	case PACKET_ORIGDEV:
	{
		int val;

		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->origdev = !!val;
		return 0;
	}
	case PACKET_VNET_HDR:
	{
		int val;

		if (sock->type != SOCK_RAW)
			return -EINVAL;
		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
			return -EBUSY;
		if (optlen < sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->has_vnet_hdr = !!val;
		return 0;
	}
	case PACKET_TIMESTAMP:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		po->tp_tstamp = val;
		return 0;
	}
	case PACKET_FANOUT:
	{
		int val;

		if (optlen != sizeof(val))
			return -EINVAL;
		if (copy_from_user(&val, optval, sizeof(val)))
			return -EFAULT;

		return fanout_add(sk, val & 0xffff, val >> 16);
	}
	default:
		return -ENOPROTOOPT;
	}
}

static int packet_getsockopt(struct socket *sock, int level, int optname,
			     char __user *optval, int __user *optlen)
{
	int len;
	int val, lv = sizeof(val);
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	void *data = &val;
	struct tpacket_stats st;
	union tpacket_stats_u st_u;

	if (level != SOL_PACKET)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case PACKET_STATISTICS:
		spin_lock_bh(&sk->sk_receive_queue.lock);
		if (po->tp_version == TPACKET_V3) {
			lv = sizeof(struct tpacket_stats_v3);
			memcpy(&st_u.stats3, &po->stats,
			       sizeof(struct tpacket_stats));
			st_u.stats3.tp_freeze_q_cnt =
					po->stats_u.stats3.tp_freeze_q_cnt;
			st_u.stats3.tp_packets += po->stats.tp_drops;
			data = &st_u.stats3;
		} else {
			lv = sizeof(struct tpacket_stats);
			st = po->stats;
			st.tp_packets += st.tp_drops;
			data = &st;
		}
		memset(&po->stats, 0, sizeof(st));
		spin_unlock_bh(&sk->sk_receive_queue.lock);
		break;
	case PACKET_AUXDATA:
		val = po->auxdata;
		break;
	case PACKET_ORIGDEV:
		val = po->origdev;
		break;
	case PACKET_VNET_HDR:
		val = po->has_vnet_hdr;
		break;
	case PACKET_VERSION:
		val = po->tp_version;
		break;
	case PACKET_HDRLEN:
		if (len > sizeof(int))
			len = sizeof(int);
		if (copy_from_user(&val, optval, len))
			return -EFAULT;
		switch (val) {
		case TPACKET_V1:
			val = sizeof(struct tpacket_hdr);
			break;
		case TPACKET_V2:
			val = sizeof(struct tpacket2_hdr);
			break;
		case TPACKET_V3:
			val = sizeof(struct tpacket3_hdr);
			break;
		default:
			return -EINVAL;
		}
		break;
	case PACKET_RESERVE:
		val = po->tp_reserve;
		break;
	case PACKET_LOSS:
		val = po->tp_loss;
		break;
	case PACKET_TIMESTAMP:
		val = po->tp_tstamp;
		break;
	case PACKET_FANOUT:
		val = (po->fanout ?
		       ((u32)po->fanout->id |
			((u32)po->fanout->type << 16)) :
		       0);
		break;
	default:
		return -ENOPROTOOPT;
	}

	if (len > lv)
		len = lv;
	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, data, len))
		return -EFAULT;
	return 0;
}
3329
3330
3331static int packet_notifier(struct notifier_block *this, unsigned long msg, void *data)
3332{
3333	struct sock *sk;
3334	struct hlist_node *node;
3335	struct net_device *dev = data;
3336	struct net *net = dev_net(dev);
3337
3338	rcu_read_lock();
3339	sk_for_each_rcu(sk, node, &net->packet.sklist) {
3340		struct packet_sock *po = pkt_sk(sk);
3341
3342		switch (msg) {
3343		case NETDEV_UNREGISTER:
3344			if (po->mclist)
3345				packet_dev_mclist(dev, po->mclist, -1);
3346			/* fallthrough */
3347
3348		case NETDEV_DOWN:
3349			if (dev->ifindex == po->ifindex) {
3350				spin_lock(&po->bind_lock);
3351				if (po->running) {
3352					__unregister_prot_hook(sk, false);
3353					sk->sk_err = ENETDOWN;
3354					if (!sock_flag(sk, SOCK_DEAD))
3355						sk->sk_error_report(sk);
3356				}
3357				if (msg == NETDEV_UNREGISTER) {
 
3358					po->ifindex = -1;
3359					if (po->prot_hook.dev)
3360						dev_put(po->prot_hook.dev);
3361					po->prot_hook.dev = NULL;
3362				}
3363				spin_unlock(&po->bind_lock);
3364			}
3365			break;
3366		case NETDEV_UP:
3367			if (dev->ifindex == po->ifindex) {
3368				spin_lock(&po->bind_lock);
3369				if (po->num)
3370					register_prot_hook(sk);
3371				spin_unlock(&po->bind_lock);
3372			}
3373			break;
3374		}
3375	}
3376	rcu_read_unlock();
3377	return NOTIFY_DONE;
3378}
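
/* Net effect for a bound socket: both NETDEV_UNREGISTER and NETDEV_DOWN
 * unhook the running protocol handler and raise ENETDOWN through
 * sk_error_report(); NETDEV_UNREGISTER additionally clears po->ifindex
 * and drops the device reference, while NETDEV_UP re-registers the hook
 * if a protocol number is still bound.
 */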
3379
3380
3381static int packet_ioctl(struct socket *sock, unsigned int cmd,
3382			unsigned long arg)
3383{
3384	struct sock *sk = sock->sk;
3385
3386	switch (cmd) {
3387	case SIOCOUTQ:
3388	{
3389		int amount = sk_wmem_alloc_get(sk);
3390
3391		return put_user(amount, (int __user *)arg);
3392	}
3393	case SIOCINQ:
3394	{
3395		struct sk_buff *skb;
3396		int amount = 0;
3397
3398		spin_lock_bh(&sk->sk_receive_queue.lock);
3399		skb = skb_peek(&sk->sk_receive_queue);
3400		if (skb)
3401			amount = skb->len;
3402		spin_unlock_bh(&sk->sk_receive_queue.lock);
3403		return put_user(amount, (int __user *)arg);
3404	}
3405	case SIOCGSTAMP:
3406		return sock_get_timestamp(sk, (struct timeval __user *)arg);
3407	case SIOCGSTAMPNS:
3408		return sock_get_timestampns(sk, (struct timespec __user *)arg);
3409
3410#ifdef CONFIG_INET
3411	case SIOCADDRT:
3412	case SIOCDELRT:
3413	case SIOCDARP:
3414	case SIOCGARP:
3415	case SIOCSARP:
3416	case SIOCGIFADDR:
3417	case SIOCSIFADDR:
3418	case SIOCGIFBRDADDR:
3419	case SIOCSIFBRDADDR:
3420	case SIOCGIFNETMASK:
3421	case SIOCSIFNETMASK:
3422	case SIOCGIFDSTADDR:
3423	case SIOCSIFDSTADDR:
3424	case SIOCSIFFLAGS:
3425		return inet_dgram_ops.ioctl(sock, cmd, arg);
3426#endif
3427
3428	default:
3429		return -ENOIOCTLCMD;
3430	}
3431	return 0;
3432}
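
/* Illustrative userspace sketch (not part of this file): SIOCINQ here
 * reports the length of the packet at the head of the receive queue,
 * not the total number of queued bytes.
 *
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int next_len = 0;
 *	if (ioctl(fd, SIOCINQ, &next_len) == 0)
 *		printf("next packet: %d bytes\n", next_len);
 */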
3433
3434static unsigned int packet_poll(struct file *file, struct socket *sock,
3435				poll_table *wait)
3436{
3437	struct sock *sk = sock->sk;
3438	struct packet_sock *po = pkt_sk(sk);
3439	unsigned int mask = datagram_poll(file, sock, wait);
3440
3441	spin_lock_bh(&sk->sk_receive_queue.lock);
3442	if (po->rx_ring.pg_vec) {
3443		if (!packet_previous_rx_frame(po, &po->rx_ring,
3444			TP_STATUS_KERNEL))
3445			mask |= POLLIN | POLLRDNORM;
3446	}
3447	spin_unlock_bh(&sk->sk_receive_queue.lock);
3448	spin_lock_bh(&sk->sk_write_queue.lock);
3449	if (po->tx_ring.pg_vec) {
3450		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
3451			mask |= POLLOUT | POLLWRNORM;
3452	}
3453	spin_unlock_bh(&sk->sk_write_queue.lock);
3454	return mask;
3455}
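
/* Illustrative userspace sketch (not part of this file; the frame
 * pointer is hypothetical): a ring consumer typically spins on the
 * status word of the current RX slot and only calls poll() when the
 * ring runs dry, since POLLIN is raised above as soon as a frame is
 * no longer owned by the kernel.
 *
 *	struct tpacket2_hdr *hdr = frame;	// current slot in the mmap'ed ring
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 */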
3456
3457
3458/* Dirty? Well, I still have not learned a better way to account
3459 * for user mmaps.
3460 */
3461
3462static void packet_mm_open(struct vm_area_struct *vma)
3463{
3464	struct file *file = vma->vm_file;
3465	struct socket *sock = file->private_data;
3466	struct sock *sk = sock->sk;
3467
3468	if (sk)
3469		atomic_inc(&pkt_sk(sk)->mapped);
3470}
3471
3472static void packet_mm_close(struct vm_area_struct *vma)
3473{
3474	struct file *file = vma->vm_file;
3475	struct socket *sock = file->private_data;
3476	struct sock *sk = sock->sk;
3477
3478	if (sk)
3479		atomic_dec(&pkt_sk(sk)->mapped);
3480}
3481
3482static const struct vm_operations_struct packet_mmap_ops = {
3483	.open	=	packet_mm_open,
3484	.close	=	packet_mm_close,
3485};
3486
3487static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
3488			unsigned int len)
3489{
3490	int i;
3491
3492	for (i = 0; i < len; i++) {
3493		if (likely(pg_vec[i].buffer)) {
3494			if (is_vmalloc_addr(pg_vec[i].buffer))
3495				vfree(pg_vec[i].buffer);
3496			else
3497				free_pages((unsigned long)pg_vec[i].buffer,
3498					   order);
3499			pg_vec[i].buffer = NULL;
3500		}
3501	}
3502	kfree(pg_vec);
3503}
3504
3505static char *alloc_one_pg_vec_page(unsigned long order)
3506{
3507	char *buffer = NULL;
3508	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
3509			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
3510
3511	buffer = (char *) __get_free_pages(gfp_flags, order);
3512
3513	if (buffer)
3514		return buffer;
3515
3516	/*
3517	 * __get_free_pages failed, fall back to vmalloc
3518	 */
3519	buffer = vzalloc((1 << order) * PAGE_SIZE);
3520
3521	if (buffer)
3522		return buffer;
3523
3524	/*
3525	 * vmalloc failed, lets dig into swap here
3526	 */
3527	gfp_flags &= ~__GFP_NORETRY;
3528	buffer = (char *)__get_free_pages(gfp_flags, order);
3529	if (buffer)
3530		return buffer;
3531
3532	/*
3533	 * complete and utter failure
3534	 */
3535	return NULL;
3536}
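
/* The three-tier fallback above is deliberate: physically contiguous
 * pages are tried first with __GFP_NORETRY (fail fast, no reclaim
 * stall), then vmalloc'ed memory, and finally the page allocator again
 * with __GFP_NORETRY cleared, which may block while the VM reclaims or
 * swaps to satisfy the request.
 */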
3537
3538static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
3539{
3540	unsigned int block_nr = req->tp_block_nr;
3541	struct pgv *pg_vec;
3542	int i;
3543
3544	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
3545	if (unlikely(!pg_vec))
3546		goto out;
3547
3548	for (i = 0; i < block_nr; i++) {
3549		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
3550		if (unlikely(!pg_vec[i].buffer))
3551			goto out_free_pgvec;
3552	}
3553
3554out:
3555	return pg_vec;
3556
3557out_free_pgvec:
3558	free_pg_vec(pg_vec, order, block_nr);
3559	pg_vec = NULL;
3560	goto out;
3561}
3562
3563static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
3564		int closing, int tx_ring)
3565{
3566	struct pgv *pg_vec = NULL;
3567	struct packet_sock *po = pkt_sk(sk);
3568	int was_running, order = 0;
3569	struct packet_ring_buffer *rb;
3570	struct sk_buff_head *rb_queue;
3571	__be16 num;
3572	int err = -EINVAL;
3573	/* Alias req_u->req to keep code churn minimal */
3574	struct tpacket_req *req = &req_u->req;
3575
3576	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
3577	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
3578		WARN(1, "Tx-ring is not supported.\n");
3579		goto out;
3580	}
3581
3582	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
3583	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
3584
3585	err = -EBUSY;
3586	if (!closing) {
3587		if (atomic_read(&po->mapped))
3588			goto out;
3589		if (atomic_read(&rb->pending))
3590			goto out;
3591	}
3592
3593	if (req->tp_block_nr) {
3594		/* Sanity tests and some calculations */
3595		err = -EBUSY;
3596		if (unlikely(rb->pg_vec))
3597			goto out;
3598
3599		switch (po->tp_version) {
3600		case TPACKET_V1:
3601			po->tp_hdrlen = TPACKET_HDRLEN;
3602			break;
3603		case TPACKET_V2:
3604			po->tp_hdrlen = TPACKET2_HDRLEN;
3605			break;
3606		case TPACKET_V3:
3607			po->tp_hdrlen = TPACKET3_HDRLEN;
3608			break;
3609		}
3610
3611		err = -EINVAL;
3612		if (unlikely((int)req->tp_block_size <= 0))
3613			goto out;
3614		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
3615			goto out;
3616		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
3617					po->tp_reserve))
3618			goto out;
3619		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
3620			goto out;
3621
3622		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
3623		if (unlikely(rb->frames_per_block <= 0))
3624			goto out;
3625		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
3626					req->tp_frame_nr))
3627			goto out;
3628
3629		err = -ENOMEM;
3630		order = get_order(req->tp_block_size);
3631		pg_vec = alloc_pg_vec(req, order);
3632		if (unlikely(!pg_vec))
3633			goto out;
3634		switch (po->tp_version) {
3635		case TPACKET_V3:
3636		/* Transmit path is not supported. We checked
3637		 * it above but just being paranoid
3638		 */
3639			if (!tx_ring)
3640				init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
3641			break;
3642		default:
3643			break;
3644		}
3645	}
3646	/* Done */
3647	else {
3648		err = -EINVAL;
3649		if (unlikely(req->tp_frame_nr))
3650			goto out;
3651	}
3652
3653	lock_sock(sk);
3654
3655	/* Detach socket from network */
3656	spin_lock(&po->bind_lock);
3657	was_running = po->running;
3658	num = po->num;
3659	if (was_running) {
3660		po->num = 0;
3661		__unregister_prot_hook(sk, false);
3662	}
3663	spin_unlock(&po->bind_lock);
3664
3665	synchronize_net();
3666
3667	err = -EBUSY;
3668	mutex_lock(&po->pg_vec_lock);
3669	if (closing || atomic_read(&po->mapped) == 0) {
3670		err = 0;
3671		spin_lock_bh(&rb_queue->lock);
3672		swap(rb->pg_vec, pg_vec);
3673		rb->frame_max = (req->tp_frame_nr - 1);
3674		rb->head = 0;
3675		rb->frame_size = req->tp_frame_size;
3676		spin_unlock_bh(&rb_queue->lock);
3677
3678		swap(rb->pg_vec_order, order);
3679		swap(rb->pg_vec_len, req->tp_block_nr);
3680
3681		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
3682		po->prot_hook.func = (po->rx_ring.pg_vec) ?
3683						tpacket_rcv : packet_rcv;
3684		skb_queue_purge(rb_queue);
3685		if (atomic_read(&po->mapped))
3686			pr_err("packet_mmap: vma is busy: %d\n",
3687			       atomic_read(&po->mapped));
3688	}
3689	mutex_unlock(&po->pg_vec_lock);
3690
3691	spin_lock(&po->bind_lock);
3692	if (was_running) {
3693		po->num = num;
3694		register_prot_hook(sk);
3695	}
3696	spin_unlock(&po->bind_lock);
3697	if (closing && (po->tp_version > TPACKET_V2)) {
3698		/* Because we don't support block-based V3 on tx-ring */
3699		if (!tx_ring)
3700			prb_shutdown_retire_blk_timer(po, tx_ring, rb_queue);
3701	}
3702	release_sock(sk);
3703
3704	if (pg_vec)
3705		free_pg_vec(pg_vec, order, req->tp_block_nr);
3706out:
3707	return err;
3708}
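
/* Illustrative userspace sketch (not part of this file): setting up a
 * v2 RX ring.  Per the checks above, tp_block_size must be a non-zero
 * multiple of PAGE_SIZE, tp_frame_size must be TPACKET_ALIGNMENT-aligned
 * and hold the header plus tp_reserve, and tp_frame_nr must equal
 * frames-per-block times tp_block_nr.
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = (4096 / 2048) * 64,
 *	};
 *	int ver = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */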
3709
3710static int packet_mmap(struct file *file, struct socket *sock,
3711		struct vm_area_struct *vma)
3712{
3713	struct sock *sk = sock->sk;
3714	struct packet_sock *po = pkt_sk(sk);
3715	unsigned long size, expected_size;
3716	struct packet_ring_buffer *rb;
3717	unsigned long start;
3718	int err = -EINVAL;
3719	int i;
3720
3721	if (vma->vm_pgoff)
3722		return -EINVAL;
3723
3724	mutex_lock(&po->pg_vec_lock);
3725
3726	expected_size = 0;
3727	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3728		if (rb->pg_vec) {
3729			expected_size += rb->pg_vec_len
3730						* rb->pg_vec_pages
3731						* PAGE_SIZE;
3732		}
3733	}
3734
3735	if (expected_size == 0)
3736		goto out;
3737
3738	size = vma->vm_end - vma->vm_start;
3739	if (size != expected_size)
3740		goto out;
3741
3742	start = vma->vm_start;
3743	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
3744		if (rb->pg_vec == NULL)
3745			continue;
3746
3747		for (i = 0; i < rb->pg_vec_len; i++) {
3748			struct page *page;
3749			void *kaddr = rb->pg_vec[i].buffer;
3750			int pg_num;
3751
3752			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
3753				page = pgv_to_page(kaddr);
3754				err = vm_insert_page(vma, start, page);
3755				if (unlikely(err))
3756					goto out;
3757				start += PAGE_SIZE;
3758				kaddr += PAGE_SIZE;
3759			}
3760		}
3761	}
3762
3763	atomic_inc(&po->mapped);
3764	vma->vm_ops = &packet_mmap_ops;
3765	err = 0;
3766
3767out:
3768	mutex_unlock(&po->pg_vec_lock);
3769	return err;
3770}
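
/* Illustrative userspace sketch (not part of this file): both rings are
 * mapped by a single call at offset 0 (vm_pgoff must be zero above),
 * the RX ring first and the TX ring directly after it, and the mapping
 * length must be exactly the sum of the configured ring sizes.
 *
 *	size_t len = req.tp_block_size * req.tp_block_nr;
 *	void *ring = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */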
3771
3772static const struct proto_ops packet_ops_spkt = {
3773	.family =	PF_PACKET,
3774	.owner =	THIS_MODULE,
3775	.release =	packet_release,
3776	.bind =		packet_bind_spkt,
3777	.connect =	sock_no_connect,
3778	.socketpair =	sock_no_socketpair,
3779	.accept =	sock_no_accept,
3780	.getname =	packet_getname_spkt,
3781	.poll =		datagram_poll,
3782	.ioctl =	packet_ioctl,
3783	.listen =	sock_no_listen,
3784	.shutdown =	sock_no_shutdown,
3785	.setsockopt =	sock_no_setsockopt,
3786	.getsockopt =	sock_no_getsockopt,
3787	.sendmsg =	packet_sendmsg_spkt,
3788	.recvmsg =	packet_recvmsg,
3789	.mmap =		sock_no_mmap,
3790	.sendpage =	sock_no_sendpage,
3791};
3792
3793static const struct proto_ops packet_ops = {
3794	.family =	PF_PACKET,
3795	.owner =	THIS_MODULE,
3796	.release =	packet_release,
3797	.bind =		packet_bind,
3798	.connect =	sock_no_connect,
3799	.socketpair =	sock_no_socketpair,
3800	.accept =	sock_no_accept,
3801	.getname =	packet_getname,
3802	.poll =		packet_poll,
3803	.ioctl =	packet_ioctl,
3804	.listen =	sock_no_listen,
3805	.shutdown =	sock_no_shutdown,
3806	.setsockopt =	packet_setsockopt,
3807	.getsockopt =	packet_getsockopt,
3808	.sendmsg =	packet_sendmsg,
3809	.recvmsg =	packet_recvmsg,
3810	.mmap =		packet_mmap,
3811	.sendpage =	sock_no_sendpage,
3812};
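
/* Note the contrast with packet_ops_spkt above: SOCK_PACKET
 * compatibility sockets get no socket options, no mmap'ed rings and
 * plain datagram_poll(), while PF_PACKET sockets use the ring-aware
 * packet_poll() and packet_mmap().
 */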
3813
3814static const struct net_proto_family packet_family_ops = {
3815	.family =	PF_PACKET,
3816	.create =	packet_create,
3817	.owner	=	THIS_MODULE,
3818};
3819
3820static struct notifier_block packet_netdev_notifier = {
3821	.notifier_call =	packet_notifier,
3822};
3823
3824#ifdef CONFIG_PROC_FS
3825
3826static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
3827	__acquires(RCU)
3828{
3829	struct net *net = seq_file_net(seq);
3830
3831	rcu_read_lock();
3832	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
3833}
3834
3835static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3836{
3837	struct net *net = seq_file_net(seq);
3838	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
3839}
3840
3841static void packet_seq_stop(struct seq_file *seq, void *v)
3842	__releases(RCU)
3843{
3844	rcu_read_unlock();
3845}
3846
3847static int packet_seq_show(struct seq_file *seq, void *v)
3848{
3849	if (v == SEQ_START_TOKEN)
3850		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
3851	else {
3852		struct sock *s = sk_entry(v);
3853		const struct packet_sock *po = pkt_sk(s);
3854
3855		seq_printf(seq,
3856			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
3857			   s,
3858			   atomic_read(&s->sk_refcnt),
3859			   s->sk_type,
3860			   ntohs(po->num),
3861			   po->ifindex,
3862			   po->running,
3863			   atomic_read(&s->sk_rmem_alloc),
3864			   sock_i_uid(s),
3865			   sock_i_ino(s));
3866	}
3867
3868	return 0;
3869}
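
/* A purely hypothetical /proc/net/packet line matching the format
 * string above (all values invented for illustration):
 *
 *	sk       RefCnt Type Proto  Iface R Rmem   User   Inode
 *	ffff880034a0d000 3      3    0003   2     1 0      0      9471
 */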
3870
3871static const struct seq_operations packet_seq_ops = {
3872	.start	= packet_seq_start,
3873	.next	= packet_seq_next,
3874	.stop	= packet_seq_stop,
3875	.show	= packet_seq_show,
3876};
3877
3878static int packet_seq_open(struct inode *inode, struct file *file)
3879{
3880	return seq_open_net(inode, file, &packet_seq_ops,
3881			    sizeof(struct seq_net_private));
3882}
3883
3884static const struct file_operations packet_seq_fops = {
3885	.owner		= THIS_MODULE,
3886	.open		= packet_seq_open,
3887	.read		= seq_read,
3888	.llseek		= seq_lseek,
3889	.release	= seq_release_net,
3890};
3891
3892#endif
3893
3894static int __net_init packet_net_init(struct net *net)
3895{
3896	spin_lock_init(&net->packet.sklist_lock);
3897	INIT_HLIST_HEAD(&net->packet.sklist);
3898
3899	if (!proc_net_fops_create(net, "packet", 0, &packet_seq_fops))
3900		return -ENOMEM;
3901
3902	return 0;
3903}
3904
3905static void __net_exit packet_net_exit(struct net *net)
3906{
3907	proc_net_remove(net, "packet");
3908}
3909
3910static struct pernet_operations packet_net_ops = {
3911	.init = packet_net_init,
3912	.exit = packet_net_exit,
3913};
3914
3915
3916static void __exit packet_exit(void)
3917{
3918	unregister_netdevice_notifier(&packet_netdev_notifier);
3919	unregister_pernet_subsys(&packet_net_ops);
3920	sock_unregister(PF_PACKET);
3921	proto_unregister(&packet_proto);
3922}
3923
3924static int __init packet_init(void)
3925{
3926	int rc = proto_register(&packet_proto, 0);
3927
3928	if (rc != 0)
3929		goto out;
3930
3931	sock_register(&packet_family_ops);
3932	register_pernet_subsys(&packet_net_ops);
3933	register_netdevice_notifier(&packet_netdev_notifier);
3934out:
3935	return rc;
3936}
3937
3938module_init(packet_init);
3939module_exit(packet_exit);
3940MODULE_LICENSE("GPL");
3941MODULE_ALIAS_NETPROTO(PF_PACKET);