   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		PACKET - implements raw packet sockets.
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  11 *
  12 * Fixes:
  13 *		Alan Cox	:	verify_area() now used correctly
  14 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  15 *		Alan Cox	:	tidied skbuff lists.
  16 *		Alan Cox	:	Now uses generic datagram routines I
  17 *					added. Also fixed the peek/read crash
  18 *					from all old Linux datagram code.
  19 *		Alan Cox	:	Uses the improved datagram code.
  20 *		Alan Cox	:	Added NULL's for socket options.
  21 *		Alan Cox	:	Re-commented the code.
  22 *		Alan Cox	:	Use new kernel side addressing
  23 *		Rob Janssen	:	Correct MTU usage.
  24 *		Dave Platt	:	Counter leaks caused by incorrect
  25 *					interrupt locking and some slightly
  26 *					dubious gcc output. Can you read
  27 *					compiler: it said _VOLATILE_
  28 *	Richard Kooijman	:	Timestamp fixes.
  29 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  30 *		Alan Cox	:	sendmsg/recvmsg support.
  31 *		Alan Cox	:	Protocol setting support
  32 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  33 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  34 *	Michal Ostrowski        :       Module initialization cleanup.
  35 *         Ulises Alonso        :       Frame number limit removal and
  36 *                                      packet_set_ring memory leak.
  37 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  38 *					The convention is that longer addresses
  39 *					will simply extend the hardware address
  40 *					byte arrays at the end of sockaddr_ll
  41 *					and packet_mreq.
  42 *		Johann Baudy	:	Added TX RING.
  43 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  44 *					layer.
  45 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  46 *
  47 *
  48 *		This program is free software; you can redistribute it and/or
  49 *		modify it under the terms of the GNU General Public License
  50 *		as published by the Free Software Foundation; either version
  51 *		2 of the License, or (at your option) any later version.
  52 *
  53 */
  54
  55#include <linux/types.h>
  56#include <linux/mm.h>
  57#include <linux/capability.h>
  58#include <linux/fcntl.h>
  59#include <linux/socket.h>
  60#include <linux/in.h>
  61#include <linux/inet.h>
  62#include <linux/netdevice.h>
  63#include <linux/if_packet.h>
  64#include <linux/wireless.h>
  65#include <linux/kernel.h>
  66#include <linux/kmod.h>
  67#include <linux/slab.h>
  68#include <linux/vmalloc.h>
  69#include <net/net_namespace.h>
  70#include <net/ip.h>
  71#include <net/protocol.h>
  72#include <linux/skbuff.h>
  73#include <net/sock.h>
  74#include <linux/errno.h>
  75#include <linux/timer.h>
  76#include <asm/uaccess.h>
  77#include <asm/ioctls.h>
  78#include <asm/page.h>
  79#include <asm/cacheflush.h>
  80#include <asm/io.h>
  81#include <linux/proc_fs.h>
  82#include <linux/seq_file.h>
  83#include <linux/poll.h>
  84#include <linux/module.h>
  85#include <linux/init.h>
  86#include <linux/mutex.h>
  87#include <linux/if_vlan.h>
  88#include <linux/virtio_net.h>
  89#include <linux/errqueue.h>
  90#include <linux/net_tstamp.h>
  91#include <linux/percpu.h>
  92#ifdef CONFIG_INET
  93#include <net/inet_common.h>
  94#endif
  95#include <linux/bpf.h>
  96
  97#include "internal.h"
  98
  99/*
 100   Assumptions:
  101   - if the device has no dev->hard_header routine, it adds and removes the ll
  102     header inside itself. In this case the ll header is invisible outside of
  103     the device, but higher levels should still reserve dev->hard_header_len.
  104     Some devices are clever enough to reallocate the skb when the header
  105     will not fit into the reserved space (tunnels); others are not
  106     (PPP).
  107   - the packet socket receives packets with the ll header pulled off,
  108     so SOCK_RAW should push it back.
 109
 110On receive:
 111-----------
 112
 113Incoming, dev->hard_header!=NULL
 114   mac_header -> ll header
 115   data       -> data
 116
 117Outgoing, dev->hard_header!=NULL
 118   mac_header -> ll header
 119   data       -> ll header
 120
 121Incoming, dev->hard_header==NULL
  122   mac_header -> UNKNOWN position. It very likely points to the ll
  123		 header.  PPP does this, which is wrong because it introduces
  124		 asymmetry between the rx and tx paths.
 125   data       -> data
 126
 127Outgoing, dev->hard_header==NULL
 128   mac_header -> data. ll header is still not built!
 129   data       -> data
 130
  131Summary
  132  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
 133
 134
 135On transmit:
 136------------
 137
 138dev->hard_header != NULL
 139   mac_header -> ll header
 140   data       -> ll header
 141
 142dev->hard_header == NULL (ll header is added by device, we cannot control it)
 143   mac_header -> data
 144   data       -> data
 145
  146   We should set nh.raw on output to the correct position;
  147   the packet classifier depends on it.
 148 */
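/* Illustration (user-space sketch, not part of this file): what the layout
 * above means for applications.  A SOCK_RAW packet socket hands the
 * link-layer header back at the start of the buffer, while SOCK_DGRAM
 * strips it and only describes it through sockaddr_ll.  Error handling
 * is omitted for brevity.
 */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <sys/socket.h>

static void rx_raw_example(void)
{
	unsigned char buf[2048];
	struct sockaddr_ll sll;
	socklen_t slen = sizeof(sll);
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	/* With SOCK_RAW, buf begins with the ll header (a 14-byte Ethernet
	 * header on most devices); with SOCK_DGRAM it would begin at the
	 * network header and sll.sll_addr would carry the source MAC.
	 */
	recvfrom(fd, buf, sizeof(buf), 0, (struct sockaddr *)&sll, &slen);
}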
 149
 150/* Private packet socket structures. */
 151
 152/* identical to struct packet_mreq except it has
 153 * a longer address field.
 154 */
 155struct packet_mreq_max {
 156	int		mr_ifindex;
 157	unsigned short	mr_type;
 158	unsigned short	mr_alen;
 159	unsigned char	mr_address[MAX_ADDR_LEN];
 160};
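/* User-space counterpart (sketch, not part of this file): memberships are
 * requested with the regular struct packet_mreq from <linux/if_packet.h>;
 * packet_mreq_max above just leaves room for longer hardware addresses.
 * fd is assumed to be an already-created AF_PACKET socket.
 */
#include <linux/if_packet.h>
#include <net/if.h>
#include <string.h>
#include <sys/socket.h>

static int enable_promisc(int fd, const char *ifname)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = if_nametoindex(ifname);
	mreq.mr_type = PACKET_MR_PROMISC;	/* no mr_address needed */

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}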
 161
 162union tpacket_uhdr {
 163	struct tpacket_hdr  *h1;
 164	struct tpacket2_hdr *h2;
 165	struct tpacket3_hdr *h3;
 166	void *raw;
 167};
 168
 169static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 170		int closing, int tx_ring);
 171
 172#define V3_ALIGNMENT	(8)
 173
 174#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 175
 176#define BLK_PLUS_PRIV(sz_of_priv) \
 177	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 178
 179#define PGV_FROM_VMALLOC 1
 180
 181#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 182#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 183#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 184#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 185#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 186#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 187#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 188
 189struct packet_sock;
 190static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
 191static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 192		       struct packet_type *pt, struct net_device *orig_dev);
 193
 194static void *packet_previous_frame(struct packet_sock *po,
 195		struct packet_ring_buffer *rb,
 196		int status);
 197static void packet_increment_head(struct packet_ring_buffer *buff);
 198static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
 199			struct tpacket_block_desc *);
 200static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 201			struct packet_sock *);
 202static void prb_retire_current_block(struct tpacket_kbdq_core *,
 203		struct packet_sock *, unsigned int status);
 204static int prb_queue_frozen(struct tpacket_kbdq_core *);
 205static void prb_open_block(struct tpacket_kbdq_core *,
 206		struct tpacket_block_desc *);
 207static void prb_retire_rx_blk_timer_expired(unsigned long);
 208static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 209static void prb_init_blk_timer(struct packet_sock *,
 210		struct tpacket_kbdq_core *,
 211		void (*func) (unsigned long));
 212static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 213static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 214		struct tpacket3_hdr *);
 215static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 216		struct tpacket3_hdr *);
 217static void packet_flush_mclist(struct sock *sk);
 218
 219struct packet_skb_cb {
 220	union {
 221		struct sockaddr_pkt pkt;
 222		union {
 223			/* Trick: alias skb original length with
 224			 * ll.sll_family and ll.protocol in order
 225			 * to save room.
 226			 */
 227			unsigned int origlen;
 228			struct sockaddr_ll ll;
 229		};
 230	} sa;
 231};
 232
 233#define vio_le() virtio_legacy_is_little_endian()
 234
 235#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 236
 237#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 238#define GET_PBLOCK_DESC(x, bid)	\
 239	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 240#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 241	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 242#define GET_NEXT_PRB_BLK_NUM(x) \
 243	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 244	((x)->kactive_blk_num+1) : 0)
 245
 246static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 247static void __fanout_link(struct sock *sk, struct packet_sock *po);
 248
 249static int packet_direct_xmit(struct sk_buff *skb)
 250{
 251	struct net_device *dev = skb->dev;
 252	netdev_features_t features;
 253	struct netdev_queue *txq;
 254	int ret = NETDEV_TX_BUSY;
 255
 256	if (unlikely(!netif_running(dev) ||
 257		     !netif_carrier_ok(dev)))
 258		goto drop;
 259
 260	features = netif_skb_features(skb);
 261	if (skb_needs_linearize(skb, features) &&
 262	    __skb_linearize(skb))
 263		goto drop;
 264
 265	txq = skb_get_tx_queue(dev, skb);
 266
 267	local_bh_disable();
 268
 269	HARD_TX_LOCK(dev, txq, smp_processor_id());
 270	if (!netif_xmit_frozen_or_drv_stopped(txq))
 271		ret = netdev_start_xmit(skb, dev, txq, false);
 272	HARD_TX_UNLOCK(dev, txq);
 273
 274	local_bh_enable();
 275
 276	if (!dev_xmit_complete(ret))
 277		kfree_skb(skb);
 278
 279	return ret;
 280drop:
 281	atomic_long_inc(&dev->tx_dropped);
 282	kfree_skb(skb);
 283	return NET_XMIT_DROP;
 284}
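/* Sketch (user space, not part of this file): packet_direct_xmit() above is
 * only reached when the socket opted out of the qdisc layer via the
 * PACKET_QDISC_BYPASS option (available since Linux 3.14).
 */
#include <linux/if_packet.h>
#include <sys/socket.h>

static int enable_qdisc_bypass(int fd)
{
	int one = 1;

	/* Frames then go straight to the driver; drops are accounted only
	 * in the device's tx_dropped counter, as seen in the code above.
	 */
	return setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
			  &one, sizeof(one));
}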
 285
 286static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 287{
 288	struct net_device *dev;
 289
 290	rcu_read_lock();
 291	dev = rcu_dereference(po->cached_dev);
 292	if (likely(dev))
 293		dev_hold(dev);
 294	rcu_read_unlock();
 295
 296	return dev;
 297}
 298
 299static void packet_cached_dev_assign(struct packet_sock *po,
 300				     struct net_device *dev)
 301{
 302	rcu_assign_pointer(po->cached_dev, dev);
 303}
 304
 305static void packet_cached_dev_reset(struct packet_sock *po)
 306{
 307	RCU_INIT_POINTER(po->cached_dev, NULL);
 308}
 309
 310static bool packet_use_direct_xmit(const struct packet_sock *po)
 311{
 312	return po->xmit == packet_direct_xmit;
 313}
 314
 315static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 316{
 317	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
 318}
 319
 320static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 321{
 322	const struct net_device_ops *ops = dev->netdev_ops;
 323	u16 queue_index;
 324
 325	if (ops->ndo_select_queue) {
 326		queue_index = ops->ndo_select_queue(dev, skb, NULL,
 327						    __packet_pick_tx_queue);
 328		queue_index = netdev_cap_txqueue(dev, queue_index);
 329	} else {
 330		queue_index = __packet_pick_tx_queue(dev, skb);
 331	}
 332
 333	skb_set_queue_mapping(skb, queue_index);
 334}
 335
 336/* register_prot_hook must be invoked with the po->bind_lock held,
 337 * or from a context in which asynchronous accesses to the packet
 338 * socket is not possible (packet_create()).
 339 */
 340static void register_prot_hook(struct sock *sk)
 341{
 342	struct packet_sock *po = pkt_sk(sk);
 343
 344	if (!po->running) {
 345		if (po->fanout)
 346			__fanout_link(sk, po);
 347		else
 348			dev_add_pack(&po->prot_hook);
 349
 350		sock_hold(sk);
 351		po->running = 1;
 352	}
 353}
 354
 355/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 356 * held.   If the sync parameter is true, we will temporarily drop
 357 * the po->bind_lock and do a synchronize_net to make sure no
 358 * asynchronous packet processing paths still refer to the elements
 359 * of po->prot_hook.  If the sync parameter is false, it is the
 360 * callers responsibility to take care of this.
 361 */
 362static void __unregister_prot_hook(struct sock *sk, bool sync)
 363{
 364	struct packet_sock *po = pkt_sk(sk);
 365
 366	po->running = 0;
 367
 368	if (po->fanout)
 369		__fanout_unlink(sk, po);
 370	else
 371		__dev_remove_pack(&po->prot_hook);
 372
 373	__sock_put(sk);
 374
 375	if (sync) {
 376		spin_unlock(&po->bind_lock);
 377		synchronize_net();
 378		spin_lock(&po->bind_lock);
 379	}
 380}
 381
 382static void unregister_prot_hook(struct sock *sk, bool sync)
 383{
 384	struct packet_sock *po = pkt_sk(sk);
 385
 386	if (po->running)
 387		__unregister_prot_hook(sk, sync);
 388}
 389
 390static inline struct page * __pure pgv_to_page(void *addr)
 391{
 392	if (is_vmalloc_addr(addr))
 393		return vmalloc_to_page(addr);
 394	return virt_to_page(addr);
 395}
 396
 397static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 398{
 399	union tpacket_uhdr h;
 400
 401	h.raw = frame;
 402	switch (po->tp_version) {
 403	case TPACKET_V1:
 404		h.h1->tp_status = status;
 405		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 406		break;
 407	case TPACKET_V2:
 408		h.h2->tp_status = status;
 409		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 410		break;
 411	case TPACKET_V3:
 412	default:
 413		WARN(1, "TPACKET version not supported.\n");
 414		BUG();
 415	}
 416
 417	smp_wmb();
 418}
 419
 420static int __packet_get_status(struct packet_sock *po, void *frame)
 421{
 422	union tpacket_uhdr h;
 423
 424	smp_rmb();
 425
 426	h.raw = frame;
 427	switch (po->tp_version) {
 428	case TPACKET_V1:
 429		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 430		return h.h1->tp_status;
 431	case TPACKET_V2:
 432		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 433		return h.h2->tp_status;
 434	case TPACKET_V3:
 435	default:
 436		WARN(1, "TPACKET version not supported.\n");
 437		BUG();
 438		return 0;
 439	}
 440}
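/* Sketch (user space, not part of this file): the tp_status word written by
 * __packet_set_status() and read by __packet_get_status() is the kernel half
 * of the handshake with the mmapped ring.  The other half, for a TPACKET_V2
 * RX ring, looks roughly like this; ring, frame_size and frame_nr are assumed
 * to come from an earlier PACKET_RX_RING + mmap() setup.
 */
#include <linux/if_packet.h>
#include <poll.h>

static void drain_ring_v2(int fd, char *ring, unsigned int frame_size,
			  unsigned int frame_nr)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	unsigned int i = 0;

	for (;;) {
		struct tpacket2_hdr *hdr =
			(struct tpacket2_hdr *)(ring + i * frame_size);

		if (!(hdr->tp_status & TP_STATUS_USER)) {
			poll(&pfd, 1, -1);		/* wait for the kernel */
			continue;
		}
		/* Packet data starts at (char *)hdr + hdr->tp_mac. */
		__sync_synchronize();			/* pairs with the kernel barriers */
		hdr->tp_status = TP_STATUS_KERNEL;	/* hand the slot back */
		i = (i + 1) % frame_nr;
	}
}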
 441
 442static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
 443				   unsigned int flags)
 444{
 445	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 446
 447	if (shhwtstamps &&
 448	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 449	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
 450		return TP_STATUS_TS_RAW_HARDWARE;
 451
 452	if (ktime_to_timespec_cond(skb->tstamp, ts))
 453		return TP_STATUS_TS_SOFTWARE;
 454
 455	return 0;
 456}
 457
 458static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 459				    struct sk_buff *skb)
 460{
 461	union tpacket_uhdr h;
 462	struct timespec ts;
 463	__u32 ts_status;
 464
 465	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 466		return 0;
 467
 468	h.raw = frame;
 469	switch (po->tp_version) {
 470	case TPACKET_V1:
 471		h.h1->tp_sec = ts.tv_sec;
 472		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 473		break;
 474	case TPACKET_V2:
 475		h.h2->tp_sec = ts.tv_sec;
 476		h.h2->tp_nsec = ts.tv_nsec;
 477		break;
 478	case TPACKET_V3:
 479	default:
 480		WARN(1, "TPACKET version not supported.\n");
 481		BUG();
 482	}
 483
 484	/* one flush is safe, as both fields always lie on the same cacheline */
 485	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 486	smp_wmb();
 487
 488	return ts_status;
 489}
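/* Sketch (user space, not part of this file): po->tp_tstamp above is set with
 * the PACKET_TIMESTAMP option; requesting raw hardware stamps makes the ring
 * report TP_STATUS_TS_RAW_HARDWARE when the driver supplied one.  The NIC
 * itself must also have hardware timestamping enabled (e.g. via SIOCSHWTSTAMP).
 */
#include <linux/if_packet.h>
#include <linux/net_tstamp.h>
#include <sys/socket.h>

static int request_hw_timestamps(int fd)
{
	int req = SOF_TIMESTAMPING_RAW_HARDWARE;

	return setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
}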
 490
 491static void *packet_lookup_frame(struct packet_sock *po,
 492		struct packet_ring_buffer *rb,
 493		unsigned int position,
 494		int status)
 495{
 496	unsigned int pg_vec_pos, frame_offset;
 497	union tpacket_uhdr h;
 498
 499	pg_vec_pos = position / rb->frames_per_block;
 500	frame_offset = position % rb->frames_per_block;
 501
 502	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 503		(frame_offset * rb->frame_size);
 504
 505	if (status != __packet_get_status(po, h.raw))
 506		return NULL;
 507
 508	return h.raw;
 509}
 510
 511static void *packet_current_frame(struct packet_sock *po,
 512		struct packet_ring_buffer *rb,
 513		int status)
 514{
 515	return packet_lookup_frame(po, rb, rb->head, status);
 516}
 517
 518static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 519{
 520	del_timer_sync(&pkc->retire_blk_timer);
 521}
 522
 523static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 524		struct sk_buff_head *rb_queue)
 525{
 526	struct tpacket_kbdq_core *pkc;
 527
 528	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 529
 530	spin_lock_bh(&rb_queue->lock);
 531	pkc->delete_blk_timer = 1;
 532	spin_unlock_bh(&rb_queue->lock);
 533
 534	prb_del_retire_blk_timer(pkc);
 535}
 536
 537static void prb_init_blk_timer(struct packet_sock *po,
 538		struct tpacket_kbdq_core *pkc,
 539		void (*func) (unsigned long))
 540{
 541	init_timer(&pkc->retire_blk_timer);
 542	pkc->retire_blk_timer.data = (long)po;
 543	pkc->retire_blk_timer.function = func;
 544	pkc->retire_blk_timer.expires = jiffies;
 545}
 546
 547static void prb_setup_retire_blk_timer(struct packet_sock *po)
 548{
 549	struct tpacket_kbdq_core *pkc;
 550
 551	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 552	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
 553}
 554
 555static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 556				int blk_size_in_bytes)
 557{
 558	struct net_device *dev;
 559	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
 560	struct ethtool_link_ksettings ecmd;
 561	int err;
 562
 563	rtnl_lock();
 564	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 565	if (unlikely(!dev)) {
 566		rtnl_unlock();
 567		return DEFAULT_PRB_RETIRE_TOV;
 568	}
 569	err = __ethtool_get_link_ksettings(dev, &ecmd);
 570	rtnl_unlock();
 571	if (!err) {
 572		/*
 573		 * If the link speed is so slow you don't really
 574		 * need to worry about perf anyways
 575		 */
 576		if (ecmd.base.speed < SPEED_1000 ||
 577		    ecmd.base.speed == SPEED_UNKNOWN) {
 578			return DEFAULT_PRB_RETIRE_TOV;
 579		} else {
 580			msec = 1;
 581			div = ecmd.base.speed / 1000;
 582		}
 583	}
 584
 585	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 586
 587	if (div)
 588		mbits /= div;
 589
 590	tmo = mbits * msec;
 591
 592	if (div)
 593		return tmo+1;
 594	return tmo;
 595}
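/* Worked example (editorial, not in the original source): with a 1 MiB block,
 * mbits = (1048576 * 8) / (1024 * 1024) = 8.  On a 1 Gbit/s link, div = 1 and
 * msec = 1, so tmo = 8 and the function returns 9 ms -- roughly the time it
 * takes to fill one block, matching the "~8 ms" figure in the timer-logic
 * comment below.  On a 10 Gbit/s link, div = 10, the integer division yields
 * mbits = 0 and the function returns 1 ms.  Links slower than 1 Gbit/s (or of
 * unknown speed) always get DEFAULT_PRB_RETIRE_TOV.
 */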
 596
 597static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 598			union tpacket_req_u *req_u)
 599{
 600	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 601}
 602
 603static void init_prb_bdqc(struct packet_sock *po,
 604			struct packet_ring_buffer *rb,
 605			struct pgv *pg_vec,
 606			union tpacket_req_u *req_u)
 607{
 608	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 609	struct tpacket_block_desc *pbd;
 610
 611	memset(p1, 0x0, sizeof(*p1));
 612
 613	p1->knxt_seq_num = 1;
 614	p1->pkbdq = pg_vec;
 615	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 616	p1->pkblk_start	= pg_vec[0].buffer;
 617	p1->kblk_size = req_u->req3.tp_block_size;
 618	p1->knum_blocks	= req_u->req3.tp_block_nr;
 619	p1->hdrlen = po->tp_hdrlen;
 620	p1->version = po->tp_version;
 621	p1->last_kactive_blk_num = 0;
 622	po->stats.stats3.tp_freeze_q_cnt = 0;
 623	if (req_u->req3.tp_retire_blk_tov)
 624		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 625	else
 626		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 627						req_u->req3.tp_block_size);
 628	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 629	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 630
 631	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 632	prb_init_ft_ops(p1, req_u);
 633	prb_setup_retire_blk_timer(po);
 634	prb_open_block(p1, pbd);
 635}
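/* Sketch (user space, not part of this file): init_prb_bdqc() consumes the
 * tpacket_req3 supplied by the application.  The sizes below are illustrative;
 * any layout with tp_frame_nr == (tp_block_size / tp_frame_size) * tp_block_nr
 * is accepted by packet_set_ring().
 */
#include <linux/if_packet.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>

static void *setup_v3_rx_ring(int fd)
{
	int ver = TPACKET_V3;
	struct tpacket_req3 req;

	memset(&req, 0, sizeof(req));
	req.tp_block_size = 1 << 20;		/* 1 MiB per block */
	req.tp_block_nr = 64;
	req.tp_frame_size = 2048;
	req.tp_frame_nr = (req.tp_block_size / req.tp_frame_size) * req.tp_block_nr;
	req.tp_retire_blk_tov = 60;		/* ms; 0 lets the kernel derive it */
	req.tp_feature_req_word = TP_FT_REQ_FILL_RXHASH;

	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));

	return mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}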
 636
 637/*  Do NOT update the last_blk_num first.
 638 *  Assumes sk_buff_head lock is held.
 639 */
 640static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 641{
 642	mod_timer(&pkc->retire_blk_timer,
 643			jiffies + pkc->tov_in_jiffies);
 644	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 645}
 646
 647/*
 648 * Timer logic:
 649 * 1) We refresh the timer only when we open a block.
 650 *    By doing this we don't waste cycles refreshing the timer
  651 *	  on a packet-by-packet basis.
 652 *
 653 * With a 1MB block-size, on a 1Gbps line, it will take
 654 * i) ~8 ms to fill a block + ii) memcpy etc.
 655 * In this cut we are not accounting for the memcpy time.
 656 *
 657 * So, if the user sets the 'tmo' to 10ms then the timer
 658 * will never fire while the block is still getting filled
 659 * (which is what we want). However, the user could choose
 660 * to close a block early and that's fine.
 661 *
 662 * But when the timer does fire, we check whether or not to refresh it.
 663 * Since the tmo granularity is in msecs, it is not too expensive
  664 * to refresh the timer, let's say every '8' msecs.
 665 * Either the user can set the 'tmo' or we can derive it based on
 666 * a) line-speed and b) block-size.
 667 * prb_calc_retire_blk_tmo() calculates the tmo.
 668 *
 669 */
 670static void prb_retire_rx_blk_timer_expired(unsigned long data)
 671{
 672	struct packet_sock *po = (struct packet_sock *)data;
 673	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 674	unsigned int frozen;
 675	struct tpacket_block_desc *pbd;
 676
 677	spin_lock(&po->sk.sk_receive_queue.lock);
 678
 679	frozen = prb_queue_frozen(pkc);
 680	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 681
 682	if (unlikely(pkc->delete_blk_timer))
 683		goto out;
 684
 685	/* We only need to plug the race when the block is partially filled.
 686	 * tpacket_rcv:
 687	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 688	 *		copy_bits() is in progress ...
 689	 *		timer fires on other cpu:
 690	 *		we can't retire the current block because copy_bits
 691	 *		is in progress.
 692	 *
 693	 */
 694	if (BLOCK_NUM_PKTS(pbd)) {
 695		while (atomic_read(&pkc->blk_fill_in_prog)) {
 696			/* Waiting for skb_copy_bits to finish... */
 697			cpu_relax();
 698		}
 699	}
 700
 701	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 702		if (!frozen) {
 703			if (!BLOCK_NUM_PKTS(pbd)) {
 704				/* An empty block. Just refresh the timer. */
 705				goto refresh_timer;
 706			}
 707			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 708			if (!prb_dispatch_next_block(pkc, po))
 709				goto refresh_timer;
 710			else
 711				goto out;
 712		} else {
 713			/* Case 1. Queue was frozen because user-space was
 714			 *	   lagging behind.
 715			 */
 716			if (prb_curr_blk_in_use(pkc, pbd)) {
 717				/*
 718				 * Ok, user-space is still behind.
 719				 * So just refresh the timer.
 720				 */
 721				goto refresh_timer;
 722			} else {
 723			       /* Case 2. The queue was frozen, user-space caught up,
 724				* now the link went idle && the timer fired.
 725				* We don't have a block to close, so we open this
 726				* block and restart the timer.
 727				* Opening a block thaws the queue and restarts the timer.
 728				* Thawing/timer-refresh is a side effect.
 729				*/
 730				prb_open_block(pkc, pbd);
 731				goto out;
 732			}
 733		}
 734	}
 735
 736refresh_timer:
 737	_prb_refresh_rx_retire_blk_timer(pkc);
 738
 739out:
 740	spin_unlock(&po->sk.sk_receive_queue.lock);
 741}
 742
 743static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 744		struct tpacket_block_desc *pbd1, __u32 status)
 745{
 746	/* Flush everything minus the block header */
 747
 748#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 749	u8 *start, *end;
 750
 751	start = (u8 *)pbd1;
 752
 753	/* Skip the block header(we know header WILL fit in 4K) */
 754	start += PAGE_SIZE;
 755
 756	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 757	for (; start < end; start += PAGE_SIZE)
 758		flush_dcache_page(pgv_to_page(start));
 759
 760	smp_wmb();
 761#endif
 762
 763	/* Now update the block status. */
 764
 765	BLOCK_STATUS(pbd1) = status;
 766
 767	/* Flush the block header */
 768
 769#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 770	start = (u8 *)pbd1;
 771	flush_dcache_page(pgv_to_page(start));
 772
 773	smp_wmb();
 774#endif
 775}
 776
 777/*
 778 * Side effect:
 779 *
 780 * 1) flush the block
 781 * 2) Increment active_blk_num
 782 *
  783 *	Note: We DON'T refresh the timer on purpose.
 784 *	Because almost always the next block will be opened.
 785 */
 786static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 787		struct tpacket_block_desc *pbd1,
 788		struct packet_sock *po, unsigned int stat)
 789{
 790	__u32 status = TP_STATUS_USER | stat;
 791
 792	struct tpacket3_hdr *last_pkt;
 793	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 794	struct sock *sk = &po->sk;
 795
 796	if (po->stats.stats3.tp_drops)
 797		status |= TP_STATUS_LOSING;
 798
 799	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 800	last_pkt->tp_next_offset = 0;
 801
 802	/* Get the ts of the last pkt */
 803	if (BLOCK_NUM_PKTS(pbd1)) {
 804		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 805		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 806	} else {
 807		/* Ok, we tmo'd - so get the current time.
 808		 *
 809		 * It shouldn't really happen as we don't close empty
 810		 * blocks. See prb_retire_rx_blk_timer_expired().
 811		 */
 812		struct timespec ts;
 813		getnstimeofday(&ts);
 814		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 815		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 816	}
 817
 818	smp_wmb();
 819
 820	/* Flush the block */
 821	prb_flush_block(pkc1, pbd1, status);
 822
 823	sk->sk_data_ready(sk);
 824
 825	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 826}
 827
 828static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 829{
 830	pkc->reset_pending_on_curr_blk = 0;
 831}
 832
 833/*
 834 * Side effect of opening a block:
 835 *
 836 * 1) prb_queue is thawed.
 837 * 2) retire_blk_timer is refreshed.
 838 *
 839 */
 840static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 841	struct tpacket_block_desc *pbd1)
 842{
 843	struct timespec ts;
 844	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 845
 846	smp_rmb();
 847
 848	/* We could have just memset this but we will lose the
 849	 * flexibility of making the priv area sticky
 850	 */
 851
 852	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 853	BLOCK_NUM_PKTS(pbd1) = 0;
 854	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 855
 856	getnstimeofday(&ts);
 857
 858	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 859	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 860
 861	pkc1->pkblk_start = (char *)pbd1;
 862	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 863
 864	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 865	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 866
 867	pbd1->version = pkc1->version;
 868	pkc1->prev = pkc1->nxt_offset;
 869	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 870
 871	prb_thaw_queue(pkc1);
 872	_prb_refresh_rx_retire_blk_timer(pkc1);
 873
 874	smp_wmb();
 875}
 876
 877/*
 878 * Queue freeze logic:
 879 * 1) Assume tp_block_nr = 8 blocks.
 880 * 2) At time 't0', user opens Rx ring.
 881 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 882 * 4) user-space is either sleeping or processing block '0'.
 883 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 884 *    it will close block-7,loop around and try to fill block '0'.
 885 *    call-flow:
 886 *    __packet_lookup_frame_in_block
 887 *      prb_retire_current_block()
 888 *      prb_dispatch_next_block()
 889 *        |->(BLOCK_STATUS == USER) evaluates to true
 890 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 891 * 6) Now there are two cases:
 892 *    6.1) Link goes idle right after the queue is frozen.
 893 *         But remember, the last open_block() refreshed the timer.
  894 *         When this timer expires, it will refresh itself so that we can
  895 *         re-open block-0 in the near future.
 896 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 897 *         case and __packet_lookup_frame_in_block will check if block-0
 898 *         is free and can now be re-used.
 899 */
 900static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 901				  struct packet_sock *po)
 902{
 903	pkc->reset_pending_on_curr_blk = 1;
 904	po->stats.stats3.tp_freeze_q_cnt++;
 905}
 906
 907#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 908
 909/*
 910 * If the next block is free then we will dispatch it
 911 * and return a good offset.
 912 * Else, we will freeze the queue.
 913 * So, caller must check the return value.
 914 */
 915static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 916		struct packet_sock *po)
 917{
 918	struct tpacket_block_desc *pbd;
 919
 920	smp_rmb();
 921
 922	/* 1. Get current block num */
 923	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 924
 925	/* 2. If this block is currently in_use then freeze the queue */
 926	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 927		prb_freeze_queue(pkc, po);
 928		return NULL;
 929	}
 930
 931	/*
 932	 * 3.
 933	 * open this block and return the offset where the first packet
 934	 * needs to get stored.
 935	 */
 936	prb_open_block(pkc, pbd);
 937	return (void *)pkc->nxt_offset;
 938}
 939
 940static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 941		struct packet_sock *po, unsigned int status)
 942{
 943	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 944
 945	/* retire/close the current block */
 946	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 947		/*
 948		 * Plug the case where copy_bits() is in progress on
 949		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 950		 * have space to copy the pkt in the current block and
 951		 * called prb_retire_current_block()
 952		 *
 953		 * We don't need to worry about the TMO case because
 954		 * the timer-handler already handled this case.
 955		 */
 956		if (!(status & TP_STATUS_BLK_TMO)) {
 957			while (atomic_read(&pkc->blk_fill_in_prog)) {
 958				/* Waiting for skb_copy_bits to finish... */
 959				cpu_relax();
 960			}
 961		}
 962		prb_close_block(pkc, pbd, po, status);
 963		return;
 964	}
 965}
 966
 967static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
 968				      struct tpacket_block_desc *pbd)
 969{
 970	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 971}
 972
 973static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 974{
 975	return pkc->reset_pending_on_curr_blk;
 976}
 977
 978static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 979{
 980	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 981	atomic_dec(&pkc->blk_fill_in_prog);
 982}
 983
 984static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 985			struct tpacket3_hdr *ppd)
 986{
 987	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 988}
 989
 990static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 991			struct tpacket3_hdr *ppd)
 992{
 993	ppd->hv1.tp_rxhash = 0;
 994}
 995
 996static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 997			struct tpacket3_hdr *ppd)
 998{
 999	if (skb_vlan_tag_present(pkc->skb)) {
1000		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1001		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1002		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1003	} else {
1004		ppd->hv1.tp_vlan_tci = 0;
1005		ppd->hv1.tp_vlan_tpid = 0;
1006		ppd->tp_status = TP_STATUS_AVAILABLE;
1007	}
1008}
1009
1010static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1011			struct tpacket3_hdr *ppd)
1012{
1013	ppd->hv1.tp_padding = 0;
1014	prb_fill_vlan_info(pkc, ppd);
1015
1016	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1017		prb_fill_rxhash(pkc, ppd);
1018	else
1019		prb_clear_rxhash(pkc, ppd);
1020}
1021
1022static void prb_fill_curr_block(char *curr,
1023				struct tpacket_kbdq_core *pkc,
1024				struct tpacket_block_desc *pbd,
1025				unsigned int len)
1026{
1027	struct tpacket3_hdr *ppd;
1028
1029	ppd  = (struct tpacket3_hdr *)curr;
1030	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1031	pkc->prev = curr;
1032	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1033	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1034	BLOCK_NUM_PKTS(pbd) += 1;
1035	atomic_inc(&pkc->blk_fill_in_prog);
1036	prb_run_all_ft_ops(pkc, ppd);
1037}
1038
1039/* Assumes caller has the sk->rx_queue.lock */
1040static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1041					    struct sk_buff *skb,
1042						int status,
1043					    unsigned int len
1044					    )
1045{
1046	struct tpacket_kbdq_core *pkc;
1047	struct tpacket_block_desc *pbd;
1048	char *curr, *end;
1049
1050	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1051	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1052
1053	/* Queue is frozen when user space is lagging behind */
1054	if (prb_queue_frozen(pkc)) {
1055		/*
1056		 * Check if that last block which caused the queue to freeze,
1057		 * is still in_use by user-space.
1058		 */
1059		if (prb_curr_blk_in_use(pkc, pbd)) {
1060			/* Can't record this packet */
1061			return NULL;
1062		} else {
1063			/*
1064			 * Ok, the block was released by user-space.
1065			 * Now let's open that block.
1066			 * opening a block also thaws the queue.
1067			 * Thawing is a side effect.
1068			 */
1069			prb_open_block(pkc, pbd);
1070		}
1071	}
1072
1073	smp_mb();
1074	curr = pkc->nxt_offset;
1075	pkc->skb = skb;
1076	end = (char *)pbd + pkc->kblk_size;
1077
1078	/* first try the current block */
1079	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1080		prb_fill_curr_block(curr, pkc, pbd, len);
1081		return (void *)curr;
1082	}
1083
1084	/* Ok, close the current block */
1085	prb_retire_current_block(pkc, po, 0);
1086
1087	/* Now, try to dispatch the next block */
1088	curr = (char *)prb_dispatch_next_block(pkc, po);
1089	if (curr) {
1090		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1091		prb_fill_curr_block(curr, pkc, pbd, len);
1092		return (void *)curr;
1093	}
1094
1095	/*
 1096	 * No free blocks are available. user_space hasn't caught up yet.
1097	 * Queue was just frozen and now this packet will get dropped.
1098	 */
1099	return NULL;
1100}
1101
1102static void *packet_current_rx_frame(struct packet_sock *po,
1103					    struct sk_buff *skb,
1104					    int status, unsigned int len)
1105{
1106	char *curr = NULL;
1107	switch (po->tp_version) {
1108	case TPACKET_V1:
1109	case TPACKET_V2:
1110		curr = packet_lookup_frame(po, &po->rx_ring,
1111					po->rx_ring.head, status);
1112		return curr;
1113	case TPACKET_V3:
1114		return __packet_lookup_frame_in_block(po, skb, status, len);
1115	default:
1116		WARN(1, "TPACKET version not supported\n");
1117		BUG();
1118		return NULL;
1119	}
1120}
1121
1122static void *prb_lookup_block(struct packet_sock *po,
1123				     struct packet_ring_buffer *rb,
1124				     unsigned int idx,
1125				     int status)
1126{
1127	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1128	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1129
1130	if (status != BLOCK_STATUS(pbd))
1131		return NULL;
1132	return pbd;
1133}
1134
1135static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1136{
1137	unsigned int prev;
1138	if (rb->prb_bdqc.kactive_blk_num)
1139		prev = rb->prb_bdqc.kactive_blk_num-1;
1140	else
1141		prev = rb->prb_bdqc.knum_blocks-1;
1142	return prev;
1143}
1144
1145/* Assumes caller has held the rx_queue.lock */
1146static void *__prb_previous_block(struct packet_sock *po,
1147					 struct packet_ring_buffer *rb,
1148					 int status)
1149{
1150	unsigned int previous = prb_previous_blk_num(rb);
1151	return prb_lookup_block(po, rb, previous, status);
1152}
1153
1154static void *packet_previous_rx_frame(struct packet_sock *po,
1155					     struct packet_ring_buffer *rb,
1156					     int status)
1157{
1158	if (po->tp_version <= TPACKET_V2)
1159		return packet_previous_frame(po, rb, status);
1160
1161	return __prb_previous_block(po, rb, status);
1162}
1163
1164static void packet_increment_rx_head(struct packet_sock *po,
1165					    struct packet_ring_buffer *rb)
1166{
1167	switch (po->tp_version) {
1168	case TPACKET_V1:
1169	case TPACKET_V2:
1170		return packet_increment_head(rb);
1171	case TPACKET_V3:
1172	default:
1173		WARN(1, "TPACKET version not supported.\n");
1174		BUG();
1175		return;
1176	}
1177}
1178
1179static void *packet_previous_frame(struct packet_sock *po,
1180		struct packet_ring_buffer *rb,
1181		int status)
1182{
1183	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1184	return packet_lookup_frame(po, rb, previous, status);
1185}
1186
1187static void packet_increment_head(struct packet_ring_buffer *buff)
1188{
1189	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1190}
1191
1192static void packet_inc_pending(struct packet_ring_buffer *rb)
1193{
1194	this_cpu_inc(*rb->pending_refcnt);
1195}
1196
1197static void packet_dec_pending(struct packet_ring_buffer *rb)
1198{
1199	this_cpu_dec(*rb->pending_refcnt);
1200}
1201
1202static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1203{
1204	unsigned int refcnt = 0;
1205	int cpu;
1206
1207	/* We don't use pending refcount in rx_ring. */
1208	if (rb->pending_refcnt == NULL)
1209		return 0;
1210
1211	for_each_possible_cpu(cpu)
1212		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1213
1214	return refcnt;
1215}
1216
1217static int packet_alloc_pending(struct packet_sock *po)
1218{
1219	po->rx_ring.pending_refcnt = NULL;
1220
1221	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1222	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1223		return -ENOBUFS;
1224
1225	return 0;
1226}
1227
1228static void packet_free_pending(struct packet_sock *po)
1229{
1230	free_percpu(po->tx_ring.pending_refcnt);
1231}
1232
1233#define ROOM_POW_OFF	2
1234#define ROOM_NONE	0x0
1235#define ROOM_LOW	0x1
1236#define ROOM_NORMAL	0x2
1237
1238static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
1239{
1240	int idx, len;
1241
1242	len = po->rx_ring.frame_max + 1;
1243	idx = po->rx_ring.head;
1244	if (pow_off)
1245		idx += len >> pow_off;
1246	if (idx >= len)
1247		idx -= len;
1248	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1249}
1250
1251static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1252{
1253	int idx, len;
1254
1255	len = po->rx_ring.prb_bdqc.knum_blocks;
1256	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1257	if (pow_off)
1258		idx += len >> pow_off;
1259	if (idx >= len)
1260		idx -= len;
1261	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1262}
1263
1264static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1265{
1266	struct sock *sk = &po->sk;
1267	int ret = ROOM_NONE;
1268
1269	if (po->prot_hook.func != tpacket_rcv) {
1270		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1271					  - (skb ? skb->truesize : 0);
1272		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1273			return ROOM_NORMAL;
1274		else if (avail > 0)
1275			return ROOM_LOW;
1276		else
1277			return ROOM_NONE;
1278	}
1279
1280	if (po->tp_version == TPACKET_V3) {
1281		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1282			ret = ROOM_NORMAL;
1283		else if (__tpacket_v3_has_room(po, 0))
1284			ret = ROOM_LOW;
1285	} else {
1286		if (__tpacket_has_room(po, ROOM_POW_OFF))
1287			ret = ROOM_NORMAL;
1288		else if (__tpacket_has_room(po, 0))
1289			ret = ROOM_LOW;
1290	}
1291
1292	return ret;
1293}
1294
1295static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1296{
1297	int ret;
1298	bool has_room;
1299
1300	spin_lock_bh(&po->sk.sk_receive_queue.lock);
1301	ret = __packet_rcv_has_room(po, skb);
1302	has_room = ret == ROOM_NORMAL;
1303	if (po->pressure == has_room)
1304		po->pressure = !has_room;
1305	spin_unlock_bh(&po->sk.sk_receive_queue.lock);
1306
1307	return ret;
1308}
1309
1310static void packet_sock_destruct(struct sock *sk)
1311{
1312	skb_queue_purge(&sk->sk_error_queue);
1313
1314	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1315	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1316
1317	if (!sock_flag(sk, SOCK_DEAD)) {
1318		pr_err("Attempt to release alive packet socket: %p\n", sk);
1319		return;
1320	}
1321
1322	sk_refcnt_debug_dec(sk);
1323}
1324
1325static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1326{
1327	u32 rxhash;
1328	int i, count = 0;
1329
1330	rxhash = skb_get_hash(skb);
1331	for (i = 0; i < ROLLOVER_HLEN; i++)
1332		if (po->rollover->history[i] == rxhash)
1333			count++;
1334
1335	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1336	return count > (ROLLOVER_HLEN >> 1);
1337}
1338
1339static unsigned int fanout_demux_hash(struct packet_fanout *f,
1340				      struct sk_buff *skb,
1341				      unsigned int num)
1342{
1343	return reciprocal_scale(skb_get_hash(skb), num);
1344}
1345
1346static unsigned int fanout_demux_lb(struct packet_fanout *f,
1347				    struct sk_buff *skb,
1348				    unsigned int num)
1349{
1350	unsigned int val = atomic_inc_return(&f->rr_cur);
1351
1352	return val % num;
1353}
1354
1355static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1356				     struct sk_buff *skb,
1357				     unsigned int num)
1358{
1359	return smp_processor_id() % num;
1360}
1361
1362static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1363				     struct sk_buff *skb,
1364				     unsigned int num)
1365{
1366	return prandom_u32_max(num);
1367}
1368
1369static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1370					  struct sk_buff *skb,
1371					  unsigned int idx, bool try_self,
1372					  unsigned int num)
1373{
1374	struct packet_sock *po, *po_next, *po_skip = NULL;
1375	unsigned int i, j, room = ROOM_NONE;
1376
1377	po = pkt_sk(f->arr[idx]);
1378
1379	if (try_self) {
1380		room = packet_rcv_has_room(po, skb);
1381		if (room == ROOM_NORMAL ||
1382		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1383			return idx;
1384		po_skip = po;
1385	}
1386
1387	i = j = min_t(int, po->rollover->sock, num - 1);
1388	do {
1389		po_next = pkt_sk(f->arr[i]);
1390		if (po_next != po_skip && !po_next->pressure &&
1391		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1392			if (i != j)
1393				po->rollover->sock = i;
1394			atomic_long_inc(&po->rollover->num);
1395			if (room == ROOM_LOW)
1396				atomic_long_inc(&po->rollover->num_huge);
1397			return i;
1398		}
1399
1400		if (++i == num)
1401			i = 0;
1402	} while (i != j);
1403
1404	atomic_long_inc(&po->rollover->num_failed);
1405	return idx;
1406}
1407
1408static unsigned int fanout_demux_qm(struct packet_fanout *f,
1409				    struct sk_buff *skb,
1410				    unsigned int num)
1411{
1412	return skb_get_queue_mapping(skb) % num;
1413}
1414
1415static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1416				     struct sk_buff *skb,
1417				     unsigned int num)
1418{
1419	struct bpf_prog *prog;
1420	unsigned int ret = 0;
1421
1422	rcu_read_lock();
1423	prog = rcu_dereference(f->bpf_prog);
1424	if (prog)
1425		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1426	rcu_read_unlock();
1427
1428	return ret;
1429}
1430
1431static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1432{
1433	return f->flags & (flag >> 8);
1434}
1435
1436static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1437			     struct packet_type *pt, struct net_device *orig_dev)
1438{
1439	struct packet_fanout *f = pt->af_packet_priv;
1440	unsigned int num = READ_ONCE(f->num_members);
1441	struct net *net = read_pnet(&f->net);
1442	struct packet_sock *po;
1443	unsigned int idx;
1444
1445	if (!net_eq(dev_net(dev), net) || !num) {
1446		kfree_skb(skb);
1447		return 0;
1448	}
1449
1450	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1451		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1452		if (!skb)
1453			return 0;
1454	}
1455	switch (f->type) {
1456	case PACKET_FANOUT_HASH:
1457	default:
1458		idx = fanout_demux_hash(f, skb, num);
1459		break;
1460	case PACKET_FANOUT_LB:
1461		idx = fanout_demux_lb(f, skb, num);
1462		break;
1463	case PACKET_FANOUT_CPU:
1464		idx = fanout_demux_cpu(f, skb, num);
1465		break;
1466	case PACKET_FANOUT_RND:
1467		idx = fanout_demux_rnd(f, skb, num);
1468		break;
1469	case PACKET_FANOUT_QM:
1470		idx = fanout_demux_qm(f, skb, num);
1471		break;
1472	case PACKET_FANOUT_ROLLOVER:
1473		idx = fanout_demux_rollover(f, skb, 0, false, num);
1474		break;
1475	case PACKET_FANOUT_CBPF:
1476	case PACKET_FANOUT_EBPF:
1477		idx = fanout_demux_bpf(f, skb, num);
1478		break;
1479	}
1480
1481	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1482		idx = fanout_demux_rollover(f, skb, idx, true, num);
1483
1484	po = pkt_sk(f->arr[idx]);
1485	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1486}
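/* Sketch (user space, not part of this file): packet_rcv_fanout() above is
 * installed as the shared prot_hook for a fanout group.  Each member socket
 * joins with the same 16-bit group id; the demux mode and optional
 * PACKET_FANOUT_FLAG_* bits live in the upper 16 bits of the argument.
 */
#include <linux/if_packet.h>
#include <sys/socket.h>

static int join_fanout_group(int fd, unsigned int group_id)
{
	int arg = (group_id & 0xffff) | (PACKET_FANOUT_HASH << 16);

	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
}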
1487
1488DEFINE_MUTEX(fanout_mutex);
1489EXPORT_SYMBOL_GPL(fanout_mutex);
1490static LIST_HEAD(fanout_list);
1491
1492static void __fanout_link(struct sock *sk, struct packet_sock *po)
1493{
1494	struct packet_fanout *f = po->fanout;
1495
1496	spin_lock(&f->lock);
1497	f->arr[f->num_members] = sk;
1498	smp_wmb();
1499	f->num_members++;
1500	spin_unlock(&f->lock);
1501}
1502
1503static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1504{
1505	struct packet_fanout *f = po->fanout;
1506	int i;
1507
1508	spin_lock(&f->lock);
1509	for (i = 0; i < f->num_members; i++) {
1510		if (f->arr[i] == sk)
1511			break;
1512	}
1513	BUG_ON(i >= f->num_members);
1514	f->arr[i] = f->arr[f->num_members - 1];
1515	f->num_members--;
1516	spin_unlock(&f->lock);
1517}
1518
1519static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1520{
1521	if (sk->sk_family != PF_PACKET)
1522		return false;
1523
1524	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1525}
1526
1527static void fanout_init_data(struct packet_fanout *f)
1528{
1529	switch (f->type) {
1530	case PACKET_FANOUT_LB:
1531		atomic_set(&f->rr_cur, 0);
1532		break;
1533	case PACKET_FANOUT_CBPF:
1534	case PACKET_FANOUT_EBPF:
1535		RCU_INIT_POINTER(f->bpf_prog, NULL);
1536		break;
1537	}
1538}
1539
1540static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1541{
1542	struct bpf_prog *old;
1543
1544	spin_lock(&f->lock);
1545	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1546	rcu_assign_pointer(f->bpf_prog, new);
1547	spin_unlock(&f->lock);
1548
1549	if (old) {
1550		synchronize_net();
1551		bpf_prog_destroy(old);
1552	}
1553}
1554
1555static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1556				unsigned int len)
1557{
1558	struct bpf_prog *new;
1559	struct sock_fprog fprog;
1560	int ret;
1561
1562	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1563		return -EPERM;
1564	if (len != sizeof(fprog))
1565		return -EINVAL;
1566	if (copy_from_user(&fprog, data, len))
1567		return -EFAULT;
1568
1569	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1570	if (ret)
1571		return ret;
1572
1573	__fanout_set_data_bpf(po->fanout, new);
1574	return 0;
1575}
1576
1577static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1578				unsigned int len)
1579{
1580	struct bpf_prog *new;
1581	u32 fd;
1582
1583	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1584		return -EPERM;
1585	if (len != sizeof(fd))
1586		return -EINVAL;
1587	if (copy_from_user(&fd, data, len))
1588		return -EFAULT;
1589
1590	new = bpf_prog_get(fd);
1591	if (IS_ERR(new))
1592		return PTR_ERR(new);
1593	if (new->type != BPF_PROG_TYPE_SOCKET_FILTER) {
1594		bpf_prog_put(new);
1595		return -EINVAL;
1596	}
1597
1598	__fanout_set_data_bpf(po->fanout, new);
1599	return 0;
1600}
1601
1602static int fanout_set_data(struct packet_sock *po, char __user *data,
1603			   unsigned int len)
1604{
1605	switch (po->fanout->type) {
1606	case PACKET_FANOUT_CBPF:
1607		return fanout_set_data_cbpf(po, data, len);
1608	case PACKET_FANOUT_EBPF:
1609		return fanout_set_data_ebpf(po, data, len);
1610	default:
1611		return -EINVAL;
1612	};
1613}
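/* Sketch (user space, not part of this file): fanout_set_data() above is
 * reached through the PACKET_FANOUT_DATA option.  For PACKET_FANOUT_CBPF the
 * classifier's return value, taken modulo the member count, selects the
 * receiving socket.  The two-instruction program here is illustrative: it
 * returns the IPv4 protocol byte (offset 23 of an untagged Ethernet frame).
 */
#include <linux/filter.h>
#include <linux/if_packet.h>
#include <sys/socket.h>

static int set_cbpf_fanout(int fd)
{
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD | BPF_B | BPF_ABS, 23),
		BPF_STMT(BPF_RET | BPF_A, 0),
	};
	struct sock_fprog prog = { .len = 2, .filter = code };

	/* The socket must already be a member of a PACKET_FANOUT_CBPF group. */
	return setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
			  &prog, sizeof(prog));
}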
1614
1615static void fanout_release_data(struct packet_fanout *f)
1616{
1617	switch (f->type) {
1618	case PACKET_FANOUT_CBPF:
1619	case PACKET_FANOUT_EBPF:
1620		__fanout_set_data_bpf(f, NULL);
1621	};
1622}
1623
1624static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1625{
1626	struct packet_sock *po = pkt_sk(sk);
1627	struct packet_fanout *f, *match;
1628	u8 type = type_flags & 0xff;
1629	u8 flags = type_flags >> 8;
1630	int err;
1631
1632	switch (type) {
1633	case PACKET_FANOUT_ROLLOVER:
1634		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1635			return -EINVAL;
1636	case PACKET_FANOUT_HASH:
1637	case PACKET_FANOUT_LB:
1638	case PACKET_FANOUT_CPU:
1639	case PACKET_FANOUT_RND:
1640	case PACKET_FANOUT_QM:
1641	case PACKET_FANOUT_CBPF:
1642	case PACKET_FANOUT_EBPF:
1643		break;
1644	default:
1645		return -EINVAL;
1646	}
1647
1648	if (!po->running)
1649		return -EINVAL;
1650
1651	if (po->fanout)
1652		return -EALREADY;
1653
1654	if (type == PACKET_FANOUT_ROLLOVER ||
1655	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1656		po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
1657		if (!po->rollover)
1658			return -ENOMEM;
1659		atomic_long_set(&po->rollover->num, 0);
1660		atomic_long_set(&po->rollover->num_huge, 0);
1661		atomic_long_set(&po->rollover->num_failed, 0);
1662	}
1663
1664	mutex_lock(&fanout_mutex);
1665	match = NULL;
1666	list_for_each_entry(f, &fanout_list, list) {
1667		if (f->id == id &&
1668		    read_pnet(&f->net) == sock_net(sk)) {
1669			match = f;
1670			break;
1671		}
1672	}
1673	err = -EINVAL;
1674	if (match && match->flags != flags)
1675		goto out;
1676	if (!match) {
1677		err = -ENOMEM;
1678		match = kzalloc(sizeof(*match), GFP_KERNEL);
1679		if (!match)
1680			goto out;
1681		write_pnet(&match->net, sock_net(sk));
1682		match->id = id;
1683		match->type = type;
1684		match->flags = flags;
1685		INIT_LIST_HEAD(&match->list);
1686		spin_lock_init(&match->lock);
1687		atomic_set(&match->sk_ref, 0);
1688		fanout_init_data(match);
1689		match->prot_hook.type = po->prot_hook.type;
1690		match->prot_hook.dev = po->prot_hook.dev;
1691		match->prot_hook.func = packet_rcv_fanout;
1692		match->prot_hook.af_packet_priv = match;
1693		match->prot_hook.id_match = match_fanout_group;
1694		dev_add_pack(&match->prot_hook);
1695		list_add(&match->list, &fanout_list);
1696	}
1697	err = -EINVAL;
1698	if (match->type == type &&
1699	    match->prot_hook.type == po->prot_hook.type &&
1700	    match->prot_hook.dev == po->prot_hook.dev) {
1701		err = -ENOSPC;
1702		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1703			__dev_remove_pack(&po->prot_hook);
1704			po->fanout = match;
1705			atomic_inc(&match->sk_ref);
1706			__fanout_link(sk, po);
1707			err = 0;
1708		}
1709	}
1710out:
1711	mutex_unlock(&fanout_mutex);
1712	if (err) {
1713		kfree(po->rollover);
1714		po->rollover = NULL;
1715	}
1716	return err;
1717}
1718
1719static void fanout_release(struct sock *sk)
1720{
1721	struct packet_sock *po = pkt_sk(sk);
1722	struct packet_fanout *f;
1723
1724	f = po->fanout;
1725	if (!f)
1726		return;
1727
1728	mutex_lock(&fanout_mutex);
1729	po->fanout = NULL;
1730
1731	if (atomic_dec_and_test(&f->sk_ref)) {
1732		list_del(&f->list);
1733		dev_remove_pack(&f->prot_hook);
1734		fanout_release_data(f);
1735		kfree(f);
1736	}
1737	mutex_unlock(&fanout_mutex);
1738
1739	if (po->rollover)
1740		kfree_rcu(po->rollover, rcu);
1741}
1742
1743static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1744					  struct sk_buff *skb)
1745{
1746	/* Earlier code assumed this would be a VLAN pkt, double-check
1747	 * this now that we have the actual packet in hand. We can only
1748	 * do this check on Ethernet devices.
1749	 */
1750	if (unlikely(dev->type != ARPHRD_ETHER))
1751		return false;
1752
1753	skb_reset_mac_header(skb);
1754	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1755}
1756
1757static const struct proto_ops packet_ops;
1758
1759static const struct proto_ops packet_ops_spkt;
1760
1761static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1762			   struct packet_type *pt, struct net_device *orig_dev)
1763{
1764	struct sock *sk;
1765	struct sockaddr_pkt *spkt;
1766
1767	/*
1768	 *	When we registered the protocol we saved the socket in the data
1769	 *	field for just this event.
1770	 */
1771
1772	sk = pt->af_packet_priv;
1773
1774	/*
1775	 *	Yank back the headers [hope the device set this
1776	 *	right or kerboom...]
1777	 *
1778	 *	Incoming packets have ll header pulled,
1779	 *	push it back.
1780	 *
1781	 *	For outgoing ones skb->data == skb_mac_header(skb)
1782	 *	so that this procedure is noop.
1783	 */
1784
1785	if (skb->pkt_type == PACKET_LOOPBACK)
1786		goto out;
1787
1788	if (!net_eq(dev_net(dev), sock_net(sk)))
1789		goto out;
1790
1791	skb = skb_share_check(skb, GFP_ATOMIC);
1792	if (skb == NULL)
1793		goto oom;
1794
1795	/* drop any routing info */
1796	skb_dst_drop(skb);
1797
1798	/* drop conntrack reference */
1799	nf_reset(skb);
1800
1801	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1802
1803	skb_push(skb, skb->data - skb_mac_header(skb));
1804
1805	/*
1806	 *	The SOCK_PACKET socket receives _all_ frames.
1807	 */
1808
1809	spkt->spkt_family = dev->type;
1810	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1811	spkt->spkt_protocol = skb->protocol;
1812
1813	/*
1814	 *	Charge the memory to the socket. This is done specifically
1815	 *	to prevent sockets using all the memory up.
1816	 */
1817
1818	if (sock_queue_rcv_skb(sk, skb) == 0)
1819		return 0;
1820
1821out:
1822	kfree_skb(skb);
1823oom:
1824	return 0;
1825}
1826
1827
1828/*
1829 *	Output a raw packet to a device layer. This bypasses all the other
1830 *	protocol layers and you must therefore supply it with a complete frame
1831 */
1832
1833static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1834			       size_t len)
1835{
1836	struct sock *sk = sock->sk;
1837	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1838	struct sk_buff *skb = NULL;
1839	struct net_device *dev;
1840	__be16 proto = 0;
1841	int err;
1842	int extra_len = 0;
1843
1844	/*
1845	 *	Get and verify the address.
1846	 */
1847
1848	if (saddr) {
1849		if (msg->msg_namelen < sizeof(struct sockaddr))
1850			return -EINVAL;
1851		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1852			proto = saddr->spkt_protocol;
1853	} else
1854		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1855
1856	/*
1857	 *	Find the device first to size check it
1858	 */
1859
1860	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1861retry:
1862	rcu_read_lock();
1863	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1864	err = -ENODEV;
1865	if (dev == NULL)
1866		goto out_unlock;
1867
1868	err = -ENETDOWN;
1869	if (!(dev->flags & IFF_UP))
1870		goto out_unlock;
1871
1872	/*
1873	 * You may not queue a frame bigger than the mtu. This is the lowest level
1874	 * raw protocol and you must do your own fragmentation at this level.
1875	 */
1876
1877	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1878		if (!netif_supports_nofcs(dev)) {
1879			err = -EPROTONOSUPPORT;
1880			goto out_unlock;
1881		}
1882		extra_len = 4; /* We're doing our own CRC */
1883	}
1884
1885	err = -EMSGSIZE;
1886	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1887		goto out_unlock;
1888
1889	if (!skb) {
1890		size_t reserved = LL_RESERVED_SPACE(dev);
1891		int tlen = dev->needed_tailroom;
1892		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1893
1894		rcu_read_unlock();
1895		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1896		if (skb == NULL)
1897			return -ENOBUFS;
1898		/* FIXME: Save some space for broken drivers that write a hard
1899		 * header at transmission time by themselves. PPP is the notable
1900		 * one here. This should really be fixed at the driver level.
1901		 */
1902		skb_reserve(skb, reserved);
1903		skb_reset_network_header(skb);
1904
1905		/* Try to align data part correctly */
1906		if (hhlen) {
1907			skb->data -= hhlen;
1908			skb->tail -= hhlen;
1909			if (len < hhlen)
1910				skb_reset_network_header(skb);
1911		}
1912		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1913		if (err)
1914			goto out_free;
1915		goto retry;
1916	}
1917
1918	if (!dev_validate_header(dev, skb->data, len)) {
1919		err = -EINVAL;
1920		goto out_unlock;
1921	}
1922	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1923	    !packet_extra_vlan_len_allowed(dev, skb)) {
1924		err = -EMSGSIZE;
1925		goto out_unlock;
1926	}
1927
1928	skb->protocol = proto;
1929	skb->dev = dev;
1930	skb->priority = sk->sk_priority;
1931	skb->mark = sk->sk_mark;
1932
1933	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
1934
1935	if (unlikely(extra_len == 4))
1936		skb->no_fcs = 1;
1937
1938	skb_probe_transport_header(skb, 0);
1939
1940	dev_queue_xmit(skb);
1941	rcu_read_unlock();
1942	return len;
1943
1944out_unlock:
1945	rcu_read_unlock();
1946out_free:
1947	kfree_skb(skb);
1948	return err;
1949}
1950
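/* Run the socket's attached BPF filter (if any) over the skb and return
 * the number of bytes of the packet to keep; zero means drop the packet.
 */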
1951static unsigned int run_filter(struct sk_buff *skb,
1952			       const struct sock *sk,
1953			       unsigned int res)
1954{
1955	struct sk_filter *filter;
1956
1957	rcu_read_lock();
1958	filter = rcu_dereference(sk->sk_filter);
1959	if (filter != NULL)
1960		res = bpf_prog_run_clear_cb(filter->prog, skb);
1961	rcu_read_unlock();
1962
1963	return res;
1964}
1965
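/* Describe the skb's GSO and checksum state in a virtio_net_hdr so it can
 * be passed to userspace ahead of the packet data.
 */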
1966static int __packet_rcv_vnet(const struct sk_buff *skb,
1967			     struct virtio_net_hdr *vnet_hdr)
1968{
1969	*vnet_hdr = (const struct virtio_net_hdr) { 0 };
1970
1971	if (skb_is_gso(skb)) {
1972		struct skb_shared_info *sinfo = skb_shinfo(skb);
1973
1974		/* This is a hint as to how much should be linear. */
1975		vnet_hdr->hdr_len =
1976			__cpu_to_virtio16(vio_le(), skb_headlen(skb));
1977		vnet_hdr->gso_size =
1978			__cpu_to_virtio16(vio_le(), sinfo->gso_size);
1979
1980		if (sinfo->gso_type & SKB_GSO_TCPV4)
1981			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1982		else if (sinfo->gso_type & SKB_GSO_TCPV6)
1983			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1984		else if (sinfo->gso_type & SKB_GSO_UDP)
1985			vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
1986		else if (sinfo->gso_type & SKB_GSO_FCOE)
1987			return -EINVAL;
1988		else
1989			BUG();
1990
1991		if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1992			vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1993	} else
1994		vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
1995
1996	if (skb->ip_summed == CHECKSUM_PARTIAL) {
1997		vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1998		vnet_hdr->csum_start = __cpu_to_virtio16(vio_le(),
1999				  skb_checksum_start_offset(skb));
2000		vnet_hdr->csum_offset = __cpu_to_virtio16(vio_le(),
2001						 skb->csum_offset);
2002	} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
2003		vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
2004	} /* else everything is zero */
2005
2006	return 0;
2007}
2008
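/* Copy a virtio_net_hdr describing the skb into the receive msghdr and
 * reduce *len by the header size.
 */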
2009static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2010			   size_t *len)
2011{
2012	struct virtio_net_hdr vnet_hdr;
2013
2014	if (*len < sizeof(vnet_hdr))
2015		return -EINVAL;
2016	*len -= sizeof(vnet_hdr);
2017
2018	if (__packet_rcv_vnet(skb, &vnet_hdr))
2019		return -EINVAL;
2020
2021	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2022}
2023
2024/*
2025 * This function does lazy skb cloning in the hope that most packets
2026 * are discarded by BPF.
2027 *
2028 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2029 * and skb->cb are mangled. It works because (and as long as) packets
2030 * arriving here are owned by the current CPU. Output packets are cloned
2031 * by dev_queue_xmit_nit() and input packets are processed by net_bh
2032 * sequentially, so if we restore the skb to its original state on exit,
2033 * we will not harm anyone.
2034 */
2035
2036static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2037		      struct packet_type *pt, struct net_device *orig_dev)
2038{
2039	struct sock *sk;
2040	struct sockaddr_ll *sll;
2041	struct packet_sock *po;
2042	u8 *skb_head = skb->data;
2043	int skb_len = skb->len;
2044	unsigned int snaplen, res;
2045
2046	if (skb->pkt_type == PACKET_LOOPBACK)
2047		goto drop;
2048
2049	sk = pt->af_packet_priv;
2050	po = pkt_sk(sk);
2051
2052	if (!net_eq(dev_net(dev), sock_net(sk)))
2053		goto drop;
2054
2055	skb->dev = dev;
2056
2057	if (dev->header_ops) {
2058		/* The device has an explicit notion of ll header,
2059		 * exported to higher levels.
2060		 *
2061		 * Otherwise, the device hides the details of its frame
2062		 * structure, so that the corresponding packet head is
2063		 * never delivered to the user.
2064		 */
2065		if (sk->sk_type != SOCK_DGRAM)
2066			skb_push(skb, skb->data - skb_mac_header(skb));
2067		else if (skb->pkt_type == PACKET_OUTGOING) {
2068			/* Special case: outgoing packets have ll header at head */
2069			skb_pull(skb, skb_network_offset(skb));
2070		}
2071	}
2072
2073	snaplen = skb->len;
2074
2075	res = run_filter(skb, sk, snaplen);
2076	if (!res)
2077		goto drop_n_restore;
2078	if (snaplen > res)
2079		snaplen = res;
2080
2081	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2082		goto drop_n_acct;
2083
2084	if (skb_shared(skb)) {
2085		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2086		if (nskb == NULL)
2087			goto drop_n_acct;
2088
2089		if (skb_head != skb->data) {
2090			skb->data = skb_head;
2091			skb->len = skb_len;
2092		}
2093		consume_skb(skb);
2094		skb = nskb;
2095	}
2096
2097	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2098
2099	sll = &PACKET_SKB_CB(skb)->sa.ll;
2100	sll->sll_hatype = dev->type;
2101	sll->sll_pkttype = skb->pkt_type;
2102	if (unlikely(po->origdev))
2103		sll->sll_ifindex = orig_dev->ifindex;
2104	else
2105		sll->sll_ifindex = dev->ifindex;
2106
2107	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2108
2109	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2110	 * Use their space for storing the original skb length.
2111	 */
2112	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2113
2114	if (pskb_trim(skb, snaplen))
2115		goto drop_n_acct;
2116
2117	skb_set_owner_r(skb, sk);
2118	skb->dev = NULL;
2119	skb_dst_drop(skb);
2120
2121	/* drop conntrack reference */
2122	nf_reset(skb);
2123
2124	spin_lock(&sk->sk_receive_queue.lock);
2125	po->stats.stats1.tp_packets++;
2126	sock_skb_set_dropcount(sk, skb);
2127	__skb_queue_tail(&sk->sk_receive_queue, skb);
2128	spin_unlock(&sk->sk_receive_queue.lock);
2129	sk->sk_data_ready(sk);
2130	return 0;
2131
2132drop_n_acct:
2133	spin_lock(&sk->sk_receive_queue.lock);
2134	po->stats.stats1.tp_drops++;
2135	atomic_inc(&sk->sk_drops);
2136	spin_unlock(&sk->sk_receive_queue.lock);
2137
2138drop_n_restore:
2139	if (skb_head != skb->data && skb_shared(skb)) {
2140		skb->data = skb_head;
2141		skb->len = skb_len;
2142	}
2143drop:
2144	consume_skb(skb);
2145	return 0;
2146}
2147
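/* Receive handler for sockets with a PACKET_RX_RING mapped: copy the
 * (possibly truncated) frame into the next available ring slot and fill in
 * the tpacket header, instead of queueing the skb itself.
 */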
2148static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2149		       struct packet_type *pt, struct net_device *orig_dev)
2150{
2151	struct sock *sk;
2152	struct packet_sock *po;
2153	struct sockaddr_ll *sll;
2154	union tpacket_uhdr h;
2155	u8 *skb_head = skb->data;
2156	int skb_len = skb->len;
2157	unsigned int snaplen, res;
2158	unsigned long status = TP_STATUS_USER;
2159	unsigned short macoff, netoff, hdrlen;
2160	struct sk_buff *copy_skb = NULL;
2161	struct timespec ts;
2162	__u32 ts_status;
2163
2164	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2165	 * We may add members to them up to the current aligned size without
2166	 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2167	 */
2168	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2169	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2170
2171	if (skb->pkt_type == PACKET_LOOPBACK)
2172		goto drop;
2173
2174	sk = pt->af_packet_priv;
2175	po = pkt_sk(sk);
2176
2177	if (!net_eq(dev_net(dev), sock_net(sk)))
2178		goto drop;
2179
2180	if (dev->header_ops) {
2181		if (sk->sk_type != SOCK_DGRAM)
2182			skb_push(skb, skb->data - skb_mac_header(skb));
2183		else if (skb->pkt_type == PACKET_OUTGOING) {
2184			/* Special case: outgoing packets have ll header at head */
2185			skb_pull(skb, skb_network_offset(skb));
2186		}
2187	}
2188
2189	snaplen = skb->len;
2190
2191	res = run_filter(skb, sk, snaplen);
2192	if (!res)
2193		goto drop_n_restore;
2194
2195	if (skb->ip_summed == CHECKSUM_PARTIAL)
2196		status |= TP_STATUS_CSUMNOTREADY;
2197	else if (skb->pkt_type != PACKET_OUTGOING &&
2198		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2199		  skb_csum_unnecessary(skb)))
2200		status |= TP_STATUS_CSUM_VALID;
2201
2202	if (snaplen > res)
2203		snaplen = res;
2204
2205	if (sk->sk_type == SOCK_DGRAM) {
2206		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2207				  po->tp_reserve;
2208	} else {
2209		unsigned int maclen = skb_network_offset(skb);
2210		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2211				       (maclen < 16 ? 16 : maclen)) +
2212				       po->tp_reserve;
2213		if (po->has_vnet_hdr)
2214			netoff += sizeof(struct virtio_net_hdr);
2215		macoff = netoff - maclen;
2216	}
2217	if (po->tp_version <= TPACKET_V2) {
2218		if (macoff + snaplen > po->rx_ring.frame_size) {
2219			if (po->copy_thresh &&
2220			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2221				if (skb_shared(skb)) {
2222					copy_skb = skb_clone(skb, GFP_ATOMIC);
2223				} else {
2224					copy_skb = skb_get(skb);
2225					skb_head = skb->data;
2226				}
2227				if (copy_skb)
2228					skb_set_owner_r(copy_skb, sk);
2229			}
2230			snaplen = po->rx_ring.frame_size - macoff;
2231			if ((int)snaplen < 0)
2232				snaplen = 0;
2233		}
2234	} else if (unlikely(macoff + snaplen >
2235			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2236		u32 nval;
2237
2238		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2239		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2240			    snaplen, nval, macoff);
2241		snaplen = nval;
2242		if (unlikely((int)snaplen < 0)) {
2243			snaplen = 0;
2244			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2245		}
2246	}
2247	spin_lock(&sk->sk_receive_queue.lock);
2248	h.raw = packet_current_rx_frame(po, skb,
2249					TP_STATUS_KERNEL, (macoff+snaplen));
2250	if (!h.raw)
2251		goto drop_n_account;
2252	if (po->tp_version <= TPACKET_V2) {
2253		packet_increment_rx_head(po, &po->rx_ring);
2254	/*
2255	 * LOSING will be reported until you read the stats,
2256	 * because it's COR - Clear On Read.
2257	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2258	 * at the packet level.
2259	 */
2260		if (po->stats.stats1.tp_drops)
2261			status |= TP_STATUS_LOSING;
2262	}
2263	po->stats.stats1.tp_packets++;
2264	if (copy_skb) {
2265		status |= TP_STATUS_COPY;
2266		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2267	}
2268	spin_unlock(&sk->sk_receive_queue.lock);
2269
2270	if (po->has_vnet_hdr) {
2271		if (__packet_rcv_vnet(skb, h.raw + macoff -
2272					   sizeof(struct virtio_net_hdr))) {
2273			spin_lock(&sk->sk_receive_queue.lock);
2274			goto drop_n_account;
2275		}
2276	}
2277
2278	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2279
2280	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2281		getnstimeofday(&ts);
2282
2283	status |= ts_status;
2284
2285	switch (po->tp_version) {
2286	case TPACKET_V1:
2287		h.h1->tp_len = skb->len;
2288		h.h1->tp_snaplen = snaplen;
2289		h.h1->tp_mac = macoff;
2290		h.h1->tp_net = netoff;
2291		h.h1->tp_sec = ts.tv_sec;
2292		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2293		hdrlen = sizeof(*h.h1);
2294		break;
2295	case TPACKET_V2:
2296		h.h2->tp_len = skb->len;
2297		h.h2->tp_snaplen = snaplen;
2298		h.h2->tp_mac = macoff;
2299		h.h2->tp_net = netoff;
2300		h.h2->tp_sec = ts.tv_sec;
2301		h.h2->tp_nsec = ts.tv_nsec;
2302		if (skb_vlan_tag_present(skb)) {
2303			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2304			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2305			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2306		} else {
2307			h.h2->tp_vlan_tci = 0;
2308			h.h2->tp_vlan_tpid = 0;
2309		}
2310		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2311		hdrlen = sizeof(*h.h2);
2312		break;
2313	case TPACKET_V3:
2314		/* tp_next_offset and vlan are already populated above,
2315		 * so DON'T clear those fields here.
2316		 */
2317		h.h3->tp_status |= status;
2318		h.h3->tp_len = skb->len;
2319		h.h3->tp_snaplen = snaplen;
2320		h.h3->tp_mac = macoff;
2321		h.h3->tp_net = netoff;
2322		h.h3->tp_sec  = ts.tv_sec;
2323		h.h3->tp_nsec = ts.tv_nsec;
2324		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2325		hdrlen = sizeof(*h.h3);
2326		break;
2327	default:
2328		BUG();
2329	}
2330
2331	sll = h.raw + TPACKET_ALIGN(hdrlen);
2332	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2333	sll->sll_family = AF_PACKET;
2334	sll->sll_hatype = dev->type;
2335	sll->sll_protocol = skb->protocol;
2336	sll->sll_pkttype = skb->pkt_type;
2337	if (unlikely(po->origdev))
2338		sll->sll_ifindex = orig_dev->ifindex;
2339	else
2340		sll->sll_ifindex = dev->ifindex;
2341
2342	smp_mb();
2343
2344#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2345	if (po->tp_version <= TPACKET_V2) {
2346		u8 *start, *end;
2347
2348		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2349					macoff + snaplen);
2350
2351		for (start = h.raw; start < end; start += PAGE_SIZE)
2352			flush_dcache_page(pgv_to_page(start));
2353	}
2354	smp_wmb();
2355#endif
2356
2357	if (po->tp_version <= TPACKET_V2) {
2358		__packet_set_status(po, h.raw, status);
2359		sk->sk_data_ready(sk);
2360	} else {
2361		prb_clear_blk_fill_status(&po->rx_ring);
2362	}
2363
2364drop_n_restore:
2365	if (skb_head != skb->data && skb_shared(skb)) {
2366		skb->data = skb_head;
2367		skb->len = skb_len;
2368	}
2369drop:
2370	kfree_skb(skb);
2371	return 0;
2372
2373drop_n_account:
2374	po->stats.stats1.tp_drops++;
2375	spin_unlock(&sk->sk_receive_queue.lock);
2376
2377	sk->sk_data_ready(sk);
2378	kfree_skb(copy_skb);
2379	goto drop_n_restore;
2380}
2381
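/* skb destructor for TX ring frames: mark the originating ring slot
 * available again (together with any TX timestamp status) and release the
 * socket's write-memory charge.
 */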
2382static void tpacket_destruct_skb(struct sk_buff *skb)
2383{
2384	struct packet_sock *po = pkt_sk(skb->sk);
2385
2386	if (likely(po->tx_ring.pg_vec)) {
2387		void *ph;
2388		__u32 ts;
2389
2390		ph = skb_shinfo(skb)->destructor_arg;
2391		packet_dec_pending(&po->tx_ring);
2392
2393		ts = __packet_set_timestamp(po, ph, skb);
2394		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2395	}
2396
2397	sock_wfree(skb);
2398}
2399
2400static void tpacket_set_protocol(const struct net_device *dev,
2401				 struct sk_buff *skb)
2402{
2403	if (dev->type == ARPHRD_ETHER) {
2404		skb_reset_mac_header(skb);
2405		skb->protocol = eth_hdr(skb)->h_proto;
2406	}
2407}
2408
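/* Validate a user-supplied virtio_net_hdr on the transmit path and map its
 * GSO type onto the corresponding SKB_GSO_* flags.
 */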
2409static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2410{
2411	unsigned short gso_type = 0;
2412
2413	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2414	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2415	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2416	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2417		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2418			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2419			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2420
2421	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2422		return -EINVAL;
2423
2424	if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
2425		switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
2426		case VIRTIO_NET_HDR_GSO_TCPV4:
2427			gso_type = SKB_GSO_TCPV4;
2428			break;
2429		case VIRTIO_NET_HDR_GSO_TCPV6:
2430			gso_type = SKB_GSO_TCPV6;
2431			break;
2432		case VIRTIO_NET_HDR_GSO_UDP:
2433			gso_type = SKB_GSO_UDP;
2434			break;
2435		default:
2436			return -EINVAL;
2437		}
2438
2439		if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
2440			gso_type |= SKB_GSO_TCP_ECN;
2441
2442		if (vnet_hdr->gso_size == 0)
2443			return -EINVAL;
2444	}
2445
2446	vnet_hdr->gso_type = gso_type;	/* changes type, temporary storage */
2447	return 0;
2448}
2449
2450static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2451				 struct virtio_net_hdr *vnet_hdr)
2452{
2453	int n;
2454
2455	if (*len < sizeof(*vnet_hdr))
2456		return -EINVAL;
2457	*len -= sizeof(*vnet_hdr);
2458
2459	n = copy_from_iter(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter);
2460	if (n != sizeof(*vnet_hdr))
2461		return -EFAULT;
2462
2463	return __packet_snd_vnet_parse(vnet_hdr, *len);
2464}
2465
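/* Apply the checksum offload and GSO parameters from a validated
 * virtio_net_hdr to the outgoing skb.
 */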
2466static int packet_snd_vnet_gso(struct sk_buff *skb,
2467			       struct virtio_net_hdr *vnet_hdr)
2468{
2469	if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
2470		u16 s = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start);
2471		u16 o = __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset);
2472
2473		if (!skb_partial_csum_set(skb, s, o))
2474			return -EINVAL;
2475	}
2476
2477	skb_shinfo(skb)->gso_size =
2478		__virtio16_to_cpu(vio_le(), vnet_hdr->gso_size);
2479	skb_shinfo(skb)->gso_type = vnet_hdr->gso_type;
2480
2481	/* Header must be checked, and gso_segs computed. */
2482	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
2483	skb_shinfo(skb)->gso_segs = 0;
2484	return 0;
2485}
2486
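/* Build an outgoing skb from a TX ring frame: construct or copy the
 * link-layer header and attach the rest of the frame as page fragments.
 */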
2487static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2488		void *frame, struct net_device *dev, void *data, int tp_len,
2489		__be16 proto, unsigned char *addr, int hlen, int copylen)
2490{
2491	union tpacket_uhdr ph;
2492	int to_write, offset, len, nr_frags, len_max;
2493	struct socket *sock = po->sk.sk_socket;
2494	struct page *page;
2495	int err;
2496
2497	ph.raw = frame;
2498
2499	skb->protocol = proto;
2500	skb->dev = dev;
2501	skb->priority = po->sk.sk_priority;
2502	skb->mark = po->sk.sk_mark;
2503	sock_tx_timestamp(&po->sk, &skb_shinfo(skb)->tx_flags);
2504	skb_shinfo(skb)->destructor_arg = ph.raw;
2505
2506	skb_reserve(skb, hlen);
2507	skb_reset_network_header(skb);
2508
2509	to_write = tp_len;
2510
2511	if (sock->type == SOCK_DGRAM) {
2512		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2513				NULL, tp_len);
2514		if (unlikely(err < 0))
2515			return -EINVAL;
2516	} else if (copylen) {
2517		int hdrlen = min_t(int, copylen, tp_len);
2518
2519		skb_push(skb, dev->hard_header_len);
2520		skb_put(skb, copylen - dev->hard_header_len);
2521		err = skb_store_bits(skb, 0, data, hdrlen);
2522		if (unlikely(err))
2523			return err;
2524		if (!dev_validate_header(dev, skb->data, hdrlen))
2525			return -EINVAL;
2526		if (!skb->protocol)
2527			tpacket_set_protocol(dev, skb);
2528
2529		data += hdrlen;
2530		to_write -= hdrlen;
2531	}
2532
2533	offset = offset_in_page(data);
2534	len_max = PAGE_SIZE - offset;
2535	len = ((to_write > len_max) ? len_max : to_write);
2536
2537	skb->data_len = to_write;
2538	skb->len += to_write;
2539	skb->truesize += to_write;
2540	atomic_add(to_write, &po->sk.sk_wmem_alloc);
2541
2542	while (likely(to_write)) {
2543		nr_frags = skb_shinfo(skb)->nr_frags;
2544
2545		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2546			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2547			       MAX_SKB_FRAGS);
2548			return -EFAULT;
2549		}
2550
2551		page = pgv_to_page(data);
2552		data += len;
2553		flush_dcache_page(page);
2554		get_page(page);
2555		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2556		to_write -= len;
2557		offset = 0;
2558		len_max = PAGE_SIZE;
2559		len = ((to_write > len_max) ? len_max : to_write);
2560	}
2561
2562	skb_probe_transport_header(skb, 0);
2563
2564	return tp_len;
2565}
2566
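/* Read the frame length and the data offset from a TX ring slot, honoring
 * PACKET_TX_HAS_OFF if it is enabled.
 */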
2567static int tpacket_parse_header(struct packet_sock *po, void *frame,
2568				int size_max, void **data)
2569{
2570	union tpacket_uhdr ph;
2571	int tp_len, off;
2572
2573	ph.raw = frame;
2574
2575	switch (po->tp_version) {
2576	case TPACKET_V2:
2577		tp_len = ph.h2->tp_len;
2578		break;
2579	default:
2580		tp_len = ph.h1->tp_len;
2581		break;
2582	}
2583	if (unlikely(tp_len > size_max)) {
2584		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2585		return -EMSGSIZE;
2586	}
2587
2588	if (unlikely(po->tp_tx_has_off)) {
2589		int off_min, off_max;
2590
2591		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2592		off_max = po->tx_ring.frame_size - tp_len;
2593		if (po->sk.sk_type == SOCK_DGRAM) {
2594			switch (po->tp_version) {
2595			case TPACKET_V2:
2596				off = ph.h2->tp_net;
2597				break;
2598			default:
2599				off = ph.h1->tp_net;
2600				break;
2601			}
2602		} else {
2603			switch (po->tp_version) {
2604			case TPACKET_V2:
2605				off = ph.h2->tp_mac;
2606				break;
2607			default:
2608				off = ph.h1->tp_mac;
2609				break;
2610			}
2611		}
2612		if (unlikely((off < off_min) || (off_max < off)))
2613			return -EINVAL;
2614	} else {
2615		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2616	}
2617
2618	*data = frame + off;
2619	return tp_len;
2620}
2621
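/* Transmit path for sockets with a PACKET_TX_RING mapped: walk the ring,
 * turn each TP_STATUS_SEND_REQUEST frame into an skb and hand it to the
 * device, updating the slot status as the frames complete.
 */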
2622static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2623{
2624	struct sk_buff *skb;
2625	struct net_device *dev;
2626	struct virtio_net_hdr *vnet_hdr = NULL;
2627	__be16 proto;
2628	int err, reserve = 0;
2629	void *ph;
2630	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2631	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2632	int tp_len, size_max;
2633	unsigned char *addr;
2634	void *data;
2635	int len_sum = 0;
2636	int status = TP_STATUS_AVAILABLE;
2637	int hlen, tlen, copylen = 0;
2638
2639	mutex_lock(&po->pg_vec_lock);
2640
2641	if (likely(saddr == NULL)) {
2642		dev	= packet_cached_dev_get(po);
2643		proto	= po->num;
2644		addr	= NULL;
2645	} else {
2646		err = -EINVAL;
2647		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2648			goto out;
2649		if (msg->msg_namelen < (saddr->sll_halen
2650					+ offsetof(struct sockaddr_ll,
2651						sll_addr)))
2652			goto out;
2653		proto	= saddr->sll_protocol;
2654		addr	= saddr->sll_addr;
2655		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2656	}
2657
2658	err = -ENXIO;
2659	if (unlikely(dev == NULL))
2660		goto out;
2661	err = -ENETDOWN;
2662	if (unlikely(!(dev->flags & IFF_UP)))
2663		goto out_put;
2664
2665	if (po->sk.sk_socket->type == SOCK_RAW)
2666		reserve = dev->hard_header_len;
2667	size_max = po->tx_ring.frame_size
2668		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2669
2670	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2671		size_max = dev->mtu + reserve + VLAN_HLEN;
2672
2673	do {
2674		ph = packet_current_frame(po, &po->tx_ring,
2675					  TP_STATUS_SEND_REQUEST);
2676		if (unlikely(ph == NULL)) {
2677			if (need_wait && need_resched())
2678				schedule();
2679			continue;
2680		}
2681
2682		skb = NULL;
2683		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2684		if (tp_len < 0)
2685			goto tpacket_error;
2686
2687		status = TP_STATUS_SEND_REQUEST;
2688		hlen = LL_RESERVED_SPACE(dev);
2689		tlen = dev->needed_tailroom;
2690		if (po->has_vnet_hdr) {
2691			vnet_hdr = data;
2692			data += sizeof(*vnet_hdr);
2693			tp_len -= sizeof(*vnet_hdr);
2694			if (tp_len < 0 ||
2695			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2696				tp_len = -EINVAL;
2697				goto tpacket_error;
2698			}
2699			copylen = __virtio16_to_cpu(vio_le(),
2700						    vnet_hdr->hdr_len);
2701		}
2702		copylen = max_t(int, copylen, dev->hard_header_len);
2703		skb = sock_alloc_send_skb(&po->sk,
2704				hlen + tlen + sizeof(struct sockaddr_ll) +
2705				(copylen - dev->hard_header_len),
2706				!need_wait, &err);
2707
2708		if (unlikely(skb == NULL)) {
2709			/* we assume the socket was initially writeable ... */
2710			if (likely(len_sum > 0))
2711				err = len_sum;
2712			goto out_status;
2713		}
2714		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2715					  addr, hlen, copylen);
2716		if (likely(tp_len >= 0) &&
2717		    tp_len > dev->mtu + reserve &&
2718		    !po->has_vnet_hdr &&
2719		    !packet_extra_vlan_len_allowed(dev, skb))
2720			tp_len = -EMSGSIZE;
2721
2722		if (unlikely(tp_len < 0)) {
2723tpacket_error:
2724			if (po->tp_loss) {
2725				__packet_set_status(po, ph,
2726						TP_STATUS_AVAILABLE);
2727				packet_increment_head(&po->tx_ring);
2728				kfree_skb(skb);
2729				continue;
2730			} else {
2731				status = TP_STATUS_WRONG_FORMAT;
2732				err = tp_len;
2733				goto out_status;
2734			}
2735		}
2736
2737		if (po->has_vnet_hdr && packet_snd_vnet_gso(skb, vnet_hdr)) {
2738			tp_len = -EINVAL;
2739			goto tpacket_error;
2740		}
2741
2742		packet_pick_tx_queue(dev, skb);
2743
2744		skb->destructor = tpacket_destruct_skb;
2745		__packet_set_status(po, ph, TP_STATUS_SENDING);
2746		packet_inc_pending(&po->tx_ring);
2747
2748		status = TP_STATUS_SEND_REQUEST;
2749		err = po->xmit(skb);
2750		if (unlikely(err > 0)) {
2751			err = net_xmit_errno(err);
2752			if (err && __packet_get_status(po, ph) ==
2753				   TP_STATUS_AVAILABLE) {
2754				/* skb was destructed already */
2755				skb = NULL;
2756				goto out_status;
2757			}
2758			/*
2759			 * skb was dropped but not destructed yet;
2760			 * let's treat it like congestion or err < 0
2761			 */
2762			err = 0;
2763		}
2764		packet_increment_head(&po->tx_ring);
2765		len_sum += tp_len;
2766	} while (likely((ph != NULL) ||
2767		/* Note: packet_read_pending() might be slow if we have
2768		 * to call it, as it's a per-CPU variable, but in the fast path
2769		 * we already short-circuit the loop with the first
2770		 * condition, and luckily don't have to take that path
2771		 * anyway.
2772		 */
2773		 (need_wait && packet_read_pending(&po->tx_ring))));
2774
2775	err = len_sum;
2776	goto out_put;
2777
2778out_status:
2779	__packet_set_status(po, ph, status);
2780	kfree_skb(skb);
2781out_put:
2782	dev_put(dev);
2783out:
2784	mutex_unlock(&po->pg_vec_lock);
2785	return err;
2786}
2787
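/* Allocate an skb for the non-ring transmit path, keeping up to 'linear'
 * bytes in the linear area and the remainder in page fragments.
 */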
2788static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2789				        size_t reserve, size_t len,
2790				        size_t linear, int noblock,
2791				        int *err)
2792{
2793	struct sk_buff *skb;
2794
2795	/* Under a page?  Don't bother with paged skb. */
2796	if (prepad + len < PAGE_SIZE || !linear)
2797		linear = len;
2798
2799	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2800				   err, 0);
2801	if (!skb)
2802		return NULL;
2803
2804	skb_reserve(skb, reserve);
2805	skb_put(skb, linear);
2806	skb->data_len = len - linear;
2807	skb->len += len - linear;
2808
2809	return skb;
2810}
2811
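/* Transmit path for sockets without a TX ring: build a single skb from the
 * msghdr and send it via po->xmit().
 */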
2812static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2813{
2814	struct sock *sk = sock->sk;
2815	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2816	struct sk_buff *skb;
2817	struct net_device *dev;
2818	__be16 proto;
2819	unsigned char *addr;
2820	int err, reserve = 0;
2821	struct sockcm_cookie sockc;
2822	struct virtio_net_hdr vnet_hdr = { 0 };
2823	int offset = 0;
2824	struct packet_sock *po = pkt_sk(sk);
2825	int hlen, tlen;
2826	int extra_len = 0;
2827
2828	/*
2829	 *	Get and verify the address.
2830	 */
2831
2832	if (likely(saddr == NULL)) {
2833		dev	= packet_cached_dev_get(po);
2834		proto	= po->num;
2835		addr	= NULL;
2836	} else {
2837		err = -EINVAL;
2838		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2839			goto out;
2840		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2841			goto out;
2842		proto	= saddr->sll_protocol;
2843		addr	= saddr->sll_addr;
2844		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2845	}
2846
2847	err = -ENXIO;
2848	if (unlikely(dev == NULL))
2849		goto out_unlock;
2850	err = -ENETDOWN;
2851	if (unlikely(!(dev->flags & IFF_UP)))
2852		goto out_unlock;
2853
2854	sockc.mark = sk->sk_mark;
2855	if (msg->msg_controllen) {
2856		err = sock_cmsg_send(sk, msg, &sockc);
2857		if (unlikely(err))
2858			goto out_unlock;
2859	}
2860
2861	if (sock->type == SOCK_RAW)
2862		reserve = dev->hard_header_len;
2863	if (po->has_vnet_hdr) {
2864		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2865		if (err)
2866			goto out_unlock;
2867	}
2868
2869	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2870		if (!netif_supports_nofcs(dev)) {
2871			err = -EPROTONOSUPPORT;
2872			goto out_unlock;
2873		}
2874		extra_len = 4; /* We're doing our own CRC */
2875	}
2876
2877	err = -EMSGSIZE;
2878	if (!vnet_hdr.gso_type &&
2879	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2880		goto out_unlock;
2881
2882	err = -ENOBUFS;
2883	hlen = LL_RESERVED_SPACE(dev);
2884	tlen = dev->needed_tailroom;
2885	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len,
2886			       __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len),
2887			       msg->msg_flags & MSG_DONTWAIT, &err);
2888	if (skb == NULL)
2889		goto out_unlock;
2890
2891	skb_set_network_header(skb, reserve);
2892
2893	err = -EINVAL;
2894	if (sock->type == SOCK_DGRAM) {
2895		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2896		if (unlikely(offset < 0))
2897			goto out_free;
2898	}
2899
2900	/* Returns -EFAULT on error */
2901	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2902	if (err)
2903		goto out_free;
2904
2905	if (sock->type == SOCK_RAW &&
2906	    !dev_validate_header(dev, skb->data, len)) {
2907		err = -EINVAL;
2908		goto out_free;
2909	}
2910
2911	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
2912
2913	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2914	    !packet_extra_vlan_len_allowed(dev, skb)) {
2915		err = -EMSGSIZE;
2916		goto out_free;
2917	}
2918
2919	skb->protocol = proto;
2920	skb->dev = dev;
2921	skb->priority = sk->sk_priority;
2922	skb->mark = sockc.mark;
2923
2924	packet_pick_tx_queue(dev, skb);
2925
2926	if (po->has_vnet_hdr) {
2927		err = packet_snd_vnet_gso(skb, &vnet_hdr);
2928		if (err)
2929			goto out_free;
2930		len += sizeof(vnet_hdr);
2931	}
2932
2933	skb_probe_transport_header(skb, reserve);
2934
2935	if (unlikely(extra_len == 4))
2936		skb->no_fcs = 1;
2937
2938	err = po->xmit(skb);
2939	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2940		goto out_unlock;
2941
2942	dev_put(dev);
2943
2944	return len;
2945
2946out_free:
2947	kfree_skb(skb);
2948out_unlock:
2949	if (dev)
2950		dev_put(dev);
2951out:
2952	return err;
2953}
2954
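/* Dispatch a send to the TX ring path or the plain path, depending on
 * whether a PACKET_TX_RING has been set up.
 */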
2955static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2956{
2957	struct sock *sk = sock->sk;
2958	struct packet_sock *po = pkt_sk(sk);
2959
2960	if (po->tx_ring.pg_vec)
2961		return tpacket_snd(po, msg);
2962	else
2963		return packet_snd(sock, msg, len);
2964}
2965
2966/*
2967 *	Close a PACKET socket. This is fairly simple. We immediately go
2968 *	to the 'closed' state and remove our protocol entry from the device list.
2969 */
2970
2971static int packet_release(struct socket *sock)
2972{
2973	struct sock *sk = sock->sk;
2974	struct packet_sock *po;
2975	struct net *net;
2976	union tpacket_req_u req_u;
2977
2978	if (!sk)
2979		return 0;
2980
2981	net = sock_net(sk);
2982	po = pkt_sk(sk);
2983
2984	mutex_lock(&net->packet.sklist_lock);
2985	sk_del_node_init_rcu(sk);
2986	mutex_unlock(&net->packet.sklist_lock);
2987
2988	preempt_disable();
2989	sock_prot_inuse_add(net, sk->sk_prot, -1);
2990	preempt_enable();
2991
2992	spin_lock(&po->bind_lock);
2993	unregister_prot_hook(sk, false);
2994	packet_cached_dev_reset(po);
2995
2996	if (po->prot_hook.dev) {
2997		dev_put(po->prot_hook.dev);
2998		po->prot_hook.dev = NULL;
2999	}
3000	spin_unlock(&po->bind_lock);
3001
3002	packet_flush_mclist(sk);
3003
3004	if (po->rx_ring.pg_vec) {
3005		memset(&req_u, 0, sizeof(req_u));
3006		packet_set_ring(sk, &req_u, 1, 0);
3007	}
3008
3009	if (po->tx_ring.pg_vec) {
3010		memset(&req_u, 0, sizeof(req_u));
3011		packet_set_ring(sk, &req_u, 1, 1);
3012	}
3013
3014	fanout_release(sk);
3015
3016	synchronize_net();
3017	/*
3018	 *	Now the socket is dead. No more input will appear.
3019	 */
3020	sock_orphan(sk);
3021	sock->sk = NULL;
3022
3023	/* Purge queues */
3024
3025	skb_queue_purge(&sk->sk_receive_queue);
3026	packet_free_pending(po);
3027	sk_refcnt_debug_release(sk);
3028
3029	sock_put(sk);
3030	return 0;
3031}
3032
3033/*
3034 *	Attach a packet hook.
3035 */
3036
3037static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3038			  __be16 proto)
3039{
3040	struct packet_sock *po = pkt_sk(sk);
3041	struct net_device *dev_curr;
3042	__be16 proto_curr;
3043	bool need_rehook;
3044	struct net_device *dev = NULL;
3045	int ret = 0;
3046	bool unlisted = false;
3047
3048	if (po->fanout)
3049		return -EINVAL;
3050
3051	lock_sock(sk);
3052	spin_lock(&po->bind_lock);
3053	rcu_read_lock();
3054
3055	if (name) {
3056		dev = dev_get_by_name_rcu(sock_net(sk), name);
3057		if (!dev) {
3058			ret = -ENODEV;
3059			goto out_unlock;
3060		}
3061	} else if (ifindex) {
3062		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3063		if (!dev) {
3064			ret = -ENODEV;
3065			goto out_unlock;
3066		}
3067	}
3068
3069	if (dev)
3070		dev_hold(dev);
3071
3072	proto_curr = po->prot_hook.type;
3073	dev_curr = po->prot_hook.dev;
3074
3075	need_rehook = proto_curr != proto || dev_curr != dev;
3076
3077	if (need_rehook) {
3078		if (po->running) {
3079			rcu_read_unlock();
3080			__unregister_prot_hook(sk, true);
3081			rcu_read_lock();
3082			dev_curr = po->prot_hook.dev;
3083			if (dev)
3084				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3085								 dev->ifindex);
3086		}
3087
3088		po->num = proto;
3089		po->prot_hook.type = proto;
3090
3091		if (unlikely(unlisted)) {
3092			dev_put(dev);
3093			po->prot_hook.dev = NULL;
3094			po->ifindex = -1;
3095			packet_cached_dev_reset(po);
3096		} else {
3097			po->prot_hook.dev = dev;
3098			po->ifindex = dev ? dev->ifindex : 0;
3099			packet_cached_dev_assign(po, dev);
3100		}
3101	}
3102	if (dev_curr)
3103		dev_put(dev_curr);
3104
3105	if (proto == 0 || !need_rehook)
3106		goto out_unlock;
3107
3108	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3109		register_prot_hook(sk);
3110	} else {
3111		sk->sk_err = ENETDOWN;
3112		if (!sock_flag(sk, SOCK_DEAD))
3113			sk->sk_error_report(sk);
3114	}
3115
3116out_unlock:
3117	rcu_read_unlock();
3118	spin_unlock(&po->bind_lock);
3119	release_sock(sk);
3120	return ret;
3121}
3122
3123/*
3124 *	Bind a packet socket to a device
3125 */
3126
3127static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3128			    int addr_len)
3129{
3130	struct sock *sk = sock->sk;
3131	char name[15];
3132
3133	/*
3134	 *	Check legality
3135	 */
3136
3137	if (addr_len != sizeof(struct sockaddr))
3138		return -EINVAL;
3139	strlcpy(name, uaddr->sa_data, sizeof(name));
3140
3141	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3142}
3143
3144static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3145{
3146	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3147	struct sock *sk = sock->sk;
3148
3149	/*
3150	 *	Check legality
3151	 */
3152
3153	if (addr_len < sizeof(struct sockaddr_ll))
3154		return -EINVAL;
3155	if (sll->sll_family != AF_PACKET)
3156		return -EINVAL;
3157
3158	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3159			      sll->sll_protocol ? : pkt_sk(sk)->num);
3160}
3161
3162static struct proto packet_proto = {
3163	.name	  = "PACKET",
3164	.owner	  = THIS_MODULE,
3165	.obj_size = sizeof(struct packet_sock),
3166};
3167
3168/*
3169 *	Create a packet socket (SOCK_DGRAM, SOCK_RAW or SOCK_PACKET).
3170 */
3171
3172static int packet_create(struct net *net, struct socket *sock, int protocol,
3173			 int kern)
3174{
3175	struct sock *sk;
3176	struct packet_sock *po;
3177	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3178	int err;
3179
3180	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3181		return -EPERM;
3182	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3183	    sock->type != SOCK_PACKET)
3184		return -ESOCKTNOSUPPORT;
3185
3186	sock->state = SS_UNCONNECTED;
3187
3188	err = -ENOBUFS;
3189	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3190	if (sk == NULL)
3191		goto out;
3192
3193	sock->ops = &packet_ops;
3194	if (sock->type == SOCK_PACKET)
3195		sock->ops = &packet_ops_spkt;
3196
3197	sock_init_data(sock, sk);
3198
3199	po = pkt_sk(sk);
3200	sk->sk_family = PF_PACKET;
3201	po->num = proto;
3202	po->xmit = dev_queue_xmit;
3203
3204	err = packet_alloc_pending(po);
3205	if (err)
3206		goto out2;
3207
3208	packet_cached_dev_reset(po);
3209
3210	sk->sk_destruct = packet_sock_destruct;
3211	sk_refcnt_debug_inc(sk);
3212
3213	/*
3214	 *	Attach a protocol block
3215	 */
3216
3217	spin_lock_init(&po->bind_lock);
3218	mutex_init(&po->pg_vec_lock);
3219	po->rollover = NULL;
3220	po->prot_hook.func = packet_rcv;
3221
3222	if (sock->type == SOCK_PACKET)
3223		po->prot_hook.func = packet_rcv_spkt;
3224
3225	po->prot_hook.af_packet_priv = sk;
3226
3227	if (proto) {
3228		po->prot_hook.type = proto;
3229		register_prot_hook(sk);
3230	}
3231
3232	mutex_lock(&net->packet.sklist_lock);
3233	sk_add_node_rcu(sk, &net->packet.sklist);
3234	mutex_unlock(&net->packet.sklist_lock);
3235
3236	preempt_disable();
3237	sock_prot_inuse_add(net, &packet_proto, 1);
3238	preempt_enable();
3239
3240	return 0;
3241out2:
3242	sk_free(sk);
3243out:
3244	return err;
3245}
3246
3247/*
3248 *	Pull a packet from our receive queue and hand it to the user.
3249 *	If necessary we block.
3250 */
3251
3252static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3253			  int flags)
3254{
3255	struct sock *sk = sock->sk;
3256	struct sk_buff *skb;
3257	int copied, err;
3258	int vnet_hdr_len = 0;
3259	unsigned int origlen = 0;
3260
3261	err = -EINVAL;
3262	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3263		goto out;
3264
3265#if 0
3266	/* What error should we return now? EUNATTACH? */
3267	if (pkt_sk(sk)->ifindex < 0)
3268		return -ENODEV;
3269#endif
3270
3271	if (flags & MSG_ERRQUEUE) {
3272		err = sock_recv_errqueue(sk, msg, len,
3273					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3274		goto out;
3275	}
3276
3277	/*
3278	 *	Call the generic datagram receiver. This handles all sorts
3279	 *	of horrible races and re-entrancy so we can forget about it
3280	 *	in the protocol layers.
3281	 *
3282	 *	Now it will return ENETDOWN if the device has just gone down,
3283	 *	but then it will block.
3284	 */
3285
3286	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3287
3288	/*
3289	 *	An error occurred, so return it. Because skb_recv_datagram()
3290	 *	handles the blocking, we don't need to see or worry about
3291	 *	blocking retries.
3292	 */
3293
3294	if (skb == NULL)
3295		goto out;
3296
3297	if (pkt_sk(sk)->pressure)
3298		packet_rcv_has_room(pkt_sk(sk), NULL);
3299
3300	if (pkt_sk(sk)->has_vnet_hdr) {
3301		err = packet_rcv_vnet(msg, skb, &len);
3302		if (err)
3303			goto out_free;
3304		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3305	}
3306
3307	/* You lose any data beyond the buffer you gave. If this worries
3308	 * a user program, it can ask the device for its MTU
3309	 * anyway.
3310	 */
3311	copied = skb->len;
3312	if (copied > len) {
3313		copied = len;
3314		msg->msg_flags |= MSG_TRUNC;
3315	}
3316
3317	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3318	if (err)
3319		goto out_free;
3320
3321	if (sock->type != SOCK_PACKET) {
3322		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3323
3324		/* Original length was stored in sockaddr_ll fields */
3325		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3326		sll->sll_family = AF_PACKET;
3327		sll->sll_protocol = skb->protocol;
3328	}
3329
3330	sock_recv_ts_and_drops(msg, sk, skb);
3331
3332	if (msg->msg_name) {
3333		/* If the address length field is there to be filled
3334		 * in, we fill it in now.
3335		 */
3336		if (sock->type == SOCK_PACKET) {
3337			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3338			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3339		} else {
3340			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3341
3342			msg->msg_namelen = sll->sll_halen +
3343				offsetof(struct sockaddr_ll, sll_addr);
3344		}
3345		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3346		       msg->msg_namelen);
3347	}
3348
3349	if (pkt_sk(sk)->auxdata) {
3350		struct tpacket_auxdata aux;
3351
3352		aux.tp_status = TP_STATUS_USER;
3353		if (skb->ip_summed == CHECKSUM_PARTIAL)
3354			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3355		else if (skb->pkt_type != PACKET_OUTGOING &&
3356			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3357			  skb_csum_unnecessary(skb)))
3358			aux.tp_status |= TP_STATUS_CSUM_VALID;
3359
3360		aux.tp_len = origlen;
3361		aux.tp_snaplen = skb->len;
3362		aux.tp_mac = 0;
3363		aux.tp_net = skb_network_offset(skb);
3364		if (skb_vlan_tag_present(skb)) {
3365			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3366			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3367			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3368		} else {
3369			aux.tp_vlan_tci = 0;
3370			aux.tp_vlan_tpid = 0;
3371		}
3372		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3373	}
3374
3375	/*
3376	 *	Free or return the buffer as appropriate. Again this
3377	 *	hides all the races and re-entrancy issues from us.
3378	 */
3379	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3380
3381out_free:
3382	skb_free_datagram(sk, skb);
3383out:
3384	return err;
3385}
3386
3387static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3388			       int *uaddr_len, int peer)
3389{
3390	struct net_device *dev;
3391	struct sock *sk	= sock->sk;
3392
3393	if (peer)
3394		return -EOPNOTSUPP;
3395
3396	uaddr->sa_family = AF_PACKET;
3397	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3398	rcu_read_lock();
3399	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3400	if (dev)
3401		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3402	rcu_read_unlock();
3403	*uaddr_len = sizeof(*uaddr);
3404
3405	return 0;
3406}
3407
3408static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3409			  int *uaddr_len, int peer)
3410{
3411	struct net_device *dev;
3412	struct sock *sk = sock->sk;
3413	struct packet_sock *po = pkt_sk(sk);
3414	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3415
3416	if (peer)
3417		return -EOPNOTSUPP;
3418
3419	sll->sll_family = AF_PACKET;
3420	sll->sll_ifindex = po->ifindex;
3421	sll->sll_protocol = po->num;
3422	sll->sll_pkttype = 0;
3423	rcu_read_lock();
3424	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3425	if (dev) {
3426		sll->sll_hatype = dev->type;
3427		sll->sll_halen = dev->addr_len;
3428		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3429	} else {
3430		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3431		sll->sll_halen = 0;
3432	}
3433	rcu_read_unlock();
3434	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3435
3436	return 0;
3437}
3438
3439static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3440			 int what)
3441{
3442	switch (i->type) {
3443	case PACKET_MR_MULTICAST:
3444		if (i->alen != dev->addr_len)
3445			return -EINVAL;
3446		if (what > 0)
3447			return dev_mc_add(dev, i->addr);
3448		else
3449			return dev_mc_del(dev, i->addr);
3450		break;
3451	case PACKET_MR_PROMISC:
3452		return dev_set_promiscuity(dev, what);
3453	case PACKET_MR_ALLMULTI:
3454		return dev_set_allmulti(dev, what);
3455	case PACKET_MR_UNICAST:
3456		if (i->alen != dev->addr_len)
3457			return -EINVAL;
3458		if (what > 0)
3459			return dev_uc_add(dev, i->addr);
3460		else
3461			return dev_uc_del(dev, i->addr);
3462		break;
3463	default:
3464		break;
3465	}
3466	return 0;
3467}
3468
3469static void packet_dev_mclist_delete(struct net_device *dev,
3470				     struct packet_mclist **mlp)
3471{
3472	struct packet_mclist *ml;
3473
3474	while ((ml = *mlp) != NULL) {
3475		if (ml->ifindex == dev->ifindex) {
3476			packet_dev_mc(dev, ml, -1);
3477			*mlp = ml->next;
3478			kfree(ml);
3479		} else
3480			mlp = &ml->next;
3481	}
3482}
3483
3484static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3485{
3486	struct packet_sock *po = pkt_sk(sk);
3487	struct packet_mclist *ml, *i;
3488	struct net_device *dev;
3489	int err;
3490
3491	rtnl_lock();
3492
3493	err = -ENODEV;
3494	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3495	if (!dev)
3496		goto done;
3497
3498	err = -EINVAL;
3499	if (mreq->mr_alen > dev->addr_len)
3500		goto done;
3501
3502	err = -ENOBUFS;
3503	i = kmalloc(sizeof(*i), GFP_KERNEL);
3504	if (i == NULL)
3505		goto done;
3506
3507	err = 0;
3508	for (ml = po->mclist; ml; ml = ml->next) {
3509		if (ml->ifindex == mreq->mr_ifindex &&
3510		    ml->type == mreq->mr_type &&
3511		    ml->alen == mreq->mr_alen &&
3512		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3513			ml->count++;
3514			/* Free the new element ... */
3515			kfree(i);
3516			goto done;
3517		}
3518	}
3519
3520	i->type = mreq->mr_type;
3521	i->ifindex = mreq->mr_ifindex;
3522	i->alen = mreq->mr_alen;
3523	memcpy(i->addr, mreq->mr_address, i->alen);
3524	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3525	i->count = 1;
3526	i->next = po->mclist;
3527	po->mclist = i;
3528	err = packet_dev_mc(dev, i, 1);
3529	if (err) {
3530		po->mclist = i->next;
3531		kfree(i);
3532	}
3533
3534done:
3535	rtnl_unlock();
3536	return err;
3537}
3538
3539static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3540{
3541	struct packet_mclist *ml, **mlp;
3542
3543	rtnl_lock();
3544
3545	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3546		if (ml->ifindex == mreq->mr_ifindex &&
3547		    ml->type == mreq->mr_type &&
3548		    ml->alen == mreq->mr_alen &&
3549		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3550			if (--ml->count == 0) {
3551				struct net_device *dev;
3552				*mlp = ml->next;
3553				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3554				if (dev)
3555					packet_dev_mc(dev, ml, -1);
3556				kfree(ml);
3557			}
3558			break;
3559		}
3560	}
3561	rtnl_unlock();
3562	return 0;
3563}
3564
3565static void packet_flush_mclist(struct sock *sk)
3566{
3567	struct packet_sock *po = pkt_sk(sk);
3568	struct packet_mclist *ml;
3569
3570	if (!po->mclist)
3571		return;
3572
3573	rtnl_lock();
3574	while ((ml = po->mclist) != NULL) {
3575		struct net_device *dev;
3576
3577		po->mclist = ml->next;
3578		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3579		if (dev != NULL)
3580			packet_dev_mc(dev, ml, -1);
3581		kfree(ml);
3582	}
3583	rtnl_unlock();
3584}
3585
3586static int
3587packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3588{
3589	struct sock *sk = sock->sk;
3590	struct packet_sock *po = pkt_sk(sk);
3591	int ret;
3592
3593	if (level != SOL_PACKET)
3594		return -ENOPROTOOPT;
3595
3596	switch (optname) {
3597	case PACKET_ADD_MEMBERSHIP:
3598	case PACKET_DROP_MEMBERSHIP:
3599	{
3600		struct packet_mreq_max mreq;
3601		int len = optlen;
3602		memset(&mreq, 0, sizeof(mreq));
3603		if (len < sizeof(struct packet_mreq))
3604			return -EINVAL;
3605		if (len > sizeof(mreq))
3606			len = sizeof(mreq);
3607		if (copy_from_user(&mreq, optval, len))
3608			return -EFAULT;
3609		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3610			return -EINVAL;
3611		if (optname == PACKET_ADD_MEMBERSHIP)
3612			ret = packet_mc_add(sk, &mreq);
3613		else
3614			ret = packet_mc_drop(sk, &mreq);
3615		return ret;
3616	}
3617
3618	case PACKET_RX_RING:
3619	case PACKET_TX_RING:
3620	{
3621		union tpacket_req_u req_u;
3622		int len;
3623
3624		switch (po->tp_version) {
3625		case TPACKET_V1:
3626		case TPACKET_V2:
3627			len = sizeof(req_u.req);
3628			break;
3629		case TPACKET_V3:
3630		default:
3631			len = sizeof(req_u.req3);
3632			break;
3633		}
3634		if (optlen < len)
3635			return -EINVAL;
3636		if (copy_from_user(&req_u.req, optval, len))
3637			return -EFAULT;
3638		return packet_set_ring(sk, &req_u, 0,
3639			optname == PACKET_TX_RING);
3640	}
3641	case PACKET_COPY_THRESH:
3642	{
3643		int val;
3644
3645		if (optlen != sizeof(val))
3646			return -EINVAL;
3647		if (copy_from_user(&val, optval, sizeof(val)))
3648			return -EFAULT;
3649
3650		pkt_sk(sk)->copy_thresh = val;
3651		return 0;
3652	}
3653	case PACKET_VERSION:
3654	{
3655		int val;
3656
3657		if (optlen != sizeof(val))
3658			return -EINVAL;
3659		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3660			return -EBUSY;
3661		if (copy_from_user(&val, optval, sizeof(val)))
3662			return -EFAULT;
3663		switch (val) {
3664		case TPACKET_V1:
3665		case TPACKET_V2:
3666		case TPACKET_V3:
3667			po->tp_version = val;
3668			return 0;
3669		default:
3670			return -EINVAL;
3671		}
3672	}
3673	case PACKET_RESERVE:
3674	{
3675		unsigned int val;
3676
3677		if (optlen != sizeof(val))
3678			return -EINVAL;
3679		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3680			return -EBUSY;
3681		if (copy_from_user(&val, optval, sizeof(val)))
3682			return -EFAULT;
3683		po->tp_reserve = val;
3684		return 0;
3685	}
3686	case PACKET_LOSS:
3687	{
3688		unsigned int val;
3689
3690		if (optlen != sizeof(val))
3691			return -EINVAL;
3692		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3693			return -EBUSY;
3694		if (copy_from_user(&val, optval, sizeof(val)))
3695			return -EFAULT;
3696		po->tp_loss = !!val;
3697		return 0;
3698	}
3699	case PACKET_AUXDATA:
3700	{
3701		int val;
3702
3703		if (optlen < sizeof(val))
3704			return -EINVAL;
3705		if (copy_from_user(&val, optval, sizeof(val)))
3706			return -EFAULT;
3707
3708		po->auxdata = !!val;
3709		return 0;
3710	}
3711	case PACKET_ORIGDEV:
3712	{
3713		int val;
3714
3715		if (optlen < sizeof(val))
3716			return -EINVAL;
3717		if (copy_from_user(&val, optval, sizeof(val)))
3718			return -EFAULT;
3719
3720		po->origdev = !!val;
3721		return 0;
3722	}
3723	case PACKET_VNET_HDR:
3724	{
3725		int val;
3726
3727		if (sock->type != SOCK_RAW)
3728			return -EINVAL;
3729		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3730			return -EBUSY;
3731		if (optlen < sizeof(val))
3732			return -EINVAL;
3733		if (copy_from_user(&val, optval, sizeof(val)))
3734			return -EFAULT;
3735
3736		po->has_vnet_hdr = !!val;
3737		return 0;
3738	}
3739	case PACKET_TIMESTAMP:
3740	{
3741		int val;
3742
3743		if (optlen != sizeof(val))
3744			return -EINVAL;
3745		if (copy_from_user(&val, optval, sizeof(val)))
3746			return -EFAULT;
3747
3748		po->tp_tstamp = val;
3749		return 0;
3750	}
3751	case PACKET_FANOUT:
3752	{
3753		int val;
3754
3755		if (optlen != sizeof(val))
3756			return -EINVAL;
3757		if (copy_from_user(&val, optval, sizeof(val)))
3758			return -EFAULT;
3759
3760		return fanout_add(sk, val & 0xffff, val >> 16);
3761	}
3762	case PACKET_FANOUT_DATA:
3763	{
3764		if (!po->fanout)
3765			return -EINVAL;
3766
3767		return fanout_set_data(po, optval, optlen);
3768	}
3769	case PACKET_TX_HAS_OFF:
3770	{
3771		unsigned int val;
3772
3773		if (optlen != sizeof(val))
3774			return -EINVAL;
3775		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3776			return -EBUSY;
3777		if (copy_from_user(&val, optval, sizeof(val)))
3778			return -EFAULT;
3779		po->tp_tx_has_off = !!val;
3780		return 0;
3781	}
3782	case PACKET_QDISC_BYPASS:
3783	{
3784		int val;
3785
3786		if (optlen != sizeof(val))
3787			return -EINVAL;
3788		if (copy_from_user(&val, optval, sizeof(val)))
3789			return -EFAULT;
3790
3791		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3792		return 0;
3793	}
3794	default:
3795		return -ENOPROTOOPT;
3796	}
3797}
3798
3799static int packet_getsockopt(struct socket *sock, int level, int optname,
3800			     char __user *optval, int __user *optlen)
3801{
3802	int len;
3803	int val, lv = sizeof(val);
3804	struct sock *sk = sock->sk;
3805	struct packet_sock *po = pkt_sk(sk);
3806	void *data = &val;
3807	union tpacket_stats_u st;
3808	struct tpacket_rollover_stats rstats;
3809
3810	if (level != SOL_PACKET)
3811		return -ENOPROTOOPT;
3812
3813	if (get_user(len, optlen))
3814		return -EFAULT;
3815
3816	if (len < 0)
3817		return -EINVAL;
3818
3819	switch (optname) {
3820	case PACKET_STATISTICS:
3821		spin_lock_bh(&sk->sk_receive_queue.lock);
3822		memcpy(&st, &po->stats, sizeof(st));
3823		memset(&po->stats, 0, sizeof(po->stats));
3824		spin_unlock_bh(&sk->sk_receive_queue.lock);
3825
3826		if (po->tp_version == TPACKET_V3) {
3827			lv = sizeof(struct tpacket_stats_v3);
3828			st.stats3.tp_packets += st.stats3.tp_drops;
3829			data = &st.stats3;
3830		} else {
3831			lv = sizeof(struct tpacket_stats);
3832			st.stats1.tp_packets += st.stats1.tp_drops;
3833			data = &st.stats1;
3834		}
3835
3836		break;
3837	case PACKET_AUXDATA:
3838		val = po->auxdata;
3839		break;
3840	case PACKET_ORIGDEV:
3841		val = po->origdev;
3842		break;
3843	case PACKET_VNET_HDR:
3844		val = po->has_vnet_hdr;
3845		break;
3846	case PACKET_VERSION:
3847		val = po->tp_version;
3848		break;
3849	case PACKET_HDRLEN:
3850		if (len > sizeof(int))
3851			len = sizeof(int);
3852		if (copy_from_user(&val, optval, len))
3853			return -EFAULT;
3854		switch (val) {
3855		case TPACKET_V1:
3856			val = sizeof(struct tpacket_hdr);
3857			break;
3858		case TPACKET_V2:
3859			val = sizeof(struct tpacket2_hdr);
3860			break;
3861		case TPACKET_V3:
3862			val = sizeof(struct tpacket3_hdr);
3863			break;
3864		default:
3865			return -EINVAL;
3866		}
3867		break;
3868	case PACKET_RESERVE:
3869		val = po->tp_reserve;
3870		break;
3871	case PACKET_LOSS:
3872		val = po->tp_loss;
3873		break;
3874	case PACKET_TIMESTAMP:
3875		val = po->tp_tstamp;
3876		break;
3877	case PACKET_FANOUT:
3878		val = (po->fanout ?
3879		       ((u32)po->fanout->id |
3880			((u32)po->fanout->type << 16) |
3881			((u32)po->fanout->flags << 24)) :
3882		       0);
3883		break;
3884	case PACKET_ROLLOVER_STATS:
3885		if (!po->rollover)
3886			return -EINVAL;
3887		rstats.tp_all = atomic_long_read(&po->rollover->num);
3888		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3889		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3890		data = &rstats;
3891		lv = sizeof(rstats);
3892		break;
3893	case PACKET_TX_HAS_OFF:
3894		val = po->tp_tx_has_off;
3895		break;
3896	case PACKET_QDISC_BYPASS:
3897		val = packet_use_direct_xmit(po);
3898		break;
3899	default:
3900		return -ENOPROTOOPT;
3901	}
3902
3903	if (len > lv)
3904		len = lv;
3905	if (put_user(len, optlen))
3906		return -EFAULT;
3907	if (copy_to_user(optval, data, len))
3908		return -EFAULT;
3909	return 0;
3910}
3911
3912
3913static int packet_notifier(struct notifier_block *this,
3914			   unsigned long msg, void *ptr)
3915{
3916	struct sock *sk;
3917	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3918	struct net *net = dev_net(dev);
3919
3920	rcu_read_lock();
3921	sk_for_each_rcu(sk, &net->packet.sklist) {
3922		struct packet_sock *po = pkt_sk(sk);
3923
3924		switch (msg) {
3925		case NETDEV_UNREGISTER:
3926			if (po->mclist)
3927				packet_dev_mclist_delete(dev, &po->mclist);
3928			/* fallthrough */
3929
3930		case NETDEV_DOWN:
3931			if (dev->ifindex == po->ifindex) {
3932				spin_lock(&po->bind_lock);
3933				if (po->running) {
3934					__unregister_prot_hook(sk, false);
3935					sk->sk_err = ENETDOWN;
3936					if (!sock_flag(sk, SOCK_DEAD))
3937						sk->sk_error_report(sk);
3938				}
3939				if (msg == NETDEV_UNREGISTER) {
3940					packet_cached_dev_reset(po);
3941					po->ifindex = -1;
3942					if (po->prot_hook.dev)
3943						dev_put(po->prot_hook.dev);
3944					po->prot_hook.dev = NULL;
3945				}
3946				spin_unlock(&po->bind_lock);
3947			}
3948			break;
3949		case NETDEV_UP:
3950			if (dev->ifindex == po->ifindex) {
3951				spin_lock(&po->bind_lock);
3952				if (po->num)
3953					register_prot_hook(sk);
3954				spin_unlock(&po->bind_lock);
3955			}
3956			break;
3957		}
3958	}
3959	rcu_read_unlock();
3960	return NOTIFY_DONE;
3961}
3962
3963
3964static int packet_ioctl(struct socket *sock, unsigned int cmd,
3965			unsigned long arg)
3966{
3967	struct sock *sk = sock->sk;
3968
3969	switch (cmd) {
3970	case SIOCOUTQ:
3971	{
3972		int amount = sk_wmem_alloc_get(sk);
3973
3974		return put_user(amount, (int __user *)arg);
3975	}
3976	case SIOCINQ:
3977	{
3978		struct sk_buff *skb;
3979		int amount = 0;
3980
3981		spin_lock_bh(&sk->sk_receive_queue.lock);
3982		skb = skb_peek(&sk->sk_receive_queue);
3983		if (skb)
3984			amount = skb->len;
3985		spin_unlock_bh(&sk->sk_receive_queue.lock);
3986		return put_user(amount, (int __user *)arg);
3987	}
3988	case SIOCGSTAMP:
3989		return sock_get_timestamp(sk, (struct timeval __user *)arg);
3990	case SIOCGSTAMPNS:
3991		return sock_get_timestampns(sk, (struct timespec __user *)arg);
3992
3993#ifdef CONFIG_INET
3994	case SIOCADDRT:
3995	case SIOCDELRT:
3996	case SIOCDARP:
3997	case SIOCGARP:
3998	case SIOCSARP:
3999	case SIOCGIFADDR:
4000	case SIOCSIFADDR:
4001	case SIOCGIFBRDADDR:
4002	case SIOCSIFBRDADDR:
4003	case SIOCGIFNETMASK:
4004	case SIOCSIFNETMASK:
4005	case SIOCGIFDSTADDR:
4006	case SIOCSIFDSTADDR:
4007	case SIOCSIFFLAGS:
4008		return inet_dgram_ops.ioctl(sock, cmd, arg);
4009#endif
4010
4011	default:
4012		return -ENOIOCTLCMD;
4013	}
4014	return 0;
4015}
4016
4017static unsigned int packet_poll(struct file *file, struct socket *sock,
4018				poll_table *wait)
4019{
4020	struct sock *sk = sock->sk;
4021	struct packet_sock *po = pkt_sk(sk);
4022	unsigned int mask = datagram_poll(file, sock, wait);
4023
4024	spin_lock_bh(&sk->sk_receive_queue.lock);
4025	if (po->rx_ring.pg_vec) {
4026		if (!packet_previous_rx_frame(po, &po->rx_ring,
4027			TP_STATUS_KERNEL))
4028			mask |= POLLIN | POLLRDNORM;
4029	}
4030	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4031		po->pressure = 0;
4032	spin_unlock_bh(&sk->sk_receive_queue.lock);
4033	spin_lock_bh(&sk->sk_write_queue.lock);
4034	if (po->tx_ring.pg_vec) {
4035		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4036			mask |= POLLOUT | POLLWRNORM;
4037	}
4038	spin_unlock_bh(&sk->sk_write_queue.lock);
4039	return mask;
4040}
4041
4042
4043/* Dirty? Well, I still have not found a better way to account
4044 * for user mmaps.
4045 */
4046
4047static void packet_mm_open(struct vm_area_struct *vma)
4048{
4049	struct file *file = vma->vm_file;
4050	struct socket *sock = file->private_data;
4051	struct sock *sk = sock->sk;
4052
4053	if (sk)
4054		atomic_inc(&pkt_sk(sk)->mapped);
4055}
4056
4057static void packet_mm_close(struct vm_area_struct *vma)
4058{
4059	struct file *file = vma->vm_file;
4060	struct socket *sock = file->private_data;
4061	struct sock *sk = sock->sk;
4062
4063	if (sk)
4064		atomic_dec(&pkt_sk(sk)->mapped);
4065}
4066
4067static const struct vm_operations_struct packet_mmap_ops = {
4068	.open	=	packet_mm_open,
4069	.close	=	packet_mm_close,
4070};
4071
4072static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4073			unsigned int len)
4074{
4075	int i;
4076
4077	for (i = 0; i < len; i++) {
4078		if (likely(pg_vec[i].buffer)) {
4079			if (is_vmalloc_addr(pg_vec[i].buffer))
4080				vfree(pg_vec[i].buffer);
4081			else
4082				free_pages((unsigned long)pg_vec[i].buffer,
4083					   order);
4084			pg_vec[i].buffer = NULL;
4085		}
4086	}
4087	kfree(pg_vec);
4088}
4089
4090static char *alloc_one_pg_vec_page(unsigned long order)
4091{
4092	char *buffer;
4093	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4094			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4095
4096	buffer = (char *) __get_free_pages(gfp_flags, order);
4097	if (buffer)
4098		return buffer;
4099
4100	/* __get_free_pages failed, fall back to vmalloc */
4101	buffer = vzalloc((1 << order) * PAGE_SIZE);
4102	if (buffer)
4103		return buffer;
4104
4105	/* vmalloc failed, let's dig into swap here */
4106	gfp_flags &= ~__GFP_NORETRY;
4107	buffer = (char *) __get_free_pages(gfp_flags, order);
4108	if (buffer)
4109		return buffer;
4110
4111	/* complete and utter failure */
4112	return NULL;
4113}
4114
4115static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4116{
4117	unsigned int block_nr = req->tp_block_nr;
4118	struct pgv *pg_vec;
4119	int i;
4120
4121	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4122	if (unlikely(!pg_vec))
4123		goto out;
4124
4125	for (i = 0; i < block_nr; i++) {
4126		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4127		if (unlikely(!pg_vec[i].buffer))
4128			goto out_free_pgvec;
4129	}
4130
4131out:
4132	return pg_vec;
4133
4134out_free_pgvec:
4135	free_pg_vec(pg_vec, order, block_nr);
4136	pg_vec = NULL;
4137	goto out;
4138}
4139
4140static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4141		int closing, int tx_ring)
4142{
4143	struct pgv *pg_vec = NULL;
4144	struct packet_sock *po = pkt_sk(sk);
4145	int was_running, order = 0;
4146	struct packet_ring_buffer *rb;
4147	struct sk_buff_head *rb_queue;
4148	__be16 num;
4149	int err = -EINVAL;
4150	/* Local alias to keep the code churn minimal */
4151	struct tpacket_req *req = &req_u->req;
4152
4153	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4154	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4155		net_warn_ratelimited("Tx-ring is not supported.\n");
4156		goto out;
4157	}
4158
4159	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4160	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4161
4162	err = -EBUSY;
4163	if (!closing) {
4164		if (atomic_read(&po->mapped))
4165			goto out;
4166		if (packet_read_pending(rb))
4167			goto out;
4168	}
4169
4170	if (req->tp_block_nr) {
4171		/* Sanity tests and some calculations */
4172		err = -EBUSY;
4173		if (unlikely(rb->pg_vec))
4174			goto out;
4175
4176		switch (po->tp_version) {
4177		case TPACKET_V1:
4178			po->tp_hdrlen = TPACKET_HDRLEN;
4179			break;
4180		case TPACKET_V2:
4181			po->tp_hdrlen = TPACKET2_HDRLEN;
4182			break;
4183		case TPACKET_V3:
4184			po->tp_hdrlen = TPACKET3_HDRLEN;
4185			break;
4186		}
4187
4188		err = -EINVAL;
4189		if (unlikely((int)req->tp_block_size <= 0))
4190			goto out;
4191		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4192			goto out;
4193		if (po->tp_version >= TPACKET_V3 &&
4194		    (int)(req->tp_block_size -
4195			  BLK_PLUS_PRIV(req_u->req3.tp_sizeof_priv)) <= 0)
4196			goto out;
4197		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4198					po->tp_reserve))
4199			goto out;
4200		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4201			goto out;
4202
4203		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4204		if (unlikely(rb->frames_per_block == 0))
4205			goto out;
4206		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4207					req->tp_frame_nr))
4208			goto out;
4209
4210		err = -ENOMEM;
4211		order = get_order(req->tp_block_size);
4212		pg_vec = alloc_pg_vec(req, order);
4213		if (unlikely(!pg_vec))
4214			goto out;
4215		switch (po->tp_version) {
4216		case TPACKET_V3:
4217		/* Transmit path is not supported. We checked
4218		 * it above but just being paranoid
4219		 */
4220			if (!tx_ring)
4221				init_prb_bdqc(po, rb, pg_vec, req_u);
4222			break;
4223		default:
4224			break;
4225		}
4226	}
4227	/* Done */
4228	else {
4229		err = -EINVAL;
4230		if (unlikely(req->tp_frame_nr))
4231			goto out;
4232	}
4233
4234	lock_sock(sk);
4235
4236	/* Detach socket from network */
4237	spin_lock(&po->bind_lock);
4238	was_running = po->running;
4239	num = po->num;
4240	if (was_running) {
4241		po->num = 0;
4242		__unregister_prot_hook(sk, false);
4243	}
4244	spin_unlock(&po->bind_lock);
4245
4246	synchronize_net();
4247
4248	err = -EBUSY;
4249	mutex_lock(&po->pg_vec_lock);
4250	if (closing || atomic_read(&po->mapped) == 0) {
4251		err = 0;
4252		spin_lock_bh(&rb_queue->lock);
4253		swap(rb->pg_vec, pg_vec);
4254		rb->frame_max = (req->tp_frame_nr - 1);
4255		rb->head = 0;
4256		rb->frame_size = req->tp_frame_size;
4257		spin_unlock_bh(&rb_queue->lock);
4258
4259		swap(rb->pg_vec_order, order);
4260		swap(rb->pg_vec_len, req->tp_block_nr);
4261
4262		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4263		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4264						tpacket_rcv : packet_rcv;
4265		skb_queue_purge(rb_queue);
4266		if (atomic_read(&po->mapped))
4267			pr_err("packet_mmap: vma is busy: %d\n",
4268			       atomic_read(&po->mapped));
4269	}
4270	mutex_unlock(&po->pg_vec_lock);
4271
4272	spin_lock(&po->bind_lock);
4273	if (was_running) {
4274		po->num = num;
4275		register_prot_hook(sk);
4276	}
4277	spin_unlock(&po->bind_lock);
4278	if (closing && (po->tp_version > TPACKET_V2)) {
4279		/* Because we don't support block-based V3 on tx-ring */
4280		if (!tx_ring)
4281			prb_shutdown_retire_blk_timer(po, rb_queue);
4282	}
4283	release_sock(sk);
4284
4285	if (pg_vec)
4286		free_pg_vec(pg_vec, order, req->tp_block_nr);
4287out:
4288	return err;
4289}
4290
4291static int packet_mmap(struct file *file, struct socket *sock,
4292		struct vm_area_struct *vma)
4293{
4294	struct sock *sk = sock->sk;
4295	struct packet_sock *po = pkt_sk(sk);
4296	unsigned long size, expected_size;
4297	struct packet_ring_buffer *rb;
4298	unsigned long start;
4299	int err = -EINVAL;
4300	int i;
4301
4302	if (vma->vm_pgoff)
4303		return -EINVAL;
4304
4305	mutex_lock(&po->pg_vec_lock);
4306
4307	expected_size = 0;
4308	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4309		if (rb->pg_vec) {
4310			expected_size += rb->pg_vec_len
4311						* rb->pg_vec_pages
4312						* PAGE_SIZE;
4313		}
4314	}
4315
4316	if (expected_size == 0)
4317		goto out;
4318
4319	size = vma->vm_end - vma->vm_start;
4320	if (size != expected_size)
4321		goto out;
4322
4323	start = vma->vm_start;
4324	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4325		if (rb->pg_vec == NULL)
4326			continue;
4327
4328		for (i = 0; i < rb->pg_vec_len; i++) {
4329			struct page *page;
4330			void *kaddr = rb->pg_vec[i].buffer;
4331			int pg_num;
4332
4333			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4334				page = pgv_to_page(kaddr);
4335				err = vm_insert_page(vma, start, page);
4336				if (unlikely(err))
4337					goto out;
4338				start += PAGE_SIZE;
4339				kaddr += PAGE_SIZE;
4340			}
4341		}
4342	}
4343
4344	atomic_inc(&po->mapped);
4345	vma->vm_ops = &packet_mmap_ops;
4346	err = 0;
4347
4348out:
4349	mutex_unlock(&po->pg_vec_lock);
4350	return err;
4351}
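/*
 * A hedged user-space sketch of the calls that packet_set_ring() and
 * packet_mmap() above serve, using TPACKET_V2 and arbitrary example sizes.
 * The helper name is made up; fd is assumed to be an AF_PACKET socket and
 * the caller still has to check for MAP_FAILED.
 */
#if 0	/* example only, never compiled here */
#include <sys/socket.h>
#include <sys/mman.h>
#include <linux/if_packet.h>
#include <stddef.h>

static void *map_rx_ring(int fd, struct tpacket_req *req)
{
	int version = TPACKET_V2;
	size_t sz;

	/* Fix the header format first; it determines tp_hdrlen. */
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
		       &version, sizeof(version)))
		return MAP_FAILED;

	/* 64 one-page blocks, 16 frames of 256 bytes per block. */
	req->tp_block_size = 4096;
	req->tp_frame_size = 256;
	req->tp_block_nr   = 64;
	req->tp_frame_nr   = (4096 / 256) * 64;
	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req)))
		return MAP_FAILED;

	/* One contiguous mapping at page offset 0 covers the whole ring. */
	sz = (size_t)req->tp_block_size * req->tp_block_nr;
	return mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}
#endif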
4352
4353static const struct proto_ops packet_ops_spkt = {
4354	.family =	PF_PACKET,
4355	.owner =	THIS_MODULE,
4356	.release =	packet_release,
4357	.bind =		packet_bind_spkt,
4358	.connect =	sock_no_connect,
4359	.socketpair =	sock_no_socketpair,
4360	.accept =	sock_no_accept,
4361	.getname =	packet_getname_spkt,
4362	.poll =		datagram_poll,
4363	.ioctl =	packet_ioctl,
4364	.listen =	sock_no_listen,
4365	.shutdown =	sock_no_shutdown,
4366	.setsockopt =	sock_no_setsockopt,
4367	.getsockopt =	sock_no_getsockopt,
4368	.sendmsg =	packet_sendmsg_spkt,
4369	.recvmsg =	packet_recvmsg,
4370	.mmap =		sock_no_mmap,
4371	.sendpage =	sock_no_sendpage,
4372};
4373
4374static const struct proto_ops packet_ops = {
4375	.family =	PF_PACKET,
4376	.owner =	THIS_MODULE,
4377	.release =	packet_release,
4378	.bind =		packet_bind,
4379	.connect =	sock_no_connect,
4380	.socketpair =	sock_no_socketpair,
4381	.accept =	sock_no_accept,
4382	.getname =	packet_getname,
4383	.poll =		packet_poll,
4384	.ioctl =	packet_ioctl,
4385	.listen =	sock_no_listen,
4386	.shutdown =	sock_no_shutdown,
4387	.setsockopt =	packet_setsockopt,
4388	.getsockopt =	packet_getsockopt,
4389	.sendmsg =	packet_sendmsg,
4390	.recvmsg =	packet_recvmsg,
4391	.mmap =		packet_mmap,
4392	.sendpage =	sock_no_sendpage,
4393};
4394
4395static const struct net_proto_family packet_family_ops = {
4396	.family =	PF_PACKET,
4397	.create =	packet_create,
4398	.owner	=	THIS_MODULE,
4399};
4400
4401static struct notifier_block packet_netdev_notifier = {
4402	.notifier_call =	packet_notifier,
4403};
4404
4405#ifdef CONFIG_PROC_FS
4406
4407static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4408	__acquires(RCU)
4409{
4410	struct net *net = seq_file_net(seq);
4411
4412	rcu_read_lock();
4413	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4414}
4415
4416static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4417{
4418	struct net *net = seq_file_net(seq);
4419	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4420}
4421
4422static void packet_seq_stop(struct seq_file *seq, void *v)
4423	__releases(RCU)
4424{
4425	rcu_read_unlock();
4426}
4427
4428static int packet_seq_show(struct seq_file *seq, void *v)
4429{
4430	if (v == SEQ_START_TOKEN)
4431		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4432	else {
4433		struct sock *s = sk_entry(v);
4434		const struct packet_sock *po = pkt_sk(s);
4435
4436		seq_printf(seq,
4437			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4438			   s,
4439			   atomic_read(&s->sk_refcnt),
4440			   s->sk_type,
4441			   ntohs(po->num),
4442			   po->ifindex,
4443			   po->running,
4444			   atomic_read(&s->sk_rmem_alloc),
4445			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4446			   sock_i_ino(s));
4447	}
4448
4449	return 0;
4450}
4451
4452static const struct seq_operations packet_seq_ops = {
4453	.start	= packet_seq_start,
4454	.next	= packet_seq_next,
4455	.stop	= packet_seq_stop,
4456	.show	= packet_seq_show,
4457};
4458
4459static int packet_seq_open(struct inode *inode, struct file *file)
4460{
4461	return seq_open_net(inode, file, &packet_seq_ops,
4462			    sizeof(struct seq_net_private));
4463}
4464
4465static const struct file_operations packet_seq_fops = {
4466	.owner		= THIS_MODULE,
4467	.open		= packet_seq_open,
4468	.read		= seq_read,
4469	.llseek		= seq_lseek,
4470	.release	= seq_release_net,
4471};
4472
4473#endif
4474
4475static int __net_init packet_net_init(struct net *net)
4476{
4477	mutex_init(&net->packet.sklist_lock);
4478	INIT_HLIST_HEAD(&net->packet.sklist);
4479
4480	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4481		return -ENOMEM;
4482
4483	return 0;
4484}
4485
4486static void __net_exit packet_net_exit(struct net *net)
4487{
4488	remove_proc_entry("packet", net->proc_net);
4489}
4490
4491static struct pernet_operations packet_net_ops = {
4492	.init = packet_net_init,
4493	.exit = packet_net_exit,
4494};
4495
4496
4497static void __exit packet_exit(void)
4498{
4499	unregister_netdevice_notifier(&packet_netdev_notifier);
4500	unregister_pernet_subsys(&packet_net_ops);
4501	sock_unregister(PF_PACKET);
4502	proto_unregister(&packet_proto);
4503}
4504
4505static int __init packet_init(void)
4506{
4507	int rc = proto_register(&packet_proto, 0);
4508
4509	if (rc != 0)
4510		goto out;
4511
4512	sock_register(&packet_family_ops);
4513	register_pernet_subsys(&packet_net_ops);
4514	register_netdevice_notifier(&packet_netdev_notifier);
4515out:
4516	return rc;
4517}
4518
4519module_init(packet_init);
4520module_exit(packet_exit);
4521MODULE_LICENSE("GPL");
4522MODULE_ALIAS_NETPROTO(PF_PACKET);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#include <linux/types.h>
  50#include <linux/mm.h>
  51#include <linux/capability.h>
  52#include <linux/fcntl.h>
  53#include <linux/socket.h>
  54#include <linux/in.h>
  55#include <linux/inet.h>
  56#include <linux/netdevice.h>
  57#include <linux/if_packet.h>
  58#include <linux/wireless.h>
  59#include <linux/kernel.h>
  60#include <linux/kmod.h>
  61#include <linux/slab.h>
  62#include <linux/vmalloc.h>
  63#include <net/net_namespace.h>
  64#include <net/ip.h>
  65#include <net/protocol.h>
  66#include <linux/skbuff.h>
  67#include <net/sock.h>
  68#include <linux/errno.h>
  69#include <linux/timer.h>
  70#include <linux/uaccess.h>
  71#include <asm/ioctls.h>
  72#include <asm/page.h>
  73#include <asm/cacheflush.h>
  74#include <asm/io.h>
  75#include <linux/proc_fs.h>
  76#include <linux/seq_file.h>
  77#include <linux/poll.h>
  78#include <linux/module.h>
  79#include <linux/init.h>
  80#include <linux/mutex.h>
  81#include <linux/if_vlan.h>
  82#include <linux/virtio_net.h>
  83#include <linux/errqueue.h>
  84#include <linux/net_tstamp.h>
  85#include <linux/percpu.h>
  86#ifdef CONFIG_INET
  87#include <net/inet_common.h>
  88#endif
  89#include <linux/bpf.h>
  90#include <net/compat.h>
  91
  92#include "internal.h"
  93
  94/*
  95   Assumptions:
  96   - if device has no dev->hard_header routine, it adds and removes ll header
  97     inside itself. In this case ll header is invisible outside of device,
  98     but higher levels still should reserve dev->hard_header_len.
  99     Some devices are clever enough to reallocate the skb when the header
 100     will not fit into the reserved space (tunnels); others are not that
 101     smart (PPP).
 102   - a packet socket receives packets with the ll header already pulled,
 103     so SOCK_RAW should push it back.
 104
 105On receive:
 106-----------
 107
 108Incoming, dev->hard_header!=NULL
 109   mac_header -> ll header
 110   data       -> data
 111
 112Outgoing, dev->hard_header!=NULL
 113   mac_header -> ll header
 114   data       -> ll header
 115
 116Incoming, dev->hard_header==NULL
 117   mac_header -> UNKNOWN position. It very likely points to the ll
 118		 header.  PPP does this, which is wrong, because it introduces
 119		 asymmetry between the rx and tx paths.
 120   data       -> data
 121
 122Outgoing, dev->hard_header==NULL
 123   mac_header -> data. ll header is still not built!
 124   data       -> data
 125
 126Summary
 127  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
 128
 129
 130On transmit:
 131------------
 132
 133dev->hard_header != NULL
 134   mac_header -> ll header
 135   data       -> ll header
 136
 137dev->hard_header == NULL (ll header is added by device, we cannot control it)
 138   mac_header -> data
 139   data       -> data
 140
 141   We should set nh.raw on output to the correct position;
 142   the packet classifier depends on it.
 143 */
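/*
 * A hedged user-space illustration of the SOCK_RAW case described above,
 * where the buffer returned by recvfrom() starts with the link-layer header.
 * The helper name is made up for the example and CAP_NET_RAW is required.
 */
#if 0	/* example only, never compiled here */
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <linux/if_ether.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <stdio.h>

static void dump_one_frame(void)
{
	unsigned char buf[2048];
	struct sockaddr_ll from;
	socklen_t fromlen = sizeof(from);
	ssize_t n;
	int fd;

	fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	if (fd < 0)
		return;

	/* SOCK_RAW pushes the ll header back, so buf begins with it. */
	n = recvfrom(fd, buf, sizeof(buf), 0,
		     (struct sockaddr *)&from, &fromlen);
	if (n >= (ssize_t)sizeof(struct ethhdr)) {
		struct ethhdr *eth = (struct ethhdr *)buf;

		printf("ifindex %d proto 0x%04x\n",
		       from.sll_ifindex, ntohs(eth->h_proto));
	}
	/* A SOCK_DGRAM packet socket would hand over the payload only. */
	close(fd);
}
#endif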
 144
 145/* Private packet socket structures. */
 146
 147/* identical to struct packet_mreq except it has
 148 * a longer address field.
 149 */
 150struct packet_mreq_max {
 151	int		mr_ifindex;
 152	unsigned short	mr_type;
 153	unsigned short	mr_alen;
 154	unsigned char	mr_address[MAX_ADDR_LEN];
 155};
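/*
 * User space passes the shorter struct packet_mreq from the uapi headers;
 * the kernel-side handler copies whatever length it gets into the wider
 * layout above.  A hedged sketch (helper name made up, ifindex as returned
 * by if_nametoindex()):
 */
#if 0	/* example only, never compiled here */
#include <sys/socket.h>
#include <linux/if_packet.h>
#include <string.h>

static int enable_promisc(int fd, int ifindex)
{
	struct packet_mreq mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.mr_ifindex = ifindex;
	mreq.mr_type = PACKET_MR_PROMISC;	/* no hardware address needed */

	return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
			  &mreq, sizeof(mreq));
}
#endif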
 156
 157union tpacket_uhdr {
 158	struct tpacket_hdr  *h1;
 159	struct tpacket2_hdr *h2;
 160	struct tpacket3_hdr *h3;
 161	void *raw;
 162};
 163
 164static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 165		int closing, int tx_ring);
 166
 167#define V3_ALIGNMENT	(8)
 168
 169#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 170
 171#define BLK_PLUS_PRIV(sz_of_priv) \
 172	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 173
 174#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 175#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 176#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 177#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 178#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 179#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 180#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 181
 182struct packet_sock;
 183static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 184		       struct packet_type *pt, struct net_device *orig_dev);
 185
 186static void *packet_previous_frame(struct packet_sock *po,
 187		struct packet_ring_buffer *rb,
 188		int status);
 189static void packet_increment_head(struct packet_ring_buffer *buff);
 190static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 191static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 192			struct packet_sock *);
 193static void prb_retire_current_block(struct tpacket_kbdq_core *,
 194		struct packet_sock *, unsigned int status);
 195static int prb_queue_frozen(struct tpacket_kbdq_core *);
 196static void prb_open_block(struct tpacket_kbdq_core *,
 197		struct tpacket_block_desc *);
 198static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 199static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 200static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 201static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 202		struct tpacket3_hdr *);
 203static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 204		struct tpacket3_hdr *);
 205static void packet_flush_mclist(struct sock *sk);
 206static u16 packet_pick_tx_queue(struct sk_buff *skb);
 207
 208struct packet_skb_cb {
 209	union {
 210		struct sockaddr_pkt pkt;
 211		union {
 212			/* Trick: alias skb original length with
 213			 * ll.sll_family and ll.protocol in order
 214			 * to save room.
 215			 */
 216			unsigned int origlen;
 217			struct sockaddr_ll ll;
 218		};
 219	} sa;
 220};
 221
 222#define vio_le() virtio_legacy_is_little_endian()
 223
 224#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 225
 226#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 227#define GET_PBLOCK_DESC(x, bid)	\
 228	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 229#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 230	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 231#define GET_NEXT_PRB_BLK_NUM(x) \
 232	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 233	((x)->kactive_blk_num+1) : 0)
 234
 235static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 236static void __fanout_link(struct sock *sk, struct packet_sock *po);
 237
 238static int packet_direct_xmit(struct sk_buff *skb)
 239{
 240	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 241}
 242
 243static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 244{
 245	struct net_device *dev;
 246
 247	rcu_read_lock();
 248	dev = rcu_dereference(po->cached_dev);
 249	if (likely(dev))
 250		dev_hold(dev);
 251	rcu_read_unlock();
 252
 253	return dev;
 254}
 255
 256static void packet_cached_dev_assign(struct packet_sock *po,
 257				     struct net_device *dev)
 258{
 259	rcu_assign_pointer(po->cached_dev, dev);
 260}
 261
 262static void packet_cached_dev_reset(struct packet_sock *po)
 263{
 264	RCU_INIT_POINTER(po->cached_dev, NULL);
 265}
 266
 267static bool packet_use_direct_xmit(const struct packet_sock *po)
 268{
 269	return po->xmit == packet_direct_xmit;
 270}
 271
 272static u16 packet_pick_tx_queue(struct sk_buff *skb)
 273{
 274	struct net_device *dev = skb->dev;
 275	const struct net_device_ops *ops = dev->netdev_ops;
 276	int cpu = raw_smp_processor_id();
 277	u16 queue_index;
 278
 279#ifdef CONFIG_XPS
 280	skb->sender_cpu = cpu + 1;
 281#endif
 282	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 283	if (ops->ndo_select_queue) {
 284		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 285		queue_index = netdev_cap_txqueue(dev, queue_index);
 286	} else {
 287		queue_index = netdev_pick_tx(dev, skb, NULL);
 288	}
 289
 290	return queue_index;
 291}
 292
 293/* __register_prot_hook must be invoked through register_prot_hook
 294 * or from a context in which asynchronous accesses to the packet
 295 * socket are not possible (packet_create()).
 296 */
 297static void __register_prot_hook(struct sock *sk)
 298{
 299	struct packet_sock *po = pkt_sk(sk);
 300
 301	if (!po->running) {
 302		if (po->fanout)
 303			__fanout_link(sk, po);
 304		else
 305			dev_add_pack(&po->prot_hook);
 306
 307		sock_hold(sk);
 308		po->running = 1;
 309	}
 310}
 311
 312static void register_prot_hook(struct sock *sk)
 313{
 314	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 315	__register_prot_hook(sk);
 316}
 317
 318/* If the sync parameter is true, we will temporarily drop
 319 * the po->bind_lock and do a synchronize_net to make sure no
 320 * asynchronous packet processing paths still refer to the elements
 321 * of po->prot_hook.  If the sync parameter is false, it is the
 322 * caller's responsibility to take care of this.
 323 */
 324static void __unregister_prot_hook(struct sock *sk, bool sync)
 325{
 326	struct packet_sock *po = pkt_sk(sk);
 327
 328	lockdep_assert_held_once(&po->bind_lock);
 329
 330	po->running = 0;
 331
 332	if (po->fanout)
 333		__fanout_unlink(sk, po);
 334	else
 335		__dev_remove_pack(&po->prot_hook);
 336
 337	__sock_put(sk);
 338
 339	if (sync) {
 340		spin_unlock(&po->bind_lock);
 341		synchronize_net();
 342		spin_lock(&po->bind_lock);
 343	}
 344}
 345
 346static void unregister_prot_hook(struct sock *sk, bool sync)
 347{
 348	struct packet_sock *po = pkt_sk(sk);
 349
 350	if (po->running)
 351		__unregister_prot_hook(sk, sync);
 352}
 353
 354static inline struct page * __pure pgv_to_page(void *addr)
 355{
 356	if (is_vmalloc_addr(addr))
 357		return vmalloc_to_page(addr);
 358	return virt_to_page(addr);
 359}
 360
 361static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 362{
 363	union tpacket_uhdr h;
 364
 365	h.raw = frame;
 366	switch (po->tp_version) {
 367	case TPACKET_V1:
 368		h.h1->tp_status = status;
 369		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 370		break;
 371	case TPACKET_V2:
 372		h.h2->tp_status = status;
 373		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 374		break;
 375	case TPACKET_V3:
 376		h.h3->tp_status = status;
 377		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 378		break;
 379	default:
 380		WARN(1, "TPACKET version not supported.\n");
 381		BUG();
 382	}
 383
 384	smp_wmb();
 385}
 386
 387static int __packet_get_status(const struct packet_sock *po, void *frame)
 388{
 389	union tpacket_uhdr h;
 390
 391	smp_rmb();
 392
 393	h.raw = frame;
 394	switch (po->tp_version) {
 395	case TPACKET_V1:
 396		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 397		return h.h1->tp_status;
 398	case TPACKET_V2:
 399		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 400		return h.h2->tp_status;
 401	case TPACKET_V3:
 402		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 403		return h.h3->tp_status;
 404	default:
 405		WARN(1, "TPACKET version not supported.\n");
 406		BUG();
 407		return 0;
 408	}
 409}
 410
 411static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 412				   unsigned int flags)
 413{
 414	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 415
 416	if (shhwtstamps &&
 417	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 418	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 419		return TP_STATUS_TS_RAW_HARDWARE;
 420
 421	if (ktime_to_timespec64_cond(skb->tstamp, ts))
 422		return TP_STATUS_TS_SOFTWARE;
 423
 424	return 0;
 425}
 426
 427static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 428				    struct sk_buff *skb)
 429{
 430	union tpacket_uhdr h;
 431	struct timespec64 ts;
 432	__u32 ts_status;
 433
 434	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 435		return 0;
 436
 437	h.raw = frame;
 438	/*
 439	 * versions 1 through 3 overflow the timestamps in y2106, since they
 440	 * all store the seconds in a 32-bit unsigned integer.
 441	 * If we create a version 4, that should have a 64-bit timestamp,
 442	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
 443	 * nanoseconds.
 444	 */
 445	switch (po->tp_version) {
 446	case TPACKET_V1:
 447		h.h1->tp_sec = ts.tv_sec;
 448		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 449		break;
 450	case TPACKET_V2:
 451		h.h2->tp_sec = ts.tv_sec;
 452		h.h2->tp_nsec = ts.tv_nsec;
 453		break;
 454	case TPACKET_V3:
 455		h.h3->tp_sec = ts.tv_sec;
 456		h.h3->tp_nsec = ts.tv_nsec;
 457		break;
 458	default:
 459		WARN(1, "TPACKET version not supported.\n");
 460		BUG();
 461	}
 462
 463	/* one flush is safe, as both fields always lie on the same cacheline */
 464	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 465	smp_wmb();
 466
 467	return ts_status;
 468}
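/*
 * On the y2106 note above, a worked check of the comment rather than new
 * logic: a 32-bit unsigned seconds field wraps after 2^32 s, i.e. roughly
 * 136 years, so epoch 1970 + 136 lands in the year 2106.
 */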
 469
 470static void *packet_lookup_frame(const struct packet_sock *po,
 471				 const struct packet_ring_buffer *rb,
 472				 unsigned int position,
 473				 int status)
 474{
 475	unsigned int pg_vec_pos, frame_offset;
 476	union tpacket_uhdr h;
 477
 478	pg_vec_pos = position / rb->frames_per_block;
 479	frame_offset = position % rb->frames_per_block;
 480
 481	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 482		(frame_offset * rb->frame_size);
 483
 484	if (status != __packet_get_status(po, h.raw))
 485		return NULL;
 486
 487	return h.raw;
 488}
 489
 490static void *packet_current_frame(struct packet_sock *po,
 491		struct packet_ring_buffer *rb,
 492		int status)
 493{
 494	return packet_lookup_frame(po, rb, rb->head, status);
 495}
 496
 497static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 498{
 499	del_timer_sync(&pkc->retire_blk_timer);
 500}
 501
 502static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 503		struct sk_buff_head *rb_queue)
 504{
 505	struct tpacket_kbdq_core *pkc;
 506
 507	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 508
 509	spin_lock_bh(&rb_queue->lock);
 510	pkc->delete_blk_timer = 1;
 511	spin_unlock_bh(&rb_queue->lock);
 512
 513	prb_del_retire_blk_timer(pkc);
 514}
 515
 516static void prb_setup_retire_blk_timer(struct packet_sock *po)
 517{
 518	struct tpacket_kbdq_core *pkc;
 519
 520	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 521	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 522		    0);
 523	pkc->retire_blk_timer.expires = jiffies;
 524}
 525
 526static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 527				int blk_size_in_bytes)
 528{
 529	struct net_device *dev;
 530	unsigned int mbits, div;
 531	struct ethtool_link_ksettings ecmd;
 532	int err;
 533
 534	rtnl_lock();
 535	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 536	if (unlikely(!dev)) {
 537		rtnl_unlock();
 538		return DEFAULT_PRB_RETIRE_TOV;
 539	}
 540	err = __ethtool_get_link_ksettings(dev, &ecmd);
 541	rtnl_unlock();
 542	if (err)
 543		return DEFAULT_PRB_RETIRE_TOV;
 544
 545	/* If the link speed is this slow, there is no real
 546	 * need to worry about performance anyway.
 547	 */
 548	if (ecmd.base.speed < SPEED_1000 ||
 549	    ecmd.base.speed == SPEED_UNKNOWN)
 550		return DEFAULT_PRB_RETIRE_TOV;
 551
 552	div = ecmd.base.speed / 1000;
 553	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 554
 555	if (div)
 556		mbits /= div;
 557
 558	if (div)
 559		return mbits + 1;
 560	return mbits;
 561}
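/*
 * Worked example of the arithmetic above (a sketch, not additional logic):
 * with a 1 MiB block on a 1 Gb/s link, mbits = (1048576 * 8) / (1024 * 1024)
 * = 8 and div = 1, so the returned timeout is 8 + 1 = 9 ms, roughly the time
 * the line needs to fill one block.  On a 10 Gb/s link div = 10, mbits /= div
 * truncates to 0, and the returned timeout is 1 ms.
 */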
 562
 563static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 564			union tpacket_req_u *req_u)
 565{
 566	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 567}
 568
 569static void init_prb_bdqc(struct packet_sock *po,
 570			struct packet_ring_buffer *rb,
 571			struct pgv *pg_vec,
 572			union tpacket_req_u *req_u)
 573{
 574	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 575	struct tpacket_block_desc *pbd;
 576
 577	memset(p1, 0x0, sizeof(*p1));
 578
 579	p1->knxt_seq_num = 1;
 580	p1->pkbdq = pg_vec;
 581	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 582	p1->pkblk_start	= pg_vec[0].buffer;
 583	p1->kblk_size = req_u->req3.tp_block_size;
 584	p1->knum_blocks	= req_u->req3.tp_block_nr;
 585	p1->hdrlen = po->tp_hdrlen;
 586	p1->version = po->tp_version;
 587	p1->last_kactive_blk_num = 0;
 588	po->stats.stats3.tp_freeze_q_cnt = 0;
 589	if (req_u->req3.tp_retire_blk_tov)
 590		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 591	else
 592		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 593						req_u->req3.tp_block_size);
 594	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 595	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 596	rwlock_init(&p1->blk_fill_in_prog_lock);
 597
 598	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 599	prb_init_ft_ops(p1, req_u);
 600	prb_setup_retire_blk_timer(po);
 601	prb_open_block(p1, pbd);
 602}
 603
 604/*  Do NOT update the last_blk_num first.
 605 *  Assumes sk_buff_head lock is held.
 606 */
 607static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 608{
 609	mod_timer(&pkc->retire_blk_timer,
 610			jiffies + pkc->tov_in_jiffies);
 611	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 612}
 613
 614/*
 615 * Timer logic:
 616 * 1) We refresh the timer only when we open a block.
 617 *    By doing this we don't waste cycles refreshing the timer
 618 *	  on a packet-by-packet basis.
 619 *
 620 * With a 1MB block-size, on a 1Gbps line, it will take
 621 * i) ~8 ms to fill a block + ii) memcpy etc.
 622 * In this cut we are not accounting for the memcpy time.
 623 *
 624 * So, if the user sets the 'tmo' to 10ms then the timer
 625 * will never fire while the block is still getting filled
 626 * (which is what we want). However, the user could choose
 627 * to close a block early and that's fine.
 628 *
 629 * But when the timer does fire, we check whether or not to refresh it.
 630 * Since the tmo granularity is in msecs, it is not too expensive
 631 * to refresh the timer, let's say every '8' msecs.
 632 * Either the user can set the 'tmo' or we can derive it based on
 633 * a) line-speed and b) block-size.
 634 * prb_calc_retire_blk_tmo() calculates the tmo.
 635 *
 636 */
 637static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 638{
 639	struct packet_sock *po =
 640		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 641	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 642	unsigned int frozen;
 643	struct tpacket_block_desc *pbd;
 644
 645	spin_lock(&po->sk.sk_receive_queue.lock);
 646
 647	frozen = prb_queue_frozen(pkc);
 648	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 649
 650	if (unlikely(pkc->delete_blk_timer))
 651		goto out;
 652
 653	/* We only need to plug the race when the block is partially filled.
 654	 * tpacket_rcv:
 655	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 656	 *		copy_bits() is in progress ...
 657	 *		timer fires on other cpu:
 658	 *		we can't retire the current block because copy_bits
 659	 *		is in progress.
 660	 *
 661	 */
 662	if (BLOCK_NUM_PKTS(pbd)) {
 663		/* Waiting for skb_copy_bits to finish... */
 664		write_lock(&pkc->blk_fill_in_prog_lock);
 665		write_unlock(&pkc->blk_fill_in_prog_lock);
 666	}
 667
 668	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 669		if (!frozen) {
 670			if (!BLOCK_NUM_PKTS(pbd)) {
 671				/* An empty block. Just refresh the timer. */
 672				goto refresh_timer;
 673			}
 674			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 675			if (!prb_dispatch_next_block(pkc, po))
 676				goto refresh_timer;
 677			else
 678				goto out;
 679		} else {
 680			/* Case 1. Queue was frozen because user-space was
 681			 *	   lagging behind.
 682			 */
 683			if (prb_curr_blk_in_use(pbd)) {
 684				/*
 685				 * Ok, user-space is still behind.
 686				 * So just refresh the timer.
 687				 */
 688				goto refresh_timer;
 689			} else {
 690			       /* Case 2. The queue was frozen, user-space caught up,
 691				* now the link went idle && the timer fired.
 692				* We don't have a block to close. So we open this
 693				* block and restart the timer.
 694				* Opening a block thaws the queue and restarts the timer.
 695				* Thawing/timer-refresh is a side effect.
 696				*/
 697				prb_open_block(pkc, pbd);
 698				goto out;
 699			}
 700		}
 701	}
 702
 703refresh_timer:
 704	_prb_refresh_rx_retire_blk_timer(pkc);
 705
 706out:
 707	spin_unlock(&po->sk.sk_receive_queue.lock);
 708}
 709
 710static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 711		struct tpacket_block_desc *pbd1, __u32 status)
 712{
 713	/* Flush everything minus the block header */
 714
 715#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 716	u8 *start, *end;
 717
 718	start = (u8 *)pbd1;
 719
 720	/* Skip the block header (we know the header WILL fit in 4K) */
 721	start += PAGE_SIZE;
 722
 723	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 724	for (; start < end; start += PAGE_SIZE)
 725		flush_dcache_page(pgv_to_page(start));
 726
 727	smp_wmb();
 728#endif
 729
 730	/* Now update the block status. */
 731
 732	BLOCK_STATUS(pbd1) = status;
 733
 734	/* Flush the block header */
 735
 736#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 737	start = (u8 *)pbd1;
 738	flush_dcache_page(pgv_to_page(start));
 739
 740	smp_wmb();
 741#endif
 742}
 743
 744/*
 745 * Side effect:
 746 *
 747 * 1) flush the block
 748 * 2) Increment active_blk_num
 749 *
 750 * Note: We DON'T refresh the timer on purpose.
 751 *	Because almost always the next block will be opened.
 752 */
 753static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 754		struct tpacket_block_desc *pbd1,
 755		struct packet_sock *po, unsigned int stat)
 756{
 757	__u32 status = TP_STATUS_USER | stat;
 758
 759	struct tpacket3_hdr *last_pkt;
 760	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 761	struct sock *sk = &po->sk;
 762
 763	if (atomic_read(&po->tp_drops))
 764		status |= TP_STATUS_LOSING;
 765
 766	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 767	last_pkt->tp_next_offset = 0;
 768
 769	/* Get the ts of the last pkt */
 770	if (BLOCK_NUM_PKTS(pbd1)) {
 771		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 772		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 773	} else {
 774		/* Ok, we tmo'd - so get the current time.
 775		 *
 776		 * It shouldn't really happen as we don't close empty
 777		 * blocks. See prb_retire_rx_blk_timer_expired().
 778		 */
 779		struct timespec64 ts;
 780		ktime_get_real_ts64(&ts);
 781		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 782		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 783	}
 784
 785	smp_wmb();
 786
 787	/* Flush the block */
 788	prb_flush_block(pkc1, pbd1, status);
 789
 790	sk->sk_data_ready(sk);
 791
 792	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 793}
 794
 795static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 796{
 797	pkc->reset_pending_on_curr_blk = 0;
 798}
 799
 800/*
 801 * Side effect of opening a block:
 802 *
 803 * 1) prb_queue is thawed.
 804 * 2) retire_blk_timer is refreshed.
 805 *
 806 */
 807static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 808	struct tpacket_block_desc *pbd1)
 809{
 810	struct timespec64 ts;
 811	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 812
 813	smp_rmb();
 814
 815	/* We could have just memset this but we will lose the
 816	 * flexibility of making the priv area sticky
 817	 */
 818
 819	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 820	BLOCK_NUM_PKTS(pbd1) = 0;
 821	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 822
 823	ktime_get_real_ts64(&ts);
 824
 825	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 826	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 827
 828	pkc1->pkblk_start = (char *)pbd1;
 829	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 830
 831	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 832	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 833
 834	pbd1->version = pkc1->version;
 835	pkc1->prev = pkc1->nxt_offset;
 836	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 837
 838	prb_thaw_queue(pkc1);
 839	_prb_refresh_rx_retire_blk_timer(pkc1);
 840
 841	smp_wmb();
 842}
 843
 844/*
 845 * Queue freeze logic:
 846 * 1) Assume tp_block_nr = 8 blocks.
 847 * 2) At time 't0', user opens Rx ring.
 848 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 849 * 4) user-space is either sleeping or processing block '0'.
 850 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 851 *    it will close block-7, loop around and try to fill block '0'.
 852 *    call-flow:
 853 *    __packet_lookup_frame_in_block
 854 *      prb_retire_current_block()
 855 *      prb_dispatch_next_block()
 856 *        |->(BLOCK_STATUS == USER) evaluates to true
 857 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 858 * 6) Now there are two cases:
 859 *    6.1) Link goes idle right after the queue is frozen.
 860 *         But remember, the last open_block() refreshed the timer.
 861 *         When this timer expires, it will refresh itself so that we can
 862 *         re-open block-0 in near future.
 863 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 864 *         case and __packet_lookup_frame_in_block will check if block-0
 865 *         is free and can now be re-used.
 866 */
 867static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 868				  struct packet_sock *po)
 869{
 870	pkc->reset_pending_on_curr_blk = 1;
 871	po->stats.stats3.tp_freeze_q_cnt++;
 872}
 873
 874#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 875
 876/*
 877 * If the next block is free then we will dispatch it
 878 * and return a good offset.
 879 * Else, we will freeze the queue.
 880 * So, caller must check the return value.
 881 */
 882static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 883		struct packet_sock *po)
 884{
 885	struct tpacket_block_desc *pbd;
 886
 887	smp_rmb();
 888
 889	/* 1. Get current block num */
 890	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 891
 892	/* 2. If this block is currently in_use then freeze the queue */
 893	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 894		prb_freeze_queue(pkc, po);
 895		return NULL;
 896	}
 897
 898	/*
 899	 * 3.
 900	 * open this block and return the offset where the first packet
 901	 * needs to get stored.
 902	 */
 903	prb_open_block(pkc, pbd);
 904	return (void *)pkc->nxt_offset;
 905}
 906
 907static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 908		struct packet_sock *po, unsigned int status)
 909{
 910	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 911
 912	/* retire/close the current block */
 913	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 914		/*
 915		 * Plug the case where copy_bits() is in progress on
 916		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 917		 * have space to copy the pkt in the current block and
 918		 * called prb_retire_current_block()
 919		 *
 920		 * We don't need to worry about the TMO case because
 921		 * the timer-handler already handled this case.
 922		 */
 923		if (!(status & TP_STATUS_BLK_TMO)) {
 924			/* Waiting for skb_copy_bits to finish... */
 925			write_lock(&pkc->blk_fill_in_prog_lock);
 926			write_unlock(&pkc->blk_fill_in_prog_lock);
 927		}
 928		prb_close_block(pkc, pbd, po, status);
 929		return;
 930	}
 931}
 932
 933static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
 934{
 935	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 936}
 937
 938static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 939{
 940	return pkc->reset_pending_on_curr_blk;
 941}
 942
 943static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 944	__releases(&pkc->blk_fill_in_prog_lock)
 945{
 946	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 947
 948	read_unlock(&pkc->blk_fill_in_prog_lock);
 949}
 950
 951static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 952			struct tpacket3_hdr *ppd)
 953{
 954	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 955}
 956
 957static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 958			struct tpacket3_hdr *ppd)
 959{
 960	ppd->hv1.tp_rxhash = 0;
 961}
 962
 963static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 964			struct tpacket3_hdr *ppd)
 965{
 966	if (skb_vlan_tag_present(pkc->skb)) {
 967		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
 968		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
 969		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
 970	} else {
 971		ppd->hv1.tp_vlan_tci = 0;
 972		ppd->hv1.tp_vlan_tpid = 0;
 973		ppd->tp_status = TP_STATUS_AVAILABLE;
 974	}
 975}
 976
 977static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 978			struct tpacket3_hdr *ppd)
 979{
 980	ppd->hv1.tp_padding = 0;
 981	prb_fill_vlan_info(pkc, ppd);
 982
 983	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 984		prb_fill_rxhash(pkc, ppd);
 985	else
 986		prb_clear_rxhash(pkc, ppd);
 987}
 988
 989static void prb_fill_curr_block(char *curr,
 990				struct tpacket_kbdq_core *pkc,
 991				struct tpacket_block_desc *pbd,
 992				unsigned int len)
 993	__acquires(&pkc->blk_fill_in_prog_lock)
 994{
 995	struct tpacket3_hdr *ppd;
 996
 997	ppd  = (struct tpacket3_hdr *)curr;
 998	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
 999	pkc->prev = curr;
1000	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1001	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1002	BLOCK_NUM_PKTS(pbd) += 1;
1003	read_lock(&pkc->blk_fill_in_prog_lock);
1004	prb_run_all_ft_ops(pkc, ppd);
1005}
1006
1007/* Assumes caller has the sk->rx_queue.lock */
1008static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1009					    struct sk_buff *skb,
1010					    unsigned int len
1011					    )
1012{
1013	struct tpacket_kbdq_core *pkc;
1014	struct tpacket_block_desc *pbd;
1015	char *curr, *end;
1016
1017	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1018	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1019
1020	/* Queue is frozen when user space is lagging behind */
1021	if (prb_queue_frozen(pkc)) {
1022		/*
1023		 * Check if that last block which caused the queue to freeze,
1024		 * is still in_use by user-space.
1025		 */
1026		if (prb_curr_blk_in_use(pbd)) {
1027			/* Can't record this packet */
1028			return NULL;
1029		} else {
1030			/*
1031			 * Ok, the block was released by user-space.
1032			 * Now let's open that block.
1033			 * opening a block also thaws the queue.
1034			 * Thawing is a side effect.
1035			 */
1036			prb_open_block(pkc, pbd);
1037		}
1038	}
1039
1040	smp_mb();
1041	curr = pkc->nxt_offset;
1042	pkc->skb = skb;
1043	end = (char *)pbd + pkc->kblk_size;
1044
1045	/* first try the current block */
1046	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1047		prb_fill_curr_block(curr, pkc, pbd, len);
1048		return (void *)curr;
1049	}
1050
1051	/* Ok, close the current block */
1052	prb_retire_current_block(pkc, po, 0);
1053
1054	/* Now, try to dispatch the next block */
1055	curr = (char *)prb_dispatch_next_block(pkc, po);
1056	if (curr) {
1057		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1058		prb_fill_curr_block(curr, pkc, pbd, len);
1059		return (void *)curr;
1060	}
1061
1062	/*
1063	 * No free blocks are available. user_space hasn't caught up yet.
1064	 * Queue was just frozen and now this packet will get dropped.
1065	 */
1066	return NULL;
1067}
1068
1069static void *packet_current_rx_frame(struct packet_sock *po,
1070					    struct sk_buff *skb,
1071					    int status, unsigned int len)
1072{
1073	char *curr = NULL;
1074	switch (po->tp_version) {
1075	case TPACKET_V1:
1076	case TPACKET_V2:
1077		curr = packet_lookup_frame(po, &po->rx_ring,
1078					po->rx_ring.head, status);
1079		return curr;
1080	case TPACKET_V3:
1081		return __packet_lookup_frame_in_block(po, skb, len);
1082	default:
1083		WARN(1, "TPACKET version not supported\n");
1084		BUG();
1085		return NULL;
1086	}
1087}
1088
1089static void *prb_lookup_block(const struct packet_sock *po,
1090			      const struct packet_ring_buffer *rb,
1091			      unsigned int idx,
1092			      int status)
1093{
1094	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1095	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1096
1097	if (status != BLOCK_STATUS(pbd))
1098		return NULL;
1099	return pbd;
1100}
1101
1102static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1103{
1104	unsigned int prev;
1105	if (rb->prb_bdqc.kactive_blk_num)
1106		prev = rb->prb_bdqc.kactive_blk_num-1;
1107	else
1108		prev = rb->prb_bdqc.knum_blocks-1;
1109	return prev;
1110}
1111
1112/* Assumes caller has held the rx_queue.lock */
1113static void *__prb_previous_block(struct packet_sock *po,
1114					 struct packet_ring_buffer *rb,
1115					 int status)
1116{
1117	unsigned int previous = prb_previous_blk_num(rb);
1118	return prb_lookup_block(po, rb, previous, status);
1119}
1120
1121static void *packet_previous_rx_frame(struct packet_sock *po,
1122					     struct packet_ring_buffer *rb,
1123					     int status)
1124{
1125	if (po->tp_version <= TPACKET_V2)
1126		return packet_previous_frame(po, rb, status);
1127
1128	return __prb_previous_block(po, rb, status);
1129}
1130
1131static void packet_increment_rx_head(struct packet_sock *po,
1132					    struct packet_ring_buffer *rb)
1133{
1134	switch (po->tp_version) {
1135	case TPACKET_V1:
1136	case TPACKET_V2:
1137		return packet_increment_head(rb);
1138	case TPACKET_V3:
1139	default:
1140		WARN(1, "TPACKET version not supported.\n");
1141		BUG();
1142		return;
1143	}
1144}
1145
1146static void *packet_previous_frame(struct packet_sock *po,
1147		struct packet_ring_buffer *rb,
1148		int status)
1149{
1150	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1151	return packet_lookup_frame(po, rb, previous, status);
1152}
1153
1154static void packet_increment_head(struct packet_ring_buffer *buff)
1155{
1156	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1157}
1158
1159static void packet_inc_pending(struct packet_ring_buffer *rb)
1160{
1161	this_cpu_inc(*rb->pending_refcnt);
1162}
1163
1164static void packet_dec_pending(struct packet_ring_buffer *rb)
1165{
1166	this_cpu_dec(*rb->pending_refcnt);
1167}
1168
1169static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1170{
1171	unsigned int refcnt = 0;
1172	int cpu;
1173
1174	/* We don't use pending refcount in rx_ring. */
1175	if (rb->pending_refcnt == NULL)
1176		return 0;
1177
1178	for_each_possible_cpu(cpu)
1179		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1180
1181	return refcnt;
1182}
1183
1184static int packet_alloc_pending(struct packet_sock *po)
1185{
1186	po->rx_ring.pending_refcnt = NULL;
1187
1188	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1189	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1190		return -ENOBUFS;
1191
1192	return 0;
1193}
1194
1195static void packet_free_pending(struct packet_sock *po)
1196{
1197	free_percpu(po->tx_ring.pending_refcnt);
1198}
1199
1200#define ROOM_POW_OFF	2
1201#define ROOM_NONE	0x0
1202#define ROOM_LOW	0x1
1203#define ROOM_NORMAL	0x2
1204
1205static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1206{
1207	int idx, len;
1208
1209	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1210	idx = READ_ONCE(po->rx_ring.head);
1211	if (pow_off)
1212		idx += len >> pow_off;
1213	if (idx >= len)
1214		idx -= len;
1215	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1216}
1217
1218static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1219{
1220	int idx, len;
1221
1222	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1223	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1224	if (pow_off)
1225		idx += len >> pow_off;
1226	if (idx >= len)
1227		idx -= len;
1228	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1229}
1230
1231static int __packet_rcv_has_room(const struct packet_sock *po,
1232				 const struct sk_buff *skb)
1233{
1234	const struct sock *sk = &po->sk;
1235	int ret = ROOM_NONE;
1236
1237	if (po->prot_hook.func != tpacket_rcv) {
1238		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1239		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1240				   - (skb ? skb->truesize : 0);
1241
1242		if (avail > (rcvbuf >> ROOM_POW_OFF))
1243			return ROOM_NORMAL;
1244		else if (avail > 0)
1245			return ROOM_LOW;
1246		else
1247			return ROOM_NONE;
1248	}
1249
1250	if (po->tp_version == TPACKET_V3) {
1251		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1252			ret = ROOM_NORMAL;
1253		else if (__tpacket_v3_has_room(po, 0))
1254			ret = ROOM_LOW;
1255	} else {
1256		if (__tpacket_has_room(po, ROOM_POW_OFF))
1257			ret = ROOM_NORMAL;
1258		else if (__tpacket_has_room(po, 0))
1259			ret = ROOM_LOW;
1260	}
1261
1262	return ret;
1263}
1264
1265static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1266{
1267	int pressure, ret;
1268
1269	ret = __packet_rcv_has_room(po, skb);
1270	pressure = ret != ROOM_NORMAL;
1271
1272	if (READ_ONCE(po->pressure) != pressure)
1273		WRITE_ONCE(po->pressure, pressure);
1274
1275	return ret;
1276}
1277
1278static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1279{
1280	if (READ_ONCE(po->pressure) &&
1281	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1282		WRITE_ONCE(po->pressure,  0);
1283}
1284
1285static void packet_sock_destruct(struct sock *sk)
1286{
1287	skb_queue_purge(&sk->sk_error_queue);
1288
1289	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1290	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1291
1292	if (!sock_flag(sk, SOCK_DEAD)) {
1293		pr_err("Attempt to release alive packet socket: %p\n", sk);
1294		return;
1295	}
1296
1297	sk_refcnt_debug_dec(sk);
1298}
1299
1300static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1301{
1302	u32 *history = po->rollover->history;
1303	u32 victim, rxhash;
1304	int i, count = 0;
1305
1306	rxhash = skb_get_hash(skb);
1307	for (i = 0; i < ROLLOVER_HLEN; i++)
1308		if (READ_ONCE(history[i]) == rxhash)
1309			count++;
1310
1311	victim = prandom_u32() % ROLLOVER_HLEN;
1312
1313	/* Avoid dirtying the cache line if possible */
1314	if (READ_ONCE(history[victim]) != rxhash)
1315		WRITE_ONCE(history[victim], rxhash);
1316
1317	return count > (ROLLOVER_HLEN >> 1);
1318}
1319
1320static unsigned int fanout_demux_hash(struct packet_fanout *f,
1321				      struct sk_buff *skb,
1322				      unsigned int num)
1323{
1324	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1325}
1326
1327static unsigned int fanout_demux_lb(struct packet_fanout *f,
1328				    struct sk_buff *skb,
1329				    unsigned int num)
1330{
1331	unsigned int val = atomic_inc_return(&f->rr_cur);
1332
1333	return val % num;
1334}
1335
1336static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1337				     struct sk_buff *skb,
1338				     unsigned int num)
1339{
1340	return smp_processor_id() % num;
1341}
1342
1343static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1344				     struct sk_buff *skb,
1345				     unsigned int num)
1346{
1347	return prandom_u32_max(num);
1348}
1349
1350static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1351					  struct sk_buff *skb,
1352					  unsigned int idx, bool try_self,
1353					  unsigned int num)
1354{
1355	struct packet_sock *po, *po_next, *po_skip = NULL;
1356	unsigned int i, j, room = ROOM_NONE;
1357
1358	po = pkt_sk(f->arr[idx]);
1359
1360	if (try_self) {
1361		room = packet_rcv_has_room(po, skb);
1362		if (room == ROOM_NORMAL ||
1363		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1364			return idx;
1365		po_skip = po;
1366	}
1367
1368	i = j = min_t(int, po->rollover->sock, num - 1);
1369	do {
1370		po_next = pkt_sk(f->arr[i]);
1371		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1372		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1373			if (i != j)
1374				po->rollover->sock = i;
1375			atomic_long_inc(&po->rollover->num);
1376			if (room == ROOM_LOW)
1377				atomic_long_inc(&po->rollover->num_huge);
1378			return i;
1379		}
1380
1381		if (++i == num)
1382			i = 0;
1383	} while (i != j);
1384
1385	atomic_long_inc(&po->rollover->num_failed);
1386	return idx;
1387}
1388
1389static unsigned int fanout_demux_qm(struct packet_fanout *f,
1390				    struct sk_buff *skb,
1391				    unsigned int num)
1392{
1393	return skb_get_queue_mapping(skb) % num;
1394}
1395
1396static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1397				     struct sk_buff *skb,
1398				     unsigned int num)
1399{
1400	struct bpf_prog *prog;
1401	unsigned int ret = 0;
1402
1403	rcu_read_lock();
1404	prog = rcu_dereference(f->bpf_prog);
1405	if (prog)
1406		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1407	rcu_read_unlock();
1408
1409	return ret;
1410}
1411
1412static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1413{
1414	return f->flags & (flag >> 8);
1415}
1416
1417static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1418			     struct packet_type *pt, struct net_device *orig_dev)
1419{
1420	struct packet_fanout *f = pt->af_packet_priv;
1421	unsigned int num = READ_ONCE(f->num_members);
1422	struct net *net = read_pnet(&f->net);
1423	struct packet_sock *po;
1424	unsigned int idx;
1425
1426	if (!net_eq(dev_net(dev), net) || !num) {
1427		kfree_skb(skb);
1428		return 0;
1429	}
1430
1431	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1432		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1433		if (!skb)
1434			return 0;
1435	}
1436	switch (f->type) {
1437	case PACKET_FANOUT_HASH:
1438	default:
1439		idx = fanout_demux_hash(f, skb, num);
1440		break;
1441	case PACKET_FANOUT_LB:
1442		idx = fanout_demux_lb(f, skb, num);
1443		break;
1444	case PACKET_FANOUT_CPU:
1445		idx = fanout_demux_cpu(f, skb, num);
1446		break;
1447	case PACKET_FANOUT_RND:
1448		idx = fanout_demux_rnd(f, skb, num);
1449		break;
1450	case PACKET_FANOUT_QM:
1451		idx = fanout_demux_qm(f, skb, num);
1452		break;
1453	case PACKET_FANOUT_ROLLOVER:
1454		idx = fanout_demux_rollover(f, skb, 0, false, num);
1455		break;
1456	case PACKET_FANOUT_CBPF:
1457	case PACKET_FANOUT_EBPF:
1458		idx = fanout_demux_bpf(f, skb, num);
1459		break;
1460	}
1461
1462	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1463		idx = fanout_demux_rollover(f, skb, idx, true, num);
1464
1465	po = pkt_sk(f->arr[idx]);
1466	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1467}
1468
1469DEFINE_MUTEX(fanout_mutex);
1470EXPORT_SYMBOL_GPL(fanout_mutex);
1471static LIST_HEAD(fanout_list);
1472static u16 fanout_next_id;
1473
1474static void __fanout_link(struct sock *sk, struct packet_sock *po)
1475{
1476	struct packet_fanout *f = po->fanout;
1477
1478	spin_lock(&f->lock);
1479	f->arr[f->num_members] = sk;
1480	smp_wmb();
1481	f->num_members++;
1482	if (f->num_members == 1)
1483		dev_add_pack(&f->prot_hook);
1484	spin_unlock(&f->lock);
1485}
1486
1487static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1488{
1489	struct packet_fanout *f = po->fanout;
1490	int i;
1491
1492	spin_lock(&f->lock);
1493	for (i = 0; i < f->num_members; i++) {
1494		if (f->arr[i] == sk)
1495			break;
1496	}
1497	BUG_ON(i >= f->num_members);
1498	f->arr[i] = f->arr[f->num_members - 1];
1499	f->num_members--;
1500	if (f->num_members == 0)
1501		__dev_remove_pack(&f->prot_hook);
1502	spin_unlock(&f->lock);
1503}
1504
1505static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1506{
1507	if (sk->sk_family != PF_PACKET)
1508		return false;
1509
1510	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1511}
1512
1513static void fanout_init_data(struct packet_fanout *f)
1514{
1515	switch (f->type) {
1516	case PACKET_FANOUT_LB:
1517		atomic_set(&f->rr_cur, 0);
1518		break;
1519	case PACKET_FANOUT_CBPF:
1520	case PACKET_FANOUT_EBPF:
1521		RCU_INIT_POINTER(f->bpf_prog, NULL);
1522		break;
1523	}
1524}
1525
1526static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1527{
1528	struct bpf_prog *old;
1529
1530	spin_lock(&f->lock);
1531	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1532	rcu_assign_pointer(f->bpf_prog, new);
1533	spin_unlock(&f->lock);
1534
1535	if (old) {
1536		synchronize_net();
1537		bpf_prog_destroy(old);
1538	}
1539}
1540
1541static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1542				unsigned int len)
1543{
1544	struct bpf_prog *new;
1545	struct sock_fprog fprog;
1546	int ret;
1547
1548	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1549		return -EPERM;
1550
1551	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1552	if (ret)
1553		return ret;
1554
1555	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1556	if (ret)
1557		return ret;
1558
1559	__fanout_set_data_bpf(po->fanout, new);
1560	return 0;
1561}
1562
1563static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1564				unsigned int len)
1565{
1566	struct bpf_prog *new;
1567	u32 fd;
1568
1569	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1570		return -EPERM;
1571	if (len != sizeof(fd))
1572		return -EINVAL;
1573	if (copy_from_sockptr(&fd, data, len))
1574		return -EFAULT;
1575
1576	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1577	if (IS_ERR(new))
1578		return PTR_ERR(new);
1579
1580	__fanout_set_data_bpf(po->fanout, new);
1581	return 0;
1582}
1583
1584static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1585			   unsigned int len)
1586{
1587	switch (po->fanout->type) {
1588	case PACKET_FANOUT_CBPF:
1589		return fanout_set_data_cbpf(po, data, len);
1590	case PACKET_FANOUT_EBPF:
1591		return fanout_set_data_ebpf(po, data, len);
1592	default:
1593		return -EINVAL;
1594	}
1595}
1596
1597static void fanout_release_data(struct packet_fanout *f)
1598{
1599	switch (f->type) {
1600	case PACKET_FANOUT_CBPF:
1601	case PACKET_FANOUT_EBPF:
1602		__fanout_set_data_bpf(f, NULL);
1603	}
1604}
1605
1606static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1607{
1608	struct packet_fanout *f;
1609
1610	list_for_each_entry(f, &fanout_list, list) {
1611		if (f->id == candidate_id &&
1612		    read_pnet(&f->net) == sock_net(sk)) {
1613			return false;
1614		}
1615	}
1616	return true;
1617}
1618
1619static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1620{
1621	u16 id = fanout_next_id;
1622
1623	do {
1624		if (__fanout_id_is_free(sk, id)) {
1625			*new_id = id;
1626			fanout_next_id = id + 1;
1627			return true;
1628		}
1629
1630		id++;
1631	} while (id != fanout_next_id);
1632
1633	return false;
1634}
1635
1636static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1637{
1638	struct packet_rollover *rollover = NULL;
1639	struct packet_sock *po = pkt_sk(sk);
1640	struct packet_fanout *f, *match;
1641	u8 type = type_flags & 0xff;
1642	u8 flags = type_flags >> 8;
1643	int err;
1644
1645	switch (type) {
1646	case PACKET_FANOUT_ROLLOVER:
1647		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1648			return -EINVAL;
1649	case PACKET_FANOUT_HASH:
1650	case PACKET_FANOUT_LB:
1651	case PACKET_FANOUT_CPU:
1652	case PACKET_FANOUT_RND:
1653	case PACKET_FANOUT_QM:
1654	case PACKET_FANOUT_CBPF:
1655	case PACKET_FANOUT_EBPF:
1656		break;
1657	default:
1658		return -EINVAL;
1659	}
1660
1661	mutex_lock(&fanout_mutex);
1662
1663	err = -EALREADY;
1664	if (po->fanout)
1665		goto out;
1666
1667	if (type == PACKET_FANOUT_ROLLOVER ||
1668	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1669		err = -ENOMEM;
1670		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1671		if (!rollover)
1672			goto out;
1673		atomic_long_set(&rollover->num, 0);
1674		atomic_long_set(&rollover->num_huge, 0);
1675		atomic_long_set(&rollover->num_failed, 0);
1676	}
1677
1678	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1679		if (id != 0) {
1680			err = -EINVAL;
1681			goto out;
1682		}
1683		if (!fanout_find_new_id(sk, &id)) {
1684			err = -ENOMEM;
1685			goto out;
1686		}
1687		/* ephemeral flag for the first socket in the group: drop it */
1688		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1689	}
1690
1691	match = NULL;
1692	list_for_each_entry(f, &fanout_list, list) {
1693		if (f->id == id &&
1694		    read_pnet(&f->net) == sock_net(sk)) {
1695			match = f;
1696			break;
1697		}
1698	}
1699	err = -EINVAL;
1700	if (match && match->flags != flags)
1701		goto out;
1702	if (!match) {
1703		err = -ENOMEM;
1704		match = kzalloc(sizeof(*match), GFP_KERNEL);
1705		if (!match)
1706			goto out;
1707		write_pnet(&match->net, sock_net(sk));
1708		match->id = id;
1709		match->type = type;
1710		match->flags = flags;
1711		INIT_LIST_HEAD(&match->list);
1712		spin_lock_init(&match->lock);
1713		refcount_set(&match->sk_ref, 0);
1714		fanout_init_data(match);
1715		match->prot_hook.type = po->prot_hook.type;
1716		match->prot_hook.dev = po->prot_hook.dev;
1717		match->prot_hook.func = packet_rcv_fanout;
1718		match->prot_hook.af_packet_priv = match;
1719		match->prot_hook.id_match = match_fanout_group;
1720		list_add(&match->list, &fanout_list);
1721	}
1722	err = -EINVAL;
1723
1724	spin_lock(&po->bind_lock);
1725	if (po->running &&
1726	    match->type == type &&
1727	    match->prot_hook.type == po->prot_hook.type &&
1728	    match->prot_hook.dev == po->prot_hook.dev) {
1729		err = -ENOSPC;
1730		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1731			__dev_remove_pack(&po->prot_hook);
1732			po->fanout = match;
1733			po->rollover = rollover;
1734			rollover = NULL;
1735			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1736			__fanout_link(sk, po);
1737			err = 0;
1738		}
1739	}
1740	spin_unlock(&po->bind_lock);
1741
1742	if (err && !refcount_read(&match->sk_ref)) {
1743		list_del(&match->list);
1744		kfree(match);
1745	}
1746
1747out:
1748	kfree(rollover);
1749	mutex_unlock(&fanout_mutex);
1750	return err;
1751}
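/* Illustrative userspace sketch (not part of this file): joining a fanout
 * group via setsockopt(PACKET_FANOUT), which lands in fanout_add() above.
 * It assumes the conventional argument layout of group id in the low 16
 * bits and type/flags in the high 16 bits; the group id of 7 is
 * hypothetical.
 *
 *	int val = 7 | (PACKET_FANOUT_HASH << 16);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val)) < 0)
 *		perror("PACKET_FANOUT");
 */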
1752
1753/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1754 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1755 * It is the responsibility of the caller to call fanout_release_data() and
1756 * free the returned packet_fanout (after synchronize_net())
1757 */
1758static struct packet_fanout *fanout_release(struct sock *sk)
1759{
1760	struct packet_sock *po = pkt_sk(sk);
1761	struct packet_fanout *f;
1762
1763	mutex_lock(&fanout_mutex);
1764	f = po->fanout;
1765	if (f) {
1766		po->fanout = NULL;
1767
1768		if (refcount_dec_and_test(&f->sk_ref))
1769			list_del(&f->list);
1770		else
1771			f = NULL;
1772	}
1773	mutex_unlock(&fanout_mutex);
1774
1775	return f;
1776}
1777
1778static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1779					  struct sk_buff *skb)
1780{
1781	/* Earlier code assumed this would be a VLAN pkt, double-check
1782	 * this now that we have the actual packet in hand. We can only
1783	 * do this check on Ethernet devices.
1784	 */
1785	if (unlikely(dev->type != ARPHRD_ETHER))
1786		return false;
1787
1788	skb_reset_mac_header(skb);
1789	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1790}
1791
1792static const struct proto_ops packet_ops;
1793
1794static const struct proto_ops packet_ops_spkt;
1795
1796static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1797			   struct packet_type *pt, struct net_device *orig_dev)
1798{
1799	struct sock *sk;
1800	struct sockaddr_pkt *spkt;
1801
1802	/*
1803	 *	When we registered the protocol we saved the socket in the data
1804	 *	field for just this event.
1805	 */
1806
1807	sk = pt->af_packet_priv;
1808
1809	/*
1810	 *	Yank back the headers [hope the device set this
1811	 *	right or kerboom...]
1812	 *
1813	 *	Incoming packets have ll header pulled,
1814	 *	push it back.
1815	 *
1816	 *	For outgoing ones skb->data == skb_mac_header(skb)
1817	 *	so that this procedure is noop.
1818	 */
1819
1820	if (skb->pkt_type == PACKET_LOOPBACK)
1821		goto out;
1822
1823	if (!net_eq(dev_net(dev), sock_net(sk)))
1824		goto out;
1825
1826	skb = skb_share_check(skb, GFP_ATOMIC);
1827	if (skb == NULL)
1828		goto oom;
1829
1830	/* drop any routing info */
1831	skb_dst_drop(skb);
1832
1833	/* drop conntrack reference */
1834	nf_reset_ct(skb);
1835
1836	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1837
1838	skb_push(skb, skb->data - skb_mac_header(skb));
1839
1840	/*
1841	 *	The SOCK_PACKET socket receives _all_ frames.
1842	 */
1843
1844	spkt->spkt_family = dev->type;
1845	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1846	spkt->spkt_protocol = skb->protocol;
1847
1848	/*
1849	 *	Charge the memory to the socket. This is done specifically
1850	 *	to prevent sockets from using up all the memory.
1851	 */
1852
1853	if (sock_queue_rcv_skb(sk, skb) == 0)
1854		return 0;
1855
1856out:
1857	kfree_skb(skb);
1858oom:
1859	return 0;
1860}
1861
1862static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1863{
1864	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1865	    sock->type == SOCK_RAW) {
1866		skb_reset_mac_header(skb);
1867		skb->protocol = dev_parse_header_protocol(skb);
1868	}
1869
1870	skb_probe_transport_header(skb);
1871}
1872
1873/*
1874 *	Output a raw packet to a device layer. This bypasses all the other
1875 *	protocol layers and you must therefore supply it with a complete frame
1876 */
1877
1878static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1879			       size_t len)
1880{
1881	struct sock *sk = sock->sk;
1882	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1883	struct sk_buff *skb = NULL;
1884	struct net_device *dev;
1885	struct sockcm_cookie sockc;
1886	__be16 proto = 0;
1887	int err;
1888	int extra_len = 0;
1889
1890	/*
1891	 *	Get and verify the address.
1892	 */
1893
1894	if (saddr) {
1895		if (msg->msg_namelen < sizeof(struct sockaddr))
1896			return -EINVAL;
1897		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1898			proto = saddr->spkt_protocol;
1899	} else
1900		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1901
1902	/*
1903	 *	Find the device first to size check it
1904	 */
1905
1906	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1907retry:
1908	rcu_read_lock();
1909	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1910	err = -ENODEV;
1911	if (dev == NULL)
1912		goto out_unlock;
1913
1914	err = -ENETDOWN;
1915	if (!(dev->flags & IFF_UP))
1916		goto out_unlock;
1917
1918	/*
1919	 * You may not queue a frame bigger than the mtu. This is the lowest level
1920	 * raw protocol and you must do your own fragmentation at this level.
1921	 */
1922
1923	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1924		if (!netif_supports_nofcs(dev)) {
1925			err = -EPROTONOSUPPORT;
1926			goto out_unlock;
1927		}
1928		extra_len = 4; /* We're doing our own CRC */
1929	}
1930
1931	err = -EMSGSIZE;
1932	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1933		goto out_unlock;
1934
1935	if (!skb) {
1936		size_t reserved = LL_RESERVED_SPACE(dev);
1937		int tlen = dev->needed_tailroom;
1938		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1939
1940		rcu_read_unlock();
1941		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1942		if (skb == NULL)
1943			return -ENOBUFS;
1944		/* FIXME: Save some space for broken drivers that write a hard
1945		 * header at transmission time by themselves. PPP is the notable
1946		 * one here. This should really be fixed at the driver level.
1947		 */
1948		skb_reserve(skb, reserved);
1949		skb_reset_network_header(skb);
1950
1951		/* Try to align data part correctly */
1952		if (hhlen) {
1953			skb->data -= hhlen;
1954			skb->tail -= hhlen;
1955			if (len < hhlen)
1956				skb_reset_network_header(skb);
1957		}
1958		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1959		if (err)
1960			goto out_free;
1961		goto retry;
1962	}
1963
1964	if (!dev_validate_header(dev, skb->data, len)) {
1965		err = -EINVAL;
1966		goto out_unlock;
1967	}
1968	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1969	    !packet_extra_vlan_len_allowed(dev, skb)) {
1970		err = -EMSGSIZE;
1971		goto out_unlock;
1972	}
1973
1974	sockcm_init(&sockc, sk);
1975	if (msg->msg_controllen) {
1976		err = sock_cmsg_send(sk, msg, &sockc);
1977		if (unlikely(err))
1978			goto out_unlock;
1979	}
1980
1981	skb->protocol = proto;
1982	skb->dev = dev;
1983	skb->priority = sk->sk_priority;
1984	skb->mark = sk->sk_mark;
1985	skb->tstamp = sockc.transmit_time;
1986
1987	skb_setup_tx_timestamp(skb, sockc.tsflags);
1988
1989	if (unlikely(extra_len == 4))
1990		skb->no_fcs = 1;
1991
1992	packet_parse_headers(skb, sock);
1993
1994	dev_queue_xmit(skb);
1995	rcu_read_unlock();
1996	return len;
1997
1998out_unlock:
1999	rcu_read_unlock();
2000out_free:
2001	kfree_skb(skb);
2002	return err;
2003}
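/* Illustrative userspace sketch (not part of this file): transmitting a
 * raw frame on an obsolete SOCK_PACKET socket, matching the address
 * handling above.  The device name and frame buffer are hypothetical.
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_ALL);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */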
2004
2005static unsigned int run_filter(struct sk_buff *skb,
2006			       const struct sock *sk,
2007			       unsigned int res)
2008{
2009	struct sk_filter *filter;
2010
2011	rcu_read_lock();
2012	filter = rcu_dereference(sk->sk_filter);
2013	if (filter != NULL)
2014		res = bpf_prog_run_clear_cb(filter->prog, skb);
2015	rcu_read_unlock();
2016
2017	return res;
2018}
2019
2020static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2021			   size_t *len)
2022{
2023	struct virtio_net_hdr vnet_hdr;
2024
2025	if (*len < sizeof(vnet_hdr))
2026		return -EINVAL;
2027	*len -= sizeof(vnet_hdr);
2028
2029	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2030		return -EINVAL;
2031
2032	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2033}
2034
2035/*
2036 * This function performs lazy skb cloning in the hope that most packets
2037 * are discarded by BPF.
2038 *
2039 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2040 * and skb->cb are mangled. It works because (and until) packets
2041 * falling here are owned by current CPU. Output packets are cloned
2042 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2043 * sequentially, so that if we return the skb to its original state on exit,
2044 * we will not harm anyone.
2045 */
2046
2047static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2048		      struct packet_type *pt, struct net_device *orig_dev)
2049{
2050	struct sock *sk;
2051	struct sockaddr_ll *sll;
2052	struct packet_sock *po;
2053	u8 *skb_head = skb->data;
2054	int skb_len = skb->len;
2055	unsigned int snaplen, res;
2056	bool is_drop_n_account = false;
2057
2058	if (skb->pkt_type == PACKET_LOOPBACK)
2059		goto drop;
2060
2061	sk = pt->af_packet_priv;
2062	po = pkt_sk(sk);
2063
2064	if (!net_eq(dev_net(dev), sock_net(sk)))
2065		goto drop;
2066
2067	skb->dev = dev;
2068
2069	if (dev->header_ops) {
2070		/* The device has an explicit notion of ll header,
2071		 * exported to higher levels.
2072		 *
2073		 * structure, so that the corresponding packet header is
2074		 * never delivered to the user.
2075		 * never delivered to user.
2076		 */
2077		if (sk->sk_type != SOCK_DGRAM)
2078			skb_push(skb, skb->data - skb_mac_header(skb));
2079		else if (skb->pkt_type == PACKET_OUTGOING) {
2080			/* Special case: outgoing packets have ll header at head */
2081			skb_pull(skb, skb_network_offset(skb));
2082		}
2083	}
2084
2085	snaplen = skb->len;
2086
2087	res = run_filter(skb, sk, snaplen);
2088	if (!res)
2089		goto drop_n_restore;
2090	if (snaplen > res)
2091		snaplen = res;
2092
2093	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2094		goto drop_n_acct;
2095
2096	if (skb_shared(skb)) {
2097		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2098		if (nskb == NULL)
2099			goto drop_n_acct;
2100
2101		if (skb_head != skb->data) {
2102			skb->data = skb_head;
2103			skb->len = skb_len;
2104		}
2105		consume_skb(skb);
2106		skb = nskb;
2107	}
2108
2109	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2110
2111	sll = &PACKET_SKB_CB(skb)->sa.ll;
2112	sll->sll_hatype = dev->type;
2113	sll->sll_pkttype = skb->pkt_type;
2114	if (unlikely(po->origdev))
2115		sll->sll_ifindex = orig_dev->ifindex;
2116	else
2117		sll->sll_ifindex = dev->ifindex;
2118
2119	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2120
2121	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2122	 * Use their space for storing the original skb length.
2123	 */
2124	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2125
2126	if (pskb_trim(skb, snaplen))
2127		goto drop_n_acct;
2128
2129	skb_set_owner_r(skb, sk);
2130	skb->dev = NULL;
2131	skb_dst_drop(skb);
2132
2133	/* drop conntrack reference */
2134	nf_reset_ct(skb);
2135
2136	spin_lock(&sk->sk_receive_queue.lock);
2137	po->stats.stats1.tp_packets++;
2138	sock_skb_set_dropcount(sk, skb);
2139	__skb_queue_tail(&sk->sk_receive_queue, skb);
2140	spin_unlock(&sk->sk_receive_queue.lock);
2141	sk->sk_data_ready(sk);
2142	return 0;
2143
2144drop_n_acct:
2145	is_drop_n_account = true;
2146	atomic_inc(&po->tp_drops);
2147	atomic_inc(&sk->sk_drops);
2148
2149drop_n_restore:
2150	if (skb_head != skb->data && skb_shared(skb)) {
2151		skb->data = skb_head;
2152		skb->len = skb_len;
2153	}
2154drop:
2155	if (!is_drop_n_account)
2156		consume_skb(skb);
2157	else
2158		kfree_skb(skb);
2159	return 0;
2160}
2161
2162static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2163		       struct packet_type *pt, struct net_device *orig_dev)
2164{
2165	struct sock *sk;
2166	struct packet_sock *po;
2167	struct sockaddr_ll *sll;
2168	union tpacket_uhdr h;
2169	u8 *skb_head = skb->data;
2170	int skb_len = skb->len;
2171	unsigned int snaplen, res;
2172	unsigned long status = TP_STATUS_USER;
2173	unsigned short macoff, hdrlen;
2174	unsigned int netoff;
2175	struct sk_buff *copy_skb = NULL;
2176	struct timespec64 ts;
2177	__u32 ts_status;
2178	bool is_drop_n_account = false;
2179	unsigned int slot_id = 0;
2180	bool do_vnet = false;
2181
2182	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2183	 * We may add members to them until current aligned size without forcing
2184	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2185	 */
2186	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2187	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2188
2189	if (skb->pkt_type == PACKET_LOOPBACK)
2190		goto drop;
2191
2192	sk = pt->af_packet_priv;
2193	po = pkt_sk(sk);
2194
2195	if (!net_eq(dev_net(dev), sock_net(sk)))
2196		goto drop;
2197
2198	if (dev->header_ops) {
2199		if (sk->sk_type != SOCK_DGRAM)
2200			skb_push(skb, skb->data - skb_mac_header(skb));
2201		else if (skb->pkt_type == PACKET_OUTGOING) {
2202			/* Special case: outgoing packets have ll header at head */
2203			skb_pull(skb, skb_network_offset(skb));
2204		}
2205	}
2206
2207	snaplen = skb->len;
2208
2209	res = run_filter(skb, sk, snaplen);
2210	if (!res)
2211		goto drop_n_restore;
2212
2213	/* If we are flooded, just give up */
2214	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2215		atomic_inc(&po->tp_drops);
2216		goto drop_n_restore;
2217	}
2218
2219	if (skb->ip_summed == CHECKSUM_PARTIAL)
2220		status |= TP_STATUS_CSUMNOTREADY;
2221	else if (skb->pkt_type != PACKET_OUTGOING &&
2222		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2223		  skb_csum_unnecessary(skb)))
2224		status |= TP_STATUS_CSUM_VALID;
2225
2226	if (snaplen > res)
2227		snaplen = res;
2228
2229	if (sk->sk_type == SOCK_DGRAM) {
2230		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2231				  po->tp_reserve;
2232	} else {
2233		unsigned int maclen = skb_network_offset(skb);
2234		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2235				       (maclen < 16 ? 16 : maclen)) +
2236				       po->tp_reserve;
2237		if (po->has_vnet_hdr) {
2238			netoff += sizeof(struct virtio_net_hdr);
2239			do_vnet = true;
2240		}
2241		macoff = netoff - maclen;
2242	}
2243	if (netoff > USHRT_MAX) {
2244		atomic_inc(&po->tp_drops);
2245		goto drop_n_restore;
2246	}
2247	if (po->tp_version <= TPACKET_V2) {
2248		if (macoff + snaplen > po->rx_ring.frame_size) {
2249			if (po->copy_thresh &&
2250			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2251				if (skb_shared(skb)) {
2252					copy_skb = skb_clone(skb, GFP_ATOMIC);
2253				} else {
2254					copy_skb = skb_get(skb);
2255					skb_head = skb->data;
2256				}
2257				if (copy_skb)
2258					skb_set_owner_r(copy_skb, sk);
2259			}
2260			snaplen = po->rx_ring.frame_size - macoff;
2261			if ((int)snaplen < 0) {
2262				snaplen = 0;
2263				do_vnet = false;
2264			}
2265		}
2266	} else if (unlikely(macoff + snaplen >
2267			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2268		u32 nval;
2269
2270		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2271		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2272			    snaplen, nval, macoff);
2273		snaplen = nval;
2274		if (unlikely((int)snaplen < 0)) {
2275			snaplen = 0;
2276			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2277			do_vnet = false;
2278		}
2279	}
2280	spin_lock(&sk->sk_receive_queue.lock);
2281	h.raw = packet_current_rx_frame(po, skb,
2282					TP_STATUS_KERNEL, (macoff+snaplen));
2283	if (!h.raw)
2284		goto drop_n_account;
2285
2286	if (po->tp_version <= TPACKET_V2) {
2287		slot_id = po->rx_ring.head;
2288		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2289			goto drop_n_account;
2290		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2291	}
2292
2293	if (do_vnet &&
2294	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2295				    sizeof(struct virtio_net_hdr),
2296				    vio_le(), true, 0)) {
2297		if (po->tp_version == TPACKET_V3)
2298			prb_clear_blk_fill_status(&po->rx_ring);
2299		goto drop_n_account;
2300	}
2301
2302	if (po->tp_version <= TPACKET_V2) {
2303		packet_increment_rx_head(po, &po->rx_ring);
2304	/*
2305	 * LOSING will be reported till you read the stats,
2306	 * because it's COR - Clear On Read.
2307	 * Anyways, moving it for V1/V2 only as V3 doesn't need this
2308	 * at packet level.
2309	 */
2310		if (atomic_read(&po->tp_drops))
2311			status |= TP_STATUS_LOSING;
2312	}
2313
2314	po->stats.stats1.tp_packets++;
2315	if (copy_skb) {
2316		status |= TP_STATUS_COPY;
2317		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2318	}
2319	spin_unlock(&sk->sk_receive_queue.lock);
2320
2321	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2322
2323	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2324		ktime_get_real_ts64(&ts);
2325
2326	status |= ts_status;
2327
2328	switch (po->tp_version) {
2329	case TPACKET_V1:
2330		h.h1->tp_len = skb->len;
2331		h.h1->tp_snaplen = snaplen;
2332		h.h1->tp_mac = macoff;
2333		h.h1->tp_net = netoff;
2334		h.h1->tp_sec = ts.tv_sec;
2335		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2336		hdrlen = sizeof(*h.h1);
2337		break;
2338	case TPACKET_V2:
2339		h.h2->tp_len = skb->len;
2340		h.h2->tp_snaplen = snaplen;
2341		h.h2->tp_mac = macoff;
2342		h.h2->tp_net = netoff;
2343		h.h2->tp_sec = ts.tv_sec;
2344		h.h2->tp_nsec = ts.tv_nsec;
2345		if (skb_vlan_tag_present(skb)) {
2346			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2347			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2348			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2349		} else {
2350			h.h2->tp_vlan_tci = 0;
2351			h.h2->tp_vlan_tpid = 0;
2352		}
2353		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2354		hdrlen = sizeof(*h.h2);
2355		break;
2356	case TPACKET_V3:
2357		/* tp_next_offset and vlan are already populated above,
2358		 * so don't clear those fields here.
2359		 */
2360		h.h3->tp_status |= status;
2361		h.h3->tp_len = skb->len;
2362		h.h3->tp_snaplen = snaplen;
2363		h.h3->tp_mac = macoff;
2364		h.h3->tp_net = netoff;
2365		h.h3->tp_sec  = ts.tv_sec;
2366		h.h3->tp_nsec = ts.tv_nsec;
2367		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2368		hdrlen = sizeof(*h.h3);
2369		break;
2370	default:
2371		BUG();
2372	}
2373
2374	sll = h.raw + TPACKET_ALIGN(hdrlen);
2375	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2376	sll->sll_family = AF_PACKET;
2377	sll->sll_hatype = dev->type;
2378	sll->sll_protocol = skb->protocol;
2379	sll->sll_pkttype = skb->pkt_type;
2380	if (unlikely(po->origdev))
2381		sll->sll_ifindex = orig_dev->ifindex;
2382	else
2383		sll->sll_ifindex = dev->ifindex;
2384
2385	smp_mb();
2386
2387#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2388	if (po->tp_version <= TPACKET_V2) {
2389		u8 *start, *end;
2390
2391		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2392					macoff + snaplen);
2393
2394		for (start = h.raw; start < end; start += PAGE_SIZE)
2395			flush_dcache_page(pgv_to_page(start));
2396	}
2397	smp_wmb();
2398#endif
2399
2400	if (po->tp_version <= TPACKET_V2) {
2401		spin_lock(&sk->sk_receive_queue.lock);
2402		__packet_set_status(po, h.raw, status);
2403		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2404		spin_unlock(&sk->sk_receive_queue.lock);
2405		sk->sk_data_ready(sk);
2406	} else if (po->tp_version == TPACKET_V3) {
2407		prb_clear_blk_fill_status(&po->rx_ring);
2408	}
2409
2410drop_n_restore:
2411	if (skb_head != skb->data && skb_shared(skb)) {
2412		skb->data = skb_head;
2413		skb->len = skb_len;
2414	}
2415drop:
2416	if (!is_drop_n_account)
2417		consume_skb(skb);
2418	else
2419		kfree_skb(skb);
2420	return 0;
2421
2422drop_n_account:
2423	spin_unlock(&sk->sk_receive_queue.lock);
2424	atomic_inc(&po->tp_drops);
2425	is_drop_n_account = true;
2426
2427	sk->sk_data_ready(sk);
2428	kfree_skb(copy_skb);
2429	goto drop_n_restore;
2430}
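/* Illustrative userspace sketch (not part of this file): consuming frames
 * that tpacket_rcv() wrote into a TPACKET_V2 PACKET_RX_RING.  It assumes
 * the ring was already configured and mmap()ed; ring (a char pointer to
 * the mapped area), frame_size, frame_nr and idx are the application's
 * own hypothetical bookkeeping.  Writing TP_STATUS_KERNEL back into
 * tp_status returns the slot to the kernel.
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + idx * frame_size);
 *
 *	while (hdr->tp_status & TP_STATUS_USER) {
 *		handle_frame(ring + idx * frame_size + hdr->tp_mac,
 *			     hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *		idx = (idx + 1) % frame_nr;
 *		hdr = (void *)(ring + idx * frame_size);
 *	}
 */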
2431
2432static void tpacket_destruct_skb(struct sk_buff *skb)
2433{
2434	struct packet_sock *po = pkt_sk(skb->sk);
2435
2436	if (likely(po->tx_ring.pg_vec)) {
2437		void *ph;
2438		__u32 ts;
2439
2440		ph = skb_zcopy_get_nouarg(skb);
2441		packet_dec_pending(&po->tx_ring);
2442
2443		ts = __packet_set_timestamp(po, ph, skb);
2444		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2445
2446		if (!packet_read_pending(&po->tx_ring))
2447			complete(&po->skb_completion);
2448	}
2449
2450	sock_wfree(skb);
2451}
2452
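/* When PACKET_VNET_HDR is enabled on a socket, every packet exchanged with
 * userspace carries a leading struct virtio_net_hdr: packet_rcv_vnet()
 * above copies one out to the receiver, while the two helpers below strip
 * and sanity-check the one supplied at the start of a sendmsg() payload.
 */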
2453static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2454{
2455	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2456	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2457	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2458	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2459		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2460			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2461			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2462
2463	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2464		return -EINVAL;
2465
2466	return 0;
2467}
2468
2469static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2470				 struct virtio_net_hdr *vnet_hdr)
2471{
2472	if (*len < sizeof(*vnet_hdr))
2473		return -EINVAL;
2474	*len -= sizeof(*vnet_hdr);
2475
2476	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2477		return -EFAULT;
2478
2479	return __packet_snd_vnet_parse(vnet_hdr, *len);
2480}
2481
2482static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2483		void *frame, struct net_device *dev, void *data, int tp_len,
2484		__be16 proto, unsigned char *addr, int hlen, int copylen,
2485		const struct sockcm_cookie *sockc)
2486{
2487	union tpacket_uhdr ph;
2488	int to_write, offset, len, nr_frags, len_max;
2489	struct socket *sock = po->sk.sk_socket;
2490	struct page *page;
2491	int err;
2492
2493	ph.raw = frame;
2494
2495	skb->protocol = proto;
2496	skb->dev = dev;
2497	skb->priority = po->sk.sk_priority;
2498	skb->mark = po->sk.sk_mark;
2499	skb->tstamp = sockc->transmit_time;
2500	skb_setup_tx_timestamp(skb, sockc->tsflags);
2501	skb_zcopy_set_nouarg(skb, ph.raw);
2502
2503	skb_reserve(skb, hlen);
2504	skb_reset_network_header(skb);
2505
2506	to_write = tp_len;
2507
2508	if (sock->type == SOCK_DGRAM) {
2509		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2510				NULL, tp_len);
2511		if (unlikely(err < 0))
2512			return -EINVAL;
2513	} else if (copylen) {
2514		int hdrlen = min_t(int, copylen, tp_len);
2515
2516		skb_push(skb, dev->hard_header_len);
2517		skb_put(skb, copylen - dev->hard_header_len);
2518		err = skb_store_bits(skb, 0, data, hdrlen);
2519		if (unlikely(err))
2520			return err;
2521		if (!dev_validate_header(dev, skb->data, hdrlen))
2522			return -EINVAL;
2523
2524		data += hdrlen;
2525		to_write -= hdrlen;
2526	}
2527
2528	offset = offset_in_page(data);
2529	len_max = PAGE_SIZE - offset;
2530	len = ((to_write > len_max) ? len_max : to_write);
2531
2532	skb->data_len = to_write;
2533	skb->len += to_write;
2534	skb->truesize += to_write;
2535	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2536
2537	while (likely(to_write)) {
2538		nr_frags = skb_shinfo(skb)->nr_frags;
2539
2540		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2541			pr_err("Packet exceed the number of skb frags(%lu)\n",
2542			       MAX_SKB_FRAGS);
2543			return -EFAULT;
2544		}
2545
2546		page = pgv_to_page(data);
2547		data += len;
2548		flush_dcache_page(page);
2549		get_page(page);
2550		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2551		to_write -= len;
2552		offset = 0;
2553		len_max = PAGE_SIZE;
2554		len = ((to_write > len_max) ? len_max : to_write);
2555	}
2556
2557	packet_parse_headers(skb, sock);
2558
2559	return tp_len;
2560}
2561
2562static int tpacket_parse_header(struct packet_sock *po, void *frame,
2563				int size_max, void **data)
2564{
2565	union tpacket_uhdr ph;
2566	int tp_len, off;
2567
2568	ph.raw = frame;
2569
2570	switch (po->tp_version) {
2571	case TPACKET_V3:
2572		if (ph.h3->tp_next_offset != 0) {
2573			pr_warn_once("variable sized slot not supported");
2574			return -EINVAL;
2575		}
2576		tp_len = ph.h3->tp_len;
2577		break;
2578	case TPACKET_V2:
2579		tp_len = ph.h2->tp_len;
2580		break;
2581	default:
2582		tp_len = ph.h1->tp_len;
2583		break;
2584	}
2585	if (unlikely(tp_len > size_max)) {
2586		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2587		return -EMSGSIZE;
2588	}
2589
2590	if (unlikely(po->tp_tx_has_off)) {
2591		int off_min, off_max;
2592
2593		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2594		off_max = po->tx_ring.frame_size - tp_len;
2595		if (po->sk.sk_type == SOCK_DGRAM) {
2596			switch (po->tp_version) {
2597			case TPACKET_V3:
2598				off = ph.h3->tp_net;
2599				break;
2600			case TPACKET_V2:
2601				off = ph.h2->tp_net;
2602				break;
2603			default:
2604				off = ph.h1->tp_net;
2605				break;
2606			}
2607		} else {
2608			switch (po->tp_version) {
2609			case TPACKET_V3:
2610				off = ph.h3->tp_mac;
2611				break;
2612			case TPACKET_V2:
2613				off = ph.h2->tp_mac;
2614				break;
2615			default:
2616				off = ph.h1->tp_mac;
2617				break;
2618			}
2619		}
2620		if (unlikely((off < off_min) || (off_max < off)))
2621			return -EINVAL;
2622	} else {
2623		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2624	}
2625
2626	*data = frame + off;
2627	return tp_len;
2628}
2629
2630static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2631{
2632	struct sk_buff *skb = NULL;
2633	struct net_device *dev;
2634	struct virtio_net_hdr *vnet_hdr = NULL;
2635	struct sockcm_cookie sockc;
2636	__be16 proto;
2637	int err, reserve = 0;
2638	void *ph;
2639	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2640	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2641	unsigned char *addr = NULL;
2642	int tp_len, size_max;
2643	void *data;
2644	int len_sum = 0;
2645	int status = TP_STATUS_AVAILABLE;
2646	int hlen, tlen, copylen = 0;
2647	long timeo = 0;
2648
2649	mutex_lock(&po->pg_vec_lock);
2650
2651	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2652	 * we need to confirm it under protection of pg_vec_lock.
2653	 */
2654	if (unlikely(!po->tx_ring.pg_vec)) {
2655		err = -EBUSY;
2656		goto out;
2657	}
2658	if (likely(saddr == NULL)) {
2659		dev	= packet_cached_dev_get(po);
2660		proto	= po->num;
2661	} else {
2662		err = -EINVAL;
2663		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2664			goto out;
2665		if (msg->msg_namelen < (saddr->sll_halen
2666					+ offsetof(struct sockaddr_ll,
2667						sll_addr)))
2668			goto out;
2669		proto	= saddr->sll_protocol;
2670		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2671		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2672			if (dev && msg->msg_namelen < dev->addr_len +
2673				   offsetof(struct sockaddr_ll, sll_addr))
2674				goto out_put;
2675			addr = saddr->sll_addr;
2676		}
2677	}
2678
2679	err = -ENXIO;
2680	if (unlikely(dev == NULL))
2681		goto out;
2682	err = -ENETDOWN;
2683	if (unlikely(!(dev->flags & IFF_UP)))
2684		goto out_put;
2685
2686	sockcm_init(&sockc, &po->sk);
2687	if (msg->msg_controllen) {
2688		err = sock_cmsg_send(&po->sk, msg, &sockc);
2689		if (unlikely(err))
2690			goto out_put;
2691	}
2692
2693	if (po->sk.sk_socket->type == SOCK_RAW)
2694		reserve = dev->hard_header_len;
2695	size_max = po->tx_ring.frame_size
2696		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2697
2698	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2699		size_max = dev->mtu + reserve + VLAN_HLEN;
2700
2701	reinit_completion(&po->skb_completion);
2702
2703	do {
2704		ph = packet_current_frame(po, &po->tx_ring,
2705					  TP_STATUS_SEND_REQUEST);
2706		if (unlikely(ph == NULL)) {
2707			if (need_wait && skb) {
2708				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2709				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2710				if (timeo <= 0) {
2711					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2712					goto out_put;
2713				}
2714			}
2715			/* check for additional frames */
2716			continue;
2717		}
2718
2719		skb = NULL;
2720		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2721		if (tp_len < 0)
2722			goto tpacket_error;
2723
2724		status = TP_STATUS_SEND_REQUEST;
2725		hlen = LL_RESERVED_SPACE(dev);
2726		tlen = dev->needed_tailroom;
2727		if (po->has_vnet_hdr) {
2728			vnet_hdr = data;
2729			data += sizeof(*vnet_hdr);
2730			tp_len -= sizeof(*vnet_hdr);
2731			if (tp_len < 0 ||
2732			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2733				tp_len = -EINVAL;
2734				goto tpacket_error;
2735			}
2736			copylen = __virtio16_to_cpu(vio_le(),
2737						    vnet_hdr->hdr_len);
2738		}
2739		copylen = max_t(int, copylen, dev->hard_header_len);
2740		skb = sock_alloc_send_skb(&po->sk,
2741				hlen + tlen + sizeof(struct sockaddr_ll) +
2742				(copylen - dev->hard_header_len),
2743				!need_wait, &err);
2744
2745		if (unlikely(skb == NULL)) {
2746			/* we assume the socket was initially writeable ... */
2747			if (likely(len_sum > 0))
2748				err = len_sum;
2749			goto out_status;
2750		}
2751		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2752					  addr, hlen, copylen, &sockc);
2753		if (likely(tp_len >= 0) &&
2754		    tp_len > dev->mtu + reserve &&
2755		    !po->has_vnet_hdr &&
2756		    !packet_extra_vlan_len_allowed(dev, skb))
2757			tp_len = -EMSGSIZE;
2758
2759		if (unlikely(tp_len < 0)) {
2760tpacket_error:
2761			if (po->tp_loss) {
2762				__packet_set_status(po, ph,
2763						TP_STATUS_AVAILABLE);
2764				packet_increment_head(&po->tx_ring);
2765				kfree_skb(skb);
2766				continue;
2767			} else {
2768				status = TP_STATUS_WRONG_FORMAT;
2769				err = tp_len;
2770				goto out_status;
2771			}
2772		}
2773
2774		if (po->has_vnet_hdr) {
2775			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2776				tp_len = -EINVAL;
2777				goto tpacket_error;
2778			}
2779			virtio_net_hdr_set_proto(skb, vnet_hdr);
2780		}
2781
2782		skb->destructor = tpacket_destruct_skb;
2783		__packet_set_status(po, ph, TP_STATUS_SENDING);
2784		packet_inc_pending(&po->tx_ring);
2785
2786		status = TP_STATUS_SEND_REQUEST;
2787		err = po->xmit(skb);
2788		if (unlikely(err > 0)) {
2789			err = net_xmit_errno(err);
2790			if (err && __packet_get_status(po, ph) ==
2791				   TP_STATUS_AVAILABLE) {
2792				/* skb was destructed already */
2793				skb = NULL;
2794				goto out_status;
2795			}
2796			/*
2797			 * skb was dropped but not destructed yet;
2798			 * let's treat it like congestion or err < 0
2799			 */
2800			err = 0;
2801		}
2802		packet_increment_head(&po->tx_ring);
2803		len_sum += tp_len;
2804	} while (likely((ph != NULL) ||
2805		/* Note: packet_read_pending() might be slow if we have
2806		 * to call it as it's a per-cpu variable, but in the fast path
2807		 * we already short-circuit the loop with the first
2808		 * condition, and luckily don't have to go down that path
2809		 * anyway.
2810		 */
2811		 (need_wait && packet_read_pending(&po->tx_ring))));
2812
2813	err = len_sum;
2814	goto out_put;
2815
2816out_status:
2817	__packet_set_status(po, ph, status);
2818	kfree_skb(skb);
2819out_put:
2820	dev_put(dev);
2821out:
2822	mutex_unlock(&po->pg_vec_lock);
2823	return err;
2824}
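/* Illustrative userspace sketch (not part of this file): queueing one frame
 * through a TPACKET_V2 PACKET_TX_RING serviced by tpacket_snd().  The
 * mapped ring pointer, frame size and slot index are hypothetical, and the
 * data offset mirrors the default that tpacket_parse_header() computes
 * above (header length minus sizeof(struct sockaddr_ll)).
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + idx * frame_size);
 *
 *	if (!(hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING))) {
 *		memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
 *		       frame, frame_len);
 *		hdr->tp_len = frame_len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);
 *	}
 */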
2825
2826static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2827				        size_t reserve, size_t len,
2828				        size_t linear, int noblock,
2829				        int *err)
2830{
2831	struct sk_buff *skb;
2832
2833	/* Under a page?  Don't bother with paged skb. */
2834	if (prepad + len < PAGE_SIZE || !linear)
2835		linear = len;
2836
2837	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2838				   err, 0);
2839	if (!skb)
2840		return NULL;
2841
2842	skb_reserve(skb, reserve);
2843	skb_put(skb, linear);
2844	skb->data_len = len - linear;
2845	skb->len += len - linear;
2846
2847	return skb;
2848}
2849
2850static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2851{
2852	struct sock *sk = sock->sk;
2853	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2854	struct sk_buff *skb;
2855	struct net_device *dev;
2856	__be16 proto;
2857	unsigned char *addr = NULL;
2858	int err, reserve = 0;
2859	struct sockcm_cookie sockc;
2860	struct virtio_net_hdr vnet_hdr = { 0 };
2861	int offset = 0;
2862	struct packet_sock *po = pkt_sk(sk);
2863	bool has_vnet_hdr = false;
2864	int hlen, tlen, linear;
2865	int extra_len = 0;
2866
2867	/*
2868	 *	Get and verify the address.
2869	 */
2870
2871	if (likely(saddr == NULL)) {
2872		dev	= packet_cached_dev_get(po);
2873		proto	= po->num;
2874	} else {
2875		err = -EINVAL;
2876		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2877			goto out;
2878		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2879			goto out;
2880		proto	= saddr->sll_protocol;
2881		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2882		if (sock->type == SOCK_DGRAM) {
2883			if (dev && msg->msg_namelen < dev->addr_len +
2884				   offsetof(struct sockaddr_ll, sll_addr))
2885				goto out_unlock;
2886			addr = saddr->sll_addr;
2887		}
2888	}
2889
2890	err = -ENXIO;
2891	if (unlikely(dev == NULL))
2892		goto out_unlock;
2893	err = -ENETDOWN;
2894	if (unlikely(!(dev->flags & IFF_UP)))
2895		goto out_unlock;
2896
2897	sockcm_init(&sockc, sk);
2898	sockc.mark = sk->sk_mark;
2899	if (msg->msg_controllen) {
2900		err = sock_cmsg_send(sk, msg, &sockc);
2901		if (unlikely(err))
2902			goto out_unlock;
2903	}
2904
2905	if (sock->type == SOCK_RAW)
2906		reserve = dev->hard_header_len;
2907	if (po->has_vnet_hdr) {
2908		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2909		if (err)
2910			goto out_unlock;
2911		has_vnet_hdr = true;
2912	}
2913
2914	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2915		if (!netif_supports_nofcs(dev)) {
2916			err = -EPROTONOSUPPORT;
2917			goto out_unlock;
2918		}
2919		extra_len = 4; /* We're doing our own CRC */
2920	}
2921
2922	err = -EMSGSIZE;
2923	if (!vnet_hdr.gso_type &&
2924	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2925		goto out_unlock;
2926
2927	err = -ENOBUFS;
2928	hlen = LL_RESERVED_SPACE(dev);
2929	tlen = dev->needed_tailroom;
2930	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2931	linear = max(linear, min_t(int, len, dev->hard_header_len));
2932	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2933			       msg->msg_flags & MSG_DONTWAIT, &err);
2934	if (skb == NULL)
2935		goto out_unlock;
2936
2937	skb_reset_network_header(skb);
2938
2939	err = -EINVAL;
2940	if (sock->type == SOCK_DGRAM) {
2941		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2942		if (unlikely(offset < 0))
2943			goto out_free;
2944	} else if (reserve) {
2945		skb_reserve(skb, -reserve);
2946		if (len < reserve + sizeof(struct ipv6hdr) &&
2947		    dev->min_header_len != dev->hard_header_len)
2948			skb_reset_network_header(skb);
2949	}
2950
2951	/* Returns -EFAULT on error */
2952	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2953	if (err)
2954		goto out_free;
2955
2956	if (sock->type == SOCK_RAW &&
2957	    !dev_validate_header(dev, skb->data, len)) {
2958		err = -EINVAL;
2959		goto out_free;
2960	}
2961
2962	skb_setup_tx_timestamp(skb, sockc.tsflags);
2963
2964	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2965	    !packet_extra_vlan_len_allowed(dev, skb)) {
2966		err = -EMSGSIZE;
2967		goto out_free;
2968	}
2969
2970	skb->protocol = proto;
2971	skb->dev = dev;
2972	skb->priority = sk->sk_priority;
2973	skb->mark = sockc.mark;
2974	skb->tstamp = sockc.transmit_time;
2975
2976	if (has_vnet_hdr) {
2977		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2978		if (err)
2979			goto out_free;
2980		len += sizeof(vnet_hdr);
2981		virtio_net_hdr_set_proto(skb, &vnet_hdr);
2982	}
2983
2984	packet_parse_headers(skb, sock);
2985
2986	if (unlikely(extra_len == 4))
2987		skb->no_fcs = 1;
2988
2989	err = po->xmit(skb);
2990	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2991		goto out_unlock;
2992
2993	dev_put(dev);
2994
2995	return len;
2996
2997out_free:
2998	kfree_skb(skb);
2999out_unlock:
3000	if (dev)
3001		dev_put(dev);
3002out:
3003	return err;
3004}
3005
3006static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3007{
3008	struct sock *sk = sock->sk;
3009	struct packet_sock *po = pkt_sk(sk);
3010
3011	if (po->tx_ring.pg_vec)
3012		return tpacket_snd(po, msg);
3013	else
3014		return packet_snd(sock, msg, len);
3015}
3016
3017/*
3018 *	Close a PACKET socket. This is fairly simple. We immediately go
3019 *	to 'closed' state and remove our protocol entry in the device list.
3020 */
3021
3022static int packet_release(struct socket *sock)
3023{
3024	struct sock *sk = sock->sk;
3025	struct packet_sock *po;
3026	struct packet_fanout *f;
3027	struct net *net;
3028	union tpacket_req_u req_u;
3029
3030	if (!sk)
3031		return 0;
3032
3033	net = sock_net(sk);
3034	po = pkt_sk(sk);
3035
3036	mutex_lock(&net->packet.sklist_lock);
3037	sk_del_node_init_rcu(sk);
3038	mutex_unlock(&net->packet.sklist_lock);
3039
3040	preempt_disable();
3041	sock_prot_inuse_add(net, sk->sk_prot, -1);
3042	preempt_enable();
3043
3044	spin_lock(&po->bind_lock);
3045	unregister_prot_hook(sk, false);
3046	packet_cached_dev_reset(po);
3047
3048	if (po->prot_hook.dev) {
3049		dev_put(po->prot_hook.dev);
3050		po->prot_hook.dev = NULL;
3051	}
3052	spin_unlock(&po->bind_lock);
3053
3054	packet_flush_mclist(sk);
3055
3056	lock_sock(sk);
3057	if (po->rx_ring.pg_vec) {
3058		memset(&req_u, 0, sizeof(req_u));
3059		packet_set_ring(sk, &req_u, 1, 0);
3060	}
3061
3062	if (po->tx_ring.pg_vec) {
3063		memset(&req_u, 0, sizeof(req_u));
3064		packet_set_ring(sk, &req_u, 1, 1);
3065	}
3066	release_sock(sk);
3067
3068	f = fanout_release(sk);
3069
3070	synchronize_net();
3071
3072	kfree(po->rollover);
3073	if (f) {
3074		fanout_release_data(f);
3075		kfree(f);
3076	}
3077	/*
3078	 *	Now the socket is dead. No more input will appear.
3079	 */
3080	sock_orphan(sk);
3081	sock->sk = NULL;
3082
3083	/* Purge queues */
3084
3085	skb_queue_purge(&sk->sk_receive_queue);
3086	packet_free_pending(po);
3087	sk_refcnt_debug_release(sk);
3088
3089	sock_put(sk);
3090	return 0;
3091}
3092
3093/*
3094 *	Attach a packet hook.
3095 */
3096
3097static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3098			  __be16 proto)
3099{
3100	struct packet_sock *po = pkt_sk(sk);
3101	struct net_device *dev_curr;
3102	__be16 proto_curr;
3103	bool need_rehook;
3104	struct net_device *dev = NULL;
3105	int ret = 0;
3106	bool unlisted = false;
3107
3108	lock_sock(sk);
3109	spin_lock(&po->bind_lock);
3110	rcu_read_lock();
3111
3112	if (po->fanout) {
3113		ret = -EINVAL;
3114		goto out_unlock;
3115	}
3116
3117	if (name) {
3118		dev = dev_get_by_name_rcu(sock_net(sk), name);
3119		if (!dev) {
3120			ret = -ENODEV;
3121			goto out_unlock;
3122		}
3123	} else if (ifindex) {
3124		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3125		if (!dev) {
3126			ret = -ENODEV;
3127			goto out_unlock;
3128		}
3129	}
3130
3131	if (dev)
3132		dev_hold(dev);
3133
3134	proto_curr = po->prot_hook.type;
3135	dev_curr = po->prot_hook.dev;
3136
3137	need_rehook = proto_curr != proto || dev_curr != dev;
3138
3139	if (need_rehook) {
3140		if (po->running) {
3141			rcu_read_unlock();
3142			/* prevents packet_notifier() from calling
3143			 * register_prot_hook()
3144			 */
3145			po->num = 0;
3146			__unregister_prot_hook(sk, true);
3147			rcu_read_lock();
3148			dev_curr = po->prot_hook.dev;
3149			if (dev)
3150				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3151								 dev->ifindex);
3152		}
3153
3154		BUG_ON(po->running);
3155		po->num = proto;
3156		po->prot_hook.type = proto;
3157
3158		if (unlikely(unlisted)) {
3159			dev_put(dev);
3160			po->prot_hook.dev = NULL;
3161			po->ifindex = -1;
3162			packet_cached_dev_reset(po);
3163		} else {
3164			po->prot_hook.dev = dev;
3165			po->ifindex = dev ? dev->ifindex : 0;
3166			packet_cached_dev_assign(po, dev);
3167		}
3168	}
3169	if (dev_curr)
3170		dev_put(dev_curr);
3171
3172	if (proto == 0 || !need_rehook)
3173		goto out_unlock;
3174
3175	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3176		register_prot_hook(sk);
3177	} else {
3178		sk->sk_err = ENETDOWN;
3179		if (!sock_flag(sk, SOCK_DEAD))
3180			sk->sk_error_report(sk);
3181	}
3182
3183out_unlock:
3184	rcu_read_unlock();
3185	spin_unlock(&po->bind_lock);
3186	release_sock(sk);
3187	return ret;
3188}
3189
3190/*
3191 *	Bind a packet socket to a device
3192 */
3193
3194static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3195			    int addr_len)
3196{
3197	struct sock *sk = sock->sk;
3198	char name[sizeof(uaddr->sa_data) + 1];
3199
3200	/*
3201	 *	Check legality
3202	 */
3203
3204	if (addr_len != sizeof(struct sockaddr))
3205		return -EINVAL;
3206	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3207	 * zero-terminated.
3208	 */
3209	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3210	name[sizeof(uaddr->sa_data)] = 0;
3211
3212	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3213}
3214
3215static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3216{
3217	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3218	struct sock *sk = sock->sk;
3219
3220	/*
3221	 *	Check legality
3222	 */
3223
3224	if (addr_len < sizeof(struct sockaddr_ll))
3225		return -EINVAL;
3226	if (sll->sll_family != AF_PACKET)
3227		return -EINVAL;
3228
3229	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3230			      sll->sll_protocol ? : pkt_sk(sk)->num);
3231}
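/* Illustrative userspace sketch (not part of this file): binding an
 * AF_PACKET socket to a single interface, which reaches packet_do_bind()
 * above.  The interface name is hypothetical.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */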
3232
3233static struct proto packet_proto = {
3234	.name	  = "PACKET",
3235	.owner	  = THIS_MODULE,
3236	.obj_size = sizeof(struct packet_sock),
3237};
3238
3239/*
3240 *	Create a packet of type SOCK_PACKET.
3241 */
3242
3243static int packet_create(struct net *net, struct socket *sock, int protocol,
3244			 int kern)
3245{
3246	struct sock *sk;
3247	struct packet_sock *po;
3248	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3249	int err;
3250
3251	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3252		return -EPERM;
3253	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3254	    sock->type != SOCK_PACKET)
3255		return -ESOCKTNOSUPPORT;
3256
3257	sock->state = SS_UNCONNECTED;
3258
3259	err = -ENOBUFS;
3260	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3261	if (sk == NULL)
3262		goto out;
3263
3264	sock->ops = &packet_ops;
3265	if (sock->type == SOCK_PACKET)
3266		sock->ops = &packet_ops_spkt;
3267
3268	sock_init_data(sock, sk);
3269
3270	po = pkt_sk(sk);
3271	init_completion(&po->skb_completion);
3272	sk->sk_family = PF_PACKET;
3273	po->num = proto;
3274	po->xmit = dev_queue_xmit;
3275
3276	err = packet_alloc_pending(po);
3277	if (err)
3278		goto out2;
3279
3280	packet_cached_dev_reset(po);
3281
3282	sk->sk_destruct = packet_sock_destruct;
3283	sk_refcnt_debug_inc(sk);
3284
3285	/*
3286	 *	Attach a protocol block
3287	 */
3288
3289	spin_lock_init(&po->bind_lock);
3290	mutex_init(&po->pg_vec_lock);
3291	po->rollover = NULL;
3292	po->prot_hook.func = packet_rcv;
3293
3294	if (sock->type == SOCK_PACKET)
3295		po->prot_hook.func = packet_rcv_spkt;
3296
3297	po->prot_hook.af_packet_priv = sk;
3298
3299	if (proto) {
3300		po->prot_hook.type = proto;
3301		__register_prot_hook(sk);
3302	}
3303
3304	mutex_lock(&net->packet.sklist_lock);
3305	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3306	mutex_unlock(&net->packet.sklist_lock);
3307
3308	preempt_disable();
3309	sock_prot_inuse_add(net, &packet_proto, 1);
3310	preempt_enable();
3311
3312	return 0;
3313out2:
3314	sk_free(sk);
3315out:
3316	return err;
3317}
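/* Illustrative userspace sketch (not part of this file): the socket() calls
 * that end up in packet_create().  Both need CAP_NET_RAW, as checked above;
 * SOCK_RAW delivers frames with the link-level header, SOCK_DGRAM without it.
 *
 *	int fd_raw   = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int fd_dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
 */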
3318
3319/*
3320 *	Pull a packet from our receive queue and hand it to the user.
3321 *	If necessary we block.
3322 */
3323
3324static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3325			  int flags)
3326{
3327	struct sock *sk = sock->sk;
3328	struct sk_buff *skb;
3329	int copied, err;
3330	int vnet_hdr_len = 0;
3331	unsigned int origlen = 0;
3332
3333	err = -EINVAL;
3334	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3335		goto out;
3336
3337#if 0
3338	/* What error should we return now? EUNATTACH? */
3339	if (pkt_sk(sk)->ifindex < 0)
3340		return -ENODEV;
3341#endif
3342
3343	if (flags & MSG_ERRQUEUE) {
3344		err = sock_recv_errqueue(sk, msg, len,
3345					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3346		goto out;
3347	}
3348
3349	/*
3350	 *	Call the generic datagram receiver. This handles all sorts
3351	 *	of horrible races and re-entrancy so we can forget about it
3352	 *	in the protocol layers.
3353	 *
3354	 *	Now it will return ENETDOWN if the device has just gone down,
3355	 *	but then it will block.
3356	 */
3357
3358	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3359
3360	/*
3361	 *	An error occurred so return it. Because skb_recv_datagram()
3362	 *	handles the blocking for us, we don't have to see or worry
3363	 *	about blocking retries.
3364	 */
3365
3366	if (skb == NULL)
3367		goto out;
3368
3369	packet_rcv_try_clear_pressure(pkt_sk(sk));
3370
3371	if (pkt_sk(sk)->has_vnet_hdr) {
3372		err = packet_rcv_vnet(msg, skb, &len);
3373		if (err)
3374			goto out_free;
3375		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3376	}
3377
3378	/* You lose any data beyond the buffer you gave. If it worries
3379	 * a user program, it can ask the device for its MTU
3380	 * anyway.
3381	 */
3382	copied = skb->len;
3383	if (copied > len) {
3384		copied = len;
3385		msg->msg_flags |= MSG_TRUNC;
3386	}
3387
3388	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3389	if (err)
3390		goto out_free;
3391
3392	if (sock->type != SOCK_PACKET) {
3393		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3394
3395		/* Original length was stored in sockaddr_ll fields */
3396		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3397		sll->sll_family = AF_PACKET;
3398		sll->sll_protocol = skb->protocol;
3399	}
3400
3401	sock_recv_ts_and_drops(msg, sk, skb);
3402
3403	if (msg->msg_name) {
3404		int copy_len;
3405
3406		/* If the address length field is there to be filled
3407		 * in, we fill it in now.
3408		 */
3409		if (sock->type == SOCK_PACKET) {
3410			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3411			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3412			copy_len = msg->msg_namelen;
3413		} else {
3414			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3415
3416			msg->msg_namelen = sll->sll_halen +
3417				offsetof(struct sockaddr_ll, sll_addr);
3418			copy_len = msg->msg_namelen;
3419			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3420				memset(msg->msg_name +
3421				       offsetof(struct sockaddr_ll, sll_addr),
3422				       0, sizeof(sll->sll_addr));
3423				msg->msg_namelen = sizeof(struct sockaddr_ll);
3424			}
3425		}
3426		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3427	}
3428
3429	if (pkt_sk(sk)->auxdata) {
3430		struct tpacket_auxdata aux;
3431
3432		aux.tp_status = TP_STATUS_USER;
3433		if (skb->ip_summed == CHECKSUM_PARTIAL)
3434			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3435		else if (skb->pkt_type != PACKET_OUTGOING &&
3436			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3437			  skb_csum_unnecessary(skb)))
3438			aux.tp_status |= TP_STATUS_CSUM_VALID;
3439
3440		aux.tp_len = origlen;
3441		aux.tp_snaplen = skb->len;
3442		aux.tp_mac = 0;
3443		aux.tp_net = skb_network_offset(skb);
3444		if (skb_vlan_tag_present(skb)) {
3445			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3446			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3447			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3448		} else {
3449			aux.tp_vlan_tci = 0;
3450			aux.tp_vlan_tpid = 0;
3451		}
3452		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3453	}
3454
3455	/*
3456	 *	Free or return the buffer as appropriate. Again this
3457	 *	hides all the races and re-entrancy issues from us.
3458	 */
3459	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3460
3461out_free:
3462	skb_free_datagram(sk, skb);
3463out:
3464	return err;
3465}
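/* Illustrative userspace sketch (not part of this file): reading the
 * PACKET_AUXDATA control message that packet_recvmsg() emits once the
 * option is enabled.  msg is the struct msghdr handed to recvmsg() and is
 * assumed to have enough msg_control space for the cmsg.
 *
 *	struct cmsghdr *cmsg;
 *
 *	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *		if (cmsg->cmsg_level == SOL_PACKET &&
 *		    cmsg->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
 *
 *			printf("len %u snaplen %u vlan tci %u\n",
 *			       aux->tp_len, aux->tp_snaplen, aux->tp_vlan_tci);
 *		}
 *	}
 */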
3466
3467static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3468			       int peer)
3469{
3470	struct net_device *dev;
3471	struct sock *sk	= sock->sk;
3472
3473	if (peer)
3474		return -EOPNOTSUPP;
3475
3476	uaddr->sa_family = AF_PACKET;
3477	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3478	rcu_read_lock();
3479	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3480	if (dev)
3481		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3482	rcu_read_unlock();
3483
3484	return sizeof(*uaddr);
3485}
3486
3487static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3488			  int peer)
3489{
3490	struct net_device *dev;
3491	struct sock *sk = sock->sk;
3492	struct packet_sock *po = pkt_sk(sk);
3493	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3494
3495	if (peer)
3496		return -EOPNOTSUPP;
3497
3498	sll->sll_family = AF_PACKET;
3499	sll->sll_ifindex = po->ifindex;
3500	sll->sll_protocol = po->num;
3501	sll->sll_pkttype = 0;
3502	rcu_read_lock();
3503	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3504	if (dev) {
3505		sll->sll_hatype = dev->type;
3506		sll->sll_halen = dev->addr_len;
3507		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3508	} else {
3509		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3510		sll->sll_halen = 0;
3511	}
3512	rcu_read_unlock();
3513
3514	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3515}
3516
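/*
 * Apply one membership entry to a device.  "what" is a signed delta:
 * +1 installs the entry, -1 removes it again.  For PACKET_MR_PROMISC
 * and PACKET_MR_ALLMULTI the delta is handed straight to the device
 * refcounting helpers; for unicast/multicast addresses the address
 * length must match the device's.
 */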
3517static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3518			 int what)
3519{
3520	switch (i->type) {
3521	case PACKET_MR_MULTICAST:
3522		if (i->alen != dev->addr_len)
3523			return -EINVAL;
3524		if (what > 0)
3525			return dev_mc_add(dev, i->addr);
3526		else
3527			return dev_mc_del(dev, i->addr);
3528		break;
3529	case PACKET_MR_PROMISC:
3530		return dev_set_promiscuity(dev, what);
3531	case PACKET_MR_ALLMULTI:
3532		return dev_set_allmulti(dev, what);
3533	case PACKET_MR_UNICAST:
3534		if (i->alen != dev->addr_len)
3535			return -EINVAL;
3536		if (what > 0)
3537			return dev_uc_add(dev, i->addr);
3538		else
3539			return dev_uc_del(dev, i->addr);
3540		break;
3541	default:
3542		break;
3543	}
3544	return 0;
3545}
3546
3547static void packet_dev_mclist_delete(struct net_device *dev,
3548				     struct packet_mclist **mlp)
3549{
3550	struct packet_mclist *ml;
3551
3552	while ((ml = *mlp) != NULL) {
3553		if (ml->ifindex == dev->ifindex) {
3554			packet_dev_mc(dev, ml, -1);
3555			*mlp = ml->next;
3556			kfree(ml);
3557		} else
3558			mlp = &ml->next;
3559	}
3560}
3561
3562static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3563{
3564	struct packet_sock *po = pkt_sk(sk);
3565	struct packet_mclist *ml, *i;
3566	struct net_device *dev;
3567	int err;
3568
3569	rtnl_lock();
3570
3571	err = -ENODEV;
3572	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3573	if (!dev)
3574		goto done;
3575
3576	err = -EINVAL;
3577	if (mreq->mr_alen > dev->addr_len)
3578		goto done;
3579
3580	err = -ENOBUFS;
3581	i = kmalloc(sizeof(*i), GFP_KERNEL);
3582	if (i == NULL)
3583		goto done;
3584
3585	err = 0;
3586	for (ml = po->mclist; ml; ml = ml->next) {
3587		if (ml->ifindex == mreq->mr_ifindex &&
3588		    ml->type == mreq->mr_type &&
3589		    ml->alen == mreq->mr_alen &&
3590		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3591			ml->count++;
3592			/* Free the new element ... */
3593			kfree(i);
3594			goto done;
3595		}
3596	}
3597
3598	i->type = mreq->mr_type;
3599	i->ifindex = mreq->mr_ifindex;
3600	i->alen = mreq->mr_alen;
3601	memcpy(i->addr, mreq->mr_address, i->alen);
3602	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3603	i->count = 1;
3604	i->next = po->mclist;
3605	po->mclist = i;
3606	err = packet_dev_mc(dev, i, 1);
3607	if (err) {
3608		po->mclist = i->next;
3609		kfree(i);
3610	}
3611
3612done:
3613	rtnl_unlock();
3614	return err;
3615}
3616
3617static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3618{
3619	struct packet_mclist *ml, **mlp;
3620
3621	rtnl_lock();
3622
3623	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3624		if (ml->ifindex == mreq->mr_ifindex &&
3625		    ml->type == mreq->mr_type &&
3626		    ml->alen == mreq->mr_alen &&
3627		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3628			if (--ml->count == 0) {
3629				struct net_device *dev;
3630				*mlp = ml->next;
3631				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3632				if (dev)
3633					packet_dev_mc(dev, ml, -1);
3634				kfree(ml);
3635			}
3636			break;
3637		}
3638	}
3639	rtnl_unlock();
3640	return 0;
3641}
3642
3643static void packet_flush_mclist(struct sock *sk)
3644{
3645	struct packet_sock *po = pkt_sk(sk);
3646	struct packet_mclist *ml;
3647
3648	if (!po->mclist)
3649		return;
3650
3651	rtnl_lock();
3652	while ((ml = po->mclist) != NULL) {
3653		struct net_device *dev;
3654
3655		po->mclist = ml->next;
3656		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3657		if (dev != NULL)
3658			packet_dev_mc(dev, ml, -1);
3659		kfree(ml);
3660	}
3661	rtnl_unlock();
3662}
3663
3664static int
3665packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3666		  unsigned int optlen)
3667{
3668	struct sock *sk = sock->sk;
3669	struct packet_sock *po = pkt_sk(sk);
3670	int ret;
3671
3672	if (level != SOL_PACKET)
3673		return -ENOPROTOOPT;
3674
3675	switch (optname) {
3676	case PACKET_ADD_MEMBERSHIP:
3677	case PACKET_DROP_MEMBERSHIP:
3678	{
3679		struct packet_mreq_max mreq;
3680		int len = optlen;
3681		memset(&mreq, 0, sizeof(mreq));
3682		if (len < sizeof(struct packet_mreq))
3683			return -EINVAL;
3684		if (len > sizeof(mreq))
3685			len = sizeof(mreq);
3686		if (copy_from_sockptr(&mreq, optval, len))
3687			return -EFAULT;
3688		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3689			return -EINVAL;
3690		if (optname == PACKET_ADD_MEMBERSHIP)
3691			ret = packet_mc_add(sk, &mreq);
3692		else
3693			ret = packet_mc_drop(sk, &mreq);
3694		return ret;
3695	}
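	/*
	 * Illustrative userspace sketch (fd and ifindex are assumed):
	 * joining the promiscuous "membership" handled above.
	 *
	 *	struct packet_mreq mreq = {
	 *		.mr_ifindex = ifindex,
	 *		.mr_type    = PACKET_MR_PROMISC,
	 *	};
	 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
	 *		   &mreq, sizeof(mreq));
	 */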
3696
3697	case PACKET_RX_RING:
3698	case PACKET_TX_RING:
3699	{
3700		union tpacket_req_u req_u;
3701		int len;
3702
3703		lock_sock(sk);
3704		switch (po->tp_version) {
3705		case TPACKET_V1:
3706		case TPACKET_V2:
3707			len = sizeof(req_u.req);
3708			break;
3709		case TPACKET_V3:
3710		default:
3711			len = sizeof(req_u.req3);
3712			break;
3713		}
3714		if (optlen < len) {
3715			ret = -EINVAL;
3716		} else {
3717			if (copy_from_sockptr(&req_u.req, optval, len))
3718				ret = -EFAULT;
3719			else
3720				ret = packet_set_ring(sk, &req_u, 0,
3721						    optname == PACKET_TX_RING);
3722		}
3723		release_sock(sk);
3724		return ret;
3725	}
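	/*
	 * Illustrative userspace sketch (fd and ring geometry are
	 * assumed): the ring version must be chosen with PACKET_VERSION
	 * before PACKET_RX_RING, because the version selects how much of
	 * tpacket_req_u is copied in above.
	 *
	 *	int ver = TPACKET_V3;
	 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
	 *
	 *	struct tpacket_req3 req = {
	 *		.tp_block_size = 1 << 16,	// page aligned
	 *		.tp_block_nr   = 64,
	 *		.tp_frame_size = 1 << 11,	// TPACKET_ALIGNMENT multiple
	 *		.tp_frame_nr   = ((1 << 16) / (1 << 11)) * 64,
	 *		.tp_retire_blk_tov = 60,	// block timeout in ms
	 *	};
	 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
	 *	void *ring = mmap(NULL,
	 *			  (size_t)req.tp_block_size * req.tp_block_nr,
	 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	 */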
3726	case PACKET_COPY_THRESH:
3727	{
3728		int val;
3729
3730		if (optlen != sizeof(val))
3731			return -EINVAL;
3732		if (copy_from_sockptr(&val, optval, sizeof(val)))
3733			return -EFAULT;
3734
3735		pkt_sk(sk)->copy_thresh = val;
3736		return 0;
3737	}
3738	case PACKET_VERSION:
3739	{
3740		int val;
3741
3742		if (optlen != sizeof(val))
3743			return -EINVAL;
3744		if (copy_from_sockptr(&val, optval, sizeof(val)))
3745			return -EFAULT;
3746		switch (val) {
3747		case TPACKET_V1:
3748		case TPACKET_V2:
3749		case TPACKET_V3:
3750			break;
3751		default:
3752			return -EINVAL;
3753		}
3754		lock_sock(sk);
3755		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3756			ret = -EBUSY;
3757		} else {
3758			po->tp_version = val;
3759			ret = 0;
3760		}
3761		release_sock(sk);
3762		return ret;
3763	}
3764	case PACKET_RESERVE:
3765	{
3766		unsigned int val;
3767
3768		if (optlen != sizeof(val))
3769			return -EINVAL;
3770		if (copy_from_sockptr(&val, optval, sizeof(val)))
3771			return -EFAULT;
3772		if (val > INT_MAX)
3773			return -EINVAL;
3774		lock_sock(sk);
3775		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3776			ret = -EBUSY;
3777		} else {
3778			po->tp_reserve = val;
3779			ret = 0;
3780		}
3781		release_sock(sk);
3782		return ret;
3783	}
3784	case PACKET_LOSS:
3785	{
3786		unsigned int val;
3787
3788		if (optlen != sizeof(val))
3789			return -EINVAL;
3790		if (copy_from_sockptr(&val, optval, sizeof(val)))
3791			return -EFAULT;
3792
3793		lock_sock(sk);
3794		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3795			ret = -EBUSY;
3796		} else {
3797			po->tp_loss = !!val;
3798			ret = 0;
3799		}
3800		release_sock(sk);
3801		return ret;
3802	}
3803	case PACKET_AUXDATA:
3804	{
3805		int val;
3806
3807		if (optlen < sizeof(val))
3808			return -EINVAL;
3809		if (copy_from_sockptr(&val, optval, sizeof(val)))
3810			return -EFAULT;
3811
3812		lock_sock(sk);
3813		po->auxdata = !!val;
3814		release_sock(sk);
3815		return 0;
3816	}
3817	case PACKET_ORIGDEV:
3818	{
3819		int val;
3820
3821		if (optlen < sizeof(val))
3822			return -EINVAL;
3823		if (copy_from_sockptr(&val, optval, sizeof(val)))
3824			return -EFAULT;
3825
3826		lock_sock(sk);
3827		po->origdev = !!val;
3828		release_sock(sk);
3829		return 0;
3830	}
3831	case PACKET_VNET_HDR:
3832	{
3833		int val;
3834
3835		if (sock->type != SOCK_RAW)
3836			return -EINVAL;
3837		if (optlen < sizeof(val))
3838			return -EINVAL;
3839		if (copy_from_sockptr(&val, optval, sizeof(val)))
3840			return -EFAULT;
3841
3842		lock_sock(sk);
3843		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3844			ret = -EBUSY;
3845		} else {
3846			po->has_vnet_hdr = !!val;
3847			ret = 0;
3848		}
3849		release_sock(sk);
3850		return ret;
3851	}
3852	case PACKET_TIMESTAMP:
3853	{
3854		int val;
3855
3856		if (optlen != sizeof(val))
3857			return -EINVAL;
3858		if (copy_from_sockptr(&val, optval, sizeof(val)))
3859			return -EFAULT;
3860
3861		po->tp_tstamp = val;
3862		return 0;
3863	}
3864	case PACKET_FANOUT:
3865	{
3866		int val;
3867
3868		if (optlen != sizeof(val))
3869			return -EINVAL;
3870		if (copy_from_sockptr(&val, optval, sizeof(val)))
3871			return -EFAULT;
3872
3873		return fanout_add(sk, val & 0xffff, val >> 16);
3874	}
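	/*
	 * Illustrative sketch: the fanout argument packs the 16-bit group
	 * id into the low half and the mode plus flags into the high
	 * half, matching the "val & 0xffff" / "val >> 16" split above.
	 * Assumed userspace usage:
	 *
	 *	int fanout = group_id | (PACKET_FANOUT_HASH << 16);
	 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
	 *		   &fanout, sizeof(fanout));
	 */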
3875	case PACKET_FANOUT_DATA:
3876	{
3877		if (!po->fanout)
3878			return -EINVAL;
3879
3880		return fanout_set_data(po, optval, optlen);
3881	}
3882	case PACKET_IGNORE_OUTGOING:
3883	{
3884		int val;
3885
3886		if (optlen != sizeof(val))
3887			return -EINVAL;
3888		if (copy_from_sockptr(&val, optval, sizeof(val)))
3889			return -EFAULT;
3890		if (val < 0 || val > 1)
3891			return -EINVAL;
3892
3893		po->prot_hook.ignore_outgoing = !!val;
3894		return 0;
3895	}
3896	case PACKET_TX_HAS_OFF:
3897	{
3898		unsigned int val;
3899
3900		if (optlen != sizeof(val))
3901			return -EINVAL;
3902		if (copy_from_sockptr(&val, optval, sizeof(val)))
3903			return -EFAULT;
3904
3905		lock_sock(sk);
3906		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3907			ret = -EBUSY;
3908		} else {
3909			po->tp_tx_has_off = !!val;
3910			ret = 0;
3911		}
3912		release_sock(sk);
3913		return ret;
3914	}
3915	case PACKET_QDISC_BYPASS:
3916	{
3917		int val;
3918
3919		if (optlen != sizeof(val))
3920			return -EINVAL;
3921		if (copy_from_sockptr(&val, optval, sizeof(val)))
3922			return -EFAULT;
3923
3924		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3925		return 0;
3926	}
3927	default:
3928		return -ENOPROTOOPT;
3929	}
3930}
3931
3932static int packet_getsockopt(struct socket *sock, int level, int optname,
3933			     char __user *optval, int __user *optlen)
3934{
3935	int len;
3936	int val, lv = sizeof(val);
3937	struct sock *sk = sock->sk;
3938	struct packet_sock *po = pkt_sk(sk);
3939	void *data = &val;
3940	union tpacket_stats_u st;
3941	struct tpacket_rollover_stats rstats;
3942	int drops;
3943
3944	if (level != SOL_PACKET)
3945		return -ENOPROTOOPT;
3946
3947	if (get_user(len, optlen))
3948		return -EFAULT;
3949
3950	if (len < 0)
3951		return -EINVAL;
3952
3953	switch (optname) {
3954	case PACKET_STATISTICS:
3955		spin_lock_bh(&sk->sk_receive_queue.lock);
3956		memcpy(&st, &po->stats, sizeof(st));
3957		memset(&po->stats, 0, sizeof(po->stats));
3958		spin_unlock_bh(&sk->sk_receive_queue.lock);
3959		drops = atomic_xchg(&po->tp_drops, 0);
3960
3961		if (po->tp_version == TPACKET_V3) {
3962			lv = sizeof(struct tpacket_stats_v3);
3963			st.stats3.tp_drops = drops;
3964			st.stats3.tp_packets += drops;
3965			data = &st.stats3;
3966		} else {
3967			lv = sizeof(struct tpacket_stats);
3968			st.stats1.tp_drops = drops;
3969			st.stats1.tp_packets += drops;
3970			data = &st.stats1;
3971		}
3972
3973		break;
3974	case PACKET_AUXDATA:
3975		val = po->auxdata;
3976		break;
3977	case PACKET_ORIGDEV:
3978		val = po->origdev;
3979		break;
3980	case PACKET_VNET_HDR:
3981		val = po->has_vnet_hdr;
3982		break;
3983	case PACKET_VERSION:
3984		val = po->tp_version;
3985		break;
3986	case PACKET_HDRLEN:
3987		if (len > sizeof(int))
3988			len = sizeof(int);
3989		if (len < sizeof(int))
3990			return -EINVAL;
3991		if (copy_from_user(&val, optval, len))
3992			return -EFAULT;
3993		switch (val) {
3994		case TPACKET_V1:
3995			val = sizeof(struct tpacket_hdr);
3996			break;
3997		case TPACKET_V2:
3998			val = sizeof(struct tpacket2_hdr);
3999			break;
4000		case TPACKET_V3:
4001			val = sizeof(struct tpacket3_hdr);
4002			break;
4003		default:
4004			return -EINVAL;
4005		}
4006		break;
4007	case PACKET_RESERVE:
4008		val = po->tp_reserve;
4009		break;
4010	case PACKET_LOSS:
4011		val = po->tp_loss;
4012		break;
4013	case PACKET_TIMESTAMP:
4014		val = po->tp_tstamp;
4015		break;
4016	case PACKET_FANOUT:
4017		val = (po->fanout ?
4018		       ((u32)po->fanout->id |
4019			((u32)po->fanout->type << 16) |
4020			((u32)po->fanout->flags << 24)) :
4021		       0);
4022		break;
4023	case PACKET_IGNORE_OUTGOING:
4024		val = po->prot_hook.ignore_outgoing;
4025		break;
4026	case PACKET_ROLLOVER_STATS:
4027		if (!po->rollover)
4028			return -EINVAL;
4029		rstats.tp_all = atomic_long_read(&po->rollover->num);
4030		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4031		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4032		data = &rstats;
4033		lv = sizeof(rstats);
4034		break;
4035	case PACKET_TX_HAS_OFF:
4036		val = po->tp_tx_has_off;
4037		break;
4038	case PACKET_QDISC_BYPASS:
4039		val = packet_use_direct_xmit(po);
4040		break;
4041	default:
4042		return -ENOPROTOOPT;
4043	}
4044
4045	if (len > lv)
4046		len = lv;
4047	if (put_user(len, optlen))
4048		return -EFAULT;
4049	if (copy_to_user(optval, data, len))
4050		return -EFAULT;
4051	return 0;
4052}
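/*
 * Illustrative userspace sketch (fd is assumed): PACKET_STATISTICS is
 * read-and-clear, as implemented above, and tp_packets already includes
 * the dropped count.  With TPACKET_V3 a struct tpacket_stats_v3 is
 * returned instead.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 */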
4053
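/*
 * Netdevice notifier: walks every packet socket in the namespace.  On
 * NETDEV_UNREGISTER the multicast entries for the device are dropped
 * and the socket is fully unbound; on NETDEV_DOWN (and UNREGISTER) the
 * protocol hook is unregistered and ENETDOWN is reported; on NETDEV_UP
 * a socket that is still bound to a protocol re-registers its hook.
 */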
4054static int packet_notifier(struct notifier_block *this,
4055			   unsigned long msg, void *ptr)
4056{
4057	struct sock *sk;
4058	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4059	struct net *net = dev_net(dev);
4060
4061	rcu_read_lock();
4062	sk_for_each_rcu(sk, &net->packet.sklist) {
4063		struct packet_sock *po = pkt_sk(sk);
4064
4065		switch (msg) {
4066		case NETDEV_UNREGISTER:
4067			if (po->mclist)
4068				packet_dev_mclist_delete(dev, &po->mclist);
4069			fallthrough;
4070
4071		case NETDEV_DOWN:
4072			if (dev->ifindex == po->ifindex) {
4073				spin_lock(&po->bind_lock);
4074				if (po->running) {
4075					__unregister_prot_hook(sk, false);
4076					sk->sk_err = ENETDOWN;
4077					if (!sock_flag(sk, SOCK_DEAD))
4078						sk->sk_error_report(sk);
4079				}
4080				if (msg == NETDEV_UNREGISTER) {
4081					packet_cached_dev_reset(po);
4082					po->ifindex = -1;
4083					if (po->prot_hook.dev)
4084						dev_put(po->prot_hook.dev);
4085					po->prot_hook.dev = NULL;
4086				}
4087				spin_unlock(&po->bind_lock);
4088			}
4089			break;
4090		case NETDEV_UP:
4091			if (dev->ifindex == po->ifindex) {
4092				spin_lock(&po->bind_lock);
4093				if (po->num)
4094					register_prot_hook(sk);
4095				spin_unlock(&po->bind_lock);
4096			}
4097			break;
4098		}
4099	}
4100	rcu_read_unlock();
4101	return NOTIFY_DONE;
4102}
4103
4104
4105static int packet_ioctl(struct socket *sock, unsigned int cmd,
4106			unsigned long arg)
4107{
4108	struct sock *sk = sock->sk;
4109
4110	switch (cmd) {
4111	case SIOCOUTQ:
4112	{
4113		int amount = sk_wmem_alloc_get(sk);
4114
4115		return put_user(amount, (int __user *)arg);
4116	}
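	/*
	 * Note: for packet sockets SIOCINQ reports the length of the next
	 * queued frame (the head of the receive queue), not the total
	 * number of bytes pending.
	 */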
4117	case SIOCINQ:
4118	{
4119		struct sk_buff *skb;
4120		int amount = 0;
4121
4122		spin_lock_bh(&sk->sk_receive_queue.lock);
4123		skb = skb_peek(&sk->sk_receive_queue);
4124		if (skb)
4125			amount = skb->len;
4126		spin_unlock_bh(&sk->sk_receive_queue.lock);
4127		return put_user(amount, (int __user *)arg);
4128	}
4129#ifdef CONFIG_INET
4130	case SIOCADDRT:
4131	case SIOCDELRT:
4132	case SIOCDARP:
4133	case SIOCGARP:
4134	case SIOCSARP:
4135	case SIOCGIFADDR:
4136	case SIOCSIFADDR:
4137	case SIOCGIFBRDADDR:
4138	case SIOCSIFBRDADDR:
4139	case SIOCGIFNETMASK:
4140	case SIOCSIFNETMASK:
4141	case SIOCGIFDSTADDR:
4142	case SIOCSIFDSTADDR:
4143	case SIOCSIFFLAGS:
4144		return inet_dgram_ops.ioctl(sock, cmd, arg);
4145#endif
4146
4147	default:
4148		return -ENOIOCTLCMD;
4149	}
4150	return 0;
4151}
4152
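/*
 * poll() for packet sockets: on top of the generic datagram readiness,
 * a mapped RX ring is readable once the previous frame slot is no
 * longer owned by the kernel, and a mapped TX ring is writable while
 * the current frame slot is still TP_STATUS_AVAILABLE.
 */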
4153static __poll_t packet_poll(struct file *file, struct socket *sock,
4154				poll_table *wait)
4155{
4156	struct sock *sk = sock->sk;
4157	struct packet_sock *po = pkt_sk(sk);
4158	__poll_t mask = datagram_poll(file, sock, wait);
4159
4160	spin_lock_bh(&sk->sk_receive_queue.lock);
4161	if (po->rx_ring.pg_vec) {
4162		if (!packet_previous_rx_frame(po, &po->rx_ring,
4163			TP_STATUS_KERNEL))
4164			mask |= EPOLLIN | EPOLLRDNORM;
4165	}
4166	packet_rcv_try_clear_pressure(po);
4167	spin_unlock_bh(&sk->sk_receive_queue.lock);
4168	spin_lock_bh(&sk->sk_write_queue.lock);
4169	if (po->tx_ring.pg_vec) {
4170		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4171			mask |= EPOLLOUT | EPOLLWRNORM;
4172	}
4173	spin_unlock_bh(&sk->sk_write_queue.lock);
4174	return mask;
4175}
4176
4177
4178/* Dirty? Well, I still have not found a better way to account
4179 * for user mmaps.
4180 */
4181
4182static void packet_mm_open(struct vm_area_struct *vma)
4183{
4184	struct file *file = vma->vm_file;
4185	struct socket *sock = file->private_data;
4186	struct sock *sk = sock->sk;
4187
4188	if (sk)
4189		atomic_inc(&pkt_sk(sk)->mapped);
4190}
4191
4192static void packet_mm_close(struct vm_area_struct *vma)
4193{
4194	struct file *file = vma->vm_file;
4195	struct socket *sock = file->private_data;
4196	struct sock *sk = sock->sk;
4197
4198	if (sk)
4199		atomic_dec(&pkt_sk(sk)->mapped);
4200}
4201
4202static const struct vm_operations_struct packet_mmap_ops = {
4203	.open	=	packet_mm_open,
4204	.close	=	packet_mm_close,
4205};
4206
4207static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4208			unsigned int len)
4209{
4210	int i;
4211
4212	for (i = 0; i < len; i++) {
4213		if (likely(pg_vec[i].buffer)) {
4214			if (is_vmalloc_addr(pg_vec[i].buffer))
4215				vfree(pg_vec[i].buffer);
4216			else
4217				free_pages((unsigned long)pg_vec[i].buffer,
4218					   order);
4219			pg_vec[i].buffer = NULL;
4220		}
4221	}
4222	kfree(pg_vec);
4223}
4224
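/*
 * Allocate one ring block of 2^order pages: try cheap physically
 * contiguous pages first (no retry, no warning), fall back to vmalloc,
 * and as a last resort retry the page allocator with reclaim allowed.
 */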
4225static char *alloc_one_pg_vec_page(unsigned long order)
4226{
4227	char *buffer;
4228	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4229			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4230
4231	buffer = (char *) __get_free_pages(gfp_flags, order);
4232	if (buffer)
4233		return buffer;
4234
4235	/* __get_free_pages failed, fall back to vmalloc */
4236	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4237	if (buffer)
4238		return buffer;
4239
4240	/* vmalloc failed, let's dig into swap here */
4241	gfp_flags &= ~__GFP_NORETRY;
4242	buffer = (char *) __get_free_pages(gfp_flags, order);
4243	if (buffer)
4244		return buffer;
4245
4246	/* complete and utter failure */
4247	return NULL;
4248}
4249
4250static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4251{
4252	unsigned int block_nr = req->tp_block_nr;
4253	struct pgv *pg_vec;
4254	int i;
4255
4256	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4257	if (unlikely(!pg_vec))
4258		goto out;
4259
4260	for (i = 0; i < block_nr; i++) {
4261		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4262		if (unlikely(!pg_vec[i].buffer))
4263			goto out_free_pgvec;
4264	}
4265
4266out:
4267	return pg_vec;
4268
4269out_free_pgvec:
4270	free_pg_vec(pg_vec, order, block_nr);
4271	pg_vec = NULL;
4272	goto out;
4273}
4274
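/*
 * Configure or tear down an RX/TX ring.  The request is validated
 * against the negotiated TPACKET version, the block vector is
 * allocated, the protocol hook is temporarily unregistered, and the
 * new vector is swapped in under pg_vec_lock, provided the old ring is
 * not still mmapped.  A request with tp_block_nr == 0 (and therefore
 * tp_frame_nr == 0) tears an existing ring down.
 */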
4275static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4276		int closing, int tx_ring)
4277{
4278	struct pgv *pg_vec = NULL;
4279	struct packet_sock *po = pkt_sk(sk);
4280	unsigned long *rx_owner_map = NULL;
4281	int was_running, order = 0;
4282	struct packet_ring_buffer *rb;
4283	struct sk_buff_head *rb_queue;
4284	__be16 num;
4285	int err;
4286	/* Aliased to keep the code churn minimal */
4287	struct tpacket_req *req = &req_u->req;
4288
4289	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4290	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4291
4292	err = -EBUSY;
4293	if (!closing) {
4294		if (atomic_read(&po->mapped))
4295			goto out;
4296		if (packet_read_pending(rb))
4297			goto out;
4298	}
4299
4300	if (req->tp_block_nr) {
4301		unsigned int min_frame_size;
4302
4303		/* Sanity tests and some calculations */
4304		err = -EBUSY;
4305		if (unlikely(rb->pg_vec))
4306			goto out;
4307
4308		switch (po->tp_version) {
4309		case TPACKET_V1:
4310			po->tp_hdrlen = TPACKET_HDRLEN;
4311			break;
4312		case TPACKET_V2:
4313			po->tp_hdrlen = TPACKET2_HDRLEN;
4314			break;
4315		case TPACKET_V3:
4316			po->tp_hdrlen = TPACKET3_HDRLEN;
4317			break;
4318		}
4319
4320		err = -EINVAL;
4321		if (unlikely((int)req->tp_block_size <= 0))
4322			goto out;
4323		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4324			goto out;
4325		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4326		if (po->tp_version >= TPACKET_V3 &&
4327		    req->tp_block_size <
4328		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4329			goto out;
4330		if (unlikely(req->tp_frame_size < min_frame_size))
4331			goto out;
4332		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4333			goto out;
4334
4335		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4336		if (unlikely(rb->frames_per_block == 0))
4337			goto out;
4338		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4339			goto out;
4340		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4341					req->tp_frame_nr))
4342			goto out;
4343
4344		err = -ENOMEM;
4345		order = get_order(req->tp_block_size);
4346		pg_vec = alloc_pg_vec(req, order);
4347		if (unlikely(!pg_vec))
4348			goto out;
4349		switch (po->tp_version) {
4350		case TPACKET_V3:
4351			/* Block transmit is not supported yet */
4352			if (!tx_ring) {
4353				init_prb_bdqc(po, rb, pg_vec, req_u);
4354			} else {
4355				struct tpacket_req3 *req3 = &req_u->req3;
4356
4357				if (req3->tp_retire_blk_tov ||
4358				    req3->tp_sizeof_priv ||
4359				    req3->tp_feature_req_word) {
4360					err = -EINVAL;
4361					goto out_free_pg_vec;
4362				}
4363			}
4364			break;
4365		default:
4366			if (!tx_ring) {
4367				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4368					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4369				if (!rx_owner_map)
4370					goto out_free_pg_vec;
4371			}
4372			break;
4373		}
4374	}
4375	/* Done */
4376	else {
4377		err = -EINVAL;
4378		if (unlikely(req->tp_frame_nr))
4379			goto out;
4380	}
4381
4382
4383	/* Detach socket from network */
4384	spin_lock(&po->bind_lock);
4385	was_running = po->running;
4386	num = po->num;
4387	if (was_running) {
4388		po->num = 0;
4389		__unregister_prot_hook(sk, false);
4390	}
4391	spin_unlock(&po->bind_lock);
4392
4393	synchronize_net();
4394
4395	err = -EBUSY;
4396	mutex_lock(&po->pg_vec_lock);
4397	if (closing || atomic_read(&po->mapped) == 0) {
4398		err = 0;
4399		spin_lock_bh(&rb_queue->lock);
4400		swap(rb->pg_vec, pg_vec);
4401		if (po->tp_version <= TPACKET_V2)
4402			swap(rb->rx_owner_map, rx_owner_map);
4403		rb->frame_max = (req->tp_frame_nr - 1);
4404		rb->head = 0;
4405		rb->frame_size = req->tp_frame_size;
4406		spin_unlock_bh(&rb_queue->lock);
4407
4408		swap(rb->pg_vec_order, order);
4409		swap(rb->pg_vec_len, req->tp_block_nr);
4410
4411		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4412		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4413						tpacket_rcv : packet_rcv;
4414		skb_queue_purge(rb_queue);
4415		if (atomic_read(&po->mapped))
4416			pr_err("packet_mmap: vma is busy: %d\n",
4417			       atomic_read(&po->mapped));
4418	}
4419	mutex_unlock(&po->pg_vec_lock);
4420
4421	spin_lock(&po->bind_lock);
4422	if (was_running) {
4423		po->num = num;
4424		register_prot_hook(sk);
4425	}
4426	spin_unlock(&po->bind_lock);
4427	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4428		/* Because we don't support block-based V3 on tx-ring */
4429		if (!tx_ring)
4430			prb_shutdown_retire_blk_timer(po, rb_queue);
4431	}
4432
4433out_free_pg_vec:
4434	bitmap_free(rx_owner_map);
4435	if (pg_vec)
4436		free_pg_vec(pg_vec, order, req->tp_block_nr);
4437out:
4438	return err;
4439}
4440
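/*
 * mmap() the rings into userspace.  The mapping must start at offset 0
 * and cover exactly the configured RX ring followed by the TX ring;
 * every block page is inserted individually, and the mapped count keeps
 * packet_set_ring() from resizing the rings underneath the mapping.
 */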
4441static int packet_mmap(struct file *file, struct socket *sock,
4442		struct vm_area_struct *vma)
4443{
4444	struct sock *sk = sock->sk;
4445	struct packet_sock *po = pkt_sk(sk);
4446	unsigned long size, expected_size;
4447	struct packet_ring_buffer *rb;
4448	unsigned long start;
4449	int err = -EINVAL;
4450	int i;
4451
4452	if (vma->vm_pgoff)
4453		return -EINVAL;
4454
4455	mutex_lock(&po->pg_vec_lock);
4456
4457	expected_size = 0;
4458	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4459		if (rb->pg_vec) {
4460			expected_size += rb->pg_vec_len
4461						* rb->pg_vec_pages
4462						* PAGE_SIZE;
4463		}
4464	}
4465
4466	if (expected_size == 0)
4467		goto out;
4468
4469	size = vma->vm_end - vma->vm_start;
4470	if (size != expected_size)
4471		goto out;
4472
4473	start = vma->vm_start;
4474	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4475		if (rb->pg_vec == NULL)
4476			continue;
4477
4478		for (i = 0; i < rb->pg_vec_len; i++) {
4479			struct page *page;
4480			void *kaddr = rb->pg_vec[i].buffer;
4481			int pg_num;
4482
4483			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4484				page = pgv_to_page(kaddr);
4485				err = vm_insert_page(vma, start, page);
4486				if (unlikely(err))
4487					goto out;
4488				start += PAGE_SIZE;
4489				kaddr += PAGE_SIZE;
4490			}
4491		}
4492	}
4493
4494	atomic_inc(&po->mapped);
4495	vma->vm_ops = &packet_mmap_ops;
4496	err = 0;
4497
4498out:
4499	mutex_unlock(&po->pg_vec_lock);
4500	return err;
4501}
4502
4503static const struct proto_ops packet_ops_spkt = {
4504	.family =	PF_PACKET,
4505	.owner =	THIS_MODULE,
4506	.release =	packet_release,
4507	.bind =		packet_bind_spkt,
4508	.connect =	sock_no_connect,
4509	.socketpair =	sock_no_socketpair,
4510	.accept =	sock_no_accept,
4511	.getname =	packet_getname_spkt,
4512	.poll =		datagram_poll,
4513	.ioctl =	packet_ioctl,
4514	.gettstamp =	sock_gettstamp,
4515	.listen =	sock_no_listen,
4516	.shutdown =	sock_no_shutdown,
4517	.sendmsg =	packet_sendmsg_spkt,
4518	.recvmsg =	packet_recvmsg,
4519	.mmap =		sock_no_mmap,
4520	.sendpage =	sock_no_sendpage,
4521};
4522
4523static const struct proto_ops packet_ops = {
4524	.family =	PF_PACKET,
4525	.owner =	THIS_MODULE,
4526	.release =	packet_release,
4527	.bind =		packet_bind,
4528	.connect =	sock_no_connect,
4529	.socketpair =	sock_no_socketpair,
4530	.accept =	sock_no_accept,
4531	.getname =	packet_getname,
4532	.poll =		packet_poll,
4533	.ioctl =	packet_ioctl,
4534	.gettstamp =	sock_gettstamp,
4535	.listen =	sock_no_listen,
4536	.shutdown =	sock_no_shutdown,
4537	.setsockopt =	packet_setsockopt,
4538	.getsockopt =	packet_getsockopt,
4539	.sendmsg =	packet_sendmsg,
4540	.recvmsg =	packet_recvmsg,
4541	.mmap =		packet_mmap,
4542	.sendpage =	sock_no_sendpage,
4543};
4544
4545static const struct net_proto_family packet_family_ops = {
4546	.family =	PF_PACKET,
4547	.create =	packet_create,
4548	.owner	=	THIS_MODULE,
4549};
4550
4551static struct notifier_block packet_netdev_notifier = {
4552	.notifier_call =	packet_notifier,
4553};
4554
4555#ifdef CONFIG_PROC_FS
4556
4557static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4558	__acquires(RCU)
4559{
4560	struct net *net = seq_file_net(seq);
4561
4562	rcu_read_lock();
4563	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4564}
4565
4566static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4567{
4568	struct net *net = seq_file_net(seq);
4569	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4570}
4571
4572static void packet_seq_stop(struct seq_file *seq, void *v)
4573	__releases(RCU)
4574{
4575	rcu_read_unlock();
4576}
4577
4578static int packet_seq_show(struct seq_file *seq, void *v)
4579{
4580	if (v == SEQ_START_TOKEN)
4581		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4582	else {
4583		struct sock *s = sk_entry(v);
4584		const struct packet_sock *po = pkt_sk(s);
4585
4586		seq_printf(seq,
4587			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4588			   s,
4589			   refcount_read(&s->sk_refcnt),
4590			   s->sk_type,
4591			   ntohs(po->num),
4592			   po->ifindex,
4593			   po->running,
4594			   atomic_read(&s->sk_rmem_alloc),
4595			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4596			   sock_i_ino(s));
4597	}
4598
4599	return 0;
4600}
4601
4602static const struct seq_operations packet_seq_ops = {
4603	.start	= packet_seq_start,
4604	.next	= packet_seq_next,
4605	.stop	= packet_seq_stop,
4606	.show	= packet_seq_show,
4607};
4608#endif
4609
4610static int __net_init packet_net_init(struct net *net)
4611{
4612	mutex_init(&net->packet.sklist_lock);
4613	INIT_HLIST_HEAD(&net->packet.sklist);
4614
4615	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4616			sizeof(struct seq_net_private)))
4617		return -ENOMEM;
4618
4619	return 0;
4620}
4621
4622static void __net_exit packet_net_exit(struct net *net)
4623{
4624	remove_proc_entry("packet", net->proc_net);
4625	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4626}
4627
4628static struct pernet_operations packet_net_ops = {
4629	.init = packet_net_init,
4630	.exit = packet_net_exit,
4631};
4632
4633
4634static void __exit packet_exit(void)
4635{
4636	unregister_netdevice_notifier(&packet_netdev_notifier);
4637	unregister_pernet_subsys(&packet_net_ops);
4638	sock_unregister(PF_PACKET);
4639	proto_unregister(&packet_proto);
4640}
4641
4642static int __init packet_init(void)
4643{
4644	int rc;
4645
4646	rc = proto_register(&packet_proto, 0);
4647	if (rc)
4648		goto out;
4649	rc = sock_register(&packet_family_ops);
4650	if (rc)
4651		goto out_proto;
4652	rc = register_pernet_subsys(&packet_net_ops);
4653	if (rc)
4654		goto out_sock;
4655	rc = register_netdevice_notifier(&packet_netdev_notifier);
4656	if (rc)
4657		goto out_pernet;
4658
4659	return 0;
4660
4661out_pernet:
4662	unregister_pernet_subsys(&packet_net_ops);
4663out_sock:
4664	sock_unregister(PF_PACKET);
4665out_proto:
4666	proto_unregister(&packet_proto);
4667out:
4668	return rc;
4669}
4670
4671module_init(packet_init);
4672module_exit(packet_exit);
4673MODULE_LICENSE("GPL");
4674MODULE_ALIAS_NETPROTO(PF_PACKET);