   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  50
  51#include <linux/ethtool.h>
  52#include <linux/filter.h>
  53#include <linux/types.h>
  54#include <linux/mm.h>
  55#include <linux/capability.h>
  56#include <linux/fcntl.h>
  57#include <linux/socket.h>
  58#include <linux/in.h>
  59#include <linux/inet.h>
  60#include <linux/netdevice.h>
  61#include <linux/if_packet.h>
  62#include <linux/wireless.h>
  63#include <linux/kernel.h>
  64#include <linux/kmod.h>
  65#include <linux/slab.h>
  66#include <linux/vmalloc.h>
  67#include <net/net_namespace.h>
  68#include <net/ip.h>
  69#include <net/protocol.h>
  70#include <linux/skbuff.h>
  71#include <net/sock.h>
  72#include <linux/errno.h>
  73#include <linux/timer.h>
  74#include <linux/uaccess.h>
  75#include <asm/ioctls.h>
  76#include <asm/page.h>
  77#include <asm/cacheflush.h>
  78#include <asm/io.h>
  79#include <linux/proc_fs.h>
  80#include <linux/seq_file.h>
  81#include <linux/poll.h>
  82#include <linux/module.h>
  83#include <linux/init.h>
  84#include <linux/mutex.h>
  85#include <linux/if_vlan.h>
  86#include <linux/virtio_net.h>
  87#include <linux/errqueue.h>
  88#include <linux/net_tstamp.h>
  89#include <linux/percpu.h>
  90#ifdef CONFIG_INET
  91#include <net/inet_common.h>
  92#endif
  93#include <linux/bpf.h>
  94#include <net/compat.h>
  95#include <linux/netfilter_netdev.h>
  96
  97#include "internal.h"
  98
  99/*
 100   Assumptions:
 101   - If the device has no dev->header_ops->create, there is no LL header
 102     visible above the device. In this case, its hard_header_len should be 0.
 103     The device may prepend its own header internally. In this case, its
 104     needed_headroom should be set to the space needed for it to add its
 105     internal header.
 106     For example, a WiFi driver pretending to be an Ethernet driver should
 107     set its hard_header_len to be the Ethernet header length, and set its
 108     needed_headroom to be (the real WiFi header length - the fake Ethernet
 109     header length).
  110   - a packet socket receives packets with the LL header already pulled,
  111     so SOCK_RAW should push it back.
 112
 113On receive:
 114-----------
 115
 116Incoming, dev_has_header(dev) == true
 117   mac_header -> ll header
 118   data       -> data
 119
 120Outgoing, dev_has_header(dev) == true
 121   mac_header -> ll header
 122   data       -> ll header
 123
 124Incoming, dev_has_header(dev) == false
 125   mac_header -> data
 126     However drivers often make it point to the ll header.
 127     This is incorrect because the ll header should be invisible to us.
 128   data       -> data
 129
 130Outgoing, dev_has_header(dev) == false
 131   mac_header -> data. ll header is invisible to us.
 132   data       -> data
 133
  134In summary:
 135  If dev_has_header(dev) == false we are unable to restore the ll header,
 136    because it is invisible to us.
 137
 138
 139On transmit:
 140------------
 141
 142dev_has_header(dev) == true
 143   mac_header -> ll header
 144   data       -> ll header
 145
 146dev_has_header(dev) == false (ll header is invisible to us)
 147   mac_header -> data
 148   data       -> data
 149
  150   We should set network_header on output to the correct position;
  151   the packet classifier depends on it.
 152 */
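/* Illustrative user-space sketch (not part of the kernel source): how the
 * LL header visibility described above looks from an application.  Assumes
 * an Ethernet device, CAP_NET_RAW, and the usual headers (<sys/socket.h>,
 * <linux/if_packet.h>, <linux/if_ether.h>); error handling is omitted.
 *
 *	// SOCK_RAW: frames start at the link-layer header.
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	char buf[2048];
 *	ssize_t n = recv(raw, buf, sizeof(buf), 0);
 *	struct ethhdr *eth = (struct ethhdr *)buf;	// LL header visible
 *
 *	// SOCK_DGRAM: the LL header has already been pulled; data starts
 *	// at the network layer and the LL info arrives via sockaddr_ll.
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *	n = recvfrom(dgram, buf, sizeof(buf), 0,
 *		     (struct sockaddr *)&sll, &slen);	// buf starts at L3
 */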
 153
 154/* Private packet socket structures. */
 155
 156/* identical to struct packet_mreq except it has
 157 * a longer address field.
 158 */
 159struct packet_mreq_max {
 160	int		mr_ifindex;
 161	unsigned short	mr_type;
 162	unsigned short	mr_alen;
 163	unsigned char	mr_address[MAX_ADDR_LEN];
 164};
 165
 166union tpacket_uhdr {
 167	struct tpacket_hdr  *h1;
 168	struct tpacket2_hdr *h2;
 169	struct tpacket3_hdr *h3;
 170	void *raw;
 171};
 172
 173static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 174		int closing, int tx_ring);
 175
 176#define V3_ALIGNMENT	(8)
 177
 178#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 179
 180#define BLK_PLUS_PRIV(sz_of_priv) \
 181	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 182
 183#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 184#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 185#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 186#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 187#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 188#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 189
 190struct packet_sock;
 191static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 192		       struct packet_type *pt, struct net_device *orig_dev);
 193
 194static void *packet_previous_frame(struct packet_sock *po,
 195		struct packet_ring_buffer *rb,
 196		int status);
 197static void packet_increment_head(struct packet_ring_buffer *buff);
 198static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 199static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 200			struct packet_sock *);
 201static void prb_retire_current_block(struct tpacket_kbdq_core *,
 202		struct packet_sock *, unsigned int status);
 203static int prb_queue_frozen(struct tpacket_kbdq_core *);
 204static void prb_open_block(struct tpacket_kbdq_core *,
 205		struct tpacket_block_desc *);
 206static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 207static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 208static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 209static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 210		struct tpacket3_hdr *);
 211static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 212		struct tpacket3_hdr *);
 213static void packet_flush_mclist(struct sock *sk);
 214static u16 packet_pick_tx_queue(struct sk_buff *skb);
 215
 216struct packet_skb_cb {
 217	union {
 218		struct sockaddr_pkt pkt;
 219		union {
 220			/* Trick: alias skb original length with
 221			 * ll.sll_family and ll.protocol in order
 222			 * to save room.
 223			 */
 224			unsigned int origlen;
 225			struct sockaddr_ll ll;
 226		};
 227	} sa;
 228};
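/* Illustrative note (an assumption spelled out, not upstream documentation):
 * sockaddr_ll begins with sll_family (u16) immediately followed by
 * sll_protocol (__be16), so the 4-byte 'origlen' overlays exactly those two
 * fields.  The receive path can stash the original length there, e.g.
 *
 *	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
 *
 * and recvmsg() rewrites sll_family/sll_protocol before copying the address
 * out, so the alias never leaks to user space.
 */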
 229
 230#define vio_le() virtio_legacy_is_little_endian()
 231
 232#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 233
 234#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 235#define GET_PBLOCK_DESC(x, bid)	\
 236	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 237#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 238	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 239#define GET_NEXT_PRB_BLK_NUM(x) \
 240	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 241	((x)->kactive_blk_num+1) : 0)
 242
 243static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 244static void __fanout_link(struct sock *sk, struct packet_sock *po);
 245
 246#ifdef CONFIG_NETFILTER_EGRESS
 247static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
 248{
 249	struct sk_buff *next, *head = NULL, *tail;
 250	int rc;
 251
 252	rcu_read_lock();
 253	for (; skb != NULL; skb = next) {
 254		next = skb->next;
 255		skb_mark_not_on_list(skb);
 256
 257		if (!nf_hook_egress(skb, &rc, skb->dev))
 258			continue;
 259
 260		if (!head)
 261			head = skb;
 262		else
 263			tail->next = skb;
 264
 265		tail = skb;
 266	}
 267	rcu_read_unlock();
 268
 269	return head;
 270}
 271#endif
 272
 273static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
 274{
 275	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
 276		return dev_queue_xmit(skb);
 277
 278#ifdef CONFIG_NETFILTER_EGRESS
 279	if (nf_hook_egress_active()) {
 280		skb = nf_hook_direct_egress(skb);
 281		if (!skb)
 282			return NET_XMIT_DROP;
 283	}
 284#endif
 285	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 286}
 287
 288static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 289{
 290	struct net_device *dev;
 291
 292	rcu_read_lock();
 293	dev = rcu_dereference(po->cached_dev);
 294	dev_hold(dev);
 295	rcu_read_unlock();
 296
 297	return dev;
 298}
 299
 300static void packet_cached_dev_assign(struct packet_sock *po,
 301				     struct net_device *dev)
 302{
 303	rcu_assign_pointer(po->cached_dev, dev);
 304}
 305
 306static void packet_cached_dev_reset(struct packet_sock *po)
 307{
 308	RCU_INIT_POINTER(po->cached_dev, NULL);
 309}
 310
 311static u16 packet_pick_tx_queue(struct sk_buff *skb)
 312{
 313	struct net_device *dev = skb->dev;
 314	const struct net_device_ops *ops = dev->netdev_ops;
 315	int cpu = raw_smp_processor_id();
 316	u16 queue_index;
 317
 318#ifdef CONFIG_XPS
 319	skb->sender_cpu = cpu + 1;
 320#endif
 321	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 322	if (ops->ndo_select_queue) {
 323		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 324		queue_index = netdev_cap_txqueue(dev, queue_index);
 325	} else {
 326		queue_index = netdev_pick_tx(dev, skb, NULL);
 327	}
 328
 329	return queue_index;
 330}
 331
 332/* __register_prot_hook must be invoked through register_prot_hook
 333 * or from a context in which asynchronous accesses to the packet
 334 * socket is not possible (packet_create()).
 335 */
 336static void __register_prot_hook(struct sock *sk)
 337{
 338	struct packet_sock *po = pkt_sk(sk);
 339
 340	if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
 341		if (po->fanout)
 342			__fanout_link(sk, po);
 343		else
 344			dev_add_pack(&po->prot_hook);
 345
 346		sock_hold(sk);
 347		packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
 348	}
 349}
 350
 351static void register_prot_hook(struct sock *sk)
 352{
 353	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 354	__register_prot_hook(sk);
 355}
 356
 357/* If the sync parameter is true, we will temporarily drop
 358 * the po->bind_lock and do a synchronize_net to make sure no
 359 * asynchronous packet processing paths still refer to the elements
 360 * of po->prot_hook.  If the sync parameter is false, it is the
  361 * caller's responsibility to take care of this.
 362 */
 363static void __unregister_prot_hook(struct sock *sk, bool sync)
 364{
 365	struct packet_sock *po = pkt_sk(sk);
 366
 367	lockdep_assert_held_once(&po->bind_lock);
 368
 369	packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);
 370
 371	if (po->fanout)
 372		__fanout_unlink(sk, po);
 373	else
 374		__dev_remove_pack(&po->prot_hook);
 375
 376	__sock_put(sk);
 377
 378	if (sync) {
 379		spin_unlock(&po->bind_lock);
 380		synchronize_net();
 381		spin_lock(&po->bind_lock);
 382	}
 383}
 384
 385static void unregister_prot_hook(struct sock *sk, bool sync)
 386{
 387	struct packet_sock *po = pkt_sk(sk);
 388
 389	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
 390		__unregister_prot_hook(sk, sync);
 391}
 392
 393static inline struct page * __pure pgv_to_page(void *addr)
 394{
 395	if (is_vmalloc_addr(addr))
 396		return vmalloc_to_page(addr);
 397	return virt_to_page(addr);
 398}
 399
 400static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 401{
 402	union tpacket_uhdr h;
 403
 404	/* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
 405
 406	h.raw = frame;
 407	switch (po->tp_version) {
 408	case TPACKET_V1:
 409		WRITE_ONCE(h.h1->tp_status, status);
 410		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 411		break;
 412	case TPACKET_V2:
 413		WRITE_ONCE(h.h2->tp_status, status);
 414		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 415		break;
 416	case TPACKET_V3:
 417		WRITE_ONCE(h.h3->tp_status, status);
 418		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 419		break;
 420	default:
 421		WARN(1, "TPACKET version not supported.\n");
 422		BUG();
 423	}
 424
 425	smp_wmb();
 426}
 427
 428static int __packet_get_status(const struct packet_sock *po, void *frame)
 429{
 430	union tpacket_uhdr h;
 431
 432	smp_rmb();
 433
 434	/* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
 435
 436	h.raw = frame;
 437	switch (po->tp_version) {
 438	case TPACKET_V1:
 439		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 440		return READ_ONCE(h.h1->tp_status);
 441	case TPACKET_V2:
 442		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 443		return READ_ONCE(h.h2->tp_status);
 444	case TPACKET_V3:
 445		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 446		return READ_ONCE(h.h3->tp_status);
 447	default:
 448		WARN(1, "TPACKET version not supported.\n");
 449		BUG();
 450		return 0;
 451	}
 452}
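/* Illustrative user-space sketch (assumption, not part of the kernel
 * source): the consumer side of the tp_status handshake above, for a
 * TPACKET_V1/V2 PACKET_RX_RING mmap()ed at 'ring' (a char *).  'fd' is the
 * packet socket, 'frame_size'/'frame_nr' come from the tpacket_req used at
 * setup, and process_frame() is a placeholder.
 *
 *	unsigned int i = 0;
 *	for (;;) {
 *		struct tpacket2_hdr *hdr = (void *)(ring + i * frame_size);
 *
 *		while (!(hdr->tp_status & TP_STATUS_USER))
 *			poll(&(struct pollfd){ .fd = fd, .events = POLLIN },
 *			     1, -1);
 *		process_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *		hdr->tp_status = TP_STATUS_KERNEL;	// hand frame back
 *		i = (i + 1) % frame_nr;
 *	}
 *
 * A real consumer also needs a compiler/memory barrier between testing
 * tp_status and touching the frame data, mirroring the smp_rmb()/smp_wmb()
 * pairing in the kernel code above.
 */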
 453
 454static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 455				   unsigned int flags)
 456{
 457	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 458
 459	if (shhwtstamps &&
 460	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 461	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 462		return TP_STATUS_TS_RAW_HARDWARE;
 463
 464	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
 465	    ktime_to_timespec64_cond(skb_tstamp(skb), ts))
 466		return TP_STATUS_TS_SOFTWARE;
 467
 468	return 0;
 469}
 470
 471static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 472				    struct sk_buff *skb)
 473{
 474	union tpacket_uhdr h;
 475	struct timespec64 ts;
 476	__u32 ts_status;
 477
 478	if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
 479		return 0;
 480
 481	h.raw = frame;
 482	/*
 483	 * versions 1 through 3 overflow the timestamps in y2106, since they
 484	 * all store the seconds in a 32-bit unsigned integer.
 485	 * If we create a version 4, that should have a 64-bit timestamp,
 486	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
 487	 * nanoseconds.
 488	 */
 489	switch (po->tp_version) {
 490	case TPACKET_V1:
 491		h.h1->tp_sec = ts.tv_sec;
 492		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 493		break;
 494	case TPACKET_V2:
 495		h.h2->tp_sec = ts.tv_sec;
 496		h.h2->tp_nsec = ts.tv_nsec;
 497		break;
 498	case TPACKET_V3:
 499		h.h3->tp_sec = ts.tv_sec;
 500		h.h3->tp_nsec = ts.tv_nsec;
 501		break;
 502	default:
 503		WARN(1, "TPACKET version not supported.\n");
 504		BUG();
 505	}
 506
 507	/* one flush is safe, as both fields always lie on the same cacheline */
 508	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 509	smp_wmb();
 510
 511	return ts_status;
 512}
 513
 514static void *packet_lookup_frame(const struct packet_sock *po,
 515				 const struct packet_ring_buffer *rb,
 516				 unsigned int position,
 517				 int status)
 518{
 519	unsigned int pg_vec_pos, frame_offset;
 520	union tpacket_uhdr h;
 521
 522	pg_vec_pos = position / rb->frames_per_block;
 523	frame_offset = position % rb->frames_per_block;
 524
 525	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 526		(frame_offset * rb->frame_size);
 527
 528	if (status != __packet_get_status(po, h.raw))
 529		return NULL;
 530
 531	return h.raw;
 532}
 533
 534static void *packet_current_frame(struct packet_sock *po,
 535		struct packet_ring_buffer *rb,
 536		int status)
 537{
 538	return packet_lookup_frame(po, rb, rb->head, status);
 539}
 540
 541static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
 542{
 543	struct vlan_hdr vhdr, *vh;
 544	unsigned int header_len;
 545
 546	if (!dev)
 547		return 0;
 548
 549	/* In the SOCK_DGRAM scenario, skb data starts at the network
 550	 * protocol, which is after the VLAN headers. The outer VLAN
 551	 * header is at the hard_header_len offset in non-variable
 552	 * length link layer headers. If it's a VLAN device, the
 553	 * min_header_len should be used to exclude the VLAN header
 554	 * size.
 555	 */
 556	if (dev->min_header_len == dev->hard_header_len)
 557		header_len = dev->hard_header_len;
 558	else if (is_vlan_dev(dev))
 559		header_len = dev->min_header_len;
 560	else
 561		return 0;
 562
 563	vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
 564				sizeof(vhdr), &vhdr);
 565	if (unlikely(!vh))
 566		return 0;
 567
 568	return ntohs(vh->h_vlan_TCI);
 569}
 570
 571static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
 572{
 573	__be16 proto = skb->protocol;
 574
 575	if (unlikely(eth_type_vlan(proto)))
 576		proto = __vlan_get_protocol_offset(skb, proto,
 577						   skb_mac_offset(skb), NULL);
 578
 579	return proto;
 580}
 581
 582static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 583{
 584	del_timer_sync(&pkc->retire_blk_timer);
 585}
 586
 587static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 588		struct sk_buff_head *rb_queue)
 589{
 590	struct tpacket_kbdq_core *pkc;
 591
 592	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 593
 594	spin_lock_bh(&rb_queue->lock);
 595	pkc->delete_blk_timer = 1;
 596	spin_unlock_bh(&rb_queue->lock);
 597
 598	prb_del_retire_blk_timer(pkc);
 599}
 600
 601static void prb_setup_retire_blk_timer(struct packet_sock *po)
 602{
 603	struct tpacket_kbdq_core *pkc;
 604
 605	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 606	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 607		    0);
 608	pkc->retire_blk_timer.expires = jiffies;
 609}
 610
 611static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 612				int blk_size_in_bytes)
 613{
 614	struct net_device *dev;
 615	unsigned int mbits, div;
 616	struct ethtool_link_ksettings ecmd;
 617	int err;
 618
 619	rtnl_lock();
 620	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 621	if (unlikely(!dev)) {
 622		rtnl_unlock();
 623		return DEFAULT_PRB_RETIRE_TOV;
 624	}
 625	err = __ethtool_get_link_ksettings(dev, &ecmd);
 626	rtnl_unlock();
 627	if (err)
 628		return DEFAULT_PRB_RETIRE_TOV;
 629
  630	/* If the link speed is so slow, you don't really
  631	 * need to worry about perf anyway.
  632	 */
 633	if (ecmd.base.speed < SPEED_1000 ||
 634	    ecmd.base.speed == SPEED_UNKNOWN)
 635		return DEFAULT_PRB_RETIRE_TOV;
 636
 637	div = ecmd.base.speed / 1000;
 638	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 639
 640	if (div)
 641		mbits /= div;
 642
 643	if (div)
 644		return mbits + 1;
 645	return mbits;
 646}
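/* Worked example of the formula above (illustrative only): with a 1 MiB
 * block on a 1 Gb/s link, mbits = (1048576 * 8) / (1024 * 1024) = 8 and
 * div = 1, so the returned timeout is 8 + 1 = 9 ms, just above the ~8 ms
 * it takes to fill the block.  On a 10 Gb/s link div = 10, so the same
 * block size gives 8 / 10 + 1 = 1 ms (integer division).
 */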
 647
 648static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 649			union tpacket_req_u *req_u)
 650{
 651	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 652}
 653
 654static void init_prb_bdqc(struct packet_sock *po,
 655			struct packet_ring_buffer *rb,
 656			struct pgv *pg_vec,
 657			union tpacket_req_u *req_u)
 658{
 659	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 660	struct tpacket_block_desc *pbd;
 661
 662	memset(p1, 0x0, sizeof(*p1));
 663
 664	p1->knxt_seq_num = 1;
 665	p1->pkbdq = pg_vec;
 666	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 667	p1->pkblk_start	= pg_vec[0].buffer;
 668	p1->kblk_size = req_u->req3.tp_block_size;
 669	p1->knum_blocks	= req_u->req3.tp_block_nr;
 670	p1->hdrlen = po->tp_hdrlen;
 671	p1->version = po->tp_version;
 672	p1->last_kactive_blk_num = 0;
 673	po->stats.stats3.tp_freeze_q_cnt = 0;
 674	if (req_u->req3.tp_retire_blk_tov)
 675		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 676	else
 677		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 678						req_u->req3.tp_block_size);
 679	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 680	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 681	rwlock_init(&p1->blk_fill_in_prog_lock);
 682
 683	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 684	prb_init_ft_ops(p1, req_u);
 685	prb_setup_retire_blk_timer(po);
 686	prb_open_block(p1, pbd);
 687}
 688
 689/*  Do NOT update the last_blk_num first.
 690 *  Assumes sk_buff_head lock is held.
 691 */
 692static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 693{
 694	mod_timer(&pkc->retire_blk_timer,
 695			jiffies + pkc->tov_in_jiffies);
 696	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 697}
 698
 699/*
 700 * Timer logic:
 701 * 1) We refresh the timer only when we open a block.
 702 *    By doing this we don't waste cycles refreshing the timer
  703 *    on a packet-by-packet basis.
 704 *
 705 * With a 1MB block-size, on a 1Gbps line, it will take
 706 * i) ~8 ms to fill a block + ii) memcpy etc.
 707 * In this cut we are not accounting for the memcpy time.
 708 *
 709 * So, if the user sets the 'tmo' to 10ms then the timer
 710 * will never fire while the block is still getting filled
 711 * (which is what we want). However, the user could choose
 712 * to close a block early and that's fine.
 713 *
 714 * But when the timer does fire, we check whether or not to refresh it.
 715 * Since the tmo granularity is in msecs, it is not too expensive
  716 * to refresh the timer, let's say every '8' msecs.
 717 * Either the user can set the 'tmo' or we can derive it based on
 718 * a) line-speed and b) block-size.
 719 * prb_calc_retire_blk_tmo() calculates the tmo.
 720 *
 721 */
 722static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 723{
 724	struct packet_sock *po =
 725		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 726	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 727	unsigned int frozen;
 728	struct tpacket_block_desc *pbd;
 729
 730	spin_lock(&po->sk.sk_receive_queue.lock);
 731
 732	frozen = prb_queue_frozen(pkc);
 733	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 734
 735	if (unlikely(pkc->delete_blk_timer))
 736		goto out;
 737
 738	/* We only need to plug the race when the block is partially filled.
 739	 * tpacket_rcv:
 740	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 741	 *		copy_bits() is in progress ...
 742	 *		timer fires on other cpu:
 743	 *		we can't retire the current block because copy_bits
 744	 *		is in progress.
 745	 *
 746	 */
 747	if (BLOCK_NUM_PKTS(pbd)) {
 748		/* Waiting for skb_copy_bits to finish... */
 749		write_lock(&pkc->blk_fill_in_prog_lock);
 750		write_unlock(&pkc->blk_fill_in_prog_lock);
 751	}
 752
 753	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 754		if (!frozen) {
 755			if (!BLOCK_NUM_PKTS(pbd)) {
 756				/* An empty block. Just refresh the timer. */
 757				goto refresh_timer;
 758			}
 759			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 760			if (!prb_dispatch_next_block(pkc, po))
 761				goto refresh_timer;
 762			else
 763				goto out;
 764		} else {
 765			/* Case 1. Queue was frozen because user-space was
 766			 *	   lagging behind.
 767			 */
 768			if (prb_curr_blk_in_use(pbd)) {
 769				/*
 770				 * Ok, user-space is still behind.
 771				 * So just refresh the timer.
 772				 */
 773				goto refresh_timer;
 774			} else {
  775			       /* Case 2. The queue was frozen, user-space caught up,
  776				* now the link went idle && the timer fired.
  777				* We don't have a block to close, so we open this
  778				* block and restart the timer.
  779				* Opening a block thaws the queue and restarts the timer.
  780				* Thawing/timer-refresh is a side effect.
 781				*/
 782				prb_open_block(pkc, pbd);
 783				goto out;
 784			}
 785		}
 786	}
 787
 788refresh_timer:
 789	_prb_refresh_rx_retire_blk_timer(pkc);
 790
 791out:
 792	spin_unlock(&po->sk.sk_receive_queue.lock);
 793}
 794
 795static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 796		struct tpacket_block_desc *pbd1, __u32 status)
 797{
 798	/* Flush everything minus the block header */
 799
 800#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 801	u8 *start, *end;
 802
 803	start = (u8 *)pbd1;
 804
  805	/* Skip the block header (we know the header WILL fit in 4K) */
 806	start += PAGE_SIZE;
 807
 808	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 809	for (; start < end; start += PAGE_SIZE)
 810		flush_dcache_page(pgv_to_page(start));
 811
 812	smp_wmb();
 813#endif
 814
 815	/* Now update the block status. */
 816
 817	BLOCK_STATUS(pbd1) = status;
 818
 819	/* Flush the block header */
 820
 821#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 822	start = (u8 *)pbd1;
 823	flush_dcache_page(pgv_to_page(start));
 824
 825	smp_wmb();
 826#endif
 827}
 828
 829/*
 830 * Side effect:
 831 *
 832 * 1) flush the block
 833 * 2) Increment active_blk_num
 834 *
  835 * Note: We DON'T refresh the timer on purpose,
  836 *	because almost always the next block will be opened.
 837 */
 838static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 839		struct tpacket_block_desc *pbd1,
 840		struct packet_sock *po, unsigned int stat)
 841{
 842	__u32 status = TP_STATUS_USER | stat;
 843
 844	struct tpacket3_hdr *last_pkt;
 845	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 846	struct sock *sk = &po->sk;
 847
 848	if (atomic_read(&po->tp_drops))
 849		status |= TP_STATUS_LOSING;
 850
 851	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 852	last_pkt->tp_next_offset = 0;
 853
 854	/* Get the ts of the last pkt */
 855	if (BLOCK_NUM_PKTS(pbd1)) {
 856		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 857		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 858	} else {
 859		/* Ok, we tmo'd - so get the current time.
 860		 *
 861		 * It shouldn't really happen as we don't close empty
 862		 * blocks. See prb_retire_rx_blk_timer_expired().
 863		 */
 864		struct timespec64 ts;
 865		ktime_get_real_ts64(&ts);
 866		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 867		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 868	}
 869
 870	smp_wmb();
 871
 872	/* Flush the block */
 873	prb_flush_block(pkc1, pbd1, status);
 874
 875	sk->sk_data_ready(sk);
 876
 877	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 878}
 879
 880static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 881{
 882	pkc->reset_pending_on_curr_blk = 0;
 883}
 884
 885/*
 886 * Side effect of opening a block:
 887 *
 888 * 1) prb_queue is thawed.
 889 * 2) retire_blk_timer is refreshed.
 890 *
 891 */
 892static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 893	struct tpacket_block_desc *pbd1)
 894{
 895	struct timespec64 ts;
 896	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 897
 898	smp_rmb();
 899
 900	/* We could have just memset this but we will lose the
 901	 * flexibility of making the priv area sticky
 902	 */
 903
 904	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 905	BLOCK_NUM_PKTS(pbd1) = 0;
 906	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 907
 908	ktime_get_real_ts64(&ts);
 909
 910	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 911	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 912
 913	pkc1->pkblk_start = (char *)pbd1;
 914	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 915
 916	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 917	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 918
 919	pbd1->version = pkc1->version;
 920	pkc1->prev = pkc1->nxt_offset;
 921	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 922
 923	prb_thaw_queue(pkc1);
 924	_prb_refresh_rx_retire_blk_timer(pkc1);
 925
 926	smp_wmb();
 927}
 928
 929/*
 930 * Queue freeze logic:
 931 * 1) Assume tp_block_nr = 8 blocks.
 932 * 2) At time 't0', user opens Rx ring.
 933 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 934 * 4) user-space is either sleeping or processing block '0'.
 935 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
  936 *    it will close block-7, loop around and try to fill block '0'.
 937 *    call-flow:
 938 *    __packet_lookup_frame_in_block
 939 *      prb_retire_current_block()
 940 *      prb_dispatch_next_block()
 941 *        |->(BLOCK_STATUS == USER) evaluates to true
 942 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 943 * 6) Now there are two cases:
 944 *    6.1) Link goes idle right after the queue is frozen.
 945 *         But remember, the last open_block() refreshed the timer.
  946 *         When this timer expires, it will refresh itself so that we can
 947 *         re-open block-0 in near future.
 948 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 949 *         case and __packet_lookup_frame_in_block will check if block-0
 950 *         is free and can now be re-used.
 951 */
 952static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 953				  struct packet_sock *po)
 954{
 955	pkc->reset_pending_on_curr_blk = 1;
 956	po->stats.stats3.tp_freeze_q_cnt++;
 957}
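/* Illustrative user-space sketch (assumption, not part of the kernel
 * source): the consumer side of the freeze/thaw cycle described above, for
 * a TPACKET_V3 PACKET_RX_RING mmap()ed at 'ring' (a char *).  'fd' is the
 * packet socket, 'block_size'/'block_nr' come from the tpacket_req3 used
 * at setup, and process_block() is a placeholder that walks the
 * tp_next_offset chain inside the block.
 *
 *	unsigned int b = 0;
 *	for (;;) {
 *		struct tpacket_block_desc *pbd =
 *			(void *)(ring + b * block_size);
 *
 *		while (!(pbd->hdr.bh1.block_status & TP_STATUS_USER))
 *			poll(&(struct pollfd){ .fd = fd, .events = POLLIN },
 *			     1, -1);
 *		process_block(pbd);
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// release
 *		b = (b + 1) % block_nr;
 *	}
 *
 * While user space sits on a block, the kernel freezes the queue as in
 * step 5.1 above; writing TP_STATUS_KERNEL back is what allows the block
 * to be re-opened (and the queue thawed) later.
 */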
 958
 959#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 960
 961/*
 962 * If the next block is free then we will dispatch it
 963 * and return a good offset.
 964 * Else, we will freeze the queue.
 965 * So, caller must check the return value.
 966 */
 967static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 968		struct packet_sock *po)
 969{
 970	struct tpacket_block_desc *pbd;
 971
 972	smp_rmb();
 973
 974	/* 1. Get current block num */
 975	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 976
 977	/* 2. If this block is currently in_use then freeze the queue */
 978	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 979		prb_freeze_queue(pkc, po);
 980		return NULL;
 981	}
 982
 983	/*
 984	 * 3.
 985	 * open this block and return the offset where the first packet
 986	 * needs to get stored.
 987	 */
 988	prb_open_block(pkc, pbd);
 989	return (void *)pkc->nxt_offset;
 990}
 991
 992static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 993		struct packet_sock *po, unsigned int status)
 994{
 995	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 996
 997	/* retire/close the current block */
 998	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 999		/*
1000		 * Plug the case where copy_bits() is in progress on
1001		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
1002		 * have space to copy the pkt in the current block and
1003		 * called prb_retire_current_block()
1004		 *
1005		 * We don't need to worry about the TMO case because
1006		 * the timer-handler already handled this case.
1007		 */
1008		if (!(status & TP_STATUS_BLK_TMO)) {
1009			/* Waiting for skb_copy_bits to finish... */
1010			write_lock(&pkc->blk_fill_in_prog_lock);
1011			write_unlock(&pkc->blk_fill_in_prog_lock);
1012		}
1013		prb_close_block(pkc, pbd, po, status);
1014		return;
1015	}
1016}
1017
1018static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
1019{
1020	return TP_STATUS_USER & BLOCK_STATUS(pbd);
1021}
1022
1023static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
1024{
1025	return pkc->reset_pending_on_curr_blk;
1026}
1027
1028static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
1029	__releases(&pkc->blk_fill_in_prog_lock)
1030{
1031	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1032
1033	read_unlock(&pkc->blk_fill_in_prog_lock);
1034}
1035
1036static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
1037			struct tpacket3_hdr *ppd)
1038{
1039	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
1040}
1041
1042static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
1043			struct tpacket3_hdr *ppd)
1044{
1045	ppd->hv1.tp_rxhash = 0;
1046}
1047
1048static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1049			struct tpacket3_hdr *ppd)
1050{
1051	struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);
1052
1053	if (skb_vlan_tag_present(pkc->skb)) {
1054		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1055		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1056		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1057	} else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
1058		ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
1059		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
1060		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1061	} else {
1062		ppd->hv1.tp_vlan_tci = 0;
1063		ppd->hv1.tp_vlan_tpid = 0;
1064		ppd->tp_status = TP_STATUS_AVAILABLE;
1065	}
1066}
1067
1068static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1069			struct tpacket3_hdr *ppd)
1070{
1071	ppd->hv1.tp_padding = 0;
1072	prb_fill_vlan_info(pkc, ppd);
1073
1074	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1075		prb_fill_rxhash(pkc, ppd);
1076	else
1077		prb_clear_rxhash(pkc, ppd);
1078}
1079
1080static void prb_fill_curr_block(char *curr,
1081				struct tpacket_kbdq_core *pkc,
1082				struct tpacket_block_desc *pbd,
1083				unsigned int len)
1084	__acquires(&pkc->blk_fill_in_prog_lock)
1085{
1086	struct tpacket3_hdr *ppd;
1087
1088	ppd  = (struct tpacket3_hdr *)curr;
1089	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1090	pkc->prev = curr;
1091	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1092	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1093	BLOCK_NUM_PKTS(pbd) += 1;
1094	read_lock(&pkc->blk_fill_in_prog_lock);
1095	prb_run_all_ft_ops(pkc, ppd);
1096}
1097
1098/* Assumes caller has the sk->rx_queue.lock */
1099static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1100					    struct sk_buff *skb,
1101					    unsigned int len
1102					    )
1103{
1104	struct tpacket_kbdq_core *pkc;
1105	struct tpacket_block_desc *pbd;
1106	char *curr, *end;
1107
1108	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1109	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1110
1111	/* Queue is frozen when user space is lagging behind */
1112	if (prb_queue_frozen(pkc)) {
1113		/*
 1114		 * Check whether the last block (the one which caused the queue
 1115		 * to freeze) is still in_use by user-space.
1116		 */
1117		if (prb_curr_blk_in_use(pbd)) {
1118			/* Can't record this packet */
1119			return NULL;
1120		} else {
1121			/*
1122			 * Ok, the block was released by user-space.
1123			 * Now let's open that block.
1124			 * opening a block also thaws the queue.
1125			 * Thawing is a side effect.
1126			 */
1127			prb_open_block(pkc, pbd);
1128		}
1129	}
1130
1131	smp_mb();
1132	curr = pkc->nxt_offset;
1133	pkc->skb = skb;
1134	end = (char *)pbd + pkc->kblk_size;
1135
1136	/* first try the current block */
1137	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1138		prb_fill_curr_block(curr, pkc, pbd, len);
1139		return (void *)curr;
1140	}
1141
1142	/* Ok, close the current block */
1143	prb_retire_current_block(pkc, po, 0);
1144
1145	/* Now, try to dispatch the next block */
1146	curr = (char *)prb_dispatch_next_block(pkc, po);
1147	if (curr) {
1148		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1149		prb_fill_curr_block(curr, pkc, pbd, len);
1150		return (void *)curr;
1151	}
1152
1153	/*
 1154	 * No free blocks are available. User-space hasn't caught up yet.
1155	 * Queue was just frozen and now this packet will get dropped.
1156	 */
1157	return NULL;
1158}
1159
1160static void *packet_current_rx_frame(struct packet_sock *po,
1161					    struct sk_buff *skb,
1162					    int status, unsigned int len)
1163{
1164	char *curr = NULL;
1165	switch (po->tp_version) {
1166	case TPACKET_V1:
1167	case TPACKET_V2:
1168		curr = packet_lookup_frame(po, &po->rx_ring,
1169					po->rx_ring.head, status);
1170		return curr;
1171	case TPACKET_V3:
1172		return __packet_lookup_frame_in_block(po, skb, len);
1173	default:
1174		WARN(1, "TPACKET version not supported\n");
1175		BUG();
1176		return NULL;
1177	}
1178}
1179
1180static void *prb_lookup_block(const struct packet_sock *po,
1181			      const struct packet_ring_buffer *rb,
1182			      unsigned int idx,
1183			      int status)
1184{
1185	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1186	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1187
1188	if (status != BLOCK_STATUS(pbd))
1189		return NULL;
1190	return pbd;
1191}
1192
1193static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1194{
1195	unsigned int prev;
1196	if (rb->prb_bdqc.kactive_blk_num)
1197		prev = rb->prb_bdqc.kactive_blk_num-1;
1198	else
1199		prev = rb->prb_bdqc.knum_blocks-1;
1200	return prev;
1201}
1202
1203/* Assumes caller has held the rx_queue.lock */
1204static void *__prb_previous_block(struct packet_sock *po,
1205					 struct packet_ring_buffer *rb,
1206					 int status)
1207{
1208	unsigned int previous = prb_previous_blk_num(rb);
1209	return prb_lookup_block(po, rb, previous, status);
1210}
1211
1212static void *packet_previous_rx_frame(struct packet_sock *po,
1213					     struct packet_ring_buffer *rb,
1214					     int status)
1215{
1216	if (po->tp_version <= TPACKET_V2)
1217		return packet_previous_frame(po, rb, status);
1218
1219	return __prb_previous_block(po, rb, status);
1220}
1221
1222static void packet_increment_rx_head(struct packet_sock *po,
1223					    struct packet_ring_buffer *rb)
1224{
1225	switch (po->tp_version) {
1226	case TPACKET_V1:
1227	case TPACKET_V2:
1228		return packet_increment_head(rb);
1229	case TPACKET_V3:
1230	default:
1231		WARN(1, "TPACKET version not supported.\n");
1232		BUG();
1233		return;
1234	}
1235}
1236
1237static void *packet_previous_frame(struct packet_sock *po,
1238		struct packet_ring_buffer *rb,
1239		int status)
1240{
1241	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1242	return packet_lookup_frame(po, rb, previous, status);
1243}
1244
1245static void packet_increment_head(struct packet_ring_buffer *buff)
1246{
1247	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1248}
1249
1250static void packet_inc_pending(struct packet_ring_buffer *rb)
1251{
1252	this_cpu_inc(*rb->pending_refcnt);
1253}
1254
1255static void packet_dec_pending(struct packet_ring_buffer *rb)
1256{
1257	this_cpu_dec(*rb->pending_refcnt);
1258}
1259
1260static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1261{
1262	unsigned int refcnt = 0;
1263	int cpu;
1264
1265	/* We don't use pending refcount in rx_ring. */
1266	if (rb->pending_refcnt == NULL)
1267		return 0;
1268
1269	for_each_possible_cpu(cpu)
1270		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1271
1272	return refcnt;
1273}
1274
1275static int packet_alloc_pending(struct packet_sock *po)
1276{
1277	po->rx_ring.pending_refcnt = NULL;
1278
1279	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1280	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1281		return -ENOBUFS;
1282
1283	return 0;
1284}
1285
1286static void packet_free_pending(struct packet_sock *po)
1287{
1288	free_percpu(po->tx_ring.pending_refcnt);
1289}
1290
1291#define ROOM_POW_OFF	2
1292#define ROOM_NONE	0x0
1293#define ROOM_LOW	0x1
1294#define ROOM_NORMAL	0x2
1295
1296static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1297{
1298	int idx, len;
1299
1300	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1301	idx = READ_ONCE(po->rx_ring.head);
1302	if (pow_off)
1303		idx += len >> pow_off;
1304	if (idx >= len)
1305		idx -= len;
1306	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1307}
1308
1309static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1310{
1311	int idx, len;
1312
1313	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1314	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1315	if (pow_off)
1316		idx += len >> pow_off;
1317	if (idx >= len)
1318		idx -= len;
1319	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1320}
1321
1322static int __packet_rcv_has_room(const struct packet_sock *po,
1323				 const struct sk_buff *skb)
1324{
1325	const struct sock *sk = &po->sk;
1326	int ret = ROOM_NONE;
1327
1328	if (po->prot_hook.func != tpacket_rcv) {
1329		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1330		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1331				   - (skb ? skb->truesize : 0);
1332
1333		if (avail > (rcvbuf >> ROOM_POW_OFF))
1334			return ROOM_NORMAL;
1335		else if (avail > 0)
1336			return ROOM_LOW;
1337		else
1338			return ROOM_NONE;
1339	}
1340
1341	if (po->tp_version == TPACKET_V3) {
1342		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1343			ret = ROOM_NORMAL;
1344		else if (__tpacket_v3_has_room(po, 0))
1345			ret = ROOM_LOW;
1346	} else {
1347		if (__tpacket_has_room(po, ROOM_POW_OFF))
1348			ret = ROOM_NORMAL;
1349		else if (__tpacket_has_room(po, 0))
1350			ret = ROOM_LOW;
1351	}
1352
1353	return ret;
1354}
1355
1356static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1357{
1358	bool pressure;
1359	int ret;
1360
1361	ret = __packet_rcv_has_room(po, skb);
1362	pressure = ret != ROOM_NORMAL;
1363
1364	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
1365		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);
1366
1367	return ret;
1368}
1369
1370static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1371{
1372	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
1373	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1374		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
1375}
1376
1377static void packet_sock_destruct(struct sock *sk)
1378{
1379	skb_queue_purge(&sk->sk_error_queue);
1380
1381	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1382	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1383
1384	if (!sock_flag(sk, SOCK_DEAD)) {
1385		pr_err("Attempt to release alive packet socket: %p\n", sk);
1386		return;
1387	}
1388}
1389
1390static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1391{
1392	u32 *history = po->rollover->history;
1393	u32 victim, rxhash;
1394	int i, count = 0;
1395
1396	rxhash = skb_get_hash(skb);
1397	for (i = 0; i < ROLLOVER_HLEN; i++)
1398		if (READ_ONCE(history[i]) == rxhash)
1399			count++;
1400
1401	victim = get_random_u32_below(ROLLOVER_HLEN);
1402
1403	/* Avoid dirtying the cache line if possible */
1404	if (READ_ONCE(history[victim]) != rxhash)
1405		WRITE_ONCE(history[victim], rxhash);
1406
1407	return count > (ROLLOVER_HLEN >> 1);
1408}
1409
1410static unsigned int fanout_demux_hash(struct packet_fanout *f,
1411				      struct sk_buff *skb,
1412				      unsigned int num)
1413{
1414	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1415}
1416
1417static unsigned int fanout_demux_lb(struct packet_fanout *f,
1418				    struct sk_buff *skb,
1419				    unsigned int num)
1420{
1421	unsigned int val = atomic_inc_return(&f->rr_cur);
1422
1423	return val % num;
1424}
1425
1426static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1427				     struct sk_buff *skb,
1428				     unsigned int num)
1429{
1430	return smp_processor_id() % num;
1431}
1432
1433static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1434				     struct sk_buff *skb,
1435				     unsigned int num)
1436{
1437	return get_random_u32_below(num);
1438}
1439
1440static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1441					  struct sk_buff *skb,
1442					  unsigned int idx, bool try_self,
1443					  unsigned int num)
1444{
1445	struct packet_sock *po, *po_next, *po_skip = NULL;
1446	unsigned int i, j, room = ROOM_NONE;
1447
1448	po = pkt_sk(rcu_dereference(f->arr[idx]));
1449
1450	if (try_self) {
1451		room = packet_rcv_has_room(po, skb);
1452		if (room == ROOM_NORMAL ||
1453		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1454			return idx;
1455		po_skip = po;
1456	}
1457
1458	i = j = min_t(int, po->rollover->sock, num - 1);
1459	do {
1460		po_next = pkt_sk(rcu_dereference(f->arr[i]));
1461		if (po_next != po_skip &&
1462		    !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
1463		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1464			if (i != j)
1465				po->rollover->sock = i;
1466			atomic_long_inc(&po->rollover->num);
1467			if (room == ROOM_LOW)
1468				atomic_long_inc(&po->rollover->num_huge);
1469			return i;
1470		}
1471
1472		if (++i == num)
1473			i = 0;
1474	} while (i != j);
1475
1476	atomic_long_inc(&po->rollover->num_failed);
1477	return idx;
1478}
1479
1480static unsigned int fanout_demux_qm(struct packet_fanout *f,
1481				    struct sk_buff *skb,
1482				    unsigned int num)
1483{
1484	return skb_get_queue_mapping(skb) % num;
1485}
1486
1487static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1488				     struct sk_buff *skb,
1489				     unsigned int num)
1490{
1491	struct bpf_prog *prog;
1492	unsigned int ret = 0;
1493
1494	rcu_read_lock();
1495	prog = rcu_dereference(f->bpf_prog);
1496	if (prog)
1497		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1498	rcu_read_unlock();
1499
1500	return ret;
1501}
1502
1503static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1504{
1505	return f->flags & (flag >> 8);
1506}
1507
1508static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1509			     struct packet_type *pt, struct net_device *orig_dev)
1510{
1511	struct packet_fanout *f = pt->af_packet_priv;
1512	unsigned int num = READ_ONCE(f->num_members);
1513	struct net *net = read_pnet(&f->net);
1514	struct packet_sock *po;
1515	unsigned int idx;
1516
1517	if (!net_eq(dev_net(dev), net) || !num) {
1518		kfree_skb(skb);
1519		return 0;
1520	}
1521
1522	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1523		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1524		if (!skb)
1525			return 0;
1526	}
1527	switch (f->type) {
1528	case PACKET_FANOUT_HASH:
1529	default:
1530		idx = fanout_demux_hash(f, skb, num);
1531		break;
1532	case PACKET_FANOUT_LB:
1533		idx = fanout_demux_lb(f, skb, num);
1534		break;
1535	case PACKET_FANOUT_CPU:
1536		idx = fanout_demux_cpu(f, skb, num);
1537		break;
1538	case PACKET_FANOUT_RND:
1539		idx = fanout_demux_rnd(f, skb, num);
1540		break;
1541	case PACKET_FANOUT_QM:
1542		idx = fanout_demux_qm(f, skb, num);
1543		break;
1544	case PACKET_FANOUT_ROLLOVER:
1545		idx = fanout_demux_rollover(f, skb, 0, false, num);
1546		break;
1547	case PACKET_FANOUT_CBPF:
1548	case PACKET_FANOUT_EBPF:
1549		idx = fanout_demux_bpf(f, skb, num);
1550		break;
1551	}
1552
1553	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1554		idx = fanout_demux_rollover(f, skb, idx, true, num);
1555
1556	po = pkt_sk(rcu_dereference(f->arr[idx]));
1557	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1558}
1559
1560DEFINE_MUTEX(fanout_mutex);
1561EXPORT_SYMBOL_GPL(fanout_mutex);
1562static LIST_HEAD(fanout_list);
1563static u16 fanout_next_id;
1564
1565static void __fanout_link(struct sock *sk, struct packet_sock *po)
1566{
1567	struct packet_fanout *f = po->fanout;
1568
1569	spin_lock(&f->lock);
1570	rcu_assign_pointer(f->arr[f->num_members], sk);
1571	smp_wmb();
1572	f->num_members++;
1573	if (f->num_members == 1)
1574		dev_add_pack(&f->prot_hook);
1575	spin_unlock(&f->lock);
1576}
1577
1578static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1579{
1580	struct packet_fanout *f = po->fanout;
1581	int i;
1582
1583	spin_lock(&f->lock);
1584	for (i = 0; i < f->num_members; i++) {
1585		if (rcu_dereference_protected(f->arr[i],
1586					      lockdep_is_held(&f->lock)) == sk)
1587			break;
1588	}
1589	BUG_ON(i >= f->num_members);
1590	rcu_assign_pointer(f->arr[i],
1591			   rcu_dereference_protected(f->arr[f->num_members - 1],
1592						     lockdep_is_held(&f->lock)));
1593	f->num_members--;
1594	if (f->num_members == 0)
1595		__dev_remove_pack(&f->prot_hook);
1596	spin_unlock(&f->lock);
1597}
1598
1599static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1600{
1601	if (sk->sk_family != PF_PACKET)
1602		return false;
1603
1604	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1605}
1606
1607static void fanout_init_data(struct packet_fanout *f)
1608{
1609	switch (f->type) {
1610	case PACKET_FANOUT_LB:
1611		atomic_set(&f->rr_cur, 0);
1612		break;
1613	case PACKET_FANOUT_CBPF:
1614	case PACKET_FANOUT_EBPF:
1615		RCU_INIT_POINTER(f->bpf_prog, NULL);
1616		break;
1617	}
1618}
1619
1620static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1621{
1622	struct bpf_prog *old;
1623
1624	spin_lock(&f->lock);
1625	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1626	rcu_assign_pointer(f->bpf_prog, new);
1627	spin_unlock(&f->lock);
1628
1629	if (old) {
1630		synchronize_net();
1631		bpf_prog_destroy(old);
1632	}
1633}
1634
1635static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1636				unsigned int len)
1637{
1638	struct bpf_prog *new;
1639	struct sock_fprog fprog;
1640	int ret;
1641
1642	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1643		return -EPERM;
1644
1645	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1646	if (ret)
1647		return ret;
1648
1649	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1650	if (ret)
1651		return ret;
1652
1653	__fanout_set_data_bpf(po->fanout, new);
1654	return 0;
1655}
1656
1657static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1658				unsigned int len)
1659{
1660	struct bpf_prog *new;
1661	u32 fd;
1662
1663	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1664		return -EPERM;
1665	if (len != sizeof(fd))
1666		return -EINVAL;
1667	if (copy_from_sockptr(&fd, data, len))
1668		return -EFAULT;
1669
1670	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1671	if (IS_ERR(new))
1672		return PTR_ERR(new);
1673
1674	__fanout_set_data_bpf(po->fanout, new);
1675	return 0;
1676}
1677
1678static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1679			   unsigned int len)
1680{
1681	switch (po->fanout->type) {
1682	case PACKET_FANOUT_CBPF:
1683		return fanout_set_data_cbpf(po, data, len);
1684	case PACKET_FANOUT_EBPF:
1685		return fanout_set_data_ebpf(po, data, len);
1686	default:
1687		return -EINVAL;
1688	}
1689}
1690
1691static void fanout_release_data(struct packet_fanout *f)
1692{
1693	switch (f->type) {
1694	case PACKET_FANOUT_CBPF:
1695	case PACKET_FANOUT_EBPF:
1696		__fanout_set_data_bpf(f, NULL);
1697	}
1698}
1699
1700static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1701{
1702	struct packet_fanout *f;
1703
1704	list_for_each_entry(f, &fanout_list, list) {
1705		if (f->id == candidate_id &&
1706		    read_pnet(&f->net) == sock_net(sk)) {
1707			return false;
1708		}
1709	}
1710	return true;
1711}
1712
1713static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1714{
1715	u16 id = fanout_next_id;
1716
1717	do {
1718		if (__fanout_id_is_free(sk, id)) {
1719			*new_id = id;
1720			fanout_next_id = id + 1;
1721			return true;
1722		}
1723
1724		id++;
1725	} while (id != fanout_next_id);
1726
1727	return false;
1728}
1729
1730static int fanout_add(struct sock *sk, struct fanout_args *args)
1731{
1732	struct packet_rollover *rollover = NULL;
1733	struct packet_sock *po = pkt_sk(sk);
1734	u16 type_flags = args->type_flags;
1735	struct packet_fanout *f, *match;
1736	u8 type = type_flags & 0xff;
1737	u8 flags = type_flags >> 8;
1738	u16 id = args->id;
1739	int err;
1740
1741	switch (type) {
1742	case PACKET_FANOUT_ROLLOVER:
1743		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1744			return -EINVAL;
1745		break;
1746	case PACKET_FANOUT_HASH:
1747	case PACKET_FANOUT_LB:
1748	case PACKET_FANOUT_CPU:
1749	case PACKET_FANOUT_RND:
1750	case PACKET_FANOUT_QM:
1751	case PACKET_FANOUT_CBPF:
1752	case PACKET_FANOUT_EBPF:
1753		break;
1754	default:
1755		return -EINVAL;
1756	}
1757
1758	mutex_lock(&fanout_mutex);
1759
1760	err = -EALREADY;
1761	if (po->fanout)
1762		goto out;
1763
1764	if (type == PACKET_FANOUT_ROLLOVER ||
1765	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1766		err = -ENOMEM;
1767		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1768		if (!rollover)
1769			goto out;
1770		atomic_long_set(&rollover->num, 0);
1771		atomic_long_set(&rollover->num_huge, 0);
1772		atomic_long_set(&rollover->num_failed, 0);
1773	}
1774
1775	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1776		if (id != 0) {
1777			err = -EINVAL;
1778			goto out;
1779		}
1780		if (!fanout_find_new_id(sk, &id)) {
1781			err = -ENOMEM;
1782			goto out;
1783		}
1784		/* ephemeral flag for the first socket in the group: drop it */
1785		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1786	}
1787
1788	match = NULL;
1789	list_for_each_entry(f, &fanout_list, list) {
1790		if (f->id == id &&
1791		    read_pnet(&f->net) == sock_net(sk)) {
1792			match = f;
1793			break;
1794		}
1795	}
1796	err = -EINVAL;
1797	if (match) {
1798		if (match->flags != flags)
1799			goto out;
1800		if (args->max_num_members &&
1801		    args->max_num_members != match->max_num_members)
1802			goto out;
1803	} else {
1804		if (args->max_num_members > PACKET_FANOUT_MAX)
1805			goto out;
1806		if (!args->max_num_members)
1807			/* legacy PACKET_FANOUT_MAX */
1808			args->max_num_members = 256;
1809		err = -ENOMEM;
1810		match = kvzalloc(struct_size(match, arr, args->max_num_members),
1811				 GFP_KERNEL);
1812		if (!match)
1813			goto out;
1814		write_pnet(&match->net, sock_net(sk));
1815		match->id = id;
1816		match->type = type;
1817		match->flags = flags;
1818		INIT_LIST_HEAD(&match->list);
1819		spin_lock_init(&match->lock);
1820		refcount_set(&match->sk_ref, 0);
1821		fanout_init_data(match);
1822		match->prot_hook.type = po->prot_hook.type;
1823		match->prot_hook.dev = po->prot_hook.dev;
1824		match->prot_hook.func = packet_rcv_fanout;
1825		match->prot_hook.af_packet_priv = match;
1826		match->prot_hook.af_packet_net = read_pnet(&match->net);
1827		match->prot_hook.id_match = match_fanout_group;
1828		match->max_num_members = args->max_num_members;
1829		match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
1830		list_add(&match->list, &fanout_list);
1831	}
1832	err = -EINVAL;
1833
1834	spin_lock(&po->bind_lock);
1835	if (po->num &&
1836	    match->type == type &&
1837	    match->prot_hook.type == po->prot_hook.type &&
1838	    match->prot_hook.dev == po->prot_hook.dev) {
1839		err = -ENOSPC;
1840		if (refcount_read(&match->sk_ref) < match->max_num_members) {
1841			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1842			WRITE_ONCE(po->fanout, match);
1843
1844			po->rollover = rollover;
1845			rollover = NULL;
1846			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1847			if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
1848				__dev_remove_pack(&po->prot_hook);
1849				__fanout_link(sk, po);
1850			}
1851			err = 0;
1852		}
1853	}
1854	spin_unlock(&po->bind_lock);
1855
1856	if (err && !refcount_read(&match->sk_ref)) {
1857		list_del(&match->list);
1858		kvfree(match);
1859	}
1860
1861out:
1862	kfree(rollover);
1863	mutex_unlock(&fanout_mutex);
1864	return err;
1865}
1866
1867/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1868 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1869 * It is the responsibility of the caller to call fanout_release_data() and
1870 * free the returned packet_fanout (after synchronize_net())
1871 */
1872static struct packet_fanout *fanout_release(struct sock *sk)
1873{
1874	struct packet_sock *po = pkt_sk(sk);
1875	struct packet_fanout *f;
1876
1877	mutex_lock(&fanout_mutex);
1878	f = po->fanout;
1879	if (f) {
1880		po->fanout = NULL;
1881
1882		if (refcount_dec_and_test(&f->sk_ref))
1883			list_del(&f->list);
1884		else
1885			f = NULL;
1886	}
1887	mutex_unlock(&fanout_mutex);
1888
1889	return f;
1890}
1891
1892static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1893					  struct sk_buff *skb)
1894{
1895	/* Earlier code assumed this would be a VLAN pkt, double-check
1896	 * this now that we have the actual packet in hand. We can only
1897	 * do this check on Ethernet devices.
1898	 */
1899	if (unlikely(dev->type != ARPHRD_ETHER))
1900		return false;
1901
1902	skb_reset_mac_header(skb);
1903	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1904}
1905
1906static const struct proto_ops packet_ops;
1907
1908static const struct proto_ops packet_ops_spkt;
1909
1910static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1911			   struct packet_type *pt, struct net_device *orig_dev)
1912{
1913	struct sock *sk;
1914	struct sockaddr_pkt *spkt;
1915
1916	/*
1917	 *	When we registered the protocol we saved the socket in the data
1918	 *	field for just this event.
1919	 */
1920
1921	sk = pt->af_packet_priv;
1922
1923	/*
1924	 *	Yank back the headers [hope the device set this
1925	 *	right or kerboom...]
1926	 *
1927	 *	Incoming packets have ll header pulled,
1928	 *	push it back.
1929	 *
1930	 *	For outgoing ones skb->data == skb_mac_header(skb),
1931	 *	so this procedure is a no-op.
1932	 */
1933
1934	if (skb->pkt_type == PACKET_LOOPBACK)
1935		goto out;
1936
1937	if (!net_eq(dev_net(dev), sock_net(sk)))
1938		goto out;
1939
1940	skb = skb_share_check(skb, GFP_ATOMIC);
1941	if (skb == NULL)
1942		goto oom;
1943
1944	/* drop any routing info */
1945	skb_dst_drop(skb);
1946
1947	/* drop conntrack reference */
1948	nf_reset_ct(skb);
1949
1950	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1951
1952	skb_push(skb, skb->data - skb_mac_header(skb));
1953
1954	/*
1955	 *	The SOCK_PACKET socket receives _all_ frames.
1956	 */
1957
1958	spkt->spkt_family = dev->type;
1959	strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1960	spkt->spkt_protocol = skb->protocol;
1961
1962	/*
1963	 *	Charge the memory to the socket. This is done specifically
1964	 *	to prevent sockets from using up all the memory.
1965	 */
1966
1967	if (sock_queue_rcv_skb(sk, skb) == 0)
1968		return 0;
1969
1970out:
1971	kfree_skb(skb);
1972oom:
1973	return 0;
1974}
1975
1976static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1977{
1978	int depth;
1979
1980	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1981	    sock->type == SOCK_RAW) {
1982		skb_reset_mac_header(skb);
1983		skb->protocol = dev_parse_header_protocol(skb);
1984	}
1985
1986	/* Move network header to the right position for VLAN tagged packets */
1987	if (likely(skb->dev->type == ARPHRD_ETHER) &&
1988	    eth_type_vlan(skb->protocol) &&
1989	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
1990		skb_set_network_header(skb, depth);
1991
1992	skb_probe_transport_header(skb);
1993}
1994
1995/*
1996 *	Output a raw packet to a device layer. This bypasses all the other
1997 *	protocol layers and you must therefore supply it with a complete frame
1998 */
1999
2000static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
2001			       size_t len)
2002{
2003	struct sock *sk = sock->sk;
2004	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
2005	struct sk_buff *skb = NULL;
2006	struct net_device *dev;
2007	struct sockcm_cookie sockc;
2008	__be16 proto = 0;
2009	int err;
2010	int extra_len = 0;
2011
2012	/*
2013	 *	Get and verify the address.
2014	 */
2015
2016	if (saddr) {
2017		if (msg->msg_namelen < sizeof(struct sockaddr))
2018			return -EINVAL;
2019		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
2020			proto = saddr->spkt_protocol;
2021	} else
2022		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
2023
2024	/*
2025	 *	Find the device first to size check it
2026	 */
2027
2028	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
2029retry:
2030	rcu_read_lock();
2031	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
2032	err = -ENODEV;
2033	if (dev == NULL)
2034		goto out_unlock;
2035
2036	err = -ENETDOWN;
2037	if (!(dev->flags & IFF_UP))
2038		goto out_unlock;
2039
2040	/*
2041	 * You may not queue a frame bigger than the mtu. This is the lowest level
2042	 * raw protocol and you must do your own fragmentation at this level.
2043	 */
2044
2045	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2046		if (!netif_supports_nofcs(dev)) {
2047			err = -EPROTONOSUPPORT;
2048			goto out_unlock;
2049		}
2050		extra_len = 4; /* We're doing our own CRC */
2051	}
2052
2053	err = -EMSGSIZE;
2054	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
2055		goto out_unlock;
2056
2057	if (!skb) {
2058		size_t reserved = LL_RESERVED_SPACE(dev);
2059		int tlen = dev->needed_tailroom;
2060		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
2061
2062		rcu_read_unlock();
2063		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
2064		if (skb == NULL)
2065			return -ENOBUFS;
2066		/* FIXME: Save some space for broken drivers that write a hard
2067		 * header at transmission time by themselves. PPP is the notable
2068		 * one here. This should really be fixed at the driver level.
2069		 */
2070		skb_reserve(skb, reserved);
2071		skb_reset_network_header(skb);
2072
2073		/* Try to align data part correctly */
2074		if (hhlen) {
2075			skb->data -= hhlen;
2076			skb->tail -= hhlen;
2077			if (len < hhlen)
2078				skb_reset_network_header(skb);
2079		}
2080		err = memcpy_from_msg(skb_put(skb, len), msg, len);
2081		if (err)
2082			goto out_free;
2083		goto retry;
2084	}
2085
2086	if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
2087		err = -EINVAL;
2088		goto out_unlock;
2089	}
2090	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
2091	    !packet_extra_vlan_len_allowed(dev, skb)) {
2092		err = -EMSGSIZE;
2093		goto out_unlock;
2094	}
2095
2096	sockcm_init(&sockc, sk);
2097	if (msg->msg_controllen) {
2098		err = sock_cmsg_send(sk, msg, &sockc);
2099		if (unlikely(err))
2100			goto out_unlock;
2101	}
2102
2103	skb->protocol = proto;
2104	skb->dev = dev;
2105	skb->priority = READ_ONCE(sk->sk_priority);
2106	skb->mark = READ_ONCE(sk->sk_mark);
2107	skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
2108	skb_setup_tx_timestamp(skb, &sockc);
2109
2110	if (unlikely(extra_len == 4))
2111		skb->no_fcs = 1;
2112
2113	packet_parse_headers(skb, sock);
2114
2115	dev_queue_xmit(skb);
2116	rcu_read_unlock();
2117	return len;
2118
2119out_unlock:
2120	rcu_read_unlock();
2121out_free:
2122	kfree_skb(skb);
2123	return err;
2124}
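
/*
 * Illustrative userspace sketch (not part of this file): sending one complete
 * frame through packet_sendmsg_spkt() above on an obsolete SOCK_PACKET
 * socket.  "eth0" is an example device; the caller must build the whole
 * link-layer header itself.  Headers assumed: <sys/socket.h>,
 * <linux/if_packet.h>, <linux/if_ether.h>, <arpa/inet.h>.
 *
 *	static ssize_t spkt_send(int fd, const void *frame, size_t len)
 *	{
 *		struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *		strncpy((char *)spkt.spkt_device, "eth0",
 *			sizeof(spkt.spkt_device) - 1);
 *		spkt.spkt_protocol = htons(ETH_P_IP);
 *		return sendto(fd, frame, len, 0,
 *			      (struct sockaddr *)&spkt, sizeof(spkt));
 *	}
 */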
2125
2126static unsigned int run_filter(struct sk_buff *skb,
2127			       const struct sock *sk,
2128			       unsigned int res)
2129{
2130	struct sk_filter *filter;
2131
2132	rcu_read_lock();
2133	filter = rcu_dereference(sk->sk_filter);
2134	if (filter != NULL)
2135		res = bpf_prog_run_clear_cb(filter->prog, skb);
2136	rcu_read_unlock();
2137
2138	return res;
2139}
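
/*
 * Illustrative userspace sketch (not part of this file): attaching a classic
 * BPF program that truncates every packet to 96 bytes; run_filter() above
 * executes the program on each skb and its return value becomes the snaplen.
 * Headers assumed: <linux/filter.h> and <sys/socket.h>.
 *
 *	static int attach_trunc_filter(int fd)
 *	{
 *		struct sock_filter code[] = {
 *			{ BPF_RET | BPF_K, 0, 0, 96 },
 *		};
 *		struct sock_fprog prog = {
 *			.len = 1,
 *			.filter = code,
 *		};
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *				  &prog, sizeof(prog));
 *	}
 */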
2140
2141static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2142			   size_t *len, int vnet_hdr_sz)
2143{
2144	struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };
2145
2146	if (*len < vnet_hdr_sz)
2147		return -EINVAL;
2148	*len -= vnet_hdr_sz;
2149
2150	if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0))
2151		return -EINVAL;
2152
2153	return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
2154}
2155
2156/*
2157 * This function does lazy skb cloning in the hope that most packets
2158 * are discarded by BPF.
2159 *
2160 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2161 * and skb->cb are mangled. It works because (and as long as) packets
2162 * falling here are owned by the current CPU. Output packets are cloned
2163 * by dev_queue_xmit_nit(), and input packets are processed by net_bh
2164 * sequentially, so if we return the skb to its original state on exit,
2165 * we will not harm anyone.
2166 */
2167
2168static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2169		      struct packet_type *pt, struct net_device *orig_dev)
2170{
2171	enum skb_drop_reason drop_reason = SKB_CONSUMED;
2172	struct sock *sk = NULL;
2173	struct sockaddr_ll *sll;
2174	struct packet_sock *po;
2175	u8 *skb_head = skb->data;
2176	int skb_len = skb->len;
2177	unsigned int snaplen, res;
2178
2179	if (skb->pkt_type == PACKET_LOOPBACK)
2180		goto drop;
2181
2182	sk = pt->af_packet_priv;
2183	po = pkt_sk(sk);
2184
2185	if (!net_eq(dev_net(dev), sock_net(sk)))
2186		goto drop;
2187
2188	skb->dev = dev;
2189
2190	if (dev_has_header(dev)) {
2191		/* The device has an explicit notion of ll header,
2192		 * exported to higher levels.
2193		 *
2194		 * Otherwise, the device hides details of its frame
2195		 * structure, so that the corresponding packet head is
2196		 * never delivered to the user.
2197		 */
2198		if (sk->sk_type != SOCK_DGRAM)
2199			skb_push(skb, skb->data - skb_mac_header(skb));
2200		else if (skb->pkt_type == PACKET_OUTGOING) {
2201			/* Special case: outgoing packets have ll header at head */
2202			skb_pull(skb, skb_network_offset(skb));
2203		}
2204	}
2205
2206	snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
2207
2208	res = run_filter(skb, sk, snaplen);
2209	if (!res)
2210		goto drop_n_restore;
2211	if (snaplen > res)
2212		snaplen = res;
2213
2214	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2215		goto drop_n_acct;
2216
2217	if (skb_shared(skb)) {
2218		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2219		if (nskb == NULL)
2220			goto drop_n_acct;
2221
2222		if (skb_head != skb->data) {
2223			skb->data = skb_head;
2224			skb->len = skb_len;
2225		}
2226		consume_skb(skb);
2227		skb = nskb;
2228	}
2229
2230	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2231
2232	sll = &PACKET_SKB_CB(skb)->sa.ll;
2233	sll->sll_hatype = dev->type;
2234	sll->sll_pkttype = skb->pkt_type;
2235	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2236		sll->sll_ifindex = orig_dev->ifindex;
2237	else
2238		sll->sll_ifindex = dev->ifindex;
2239
2240	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2241
2242	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2243	 * Use their space for storing the original skb length.
2244	 */
2245	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2246
2247	if (pskb_trim(skb, snaplen))
2248		goto drop_n_acct;
2249
2250	skb_set_owner_r(skb, sk);
2251	skb->dev = NULL;
2252	skb_dst_drop(skb);
2253
2254	/* drop conntrack reference */
2255	nf_reset_ct(skb);
2256
2257	spin_lock(&sk->sk_receive_queue.lock);
2258	po->stats.stats1.tp_packets++;
2259	sock_skb_set_dropcount(sk, skb);
2260	skb_clear_delivery_time(skb);
2261	__skb_queue_tail(&sk->sk_receive_queue, skb);
2262	spin_unlock(&sk->sk_receive_queue.lock);
2263	sk->sk_data_ready(sk);
2264	return 0;
2265
2266drop_n_acct:
2267	atomic_inc(&po->tp_drops);
2268	atomic_inc(&sk->sk_drops);
2269	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
2270
2271drop_n_restore:
2272	if (skb_head != skb->data && skb_shared(skb)) {
2273		skb->data = skb_head;
2274		skb->len = skb_len;
2275	}
2276drop:
2277	sk_skb_reason_drop(sk, skb, drop_reason);
2278	return 0;
2279}
2280
2281static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2282		       struct packet_type *pt, struct net_device *orig_dev)
2283{
2284	enum skb_drop_reason drop_reason = SKB_CONSUMED;
2285	struct sock *sk = NULL;
2286	struct packet_sock *po;
2287	struct sockaddr_ll *sll;
2288	union tpacket_uhdr h;
2289	u8 *skb_head = skb->data;
2290	int skb_len = skb->len;
2291	unsigned int snaplen, res;
2292	unsigned long status = TP_STATUS_USER;
2293	unsigned short macoff, hdrlen;
2294	unsigned int netoff;
2295	struct sk_buff *copy_skb = NULL;
2296	struct timespec64 ts;
2297	__u32 ts_status;
2298	unsigned int slot_id = 0;
2299	int vnet_hdr_sz = 0;
2300
2301	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2302	 * We may add members to them up to the current aligned size without forcing
2303	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2304	 */
2305	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2306	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2307
2308	if (skb->pkt_type == PACKET_LOOPBACK)
2309		goto drop;
2310
2311	sk = pt->af_packet_priv;
2312	po = pkt_sk(sk);
2313
2314	if (!net_eq(dev_net(dev), sock_net(sk)))
2315		goto drop;
2316
2317	if (dev_has_header(dev)) {
2318		if (sk->sk_type != SOCK_DGRAM)
2319			skb_push(skb, skb->data - skb_mac_header(skb));
2320		else if (skb->pkt_type == PACKET_OUTGOING) {
2321			/* Special case: outgoing packets have ll header at head */
2322			skb_pull(skb, skb_network_offset(skb));
2323		}
2324	}
2325
2326	snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
2327
2328	res = run_filter(skb, sk, snaplen);
2329	if (!res)
2330		goto drop_n_restore;
2331
2332	/* If we are flooded, just give up */
2333	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2334		atomic_inc(&po->tp_drops);
2335		goto drop_n_restore;
2336	}
2337
2338	if (skb->ip_summed == CHECKSUM_PARTIAL)
2339		status |= TP_STATUS_CSUMNOTREADY;
2340	else if (skb->pkt_type != PACKET_OUTGOING &&
2341		 skb_csum_unnecessary(skb))
2342		status |= TP_STATUS_CSUM_VALID;
2343	if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
2344		status |= TP_STATUS_GSO_TCP;
2345
2346	if (snaplen > res)
2347		snaplen = res;
2348
2349	if (sk->sk_type == SOCK_DGRAM) {
2350		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2351				  po->tp_reserve;
2352	} else {
2353		unsigned int maclen = skb_network_offset(skb);
2354		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2355				       (maclen < 16 ? 16 : maclen)) +
2356				       po->tp_reserve;
2357		vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2358		if (vnet_hdr_sz)
2359			netoff += vnet_hdr_sz;
2360		macoff = netoff - maclen;
2361	}
2362	if (netoff > USHRT_MAX) {
2363		atomic_inc(&po->tp_drops);
2364		goto drop_n_restore;
2365	}
2366	if (po->tp_version <= TPACKET_V2) {
2367		if (macoff + snaplen > po->rx_ring.frame_size) {
2368			if (READ_ONCE(po->copy_thresh) &&
2369			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2370				if (skb_shared(skb)) {
2371					copy_skb = skb_clone(skb, GFP_ATOMIC);
2372				} else {
2373					copy_skb = skb_get(skb);
2374					skb_head = skb->data;
2375				}
2376				if (copy_skb) {
2377					memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2378					       sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2379					skb_set_owner_r(copy_skb, sk);
2380				}
2381			}
2382			snaplen = po->rx_ring.frame_size - macoff;
2383			if ((int)snaplen < 0) {
2384				snaplen = 0;
2385				vnet_hdr_sz = 0;
2386			}
2387		}
2388	} else if (unlikely(macoff + snaplen >
2389			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2390		u32 nval;
2391
2392		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2393		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2394			    snaplen, nval, macoff);
2395		snaplen = nval;
2396		if (unlikely((int)snaplen < 0)) {
2397			snaplen = 0;
2398			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2399			vnet_hdr_sz = 0;
2400		}
2401	}
2402	spin_lock(&sk->sk_receive_queue.lock);
2403	h.raw = packet_current_rx_frame(po, skb,
2404					TP_STATUS_KERNEL, (macoff+snaplen));
2405	if (!h.raw)
2406		goto drop_n_account;
2407
2408	if (po->tp_version <= TPACKET_V2) {
2409		slot_id = po->rx_ring.head;
2410		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2411			goto drop_n_account;
2412		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2413	}
2414
2415	if (vnet_hdr_sz &&
2416	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2417				    sizeof(struct virtio_net_hdr),
2418				    vio_le(), true, 0)) {
2419		if (po->tp_version == TPACKET_V3)
2420			prb_clear_blk_fill_status(&po->rx_ring);
2421		goto drop_n_account;
2422	}
2423
2424	if (po->tp_version <= TPACKET_V2) {
2425		packet_increment_rx_head(po, &po->rx_ring);
2426	/*
2427	 * LOSING will be reported until you read the stats,
2428	 * because it's COR - Clear On Read.
2429	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2430	 * at the packet level.
2431	 */
2432		if (atomic_read(&po->tp_drops))
2433			status |= TP_STATUS_LOSING;
2434	}
2435
2436	po->stats.stats1.tp_packets++;
2437	if (copy_skb) {
2438		status |= TP_STATUS_COPY;
2439		skb_clear_delivery_time(copy_skb);
2440		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2441	}
2442	spin_unlock(&sk->sk_receive_queue.lock);
2443
2444	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2445
2446	/* Always timestamp; prefer an existing software timestamp taken
2447	 * closer to the time of capture.
2448	 */
2449	ts_status = tpacket_get_timestamp(skb, &ts,
2450					  READ_ONCE(po->tp_tstamp) |
2451					  SOF_TIMESTAMPING_SOFTWARE);
2452	if (!ts_status)
2453		ktime_get_real_ts64(&ts);
2454
2455	status |= ts_status;
2456
2457	switch (po->tp_version) {
2458	case TPACKET_V1:
2459		h.h1->tp_len = skb->len;
2460		h.h1->tp_snaplen = snaplen;
2461		h.h1->tp_mac = macoff;
2462		h.h1->tp_net = netoff;
2463		h.h1->tp_sec = ts.tv_sec;
2464		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2465		hdrlen = sizeof(*h.h1);
2466		break;
2467	case TPACKET_V2:
2468		h.h2->tp_len = skb->len;
2469		h.h2->tp_snaplen = snaplen;
2470		h.h2->tp_mac = macoff;
2471		h.h2->tp_net = netoff;
2472		h.h2->tp_sec = ts.tv_sec;
2473		h.h2->tp_nsec = ts.tv_nsec;
2474		if (skb_vlan_tag_present(skb)) {
2475			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2476			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2477			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2478		} else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
2479			h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
2480			h.h2->tp_vlan_tpid = ntohs(skb->protocol);
2481			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2482		} else {
2483			h.h2->tp_vlan_tci = 0;
2484			h.h2->tp_vlan_tpid = 0;
2485		}
2486		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2487		hdrlen = sizeof(*h.h2);
2488		break;
2489	case TPACKET_V3:
2490		/* tp_next_offset and the vlan fields are already populated above,
2491		 * so DO NOT clear those fields here.
2492		 */
2493		h.h3->tp_status |= status;
2494		h.h3->tp_len = skb->len;
2495		h.h3->tp_snaplen = snaplen;
2496		h.h3->tp_mac = macoff;
2497		h.h3->tp_net = netoff;
2498		h.h3->tp_sec  = ts.tv_sec;
2499		h.h3->tp_nsec = ts.tv_nsec;
2500		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2501		hdrlen = sizeof(*h.h3);
2502		break;
2503	default:
2504		BUG();
2505	}
2506
2507	sll = h.raw + TPACKET_ALIGN(hdrlen);
2508	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2509	sll->sll_family = AF_PACKET;
2510	sll->sll_hatype = dev->type;
2511	sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
2512		vlan_get_protocol_dgram(skb) : skb->protocol;
2513	sll->sll_pkttype = skb->pkt_type;
2514	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2515		sll->sll_ifindex = orig_dev->ifindex;
2516	else
2517		sll->sll_ifindex = dev->ifindex;
2518
2519	smp_mb();
2520
2521#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2522	if (po->tp_version <= TPACKET_V2) {
2523		u8 *start, *end;
2524
2525		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2526					macoff + snaplen);
2527
2528		for (start = h.raw; start < end; start += PAGE_SIZE)
2529			flush_dcache_page(pgv_to_page(start));
2530	}
2531	smp_wmb();
2532#endif
2533
2534	if (po->tp_version <= TPACKET_V2) {
2535		spin_lock(&sk->sk_receive_queue.lock);
2536		__packet_set_status(po, h.raw, status);
2537		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2538		spin_unlock(&sk->sk_receive_queue.lock);
2539		sk->sk_data_ready(sk);
2540	} else if (po->tp_version == TPACKET_V3) {
2541		prb_clear_blk_fill_status(&po->rx_ring);
2542	}
2543
2544drop_n_restore:
2545	if (skb_head != skb->data && skb_shared(skb)) {
2546		skb->data = skb_head;
2547		skb->len = skb_len;
2548	}
2549drop:
2550	sk_skb_reason_drop(sk, skb, drop_reason);
2551	return 0;
2552
2553drop_n_account:
2554	spin_unlock(&sk->sk_receive_queue.lock);
2555	atomic_inc(&po->tp_drops);
2556	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
2557
2558	sk->sk_data_ready(sk);
2559	sk_skb_reason_drop(sk, copy_skb, drop_reason);
2560	goto drop_n_restore;
2561}
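
/*
 * Illustrative userspace sketch (not part of this file): a minimal TPACKET_V2
 * receive ring of the kind tpacket_rcv() above fills in.  The sizes are
 * example values; tp_frame_nr must equal tp_block_nr times the number of
 * frames that fit in one block.  Headers assumed: <linux/if_packet.h>,
 * <sys/socket.h>, <sys/mman.h>.
 *
 *	static void *setup_rx_ring(int fd)
 *	{
 *		int ver = TPACKET_V2;
 *		struct tpacket_req req = {
 *			.tp_block_size = 4096,
 *			.tp_block_nr   = 64,
 *			.tp_frame_size = 2048,
 *			.tp_frame_nr   = 128,
 *		};
 *
 *		if (setsockopt(fd, SOL_PACKET, PACKET_VERSION,
 *			       &ver, sizeof(ver)) ||
 *		    setsockopt(fd, SOL_PACKET, PACKET_RX_RING,
 *			       &req, sizeof(req)))
 *			return NULL;
 *		return mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	}
 */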
2562
2563static void tpacket_destruct_skb(struct sk_buff *skb)
2564{
2565	struct packet_sock *po = pkt_sk(skb->sk);
2566
2567	if (likely(po->tx_ring.pg_vec)) {
2568		void *ph;
2569		__u32 ts;
2570
2571		ph = skb_zcopy_get_nouarg(skb);
2572		packet_dec_pending(&po->tx_ring);
2573
2574		ts = __packet_set_timestamp(po, ph, skb);
2575		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2576
2577		complete(&po->skb_completion);
2578	}
2579
2580	sock_wfree(skb);
2581}
2582
2583static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2584{
2585	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2586	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2587	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2588	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2589		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2590			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2591			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2592
2593	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2594		return -EINVAL;
2595
2596	return 0;
2597}
2598
2599static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2600				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
2601{
2602	int ret;
2603
2604	if (*len < vnet_hdr_sz)
2605		return -EINVAL;
2606	*len -= vnet_hdr_sz;
2607
2608	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2609		return -EFAULT;
2610
2611	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
2612	if (ret)
2613		return ret;
2614
2615	/* move iter to point to the start of mac header */
2616	if (vnet_hdr_sz != sizeof(struct virtio_net_hdr))
2617		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
2618
2619	return 0;
2620}
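
/*
 * Illustrative userspace sketch (not part of this file): enabling
 * PACKET_VNET_HDR so that every frame written to the socket is preceded by a
 * struct virtio_net_hdr, which packet_snd_vnet_parse() above strips and
 * validates.  Headers assumed: <linux/virtio_net.h>, <linux/if_packet.h>,
 * <sys/socket.h>.
 *
 *	static ssize_t send_with_vnet_hdr(int fd, void *frame, size_t len)
 *	{
 *		int one = 1;
 *		struct virtio_net_hdr vh = {
 *			.gso_type = VIRTIO_NET_HDR_GSO_NONE,
 *		};
 *		struct iovec iov[2] = {
 *			{ .iov_base = &vh,   .iov_len = sizeof(vh) },
 *			{ .iov_base = frame, .iov_len = len },
 *		};
 *		struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *		setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &one, sizeof(one));
 *		return sendmsg(fd, &msg, 0);
 *	}
 */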
2621
2622static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2623		void *frame, struct net_device *dev, void *data, int tp_len,
2624		__be16 proto, unsigned char *addr, int hlen, int copylen,
2625		const struct sockcm_cookie *sockc)
2626{
2627	union tpacket_uhdr ph;
2628	int to_write, offset, len, nr_frags, len_max;
2629	struct socket *sock = po->sk.sk_socket;
2630	struct page *page;
2631	int err;
2632
2633	ph.raw = frame;
2634
2635	skb->protocol = proto;
2636	skb->dev = dev;
2637	skb->priority = READ_ONCE(po->sk.sk_priority);
2638	skb->mark = READ_ONCE(po->sk.sk_mark);
2639	skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid);
2640	skb_setup_tx_timestamp(skb, sockc);
2641	skb_zcopy_set_nouarg(skb, ph.raw);
2642
2643	skb_reserve(skb, hlen);
2644	skb_reset_network_header(skb);
2645
2646	to_write = tp_len;
2647
2648	if (sock->type == SOCK_DGRAM) {
2649		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2650				NULL, tp_len);
2651		if (unlikely(err < 0))
2652			return -EINVAL;
2653	} else if (copylen) {
2654		int hdrlen = min_t(int, copylen, tp_len);
2655
2656		skb_push(skb, dev->hard_header_len);
2657		skb_put(skb, copylen - dev->hard_header_len);
2658		err = skb_store_bits(skb, 0, data, hdrlen);
2659		if (unlikely(err))
2660			return err;
2661		if (!dev_validate_header(dev, skb->data, hdrlen))
2662			return -EINVAL;
2663
2664		data += hdrlen;
2665		to_write -= hdrlen;
2666	}
2667
2668	offset = offset_in_page(data);
2669	len_max = PAGE_SIZE - offset;
2670	len = ((to_write > len_max) ? len_max : to_write);
2671
2672	skb->data_len = to_write;
2673	skb->len += to_write;
2674	skb->truesize += to_write;
2675	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2676
2677	while (likely(to_write)) {
2678		nr_frags = skb_shinfo(skb)->nr_frags;
2679
2680		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2681			pr_err("Packet exceeds the number of skb frags (%u)\n",
2682			       (unsigned int)MAX_SKB_FRAGS);
2683			return -EFAULT;
2684		}
2685
2686		page = pgv_to_page(data);
2687		data += len;
2688		flush_dcache_page(page);
2689		get_page(page);
2690		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2691		to_write -= len;
2692		offset = 0;
2693		len_max = PAGE_SIZE;
2694		len = ((to_write > len_max) ? len_max : to_write);
2695	}
2696
2697	packet_parse_headers(skb, sock);
2698
2699	return tp_len;
2700}
2701
2702static int tpacket_parse_header(struct packet_sock *po, void *frame,
2703				int size_max, void **data)
2704{
2705	union tpacket_uhdr ph;
2706	int tp_len, off;
2707
2708	ph.raw = frame;
2709
2710	switch (po->tp_version) {
2711	case TPACKET_V3:
2712		if (ph.h3->tp_next_offset != 0) {
2713			pr_warn_once("variable sized slot not supported");
2714			return -EINVAL;
2715		}
2716		tp_len = ph.h3->tp_len;
2717		break;
2718	case TPACKET_V2:
2719		tp_len = ph.h2->tp_len;
2720		break;
2721	default:
2722		tp_len = ph.h1->tp_len;
2723		break;
2724	}
2725	if (unlikely(tp_len > size_max)) {
2726		pr_err("packet size is too large (%d > %d)\n", tp_len, size_max);
2727		return -EMSGSIZE;
2728	}
2729
2730	if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
2731		int off_min, off_max;
2732
2733		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2734		off_max = po->tx_ring.frame_size - tp_len;
2735		if (po->sk.sk_type == SOCK_DGRAM) {
2736			switch (po->tp_version) {
2737			case TPACKET_V3:
2738				off = ph.h3->tp_net;
2739				break;
2740			case TPACKET_V2:
2741				off = ph.h2->tp_net;
2742				break;
2743			default:
2744				off = ph.h1->tp_net;
2745				break;
2746			}
2747		} else {
2748			switch (po->tp_version) {
2749			case TPACKET_V3:
2750				off = ph.h3->tp_mac;
2751				break;
2752			case TPACKET_V2:
2753				off = ph.h2->tp_mac;
2754				break;
2755			default:
2756				off = ph.h1->tp_mac;
2757				break;
2758			}
2759		}
2760		if (unlikely((off < off_min) || (off_max < off)))
2761			return -EINVAL;
2762	} else {
2763		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2764	}
2765
2766	*data = frame + off;
2767	return tp_len;
2768}
2769
2770static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2771{
2772	struct sk_buff *skb = NULL;
2773	struct net_device *dev;
2774	struct virtio_net_hdr *vnet_hdr = NULL;
2775	struct sockcm_cookie sockc;
2776	__be16 proto;
2777	int err, reserve = 0;
2778	void *ph;
2779	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2780	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2781	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2782	unsigned char *addr = NULL;
2783	int tp_len, size_max;
2784	void *data;
2785	int len_sum = 0;
2786	int status = TP_STATUS_AVAILABLE;
2787	int hlen, tlen, copylen = 0;
2788	long timeo = 0;
2789
2790	mutex_lock(&po->pg_vec_lock);
2791
2792	/* packet_sendmsg() check on tx_ring.pg_vec was lockless,
2793	 * we need to confirm it under protection of pg_vec_lock.
2794	 */
2795	if (unlikely(!po->tx_ring.pg_vec)) {
2796		err = -EBUSY;
2797		goto out;
2798	}
2799	if (likely(saddr == NULL)) {
2800		dev	= packet_cached_dev_get(po);
2801		proto	= READ_ONCE(po->num);
2802	} else {
2803		err = -EINVAL;
2804		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2805			goto out;
2806		if (msg->msg_namelen < (saddr->sll_halen
2807					+ offsetof(struct sockaddr_ll,
2808						sll_addr)))
2809			goto out;
2810		proto	= saddr->sll_protocol;
2811		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2812		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2813			if (dev && msg->msg_namelen < dev->addr_len +
2814				   offsetof(struct sockaddr_ll, sll_addr))
2815				goto out_put;
2816			addr = saddr->sll_addr;
2817		}
2818	}
2819
2820	err = -ENXIO;
2821	if (unlikely(dev == NULL))
2822		goto out;
2823	err = -ENETDOWN;
2824	if (unlikely(!(dev->flags & IFF_UP)))
2825		goto out_put;
2826
2827	sockcm_init(&sockc, &po->sk);
2828	if (msg->msg_controllen) {
2829		err = sock_cmsg_send(&po->sk, msg, &sockc);
2830		if (unlikely(err))
2831			goto out_put;
2832	}
2833
2834	if (po->sk.sk_socket->type == SOCK_RAW)
2835		reserve = dev->hard_header_len;
2836	size_max = po->tx_ring.frame_size
2837		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2838
2839	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
2840		size_max = dev->mtu + reserve + VLAN_HLEN;
2841
2842	reinit_completion(&po->skb_completion);
2843
2844	do {
2845		ph = packet_current_frame(po, &po->tx_ring,
2846					  TP_STATUS_SEND_REQUEST);
2847		if (unlikely(ph == NULL)) {
2848			if (need_wait && skb) {
2849				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2850				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2851				if (timeo <= 0) {
2852					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2853					goto out_put;
2854				}
2855			}
2856			/* check for additional frames */
2857			continue;
2858		}
2859
2860		skb = NULL;
2861		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2862		if (tp_len < 0)
2863			goto tpacket_error;
2864
2865		status = TP_STATUS_SEND_REQUEST;
2866		hlen = LL_RESERVED_SPACE(dev);
2867		tlen = dev->needed_tailroom;
2868		if (vnet_hdr_sz) {
2869			vnet_hdr = data;
2870			data += vnet_hdr_sz;
2871			tp_len -= vnet_hdr_sz;
2872			if (tp_len < 0 ||
2873			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2874				tp_len = -EINVAL;
2875				goto tpacket_error;
2876			}
2877			copylen = __virtio16_to_cpu(vio_le(),
2878						    vnet_hdr->hdr_len);
2879		}
2880		copylen = max_t(int, copylen, dev->hard_header_len);
2881		skb = sock_alloc_send_skb(&po->sk,
2882				hlen + tlen + sizeof(struct sockaddr_ll) +
2883				(copylen - dev->hard_header_len),
2884				!need_wait, &err);
2885
2886		if (unlikely(skb == NULL)) {
2887			/* we assume the socket was initially writeable ... */
2888			if (likely(len_sum > 0))
2889				err = len_sum;
2890			goto out_status;
2891		}
2892		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2893					  addr, hlen, copylen, &sockc);
2894		if (likely(tp_len >= 0) &&
2895		    tp_len > dev->mtu + reserve &&
2896		    !vnet_hdr_sz &&
2897		    !packet_extra_vlan_len_allowed(dev, skb))
2898			tp_len = -EMSGSIZE;
2899
2900		if (unlikely(tp_len < 0)) {
2901tpacket_error:
2902			if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
2903				__packet_set_status(po, ph,
2904						TP_STATUS_AVAILABLE);
2905				packet_increment_head(&po->tx_ring);
2906				kfree_skb(skb);
2907				continue;
2908			} else {
2909				status = TP_STATUS_WRONG_FORMAT;
2910				err = tp_len;
2911				goto out_status;
2912			}
2913		}
2914
2915		if (vnet_hdr_sz) {
2916			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2917				tp_len = -EINVAL;
2918				goto tpacket_error;
2919			}
2920			virtio_net_hdr_set_proto(skb, vnet_hdr);
2921		}
2922
2923		skb->destructor = tpacket_destruct_skb;
2924		__packet_set_status(po, ph, TP_STATUS_SENDING);
2925		packet_inc_pending(&po->tx_ring);
2926
2927		status = TP_STATUS_SEND_REQUEST;
2928		err = packet_xmit(po, skb);
2929		if (unlikely(err != 0)) {
2930			if (err > 0)
2931				err = net_xmit_errno(err);
2932			if (err && __packet_get_status(po, ph) ==
2933				   TP_STATUS_AVAILABLE) {
2934				/* skb was destructed already */
2935				skb = NULL;
2936				goto out_status;
2937			}
2938			/*
2939			 * skb was dropped but not destructed yet;
2940			 * let's treat it like congestion or err < 0
2941			 */
2942			err = 0;
2943		}
2944		packet_increment_head(&po->tx_ring);
2945		len_sum += tp_len;
2946	} while (likely((ph != NULL) ||
2947		/* Note: packet_read_pending() might be slow if we have
2948		 * to call it, as it's a per-cpu variable, but in the fast path
2949		 * we already short-circuit the loop with the first
2950		 * condition, and luckily don't have to take that path
2951		 * anyway.
2952		 */
2953		 (need_wait && packet_read_pending(&po->tx_ring))));
2954
2955	err = len_sum;
2956	goto out_put;
2957
2958out_status:
2959	__packet_set_status(po, ph, status);
2960	kfree_skb(skb);
2961out_put:
2962	dev_put(dev);
2963out:
2964	mutex_unlock(&po->pg_vec_lock);
2965	return err;
2966}
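
/*
 * Illustrative userspace sketch (not part of this file): queueing one frame
 * on a TPACKET_V2 transmit ring and kicking tpacket_snd() above.  "slot" is
 * assumed to point at the next mmap()ed ring frame currently owned by user
 * space, on a socket with PACKET_TX_RING already configured.
 *
 *	static void tx_ring_send(int fd, void *slot, const void *pkt, size_t len)
 *	{
 *		struct tpacket2_hdr *hdr = slot;
 *		char *data = (char *)slot + TPACKET2_HDRLEN -
 *			     sizeof(struct sockaddr_ll);
 *
 *		memcpy(data, pkt, len);
 *		hdr->tp_len = len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		send(fd, NULL, 0, 0);
 *	}
 */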
2967
2968static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2969				        size_t reserve, size_t len,
2970				        size_t linear, int noblock,
2971				        int *err)
2972{
2973	struct sk_buff *skb;
2974
2975	/* Under a page?  Don't bother with paged skb. */
2976	if (prepad + len < PAGE_SIZE || !linear)
2977		linear = len;
2978
2979	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
2980		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
2981	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2982				   err, PAGE_ALLOC_COSTLY_ORDER);
2983	if (!skb)
2984		return NULL;
2985
2986	skb_reserve(skb, reserve);
2987	skb_put(skb, linear);
2988	skb->data_len = len - linear;
2989	skb->len += len - linear;
2990
2991	return skb;
2992}
2993
2994static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2995{
2996	struct sock *sk = sock->sk;
2997	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2998	struct sk_buff *skb;
2999	struct net_device *dev;
3000	__be16 proto;
3001	unsigned char *addr = NULL;
3002	int err, reserve = 0;
3003	struct sockcm_cookie sockc;
3004	struct virtio_net_hdr vnet_hdr = { 0 };
3005	int offset = 0;
3006	struct packet_sock *po = pkt_sk(sk);
3007	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
3008	int hlen, tlen, linear;
3009	int extra_len = 0;
3010
3011	/*
3012	 *	Get and verify the address.
3013	 */
3014
3015	if (likely(saddr == NULL)) {
3016		dev	= packet_cached_dev_get(po);
3017		proto	= READ_ONCE(po->num);
3018	} else {
3019		err = -EINVAL;
3020		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
3021			goto out;
3022		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
3023			goto out;
3024		proto	= saddr->sll_protocol;
3025		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
3026		if (sock->type == SOCK_DGRAM) {
3027			if (dev && msg->msg_namelen < dev->addr_len +
3028				   offsetof(struct sockaddr_ll, sll_addr))
3029				goto out_unlock;
3030			addr = saddr->sll_addr;
3031		}
3032	}
3033
3034	err = -ENXIO;
3035	if (unlikely(dev == NULL))
3036		goto out_unlock;
3037	err = -ENETDOWN;
3038	if (unlikely(!(dev->flags & IFF_UP)))
3039		goto out_unlock;
3040
3041	sockcm_init(&sockc, sk);
3042	sockc.mark = READ_ONCE(sk->sk_mark);
3043	if (msg->msg_controllen) {
3044		err = sock_cmsg_send(sk, msg, &sockc);
3045		if (unlikely(err))
3046			goto out_unlock;
3047	}
3048
3049	if (sock->type == SOCK_RAW)
3050		reserve = dev->hard_header_len;
3051	if (vnet_hdr_sz) {
3052		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
3053		if (err)
3054			goto out_unlock;
3055	}
3056
3057	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
3058		if (!netif_supports_nofcs(dev)) {
3059			err = -EPROTONOSUPPORT;
3060			goto out_unlock;
3061		}
3062		extra_len = 4; /* We're doing our own CRC */
3063	}
3064
3065	err = -EMSGSIZE;
3066	if (!vnet_hdr.gso_type &&
3067	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
3068		goto out_unlock;
3069
3070	err = -ENOBUFS;
3071	hlen = LL_RESERVED_SPACE(dev);
3072	tlen = dev->needed_tailroom;
3073	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
3074	linear = max(linear, min_t(int, len, dev->hard_header_len));
3075	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3076			       msg->msg_flags & MSG_DONTWAIT, &err);
3077	if (skb == NULL)
3078		goto out_unlock;
3079
3080	skb_reset_network_header(skb);
3081
3082	err = -EINVAL;
3083	if (sock->type == SOCK_DGRAM) {
3084		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
3085		if (unlikely(offset < 0))
3086			goto out_free;
3087	} else if (reserve) {
3088		skb_reserve(skb, -reserve);
3089		if (len < reserve + sizeof(struct ipv6hdr) &&
3090		    dev->min_header_len != dev->hard_header_len)
3091			skb_reset_network_header(skb);
3092	}
3093
3094	/* Returns -EFAULT on error */
3095	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3096	if (err)
3097		goto out_free;
3098
3099	if ((sock->type == SOCK_RAW &&
3100	     !dev_validate_header(dev, skb->data, len)) || !skb->len) {
3101		err = -EINVAL;
3102		goto out_free;
3103	}
3104
3105	skb_setup_tx_timestamp(skb, &sockc);
3106
3107	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3108	    !packet_extra_vlan_len_allowed(dev, skb)) {
3109		err = -EMSGSIZE;
3110		goto out_free;
3111	}
3112
3113	skb->protocol = proto;
3114	skb->dev = dev;
3115	skb->priority = READ_ONCE(sk->sk_priority);
3116	skb->mark = sockc.mark;
3117	skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
3118
3119	if (unlikely(extra_len == 4))
3120		skb->no_fcs = 1;
3121
3122	packet_parse_headers(skb, sock);
3123
3124	if (vnet_hdr_sz) {
3125		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3126		if (err)
3127			goto out_free;
3128		len += vnet_hdr_sz;
3129		virtio_net_hdr_set_proto(skb, &vnet_hdr);
3130	}
3131
3132	err = packet_xmit(po, skb);
3133
3134	if (unlikely(err != 0)) {
3135		if (err > 0)
3136			err = net_xmit_errno(err);
3137		if (err)
3138			goto out_unlock;
3139	}
3140
3141	dev_put(dev);
3142
3143	return len;
3144
3145out_free:
3146	kfree_skb(skb);
3147out_unlock:
3148	dev_put(dev);
3149out:
3150	return err;
3151}
3152
3153static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3154{
3155	struct sock *sk = sock->sk;
3156	struct packet_sock *po = pkt_sk(sk);
3157
3158	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3159	 * tpacket_snd() will redo the check safely.
3160	 */
3161	if (data_race(po->tx_ring.pg_vec))
3162		return tpacket_snd(po, msg);
3163
3164	return packet_snd(sock, msg, len);
3165}
3166
3167/*
3168 *	Close a PACKET socket. This is fairly simple. We immediately go
3169 *	to 'closed' state and remove our protocol entry in the device list.
3170 */
3171
3172static int packet_release(struct socket *sock)
3173{
3174	struct sock *sk = sock->sk;
3175	struct packet_sock *po;
3176	struct packet_fanout *f;
3177	struct net *net;
3178	union tpacket_req_u req_u;
3179
3180	if (!sk)
3181		return 0;
3182
3183	net = sock_net(sk);
3184	po = pkt_sk(sk);
3185
3186	mutex_lock(&net->packet.sklist_lock);
3187	sk_del_node_init_rcu(sk);
3188	mutex_unlock(&net->packet.sklist_lock);
3189
3190	sock_prot_inuse_add(net, sk->sk_prot, -1);
3191
3192	spin_lock(&po->bind_lock);
3193	unregister_prot_hook(sk, false);
3194	packet_cached_dev_reset(po);
3195
3196	if (po->prot_hook.dev) {
3197		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3198		po->prot_hook.dev = NULL;
3199	}
3200	spin_unlock(&po->bind_lock);
3201
3202	packet_flush_mclist(sk);
3203
3204	lock_sock(sk);
3205	if (po->rx_ring.pg_vec) {
3206		memset(&req_u, 0, sizeof(req_u));
3207		packet_set_ring(sk, &req_u, 1, 0);
3208	}
3209
3210	if (po->tx_ring.pg_vec) {
3211		memset(&req_u, 0, sizeof(req_u));
3212		packet_set_ring(sk, &req_u, 1, 1);
3213	}
3214	release_sock(sk);
3215
3216	f = fanout_release(sk);
3217
3218	synchronize_net();
3219
3220	kfree(po->rollover);
3221	if (f) {
3222		fanout_release_data(f);
3223		kvfree(f);
3224	}
3225	/*
3226	 *	Now the socket is dead. No more input will appear.
3227	 */
3228	sock_orphan(sk);
3229	sock->sk = NULL;
3230
3231	/* Purge queues */
3232
3233	skb_queue_purge(&sk->sk_receive_queue);
3234	packet_free_pending(po);
3235
3236	sock_put(sk);
3237	return 0;
3238}
3239
3240/*
3241 *	Attach a packet hook.
3242 */
3243
3244static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3245			  __be16 proto)
3246{
3247	struct packet_sock *po = pkt_sk(sk);
3248	struct net_device *dev = NULL;
3249	bool unlisted = false;
3250	bool need_rehook;
3251	int ret = 0;
3252
3253	lock_sock(sk);
3254	spin_lock(&po->bind_lock);
3255	if (!proto)
3256		proto = po->num;
3257
3258	rcu_read_lock();
3259
3260	if (po->fanout) {
3261		ret = -EINVAL;
3262		goto out_unlock;
3263	}
3264
3265	if (name) {
3266		dev = dev_get_by_name_rcu(sock_net(sk), name);
3267		if (!dev) {
3268			ret = -ENODEV;
3269			goto out_unlock;
3270		}
3271	} else if (ifindex) {
3272		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3273		if (!dev) {
3274			ret = -ENODEV;
3275			goto out_unlock;
3276		}
3277	}
3278
3279	need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3280
3281	if (need_rehook) {
3282		dev_hold(dev);
3283		if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
3284			rcu_read_unlock();
3285			/* prevents packet_notifier() from calling
3286			 * register_prot_hook()
3287			 */
3288			WRITE_ONCE(po->num, 0);
3289			__unregister_prot_hook(sk, true);
3290			rcu_read_lock();
3291			if (dev)
3292				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3293								 dev->ifindex);
3294		}
3295
3296		BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
3297		WRITE_ONCE(po->num, proto);
3298		po->prot_hook.type = proto;
3299
3300		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3301
3302		if (unlikely(unlisted)) {
3303			po->prot_hook.dev = NULL;
3304			WRITE_ONCE(po->ifindex, -1);
3305			packet_cached_dev_reset(po);
3306		} else {
3307			netdev_hold(dev, &po->prot_hook.dev_tracker,
3308				    GFP_ATOMIC);
3309			po->prot_hook.dev = dev;
3310			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3311			packet_cached_dev_assign(po, dev);
3312		}
3313		dev_put(dev);
3314	}
3315
3316	if (proto == 0 || !need_rehook)
3317		goto out_unlock;
3318
3319	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3320		register_prot_hook(sk);
3321	} else {
3322		sk->sk_err = ENETDOWN;
3323		if (!sock_flag(sk, SOCK_DEAD))
3324			sk_error_report(sk);
3325	}
3326
3327out_unlock:
3328	rcu_read_unlock();
3329	spin_unlock(&po->bind_lock);
3330	release_sock(sk);
3331	return ret;
3332}
3333
3334/*
3335 *	Bind a packet socket to a device
3336 */
3337
3338static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3339			    int addr_len)
3340{
3341	struct sock *sk = sock->sk;
3342	char name[sizeof(uaddr->sa_data_min) + 1];
3343
3344	/*
3345	 *	Check legality
3346	 */
3347
3348	if (addr_len != sizeof(struct sockaddr))
3349		return -EINVAL;
3350	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3351	 * zero-terminated.
3352	 */
3353	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
3354	name[sizeof(uaddr->sa_data_min)] = 0;
3355
3356	return packet_do_bind(sk, name, 0, 0);
3357}
3358
3359static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3360{
3361	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3362	struct sock *sk = sock->sk;
3363
3364	/*
3365	 *	Check legality
3366	 */
3367
3368	if (addr_len < sizeof(struct sockaddr_ll))
3369		return -EINVAL;
3370	if (sll->sll_family != AF_PACKET)
3371		return -EINVAL;
3372
3373	return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
3374}
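
/*
 * Illustrative userspace sketch (not part of this file): opening a raw packet
 * socket and binding it to one interface, which ends up in packet_do_bind()
 * above.  "eth0" is an example name; ETH_P_ALL captures every protocol.
 * Headers assumed: <sys/socket.h>, <linux/if_packet.h>, <linux/if_ether.h>,
 * <net/if.h>, <arpa/inet.h>.
 *
 *	static int open_bound_socket(void)
 *	{
 *		struct sockaddr_ll sll = {
 *			.sll_family   = AF_PACKET,
 *			.sll_protocol = htons(ETH_P_ALL),
 *			.sll_ifindex  = if_nametoindex("eth0"),
 *		};
 *		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 *		if (fd < 0 || bind(fd, (struct sockaddr *)&sll, sizeof(sll)))
 *			return -1;
 *		return fd;
 *	}
 */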
3375
3376static struct proto packet_proto = {
3377	.name	  = "PACKET",
3378	.owner	  = THIS_MODULE,
3379	.obj_size = sizeof(struct packet_sock),
3380};
3381
3382/*
3383 *	Create a packet of type SOCK_PACKET.
3384 */
3385
3386static int packet_create(struct net *net, struct socket *sock, int protocol,
3387			 int kern)
3388{
3389	struct sock *sk;
3390	struct packet_sock *po;
3391	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3392	int err;
3393
3394	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3395		return -EPERM;
3396	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3397	    sock->type != SOCK_PACKET)
3398		return -ESOCKTNOSUPPORT;
3399
3400	sock->state = SS_UNCONNECTED;
3401
3402	err = -ENOBUFS;
3403	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3404	if (sk == NULL)
3405		goto out;
3406
3407	sock->ops = &packet_ops;
3408	if (sock->type == SOCK_PACKET)
3409		sock->ops = &packet_ops_spkt;
3410
3411	po = pkt_sk(sk);
3412	err = packet_alloc_pending(po);
3413	if (err)
3414		goto out_sk_free;
3415
3416	sock_init_data(sock, sk);
3417
3418	init_completion(&po->skb_completion);
3419	sk->sk_family = PF_PACKET;
3420	po->num = proto;
3421
3422	packet_cached_dev_reset(po);
3423
3424	sk->sk_destruct = packet_sock_destruct;
3425
3426	/*
3427	 *	Attach a protocol block
3428	 */
3429
3430	spin_lock_init(&po->bind_lock);
3431	mutex_init(&po->pg_vec_lock);
3432	po->rollover = NULL;
3433	po->prot_hook.func = packet_rcv;
3434
3435	if (sock->type == SOCK_PACKET)
3436		po->prot_hook.func = packet_rcv_spkt;
3437
3438	po->prot_hook.af_packet_priv = sk;
3439	po->prot_hook.af_packet_net = sock_net(sk);
3440
3441	if (proto) {
3442		po->prot_hook.type = proto;
3443		__register_prot_hook(sk);
3444	}
3445
3446	mutex_lock(&net->packet.sklist_lock);
3447	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3448	mutex_unlock(&net->packet.sklist_lock);
3449
3450	sock_prot_inuse_add(net, &packet_proto, 1);
3451
3452	return 0;
3453out_sk_free:
3454	sk_free(sk);
3455out:
3456	return err;
3457}
3458
3459/*
3460 *	Pull a packet from our receive queue and hand it to the user.
3461 *	If necessary we block.
3462 */
3463
3464static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3465			  int flags)
3466{
3467	struct sock *sk = sock->sk;
3468	struct sk_buff *skb;
3469	int copied, err;
3470	int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz);
3471	unsigned int origlen = 0;
3472
3473	err = -EINVAL;
3474	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3475		goto out;
3476
3477#if 0
3478	/* What error should we return now? EUNATTACH? */
3479	if (pkt_sk(sk)->ifindex < 0)
3480		return -ENODEV;
3481#endif
3482
3483	if (flags & MSG_ERRQUEUE) {
3484		err = sock_recv_errqueue(sk, msg, len,
3485					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3486		goto out;
3487	}
3488
3489	/*
3490	 *	Call the generic datagram receiver. This handles all sorts
3491	 *	of horrible races and re-entrancy so we can forget about it
3492	 *	in the protocol layers.
3493	 *
3494	 *	Now it will return ENETDOWN if the device has just gone down,
3495	 *	but then it will block.
3496	 */
3497
3498	skb = skb_recv_datagram(sk, flags, &err);
3499
3500	/*
3501	 *	An error occurred, so return it. Because skb_recv_datagram()
3502	 *	handles the blocking, we don't need to see or worry about
3503	 *	blocking retries.
3504	 */
3505
3506	if (skb == NULL)
3507		goto out;
3508
3509	packet_rcv_try_clear_pressure(pkt_sk(sk));
3510
3511	if (vnet_hdr_len) {
3512		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
3513		if (err)
3514			goto out_free;
3515	}
3516
3517	/* You lose any data beyond the buffer you gave. If this worries
3518	 * a user program, it can ask the device for its MTU
3519	 * anyway.
3520	 */
3521	copied = skb->len;
3522	if (copied > len) {
3523		copied = len;
3524		msg->msg_flags |= MSG_TRUNC;
3525	}
3526
3527	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3528	if (err)
3529		goto out_free;
3530
3531	if (sock->type != SOCK_PACKET) {
3532		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3533
3534		/* Original length was stored in sockaddr_ll fields */
3535		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3536		sll->sll_family = AF_PACKET;
3537		sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
3538			vlan_get_protocol_dgram(skb) : skb->protocol;
3539	}
3540
3541	sock_recv_cmsgs(msg, sk, skb);
3542
3543	if (msg->msg_name) {
3544		const size_t max_len = min(sizeof(skb->cb),
3545					   sizeof(struct sockaddr_storage));
3546		int copy_len;
3547
3548		/* If the address length field is there to be filled
3549		 * in, we fill it in now.
3550		 */
3551		if (sock->type == SOCK_PACKET) {
3552			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3553			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3554			copy_len = msg->msg_namelen;
3555		} else {
3556			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3557
3558			msg->msg_namelen = sll->sll_halen +
3559				offsetof(struct sockaddr_ll, sll_addr);
3560			copy_len = msg->msg_namelen;
3561			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3562				memset(msg->msg_name +
3563				       offsetof(struct sockaddr_ll, sll_addr),
3564				       0, sizeof(sll->sll_addr));
3565				msg->msg_namelen = sizeof(struct sockaddr_ll);
3566			}
3567		}
3568		if (WARN_ON_ONCE(copy_len > max_len)) {
3569			copy_len = max_len;
3570			msg->msg_namelen = copy_len;
3571		}
3572		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3573	}
3574
3575	if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
3576		struct tpacket_auxdata aux;
3577
3578		aux.tp_status = TP_STATUS_USER;
3579		if (skb->ip_summed == CHECKSUM_PARTIAL)
3580			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3581		else if (skb->pkt_type != PACKET_OUTGOING &&
3582			 skb_csum_unnecessary(skb))
3583			aux.tp_status |= TP_STATUS_CSUM_VALID;
3584		if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
3585			aux.tp_status |= TP_STATUS_GSO_TCP;
3586
3587		aux.tp_len = origlen;
3588		aux.tp_snaplen = skb->len;
3589		aux.tp_mac = 0;
3590		aux.tp_net = skb_network_offset(skb);
3591		if (skb_vlan_tag_present(skb)) {
3592			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3593			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3594			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3595		} else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
3596			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3597			struct net_device *dev;
3598
3599			rcu_read_lock();
3600			dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
3601			if (dev) {
3602				aux.tp_vlan_tci = vlan_get_tci(skb, dev);
3603				aux.tp_vlan_tpid = ntohs(skb->protocol);
3604				aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3605			} else {
3606				aux.tp_vlan_tci = 0;
3607				aux.tp_vlan_tpid = 0;
3608			}
3609			rcu_read_unlock();
3610		} else {
3611			aux.tp_vlan_tci = 0;
3612			aux.tp_vlan_tpid = 0;
3613		}
3614		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3615	}
3616
3617	/*
3618	 *	Free or return the buffer as appropriate. Again this
3619	 *	hides all the races and re-entrancy issues from us.
3620	 */
3621	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3622
3623out_free:
3624	skb_free_datagram(sk, skb);
3625out:
3626	return err;
3627}
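
/*
 * Illustrative userspace sketch (not part of this file): reading the
 * tpacket_auxdata control message that packet_recvmsg() above emits when
 * PACKET_AUXDATA is enabled, here to recover the VLAN TCI.  Headers assumed:
 * <sys/socket.h> and <linux/if_packet.h>.
 *
 *	static int recv_vlan_tci(int fd, void *buf, size_t len)
 *	{
 *		int one = 1, tci = -1;
 *		char ctrl[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *		struct iovec iov = { .iov_base = buf, .iov_len = len };
 *		struct msghdr msg = {
 *			.msg_iov = &iov, .msg_iovlen = 1,
 *			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return -1;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			struct tpacket_auxdata *aux;
 *
 *			if (cmsg->cmsg_level != SOL_PACKET ||
 *			    cmsg->cmsg_type != PACKET_AUXDATA)
 *				continue;
 *			aux = (struct tpacket_auxdata *)CMSG_DATA(cmsg);
 *			if (aux->tp_status & TP_STATUS_VLAN_VALID)
 *				tci = aux->tp_vlan_tci;
 *		}
 *		return tci;
 *	}
 */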
3628
3629static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3630			       int peer)
3631{
3632	struct net_device *dev;
3633	struct sock *sk	= sock->sk;
3634
3635	if (peer)
3636		return -EOPNOTSUPP;
3637
3638	uaddr->sa_family = AF_PACKET;
3639	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
3640	rcu_read_lock();
3641	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3642	if (dev)
3643		strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
3644	rcu_read_unlock();
3645
3646	return sizeof(*uaddr);
3647}
3648
3649static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3650			  int peer)
3651{
3652	struct net_device *dev;
3653	struct sock *sk = sock->sk;
3654	struct packet_sock *po = pkt_sk(sk);
3655	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3656	int ifindex;
3657
3658	if (peer)
3659		return -EOPNOTSUPP;
3660
3661	ifindex = READ_ONCE(po->ifindex);
3662	sll->sll_family = AF_PACKET;
3663	sll->sll_ifindex = ifindex;
3664	sll->sll_protocol = READ_ONCE(po->num);
3665	sll->sll_pkttype = 0;
3666	rcu_read_lock();
3667	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3668	if (dev) {
3669		sll->sll_hatype = dev->type;
3670		sll->sll_halen = dev->addr_len;
3671
3672		/* Let __fortify_memcpy_chk() know the actual buffer size. */
3673		memcpy(((struct sockaddr_storage *)sll)->__data +
3674		       offsetof(struct sockaddr_ll, sll_addr) -
3675		       offsetofend(struct sockaddr_ll, sll_family),
3676		       dev->dev_addr, dev->addr_len);
3677	} else {
3678		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3679		sll->sll_halen = 0;
3680	}
3681	rcu_read_unlock();
3682
3683	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3684}
3685
3686static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3687			 int what)
3688{
3689	switch (i->type) {
3690	case PACKET_MR_MULTICAST:
3691		if (i->alen != dev->addr_len)
3692			return -EINVAL;
3693		if (what > 0)
3694			return dev_mc_add(dev, i->addr);
3695		else
3696			return dev_mc_del(dev, i->addr);
3697		break;
3698	case PACKET_MR_PROMISC:
3699		return dev_set_promiscuity(dev, what);
3700	case PACKET_MR_ALLMULTI:
3701		return dev_set_allmulti(dev, what);
3702	case PACKET_MR_UNICAST:
3703		if (i->alen != dev->addr_len)
3704			return -EINVAL;
3705		if (what > 0)
3706			return dev_uc_add(dev, i->addr);
3707		else
3708			return dev_uc_del(dev, i->addr);
3709		break;
3710	default:
3711		break;
3712	}
3713	return 0;
3714}
3715
3716static void packet_dev_mclist_delete(struct net_device *dev,
3717				     struct packet_mclist **mlp)
3718{
3719	struct packet_mclist *ml;
3720
3721	while ((ml = *mlp) != NULL) {
3722		if (ml->ifindex == dev->ifindex) {
3723			packet_dev_mc(dev, ml, -1);
3724			*mlp = ml->next;
3725			kfree(ml);
3726		} else
3727			mlp = &ml->next;
3728	}
3729}
3730
3731static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3732{
3733	struct packet_sock *po = pkt_sk(sk);
3734	struct packet_mclist *ml, *i;
3735	struct net_device *dev;
3736	int err;
3737
3738	rtnl_lock();
3739
3740	err = -ENODEV;
3741	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3742	if (!dev)
3743		goto done;
3744
3745	err = -EINVAL;
3746	if (mreq->mr_alen > dev->addr_len)
3747		goto done;
3748
3749	err = -ENOBUFS;
3750	i = kmalloc(sizeof(*i), GFP_KERNEL);
3751	if (i == NULL)
3752		goto done;
3753
3754	err = 0;
3755	for (ml = po->mclist; ml; ml = ml->next) {
3756		if (ml->ifindex == mreq->mr_ifindex &&
3757		    ml->type == mreq->mr_type &&
3758		    ml->alen == mreq->mr_alen &&
3759		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3760			ml->count++;
3761			/* Free the new element ... */
3762			kfree(i);
3763			goto done;
3764		}
3765	}
3766
3767	i->type = mreq->mr_type;
3768	i->ifindex = mreq->mr_ifindex;
3769	i->alen = mreq->mr_alen;
3770	memcpy(i->addr, mreq->mr_address, i->alen);
3771	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3772	i->count = 1;
3773	i->next = po->mclist;
3774	po->mclist = i;
3775	err = packet_dev_mc(dev, i, 1);
3776	if (err) {
3777		po->mclist = i->next;
3778		kfree(i);
3779	}
3780
3781done:
3782	rtnl_unlock();
3783	return err;
3784}
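
/*
 * Illustrative userspace sketch (not part of this file): asking
 * packet_mc_add() above to put an interface into promiscuous mode via the
 * socket, so the reference is dropped automatically when the socket is
 * closed.  Headers assumed: <sys/socket.h> and <linux/if_packet.h>.
 *
 *	static int enable_promisc(int fd, int ifindex)
 *	{
 *		struct packet_mreq mreq = {
 *			.mr_ifindex = ifindex,
 *			.mr_type    = PACKET_MR_PROMISC,
 *		};
 *
 *		return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *				  &mreq, sizeof(mreq));
 *	}
 */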
3785
3786static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3787{
3788	struct packet_mclist *ml, **mlp;
3789
3790	rtnl_lock();
3791
3792	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3793		if (ml->ifindex == mreq->mr_ifindex &&
3794		    ml->type == mreq->mr_type &&
3795		    ml->alen == mreq->mr_alen &&
3796		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3797			if (--ml->count == 0) {
3798				struct net_device *dev;
3799				*mlp = ml->next;
3800				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3801				if (dev)
3802					packet_dev_mc(dev, ml, -1);
3803				kfree(ml);
3804			}
3805			break;
3806		}
3807	}
3808	rtnl_unlock();
3809	return 0;
3810}
3811
3812static void packet_flush_mclist(struct sock *sk)
3813{
3814	struct packet_sock *po = pkt_sk(sk);
3815	struct packet_mclist *ml;
3816
3817	if (!po->mclist)
3818		return;
3819
3820	rtnl_lock();
3821	while ((ml = po->mclist) != NULL) {
3822		struct net_device *dev;
3823
3824		po->mclist = ml->next;
3825		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3826		if (dev != NULL)
3827			packet_dev_mc(dev, ml, -1);
3828		kfree(ml);
3829	}
3830	rtnl_unlock();
3831}
3832
3833static int
3834packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3835		  unsigned int optlen)
3836{
3837	struct sock *sk = sock->sk;
3838	struct packet_sock *po = pkt_sk(sk);
3839	int ret;
3840
3841	if (level != SOL_PACKET)
3842		return -ENOPROTOOPT;
3843
3844	switch (optname) {
3845	case PACKET_ADD_MEMBERSHIP:
3846	case PACKET_DROP_MEMBERSHIP:
3847	{
3848		struct packet_mreq_max mreq;
3849		int len = optlen;
3850		memset(&mreq, 0, sizeof(mreq));
3851		if (len < sizeof(struct packet_mreq))
3852			return -EINVAL;
3853		if (len > sizeof(mreq))
3854			len = sizeof(mreq);
3855		if (copy_from_sockptr(&mreq, optval, len))
3856			return -EFAULT;
3857		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3858			return -EINVAL;
3859		if (optname == PACKET_ADD_MEMBERSHIP)
3860			ret = packet_mc_add(sk, &mreq);
3861		else
3862			ret = packet_mc_drop(sk, &mreq);
3863		return ret;
3864	}
3865
3866	case PACKET_RX_RING:
3867	case PACKET_TX_RING:
3868	{
3869		union tpacket_req_u req_u;
3870
3871		ret = -EINVAL;
3872		lock_sock(sk);
3873		switch (po->tp_version) {
3874		case TPACKET_V1:
3875		case TPACKET_V2:
3876			if (optlen < sizeof(req_u.req))
3877				break;
3878			ret = copy_from_sockptr(&req_u.req, optval,
3879						sizeof(req_u.req)) ?
3880						-EINVAL : 0;
3881			break;
3882		case TPACKET_V3:
3883		default:
3884			if (optlen < sizeof(req_u.req3))
3885				break;
3886			ret = copy_from_sockptr(&req_u.req3, optval,
3887						sizeof(req_u.req3)) ?
3888						-EINVAL : 0;
3889			break;
3890		}
3891		if (!ret)
3892			ret = packet_set_ring(sk, &req_u, 0,
3893					      optname == PACKET_TX_RING);
3894		release_sock(sk);
3895		return ret;
3896	}
3897	case PACKET_COPY_THRESH:
3898	{
3899		int val;
3900
3901		if (optlen != sizeof(val))
3902			return -EINVAL;
3903		if (copy_from_sockptr(&val, optval, sizeof(val)))
3904			return -EFAULT;
3905
3906		WRITE_ONCE(pkt_sk(sk)->copy_thresh, val);
3907		return 0;
3908	}
3909	case PACKET_VERSION:
3910	{
3911		int val;
3912
3913		if (optlen != sizeof(val))
3914			return -EINVAL;
3915		if (copy_from_sockptr(&val, optval, sizeof(val)))
3916			return -EFAULT;
3917		switch (val) {
3918		case TPACKET_V1:
3919		case TPACKET_V2:
3920		case TPACKET_V3:
3921			break;
3922		default:
3923			return -EINVAL;
3924		}
3925		lock_sock(sk);
3926		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3927			ret = -EBUSY;
3928		} else {
3929			po->tp_version = val;
3930			ret = 0;
3931		}
3932		release_sock(sk);
3933		return ret;
3934	}
3935	case PACKET_RESERVE:
3936	{
3937		unsigned int val;
3938
3939		if (optlen != sizeof(val))
3940			return -EINVAL;
3941		if (copy_from_sockptr(&val, optval, sizeof(val)))
3942			return -EFAULT;
3943		if (val > INT_MAX)
3944			return -EINVAL;
3945		lock_sock(sk);
3946		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3947			ret = -EBUSY;
3948		} else {
3949			po->tp_reserve = val;
3950			ret = 0;
3951		}
3952		release_sock(sk);
3953		return ret;
3954	}
3955	case PACKET_LOSS:
3956	{
3957		unsigned int val;
3958
3959		if (optlen != sizeof(val))
3960			return -EINVAL;
3961		if (copy_from_sockptr(&val, optval, sizeof(val)))
3962			return -EFAULT;
3963
3964		lock_sock(sk);
3965		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3966			ret = -EBUSY;
3967		} else {
3968			packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3969			ret = 0;
3970		}
3971		release_sock(sk);
3972		return ret;
3973	}
3974	case PACKET_AUXDATA:
3975	{
3976		int val;
3977
3978		if (optlen < sizeof(val))
3979			return -EINVAL;
3980		if (copy_from_sockptr(&val, optval, sizeof(val)))
3981			return -EFAULT;
3982
3983		packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3984		return 0;
3985	}
3986	case PACKET_ORIGDEV:
3987	{
3988		int val;
3989
3990		if (optlen < sizeof(val))
3991			return -EINVAL;
3992		if (copy_from_sockptr(&val, optval, sizeof(val)))
3993			return -EFAULT;
3994
3995		packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
3996		return 0;
3997	}
3998	case PACKET_VNET_HDR:
3999	case PACKET_VNET_HDR_SZ:
4000	{
4001		int val, hdr_len;
4002
4003		if (sock->type != SOCK_RAW)
4004			return -EINVAL;
4005		if (optlen < sizeof(val))
4006			return -EINVAL;
4007		if (copy_from_sockptr(&val, optval, sizeof(val)))
4008			return -EFAULT;
4009
4010		if (optname == PACKET_VNET_HDR_SZ) {
4011			if (val && val != sizeof(struct virtio_net_hdr) &&
4012			    val != sizeof(struct virtio_net_hdr_mrg_rxbuf))
4013				return -EINVAL;
4014			hdr_len = val;
4015		} else {
4016			hdr_len = val ? sizeof(struct virtio_net_hdr) : 0;
4017		}
4018		lock_sock(sk);
4019		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4020			ret = -EBUSY;
4021		} else {
4022			WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
4023			ret = 0;
4024		}
4025		release_sock(sk);
4026		return ret;
4027	}
4028	case PACKET_TIMESTAMP:
4029	{
4030		int val;
4031
4032		if (optlen != sizeof(val))
4033			return -EINVAL;
4034		if (copy_from_sockptr(&val, optval, sizeof(val)))
4035			return -EFAULT;
4036
4037		WRITE_ONCE(po->tp_tstamp, val);
4038		return 0;
4039	}
4040	case PACKET_FANOUT:
4041	{
4042		struct fanout_args args = { 0 };
4043
4044		if (optlen != sizeof(int) && optlen != sizeof(args))
4045			return -EINVAL;
4046		if (copy_from_sockptr(&args, optval, optlen))
4047			return -EFAULT;
4048
4049		return fanout_add(sk, &args);
4050	}
4051	case PACKET_FANOUT_DATA:
4052	{
4053		/* Paired with the WRITE_ONCE() in fanout_add() */
4054		if (!READ_ONCE(po->fanout))
4055			return -EINVAL;
4056
4057		return fanout_set_data(po, optval, optlen);
4058	}
4059	case PACKET_IGNORE_OUTGOING:
4060	{
4061		int val;
4062
4063		if (optlen != sizeof(val))
4064			return -EINVAL;
4065		if (copy_from_sockptr(&val, optval, sizeof(val)))
4066			return -EFAULT;
4067		if (val < 0 || val > 1)
4068			return -EINVAL;
4069
4070		WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
4071		return 0;
4072	}
4073	case PACKET_TX_HAS_OFF:
4074	{
4075		unsigned int val;
4076
4077		if (optlen != sizeof(val))
4078			return -EINVAL;
4079		if (copy_from_sockptr(&val, optval, sizeof(val)))
4080			return -EFAULT;
4081
4082		lock_sock(sk);
4083		if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4084			packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
4085
4086		release_sock(sk);
4087		return 0;
4088	}
4089	case PACKET_QDISC_BYPASS:
4090	{
4091		int val;
4092
4093		if (optlen != sizeof(val))
4094			return -EINVAL;
4095		if (copy_from_sockptr(&val, optval, sizeof(val)))
4096			return -EFAULT;
4097
4098		packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
4099		return 0;
4100	}
4101	default:
4102		return -ENOPROTOOPT;
4103	}
4104}
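/*
 * Several of the options above (PACKET_VERSION, PACKET_RESERVE,
 * PACKET_VNET_HDR/PACKET_VNET_HDR_SZ) return -EBUSY once a ring exists, so
 * they have to be configured before PACKET_RX_RING/PACKET_TX_RING.  A
 * userspace sketch of the usual ordering (sizes chosen purely for
 * illustration):
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size = 1 << 22,	// page aligned
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 1 << 11,	// multiple of TPACKET_ALIGNMENT
 *		.tp_frame_nr   = ((1 << 22) / (1 << 11)) * 64,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 */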
4105
4106static int packet_getsockopt(struct socket *sock, int level, int optname,
4107			     char __user *optval, int __user *optlen)
4108{
4109	int len;
4110	int val, lv = sizeof(val);
4111	struct sock *sk = sock->sk;
4112	struct packet_sock *po = pkt_sk(sk);
4113	void *data = &val;
4114	union tpacket_stats_u st;
4115	struct tpacket_rollover_stats rstats;
4116	int drops;
4117
4118	if (level != SOL_PACKET)
4119		return -ENOPROTOOPT;
4120
4121	if (get_user(len, optlen))
4122		return -EFAULT;
4123
4124	if (len < 0)
4125		return -EINVAL;
4126
4127	switch (optname) {
4128	case PACKET_STATISTICS:
4129		spin_lock_bh(&sk->sk_receive_queue.lock);
4130		memcpy(&st, &po->stats, sizeof(st));
4131		memset(&po->stats, 0, sizeof(po->stats));
4132		spin_unlock_bh(&sk->sk_receive_queue.lock);
4133		drops = atomic_xchg(&po->tp_drops, 0);
4134
4135		if (po->tp_version == TPACKET_V3) {
4136			lv = sizeof(struct tpacket_stats_v3);
4137			st.stats3.tp_drops = drops;
4138			st.stats3.tp_packets += drops;
4139			data = &st.stats3;
4140		} else {
4141			lv = sizeof(struct tpacket_stats);
4142			st.stats1.tp_drops = drops;
4143			st.stats1.tp_packets += drops;
4144			data = &st.stats1;
4145		}
4146
4147		break;
4148	case PACKET_AUXDATA:
4149		val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4150		break;
4151	case PACKET_ORIGDEV:
4152		val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4153		break;
4154	case PACKET_VNET_HDR:
4155		val = !!READ_ONCE(po->vnet_hdr_sz);
4156		break;
4157	case PACKET_VNET_HDR_SZ:
4158		val = READ_ONCE(po->vnet_hdr_sz);
4159		break;
4160	case PACKET_COPY_THRESH:
4161		val = READ_ONCE(pkt_sk(sk)->copy_thresh);
4162		break;
4163	case PACKET_VERSION:
4164		val = po->tp_version;
4165		break;
4166	case PACKET_HDRLEN:
4167		if (len > sizeof(int))
4168			len = sizeof(int);
4169		if (len < sizeof(int))
4170			return -EINVAL;
4171		if (copy_from_user(&val, optval, len))
4172			return -EFAULT;
4173		switch (val) {
4174		case TPACKET_V1:
4175			val = sizeof(struct tpacket_hdr);
4176			break;
4177		case TPACKET_V2:
4178			val = sizeof(struct tpacket2_hdr);
4179			break;
4180		case TPACKET_V3:
4181			val = sizeof(struct tpacket3_hdr);
4182			break;
4183		default:
4184			return -EINVAL;
4185		}
4186		break;
4187	case PACKET_RESERVE:
4188		val = po->tp_reserve;
4189		break;
4190	case PACKET_LOSS:
4191		val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4192		break;
4193	case PACKET_TIMESTAMP:
4194		val = READ_ONCE(po->tp_tstamp);
4195		break;
4196	case PACKET_FANOUT:
4197		val = (po->fanout ?
4198		       ((u32)po->fanout->id |
4199			((u32)po->fanout->type << 16) |
4200			((u32)po->fanout->flags << 24)) :
4201		       0);
4202		break;
4203	case PACKET_IGNORE_OUTGOING:
4204		val = READ_ONCE(po->prot_hook.ignore_outgoing);
4205		break;
4206	case PACKET_ROLLOVER_STATS:
4207		if (!po->rollover)
4208			return -EINVAL;
4209		rstats.tp_all = atomic_long_read(&po->rollover->num);
4210		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4211		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4212		data = &rstats;
4213		lv = sizeof(rstats);
4214		break;
4215	case PACKET_TX_HAS_OFF:
4216		val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4217		break;
4218	case PACKET_QDISC_BYPASS:
4219		val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
4220		break;
4221	default:
4222		return -ENOPROTOOPT;
4223	}
4224
4225	if (len > lv)
4226		len = lv;
4227	if (put_user(len, optlen))
4228		return -EFAULT;
4229	if (copy_to_user(optval, data, len))
4230		return -EFAULT;
4231	return 0;
4232}
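/*
 * PACKET_STATISTICS above is read-and-reset: the counters are copied and
 * cleared under the receive queue lock, and tp_packets has the dropped
 * frames folded in.  Reading it from userspace is just (TPACKET_V1/V2
 * layout shown for illustration):
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 */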
4233
4234static int packet_notifier(struct notifier_block *this,
4235			   unsigned long msg, void *ptr)
4236{
4237	struct sock *sk;
4238	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4239	struct net *net = dev_net(dev);
4240
4241	rcu_read_lock();
4242	sk_for_each_rcu(sk, &net->packet.sklist) {
4243		struct packet_sock *po = pkt_sk(sk);
4244
4245		switch (msg) {
4246		case NETDEV_UNREGISTER:
4247			if (po->mclist)
4248				packet_dev_mclist_delete(dev, &po->mclist);
4249			fallthrough;
4250
4251		case NETDEV_DOWN:
4252			if (dev->ifindex == po->ifindex) {
4253				spin_lock(&po->bind_lock);
4254				if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
4255					__unregister_prot_hook(sk, false);
4256					sk->sk_err = ENETDOWN;
4257					if (!sock_flag(sk, SOCK_DEAD))
4258						sk_error_report(sk);
4259				}
4260				if (msg == NETDEV_UNREGISTER) {
4261					packet_cached_dev_reset(po);
4262					WRITE_ONCE(po->ifindex, -1);
4263					netdev_put(po->prot_hook.dev,
4264						   &po->prot_hook.dev_tracker);
4265					po->prot_hook.dev = NULL;
4266				}
4267				spin_unlock(&po->bind_lock);
4268			}
4269			break;
4270		case NETDEV_UP:
4271			if (dev->ifindex == po->ifindex) {
4272				spin_lock(&po->bind_lock);
4273				if (po->num)
4274					register_prot_hook(sk);
4275				spin_unlock(&po->bind_lock);
4276			}
4277			break;
4278		}
4279	}
4280	rcu_read_unlock();
4281	return NOTIFY_DONE;
4282}
4283
4284
4285static int packet_ioctl(struct socket *sock, unsigned int cmd,
4286			unsigned long arg)
4287{
4288	struct sock *sk = sock->sk;
4289
4290	switch (cmd) {
4291	case SIOCOUTQ:
4292	{
4293		int amount = sk_wmem_alloc_get(sk);
4294
4295		return put_user(amount, (int __user *)arg);
4296	}
4297	case SIOCINQ:
4298	{
4299		struct sk_buff *skb;
4300		int amount = 0;
4301
4302		spin_lock_bh(&sk->sk_receive_queue.lock);
4303		skb = skb_peek(&sk->sk_receive_queue);
4304		if (skb)
4305			amount = skb->len;
4306		spin_unlock_bh(&sk->sk_receive_queue.lock);
4307		return put_user(amount, (int __user *)arg);
4308	}
4309#ifdef CONFIG_INET
4310	case SIOCADDRT:
4311	case SIOCDELRT:
4312	case SIOCDARP:
4313	case SIOCGARP:
4314	case SIOCSARP:
4315	case SIOCGIFADDR:
4316	case SIOCSIFADDR:
4317	case SIOCGIFBRDADDR:
4318	case SIOCSIFBRDADDR:
4319	case SIOCGIFNETMASK:
4320	case SIOCSIFNETMASK:
4321	case SIOCGIFDSTADDR:
4322	case SIOCSIFDSTADDR:
4323	case SIOCSIFFLAGS:
4324		return inet_dgram_ops.ioctl(sock, cmd, arg);
4325#endif
4326
4327	default:
4328		return -ENOIOCTLCMD;
4329	}
4330	return 0;
4331}
4332
4333static __poll_t packet_poll(struct file *file, struct socket *sock,
4334				poll_table *wait)
4335{
4336	struct sock *sk = sock->sk;
4337	struct packet_sock *po = pkt_sk(sk);
4338	__poll_t mask = datagram_poll(file, sock, wait);
4339
4340	spin_lock_bh(&sk->sk_receive_queue.lock);
4341	if (po->rx_ring.pg_vec) {
4342		if (!packet_previous_rx_frame(po, &po->rx_ring,
4343			TP_STATUS_KERNEL))
4344			mask |= EPOLLIN | EPOLLRDNORM;
4345	}
4346	packet_rcv_try_clear_pressure(po);
4347	spin_unlock_bh(&sk->sk_receive_queue.lock);
4348	spin_lock_bh(&sk->sk_write_queue.lock);
4349	if (po->tx_ring.pg_vec) {
4350		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4351			mask |= EPOLLOUT | EPOLLWRNORM;
4352	}
4353	spin_unlock_bh(&sk->sk_write_queue.lock);
4354	return mask;
4355}
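/*
 * With a ring mapped, poll readiness comes from the ring itself: EPOLLIN is
 * raised when the most recently filled rx frame (the one just behind
 * rx_ring.head) is no longer TP_STATUS_KERNEL, i.e. it has been handed to
 * userspace, and EPOLLOUT when the current tx frame is TP_STATUS_AVAILABLE.
 * Sockets without rings keep plain datagram_poll() semantics.
 */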
4356
4357
4358/* Dirty? Well, I still did not learn a better way to account
4359 * for user mmaps.
4360 */
4361
4362static void packet_mm_open(struct vm_area_struct *vma)
4363{
4364	struct file *file = vma->vm_file;
4365	struct socket *sock = file->private_data;
4366	struct sock *sk = sock->sk;
4367
4368	if (sk)
4369		atomic_long_inc(&pkt_sk(sk)->mapped);
4370}
4371
4372static void packet_mm_close(struct vm_area_struct *vma)
4373{
4374	struct file *file = vma->vm_file;
4375	struct socket *sock = file->private_data;
4376	struct sock *sk = sock->sk;
4377
4378	if (sk)
4379		atomic_long_dec(&pkt_sk(sk)->mapped);
4380}
4381
4382static const struct vm_operations_struct packet_mmap_ops = {
4383	.open	=	packet_mm_open,
4384	.close	=	packet_mm_close,
4385};
4386
4387static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4388			unsigned int len)
4389{
4390	int i;
4391
4392	for (i = 0; i < len; i++) {
4393		if (likely(pg_vec[i].buffer)) {
4394			if (is_vmalloc_addr(pg_vec[i].buffer))
4395				vfree(pg_vec[i].buffer);
4396			else
4397				free_pages((unsigned long)pg_vec[i].buffer,
4398					   order);
4399			pg_vec[i].buffer = NULL;
4400		}
4401	}
4402	kfree(pg_vec);
4403}
4404
4405static char *alloc_one_pg_vec_page(unsigned long order)
4406{
4407	char *buffer;
4408	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4409			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4410
4411	buffer = (char *) __get_free_pages(gfp_flags, order);
4412	if (buffer)
4413		return buffer;
4414
4415	/* __get_free_pages failed, fall back to vmalloc */
4416	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4417	if (buffer)
4418		return buffer;
4419
4420	/* vmalloc failed, let's dig into swap here */
4421	gfp_flags &= ~__GFP_NORETRY;
4422	buffer = (char *) __get_free_pages(gfp_flags, order);
4423	if (buffer)
4424		return buffer;
4425
4426	/* complete and utter failure */
4427	return NULL;
4428}
4429
4430static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4431{
4432	unsigned int block_nr = req->tp_block_nr;
4433	struct pgv *pg_vec;
4434	int i;
4435
4436	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4437	if (unlikely(!pg_vec))
4438		goto out;
4439
4440	for (i = 0; i < block_nr; i++) {
4441		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4442		if (unlikely(!pg_vec[i].buffer))
4443			goto out_free_pgvec;
4444	}
4445
4446out:
4447	return pg_vec;
4448
4449out_free_pgvec:
4450	free_pg_vec(pg_vec, order, block_nr);
4451	pg_vec = NULL;
4452	goto out;
4453}
4454
4455static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4456		int closing, int tx_ring)
4457{
4458	struct pgv *pg_vec = NULL;
4459	struct packet_sock *po = pkt_sk(sk);
4460	unsigned long *rx_owner_map = NULL;
4461	int was_running, order = 0;
4462	struct packet_ring_buffer *rb;
4463	struct sk_buff_head *rb_queue;
4464	__be16 num;
4465	int err;
4466	/* Added to keep code churn minimal */
4467	struct tpacket_req *req = &req_u->req;
4468
4469	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4470	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4471
4472	err = -EBUSY;
4473	if (!closing) {
4474		if (atomic_long_read(&po->mapped))
4475			goto out;
4476		if (packet_read_pending(rb))
4477			goto out;
4478	}
4479
4480	if (req->tp_block_nr) {
4481		unsigned int min_frame_size;
4482
4483		/* Sanity tests and some calculations */
4484		err = -EBUSY;
4485		if (unlikely(rb->pg_vec))
4486			goto out;
4487
4488		switch (po->tp_version) {
4489		case TPACKET_V1:
4490			po->tp_hdrlen = TPACKET_HDRLEN;
4491			break;
4492		case TPACKET_V2:
4493			po->tp_hdrlen = TPACKET2_HDRLEN;
4494			break;
4495		case TPACKET_V3:
4496			po->tp_hdrlen = TPACKET3_HDRLEN;
4497			break;
4498		}
4499
4500		err = -EINVAL;
4501		if (unlikely((int)req->tp_block_size <= 0))
4502			goto out;
4503		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4504			goto out;
4505		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4506		if (po->tp_version >= TPACKET_V3 &&
4507		    req->tp_block_size <
4508		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4509			goto out;
4510		if (unlikely(req->tp_frame_size < min_frame_size))
4511			goto out;
4512		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4513			goto out;
4514
4515		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4516		if (unlikely(rb->frames_per_block == 0))
4517			goto out;
4518		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4519			goto out;
4520		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4521					req->tp_frame_nr))
4522			goto out;
4523
4524		err = -ENOMEM;
4525		order = get_order(req->tp_block_size);
4526		pg_vec = alloc_pg_vec(req, order);
4527		if (unlikely(!pg_vec))
4528			goto out;
4529		switch (po->tp_version) {
4530		case TPACKET_V3:
4531			/* Block transmit is not supported yet */
4532			if (!tx_ring) {
4533				init_prb_bdqc(po, rb, pg_vec, req_u);
4534			} else {
4535				struct tpacket_req3 *req3 = &req_u->req3;
4536
4537				if (req3->tp_retire_blk_tov ||
4538				    req3->tp_sizeof_priv ||
4539				    req3->tp_feature_req_word) {
4540					err = -EINVAL;
4541					goto out_free_pg_vec;
4542				}
4543			}
4544			break;
4545		default:
4546			if (!tx_ring) {
4547				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4548					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4549				if (!rx_owner_map)
4550					goto out_free_pg_vec;
4551			}
4552			break;
4553		}
4554	}
4555	/* Done */
4556	else {
4557		err = -EINVAL;
4558		if (unlikely(req->tp_frame_nr))
4559			goto out;
4560	}
4561
4562
4563	/* Detach socket from network */
4564	spin_lock(&po->bind_lock);
4565	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
4566	num = po->num;
4567	if (was_running) {
4568		WRITE_ONCE(po->num, 0);
4569		__unregister_prot_hook(sk, false);
4570	}
4571	spin_unlock(&po->bind_lock);
4572
4573	synchronize_net();
4574
4575	err = -EBUSY;
4576	mutex_lock(&po->pg_vec_lock);
4577	if (closing || atomic_long_read(&po->mapped) == 0) {
4578		err = 0;
4579		spin_lock_bh(&rb_queue->lock);
4580		swap(rb->pg_vec, pg_vec);
4581		if (po->tp_version <= TPACKET_V2)
4582			swap(rb->rx_owner_map, rx_owner_map);
4583		rb->frame_max = (req->tp_frame_nr - 1);
4584		rb->head = 0;
4585		rb->frame_size = req->tp_frame_size;
4586		spin_unlock_bh(&rb_queue->lock);
4587
4588		swap(rb->pg_vec_order, order);
4589		swap(rb->pg_vec_len, req->tp_block_nr);
4590
4591		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4592		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4593						tpacket_rcv : packet_rcv;
4594		skb_queue_purge(rb_queue);
4595		if (atomic_long_read(&po->mapped))
4596			pr_err("packet_mmap: vma is busy: %ld\n",
4597			       atomic_long_read(&po->mapped));
4598	}
4599	mutex_unlock(&po->pg_vec_lock);
4600
4601	spin_lock(&po->bind_lock);
4602	if (was_running) {
4603		WRITE_ONCE(po->num, num);
4604		register_prot_hook(sk);
4605	}
4606	spin_unlock(&po->bind_lock);
4607	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4608		/* Because we don't support block-based V3 on tx-ring */
4609		if (!tx_ring)
4610			prb_shutdown_retire_blk_timer(po, rb_queue);
4611	}
4612
4613out_free_pg_vec:
4614	if (pg_vec) {
4615		bitmap_free(rx_owner_map);
4616		free_pg_vec(pg_vec, order, req->tp_block_nr);
4617	}
4618out:
4619	return err;
4620}
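/*
 * The sanity checks in packet_set_ring() pin the ring geometry: a positive,
 * page-aligned tp_block_size, a tp_frame_size that is TPACKET_ALIGNMENT
 * aligned and at least tp_hdrlen + tp_reserve, and
 * tp_frame_nr == (tp_block_size / tp_frame_size) * tp_block_nr.
 * One geometry that satisfies them on 4K pages with the default tp_reserve
 * (illustrative only): tp_block_size = 4096, tp_frame_size = 2048,
 * tp_block_nr = 8, tp_frame_nr = 16.
 */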
4621
4622static int packet_mmap(struct file *file, struct socket *sock,
4623		struct vm_area_struct *vma)
4624{
4625	struct sock *sk = sock->sk;
4626	struct packet_sock *po = pkt_sk(sk);
4627	unsigned long size, expected_size;
4628	struct packet_ring_buffer *rb;
4629	unsigned long start;
4630	int err = -EINVAL;
4631	int i;
4632
4633	if (vma->vm_pgoff)
4634		return -EINVAL;
4635
4636	mutex_lock(&po->pg_vec_lock);
4637
4638	expected_size = 0;
4639	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4640		if (rb->pg_vec) {
4641			expected_size += rb->pg_vec_len
4642						* rb->pg_vec_pages
4643						* PAGE_SIZE;
4644		}
4645	}
4646
4647	if (expected_size == 0)
4648		goto out;
4649
4650	size = vma->vm_end - vma->vm_start;
4651	if (size != expected_size)
4652		goto out;
4653
4654	start = vma->vm_start;
4655	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4656		if (rb->pg_vec == NULL)
4657			continue;
4658
4659		for (i = 0; i < rb->pg_vec_len; i++) {
4660			struct page *page;
4661			void *kaddr = rb->pg_vec[i].buffer;
4662			int pg_num;
4663
4664			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4665				page = pgv_to_page(kaddr);
4666				err = vm_insert_page(vma, start, page);
4667				if (unlikely(err))
4668					goto out;
4669				start += PAGE_SIZE;
4670				kaddr += PAGE_SIZE;
4671			}
4672		}
4673	}
4674
4675	atomic_long_inc(&po->mapped);
4676	vma->vm_ops = &packet_mmap_ops;
4677	err = 0;
4678
4679out:
4680	mutex_unlock(&po->pg_vec_lock);
4681	return err;
4682}
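/*
 * packet_mmap() only accepts a single mapping starting at offset 0 whose
 * length equals the combined size of every configured ring (rx first, then
 * tx), so userspace maps both rings in one call.  Sketch (rx_size/tx_size
 * are the tp_block_size * tp_block_nr products set up via
 * PACKET_RX_RING/PACKET_TX_RING; illustrative only):
 *
 *	void *ring = mmap(NULL, rx_size + tx_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */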
4683
4684static const struct proto_ops packet_ops_spkt = {
4685	.family =	PF_PACKET,
4686	.owner =	THIS_MODULE,
4687	.release =	packet_release,
4688	.bind =		packet_bind_spkt,
4689	.connect =	sock_no_connect,
4690	.socketpair =	sock_no_socketpair,
4691	.accept =	sock_no_accept,
4692	.getname =	packet_getname_spkt,
4693	.poll =		datagram_poll,
4694	.ioctl =	packet_ioctl,
4695	.gettstamp =	sock_gettstamp,
4696	.listen =	sock_no_listen,
4697	.shutdown =	sock_no_shutdown,
4698	.sendmsg =	packet_sendmsg_spkt,
4699	.recvmsg =	packet_recvmsg,
4700	.mmap =		sock_no_mmap,
4701};
4702
4703static const struct proto_ops packet_ops = {
4704	.family =	PF_PACKET,
4705	.owner =	THIS_MODULE,
4706	.release =	packet_release,
4707	.bind =		packet_bind,
4708	.connect =	sock_no_connect,
4709	.socketpair =	sock_no_socketpair,
4710	.accept =	sock_no_accept,
4711	.getname =	packet_getname,
4712	.poll =		packet_poll,
4713	.ioctl =	packet_ioctl,
4714	.gettstamp =	sock_gettstamp,
4715	.listen =	sock_no_listen,
4716	.shutdown =	sock_no_shutdown,
4717	.setsockopt =	packet_setsockopt,
4718	.getsockopt =	packet_getsockopt,
4719	.sendmsg =	packet_sendmsg,
4720	.recvmsg =	packet_recvmsg,
4721	.mmap =		packet_mmap,
4722};
4723
4724static const struct net_proto_family packet_family_ops = {
4725	.family =	PF_PACKET,
4726	.create =	packet_create,
4727	.owner	=	THIS_MODULE,
4728};
4729
4730static struct notifier_block packet_netdev_notifier = {
4731	.notifier_call =	packet_notifier,
4732};
4733
4734#ifdef CONFIG_PROC_FS
4735
4736static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4737	__acquires(RCU)
4738{
4739	struct net *net = seq_file_net(seq);
4740
4741	rcu_read_lock();
4742	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4743}
4744
4745static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4746{
4747	struct net *net = seq_file_net(seq);
4748	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4749}
4750
4751static void packet_seq_stop(struct seq_file *seq, void *v)
4752	__releases(RCU)
4753{
4754	rcu_read_unlock();
4755}
4756
4757static int packet_seq_show(struct seq_file *seq, void *v)
4758{
4759	if (v == SEQ_START_TOKEN)
4760		seq_printf(seq,
4761			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
4762			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4763	else {
4764		struct sock *s = sk_entry(v);
4765		const struct packet_sock *po = pkt_sk(s);
4766
4767		seq_printf(seq,
4768			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4769			   s,
4770			   refcount_read(&s->sk_refcnt),
4771			   s->sk_type,
4772			   ntohs(READ_ONCE(po->num)),
4773			   READ_ONCE(po->ifindex),
4774			   packet_sock_flag(po, PACKET_SOCK_RUNNING),
4775			   atomic_read(&s->sk_rmem_alloc),
4776			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4777			   sock_i_ino(s));
4778	}
4779
4780	return 0;
4781}
4782
4783static const struct seq_operations packet_seq_ops = {
4784	.start	= packet_seq_start,
4785	.next	= packet_seq_next,
4786	.stop	= packet_seq_stop,
4787	.show	= packet_seq_show,
4788};
4789#endif
4790
4791static int __net_init packet_net_init(struct net *net)
4792{
4793	mutex_init(&net->packet.sklist_lock);
4794	INIT_HLIST_HEAD(&net->packet.sklist);
4795
4796#ifdef CONFIG_PROC_FS
4797	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4798			sizeof(struct seq_net_private)))
4799		return -ENOMEM;
4800#endif /* CONFIG_PROC_FS */
4801
4802	return 0;
4803}
4804
4805static void __net_exit packet_net_exit(struct net *net)
4806{
4807	remove_proc_entry("packet", net->proc_net);
4808	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4809}
4810
4811static struct pernet_operations packet_net_ops = {
4812	.init = packet_net_init,
4813	.exit = packet_net_exit,
4814};
4815
4816
4817static void __exit packet_exit(void)
4818{
4819	sock_unregister(PF_PACKET);
4820	proto_unregister(&packet_proto);
4821	unregister_netdevice_notifier(&packet_netdev_notifier);
4822	unregister_pernet_subsys(&packet_net_ops);
4823}
4824
4825static int __init packet_init(void)
4826{
4827	int rc;
4828
4829	rc = register_pernet_subsys(&packet_net_ops);
4830	if (rc)
4831		goto out;
4832	rc = register_netdevice_notifier(&packet_netdev_notifier);
4833	if (rc)
4834		goto out_pernet;
4835	rc = proto_register(&packet_proto, 0);
4836	if (rc)
4837		goto out_notifier;
4838	rc = sock_register(&packet_family_ops);
4839	if (rc)
4840		goto out_proto;
4841
4842	return 0;
4843
4844out_proto:
4845	proto_unregister(&packet_proto);
4846out_notifier:
4847	unregister_netdevice_notifier(&packet_netdev_notifier);
4848out_pernet:
4849	unregister_pernet_subsys(&packet_net_ops);
4850out:
4851	return rc;
4852}
4853
4854module_init(packet_init);
4855module_exit(packet_exit);
4856MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
4857MODULE_LICENSE("GPL");
4858MODULE_ALIAS_NETPROTO(PF_PACKET);
 
 
  49#include <linux/ethtool.h>
 
  50#include <linux/types.h>
  51#include <linux/mm.h>
  52#include <linux/capability.h>
  53#include <linux/fcntl.h>
  54#include <linux/socket.h>
  55#include <linux/in.h>
  56#include <linux/inet.h>
  57#include <linux/netdevice.h>
  58#include <linux/if_packet.h>
  59#include <linux/wireless.h>
  60#include <linux/kernel.h>
  61#include <linux/kmod.h>
  62#include <linux/slab.h>
  63#include <linux/vmalloc.h>
  64#include <net/net_namespace.h>
  65#include <net/ip.h>
  66#include <net/protocol.h>
  67#include <linux/skbuff.h>
  68#include <net/sock.h>
  69#include <linux/errno.h>
  70#include <linux/timer.h>
  71#include <linux/uaccess.h>
  72#include <asm/ioctls.h>
  73#include <asm/page.h>
  74#include <asm/cacheflush.h>
  75#include <asm/io.h>
  76#include <linux/proc_fs.h>
  77#include <linux/seq_file.h>
  78#include <linux/poll.h>
  79#include <linux/module.h>
  80#include <linux/init.h>
  81#include <linux/mutex.h>
  82#include <linux/if_vlan.h>
  83#include <linux/virtio_net.h>
  84#include <linux/errqueue.h>
  85#include <linux/net_tstamp.h>
  86#include <linux/percpu.h>
  87#ifdef CONFIG_INET
  88#include <net/inet_common.h>
  89#endif
  90#include <linux/bpf.h>
  91#include <net/compat.h>
 
  92
  93#include "internal.h"
  94
  95/*
  96   Assumptions:
  97   - If the device has no dev->header_ops->create, there is no LL header
  98     visible above the device. In this case, its hard_header_len should be 0.
  99     The device may prepend its own header internally. In this case, its
 100     needed_headroom should be set to the space needed for it to add its
 101     internal header.
 102     For example, a WiFi driver pretending to be an Ethernet driver should
 103     set its hard_header_len to be the Ethernet header length, and set its
 104     needed_headroom to be (the real WiFi header length - the fake Ethernet
 105     header length).
 106   - packet socket receives packets with pulled ll header,
 107     so that SOCK_RAW should push it back.
 108
 109On receive:
 110-----------
 111
 112Incoming, dev_has_header(dev) == true
 113   mac_header -> ll header
 114   data       -> data
 115
 116Outgoing, dev_has_header(dev) == true
 117   mac_header -> ll header
 118   data       -> ll header
 119
 120Incoming, dev_has_header(dev) == false
 121   mac_header -> data
 122     However drivers often make it point to the ll header.
 123     This is incorrect because the ll header should be invisible to us.
 124   data       -> data
 125
 126Outgoing, dev_has_header(dev) == false
 127   mac_header -> data. ll header is invisible to us.
 128   data       -> data
 129
 130Resume
 131  If dev_has_header(dev) == false we are unable to restore the ll header,
 132    because it is invisible to us.
 133
 134
 135On transmit:
 136------------
 137
 138dev_has_header(dev) == true
 139   mac_header -> ll header
 140   data       -> ll header
 141
 142dev_has_header(dev) == false (ll header is invisible to us)
 143   mac_header -> data
 144   data       -> data
 145
 146   We should set network_header on output to the correct position,
 147   packet classifier depends on it.
 148 */
 149
 150/* Private packet socket structures. */
 151
 152/* identical to struct packet_mreq except it has
 153 * a longer address field.
 154 */
 155struct packet_mreq_max {
 156	int		mr_ifindex;
 157	unsigned short	mr_type;
 158	unsigned short	mr_alen;
 159	unsigned char	mr_address[MAX_ADDR_LEN];
 160};
 161
 162union tpacket_uhdr {
 163	struct tpacket_hdr  *h1;
 164	struct tpacket2_hdr *h2;
 165	struct tpacket3_hdr *h3;
 166	void *raw;
 167};
 168
 169static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 170		int closing, int tx_ring);
 171
 172#define V3_ALIGNMENT	(8)
 173
 174#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 175
 176#define BLK_PLUS_PRIV(sz_of_priv) \
 177	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 178
 179#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 180#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 181#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 182#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 183#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 184#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 185
 186struct packet_sock;
 187static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 188		       struct packet_type *pt, struct net_device *orig_dev);
 189
 190static void *packet_previous_frame(struct packet_sock *po,
 191		struct packet_ring_buffer *rb,
 192		int status);
 193static void packet_increment_head(struct packet_ring_buffer *buff);
 194static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 195static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 196			struct packet_sock *);
 197static void prb_retire_current_block(struct tpacket_kbdq_core *,
 198		struct packet_sock *, unsigned int status);
 199static int prb_queue_frozen(struct tpacket_kbdq_core *);
 200static void prb_open_block(struct tpacket_kbdq_core *,
 201		struct tpacket_block_desc *);
 202static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 203static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 204static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 205static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 206		struct tpacket3_hdr *);
 207static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 208		struct tpacket3_hdr *);
 209static void packet_flush_mclist(struct sock *sk);
 210static u16 packet_pick_tx_queue(struct sk_buff *skb);
 211
 212struct packet_skb_cb {
 213	union {
 214		struct sockaddr_pkt pkt;
 215		union {
 216			/* Trick: alias skb original length with
 217			 * ll.sll_family and ll.protocol in order
 218			 * to save room.
 219			 */
 220			unsigned int origlen;
 221			struct sockaddr_ll ll;
 222		};
 223	} sa;
 224};
 225
 226#define vio_le() virtio_legacy_is_little_endian()
 227
 228#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 229
 230#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 231#define GET_PBLOCK_DESC(x, bid)	\
 232	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 233#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 234	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 235#define GET_NEXT_PRB_BLK_NUM(x) \
 236	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 237	((x)->kactive_blk_num+1) : 0)
 238
 239static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 240static void __fanout_link(struct sock *sk, struct packet_sock *po);
 241
 242static int packet_direct_xmit(struct sk_buff *skb)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 243{
 
 
 
 
 
 
 
 
 
 
 244	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 245}
 246
 247static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 248{
 249	struct net_device *dev;
 250
 251	rcu_read_lock();
 252	dev = rcu_dereference(po->cached_dev);
 253	if (likely(dev))
 254		dev_hold(dev);
 255	rcu_read_unlock();
 256
 257	return dev;
 258}
 259
 260static void packet_cached_dev_assign(struct packet_sock *po,
 261				     struct net_device *dev)
 262{
 263	rcu_assign_pointer(po->cached_dev, dev);
 264}
 265
 266static void packet_cached_dev_reset(struct packet_sock *po)
 267{
 268	RCU_INIT_POINTER(po->cached_dev, NULL);
 269}
 270
 271static bool packet_use_direct_xmit(const struct packet_sock *po)
 272{
 273	return po->xmit == packet_direct_xmit;
 274}
 275
 276static u16 packet_pick_tx_queue(struct sk_buff *skb)
 277{
 278	struct net_device *dev = skb->dev;
 279	const struct net_device_ops *ops = dev->netdev_ops;
 280	int cpu = raw_smp_processor_id();
 281	u16 queue_index;
 282
 283#ifdef CONFIG_XPS
 284	skb->sender_cpu = cpu + 1;
 285#endif
 286	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 287	if (ops->ndo_select_queue) {
 288		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 289		queue_index = netdev_cap_txqueue(dev, queue_index);
 290	} else {
 291		queue_index = netdev_pick_tx(dev, skb, NULL);
 292	}
 293
 294	return queue_index;
 295}
 296
 297/* __register_prot_hook must be invoked through register_prot_hook
 298 * or from a context in which asynchronous accesses to the packet
 299 * socket is not possible (packet_create()).
 300 */
 301static void __register_prot_hook(struct sock *sk)
 302{
 303	struct packet_sock *po = pkt_sk(sk);
 304
 305	if (!po->running) {
 306		if (po->fanout)
 307			__fanout_link(sk, po);
 308		else
 309			dev_add_pack(&po->prot_hook);
 310
 311		sock_hold(sk);
 312		po->running = 1;
 313	}
 314}
 315
 316static void register_prot_hook(struct sock *sk)
 317{
 318	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 319	__register_prot_hook(sk);
 320}
 321
 322/* If the sync parameter is true, we will temporarily drop
 323 * the po->bind_lock and do a synchronize_net to make sure no
 324 * asynchronous packet processing paths still refer to the elements
 325 * of po->prot_hook.  If the sync parameter is false, it is the
 326 * callers responsibility to take care of this.
 327 */
 328static void __unregister_prot_hook(struct sock *sk, bool sync)
 329{
 330	struct packet_sock *po = pkt_sk(sk);
 331
 332	lockdep_assert_held_once(&po->bind_lock);
 333
 334	po->running = 0;
 335
 336	if (po->fanout)
 337		__fanout_unlink(sk, po);
 338	else
 339		__dev_remove_pack(&po->prot_hook);
 340
 341	__sock_put(sk);
 342
 343	if (sync) {
 344		spin_unlock(&po->bind_lock);
 345		synchronize_net();
 346		spin_lock(&po->bind_lock);
 347	}
 348}
 349
 350static void unregister_prot_hook(struct sock *sk, bool sync)
 351{
 352	struct packet_sock *po = pkt_sk(sk);
 353
 354	if (po->running)
 355		__unregister_prot_hook(sk, sync);
 356}
 357
 358static inline struct page * __pure pgv_to_page(void *addr)
 359{
 360	if (is_vmalloc_addr(addr))
 361		return vmalloc_to_page(addr);
 362	return virt_to_page(addr);
 363}
 364
 365static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 366{
 367	union tpacket_uhdr h;
 368
 
 
 369	h.raw = frame;
 370	switch (po->tp_version) {
 371	case TPACKET_V1:
 372		h.h1->tp_status = status;
 373		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 374		break;
 375	case TPACKET_V2:
 376		h.h2->tp_status = status;
 377		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 378		break;
 379	case TPACKET_V3:
 380		h.h3->tp_status = status;
 381		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 382		break;
 383	default:
 384		WARN(1, "TPACKET version not supported.\n");
 385		BUG();
 386	}
 387
 388	smp_wmb();
 389}
 390
 391static int __packet_get_status(const struct packet_sock *po, void *frame)
 392{
 393	union tpacket_uhdr h;
 394
 395	smp_rmb();
 396
 
 
 397	h.raw = frame;
 398	switch (po->tp_version) {
 399	case TPACKET_V1:
 400		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 401		return h.h1->tp_status;
 402	case TPACKET_V2:
 403		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 404		return h.h2->tp_status;
 405	case TPACKET_V3:
 406		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 407		return h.h3->tp_status;
 408	default:
 409		WARN(1, "TPACKET version not supported.\n");
 410		BUG();
 411		return 0;
 412	}
 413}
 414
 415static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 416				   unsigned int flags)
 417{
 418	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 419
 420	if (shhwtstamps &&
 421	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 422	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 423		return TP_STATUS_TS_RAW_HARDWARE;
 424
 425	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
 426	    ktime_to_timespec64_cond(skb->tstamp, ts))
 427		return TP_STATUS_TS_SOFTWARE;
 428
 429	return 0;
 430}
 431
 432static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 433				    struct sk_buff *skb)
 434{
 435	union tpacket_uhdr h;
 436	struct timespec64 ts;
 437	__u32 ts_status;
 438
 439	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 440		return 0;
 441
 442	h.raw = frame;
 443	/*
 444	 * versions 1 through 3 overflow the timestamps in y2106, since they
 445	 * all store the seconds in a 32-bit unsigned integer.
 446	 * If we create a version 4, that should have a 64-bit timestamp,
 447	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
 448	 * nanoseconds.
 449	 */
 450	switch (po->tp_version) {
 451	case TPACKET_V1:
 452		h.h1->tp_sec = ts.tv_sec;
 453		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 454		break;
 455	case TPACKET_V2:
 456		h.h2->tp_sec = ts.tv_sec;
 457		h.h2->tp_nsec = ts.tv_nsec;
 458		break;
 459	case TPACKET_V3:
 460		h.h3->tp_sec = ts.tv_sec;
 461		h.h3->tp_nsec = ts.tv_nsec;
 462		break;
 463	default:
 464		WARN(1, "TPACKET version not supported.\n");
 465		BUG();
 466	}
 467
 468	/* one flush is safe, as both fields always lie on the same cacheline */
 469	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 470	smp_wmb();
 471
 472	return ts_status;
 473}
 474
 475static void *packet_lookup_frame(const struct packet_sock *po,
 476				 const struct packet_ring_buffer *rb,
 477				 unsigned int position,
 478				 int status)
 479{
 480	unsigned int pg_vec_pos, frame_offset;
 481	union tpacket_uhdr h;
 482
 483	pg_vec_pos = position / rb->frames_per_block;
 484	frame_offset = position % rb->frames_per_block;
 485
 486	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 487		(frame_offset * rb->frame_size);
 488
 489	if (status != __packet_get_status(po, h.raw))
 490		return NULL;
 491
 492	return h.raw;
 493}
 494
 495static void *packet_current_frame(struct packet_sock *po,
 496		struct packet_ring_buffer *rb,
 497		int status)
 498{
 499	return packet_lookup_frame(po, rb, rb->head, status);
 500}
 501
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 502static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 503{
 504	del_timer_sync(&pkc->retire_blk_timer);
 505}
 506
 507static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 508		struct sk_buff_head *rb_queue)
 509{
 510	struct tpacket_kbdq_core *pkc;
 511
 512	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 513
 514	spin_lock_bh(&rb_queue->lock);
 515	pkc->delete_blk_timer = 1;
 516	spin_unlock_bh(&rb_queue->lock);
 517
 518	prb_del_retire_blk_timer(pkc);
 519}
 520
 521static void prb_setup_retire_blk_timer(struct packet_sock *po)
 522{
 523	struct tpacket_kbdq_core *pkc;
 524
 525	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 526	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 527		    0);
 528	pkc->retire_blk_timer.expires = jiffies;
 529}
 530
 531static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 532				int blk_size_in_bytes)
 533{
 534	struct net_device *dev;
 535	unsigned int mbits, div;
 536	struct ethtool_link_ksettings ecmd;
 537	int err;
 538
 539	rtnl_lock();
 540	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 541	if (unlikely(!dev)) {
 542		rtnl_unlock();
 543		return DEFAULT_PRB_RETIRE_TOV;
 544	}
 545	err = __ethtool_get_link_ksettings(dev, &ecmd);
 546	rtnl_unlock();
 547	if (err)
 548		return DEFAULT_PRB_RETIRE_TOV;
 549
 550	/* If the link speed is so slow you don't really
 551	 * need to worry about perf anyways
 552	 */
 553	if (ecmd.base.speed < SPEED_1000 ||
 554	    ecmd.base.speed == SPEED_UNKNOWN)
 555		return DEFAULT_PRB_RETIRE_TOV;
 556
 557	div = ecmd.base.speed / 1000;
 558	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 559
 560	if (div)
 561		mbits /= div;
 562
 563	if (div)
 564		return mbits + 1;
 565	return mbits;
 566}
 567
 568static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 569			union tpacket_req_u *req_u)
 570{
 571	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 572}
 573
 574static void init_prb_bdqc(struct packet_sock *po,
 575			struct packet_ring_buffer *rb,
 576			struct pgv *pg_vec,
 577			union tpacket_req_u *req_u)
 578{
 579	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 580	struct tpacket_block_desc *pbd;
 581
 582	memset(p1, 0x0, sizeof(*p1));
 583
 584	p1->knxt_seq_num = 1;
 585	p1->pkbdq = pg_vec;
 586	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 587	p1->pkblk_start	= pg_vec[0].buffer;
 588	p1->kblk_size = req_u->req3.tp_block_size;
 589	p1->knum_blocks	= req_u->req3.tp_block_nr;
 590	p1->hdrlen = po->tp_hdrlen;
 591	p1->version = po->tp_version;
 592	p1->last_kactive_blk_num = 0;
 593	po->stats.stats3.tp_freeze_q_cnt = 0;
 594	if (req_u->req3.tp_retire_blk_tov)
 595		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 596	else
 597		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 598						req_u->req3.tp_block_size);
 599	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 600	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 601	rwlock_init(&p1->blk_fill_in_prog_lock);
 602
 603	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 604	prb_init_ft_ops(p1, req_u);
 605	prb_setup_retire_blk_timer(po);
 606	prb_open_block(p1, pbd);
 607}
 608
 609/*  Do NOT update the last_blk_num first.
 610 *  Assumes sk_buff_head lock is held.
 611 */
 612static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 613{
 614	mod_timer(&pkc->retire_blk_timer,
 615			jiffies + pkc->tov_in_jiffies);
 616	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 617}
 618
 619/*
 620 * Timer logic:
 621 * 1) We refresh the timer only when we open a block.
 622 *    By doing this we don't waste cycles refreshing the timer
 623 *	  on packet-by-packet basis.
 624 *
 625 * With a 1MB block-size, on a 1Gbps line, it will take
 626 * i) ~8 ms to fill a block + ii) memcpy etc.
 627 * In this cut we are not accounting for the memcpy time.
 628 *
 629 * So, if the user sets the 'tmo' to 10ms then the timer
 630 * will never fire while the block is still getting filled
 631 * (which is what we want). However, the user could choose
 632 * to close a block early and that's fine.
 633 *
 634 * But when the timer does fire, we check whether or not to refresh it.
 635 * Since the tmo granularity is in msecs, it is not too expensive
 636 * to refresh the timer, lets say every '8' msecs.
 637 * Either the user can set the 'tmo' or we can derive it based on
 638 * a) line-speed and b) block-size.
 639 * prb_calc_retire_blk_tmo() calculates the tmo.
 640 *
 641 */
 642static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 643{
 644	struct packet_sock *po =
 645		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 646	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 647	unsigned int frozen;
 648	struct tpacket_block_desc *pbd;
 649
 650	spin_lock(&po->sk.sk_receive_queue.lock);
 651
 652	frozen = prb_queue_frozen(pkc);
 653	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 654
 655	if (unlikely(pkc->delete_blk_timer))
 656		goto out;
 657
 658	/* We only need to plug the race when the block is partially filled.
 659	 * tpacket_rcv:
 660	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 661	 *		copy_bits() is in progress ...
 662	 *		timer fires on other cpu:
 663	 *		we can't retire the current block because copy_bits
 664	 *		is in progress.
 665	 *
 666	 */
 667	if (BLOCK_NUM_PKTS(pbd)) {
 668		/* Waiting for skb_copy_bits to finish... */
 669		write_lock(&pkc->blk_fill_in_prog_lock);
 670		write_unlock(&pkc->blk_fill_in_prog_lock);
 671	}
 672
 673	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 674		if (!frozen) {
 675			if (!BLOCK_NUM_PKTS(pbd)) {
 676				/* An empty block. Just refresh the timer. */
 677				goto refresh_timer;
 678			}
 679			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 680			if (!prb_dispatch_next_block(pkc, po))
 681				goto refresh_timer;
 682			else
 683				goto out;
 684		} else {
 685			/* Case 1. Queue was frozen because user-space was
 686			 *	   lagging behind.
 687			 */
 688			if (prb_curr_blk_in_use(pbd)) {
 689				/*
 690				 * Ok, user-space is still behind.
 691				 * So just refresh the timer.
 692				 */
 693				goto refresh_timer;
 694			} else {
 695			       /* Case 2. queue was frozen,user-space caught up,
 696				* now the link went idle && the timer fired.
 697				* We don't have a block to close.So we open this
 698				* block and restart the timer.
 699				* opening a block thaws the queue,restarts timer
 700				* Thawing/timer-refresh is a side effect.
 701				*/
 702				prb_open_block(pkc, pbd);
 703				goto out;
 704			}
 705		}
 706	}
 707
 708refresh_timer:
 709	_prb_refresh_rx_retire_blk_timer(pkc);
 710
 711out:
 712	spin_unlock(&po->sk.sk_receive_queue.lock);
 713}
 714
 715static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 716		struct tpacket_block_desc *pbd1, __u32 status)
 717{
 718	/* Flush everything minus the block header */
 719
 720#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 721	u8 *start, *end;
 722
 723	start = (u8 *)pbd1;
 724
 725	/* Skip the block header(we know header WILL fit in 4K) */
 726	start += PAGE_SIZE;
 727
 728	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 729	for (; start < end; start += PAGE_SIZE)
 730		flush_dcache_page(pgv_to_page(start));
 731
 732	smp_wmb();
 733#endif
 734
 735	/* Now update the block status. */
 736
 737	BLOCK_STATUS(pbd1) = status;
 738
 739	/* Flush the block header */
 740
 741#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 742	start = (u8 *)pbd1;
 743	flush_dcache_page(pgv_to_page(start));
 744
 745	smp_wmb();
 746#endif
 747}
 748
 749/*
 750 * Side effect:
 751 *
 752 * 1) flush the block
 753 * 2) Increment active_blk_num
 754 *
 755 * Note:We DONT refresh the timer on purpose.
 756 *	Because almost always the next block will be opened.
 757 */
 758static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 759		struct tpacket_block_desc *pbd1,
 760		struct packet_sock *po, unsigned int stat)
 761{
 762	__u32 status = TP_STATUS_USER | stat;
 763
 764	struct tpacket3_hdr *last_pkt;
 765	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 766	struct sock *sk = &po->sk;
 767
 768	if (atomic_read(&po->tp_drops))
 769		status |= TP_STATUS_LOSING;
 770
 771	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 772	last_pkt->tp_next_offset = 0;
 773
 774	/* Get the ts of the last pkt */
 775	if (BLOCK_NUM_PKTS(pbd1)) {
 776		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 777		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 778	} else {
 779		/* Ok, we tmo'd - so get the current time.
 780		 *
 781		 * It shouldn't really happen as we don't close empty
 782		 * blocks. See prb_retire_rx_blk_timer_expired().
 783		 */
 784		struct timespec64 ts;
 785		ktime_get_real_ts64(&ts);
 786		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 787		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 788	}
 789
 790	smp_wmb();
 791
 792	/* Flush the block */
 793	prb_flush_block(pkc1, pbd1, status);
 794
 795	sk->sk_data_ready(sk);
 796
 797	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 798}
 799
 800static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 801{
 802	pkc->reset_pending_on_curr_blk = 0;
 803}
 804
 805/*
 806 * Side effect of opening a block:
 807 *
 808 * 1) prb_queue is thawed.
 809 * 2) retire_blk_timer is refreshed.
 810 *
 811 */
 812static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 813	struct tpacket_block_desc *pbd1)
 814{
 815	struct timespec64 ts;
 816	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 817
 818	smp_rmb();
 819
 820	/* We could have just memset this but we will lose the
 821	 * flexibility of making the priv area sticky
 822	 */
 823
 824	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 825	BLOCK_NUM_PKTS(pbd1) = 0;
 826	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 827
 828	ktime_get_real_ts64(&ts);
 829
 830	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 831	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 832
 833	pkc1->pkblk_start = (char *)pbd1;
 834	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 835
 836	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 837	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 838
 839	pbd1->version = pkc1->version;
 840	pkc1->prev = pkc1->nxt_offset;
 841	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 842
 843	prb_thaw_queue(pkc1);
 844	_prb_refresh_rx_retire_blk_timer(pkc1);
 845
 846	smp_wmb();
 847}
 848
 849/*
 850 * Queue freeze logic:
 851 * 1) Assume tp_block_nr = 8 blocks.
 852 * 2) At time 't0', user opens Rx ring.
 853 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 854 * 4) user-space is either sleeping or processing block '0'.
 855 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 856 *    it will close block-7,loop around and try to fill block '0'.
 857 *    call-flow:
 858 *    __packet_lookup_frame_in_block
 859 *      prb_retire_current_block()
 860 *      prb_dispatch_next_block()
 861 *        |->(BLOCK_STATUS == USER) evaluates to true
 862 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 863 * 6) Now there are two cases:
 864 *    6.1) Link goes idle right after the queue is frozen.
 865 *         But remember, the last open_block() refreshed the timer.
 866 *         When this timer expires,it will refresh itself so that we can
 867 *         re-open block-0 in near future.
 868 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 869 *         case and __packet_lookup_frame_in_block will check if block-0
 870 *         is free and can now be re-used.
 871 */
 872static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 873				  struct packet_sock *po)
 874{
 875	pkc->reset_pending_on_curr_blk = 1;
 876	po->stats.stats3.tp_freeze_q_cnt++;
 877}
 878
 879#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 880
 881/*
 882 * If the next block is free then we will dispatch it
 883 * and return a good offset.
 884 * Else, we will freeze the queue.
 885 * So, caller must check the return value.
 886 */
 887static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 888		struct packet_sock *po)
 889{
 890	struct tpacket_block_desc *pbd;
 891
 892	smp_rmb();
 893
 894	/* 1. Get current block num */
 895	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 896
 897	/* 2. If this block is currently in_use then freeze the queue */
 898	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 899		prb_freeze_queue(pkc, po);
 900		return NULL;
 901	}
 902
 903	/*
 904	 * 3.
 905	 * open this block and return the offset where the first packet
 906	 * needs to get stored.
 907	 */
 908	prb_open_block(pkc, pbd);
 909	return (void *)pkc->nxt_offset;
 910}
 911
 912static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 913		struct packet_sock *po, unsigned int status)
 914{
 915	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 916
 917	/* retire/close the current block */
 918	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 919		/*
 920		 * Plug the case where copy_bits() is in progress on
 921		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 922		 * have space to copy the pkt in the current block and
 923		 * called prb_retire_current_block()
 924		 *
 925		 * We don't need to worry about the TMO case because
 926		 * the timer-handler already handled this case.
 927		 */
 928		if (!(status & TP_STATUS_BLK_TMO)) {
 929			/* Waiting for skb_copy_bits to finish... */
 930			write_lock(&pkc->blk_fill_in_prog_lock);
 931			write_unlock(&pkc->blk_fill_in_prog_lock);
 932		}
 933		prb_close_block(pkc, pbd, po, status);
 934		return;
 935	}
 936}
 937
 938static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
 939{
 940	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 941}
 942
 943static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 944{
 945	return pkc->reset_pending_on_curr_blk;
 946}
 947
 948static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 949	__releases(&pkc->blk_fill_in_prog_lock)
 950{
 951	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 952
 953	read_unlock(&pkc->blk_fill_in_prog_lock);
 954}
 955
 956static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 957			struct tpacket3_hdr *ppd)
 958{
 959	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 960}
 961
 962static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 963			struct tpacket3_hdr *ppd)
 964{
 965	ppd->hv1.tp_rxhash = 0;
 966}
 967
 968static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 969			struct tpacket3_hdr *ppd)
 970{
 
 
 971	if (skb_vlan_tag_present(pkc->skb)) {
 972		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
 973		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
 974		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
 
 
 
 
 975	} else {
 976		ppd->hv1.tp_vlan_tci = 0;
 977		ppd->hv1.tp_vlan_tpid = 0;
 978		ppd->tp_status = TP_STATUS_AVAILABLE;
 979	}
 980}
 981
 982static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 983			struct tpacket3_hdr *ppd)
 984{
 985	ppd->hv1.tp_padding = 0;
 986	prb_fill_vlan_info(pkc, ppd);
 987
 988	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 989		prb_fill_rxhash(pkc, ppd);
 990	else
 991		prb_clear_rxhash(pkc, ppd);
 992}
 993
 994static void prb_fill_curr_block(char *curr,
 995				struct tpacket_kbdq_core *pkc,
 996				struct tpacket_block_desc *pbd,
 997				unsigned int len)
 998	__acquires(&pkc->blk_fill_in_prog_lock)
 999{
1000	struct tpacket3_hdr *ppd;
1001
1002	ppd  = (struct tpacket3_hdr *)curr;
1003	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1004	pkc->prev = curr;
1005	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1006	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1007	BLOCK_NUM_PKTS(pbd) += 1;
1008	read_lock(&pkc->blk_fill_in_prog_lock);
1009	prb_run_all_ft_ops(pkc, ppd);
1010}
1011
1012/* Assumes caller has the sk->rx_queue.lock */
1013static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1014					    struct sk_buff *skb,
1015					    unsigned int len
1016					    )
1017{
1018	struct tpacket_kbdq_core *pkc;
1019	struct tpacket_block_desc *pbd;
1020	char *curr, *end;
1021
1022	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1023	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1024
1025	/* Queue is frozen when user space is lagging behind */
1026	if (prb_queue_frozen(pkc)) {
1027		/*
1028		 * Check if the last block, which caused the queue to freeze,
1029		 * is still in use by user-space.
1030		 */
1031		if (prb_curr_blk_in_use(pbd)) {
1032			/* Can't record this packet */
1033			return NULL;
1034		} else {
1035			/*
1036			 * OK, the block was released by user-space.
1037			 * Now let's open that block;
1038			 * opening a block also thaws the queue,
1039			 * as a side effect.
1040			 */
1041			prb_open_block(pkc, pbd);
1042		}
1043	}
1044
1045	smp_mb();
1046	curr = pkc->nxt_offset;
1047	pkc->skb = skb;
1048	end = (char *)pbd + pkc->kblk_size;
1049
1050	/* first try the current block */
1051	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1052		prb_fill_curr_block(curr, pkc, pbd, len);
1053		return (void *)curr;
1054	}
1055
1056	/* Ok, close the current block */
1057	prb_retire_current_block(pkc, po, 0);
1058
1059	/* Now, try to dispatch the next block */
1060	curr = (char *)prb_dispatch_next_block(pkc, po);
1061	if (curr) {
1062		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1063		prb_fill_curr_block(curr, pkc, pbd, len);
1064		return (void *)curr;
1065	}
1066
1067	/*
1068	 * No free blocks are available; user-space hasn't caught up yet.
1069	 * The queue was just frozen and this packet will be dropped.
1070	 */
1071	return NULL;
1072}
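/*
 * Putting the pieces above together: a block the kernel owns has
 * BLOCK_STATUS == TP_STATUS_KERNEL and is filled in place; once it is closed
 * (full, or retired by the timer) its status becomes TP_STATUS_USER.  If the
 * block that should be dispatched next is still TP_STATUS_USER, the queue is
 * frozen and packets are dropped until user-space hands the block back by
 * writing TP_STATUS_KERNEL, after which prb_open_block() thaws the queue.
 */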
1073
1074static void *packet_current_rx_frame(struct packet_sock *po,
1075					    struct sk_buff *skb,
1076					    int status, unsigned int len)
1077{
1078	char *curr = NULL;
1079	switch (po->tp_version) {
1080	case TPACKET_V1:
1081	case TPACKET_V2:
1082		curr = packet_lookup_frame(po, &po->rx_ring,
1083					po->rx_ring.head, status);
1084		return curr;
1085	case TPACKET_V3:
1086		return __packet_lookup_frame_in_block(po, skb, len);
1087	default:
1088		WARN(1, "TPACKET version not supported\n");
1089		BUG();
1090		return NULL;
1091	}
1092}
1093
1094static void *prb_lookup_block(const struct packet_sock *po,
1095			      const struct packet_ring_buffer *rb,
1096			      unsigned int idx,
1097			      int status)
1098{
1099	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1100	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1101
1102	if (status != BLOCK_STATUS(pbd))
1103		return NULL;
1104	return pbd;
1105}
1106
1107static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1108{
1109	unsigned int prev;
1110	if (rb->prb_bdqc.kactive_blk_num)
1111		prev = rb->prb_bdqc.kactive_blk_num-1;
1112	else
1113		prev = rb->prb_bdqc.knum_blocks-1;
1114	return prev;
1115}
1116
1117/* Assumes caller has held the rx_queue.lock */
1118static void *__prb_previous_block(struct packet_sock *po,
1119					 struct packet_ring_buffer *rb,
1120					 int status)
1121{
1122	unsigned int previous = prb_previous_blk_num(rb);
1123	return prb_lookup_block(po, rb, previous, status);
1124}
1125
1126static void *packet_previous_rx_frame(struct packet_sock *po,
1127					     struct packet_ring_buffer *rb,
1128					     int status)
1129{
1130	if (po->tp_version <= TPACKET_V2)
1131		return packet_previous_frame(po, rb, status);
1132
1133	return __prb_previous_block(po, rb, status);
1134}
1135
1136static void packet_increment_rx_head(struct packet_sock *po,
1137					    struct packet_ring_buffer *rb)
1138{
1139	switch (po->tp_version) {
1140	case TPACKET_V1:
1141	case TPACKET_V2:
1142		return packet_increment_head(rb);
1143	case TPACKET_V3:
1144	default:
1145		WARN(1, "TPACKET version not supported.\n");
1146		BUG();
1147		return;
1148	}
1149}
1150
1151static void *packet_previous_frame(struct packet_sock *po,
1152		struct packet_ring_buffer *rb,
1153		int status)
1154{
1155	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1156	return packet_lookup_frame(po, rb, previous, status);
1157}
1158
1159static void packet_increment_head(struct packet_ring_buffer *buff)
1160{
1161	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1162}
1163
1164static void packet_inc_pending(struct packet_ring_buffer *rb)
1165{
1166	this_cpu_inc(*rb->pending_refcnt);
1167}
1168
1169static void packet_dec_pending(struct packet_ring_buffer *rb)
1170{
1171	this_cpu_dec(*rb->pending_refcnt);
1172}
1173
1174static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1175{
1176	unsigned int refcnt = 0;
1177	int cpu;
1178
1179	/* We don't use pending refcount in rx_ring. */
1180	if (rb->pending_refcnt == NULL)
1181		return 0;
1182
1183	for_each_possible_cpu(cpu)
1184		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1185
1186	return refcnt;
1187}
1188
1189static int packet_alloc_pending(struct packet_sock *po)
1190{
1191	po->rx_ring.pending_refcnt = NULL;
1192
1193	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1194	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1195		return -ENOBUFS;
1196
1197	return 0;
1198}
1199
1200static void packet_free_pending(struct packet_sock *po)
1201{
1202	free_percpu(po->tx_ring.pending_refcnt);
1203}
1204
1205#define ROOM_POW_OFF	2
1206#define ROOM_NONE	0x0
1207#define ROOM_LOW	0x1
1208#define ROOM_NORMAL	0x2
1209
1210static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1211{
1212	int idx, len;
1213
1214	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1215	idx = READ_ONCE(po->rx_ring.head);
1216	if (pow_off)
1217		idx += len >> pow_off;
1218	if (idx >= len)
1219		idx -= len;
1220	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1221}
1222
1223static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1224{
1225	int idx, len;
1226
1227	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1228	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1229	if (pow_off)
1230		idx += len >> pow_off;
1231	if (idx >= len)
1232		idx -= len;
1233	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1234}
1235
1236static int __packet_rcv_has_room(const struct packet_sock *po,
1237				 const struct sk_buff *skb)
1238{
1239	const struct sock *sk = &po->sk;
1240	int ret = ROOM_NONE;
1241
1242	if (po->prot_hook.func != tpacket_rcv) {
1243		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1244		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1245				   - (skb ? skb->truesize : 0);
1246
1247		if (avail > (rcvbuf >> ROOM_POW_OFF))
1248			return ROOM_NORMAL;
1249		else if (avail > 0)
1250			return ROOM_LOW;
1251		else
1252			return ROOM_NONE;
1253	}
1254
1255	if (po->tp_version == TPACKET_V3) {
1256		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1257			ret = ROOM_NORMAL;
1258		else if (__tpacket_v3_has_room(po, 0))
1259			ret = ROOM_LOW;
1260	} else {
1261		if (__tpacket_has_room(po, ROOM_POW_OFF))
1262			ret = ROOM_NORMAL;
1263		else if (__tpacket_has_room(po, 0))
1264			ret = ROOM_LOW;
1265	}
1266
1267	return ret;
1268}
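/*
 * Roughly: ROOM_NORMAL means more than 1/(2^ROOM_POW_OFF), i.e. about a
 * quarter, of the receive budget (socket rcvbuf, ring frames or ring blocks)
 * is still free, ROOM_LOW means only some space is left, and ROOM_NONE means
 * the next packet cannot be stored.  The fanout rollover code below uses
 * this distinction to decide when to move a flow to another socket.
 */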
1269
1270static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1271{
1272	int pressure, ret;
1273
1274	ret = __packet_rcv_has_room(po, skb);
1275	pressure = ret != ROOM_NORMAL;
1276
1277	if (READ_ONCE(po->pressure) != pressure)
1278		WRITE_ONCE(po->pressure, pressure);
1279
1280	return ret;
1281}
1282
1283static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1284{
1285	if (READ_ONCE(po->pressure) &&
1286	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1287		WRITE_ONCE(po->pressure,  0);
1288}
1289
1290static void packet_sock_destruct(struct sock *sk)
1291{
1292	skb_queue_purge(&sk->sk_error_queue);
1293
1294	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1295	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1296
1297	if (!sock_flag(sk, SOCK_DEAD)) {
1298		pr_err("Attempt to release alive packet socket: %p\n", sk);
1299		return;
1300	}
1301
1302	sk_refcnt_debug_dec(sk);
1303}
1304
1305static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1306{
1307	u32 *history = po->rollover->history;
1308	u32 victim, rxhash;
1309	int i, count = 0;
1310
1311	rxhash = skb_get_hash(skb);
1312	for (i = 0; i < ROLLOVER_HLEN; i++)
1313		if (READ_ONCE(history[i]) == rxhash)
1314			count++;
1315
1316	victim = prandom_u32() % ROLLOVER_HLEN;
1317
1318	/* Avoid dirtying the cache line if possible */
1319	if (READ_ONCE(history[victim]) != rxhash)
1320		WRITE_ONCE(history[victim], rxhash);
1321
1322	return count > (ROLLOVER_HLEN >> 1);
1323}
1324
1325static unsigned int fanout_demux_hash(struct packet_fanout *f,
1326				      struct sk_buff *skb,
1327				      unsigned int num)
1328{
1329	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1330}
1331
1332static unsigned int fanout_demux_lb(struct packet_fanout *f,
1333				    struct sk_buff *skb,
1334				    unsigned int num)
1335{
1336	unsigned int val = atomic_inc_return(&f->rr_cur);
1337
1338	return val % num;
1339}
1340
1341static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1342				     struct sk_buff *skb,
1343				     unsigned int num)
1344{
1345	return smp_processor_id() % num;
1346}
1347
1348static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1349				     struct sk_buff *skb,
1350				     unsigned int num)
1351{
1352	return prandom_u32_max(num);
1353}
1354
1355static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1356					  struct sk_buff *skb,
1357					  unsigned int idx, bool try_self,
1358					  unsigned int num)
1359{
1360	struct packet_sock *po, *po_next, *po_skip = NULL;
1361	unsigned int i, j, room = ROOM_NONE;
1362
1363	po = pkt_sk(rcu_dereference(f->arr[idx]));
1364
1365	if (try_self) {
1366		room = packet_rcv_has_room(po, skb);
1367		if (room == ROOM_NORMAL ||
1368		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1369			return idx;
1370		po_skip = po;
1371	}
1372
1373	i = j = min_t(int, po->rollover->sock, num - 1);
1374	do {
1375		po_next = pkt_sk(rcu_dereference(f->arr[i]));
1376		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1377		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1378			if (i != j)
1379				po->rollover->sock = i;
1380			atomic_long_inc(&po->rollover->num);
1381			if (room == ROOM_LOW)
1382				atomic_long_inc(&po->rollover->num_huge);
1383			return i;
1384		}
1385
1386		if (++i == num)
1387			i = 0;
1388	} while (i != j);
1389
1390	atomic_long_inc(&po->rollover->num_failed);
1391	return idx;
1392}
1393
1394static unsigned int fanout_demux_qm(struct packet_fanout *f,
1395				    struct sk_buff *skb,
1396				    unsigned int num)
1397{
1398	return skb_get_queue_mapping(skb) % num;
1399}
1400
1401static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1402				     struct sk_buff *skb,
1403				     unsigned int num)
1404{
1405	struct bpf_prog *prog;
1406	unsigned int ret = 0;
1407
1408	rcu_read_lock();
1409	prog = rcu_dereference(f->bpf_prog);
1410	if (prog)
1411		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1412	rcu_read_unlock();
1413
1414	return ret;
1415}
1416
1417static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1418{
1419	return f->flags & (flag >> 8);
1420}
1421
1422static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1423			     struct packet_type *pt, struct net_device *orig_dev)
1424{
1425	struct packet_fanout *f = pt->af_packet_priv;
1426	unsigned int num = READ_ONCE(f->num_members);
1427	struct net *net = read_pnet(&f->net);
1428	struct packet_sock *po;
1429	unsigned int idx;
1430
1431	if (!net_eq(dev_net(dev), net) || !num) {
1432		kfree_skb(skb);
1433		return 0;
1434	}
1435
1436	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1437		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1438		if (!skb)
1439			return 0;
1440	}
1441	switch (f->type) {
1442	case PACKET_FANOUT_HASH:
1443	default:
1444		idx = fanout_demux_hash(f, skb, num);
1445		break;
1446	case PACKET_FANOUT_LB:
1447		idx = fanout_demux_lb(f, skb, num);
1448		break;
1449	case PACKET_FANOUT_CPU:
1450		idx = fanout_demux_cpu(f, skb, num);
1451		break;
1452	case PACKET_FANOUT_RND:
1453		idx = fanout_demux_rnd(f, skb, num);
1454		break;
1455	case PACKET_FANOUT_QM:
1456		idx = fanout_demux_qm(f, skb, num);
1457		break;
1458	case PACKET_FANOUT_ROLLOVER:
1459		idx = fanout_demux_rollover(f, skb, 0, false, num);
1460		break;
1461	case PACKET_FANOUT_CBPF:
1462	case PACKET_FANOUT_EBPF:
1463		idx = fanout_demux_bpf(f, skb, num);
1464		break;
1465	}
1466
1467	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1468		idx = fanout_demux_rollover(f, skb, idx, true, num);
1469
1470	po = pkt_sk(rcu_dereference(f->arr[idx]));
1471	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1472}
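/*
 * User-space sketch (illustrative, not part of this file): joining a fanout
 * group that packet_rcv_fanout() will demultiplex.  Sockets in the same
 * network namespace passing the same 16-bit group id share the group; the
 * names "fd" and "group_id" are assumptions of this sketch.
 *
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	int group_id = 42;
 *	int fanout_arg = group_id | (PACKET_FANOUT_HASH << 16);
 *
 *	if (setsockopt(fd, SOL_PACKET, PACKET_FANOUT,
 *		       &fanout_arg, sizeof(fanout_arg)) < 0)
 *		perror("PACKET_FANOUT");
 *
 * Flags such as PACKET_FANOUT_FLAG_ROLLOVER or PACKET_FANOUT_FLAG_DEFRAG are
 * OR'ed into the upper 16 bits together with the mode.
 */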
1473
1474DEFINE_MUTEX(fanout_mutex);
1475EXPORT_SYMBOL_GPL(fanout_mutex);
1476static LIST_HEAD(fanout_list);
1477static u16 fanout_next_id;
1478
1479static void __fanout_link(struct sock *sk, struct packet_sock *po)
1480{
1481	struct packet_fanout *f = po->fanout;
1482
1483	spin_lock(&f->lock);
1484	rcu_assign_pointer(f->arr[f->num_members], sk);
1485	smp_wmb();
1486	f->num_members++;
1487	if (f->num_members == 1)
1488		dev_add_pack(&f->prot_hook);
1489	spin_unlock(&f->lock);
1490}
1491
1492static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1493{
1494	struct packet_fanout *f = po->fanout;
1495	int i;
1496
1497	spin_lock(&f->lock);
1498	for (i = 0; i < f->num_members; i++) {
1499		if (rcu_dereference_protected(f->arr[i],
1500					      lockdep_is_held(&f->lock)) == sk)
1501			break;
1502	}
1503	BUG_ON(i >= f->num_members);
1504	rcu_assign_pointer(f->arr[i],
1505			   rcu_dereference_protected(f->arr[f->num_members - 1],
1506						     lockdep_is_held(&f->lock)));
1507	f->num_members--;
1508	if (f->num_members == 0)
1509		__dev_remove_pack(&f->prot_hook);
1510	spin_unlock(&f->lock);
1511}
1512
1513static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1514{
1515	if (sk->sk_family != PF_PACKET)
1516		return false;
1517
1518	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1519}
1520
1521static void fanout_init_data(struct packet_fanout *f)
1522{
1523	switch (f->type) {
1524	case PACKET_FANOUT_LB:
1525		atomic_set(&f->rr_cur, 0);
1526		break;
1527	case PACKET_FANOUT_CBPF:
1528	case PACKET_FANOUT_EBPF:
1529		RCU_INIT_POINTER(f->bpf_prog, NULL);
1530		break;
1531	}
1532}
1533
1534static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1535{
1536	struct bpf_prog *old;
1537
1538	spin_lock(&f->lock);
1539	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1540	rcu_assign_pointer(f->bpf_prog, new);
1541	spin_unlock(&f->lock);
1542
1543	if (old) {
1544		synchronize_net();
1545		bpf_prog_destroy(old);
1546	}
1547}
1548
1549static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1550				unsigned int len)
1551{
1552	struct bpf_prog *new;
1553	struct sock_fprog fprog;
1554	int ret;
1555
1556	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1557		return -EPERM;
1558
1559	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1560	if (ret)
1561		return ret;
1562
1563	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1564	if (ret)
1565		return ret;
1566
1567	__fanout_set_data_bpf(po->fanout, new);
1568	return 0;
1569}
1570
1571static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1572				unsigned int len)
1573{
1574	struct bpf_prog *new;
1575	u32 fd;
1576
1577	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1578		return -EPERM;
1579	if (len != sizeof(fd))
1580		return -EINVAL;
1581	if (copy_from_sockptr(&fd, data, len))
1582		return -EFAULT;
1583
1584	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1585	if (IS_ERR(new))
1586		return PTR_ERR(new);
1587
1588	__fanout_set_data_bpf(po->fanout, new);
1589	return 0;
1590}
1591
1592static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1593			   unsigned int len)
1594{
1595	switch (po->fanout->type) {
1596	case PACKET_FANOUT_CBPF:
1597		return fanout_set_data_cbpf(po, data, len);
1598	case PACKET_FANOUT_EBPF:
1599		return fanout_set_data_ebpf(po, data, len);
1600	default:
1601		return -EINVAL;
1602	}
1603}
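/*
 * User-space sketch (illustrative): supplying the program used by
 * fanout_demux_bpf(), once the socket has joined a PACKET_FANOUT_CBPF group.
 * The option value is a struct sock_fprog; the one-instruction filter below
 * is only an assumption of this sketch and steers every packet to member 0.
 *
 *	#include <linux/filter.h>
 *
 *	struct sock_filter insns[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0 },
 *	};
 *	struct sock_fprog fprog = {
 *		.len	= 1,
 *		.filter	= insns,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
 *
 * For PACKET_FANOUT_EBPF the value is instead a 32-bit file descriptor of a
 * BPF_PROG_TYPE_SOCKET_FILTER program, matching fanout_set_data_ebpf() above.
 */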
1604
1605static void fanout_release_data(struct packet_fanout *f)
1606{
1607	switch (f->type) {
1608	case PACKET_FANOUT_CBPF:
1609	case PACKET_FANOUT_EBPF:
1610		__fanout_set_data_bpf(f, NULL);
1611	}
1612}
1613
1614static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1615{
1616	struct packet_fanout *f;
1617
1618	list_for_each_entry(f, &fanout_list, list) {
1619		if (f->id == candidate_id &&
1620		    read_pnet(&f->net) == sock_net(sk)) {
1621			return false;
1622		}
1623	}
1624	return true;
1625}
1626
1627static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1628{
1629	u16 id = fanout_next_id;
1630
1631	do {
1632		if (__fanout_id_is_free(sk, id)) {
1633			*new_id = id;
1634			fanout_next_id = id + 1;
1635			return true;
1636		}
1637
1638		id++;
1639	} while (id != fanout_next_id);
1640
1641	return false;
1642}
1643
1644static int fanout_add(struct sock *sk, struct fanout_args *args)
1645{
1646	struct packet_rollover *rollover = NULL;
1647	struct packet_sock *po = pkt_sk(sk);
1648	u16 type_flags = args->type_flags;
1649	struct packet_fanout *f, *match;
1650	u8 type = type_flags & 0xff;
1651	u8 flags = type_flags >> 8;
1652	u16 id = args->id;
1653	int err;
1654
1655	switch (type) {
1656	case PACKET_FANOUT_ROLLOVER:
1657		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1658			return -EINVAL;
1659		break;
1660	case PACKET_FANOUT_HASH:
1661	case PACKET_FANOUT_LB:
1662	case PACKET_FANOUT_CPU:
1663	case PACKET_FANOUT_RND:
1664	case PACKET_FANOUT_QM:
1665	case PACKET_FANOUT_CBPF:
1666	case PACKET_FANOUT_EBPF:
1667		break;
1668	default:
1669		return -EINVAL;
1670	}
1671
1672	mutex_lock(&fanout_mutex);
1673
1674	err = -EALREADY;
1675	if (po->fanout)
1676		goto out;
1677
1678	if (type == PACKET_FANOUT_ROLLOVER ||
1679	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1680		err = -ENOMEM;
1681		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1682		if (!rollover)
1683			goto out;
1684		atomic_long_set(&rollover->num, 0);
1685		atomic_long_set(&rollover->num_huge, 0);
1686		atomic_long_set(&rollover->num_failed, 0);
1687	}
1688
1689	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1690		if (id != 0) {
1691			err = -EINVAL;
1692			goto out;
1693		}
1694		if (!fanout_find_new_id(sk, &id)) {
1695			err = -ENOMEM;
1696			goto out;
1697		}
1698		/* ephemeral flag for the first socket in the group: drop it */
1699		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1700	}
1701
1702	match = NULL;
1703	list_for_each_entry(f, &fanout_list, list) {
1704		if (f->id == id &&
1705		    read_pnet(&f->net) == sock_net(sk)) {
1706			match = f;
1707			break;
1708		}
1709	}
1710	err = -EINVAL;
1711	if (match) {
1712		if (match->flags != flags)
1713			goto out;
1714		if (args->max_num_members &&
1715		    args->max_num_members != match->max_num_members)
1716			goto out;
1717	} else {
1718		if (args->max_num_members > PACKET_FANOUT_MAX)
1719			goto out;
1720		if (!args->max_num_members)
1721			/* legacy PACKET_FANOUT_MAX */
1722			args->max_num_members = 256;
1723		err = -ENOMEM;
1724		match = kvzalloc(struct_size(match, arr, args->max_num_members),
1725				 GFP_KERNEL);
1726		if (!match)
1727			goto out;
1728		write_pnet(&match->net, sock_net(sk));
1729		match->id = id;
1730		match->type = type;
1731		match->flags = flags;
1732		INIT_LIST_HEAD(&match->list);
1733		spin_lock_init(&match->lock);
1734		refcount_set(&match->sk_ref, 0);
1735		fanout_init_data(match);
1736		match->prot_hook.type = po->prot_hook.type;
1737		match->prot_hook.dev = po->prot_hook.dev;
1738		match->prot_hook.func = packet_rcv_fanout;
1739		match->prot_hook.af_packet_priv = match;
1740		match->prot_hook.id_match = match_fanout_group;
1741		match->max_num_members = args->max_num_members;
1742		list_add(&match->list, &fanout_list);
1743	}
1744	err = -EINVAL;
1745
1746	spin_lock(&po->bind_lock);
1747	if (po->running &&
1748	    match->type == type &&
1749	    match->prot_hook.type == po->prot_hook.type &&
1750	    match->prot_hook.dev == po->prot_hook.dev) {
1751		err = -ENOSPC;
1752		if (refcount_read(&match->sk_ref) < match->max_num_members) {
1753			__dev_remove_pack(&po->prot_hook);
1754			po->fanout = match;
1755			po->rollover = rollover;
1756			rollover = NULL;
1757			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1758			__fanout_link(sk, po);
1759			err = 0;
1760		}
1761	}
1762	spin_unlock(&po->bind_lock);
1763
1764	if (err && !refcount_read(&match->sk_ref)) {
1765		list_del(&match->list);
1766		kvfree(match);
1767	}
1768
1769out:
1770	kfree(rollover);
1771	mutex_unlock(&fanout_mutex);
1772	return err;
1773}
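/*
 * With PACKET_FANOUT_FLAG_UNIQUEID the caller passes id 0 and the kernel
 * picks a free id via fanout_find_new_id() above; user-space can then read
 * the chosen id back with getsockopt(PACKET_FANOUT) and pass it to the other
 * sockets that should join the same group.
 */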
1774
1775/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1776 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1777 * It is the responsibility of the caller to call fanout_release_data() and
1778 * free the returned packet_fanout (after synchronize_net())
1779 */
1780static struct packet_fanout *fanout_release(struct sock *sk)
1781{
1782	struct packet_sock *po = pkt_sk(sk);
1783	struct packet_fanout *f;
1784
1785	mutex_lock(&fanout_mutex);
1786	f = po->fanout;
1787	if (f) {
1788		po->fanout = NULL;
1789
1790		if (refcount_dec_and_test(&f->sk_ref))
1791			list_del(&f->list);
1792		else
1793			f = NULL;
1794	}
1795	mutex_unlock(&fanout_mutex);
1796
1797	return f;
1798}
1799
1800static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1801					  struct sk_buff *skb)
1802{
1803	/* Earlier code assumed this would be a VLAN pkt, double-check
1804	 * this now that we have the actual packet in hand. We can only
1805	 * do this check on Ethernet devices.
1806	 */
1807	if (unlikely(dev->type != ARPHRD_ETHER))
1808		return false;
1809
1810	skb_reset_mac_header(skb);
1811	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1812}
1813
1814static const struct proto_ops packet_ops;
1815
1816static const struct proto_ops packet_ops_spkt;
1817
1818static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1819			   struct packet_type *pt, struct net_device *orig_dev)
1820{
1821	struct sock *sk;
1822	struct sockaddr_pkt *spkt;
1823
1824	/*
1825	 *	When we registered the protocol we saved the socket in the data
1826	 *	field for just this event.
1827	 */
1828
1829	sk = pt->af_packet_priv;
1830
1831	/*
1832	 *	Yank back the headers [hope the device set this
1833	 *	right or kerboom...]
1834	 *
1835	 *	Incoming packets have the ll header pulled;
1836	 *	push it back.
1837	 *
1838	 *	For outgoing ones skb->data == skb_mac_header(skb),
1839	 *	so this procedure is a no-op.
1840	 */
1841
1842	if (skb->pkt_type == PACKET_LOOPBACK)
1843		goto out;
1844
1845	if (!net_eq(dev_net(dev), sock_net(sk)))
1846		goto out;
1847
1848	skb = skb_share_check(skb, GFP_ATOMIC);
1849	if (skb == NULL)
1850		goto oom;
1851
1852	/* drop any routing info */
1853	skb_dst_drop(skb);
1854
1855	/* drop conntrack reference */
1856	nf_reset_ct(skb);
1857
1858	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1859
1860	skb_push(skb, skb->data - skb_mac_header(skb));
1861
1862	/*
1863	 *	The SOCK_PACKET socket receives _all_ frames.
1864	 */
1865
1866	spkt->spkt_family = dev->type;
1867	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1868	spkt->spkt_protocol = skb->protocol;
1869
1870	/*
1871	 *	Charge the memory to the socket. This is done specifically
1872	 *	to prevent sockets from using up all the memory.
1873	 */
1874
1875	if (sock_queue_rcv_skb(sk, skb) == 0)
1876		return 0;
1877
1878out:
1879	kfree_skb(skb);
1880oom:
1881	return 0;
1882}
1883
1884static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1885{
1886	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1887	    sock->type == SOCK_RAW) {
1888		skb_reset_mac_header(skb);
1889		skb->protocol = dev_parse_header_protocol(skb);
1890	}
1891
1892	skb_probe_transport_header(skb);
1893}
1894
1895/*
1896 *	Output a raw packet to the device layer. This bypasses all the other
1897 *	protocol layers and you must therefore supply it with a complete frame.
1898 */
1899
1900static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1901			       size_t len)
1902{
1903	struct sock *sk = sock->sk;
1904	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1905	struct sk_buff *skb = NULL;
1906	struct net_device *dev;
1907	struct sockcm_cookie sockc;
1908	__be16 proto = 0;
1909	int err;
1910	int extra_len = 0;
1911
1912	/*
1913	 *	Get and verify the address.
1914	 */
1915
1916	if (saddr) {
1917		if (msg->msg_namelen < sizeof(struct sockaddr))
1918			return -EINVAL;
1919		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1920			proto = saddr->spkt_protocol;
1921	} else
1922		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1923
1924	/*
1925	 *	Find the device first to size check it
1926	 */
1927
1928	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1929retry:
1930	rcu_read_lock();
1931	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1932	err = -ENODEV;
1933	if (dev == NULL)
1934		goto out_unlock;
1935
1936	err = -ENETDOWN;
1937	if (!(dev->flags & IFF_UP))
1938		goto out_unlock;
1939
1940	/*
1941	 * You may not queue a frame bigger than the MTU. This is the lowest level
1942	 * raw protocol and you must do your own fragmentation at this level.
1943	 */
1944
1945	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1946		if (!netif_supports_nofcs(dev)) {
1947			err = -EPROTONOSUPPORT;
1948			goto out_unlock;
1949		}
1950		extra_len = 4; /* We're doing our own CRC */
1951	}
1952
1953	err = -EMSGSIZE;
1954	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1955		goto out_unlock;
1956
1957	if (!skb) {
1958		size_t reserved = LL_RESERVED_SPACE(dev);
1959		int tlen = dev->needed_tailroom;
1960		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1961
1962		rcu_read_unlock();
1963		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1964		if (skb == NULL)
1965			return -ENOBUFS;
1966		/* FIXME: Save some space for broken drivers that write a hard
1967		 * header at transmission time by themselves. PPP is the notable
1968		 * one here. This should really be fixed at the driver level.
1969		 */
1970		skb_reserve(skb, reserved);
1971		skb_reset_network_header(skb);
1972
1973		/* Try to align data part correctly */
1974		if (hhlen) {
1975			skb->data -= hhlen;
1976			skb->tail -= hhlen;
1977			if (len < hhlen)
1978				skb_reset_network_header(skb);
1979		}
1980		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1981		if (err)
1982			goto out_free;
1983		goto retry;
1984	}
1985
1986	if (!dev_validate_header(dev, skb->data, len)) {
1987		err = -EINVAL;
1988		goto out_unlock;
1989	}
1990	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1991	    !packet_extra_vlan_len_allowed(dev, skb)) {
1992		err = -EMSGSIZE;
1993		goto out_unlock;
1994	}
1995
1996	sockcm_init(&sockc, sk);
1997	if (msg->msg_controllen) {
1998		err = sock_cmsg_send(sk, msg, &sockc);
1999		if (unlikely(err))
2000			goto out_unlock;
2001	}
2002
2003	skb->protocol = proto;
2004	skb->dev = dev;
2005	skb->priority = sk->sk_priority;
2006	skb->mark = sk->sk_mark;
2007	skb->tstamp = sockc.transmit_time;
2008
2009	skb_setup_tx_timestamp(skb, sockc.tsflags);
2010
2011	if (unlikely(extra_len == 4))
2012		skb->no_fcs = 1;
2013
2014	packet_parse_headers(skb, sock);
2015
2016	dev_queue_xmit(skb);
2017	rcu_read_unlock();
2018	return len;
2019
2020out_unlock:
2021	rcu_read_unlock();
2022out_free:
2023	kfree_skb(skb);
2024	return err;
2025}
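/*
 * User-space sketch (illustrative): the obsolete SOCK_PACKET transmit path
 * above expects a destination address on every sendto().  The device name,
 * "frame" and "frame_len" (a complete link-layer frame) are assumptions of
 * this sketch.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <arpa/inet.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_packet.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_PACKET, htons(ETH_P_ALL));
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *	strncpy((char *)spkt.spkt_device, "eth0",
 *		sizeof(spkt.spkt_device) - 1);
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */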
2026
2027static unsigned int run_filter(struct sk_buff *skb,
2028			       const struct sock *sk,
2029			       unsigned int res)
2030{
2031	struct sk_filter *filter;
2032
2033	rcu_read_lock();
2034	filter = rcu_dereference(sk->sk_filter);
2035	if (filter != NULL)
2036		res = bpf_prog_run_clear_cb(filter->prog, skb);
2037	rcu_read_unlock();
2038
2039	return res;
2040}
2041
2042static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2043			   size_t *len)
2044{
2045	struct virtio_net_hdr vnet_hdr;
2046
2047	if (*len < sizeof(vnet_hdr))
2048		return -EINVAL;
2049	*len -= sizeof(vnet_hdr);
2050
2051	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2052		return -EINVAL;
2053
2054	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2055}
2056
2057/*
2058 * This function does lazy skb cloning in the hope that most packets
2059 * are discarded by BPF.
2060 *
2061 * Note the tricky part: we DO mangle shared skbs! skb->data, skb->len
2062 * and skb->cb are mangled. It works because (and until) packets
2063 * falling here are owned by the current CPU. Output packets are cloned
2064 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2065 * sequentially, so if we return the skb to its original state on exit,
2066 * we will not harm anyone.
2067 */
2068
2069static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2070		      struct packet_type *pt, struct net_device *orig_dev)
2071{
2072	struct sock *sk;
2073	struct sockaddr_ll *sll;
2074	struct packet_sock *po;
2075	u8 *skb_head = skb->data;
2076	int skb_len = skb->len;
2077	unsigned int snaplen, res;
2078	bool is_drop_n_account = false;
2079
2080	if (skb->pkt_type == PACKET_LOOPBACK)
2081		goto drop;
2082
2083	sk = pt->af_packet_priv;
2084	po = pkt_sk(sk);
2085
2086	if (!net_eq(dev_net(dev), sock_net(sk)))
2087		goto drop;
2088
2089	skb->dev = dev;
2090
2091	if (dev_has_header(dev)) {
2092		/* The device has an explicit notion of ll header,
2093		 * exported to higher levels.
2094		 *
2095		 * Otherwise, the device hides details of its frame
2096		 * structure, so the corresponding packet header is
2097		 * never delivered to the user.
2098		 */
2099		if (sk->sk_type != SOCK_DGRAM)
2100			skb_push(skb, skb->data - skb_mac_header(skb));
2101		else if (skb->pkt_type == PACKET_OUTGOING) {
2102			/* Special case: outgoing packets have ll header at head */
2103			skb_pull(skb, skb_network_offset(skb));
2104		}
2105	}
2106
2107	snaplen = skb->len;
2108
2109	res = run_filter(skb, sk, snaplen);
2110	if (!res)
2111		goto drop_n_restore;
2112	if (snaplen > res)
2113		snaplen = res;
2114
2115	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2116		goto drop_n_acct;
2117
2118	if (skb_shared(skb)) {
2119		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2120		if (nskb == NULL)
2121			goto drop_n_acct;
2122
2123		if (skb_head != skb->data) {
2124			skb->data = skb_head;
2125			skb->len = skb_len;
2126		}
2127		consume_skb(skb);
2128		skb = nskb;
2129	}
2130
2131	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2132
2133	sll = &PACKET_SKB_CB(skb)->sa.ll;
2134	sll->sll_hatype = dev->type;
2135	sll->sll_pkttype = skb->pkt_type;
2136	if (unlikely(po->origdev))
2137		sll->sll_ifindex = orig_dev->ifindex;
2138	else
2139		sll->sll_ifindex = dev->ifindex;
2140
2141	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2142
2143	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2144	 * Use their space for storing the original skb length.
2145	 */
2146	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2147
2148	if (pskb_trim(skb, snaplen))
2149		goto drop_n_acct;
2150
2151	skb_set_owner_r(skb, sk);
2152	skb->dev = NULL;
2153	skb_dst_drop(skb);
2154
2155	/* drop conntrack reference */
2156	nf_reset_ct(skb);
2157
2158	spin_lock(&sk->sk_receive_queue.lock);
2159	po->stats.stats1.tp_packets++;
2160	sock_skb_set_dropcount(sk, skb);
2161	__skb_queue_tail(&sk->sk_receive_queue, skb);
2162	spin_unlock(&sk->sk_receive_queue.lock);
2163	sk->sk_data_ready(sk);
2164	return 0;
2165
2166drop_n_acct:
2167	is_drop_n_account = true;
2168	atomic_inc(&po->tp_drops);
2169	atomic_inc(&sk->sk_drops);
2170
2171drop_n_restore:
2172	if (skb_head != skb->data && skb_shared(skb)) {
2173		skb->data = skb_head;
2174		skb->len = skb_len;
2175	}
2176drop:
2177	if (!is_drop_n_account)
2178		consume_skb(skb);
2179	else
2180		kfree_skb(skb);
2181	return 0;
2182}
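/*
 * User-space sketch (illustrative): frames queued by packet_rcv() above are
 * read with an ordinary recvfrom(); the buffer size is an assumption of this
 * sketch.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	unsigned char buf[2048];
 *	struct sockaddr_ll sll;
 *	socklen_t slen = sizeof(sll);
 *
 *	ssize_t n = recvfrom(fd, buf, sizeof(buf), 0,
 *			     (struct sockaddr *)&sll, &slen);
 *
 * sll.sll_ifindex, sll_hatype, sll_pkttype and sll_addr then describe where
 * the frame came from, exactly as filled in by this receive path (and by
 * packet_recvmsg() later in this file).
 */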
2183
2184static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2185		       struct packet_type *pt, struct net_device *orig_dev)
2186{
2187	struct sock *sk;
2188	struct packet_sock *po;
2189	struct sockaddr_ll *sll;
2190	union tpacket_uhdr h;
2191	u8 *skb_head = skb->data;
2192	int skb_len = skb->len;
2193	unsigned int snaplen, res;
2194	unsigned long status = TP_STATUS_USER;
2195	unsigned short macoff, hdrlen;
2196	unsigned int netoff;
2197	struct sk_buff *copy_skb = NULL;
2198	struct timespec64 ts;
2199	__u32 ts_status;
2200	bool is_drop_n_account = false;
2201	unsigned int slot_id = 0;
2202	bool do_vnet = false;
2203
2204	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2205	 * We may add members to them up to the current aligned size without forcing
2206	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2207	 */
2208	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2209	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2210
2211	if (skb->pkt_type == PACKET_LOOPBACK)
2212		goto drop;
2213
2214	sk = pt->af_packet_priv;
2215	po = pkt_sk(sk);
2216
2217	if (!net_eq(dev_net(dev), sock_net(sk)))
2218		goto drop;
2219
2220	if (dev_has_header(dev)) {
2221		if (sk->sk_type != SOCK_DGRAM)
2222			skb_push(skb, skb->data - skb_mac_header(skb));
2223		else if (skb->pkt_type == PACKET_OUTGOING) {
2224			/* Special case: outgoing packets have ll header at head */
2225			skb_pull(skb, skb_network_offset(skb));
2226		}
2227	}
2228
2229	snaplen = skb->len;
2230
2231	res = run_filter(skb, sk, snaplen);
2232	if (!res)
2233		goto drop_n_restore;
2234
2235	/* If we are flooded, just give up */
2236	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2237		atomic_inc(&po->tp_drops);
2238		goto drop_n_restore;
2239	}
2240
2241	if (skb->ip_summed == CHECKSUM_PARTIAL)
2242		status |= TP_STATUS_CSUMNOTREADY;
2243	else if (skb->pkt_type != PACKET_OUTGOING &&
2244		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2245		  skb_csum_unnecessary(skb)))
2246		status |= TP_STATUS_CSUM_VALID;
2247
2248	if (snaplen > res)
2249		snaplen = res;
2250
2251	if (sk->sk_type == SOCK_DGRAM) {
2252		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2253				  po->tp_reserve;
2254	} else {
2255		unsigned int maclen = skb_network_offset(skb);
2256		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2257				       (maclen < 16 ? 16 : maclen)) +
2258				       po->tp_reserve;
2259		if (po->has_vnet_hdr) {
2260			netoff += sizeof(struct virtio_net_hdr);
2261			do_vnet = true;
2262		}
2263		macoff = netoff - maclen;
2264	}
2265	if (netoff > USHRT_MAX) {
2266		atomic_inc(&po->tp_drops);
2267		goto drop_n_restore;
2268	}
2269	if (po->tp_version <= TPACKET_V2) {
2270		if (macoff + snaplen > po->rx_ring.frame_size) {
2271			if (po->copy_thresh &&
2272			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2273				if (skb_shared(skb)) {
2274					copy_skb = skb_clone(skb, GFP_ATOMIC);
2275				} else {
2276					copy_skb = skb_get(skb);
2277					skb_head = skb->data;
2278				}
2279				if (copy_skb)
2280					skb_set_owner_r(copy_skb, sk);
2281			}
2282			snaplen = po->rx_ring.frame_size - macoff;
2283			if ((int)snaplen < 0) {
2284				snaplen = 0;
2285				do_vnet = false;
2286			}
2287		}
2288	} else if (unlikely(macoff + snaplen >
2289			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2290		u32 nval;
2291
2292		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2293		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2294			    snaplen, nval, macoff);
2295		snaplen = nval;
2296		if (unlikely((int)snaplen < 0)) {
2297			snaplen = 0;
2298			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2299			do_vnet = false;
2300		}
2301	}
2302	spin_lock(&sk->sk_receive_queue.lock);
2303	h.raw = packet_current_rx_frame(po, skb,
2304					TP_STATUS_KERNEL, (macoff+snaplen));
2305	if (!h.raw)
2306		goto drop_n_account;
2307
2308	if (po->tp_version <= TPACKET_V2) {
2309		slot_id = po->rx_ring.head;
2310		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2311			goto drop_n_account;
2312		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2313	}
2314
2315	if (do_vnet &&
2316	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2317				    sizeof(struct virtio_net_hdr),
2318				    vio_le(), true, 0)) {
2319		if (po->tp_version == TPACKET_V3)
2320			prb_clear_blk_fill_status(&po->rx_ring);
2321		goto drop_n_account;
2322	}
2323
2324	if (po->tp_version <= TPACKET_V2) {
2325		packet_increment_rx_head(po, &po->rx_ring);
2326	/*
2327	 * LOSING will be reported until you read the stats,
2328	 * because it's COR - Clear On Read.
2329	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2330	 * at the packet level.
2331	 */
2332		if (atomic_read(&po->tp_drops))
2333			status |= TP_STATUS_LOSING;
2334	}
2335
2336	po->stats.stats1.tp_packets++;
2337	if (copy_skb) {
2338		status |= TP_STATUS_COPY;
2339		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2340	}
2341	spin_unlock(&sk->sk_receive_queue.lock);
2342
2343	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2344
2345	/* Always timestamp; prefer an existing software timestamp taken
2346	 * closer to the time of capture.
2347	 */
2348	ts_status = tpacket_get_timestamp(skb, &ts,
2349					  po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
2350	if (!ts_status)
2351		ktime_get_real_ts64(&ts);
2352
2353	status |= ts_status;
2354
2355	switch (po->tp_version) {
2356	case TPACKET_V1:
2357		h.h1->tp_len = skb->len;
2358		h.h1->tp_snaplen = snaplen;
2359		h.h1->tp_mac = macoff;
2360		h.h1->tp_net = netoff;
2361		h.h1->tp_sec = ts.tv_sec;
2362		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2363		hdrlen = sizeof(*h.h1);
2364		break;
2365	case TPACKET_V2:
2366		h.h2->tp_len = skb->len;
2367		h.h2->tp_snaplen = snaplen;
2368		h.h2->tp_mac = macoff;
2369		h.h2->tp_net = netoff;
2370		h.h2->tp_sec = ts.tv_sec;
2371		h.h2->tp_nsec = ts.tv_nsec;
2372		if (skb_vlan_tag_present(skb)) {
2373			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2374			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2375			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2376		} else {
2377			h.h2->tp_vlan_tci = 0;
2378			h.h2->tp_vlan_tpid = 0;
2379		}
2380		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2381		hdrlen = sizeof(*h.h2);
2382		break;
2383	case TPACKET_V3:
2384		/* tp_next_offset and the vlan fields are already populated above,
2385		 * so DON'T clear them here.
2386		 */
2387		h.h3->tp_status |= status;
2388		h.h3->tp_len = skb->len;
2389		h.h3->tp_snaplen = snaplen;
2390		h.h3->tp_mac = macoff;
2391		h.h3->tp_net = netoff;
2392		h.h3->tp_sec  = ts.tv_sec;
2393		h.h3->tp_nsec = ts.tv_nsec;
2394		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2395		hdrlen = sizeof(*h.h3);
2396		break;
2397	default:
2398		BUG();
2399	}
2400
2401	sll = h.raw + TPACKET_ALIGN(hdrlen);
2402	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2403	sll->sll_family = AF_PACKET;
2404	sll->sll_hatype = dev->type;
2405	sll->sll_protocol = skb->protocol;
2406	sll->sll_pkttype = skb->pkt_type;
2407	if (unlikely(po->origdev))
2408		sll->sll_ifindex = orig_dev->ifindex;
2409	else
2410		sll->sll_ifindex = dev->ifindex;
2411
2412	smp_mb();
2413
2414#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2415	if (po->tp_version <= TPACKET_V2) {
2416		u8 *start, *end;
2417
2418		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2419					macoff + snaplen);
2420
2421		for (start = h.raw; start < end; start += PAGE_SIZE)
2422			flush_dcache_page(pgv_to_page(start));
2423	}
2424	smp_wmb();
2425#endif
2426
2427	if (po->tp_version <= TPACKET_V2) {
2428		spin_lock(&sk->sk_receive_queue.lock);
2429		__packet_set_status(po, h.raw, status);
2430		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2431		spin_unlock(&sk->sk_receive_queue.lock);
2432		sk->sk_data_ready(sk);
2433	} else if (po->tp_version == TPACKET_V3) {
2434		prb_clear_blk_fill_status(&po->rx_ring);
2435	}
2436
2437drop_n_restore:
2438	if (skb_head != skb->data && skb_shared(skb)) {
2439		skb->data = skb_head;
2440		skb->len = skb_len;
2441	}
2442drop:
2443	if (!is_drop_n_account)
2444		consume_skb(skb);
2445	else
2446		kfree_skb(skb);
2447	return 0;
2448
2449drop_n_account:
2450	spin_unlock(&sk->sk_receive_queue.lock);
2451	atomic_inc(&po->tp_drops);
2452	is_drop_n_account = true;
2453
2454	sk->sk_data_ready(sk);
2455	kfree_skb(copy_skb);
2456	goto drop_n_restore;
2457}
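/*
 * User-space sketch (illustrative): setting up the TPACKET_V3 receive ring
 * that tpacket_rcv() fills.  The geometry below and the block-walking loop
 * are assumptions of this sketch, not recommendations.
 *
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	struct tpacket_req3 req = {
 *		.tp_block_size	   = 1 << 22,
 *		.tp_block_nr	   = 64,
 *		.tp_frame_size	   = 1 << 11,
 *		.tp_frame_nr	   = (1 << 22) / (1 << 11) * 64,
 *		.tp_retire_blk_tov = 60,
 *	};
 *	int version = TPACKET_V3;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	void *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 * The reader then polls, waits for a block descriptor's block_status to gain
 * TP_STATUS_USER, walks its packets via each tpacket3_hdr's tp_next_offset,
 * and finally writes TP_STATUS_KERNEL back so the fill code above can reuse
 * the block.
 */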
2458
2459static void tpacket_destruct_skb(struct sk_buff *skb)
2460{
2461	struct packet_sock *po = pkt_sk(skb->sk);
2462
2463	if (likely(po->tx_ring.pg_vec)) {
2464		void *ph;
2465		__u32 ts;
2466
2467		ph = skb_zcopy_get_nouarg(skb);
2468		packet_dec_pending(&po->tx_ring);
2469
2470		ts = __packet_set_timestamp(po, ph, skb);
2471		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2472
2473		if (!packet_read_pending(&po->tx_ring))
2474			complete(&po->skb_completion);
2475	}
2476
2477	sock_wfree(skb);
2478}
2479
2480static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2481{
2482	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2483	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2484	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2485	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2486		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2487			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2488			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2489
2490	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2491		return -EINVAL;
2492
2493	return 0;
2494}
2495
2496static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2497				 struct virtio_net_hdr *vnet_hdr)
2498{
2499	if (*len < sizeof(*vnet_hdr))
2500		return -EINVAL;
2501	*len -= sizeof(*vnet_hdr);
2502
2503	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2504		return -EFAULT;
2505
2506	return __packet_snd_vnet_parse(vnet_hdr, *len);
2507}
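/*
 * User-space sketch (illustrative): once PACKET_VNET_HDR is enabled on the
 * socket, every frame sent must be preceded by the struct virtio_net_hdr
 * that the two helpers above validate (received frames are likewise
 * prefixed with one, see packet_rcv_vnet()).  "frame" and "frame_len" are
 * assumptions of this sketch.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/virtio_net.h>
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 *
 *	struct virtio_net_hdr vh = { 0 };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh,    .iov_len = sizeof(vh) },
 *		{ .iov_base = frame,  .iov_len = frame_len  },
 *	};
 *	struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	sendmsg(fd, &msg, 0);
 */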
2508
2509static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2510		void *frame, struct net_device *dev, void *data, int tp_len,
2511		__be16 proto, unsigned char *addr, int hlen, int copylen,
2512		const struct sockcm_cookie *sockc)
2513{
2514	union tpacket_uhdr ph;
2515	int to_write, offset, len, nr_frags, len_max;
2516	struct socket *sock = po->sk.sk_socket;
2517	struct page *page;
2518	int err;
2519
2520	ph.raw = frame;
2521
2522	skb->protocol = proto;
2523	skb->dev = dev;
2524	skb->priority = po->sk.sk_priority;
2525	skb->mark = po->sk.sk_mark;
2526	skb->tstamp = sockc->transmit_time;
2527	skb_setup_tx_timestamp(skb, sockc->tsflags);
2528	skb_zcopy_set_nouarg(skb, ph.raw);
2529
2530	skb_reserve(skb, hlen);
2531	skb_reset_network_header(skb);
2532
2533	to_write = tp_len;
2534
2535	if (sock->type == SOCK_DGRAM) {
2536		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2537				NULL, tp_len);
2538		if (unlikely(err < 0))
2539			return -EINVAL;
2540	} else if (copylen) {
2541		int hdrlen = min_t(int, copylen, tp_len);
2542
2543		skb_push(skb, dev->hard_header_len);
2544		skb_put(skb, copylen - dev->hard_header_len);
2545		err = skb_store_bits(skb, 0, data, hdrlen);
2546		if (unlikely(err))
2547			return err;
2548		if (!dev_validate_header(dev, skb->data, hdrlen))
2549			return -EINVAL;
2550
2551		data += hdrlen;
2552		to_write -= hdrlen;
2553	}
2554
2555	offset = offset_in_page(data);
2556	len_max = PAGE_SIZE - offset;
2557	len = ((to_write > len_max) ? len_max : to_write);
2558
2559	skb->data_len = to_write;
2560	skb->len += to_write;
2561	skb->truesize += to_write;
2562	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2563
2564	while (likely(to_write)) {
2565		nr_frags = skb_shinfo(skb)->nr_frags;
2566
2567		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2568			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2569			       MAX_SKB_FRAGS);
2570			return -EFAULT;
2571		}
2572
2573		page = pgv_to_page(data);
2574		data += len;
2575		flush_dcache_page(page);
2576		get_page(page);
2577		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2578		to_write -= len;
2579		offset = 0;
2580		len_max = PAGE_SIZE;
2581		len = ((to_write > len_max) ? len_max : to_write);
2582	}
2583
2584	packet_parse_headers(skb, sock);
2585
2586	return tp_len;
2587}
2588
2589static int tpacket_parse_header(struct packet_sock *po, void *frame,
2590				int size_max, void **data)
2591{
2592	union tpacket_uhdr ph;
2593	int tp_len, off;
2594
2595	ph.raw = frame;
2596
2597	switch (po->tp_version) {
2598	case TPACKET_V3:
2599		if (ph.h3->tp_next_offset != 0) {
2600			pr_warn_once("variable sized slot not supported");
2601			return -EINVAL;
2602		}
2603		tp_len = ph.h3->tp_len;
2604		break;
2605	case TPACKET_V2:
2606		tp_len = ph.h2->tp_len;
2607		break;
2608	default:
2609		tp_len = ph.h1->tp_len;
2610		break;
2611	}
2612	if (unlikely(tp_len > size_max)) {
2613		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2614		return -EMSGSIZE;
2615	}
2616
2617	if (unlikely(po->tp_tx_has_off)) {
2618		int off_min, off_max;
2619
2620		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2621		off_max = po->tx_ring.frame_size - tp_len;
2622		if (po->sk.sk_type == SOCK_DGRAM) {
2623			switch (po->tp_version) {
2624			case TPACKET_V3:
2625				off = ph.h3->tp_net;
2626				break;
2627			case TPACKET_V2:
2628				off = ph.h2->tp_net;
2629				break;
2630			default:
2631				off = ph.h1->tp_net;
2632				break;
2633			}
2634		} else {
2635			switch (po->tp_version) {
2636			case TPACKET_V3:
2637				off = ph.h3->tp_mac;
2638				break;
2639			case TPACKET_V2:
2640				off = ph.h2->tp_mac;
2641				break;
2642			default:
2643				off = ph.h1->tp_mac;
2644				break;
2645			}
2646		}
2647		if (unlikely((off < off_min) || (off_max < off)))
2648			return -EINVAL;
2649	} else {
2650		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2651	}
2652
2653	*data = frame + off;
2654	return tp_len;
2655}
2656
2657static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2658{
2659	struct sk_buff *skb = NULL;
2660	struct net_device *dev;
2661	struct virtio_net_hdr *vnet_hdr = NULL;
2662	struct sockcm_cookie sockc;
2663	__be16 proto;
2664	int err, reserve = 0;
2665	void *ph;
2666	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2667	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2668	unsigned char *addr = NULL;
2669	int tp_len, size_max;
2670	void *data;
2671	int len_sum = 0;
2672	int status = TP_STATUS_AVAILABLE;
2673	int hlen, tlen, copylen = 0;
2674	long timeo = 0;
2675
2676	mutex_lock(&po->pg_vec_lock);
2677
2678	/* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
2679	 * we need to confirm it under the protection of pg_vec_lock.
2680	 */
2681	if (unlikely(!po->tx_ring.pg_vec)) {
2682		err = -EBUSY;
2683		goto out;
2684	}
2685	if (likely(saddr == NULL)) {
2686		dev	= packet_cached_dev_get(po);
2687		proto	= READ_ONCE(po->num);
2688	} else {
2689		err = -EINVAL;
2690		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2691			goto out;
2692		if (msg->msg_namelen < (saddr->sll_halen
2693					+ offsetof(struct sockaddr_ll,
2694						sll_addr)))
2695			goto out;
2696		proto	= saddr->sll_protocol;
2697		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2698		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2699			if (dev && msg->msg_namelen < dev->addr_len +
2700				   offsetof(struct sockaddr_ll, sll_addr))
2701				goto out_put;
2702			addr = saddr->sll_addr;
2703		}
2704	}
2705
2706	err = -ENXIO;
2707	if (unlikely(dev == NULL))
2708		goto out;
2709	err = -ENETDOWN;
2710	if (unlikely(!(dev->flags & IFF_UP)))
2711		goto out_put;
2712
2713	sockcm_init(&sockc, &po->sk);
2714	if (msg->msg_controllen) {
2715		err = sock_cmsg_send(&po->sk, msg, &sockc);
2716		if (unlikely(err))
2717			goto out_put;
2718	}
2719
2720	if (po->sk.sk_socket->type == SOCK_RAW)
2721		reserve = dev->hard_header_len;
2722	size_max = po->tx_ring.frame_size
2723		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2724
2725	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2726		size_max = dev->mtu + reserve + VLAN_HLEN;
2727
2728	reinit_completion(&po->skb_completion);
2729
2730	do {
2731		ph = packet_current_frame(po, &po->tx_ring,
2732					  TP_STATUS_SEND_REQUEST);
2733		if (unlikely(ph == NULL)) {
2734			if (need_wait && skb) {
2735				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2736				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2737				if (timeo <= 0) {
2738					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2739					goto out_put;
2740				}
2741			}
2742			/* check for additional frames */
2743			continue;
2744		}
2745
2746		skb = NULL;
2747		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2748		if (tp_len < 0)
2749			goto tpacket_error;
2750
2751		status = TP_STATUS_SEND_REQUEST;
2752		hlen = LL_RESERVED_SPACE(dev);
2753		tlen = dev->needed_tailroom;
2754		if (po->has_vnet_hdr) {
2755			vnet_hdr = data;
2756			data += sizeof(*vnet_hdr);
2757			tp_len -= sizeof(*vnet_hdr);
2758			if (tp_len < 0 ||
2759			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2760				tp_len = -EINVAL;
2761				goto tpacket_error;
2762			}
2763			copylen = __virtio16_to_cpu(vio_le(),
2764						    vnet_hdr->hdr_len);
2765		}
2766		copylen = max_t(int, copylen, dev->hard_header_len);
2767		skb = sock_alloc_send_skb(&po->sk,
2768				hlen + tlen + sizeof(struct sockaddr_ll) +
2769				(copylen - dev->hard_header_len),
2770				!need_wait, &err);
2771
2772		if (unlikely(skb == NULL)) {
2773			/* we assume the socket was initially writeable ... */
2774			if (likely(len_sum > 0))
2775				err = len_sum;
2776			goto out_status;
2777		}
2778		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2779					  addr, hlen, copylen, &sockc);
2780		if (likely(tp_len >= 0) &&
2781		    tp_len > dev->mtu + reserve &&
2782		    !po->has_vnet_hdr &&
2783		    !packet_extra_vlan_len_allowed(dev, skb))
2784			tp_len = -EMSGSIZE;
2785
2786		if (unlikely(tp_len < 0)) {
2787tpacket_error:
2788			if (po->tp_loss) {
2789				__packet_set_status(po, ph,
2790						TP_STATUS_AVAILABLE);
2791				packet_increment_head(&po->tx_ring);
2792				kfree_skb(skb);
2793				continue;
2794			} else {
2795				status = TP_STATUS_WRONG_FORMAT;
2796				err = tp_len;
2797				goto out_status;
2798			}
2799		}
2800
2801		if (po->has_vnet_hdr) {
2802			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2803				tp_len = -EINVAL;
2804				goto tpacket_error;
2805			}
2806			virtio_net_hdr_set_proto(skb, vnet_hdr);
2807		}
2808
2809		skb->destructor = tpacket_destruct_skb;
2810		__packet_set_status(po, ph, TP_STATUS_SENDING);
2811		packet_inc_pending(&po->tx_ring);
2812
2813		status = TP_STATUS_SEND_REQUEST;
2814		err = po->xmit(skb);
2815		if (unlikely(err > 0)) {
2816			err = net_xmit_errno(err);
2817			if (err && __packet_get_status(po, ph) ==
2818				   TP_STATUS_AVAILABLE) {
2819				/* skb was destructed already */
2820				skb = NULL;
2821				goto out_status;
2822			}
2823			/*
2824			 * skb was dropped but not destructed yet;
2825			 * let's treat it like congestion or err < 0
2826			 */
2827			err = 0;
2828		}
2829		packet_increment_head(&po->tx_ring);
2830		len_sum += tp_len;
2831	} while (likely((ph != NULL) ||
2832		/* Note: packet_read_pending() might be slow if we have
2833		 * to call it, as it's a per-cpu variable, but on the fast path
2834		 * we already short-circuit the loop with the first
2835		 * condition and luckily don't have to go down that path
2836		 * anyway.
2837		 */
2838		 (need_wait && packet_read_pending(&po->tx_ring))));
2839
2840	err = len_sum;
2841	goto out_put;
2842
2843out_status:
2844	__packet_set_status(po, ph, status);
2845	kfree_skb(skb);
2846out_put:
2847	dev_put(dev);
2848out:
2849	mutex_unlock(&po->pg_vec_lock);
2850	return err;
2851}
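/*
 * User-space sketch (illustrative): the transmit ring drained by
 * tpacket_snd() above.  Assumes TPACKET_V2, tp_tx_has_off unset, a socket
 * already bound to an interface, and "frame"/"frame_len" supplied by the
 * caller; the geometry is arbitrary.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	struct tpacket_req req = {
 *		.tp_block_size = 1 << 12,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 1 << 12,
 *		.tp_frame_nr   = 64,
 *	};
 *	int version = TPACKET_V2;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version));
 *	setsockopt(fd, SOL_PACKET, PACKET_TX_RING, &req, sizeof(req));
 *	char *ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)ring;
 *	char *data = (char *)hdr + TPACKET2_HDRLEN -
 *		     sizeof(struct sockaddr_ll);
 *
 *	memcpy(data, frame, frame_len);
 *	hdr->tp_len = frame_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);
 *
 * The kick via send() is what enters tpacket_snd(); when transmission
 * completes, tpacket_destruct_skb() flips the slot back to
 * TP_STATUS_AVAILABLE.
 */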
2852
2853static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2854				        size_t reserve, size_t len,
2855				        size_t linear, int noblock,
2856				        int *err)
2857{
2858	struct sk_buff *skb;
2859
2860	/* Under a page?  Don't bother with paged skb. */
2861	if (prepad + len < PAGE_SIZE || !linear)
2862		linear = len;
2863
2864	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2865				   err, 0);
2866	if (!skb)
2867		return NULL;
2868
2869	skb_reserve(skb, reserve);
2870	skb_put(skb, linear);
2871	skb->data_len = len - linear;
2872	skb->len += len - linear;
2873
2874	return skb;
2875}
2876
2877static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2878{
2879	struct sock *sk = sock->sk;
2880	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2881	struct sk_buff *skb;
2882	struct net_device *dev;
2883	__be16 proto;
2884	unsigned char *addr = NULL;
2885	int err, reserve = 0;
2886	struct sockcm_cookie sockc;
2887	struct virtio_net_hdr vnet_hdr = { 0 };
2888	int offset = 0;
2889	struct packet_sock *po = pkt_sk(sk);
2890	bool has_vnet_hdr = false;
2891	int hlen, tlen, linear;
2892	int extra_len = 0;
2893
2894	/*
2895	 *	Get and verify the address.
2896	 */
2897
2898	if (likely(saddr == NULL)) {
2899		dev	= packet_cached_dev_get(po);
2900		proto	= READ_ONCE(po->num);
2901	} else {
2902		err = -EINVAL;
2903		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2904			goto out;
2905		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2906			goto out;
2907		proto	= saddr->sll_protocol;
2908		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2909		if (sock->type == SOCK_DGRAM) {
2910			if (dev && msg->msg_namelen < dev->addr_len +
2911				   offsetof(struct sockaddr_ll, sll_addr))
2912				goto out_unlock;
2913			addr = saddr->sll_addr;
2914		}
2915	}
2916
2917	err = -ENXIO;
2918	if (unlikely(dev == NULL))
2919		goto out_unlock;
2920	err = -ENETDOWN;
2921	if (unlikely(!(dev->flags & IFF_UP)))
2922		goto out_unlock;
2923
2924	sockcm_init(&sockc, sk);
2925	sockc.mark = sk->sk_mark;
2926	if (msg->msg_controllen) {
2927		err = sock_cmsg_send(sk, msg, &sockc);
2928		if (unlikely(err))
2929			goto out_unlock;
2930	}
2931
2932	if (sock->type == SOCK_RAW)
2933		reserve = dev->hard_header_len;
2934	if (po->has_vnet_hdr) {
2935		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2936		if (err)
2937			goto out_unlock;
2938		has_vnet_hdr = true;
2939	}
2940
2941	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2942		if (!netif_supports_nofcs(dev)) {
2943			err = -EPROTONOSUPPORT;
2944			goto out_unlock;
2945		}
2946		extra_len = 4; /* We're doing our own CRC */
2947	}
2948
2949	err = -EMSGSIZE;
2950	if (!vnet_hdr.gso_type &&
2951	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2952		goto out_unlock;
2953
2954	err = -ENOBUFS;
2955	hlen = LL_RESERVED_SPACE(dev);
2956	tlen = dev->needed_tailroom;
2957	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2958	linear = max(linear, min_t(int, len, dev->hard_header_len));
2959	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2960			       msg->msg_flags & MSG_DONTWAIT, &err);
2961	if (skb == NULL)
2962		goto out_unlock;
2963
2964	skb_reset_network_header(skb);
2965
2966	err = -EINVAL;
2967	if (sock->type == SOCK_DGRAM) {
2968		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2969		if (unlikely(offset < 0))
2970			goto out_free;
2971	} else if (reserve) {
2972		skb_reserve(skb, -reserve);
2973		if (len < reserve + sizeof(struct ipv6hdr) &&
2974		    dev->min_header_len != dev->hard_header_len)
2975			skb_reset_network_header(skb);
2976	}
2977
2978	/* Returns -EFAULT on error */
2979	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2980	if (err)
2981		goto out_free;
2982
2983	if (sock->type == SOCK_RAW &&
2984	    !dev_validate_header(dev, skb->data, len)) {
2985		err = -EINVAL;
2986		goto out_free;
2987	}
2988
2989	skb_setup_tx_timestamp(skb, sockc.tsflags);
2990
2991	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2992	    !packet_extra_vlan_len_allowed(dev, skb)) {
2993		err = -EMSGSIZE;
2994		goto out_free;
2995	}
2996
2997	skb->protocol = proto;
2998	skb->dev = dev;
2999	skb->priority = sk->sk_priority;
3000	skb->mark = sockc.mark;
3001	skb->tstamp = sockc.transmit_time;
3002
3003	if (has_vnet_hdr) {
3004		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3005		if (err)
3006			goto out_free;
3007		len += sizeof(vnet_hdr);
3008		virtio_net_hdr_set_proto(skb, &vnet_hdr);
3009	}
3010
3011	packet_parse_headers(skb, sock);
3012
3013	if (unlikely(extra_len == 4))
3014		skb->no_fcs = 1;
3015
3016	err = po->xmit(skb);
3017	if (err > 0 && (err = net_xmit_errno(err)) != 0)
3018		goto out_unlock;
3019
3020	dev_put(dev);
3021
3022	return len;
3023
3024out_free:
3025	kfree_skb(skb);
3026out_unlock:
3027	if (dev)
3028		dev_put(dev);
3029out:
3030	return err;
3031}
3032
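/* Transmit entry point.  If a TX ring has been configured with
 * PACKET_TX_RING, frames are consumed from the ring by tpacket_snd();
 * otherwise the message payload is copied and sent via packet_snd() above.
 */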
3033static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3034{
3035	struct sock *sk = sock->sk;
3036	struct packet_sock *po = pkt_sk(sk);
3037
3038	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3039	 * tpacket_snd() will redo the check safely.
3040	 */
3041	if (data_race(po->tx_ring.pg_vec))
3042		return tpacket_snd(po, msg);
3043
3044	return packet_snd(sock, msg, len);
3045}
3046
3047/*
3048 *	Close a PACKET socket. This is fairly simple. We immediately go
3049 *	to 'closed' state and remove our protocol entry in the device list.
3050 */
3051
3052static int packet_release(struct socket *sock)
3053{
3054	struct sock *sk = sock->sk;
3055	struct packet_sock *po;
3056	struct packet_fanout *f;
3057	struct net *net;
3058	union tpacket_req_u req_u;
3059
3060	if (!sk)
3061		return 0;
3062
3063	net = sock_net(sk);
3064	po = pkt_sk(sk);
3065
3066	mutex_lock(&net->packet.sklist_lock);
3067	sk_del_node_init_rcu(sk);
3068	mutex_unlock(&net->packet.sklist_lock);
3069
3070	preempt_disable();
3071	sock_prot_inuse_add(net, sk->sk_prot, -1);
3072	preempt_enable();
3073
3074	spin_lock(&po->bind_lock);
3075	unregister_prot_hook(sk, false);
3076	packet_cached_dev_reset(po);
3077
3078	if (po->prot_hook.dev) {
3079		dev_put(po->prot_hook.dev);
3080		po->prot_hook.dev = NULL;
3081	}
3082	spin_unlock(&po->bind_lock);
3083
3084	packet_flush_mclist(sk);
3085
3086	lock_sock(sk);
3087	if (po->rx_ring.pg_vec) {
3088		memset(&req_u, 0, sizeof(req_u));
3089		packet_set_ring(sk, &req_u, 1, 0);
3090	}
3091
3092	if (po->tx_ring.pg_vec) {
3093		memset(&req_u, 0, sizeof(req_u));
3094		packet_set_ring(sk, &req_u, 1, 1);
3095	}
3096	release_sock(sk);
3097
3098	f = fanout_release(sk);
3099
3100	synchronize_net();
3101
3102	kfree(po->rollover);
3103	if (f) {
3104		fanout_release_data(f);
3105		kvfree(f);
3106	}
3107	/*
3108	 *	Now the socket is dead. No more input will appear.
3109	 */
3110	sock_orphan(sk);
3111	sock->sk = NULL;
3112
3113	/* Purge queues */
3114
3115	skb_queue_purge(&sk->sk_receive_queue);
3116	packet_free_pending(po);
3117	sk_refcnt_debug_release(sk);
3118
3119	sock_put(sk);
3120	return 0;
3121}
3122
3123/*
3124 *	Attach a packet hook.
3125 */
3126
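/* Re-bind the socket to a device and/or protocol.  The device may be named
 * (SOCK_PACKET bind) or given by ifindex.  When the binding actually
 * changes, the old prot_hook is unregistered first so no packets are
 * delivered against a stale binding; the hook is re-registered only if the
 * new protocol is non-zero and the device (if any) is up.  A bind to a
 * device that is down or has vanished leaves the socket unhooked and flags
 * ENETDOWN.
 */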
3127static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3128			  __be16 proto)
3129{
3130	struct packet_sock *po = pkt_sk(sk);
3131	struct net_device *dev_curr;
3132	__be16 proto_curr;
3133	bool need_rehook;
3134	struct net_device *dev = NULL;
3135	int ret = 0;
3136	bool unlisted = false;
3137
3138	lock_sock(sk);
3139	spin_lock(&po->bind_lock);
3140	rcu_read_lock();
3141
3142	if (po->fanout) {
3143		ret = -EINVAL;
3144		goto out_unlock;
3145	}
3146
3147	if (name) {
3148		dev = dev_get_by_name_rcu(sock_net(sk), name);
3149		if (!dev) {
3150			ret = -ENODEV;
3151			goto out_unlock;
3152		}
3153	} else if (ifindex) {
3154		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3155		if (!dev) {
3156			ret = -ENODEV;
3157			goto out_unlock;
3158		}
3159	}
3160
3161	if (dev)
3162		dev_hold(dev);
3163
3164	proto_curr = po->prot_hook.type;
3165	dev_curr = po->prot_hook.dev;
3166
3167	need_rehook = proto_curr != proto || dev_curr != dev;
3168
3169	if (need_rehook) {
3170		if (po->running) {
3171			rcu_read_unlock();
3172			/* prevents packet_notifier() from calling
3173			 * register_prot_hook()
3174			 */
3175			WRITE_ONCE(po->num, 0);
3176			__unregister_prot_hook(sk, true);
3177			rcu_read_lock();
3178			dev_curr = po->prot_hook.dev;
3179			if (dev)
3180				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3181								 dev->ifindex);
3182		}
3183
3184		BUG_ON(po->running);
3185		WRITE_ONCE(po->num, proto);
3186		po->prot_hook.type = proto;
3187
3188		if (unlikely(unlisted)) {
3189			dev_put(dev);
3190			po->prot_hook.dev = NULL;
3191			WRITE_ONCE(po->ifindex, -1);
3192			packet_cached_dev_reset(po);
3193		} else {
3194			po->prot_hook.dev = dev;
3195			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3196			packet_cached_dev_assign(po, dev);
3197		}
3198	}
3199	if (dev_curr)
3200		dev_put(dev_curr);
3201
3202	if (proto == 0 || !need_rehook)
3203		goto out_unlock;
3204
3205	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3206		register_prot_hook(sk);
3207	} else {
3208		sk->sk_err = ENETDOWN;
3209		if (!sock_flag(sk, SOCK_DEAD))
3210			sk_error_report(sk);
3211	}
3212
3213out_unlock:
3214	rcu_read_unlock();
3215	spin_unlock(&po->bind_lock);
3216	release_sock(sk);
3217	return ret;
3218}
3219
3220/*
3221 *	Bind a packet socket to a device
3222 */
3223
3224static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3225			    int addr_len)
3226{
3227	struct sock *sk = sock->sk;
3228	char name[sizeof(uaddr->sa_data) + 1];
3229
3230	/*
3231	 *	Check legality
3232	 */
3233
3234	if (addr_len != sizeof(struct sockaddr))
3235		return -EINVAL;
3236	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3237	 * zero-terminated.
3238	 */
3239	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3240	name[sizeof(uaddr->sa_data)] = 0;
3241
3242	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3243}
3244
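/* Illustrative userspace usage (sketch, not part of this file): binding an
 * AF_PACKET socket to one interface, where "fd" and "eth0" are
 * placeholders:
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family	= AF_PACKET,
 *		.sll_protocol	= htons(ETH_P_ALL),
 *		.sll_ifindex	= if_nametoindex("eth0"),
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *
 * A zero sll_protocol keeps the protocol the socket was created with (see
 * packet_bind() below).
 */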
3245static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3246{
3247	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3248	struct sock *sk = sock->sk;
3249
3250	/*
3251	 *	Check legality
3252	 */
3253
3254	if (addr_len < sizeof(struct sockaddr_ll))
3255		return -EINVAL;
3256	if (sll->sll_family != AF_PACKET)
3257		return -EINVAL;
3258
3259	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3260			      sll->sll_protocol ? : pkt_sk(sk)->num);
3261}
3262
3263static struct proto packet_proto = {
3264	.name	  = "PACKET",
3265	.owner	  = THIS_MODULE,
3266	.obj_size = sizeof(struct packet_sock),
3267};
3268
3269/*
3270 *	Create a packet socket (SOCK_RAW, SOCK_DGRAM or the legacy SOCK_PACKET).
3271 */
3272
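/* Illustrative userspace usage (sketch, not part of this file): creating a
 * packet socket that sees every link-level protocol; CAP_NET_RAW is
 * required, as checked below:
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *
 * SOCK_DGRAM delivers frames without the link-level header; the legacy
 * SOCK_PACKET type is still accepted for old applications.
 */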
3273static int packet_create(struct net *net, struct socket *sock, int protocol,
3274			 int kern)
3275{
3276	struct sock *sk;
3277	struct packet_sock *po;
3278	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3279	int err;
3280
3281	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3282		return -EPERM;
3283	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3284	    sock->type != SOCK_PACKET)
3285		return -ESOCKTNOSUPPORT;
3286
3287	sock->state = SS_UNCONNECTED;
3288
3289	err = -ENOBUFS;
3290	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3291	if (sk == NULL)
3292		goto out;
3293
3294	sock->ops = &packet_ops;
3295	if (sock->type == SOCK_PACKET)
3296		sock->ops = &packet_ops_spkt;
3297
3298	sock_init_data(sock, sk);
3299
3300	po = pkt_sk(sk);
3301	init_completion(&po->skb_completion);
3302	sk->sk_family = PF_PACKET;
3303	po->num = proto;
3304	po->xmit = dev_queue_xmit;
3305
3306	err = packet_alloc_pending(po);
3307	if (err)
3308		goto out2;
3309
3310	packet_cached_dev_reset(po);
3311
3312	sk->sk_destruct = packet_sock_destruct;
3313	sk_refcnt_debug_inc(sk);
3314
3315	/*
3316	 *	Attach a protocol block
3317	 */
3318
3319	spin_lock_init(&po->bind_lock);
3320	mutex_init(&po->pg_vec_lock);
3321	po->rollover = NULL;
3322	po->prot_hook.func = packet_rcv;
3323
3324	if (sock->type == SOCK_PACKET)
3325		po->prot_hook.func = packet_rcv_spkt;
3326
3327	po->prot_hook.af_packet_priv = sk;
3328
3329	if (proto) {
3330		po->prot_hook.type = proto;
3331		__register_prot_hook(sk);
3332	}
3333
3334	mutex_lock(&net->packet.sklist_lock);
3335	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3336	mutex_unlock(&net->packet.sklist_lock);
3337
3338	preempt_disable();
3339	sock_prot_inuse_add(net, &packet_proto, 1);
3340	preempt_enable();
3341
3342	return 0;
3343out2:
3344	sk_free(sk);
3345out:
3346	return err;
3347}
3348
3349/*
3350 *	Pull a packet from our receive queue and hand it to the user.
3351 *	If necessary we block.
3352 */
3353
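/* On success the return value is the number of bytes copied to the user
 * buffer (including a leading virtio_net header when PACKET_VNET_HDR is
 * enabled); with MSG_TRUNC the full frame length is reported even if the
 * buffer was smaller.
 */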
3354static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3355			  int flags)
3356{
3357	struct sock *sk = sock->sk;
3358	struct sk_buff *skb;
3359	int copied, err;
3360	int vnet_hdr_len = 0;
3361	unsigned int origlen = 0;
3362
3363	err = -EINVAL;
3364	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3365		goto out;
3366
3367#if 0
3368	/* What error should we return now? EUNATTACH? */
3369	if (pkt_sk(sk)->ifindex < 0)
3370		return -ENODEV;
3371#endif
3372
3373	if (flags & MSG_ERRQUEUE) {
3374		err = sock_recv_errqueue(sk, msg, len,
3375					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3376		goto out;
3377	}
3378
3379	/*
3380	 *	Call the generic datagram receiver. This handles all sorts
3381	 *	of horrible races and re-entrancy so we can forget about it
3382	 *	in the protocol layers.
3383	 *
3384	 *	Now it will return ENETDOWN if the device has just gone down,
3385	 *	but then it will block.
3386	 */
3387
3388	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3389
3390	/*
3391	 *	An error occurred, so return it. Because skb_recv_datagram()
3392	 *	handles the blocking for us, we don't need to see or worry
3393	 *	about blocking retries.
3394	 */
3395
3396	if (skb == NULL)
3397		goto out;
3398
3399	packet_rcv_try_clear_pressure(pkt_sk(sk));
3400
3401	if (pkt_sk(sk)->has_vnet_hdr) {
3402		err = packet_rcv_vnet(msg, skb, &len);
3403		if (err)
3404			goto out_free;
3405		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3406	}
3407
3408	/* You lose any data beyond the buffer you gave. If this worries
3409	 * a user program, it can ask the device for its MTU
3410	 * anyway.
3411	 */
3412	copied = skb->len;
3413	if (copied > len) {
3414		copied = len;
3415		msg->msg_flags |= MSG_TRUNC;
3416	}
3417
3418	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3419	if (err)
3420		goto out_free;
3421
3422	if (sock->type != SOCK_PACKET) {
3423		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3424
3425		/* Original length was stored in sockaddr_ll fields */
3426		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3427		sll->sll_family = AF_PACKET;
3428		sll->sll_protocol = skb->protocol;
3429	}
3430
3431	sock_recv_ts_and_drops(msg, sk, skb);
3432
3433	if (msg->msg_name) {
3434		int copy_len;
3435
3436		/* If the address length field is there to be filled
3437		 * in, we fill it in now.
3438		 */
3439		if (sock->type == SOCK_PACKET) {
3440			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3441			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3442			copy_len = msg->msg_namelen;
3443		} else {
3444			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3445
3446			msg->msg_namelen = sll->sll_halen +
3447				offsetof(struct sockaddr_ll, sll_addr);
3448			copy_len = msg->msg_namelen;
3449			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3450				memset(msg->msg_name +
3451				       offsetof(struct sockaddr_ll, sll_addr),
3452				       0, sizeof(sll->sll_addr));
3453				msg->msg_namelen = sizeof(struct sockaddr_ll);
3454			}
3455		}
3456		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3457	}
3458
3459	if (pkt_sk(sk)->auxdata) {
3460		struct tpacket_auxdata aux;
3461
3462		aux.tp_status = TP_STATUS_USER;
3463		if (skb->ip_summed == CHECKSUM_PARTIAL)
3464			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3465		else if (skb->pkt_type != PACKET_OUTGOING &&
3466			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3467			  skb_csum_unnecessary(skb)))
3468			aux.tp_status |= TP_STATUS_CSUM_VALID;
3469
3470		aux.tp_len = origlen;
3471		aux.tp_snaplen = skb->len;
3472		aux.tp_mac = 0;
3473		aux.tp_net = skb_network_offset(skb);
3474		if (skb_vlan_tag_present(skb)) {
3475			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3476			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3477			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3478		} else {
3479			aux.tp_vlan_tci = 0;
3480			aux.tp_vlan_tpid = 0;
3481		}
3482		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3483	}
3484
3485	/*
3486	 *	Free or return the buffer as appropriate. Again this
3487	 *	hides all the races and re-entrancy issues from us.
3488	 */
3489	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3490
3491out_free:
3492	skb_free_datagram(sk, skb);
3493out:
3494	return err;
3495}
3496
3497static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3498			       int peer)
3499{
3500	struct net_device *dev;
3501	struct sock *sk	= sock->sk;
3502
3503	if (peer)
3504		return -EOPNOTSUPP;
3505
3506	uaddr->sa_family = AF_PACKET;
3507	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3508	rcu_read_lock();
3509	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3510	if (dev)
3511		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3512	rcu_read_unlock();
3513
3514	return sizeof(*uaddr);
3515}
3516
3517static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3518			  int peer)
3519{
3520	struct net_device *dev;
3521	struct sock *sk = sock->sk;
3522	struct packet_sock *po = pkt_sk(sk);
3523	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3524	int ifindex;
3525
3526	if (peer)
3527		return -EOPNOTSUPP;
3528
3529	ifindex = READ_ONCE(po->ifindex);
3530	sll->sll_family = AF_PACKET;
3531	sll->sll_ifindex = ifindex;
3532	sll->sll_protocol = READ_ONCE(po->num);
3533	sll->sll_pkttype = 0;
3534	rcu_read_lock();
3535	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3536	if (dev) {
3537		sll->sll_hatype = dev->type;
3538		sll->sll_halen = dev->addr_len;
3539		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3540	} else {
3541		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3542		sll->sll_halen = 0;
3543	}
3544	rcu_read_unlock();
3545
3546	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3547}
3548
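/* Apply one membership entry to a device.  'what' is a delta: +1 when the
 * entry is added, -1 when it is removed.  For PACKET_MR_PROMISC and
 * PACKET_MR_ALLMULTI it adjusts the device reference counts; for
 * multicast/unicast entries it adds or deletes the address.
 */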
3549static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3550			 int what)
3551{
3552	switch (i->type) {
3553	case PACKET_MR_MULTICAST:
3554		if (i->alen != dev->addr_len)
3555			return -EINVAL;
3556		if (what > 0)
3557			return dev_mc_add(dev, i->addr);
3558		else
3559			return dev_mc_del(dev, i->addr);
3560		break;
3561	case PACKET_MR_PROMISC:
3562		return dev_set_promiscuity(dev, what);
3563	case PACKET_MR_ALLMULTI:
3564		return dev_set_allmulti(dev, what);
3565	case PACKET_MR_UNICAST:
3566		if (i->alen != dev->addr_len)
3567			return -EINVAL;
3568		if (what > 0)
3569			return dev_uc_add(dev, i->addr);
3570		else
3571			return dev_uc_del(dev, i->addr);
3572		break;
3573	default:
3574		break;
3575	}
3576	return 0;
3577}
3578
3579static void packet_dev_mclist_delete(struct net_device *dev,
3580				     struct packet_mclist **mlp)
3581{
3582	struct packet_mclist *ml;
3583
3584	while ((ml = *mlp) != NULL) {
3585		if (ml->ifindex == dev->ifindex) {
3586			packet_dev_mc(dev, ml, -1);
3587			*mlp = ml->next;
3588			kfree(ml);
3589		} else
3590			mlp = &ml->next;
3591	}
3592}
3593
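/* Illustrative userspace usage (sketch, not part of this file): these
 * membership helpers are reached via setsockopt().  Putting an interface
 * into promiscuous mode, with "fd" and "ifindex" as placeholders:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex	= ifindex,
 *		.mr_type	= PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * The reference is dropped via PACKET_DROP_MEMBERSHIP or implicitly when
 * the socket is closed (packet_flush_mclist()).
 */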
3594static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3595{
3596	struct packet_sock *po = pkt_sk(sk);
3597	struct packet_mclist *ml, *i;
3598	struct net_device *dev;
3599	int err;
3600
3601	rtnl_lock();
3602
3603	err = -ENODEV;
3604	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3605	if (!dev)
3606		goto done;
3607
3608	err = -EINVAL;
3609	if (mreq->mr_alen > dev->addr_len)
3610		goto done;
3611
3612	err = -ENOBUFS;
3613	i = kmalloc(sizeof(*i), GFP_KERNEL);
3614	if (i == NULL)
3615		goto done;
3616
3617	err = 0;
3618	for (ml = po->mclist; ml; ml = ml->next) {
3619		if (ml->ifindex == mreq->mr_ifindex &&
3620		    ml->type == mreq->mr_type &&
3621		    ml->alen == mreq->mr_alen &&
3622		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3623			ml->count++;
3624			/* Free the new element ... */
3625			kfree(i);
3626			goto done;
3627		}
3628	}
3629
3630	i->type = mreq->mr_type;
3631	i->ifindex = mreq->mr_ifindex;
3632	i->alen = mreq->mr_alen;
3633	memcpy(i->addr, mreq->mr_address, i->alen);
3634	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3635	i->count = 1;
3636	i->next = po->mclist;
3637	po->mclist = i;
3638	err = packet_dev_mc(dev, i, 1);
3639	if (err) {
3640		po->mclist = i->next;
3641		kfree(i);
3642	}
3643
3644done:
3645	rtnl_unlock();
3646	return err;
3647}
3648
3649static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3650{
3651	struct packet_mclist *ml, **mlp;
3652
3653	rtnl_lock();
3654
3655	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3656		if (ml->ifindex == mreq->mr_ifindex &&
3657		    ml->type == mreq->mr_type &&
3658		    ml->alen == mreq->mr_alen &&
3659		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3660			if (--ml->count == 0) {
3661				struct net_device *dev;
3662				*mlp = ml->next;
3663				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3664				if (dev)
3665					packet_dev_mc(dev, ml, -1);
3666				kfree(ml);
3667			}
3668			break;
3669		}
3670	}
3671	rtnl_unlock();
3672	return 0;
3673}
3674
3675static void packet_flush_mclist(struct sock *sk)
3676{
3677	struct packet_sock *po = pkt_sk(sk);
3678	struct packet_mclist *ml;
3679
3680	if (!po->mclist)
3681		return;
3682
3683	rtnl_lock();
3684	while ((ml = po->mclist) != NULL) {
3685		struct net_device *dev;
3686
3687		po->mclist = ml->next;
3688		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3689		if (dev != NULL)
3690			packet_dev_mc(dev, ml, -1);
3691		kfree(ml);
3692	}
3693	rtnl_unlock();
3694}
3695
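/* Illustrative userspace usage (sketch, not part of this file): a typical
 * TPACKET_V3 receive ring is configured before mmap(), e.g. with 64 blocks
 * of 4 MiB and 2 KiB frames:
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size		= 1 << 22,
 *		.tp_block_nr		= 64,
 *		.tp_frame_size		= 1 << 11,
 *		.tp_frame_nr		= ((1 << 22) / (1 << 11)) * 64,
 *		.tp_retire_blk_tov	= 60,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 * PACKET_VERSION must be set before the ring exists; once a ring has been
 * created the version can no longer be changed (-EBUSY below).
 */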
3696static int
3697packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3698		  unsigned int optlen)
3699{
3700	struct sock *sk = sock->sk;
3701	struct packet_sock *po = pkt_sk(sk);
3702	int ret;
3703
3704	if (level != SOL_PACKET)
3705		return -ENOPROTOOPT;
3706
3707	switch (optname) {
3708	case PACKET_ADD_MEMBERSHIP:
3709	case PACKET_DROP_MEMBERSHIP:
3710	{
3711		struct packet_mreq_max mreq;
3712		int len = optlen;
3713		memset(&mreq, 0, sizeof(mreq));
3714		if (len < sizeof(struct packet_mreq))
3715			return -EINVAL;
3716		if (len > sizeof(mreq))
3717			len = sizeof(mreq);
3718		if (copy_from_sockptr(&mreq, optval, len))
3719			return -EFAULT;
3720		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3721			return -EINVAL;
3722		if (optname == PACKET_ADD_MEMBERSHIP)
3723			ret = packet_mc_add(sk, &mreq);
3724		else
3725			ret = packet_mc_drop(sk, &mreq);
3726		return ret;
3727	}
3728
3729	case PACKET_RX_RING:
3730	case PACKET_TX_RING:
3731	{
3732		union tpacket_req_u req_u;
3733		int len;
3734
3735		lock_sock(sk);
3736		switch (po->tp_version) {
3737		case TPACKET_V1:
3738		case TPACKET_V2:
3739			len = sizeof(req_u.req);
3740			break;
3741		case TPACKET_V3:
3742		default:
3743			len = sizeof(req_u.req3);
3744			break;
3745		}
3746		if (optlen < len) {
3747			ret = -EINVAL;
3748		} else {
3749			if (copy_from_sockptr(&req_u.req, optval, len))
3750				ret = -EFAULT;
3751			else
3752				ret = packet_set_ring(sk, &req_u, 0,
3753						    optname == PACKET_TX_RING);
3754		}
3755		release_sock(sk);
3756		return ret;
3757	}
3758	case PACKET_COPY_THRESH:
3759	{
3760		int val;
3761
3762		if (optlen != sizeof(val))
3763			return -EINVAL;
3764		if (copy_from_sockptr(&val, optval, sizeof(val)))
3765			return -EFAULT;
3766
3767		pkt_sk(sk)->copy_thresh = val;
3768		return 0;
3769	}
3770	case PACKET_VERSION:
3771	{
3772		int val;
3773
3774		if (optlen != sizeof(val))
3775			return -EINVAL;
3776		if (copy_from_sockptr(&val, optval, sizeof(val)))
3777			return -EFAULT;
3778		switch (val) {
3779		case TPACKET_V1:
3780		case TPACKET_V2:
3781		case TPACKET_V3:
3782			break;
3783		default:
3784			return -EINVAL;
3785		}
3786		lock_sock(sk);
3787		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3788			ret = -EBUSY;
3789		} else {
3790			po->tp_version = val;
3791			ret = 0;
3792		}
3793		release_sock(sk);
3794		return ret;
3795	}
3796	case PACKET_RESERVE:
3797	{
3798		unsigned int val;
3799
3800		if (optlen != sizeof(val))
3801			return -EINVAL;
3802		if (copy_from_sockptr(&val, optval, sizeof(val)))
3803			return -EFAULT;
3804		if (val > INT_MAX)
3805			return -EINVAL;
3806		lock_sock(sk);
3807		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3808			ret = -EBUSY;
3809		} else {
3810			po->tp_reserve = val;
3811			ret = 0;
3812		}
3813		release_sock(sk);
3814		return ret;
3815	}
3816	case PACKET_LOSS:
3817	{
3818		unsigned int val;
3819
3820		if (optlen != sizeof(val))
3821			return -EINVAL;
3822		if (copy_from_sockptr(&val, optval, sizeof(val)))
3823			return -EFAULT;
3824
3825		lock_sock(sk);
3826		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3827			ret = -EBUSY;
3828		} else {
3829			po->tp_loss = !!val;
3830			ret = 0;
3831		}
3832		release_sock(sk);
3833		return ret;
3834	}
3835	case PACKET_AUXDATA:
3836	{
3837		int val;
3838
3839		if (optlen < sizeof(val))
3840			return -EINVAL;
3841		if (copy_from_sockptr(&val, optval, sizeof(val)))
3842			return -EFAULT;
3843
3844		lock_sock(sk);
3845		po->auxdata = !!val;
3846		release_sock(sk);
3847		return 0;
3848	}
3849	case PACKET_ORIGDEV:
3850	{
3851		int val;
3852
3853		if (optlen < sizeof(val))
3854			return -EINVAL;
3855		if (copy_from_sockptr(&val, optval, sizeof(val)))
3856			return -EFAULT;
3857
3858		lock_sock(sk);
3859		po->origdev = !!val;
3860		release_sock(sk);
3861		return 0;
3862	}
3863	case PACKET_VNET_HDR:
3864	{
3865		int val;
3866
3867		if (sock->type != SOCK_RAW)
3868			return -EINVAL;
3869		if (optlen < sizeof(val))
3870			return -EINVAL;
3871		if (copy_from_sockptr(&val, optval, sizeof(val)))
3872			return -EFAULT;
3873
3874		lock_sock(sk);
3875		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3876			ret = -EBUSY;
3877		} else {
3878			po->has_vnet_hdr = !!val;
3879			ret = 0;
3880		}
3881		release_sock(sk);
3882		return ret;
3883	}
3884	case PACKET_TIMESTAMP:
3885	{
3886		int val;
3887
3888		if (optlen != sizeof(val))
3889			return -EINVAL;
3890		if (copy_from_sockptr(&val, optval, sizeof(val)))
3891			return -EFAULT;
3892
3893		po->tp_tstamp = val;
3894		return 0;
3895	}
3896	case PACKET_FANOUT:
3897	{
3898		struct fanout_args args = { 0 };
3899
3900		if (optlen != sizeof(int) && optlen != sizeof(args))
3901			return -EINVAL;
3902		if (copy_from_sockptr(&args, optval, optlen))
3903			return -EFAULT;
3904
3905		return fanout_add(sk, &args);
3906	}
3907	case PACKET_FANOUT_DATA:
3908	{
3909		if (!po->fanout)
3910			return -EINVAL;
3911
3912		return fanout_set_data(po, optval, optlen);
3913	}
3914	case PACKET_IGNORE_OUTGOING:
3915	{
3916		int val;
3917
3918		if (optlen != sizeof(val))
3919			return -EINVAL;
3920		if (copy_from_sockptr(&val, optval, sizeof(val)))
3921			return -EFAULT;
3922		if (val < 0 || val > 1)
3923			return -EINVAL;
3924
3925		po->prot_hook.ignore_outgoing = !!val;
3926		return 0;
3927	}
3928	case PACKET_TX_HAS_OFF:
3929	{
3930		unsigned int val;
3931
3932		if (optlen != sizeof(val))
3933			return -EINVAL;
3934		if (copy_from_sockptr(&val, optval, sizeof(val)))
3935			return -EFAULT;
3936
3937		lock_sock(sk);
3938		if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
3939			po->tp_tx_has_off = !!val;
3940
3941		release_sock(sk);
3942		return 0;
3943	}
3944	case PACKET_QDISC_BYPASS:
3945	{
3946		int val;
3947
3948		if (optlen != sizeof(val))
3949			return -EINVAL;
3950		if (copy_from_sockptr(&val, optval, sizeof(val)))
3951			return -EFAULT;
3952
3953		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3954		return 0;
3955	}
3956	default:
3957		return -ENOPROTOOPT;
3958	}
3959}
3960
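/* Illustrative userspace usage (sketch, not part of this file): reading,
 * and thereby resetting, the receive statistics with the pre-V3 layout:
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *
 * The counters are cleared on read; with TPACKET_V3 the larger
 * struct tpacket_stats_v3 is returned instead.
 */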
3961static int packet_getsockopt(struct socket *sock, int level, int optname,
3962			     char __user *optval, int __user *optlen)
3963{
3964	int len;
3965	int val, lv = sizeof(val);
3966	struct sock *sk = sock->sk;
3967	struct packet_sock *po = pkt_sk(sk);
3968	void *data = &val;
3969	union tpacket_stats_u st;
3970	struct tpacket_rollover_stats rstats;
3971	int drops;
3972
3973	if (level != SOL_PACKET)
3974		return -ENOPROTOOPT;
3975
3976	if (get_user(len, optlen))
3977		return -EFAULT;
3978
3979	if (len < 0)
3980		return -EINVAL;
3981
3982	switch (optname) {
3983	case PACKET_STATISTICS:
3984		spin_lock_bh(&sk->sk_receive_queue.lock);
3985		memcpy(&st, &po->stats, sizeof(st));
3986		memset(&po->stats, 0, sizeof(po->stats));
3987		spin_unlock_bh(&sk->sk_receive_queue.lock);
3988		drops = atomic_xchg(&po->tp_drops, 0);
3989
3990		if (po->tp_version == TPACKET_V3) {
3991			lv = sizeof(struct tpacket_stats_v3);
3992			st.stats3.tp_drops = drops;
3993			st.stats3.tp_packets += drops;
3994			data = &st.stats3;
3995		} else {
3996			lv = sizeof(struct tpacket_stats);
3997			st.stats1.tp_drops = drops;
3998			st.stats1.tp_packets += drops;
3999			data = &st.stats1;
4000		}
4001
4002		break;
4003	case PACKET_AUXDATA:
4004		val = po->auxdata;
4005		break;
4006	case PACKET_ORIGDEV:
4007		val = po->origdev;
4008		break;
4009	case PACKET_VNET_HDR:
4010		val = po->has_vnet_hdr;
4011		break;
4012	case PACKET_VERSION:
4013		val = po->tp_version;
4014		break;
4015	case PACKET_HDRLEN:
4016		if (len > sizeof(int))
4017			len = sizeof(int);
4018		if (len < sizeof(int))
4019			return -EINVAL;
4020		if (copy_from_user(&val, optval, len))
4021			return -EFAULT;
4022		switch (val) {
4023		case TPACKET_V1:
4024			val = sizeof(struct tpacket_hdr);
4025			break;
4026		case TPACKET_V2:
4027			val = sizeof(struct tpacket2_hdr);
4028			break;
4029		case TPACKET_V3:
4030			val = sizeof(struct tpacket3_hdr);
4031			break;
4032		default:
4033			return -EINVAL;
4034		}
4035		break;
4036	case PACKET_RESERVE:
4037		val = po->tp_reserve;
4038		break;
4039	case PACKET_LOSS:
4040		val = po->tp_loss;
4041		break;
4042	case PACKET_TIMESTAMP:
4043		val = po->tp_tstamp;
4044		break;
4045	case PACKET_FANOUT:
4046		val = (po->fanout ?
4047		       ((u32)po->fanout->id |
4048			((u32)po->fanout->type << 16) |
4049			((u32)po->fanout->flags << 24)) :
4050		       0);
4051		break;
4052	case PACKET_IGNORE_OUTGOING:
4053		val = po->prot_hook.ignore_outgoing;
4054		break;
4055	case PACKET_ROLLOVER_STATS:
4056		if (!po->rollover)
4057			return -EINVAL;
4058		rstats.tp_all = atomic_long_read(&po->rollover->num);
4059		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4060		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4061		data = &rstats;
4062		lv = sizeof(rstats);
4063		break;
4064	case PACKET_TX_HAS_OFF:
4065		val = po->tp_tx_has_off;
4066		break;
4067	case PACKET_QDISC_BYPASS:
4068		val = packet_use_direct_xmit(po);
4069		break;
4070	default:
4071		return -ENOPROTOOPT;
4072	}
4073
4074	if (len > lv)
4075		len = lv;
4076	if (put_user(len, optlen))
4077		return -EFAULT;
4078	if (copy_to_user(optval, data, len))
4079		return -EFAULT;
4080	return 0;
4081}
4082
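/* Netdevice notifier: NETDEV_UNREGISTER drops the socket's multicast
 * entries and cached device and, like NETDEV_DOWN, unregisters the
 * protocol hook and reports ENETDOWN; NETDEV_UP re-registers the hook of a
 * socket bound to that device.
 */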
4083static int packet_notifier(struct notifier_block *this,
4084			   unsigned long msg, void *ptr)
4085{
4086	struct sock *sk;
4087	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4088	struct net *net = dev_net(dev);
4089
4090	rcu_read_lock();
4091	sk_for_each_rcu(sk, &net->packet.sklist) {
4092		struct packet_sock *po = pkt_sk(sk);
4093
4094		switch (msg) {
4095		case NETDEV_UNREGISTER:
4096			if (po->mclist)
4097				packet_dev_mclist_delete(dev, &po->mclist);
4098			fallthrough;
4099
4100		case NETDEV_DOWN:
4101			if (dev->ifindex == po->ifindex) {
4102				spin_lock(&po->bind_lock);
4103				if (po->running) {
4104					__unregister_prot_hook(sk, false);
4105					sk->sk_err = ENETDOWN;
4106					if (!sock_flag(sk, SOCK_DEAD))
4107						sk_error_report(sk);
4108				}
4109				if (msg == NETDEV_UNREGISTER) {
4110					packet_cached_dev_reset(po);
4111					WRITE_ONCE(po->ifindex, -1);
4112					if (po->prot_hook.dev)
4113						dev_put(po->prot_hook.dev);
4114					po->prot_hook.dev = NULL;
4115				}
4116				spin_unlock(&po->bind_lock);
4117			}
4118			break;
4119		case NETDEV_UP:
4120			if (dev->ifindex == po->ifindex) {
4121				spin_lock(&po->bind_lock);
4122				if (po->num)
4123					register_prot_hook(sk);
4124				spin_unlock(&po->bind_lock);
4125			}
4126			break;
4127		}
4128	}
4129	rcu_read_unlock();
4130	return NOTIFY_DONE;
4131}
4132
4133
4134static int packet_ioctl(struct socket *sock, unsigned int cmd,
4135			unsigned long arg)
4136{
4137	struct sock *sk = sock->sk;
4138
4139	switch (cmd) {
4140	case SIOCOUTQ:
4141	{
4142		int amount = sk_wmem_alloc_get(sk);
4143
4144		return put_user(amount, (int __user *)arg);
4145	}
4146	case SIOCINQ:
4147	{
4148		struct sk_buff *skb;
4149		int amount = 0;
4150
4151		spin_lock_bh(&sk->sk_receive_queue.lock);
4152		skb = skb_peek(&sk->sk_receive_queue);
4153		if (skb)
4154			amount = skb->len;
4155		spin_unlock_bh(&sk->sk_receive_queue.lock);
4156		return put_user(amount, (int __user *)arg);
4157	}
4158#ifdef CONFIG_INET
4159	case SIOCADDRT:
4160	case SIOCDELRT:
4161	case SIOCDARP:
4162	case SIOCGARP:
4163	case SIOCSARP:
4164	case SIOCGIFADDR:
4165	case SIOCSIFADDR:
4166	case SIOCGIFBRDADDR:
4167	case SIOCSIFBRDADDR:
4168	case SIOCGIFNETMASK:
4169	case SIOCSIFNETMASK:
4170	case SIOCGIFDSTADDR:
4171	case SIOCSIFDSTADDR:
4172	case SIOCSIFFLAGS:
4173		return inet_dgram_ops.ioctl(sock, cmd, arg);
4174#endif
4175
4176	default:
4177		return -ENOIOCTLCMD;
4178	}
4179	return 0;
4180}
4181
4182static __poll_t packet_poll(struct file *file, struct socket *sock,
4183				poll_table *wait)
4184{
4185	struct sock *sk = sock->sk;
4186	struct packet_sock *po = pkt_sk(sk);
4187	__poll_t mask = datagram_poll(file, sock, wait);
4188
4189	spin_lock_bh(&sk->sk_receive_queue.lock);
4190	if (po->rx_ring.pg_vec) {
4191		if (!packet_previous_rx_frame(po, &po->rx_ring,
4192			TP_STATUS_KERNEL))
4193			mask |= EPOLLIN | EPOLLRDNORM;
4194	}
4195	packet_rcv_try_clear_pressure(po);
4196	spin_unlock_bh(&sk->sk_receive_queue.lock);
4197	spin_lock_bh(&sk->sk_write_queue.lock);
4198	if (po->tx_ring.pg_vec) {
4199		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4200			mask |= EPOLLOUT | EPOLLWRNORM;
4201	}
4202	spin_unlock_bh(&sk->sk_write_queue.lock);
4203	return mask;
4204}
4205
4206
4207/* Dirty? Well, I still have not found a better way to account
4208 * for user mmaps.
4209 */
4210
4211static void packet_mm_open(struct vm_area_struct *vma)
4212{
4213	struct file *file = vma->vm_file;
4214	struct socket *sock = file->private_data;
4215	struct sock *sk = sock->sk;
4216
4217	if (sk)
4218		atomic_inc(&pkt_sk(sk)->mapped);
4219}
4220
4221static void packet_mm_close(struct vm_area_struct *vma)
4222{
4223	struct file *file = vma->vm_file;
4224	struct socket *sock = file->private_data;
4225	struct sock *sk = sock->sk;
4226
4227	if (sk)
4228		atomic_dec(&pkt_sk(sk)->mapped);
4229}
4230
4231static const struct vm_operations_struct packet_mmap_ops = {
4232	.open	=	packet_mm_open,
4233	.close	=	packet_mm_close,
4234};
4235
4236static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4237			unsigned int len)
4238{
4239	int i;
4240
4241	for (i = 0; i < len; i++) {
4242		if (likely(pg_vec[i].buffer)) {
4243			if (is_vmalloc_addr(pg_vec[i].buffer))
4244				vfree(pg_vec[i].buffer);
4245			else
4246				free_pages((unsigned long)pg_vec[i].buffer,
4247					   order);
4248			pg_vec[i].buffer = NULL;
4249		}
4250	}
4251	kfree(pg_vec);
4252}
4253
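/* Allocate one ring block: first try physically contiguous pages without
 * triggering reclaim, then fall back to vmalloc, and finally retry the
 * page allocator with __GFP_NORETRY cleared.
 */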
4254static char *alloc_one_pg_vec_page(unsigned long order)
4255{
4256	char *buffer;
4257	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4258			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4259
4260	buffer = (char *) __get_free_pages(gfp_flags, order);
4261	if (buffer)
4262		return buffer;
4263
4264	/* __get_free_pages failed, fall back to vmalloc */
4265	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4266	if (buffer)
4267		return buffer;
4268
4269	/* vmalloc failed, let's dig into swap here */
4270	gfp_flags &= ~__GFP_NORETRY;
4271	buffer = (char *) __get_free_pages(gfp_flags, order);
4272	if (buffer)
4273		return buffer;
4274
4275	/* complete and utter failure */
4276	return NULL;
4277}
4278
4279static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4280{
4281	unsigned int block_nr = req->tp_block_nr;
4282	struct pgv *pg_vec;
4283	int i;
4284
4285	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4286	if (unlikely(!pg_vec))
4287		goto out;
4288
4289	for (i = 0; i < block_nr; i++) {
4290		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4291		if (unlikely(!pg_vec[i].buffer))
4292			goto out_free_pgvec;
4293	}
4294
4295out:
4296	return pg_vec;
4297
4298out_free_pgvec:
4299	free_pg_vec(pg_vec, order, block_nr);
4300	pg_vec = NULL;
4301	goto out;
4302}
4303
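/* Create or tear down an RX/TX ring.  The requested geometry is validated
 * (page-aligned block size, frame size and count consistency), the block
 * memory is allocated, the protocol hook is temporarily unregistered, and
 * the new page vector is swapped in under pg_vec_lock, provided the old
 * ring is not currently mmapped.
 */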
4304static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4305		int closing, int tx_ring)
4306{
4307	struct pgv *pg_vec = NULL;
4308	struct packet_sock *po = pkt_sk(sk);
4309	unsigned long *rx_owner_map = NULL;
4310	int was_running, order = 0;
4311	struct packet_ring_buffer *rb;
4312	struct sk_buff_head *rb_queue;
4313	__be16 num;
4314	int err;
4315	/* Local alias added to keep code churn minimal */
4316	struct tpacket_req *req = &req_u->req;
4317
4318	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4319	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4320
4321	err = -EBUSY;
4322	if (!closing) {
4323		if (atomic_read(&po->mapped))
4324			goto out;
4325		if (packet_read_pending(rb))
4326			goto out;
4327	}
4328
4329	if (req->tp_block_nr) {
4330		unsigned int min_frame_size;
4331
4332		/* Sanity tests and some calculations */
4333		err = -EBUSY;
4334		if (unlikely(rb->pg_vec))
4335			goto out;
4336
4337		switch (po->tp_version) {
4338		case TPACKET_V1:
4339			po->tp_hdrlen = TPACKET_HDRLEN;
4340			break;
4341		case TPACKET_V2:
4342			po->tp_hdrlen = TPACKET2_HDRLEN;
4343			break;
4344		case TPACKET_V3:
4345			po->tp_hdrlen = TPACKET3_HDRLEN;
4346			break;
4347		}
4348
4349		err = -EINVAL;
4350		if (unlikely((int)req->tp_block_size <= 0))
4351			goto out;
4352		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4353			goto out;
4354		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4355		if (po->tp_version >= TPACKET_V3 &&
4356		    req->tp_block_size <
4357		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4358			goto out;
4359		if (unlikely(req->tp_frame_size < min_frame_size))
4360			goto out;
4361		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4362			goto out;
4363
4364		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4365		if (unlikely(rb->frames_per_block == 0))
4366			goto out;
4367		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4368			goto out;
4369		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4370					req->tp_frame_nr))
4371			goto out;
4372
4373		err = -ENOMEM;
4374		order = get_order(req->tp_block_size);
4375		pg_vec = alloc_pg_vec(req, order);
4376		if (unlikely(!pg_vec))
4377			goto out;
4378		switch (po->tp_version) {
4379		case TPACKET_V3:
4380			/* Block transmit is not supported yet */
4381			if (!tx_ring) {
4382				init_prb_bdqc(po, rb, pg_vec, req_u);
4383			} else {
4384				struct tpacket_req3 *req3 = &req_u->req3;
4385
4386				if (req3->tp_retire_blk_tov ||
4387				    req3->tp_sizeof_priv ||
4388				    req3->tp_feature_req_word) {
4389					err = -EINVAL;
4390					goto out_free_pg_vec;
4391				}
4392			}
4393			break;
4394		default:
4395			if (!tx_ring) {
4396				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4397					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4398				if (!rx_owner_map)
4399					goto out_free_pg_vec;
4400			}
4401			break;
4402		}
4403	}
4404	/* Done */
4405	else {
4406		err = -EINVAL;
4407		if (unlikely(req->tp_frame_nr))
4408			goto out;
4409	}
4410
4411
4412	/* Detach socket from network */
4413	spin_lock(&po->bind_lock);
4414	was_running = po->running;
4415	num = po->num;
4416	if (was_running) {
4417		WRITE_ONCE(po->num, 0);
4418		__unregister_prot_hook(sk, false);
4419	}
4420	spin_unlock(&po->bind_lock);
4421
4422	synchronize_net();
4423
4424	err = -EBUSY;
4425	mutex_lock(&po->pg_vec_lock);
4426	if (closing || atomic_read(&po->mapped) == 0) {
4427		err = 0;
4428		spin_lock_bh(&rb_queue->lock);
4429		swap(rb->pg_vec, pg_vec);
4430		if (po->tp_version <= TPACKET_V2)
4431			swap(rb->rx_owner_map, rx_owner_map);
4432		rb->frame_max = (req->tp_frame_nr - 1);
4433		rb->head = 0;
4434		rb->frame_size = req->tp_frame_size;
4435		spin_unlock_bh(&rb_queue->lock);
4436
4437		swap(rb->pg_vec_order, order);
4438		swap(rb->pg_vec_len, req->tp_block_nr);
4439
4440		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4441		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4442						tpacket_rcv : packet_rcv;
4443		skb_queue_purge(rb_queue);
4444		if (atomic_read(&po->mapped))
4445			pr_err("packet_mmap: vma is busy: %d\n",
4446			       atomic_read(&po->mapped));
4447	}
4448	mutex_unlock(&po->pg_vec_lock);
4449
4450	spin_lock(&po->bind_lock);
4451	if (was_running) {
4452		WRITE_ONCE(po->num, num);
4453		register_prot_hook(sk);
4454	}
4455	spin_unlock(&po->bind_lock);
4456	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4457		/* Because we don't support block-based V3 on tx-ring */
4458		if (!tx_ring)
4459			prb_shutdown_retire_blk_timer(po, rb_queue);
4460	}
4461
4462out_free_pg_vec:
4463	bitmap_free(rx_owner_map);
4464	if (pg_vec)
4465		free_pg_vec(pg_vec, order, req->tp_block_nr);
4466out:
4467	return err;
4468}
4469
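/* Illustrative userspace usage (sketch, not part of this file): the RX and
 * TX rings (in that order, whichever are configured) are mapped with a
 * single call at offset 0, where rx_bytes/tx_bytes are placeholders for
 * the configured ring sizes:
 *
 *	void *ring = mmap(NULL, rx_bytes + tx_bytes, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * packet_mmap() below rejects a non-zero pgoff and any length that does
 * not exactly cover the configured rings.
 */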
4470static int packet_mmap(struct file *file, struct socket *sock,
4471		struct vm_area_struct *vma)
4472{
4473	struct sock *sk = sock->sk;
4474	struct packet_sock *po = pkt_sk(sk);
4475	unsigned long size, expected_size;
4476	struct packet_ring_buffer *rb;
4477	unsigned long start;
4478	int err = -EINVAL;
4479	int i;
4480
4481	if (vma->vm_pgoff)
4482		return -EINVAL;
4483
4484	mutex_lock(&po->pg_vec_lock);
4485
4486	expected_size = 0;
4487	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4488		if (rb->pg_vec) {
4489			expected_size += rb->pg_vec_len
4490						* rb->pg_vec_pages
4491						* PAGE_SIZE;
4492		}
4493	}
4494
4495	if (expected_size == 0)
4496		goto out;
4497
4498	size = vma->vm_end - vma->vm_start;
4499	if (size != expected_size)
4500		goto out;
4501
4502	start = vma->vm_start;
4503	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4504		if (rb->pg_vec == NULL)
4505			continue;
4506
4507		for (i = 0; i < rb->pg_vec_len; i++) {
4508			struct page *page;
4509			void *kaddr = rb->pg_vec[i].buffer;
4510			int pg_num;
4511
4512			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4513				page = pgv_to_page(kaddr);
4514				err = vm_insert_page(vma, start, page);
4515				if (unlikely(err))
4516					goto out;
4517				start += PAGE_SIZE;
4518				kaddr += PAGE_SIZE;
4519			}
4520		}
4521	}
4522
4523	atomic_inc(&po->mapped);
4524	vma->vm_ops = &packet_mmap_ops;
4525	err = 0;
4526
4527out:
4528	mutex_unlock(&po->pg_vec_lock);
4529	return err;
4530}
4531
4532static const struct proto_ops packet_ops_spkt = {
4533	.family =	PF_PACKET,
4534	.owner =	THIS_MODULE,
4535	.release =	packet_release,
4536	.bind =		packet_bind_spkt,
4537	.connect =	sock_no_connect,
4538	.socketpair =	sock_no_socketpair,
4539	.accept =	sock_no_accept,
4540	.getname =	packet_getname_spkt,
4541	.poll =		datagram_poll,
4542	.ioctl =	packet_ioctl,
4543	.gettstamp =	sock_gettstamp,
4544	.listen =	sock_no_listen,
4545	.shutdown =	sock_no_shutdown,
4546	.sendmsg =	packet_sendmsg_spkt,
4547	.recvmsg =	packet_recvmsg,
4548	.mmap =		sock_no_mmap,
4549	.sendpage =	sock_no_sendpage,
4550};
4551
4552static const struct proto_ops packet_ops = {
4553	.family =	PF_PACKET,
4554	.owner =	THIS_MODULE,
4555	.release =	packet_release,
4556	.bind =		packet_bind,
4557	.connect =	sock_no_connect,
4558	.socketpair =	sock_no_socketpair,
4559	.accept =	sock_no_accept,
4560	.getname =	packet_getname,
4561	.poll =		packet_poll,
4562	.ioctl =	packet_ioctl,
4563	.gettstamp =	sock_gettstamp,
4564	.listen =	sock_no_listen,
4565	.shutdown =	sock_no_shutdown,
4566	.setsockopt =	packet_setsockopt,
4567	.getsockopt =	packet_getsockopt,
4568	.sendmsg =	packet_sendmsg,
4569	.recvmsg =	packet_recvmsg,
4570	.mmap =		packet_mmap,
4571	.sendpage =	sock_no_sendpage,
4572};
4573
4574static const struct net_proto_family packet_family_ops = {
4575	.family =	PF_PACKET,
4576	.create =	packet_create,
4577	.owner	=	THIS_MODULE,
4578};
4579
4580static struct notifier_block packet_netdev_notifier = {
4581	.notifier_call =	packet_notifier,
4582};
4583
4584#ifdef CONFIG_PROC_FS
4585
4586static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4587	__acquires(RCU)
4588{
4589	struct net *net = seq_file_net(seq);
4590
4591	rcu_read_lock();
4592	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4593}
4594
4595static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4596{
4597	struct net *net = seq_file_net(seq);
4598	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4599}
4600
4601static void packet_seq_stop(struct seq_file *seq, void *v)
4602	__releases(RCU)
4603{
4604	rcu_read_unlock();
4605}
4606
4607static int packet_seq_show(struct seq_file *seq, void *v)
4608{
4609	if (v == SEQ_START_TOKEN)
4610		seq_printf(seq,
4611			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
4612			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4613	else {
4614		struct sock *s = sk_entry(v);
4615		const struct packet_sock *po = pkt_sk(s);
4616
4617		seq_printf(seq,
4618			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4619			   s,
4620			   refcount_read(&s->sk_refcnt),
4621			   s->sk_type,
4622			   ntohs(READ_ONCE(po->num)),
4623			   READ_ONCE(po->ifindex),
4624			   po->running,
4625			   atomic_read(&s->sk_rmem_alloc),
4626			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4627			   sock_i_ino(s));
4628	}
4629
4630	return 0;
4631}
4632
4633static const struct seq_operations packet_seq_ops = {
4634	.start	= packet_seq_start,
4635	.next	= packet_seq_next,
4636	.stop	= packet_seq_stop,
4637	.show	= packet_seq_show,
4638};
4639#endif
4640
4641static int __net_init packet_net_init(struct net *net)
4642{
4643	mutex_init(&net->packet.sklist_lock);
4644	INIT_HLIST_HEAD(&net->packet.sklist);
4645
4646#ifdef CONFIG_PROC_FS
4647	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4648			sizeof(struct seq_net_private)))
4649		return -ENOMEM;
4650#endif /* CONFIG_PROC_FS */
4651
4652	return 0;
4653}
4654
4655static void __net_exit packet_net_exit(struct net *net)
4656{
4657	remove_proc_entry("packet", net->proc_net);
4658	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4659}
4660
4661static struct pernet_operations packet_net_ops = {
4662	.init = packet_net_init,
4663	.exit = packet_net_exit,
4664};
4665
4666
4667static void __exit packet_exit(void)
4668{
4669	unregister_netdevice_notifier(&packet_netdev_notifier);
4670	unregister_pernet_subsys(&packet_net_ops);
4671	sock_unregister(PF_PACKET);
4672	proto_unregister(&packet_proto);
4673}
4674
4675static int __init packet_init(void)
4676{
4677	int rc;
4678
4679	rc = proto_register(&packet_proto, 0);
4680	if (rc)
4681		goto out;
4682	rc = sock_register(&packet_family_ops);
4683	if (rc)
4684		goto out_proto;
4685	rc = register_pernet_subsys(&packet_net_ops);
4686	if (rc)
4687		goto out_sock;
4688	rc = register_netdevice_notifier(&packet_netdev_notifier);
4689	if (rc)
4690		goto out_pernet;
4691
4692	return 0;
4693
4694out_pernet:
4695	unregister_pernet_subsys(&packet_net_ops);
4696out_sock:
4697	sock_unregister(PF_PACKET);
4698out_proto:
4699	proto_unregister(&packet_proto);
4700out:
4701	return rc;
4702}
4703
4704module_init(packet_init);
4705module_exit(packet_exit);
4706MODULE_LICENSE("GPL");
4707MODULE_ALIAS_NETPROTO(PF_PACKET);