   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  50
  51#include <linux/ethtool.h>
  52#include <linux/filter.h>
  53#include <linux/types.h>
  54#include <linux/mm.h>
  55#include <linux/capability.h>
  56#include <linux/fcntl.h>
  57#include <linux/socket.h>
  58#include <linux/in.h>
  59#include <linux/inet.h>
  60#include <linux/netdevice.h>
  61#include <linux/if_packet.h>
  62#include <linux/wireless.h>
  63#include <linux/kernel.h>
  64#include <linux/kmod.h>
  65#include <linux/slab.h>
  66#include <linux/vmalloc.h>
  67#include <net/net_namespace.h>
  68#include <net/ip.h>
  69#include <net/protocol.h>
  70#include <linux/skbuff.h>
  71#include <net/sock.h>
  72#include <linux/errno.h>
  73#include <linux/timer.h>
  74#include <linux/uaccess.h>
  75#include <asm/ioctls.h>
  76#include <asm/page.h>
  77#include <asm/cacheflush.h>
  78#include <asm/io.h>
  79#include <linux/proc_fs.h>
  80#include <linux/seq_file.h>
  81#include <linux/poll.h>
  82#include <linux/module.h>
  83#include <linux/init.h>
  84#include <linux/mutex.h>
  85#include <linux/if_vlan.h>
  86#include <linux/virtio_net.h>
  87#include <linux/errqueue.h>
  88#include <linux/net_tstamp.h>
  89#include <linux/percpu.h>
  90#ifdef CONFIG_INET
  91#include <net/inet_common.h>
  92#endif
  93#include <linux/bpf.h>
  94#include <net/compat.h>
  95#include <linux/netfilter_netdev.h>
  96
  97#include "internal.h"
  98
  99/*
 100   Assumptions:
 101   - If the device has no dev->header_ops->create, there is no LL header
 102     visible above the device. In this case, its hard_header_len should be 0.
 103     The device may prepend its own header internally. In this case, its
 104     needed_headroom should be set to the space needed for it to add its
 105     internal header.
 106     For example, a WiFi driver pretending to be an Ethernet driver should
 107     set its hard_header_len to be the Ethernet header length, and set its
 108     needed_headroom to be (the real WiFi header length - the fake Ethernet
 109     header length).
 110   - packet socket receives packets with pulled ll header,
 111     so that SOCK_RAW should push it back.
 112
 113On receive:
 114-----------
 115
 116Incoming, dev_has_header(dev) == true
 117   mac_header -> ll header
 118   data       -> data
 119
 120Outgoing, dev_has_header(dev) == true
 121   mac_header -> ll header
 122   data       -> ll header
 123
 124Incoming, dev_has_header(dev) == false
 125   mac_header -> data
 126     However drivers often make it point to the ll header.
 127     This is incorrect because the ll header should be invisible to us.
 128   data       -> data
 129
 130Outgoing, dev_has_header(dev) == false
 131   mac_header -> data. ll header is invisible to us.
 132   data       -> data
 133
  134In summary:
 135  If dev_has_header(dev) == false we are unable to restore the ll header,
 136    because it is invisible to us.
 137
 138
 139On transmit:
 140------------
 141
 142dev_has_header(dev) == true
 143   mac_header -> ll header
 144   data       -> ll header
 145
 146dev_has_header(dev) == false (ll header is invisible to us)
 147   mac_header -> data
 148   data       -> data
 149
 150   We should set network_header on output to the correct position,
 151   packet classifier depends on it.
 152 */
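
/*
 * Illustrative userspace sketch (not part of this kernel file) of the two
 * socket flavours the assumptions above distinguish.  With SOCK_RAW the
 * link-layer header is visible to the application on receive and must be
 * supplied on transmit; with SOCK_DGRAM the kernel strips/builds it and the
 * peer is described by sockaddr_ll instead (buf, sll and slen are
 * placeholders declared elsewhere):
 *
 *	int raw = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgr = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *
 *	recv(raw, buf, sizeof(buf), 0);	   // buf starts with the ll header
 *	recvfrom(dgr, buf, sizeof(buf), 0, // buf starts at the network header,
 *		 (struct sockaddr *)&sll,  // ll source address reported here
 *		 &slen);
 */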
 153
 154/* Private packet socket structures. */
 155
 156/* identical to struct packet_mreq except it has
 157 * a longer address field.
 158 */
 159struct packet_mreq_max {
 160	int		mr_ifindex;
 161	unsigned short	mr_type;
 162	unsigned short	mr_alen;
 163	unsigned char	mr_address[MAX_ADDR_LEN];
 164};
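
/*
 * Userspace counterpart (sketch, not kernel code): applications pass a
 * struct packet_mreq from <linux/if_packet.h> to PACKET_ADD_MEMBERSHIP,
 * e.g. to enable promiscuous mode on one interface for this socket:
 *
 *	struct packet_mreq mreq = {
 *		.mr_ifindex = if_nametoindex("eth0"),	// placeholder name
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * packet_mreq_max above only widens mr_address so that hardware addresses
 * longer than 8 bytes fit (see the Eric Biederman note in the file header).
 */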
 165
 166union tpacket_uhdr {
 167	struct tpacket_hdr  *h1;
 168	struct tpacket2_hdr *h2;
 169	struct tpacket3_hdr *h3;
 170	void *raw;
 171};
 172
 173static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 174		int closing, int tx_ring);
 175
 176#define V3_ALIGNMENT	(8)
 177
 178#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 179
 180#define BLK_PLUS_PRIV(sz_of_priv) \
 181	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 182
 183#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 184#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 185#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 186#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 187#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 188#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
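
/*
 * Worked example: with V3_ALIGNMENT == 8, a user-requested tp_sizeof_priv of
 * 13 bytes rounds up to ALIGN(13, 8) == 16, so BLK_PLUS_PRIV(13) is
 * BLK_HDR_LEN + 16; packets in each block start at that offset (this is what
 * prb_open_block() stores in BLOCK_O2FP() below).
 */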
 189
 190struct packet_sock;
 191static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 192		       struct packet_type *pt, struct net_device *orig_dev);
 193
 194static void *packet_previous_frame(struct packet_sock *po,
 195		struct packet_ring_buffer *rb,
 196		int status);
 197static void packet_increment_head(struct packet_ring_buffer *buff);
 198static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 199static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 200			struct packet_sock *);
 201static void prb_retire_current_block(struct tpacket_kbdq_core *,
 202		struct packet_sock *, unsigned int status);
 203static int prb_queue_frozen(struct tpacket_kbdq_core *);
 204static void prb_open_block(struct tpacket_kbdq_core *,
 205		struct tpacket_block_desc *);
 206static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 207static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 208static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 209static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 210		struct tpacket3_hdr *);
 211static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 212		struct tpacket3_hdr *);
 213static void packet_flush_mclist(struct sock *sk);
 214static u16 packet_pick_tx_queue(struct sk_buff *skb);
 215
 216struct packet_skb_cb {
 217	union {
 218		struct sockaddr_pkt pkt;
 219		union {
 220			/* Trick: alias skb original length with
 221			 * ll.sll_family and ll.protocol in order
 222			 * to save room.
 223			 */
 224			unsigned int origlen;
 225			struct sockaddr_ll ll;
 226		};
 227	} sa;
 228};
 229
 230#define vio_le() virtio_legacy_is_little_endian()
 231
 232#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 233
 234#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 235#define GET_PBLOCK_DESC(x, bid)	\
 236	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 237#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 238	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 239#define GET_NEXT_PRB_BLK_NUM(x) \
 240	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 241	((x)->kactive_blk_num+1) : 0)
 242
 243static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 244static void __fanout_link(struct sock *sk, struct packet_sock *po);
 245
 246#ifdef CONFIG_NETFILTER_EGRESS
 247static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
 248{
 249	struct sk_buff *next, *head = NULL, *tail;
 250	int rc;
 251
 252	rcu_read_lock();
 253	for (; skb != NULL; skb = next) {
 254		next = skb->next;
 255		skb_mark_not_on_list(skb);
 256
 257		if (!nf_hook_egress(skb, &rc, skb->dev))
 258			continue;
 259
 260		if (!head)
 261			head = skb;
 262		else
 263			tail->next = skb;
 264
 265		tail = skb;
 266	}
 267	rcu_read_unlock();
 268
 269	return head;
 270}
 271#endif
 272
 273static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
 274{
 275	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
 276		return dev_queue_xmit(skb);
 277
 278#ifdef CONFIG_NETFILTER_EGRESS
 279	if (nf_hook_egress_active()) {
 280		skb = nf_hook_direct_egress(skb);
 281		if (!skb)
 282			return NET_XMIT_DROP;
 283	}
 284#endif
 285	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 286}
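
/*
 * Userspace sketch (not kernel code): the dev_direct_xmit() path above is
 * only taken after the application opts in with PACKET_QDISC_BYPASS, trading
 * qdisc processing (and its buffering/shaping) for lower latency:
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 */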
 287
 288static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 289{
 290	struct net_device *dev;
 291
 292	rcu_read_lock();
 293	dev = rcu_dereference(po->cached_dev);
 294	dev_hold(dev);
 295	rcu_read_unlock();
 296
 297	return dev;
 298}
 299
 300static void packet_cached_dev_assign(struct packet_sock *po,
 301				     struct net_device *dev)
 302{
 303	rcu_assign_pointer(po->cached_dev, dev);
 304}
 305
 306static void packet_cached_dev_reset(struct packet_sock *po)
 307{
 308	RCU_INIT_POINTER(po->cached_dev, NULL);
 309}
 310
 311static u16 packet_pick_tx_queue(struct sk_buff *skb)
 312{
 313	struct net_device *dev = skb->dev;
 314	const struct net_device_ops *ops = dev->netdev_ops;
 315	int cpu = raw_smp_processor_id();
 316	u16 queue_index;
 317
 318#ifdef CONFIG_XPS
 319	skb->sender_cpu = cpu + 1;
 320#endif
 321	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 322	if (ops->ndo_select_queue) {
 323		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 324		queue_index = netdev_cap_txqueue(dev, queue_index);
 325	} else {
 326		queue_index = netdev_pick_tx(dev, skb, NULL);
 327	}
 328
 329	return queue_index;
 330}
 331
 332/* __register_prot_hook must be invoked through register_prot_hook
 333 * or from a context in which asynchronous accesses to the packet
 334 * socket is not possible (packet_create()).
 335 */
 336static void __register_prot_hook(struct sock *sk)
 337{
 338	struct packet_sock *po = pkt_sk(sk);
 339
 340	if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
 341		if (po->fanout)
 342			__fanout_link(sk, po);
 343		else
 344			dev_add_pack(&po->prot_hook);
 345
 346		sock_hold(sk);
 347		packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
 348	}
 349}
 350
 351static void register_prot_hook(struct sock *sk)
 352{
 353	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 354	__register_prot_hook(sk);
 355}
 356
 357/* If the sync parameter is true, we will temporarily drop
 358 * the po->bind_lock and do a synchronize_net to make sure no
 359 * asynchronous packet processing paths still refer to the elements
 360 * of po->prot_hook.  If the sync parameter is false, it is the
  361 * caller's responsibility to take care of this.
 362 */
 363static void __unregister_prot_hook(struct sock *sk, bool sync)
 364{
 365	struct packet_sock *po = pkt_sk(sk);
 366
 367	lockdep_assert_held_once(&po->bind_lock);
 368
 369	packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);
 370
 371	if (po->fanout)
 372		__fanout_unlink(sk, po);
 373	else
 374		__dev_remove_pack(&po->prot_hook);
 375
 376	__sock_put(sk);
 377
 378	if (sync) {
 379		spin_unlock(&po->bind_lock);
 380		synchronize_net();
 381		spin_lock(&po->bind_lock);
 382	}
 383}
 384
 385static void unregister_prot_hook(struct sock *sk, bool sync)
 386{
 387	struct packet_sock *po = pkt_sk(sk);
 388
 389	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
 390		__unregister_prot_hook(sk, sync);
 391}
 392
 393static inline struct page * __pure pgv_to_page(void *addr)
 394{
 395	if (is_vmalloc_addr(addr))
 396		return vmalloc_to_page(addr);
 397	return virt_to_page(addr);
 398}
 399
 400static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 401{
 402	union tpacket_uhdr h;
 403
 404	/* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
 405
 406	h.raw = frame;
 407	switch (po->tp_version) {
 408	case TPACKET_V1:
 409		WRITE_ONCE(h.h1->tp_status, status);
 410		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 411		break;
 412	case TPACKET_V2:
 413		WRITE_ONCE(h.h2->tp_status, status);
 414		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 415		break;
 416	case TPACKET_V3:
 417		WRITE_ONCE(h.h3->tp_status, status);
 418		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 419		break;
 420	default:
 421		WARN(1, "TPACKET version not supported.\n");
 422		BUG();
 423	}
 424
 425	smp_wmb();
 426}
 427
 428static int __packet_get_status(const struct packet_sock *po, void *frame)
 429{
 430	union tpacket_uhdr h;
 431
 432	smp_rmb();
 433
 434	/* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
 435
 436	h.raw = frame;
 437	switch (po->tp_version) {
 438	case TPACKET_V1:
 439		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 440		return READ_ONCE(h.h1->tp_status);
 441	case TPACKET_V2:
 442		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 443		return READ_ONCE(h.h2->tp_status);
 444	case TPACKET_V3:
 445		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 446		return READ_ONCE(h.h3->tp_status);
 447	default:
 448		WARN(1, "TPACKET version not supported.\n");
 449		BUG();
 450		return 0;
 451	}
 452}
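
/*
 * The status word read/written above is the ownership handshake on the
 * mmap()ed ring.  Minimal userspace consumer, sketched for TPACKET_V2 and
 * assuming "ring", "framesz", "idx", a poll fd "pfd" and a handle_frame()
 * helper were set up when the ring was mapped:
 *
 *	struct tpacket2_hdr *hdr = (void *)(ring + idx * framesz);
 *
 *	while (!(hdr->tp_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);		// wait for the kernel to fill it
 *	handle_frame((char *)hdr + hdr->tp_mac, hdr->tp_snaplen);
 *	hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back to the kernel
 */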
 453
 454static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 455				   unsigned int flags)
 456{
 457	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 458
 459	if (shhwtstamps &&
 460	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 461	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 462		return TP_STATUS_TS_RAW_HARDWARE;
 463
 464	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
 465	    ktime_to_timespec64_cond(skb_tstamp(skb), ts))
 466		return TP_STATUS_TS_SOFTWARE;
 467
 468	return 0;
 469}
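
/*
 * Userspace sketch (not kernel code): which clock ends up in the ring header
 * is selected with the PACKET_TIMESTAMP socket option, using the same
 * SOF_TIMESTAMPING_* flags tested above, e.g. to prefer NIC hardware stamps:
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 */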
 470
 471static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 472				    struct sk_buff *skb)
 473{
 474	union tpacket_uhdr h;
 475	struct timespec64 ts;
 476	__u32 ts_status;
 477
 478	if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
 479		return 0;
 480
 481	h.raw = frame;
 482	/*
 483	 * versions 1 through 3 overflow the timestamps in y2106, since they
 484	 * all store the seconds in a 32-bit unsigned integer.
 485	 * If we create a version 4, that should have a 64-bit timestamp,
 486	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
 487	 * nanoseconds.
 488	 */
 489	switch (po->tp_version) {
 490	case TPACKET_V1:
 491		h.h1->tp_sec = ts.tv_sec;
 492		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 493		break;
 494	case TPACKET_V2:
 495		h.h2->tp_sec = ts.tv_sec;
 496		h.h2->tp_nsec = ts.tv_nsec;
 497		break;
 498	case TPACKET_V3:
 499		h.h3->tp_sec = ts.tv_sec;
 500		h.h3->tp_nsec = ts.tv_nsec;
 501		break;
 502	default:
 503		WARN(1, "TPACKET version not supported.\n");
 504		BUG();
 505	}
 506
 507	/* one flush is safe, as both fields always lie on the same cacheline */
 508	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 509	smp_wmb();
 510
 511	return ts_status;
 512}
 513
 514static void *packet_lookup_frame(const struct packet_sock *po,
 515				 const struct packet_ring_buffer *rb,
 516				 unsigned int position,
 517				 int status)
 518{
 519	unsigned int pg_vec_pos, frame_offset;
 520	union tpacket_uhdr h;
 521
 522	pg_vec_pos = position / rb->frames_per_block;
 523	frame_offset = position % rb->frames_per_block;
 524
 525	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 526		(frame_offset * rb->frame_size);
 527
 528	if (status != __packet_get_status(po, h.raw))
 529		return NULL;
 530
 531	return h.raw;
 532}
 533
 534static void *packet_current_frame(struct packet_sock *po,
 535		struct packet_ring_buffer *rb,
 536		int status)
 537{
 538	return packet_lookup_frame(po, rb, rb->head, status);
 539}
 540
 541static u16 vlan_get_tci(const struct sk_buff *skb, struct net_device *dev)
 542{
 543	struct vlan_hdr vhdr, *vh;
 544	unsigned int header_len;
 545
 546	if (!dev)
 547		return 0;
 548
 549	/* In the SOCK_DGRAM scenario, skb data starts at the network
 550	 * protocol, which is after the VLAN headers. The outer VLAN
 551	 * header is at the hard_header_len offset in non-variable
 552	 * length link layer headers. If it's a VLAN device, the
 553	 * min_header_len should be used to exclude the VLAN header
 554	 * size.
 555	 */
 556	if (dev->min_header_len == dev->hard_header_len)
 557		header_len = dev->hard_header_len;
 558	else if (is_vlan_dev(dev))
 559		header_len = dev->min_header_len;
 560	else
 561		return 0;
 562
 563	vh = skb_header_pointer(skb, skb_mac_offset(skb) + header_len,
 564				sizeof(vhdr), &vhdr);
 565	if (unlikely(!vh))
 566		return 0;
 567
 568	return ntohs(vh->h_vlan_TCI);
 569}
 570
 571static __be16 vlan_get_protocol_dgram(const struct sk_buff *skb)
 572{
 573	__be16 proto = skb->protocol;
 574
 575	if (unlikely(eth_type_vlan(proto)))
 576		proto = __vlan_get_protocol_offset(skb, proto,
 577						   skb_mac_offset(skb), NULL);
 578
 579	return proto;
 580}
 581
 582static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 583{
 584	del_timer_sync(&pkc->retire_blk_timer);
 585}
 586
 587static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 588		struct sk_buff_head *rb_queue)
 589{
 590	struct tpacket_kbdq_core *pkc;
 591
 592	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 593
 594	spin_lock_bh(&rb_queue->lock);
 595	pkc->delete_blk_timer = 1;
 596	spin_unlock_bh(&rb_queue->lock);
 597
 598	prb_del_retire_blk_timer(pkc);
 599}
 600
 601static void prb_setup_retire_blk_timer(struct packet_sock *po)
 602{
 603	struct tpacket_kbdq_core *pkc;
 604
 605	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 606	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 607		    0);
 608	pkc->retire_blk_timer.expires = jiffies;
 609}
 610
 611static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 612				int blk_size_in_bytes)
 613{
 614	struct net_device *dev;
 615	unsigned int mbits, div;
 616	struct ethtool_link_ksettings ecmd;
 617	int err;
 618
 619	rtnl_lock();
 620	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 621	if (unlikely(!dev)) {
 622		rtnl_unlock();
 623		return DEFAULT_PRB_RETIRE_TOV;
 624	}
 625	err = __ethtool_get_link_ksettings(dev, &ecmd);
 626	rtnl_unlock();
 627	if (err)
 628		return DEFAULT_PRB_RETIRE_TOV;
 629
 630	/* If the link speed is so slow you don't really
 631	 * need to worry about perf anyways
 632	 */
 633	if (ecmd.base.speed < SPEED_1000 ||
 634	    ecmd.base.speed == SPEED_UNKNOWN)
 635		return DEFAULT_PRB_RETIRE_TOV;
 636
 637	div = ecmd.base.speed / 1000;
 638	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 639
 640	if (div)
 641		mbits /= div;
 642
 643	if (div)
 644		return mbits + 1;
 645	return mbits;
 646}
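
/*
 * Worked example: for a 1 MiB block on a 1 Gb/s link,
 * mbits = (1 MiB * 8) / (1024 * 1024) = 8 and div = 1000 / 1000 = 1,
 * so the function returns 8 + 1 = 9 ms, roughly the time the link needs to
 * fill one block plus a little slack (see the timer-logic comment further
 * down).
 */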
 647
 648static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 649			union tpacket_req_u *req_u)
 650{
 651	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 652}
 653
 654static void init_prb_bdqc(struct packet_sock *po,
 655			struct packet_ring_buffer *rb,
 656			struct pgv *pg_vec,
 657			union tpacket_req_u *req_u)
 658{
 659	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 660	struct tpacket_block_desc *pbd;
 661
 662	memset(p1, 0x0, sizeof(*p1));
 663
 664	p1->knxt_seq_num = 1;
 665	p1->pkbdq = pg_vec;
 666	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 667	p1->pkblk_start	= pg_vec[0].buffer;
 668	p1->kblk_size = req_u->req3.tp_block_size;
 669	p1->knum_blocks	= req_u->req3.tp_block_nr;
 670	p1->hdrlen = po->tp_hdrlen;
 671	p1->version = po->tp_version;
 672	p1->last_kactive_blk_num = 0;
 673	po->stats.stats3.tp_freeze_q_cnt = 0;
 674	if (req_u->req3.tp_retire_blk_tov)
 675		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 676	else
 677		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 678						req_u->req3.tp_block_size);
 679	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 680	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 681	rwlock_init(&p1->blk_fill_in_prog_lock);
 682
 683	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 684	prb_init_ft_ops(p1, req_u);
 685	prb_setup_retire_blk_timer(po);
 686	prb_open_block(p1, pbd);
 687}
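
/*
 * Userspace sketch (not kernel code) of the request that reaches this
 * function through setsockopt(PACKET_RX_RING) and packet_set_ring().  The
 * sizes are illustrative only:
 *
 *	int ver = TPACKET_V3;
 *	struct tpacket_req3 req = {
 *		.tp_block_size     = 1 << 20,	// 1 MiB per block
 *		.tp_block_nr       = 64,
 *		.tp_frame_size     = 2048,
 *		.tp_frame_nr       = ((1 << 20) / 2048) * 64,	// must stay consistent
 *		.tp_retire_blk_tov = 60,	// ms; 0 = derive from link speed
 *	};
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *	ring = mmap(NULL, (size_t)req.tp_block_size * req.tp_block_nr,
 *		    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */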
 688
 689/*  Do NOT update the last_blk_num first.
 690 *  Assumes sk_buff_head lock is held.
 691 */
 692static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 693{
 694	mod_timer(&pkc->retire_blk_timer,
 695			jiffies + pkc->tov_in_jiffies);
 696	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 697}
 698
 699/*
 700 * Timer logic:
 701 * 1) We refresh the timer only when we open a block.
 702 *    By doing this we don't waste cycles refreshing the timer
 703 *	  on packet-by-packet basis.
 704 *
 705 * With a 1MB block-size, on a 1Gbps line, it will take
 706 * i) ~8 ms to fill a block + ii) memcpy etc.
 707 * In this cut we are not accounting for the memcpy time.
 708 *
 709 * So, if the user sets the 'tmo' to 10ms then the timer
 710 * will never fire while the block is still getting filled
 711 * (which is what we want). However, the user could choose
 712 * to close a block early and that's fine.
 713 *
 714 * But when the timer does fire, we check whether or not to refresh it.
 715 * Since the tmo granularity is in msecs, it is not too expensive
  716 * to refresh the timer, let's say every '8' msecs.
 717 * Either the user can set the 'tmo' or we can derive it based on
 718 * a) line-speed and b) block-size.
 719 * prb_calc_retire_blk_tmo() calculates the tmo.
 720 *
 721 */
 722static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 723{
 724	struct packet_sock *po =
 725		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 726	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 727	unsigned int frozen;
 728	struct tpacket_block_desc *pbd;
 729
 730	spin_lock(&po->sk.sk_receive_queue.lock);
 731
 732	frozen = prb_queue_frozen(pkc);
 733	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 734
 735	if (unlikely(pkc->delete_blk_timer))
 736		goto out;
 737
 738	/* We only need to plug the race when the block is partially filled.
 739	 * tpacket_rcv:
 740	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 741	 *		copy_bits() is in progress ...
 742	 *		timer fires on other cpu:
 743	 *		we can't retire the current block because copy_bits
 744	 *		is in progress.
 745	 *
 746	 */
 747	if (BLOCK_NUM_PKTS(pbd)) {
 748		/* Waiting for skb_copy_bits to finish... */
 749		write_lock(&pkc->blk_fill_in_prog_lock);
 750		write_unlock(&pkc->blk_fill_in_prog_lock);
 751	}
 752
 753	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 754		if (!frozen) {
 755			if (!BLOCK_NUM_PKTS(pbd)) {
 756				/* An empty block. Just refresh the timer. */
 757				goto refresh_timer;
 758			}
 759			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 760			if (!prb_dispatch_next_block(pkc, po))
 761				goto refresh_timer;
 762			else
 763				goto out;
 764		} else {
 765			/* Case 1. Queue was frozen because user-space was
 766			 *	   lagging behind.
 767			 */
 768			if (prb_curr_blk_in_use(pbd)) {
 769				/*
 770				 * Ok, user-space is still behind.
 771				 * So just refresh the timer.
 772				 */
 773				goto refresh_timer;
 774			} else {
  775			       /* Case 2. Queue was frozen, user-space caught up,
  776				* now the link went idle && the timer fired.
  777				* We don't have a block to close. So we open this
  778				* block and restart the timer.
  779				* Opening a block thaws the queue and restarts the timer;
  780				* thawing/timer-refresh is a side effect.
  781				*/
 782				prb_open_block(pkc, pbd);
 783				goto out;
 784			}
 785		}
 786	}
 787
 788refresh_timer:
 789	_prb_refresh_rx_retire_blk_timer(pkc);
 790
 791out:
 792	spin_unlock(&po->sk.sk_receive_queue.lock);
 793}
 794
 795static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 796		struct tpacket_block_desc *pbd1, __u32 status)
 797{
 798	/* Flush everything minus the block header */
 799
 800#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 801	u8 *start, *end;
 802
 803	start = (u8 *)pbd1;
 804
 805	/* Skip the block header(we know header WILL fit in 4K) */
 806	start += PAGE_SIZE;
 807
 808	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 809	for (; start < end; start += PAGE_SIZE)
 810		flush_dcache_page(pgv_to_page(start));
 811
 812	smp_wmb();
 813#endif
 814
 815	/* Now update the block status. */
 816
 817	BLOCK_STATUS(pbd1) = status;
 818
 819	/* Flush the block header */
 820
 821#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 822	start = (u8 *)pbd1;
 823	flush_dcache_page(pgv_to_page(start));
 824
 825	smp_wmb();
 826#endif
 827}
 828
 829/*
 830 * Side effect:
 831 *
 832 * 1) flush the block
 833 * 2) Increment active_blk_num
 834 *
 835 * Note:We DONT refresh the timer on purpose.
 836 *	Because almost always the next block will be opened.
 837 */
 838static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 839		struct tpacket_block_desc *pbd1,
 840		struct packet_sock *po, unsigned int stat)
 841{
 842	__u32 status = TP_STATUS_USER | stat;
 843
 844	struct tpacket3_hdr *last_pkt;
 845	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 846	struct sock *sk = &po->sk;
 847
 848	if (atomic_read(&po->tp_drops))
 849		status |= TP_STATUS_LOSING;
 850
 851	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 852	last_pkt->tp_next_offset = 0;
 853
 854	/* Get the ts of the last pkt */
 855	if (BLOCK_NUM_PKTS(pbd1)) {
 856		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 857		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 858	} else {
 859		/* Ok, we tmo'd - so get the current time.
 860		 *
 861		 * It shouldn't really happen as we don't close empty
 862		 * blocks. See prb_retire_rx_blk_timer_expired().
 863		 */
 864		struct timespec64 ts;
 865		ktime_get_real_ts64(&ts);
 866		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 867		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 868	}
 869
 870	smp_wmb();
 871
 872	/* Flush the block */
 873	prb_flush_block(pkc1, pbd1, status);
 874
 875	sk->sk_data_ready(sk);
 876
 877	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 878}
 879
 880static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 881{
 882	pkc->reset_pending_on_curr_blk = 0;
 883}
 884
 885/*
 886 * Side effect of opening a block:
 887 *
 888 * 1) prb_queue is thawed.
 889 * 2) retire_blk_timer is refreshed.
 890 *
 891 */
 892static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 893	struct tpacket_block_desc *pbd1)
 894{
 895	struct timespec64 ts;
 896	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 897
 898	smp_rmb();
 899
 900	/* We could have just memset this but we will lose the
 901	 * flexibility of making the priv area sticky
 902	 */
 903
 904	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 905	BLOCK_NUM_PKTS(pbd1) = 0;
 906	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 907
 908	ktime_get_real_ts64(&ts);
 909
 910	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 911	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 912
 913	pkc1->pkblk_start = (char *)pbd1;
 914	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 915
 916	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 917	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 918
 919	pbd1->version = pkc1->version;
 920	pkc1->prev = pkc1->nxt_offset;
 921	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 922
 923	prb_thaw_queue(pkc1);
 924	_prb_refresh_rx_retire_blk_timer(pkc1);
 925
 926	smp_wmb();
 927}
 928
 929/*
 930 * Queue freeze logic:
 931 * 1) Assume tp_block_nr = 8 blocks.
 932 * 2) At time 't0', user opens Rx ring.
 933 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 934 * 4) user-space is either sleeping or processing block '0'.
 935 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
  936 *    it will close block-7, loop around and try to fill block '0'.
 937 *    call-flow:
 938 *    __packet_lookup_frame_in_block
 939 *      prb_retire_current_block()
 940 *      prb_dispatch_next_block()
 941 *        |->(BLOCK_STATUS == USER) evaluates to true
 942 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 943 * 6) Now there are two cases:
 944 *    6.1) Link goes idle right after the queue is frozen.
 945 *         But remember, the last open_block() refreshed the timer.
  946 *         When this timer expires, it will refresh itself so that we can
 947 *         re-open block-0 in near future.
 948 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 949 *         case and __packet_lookup_frame_in_block will check if block-0
 950 *         is free and can now be re-used.
 951 */
 952static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 953				  struct packet_sock *po)
 954{
 955	pkc->reset_pending_on_curr_blk = 1;
 956	po->stats.stats3.tp_freeze_q_cnt++;
 957}
 958
 959#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 960
 961/*
 962 * If the next block is free then we will dispatch it
 963 * and return a good offset.
 964 * Else, we will freeze the queue.
 965 * So, caller must check the return value.
 966 */
 967static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 968		struct packet_sock *po)
 969{
 970	struct tpacket_block_desc *pbd;
 971
 972	smp_rmb();
 973
 974	/* 1. Get current block num */
 975	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 976
 977	/* 2. If this block is currently in_use then freeze the queue */
 978	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 979		prb_freeze_queue(pkc, po);
 980		return NULL;
 981	}
 982
 983	/*
 984	 * 3.
 985	 * open this block and return the offset where the first packet
 986	 * needs to get stored.
 987	 */
 988	prb_open_block(pkc, pbd);
 989	return (void *)pkc->nxt_offset;
 990}
 991
 992static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 993		struct packet_sock *po, unsigned int status)
 994{
 995	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 996
 997	/* retire/close the current block */
 998	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 999		/*
1000		 * Plug the case where copy_bits() is in progress on
1001		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
1002		 * have space to copy the pkt in the current block and
1003		 * called prb_retire_current_block()
1004		 *
1005		 * We don't need to worry about the TMO case because
1006		 * the timer-handler already handled this case.
1007		 */
1008		if (!(status & TP_STATUS_BLK_TMO)) {
1009			/* Waiting for skb_copy_bits to finish... */
1010			write_lock(&pkc->blk_fill_in_prog_lock);
1011			write_unlock(&pkc->blk_fill_in_prog_lock);
1012		}
1013		prb_close_block(pkc, pbd, po, status);
1014		return;
1015	}
1016}
1017
1018static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
1019{
1020	return TP_STATUS_USER & BLOCK_STATUS(pbd);
1021}
1022
1023static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
1024{
1025	return pkc->reset_pending_on_curr_blk;
1026}
1027
1028static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
1029	__releases(&pkc->blk_fill_in_prog_lock)
1030{
1031	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1032
1033	read_unlock(&pkc->blk_fill_in_prog_lock);
1034}
1035
1036static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
1037			struct tpacket3_hdr *ppd)
1038{
1039	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
1040}
1041
1042static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
1043			struct tpacket3_hdr *ppd)
1044{
1045	ppd->hv1.tp_rxhash = 0;
1046}
1047
1048static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1049			struct tpacket3_hdr *ppd)
1050{
1051	struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc);
1052
1053	if (skb_vlan_tag_present(pkc->skb)) {
1054		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1055		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1056		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1057	} else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) {
1058		ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev);
1059		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol);
1060		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1061	} else {
1062		ppd->hv1.tp_vlan_tci = 0;
1063		ppd->hv1.tp_vlan_tpid = 0;
1064		ppd->tp_status = TP_STATUS_AVAILABLE;
1065	}
1066}
1067
1068static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1069			struct tpacket3_hdr *ppd)
1070{
1071	ppd->hv1.tp_padding = 0;
1072	prb_fill_vlan_info(pkc, ppd);
1073
1074	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1075		prb_fill_rxhash(pkc, ppd);
1076	else
1077		prb_clear_rxhash(pkc, ppd);
1078}
1079
1080static void prb_fill_curr_block(char *curr,
1081				struct tpacket_kbdq_core *pkc,
1082				struct tpacket_block_desc *pbd,
1083				unsigned int len)
1084	__acquires(&pkc->blk_fill_in_prog_lock)
1085{
1086	struct tpacket3_hdr *ppd;
1087
1088	ppd  = (struct tpacket3_hdr *)curr;
1089	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1090	pkc->prev = curr;
1091	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1092	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1093	BLOCK_NUM_PKTS(pbd) += 1;
1094	read_lock(&pkc->blk_fill_in_prog_lock);
1095	prb_run_all_ft_ops(pkc, ppd);
1096}
1097
1098/* Assumes caller has the sk->rx_queue.lock */
1099static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1100					    struct sk_buff *skb,
1101					    unsigned int len
1102					    )
1103{
1104	struct tpacket_kbdq_core *pkc;
1105	struct tpacket_block_desc *pbd;
1106	char *curr, *end;
1107
1108	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1109	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1110
1111	/* Queue is frozen when user space is lagging behind */
1112	if (prb_queue_frozen(pkc)) {
1113		/*
1114		 * Check if that last block which caused the queue to freeze,
1115		 * is still in_use by user-space.
1116		 */
1117		if (prb_curr_blk_in_use(pbd)) {
1118			/* Can't record this packet */
1119			return NULL;
1120		} else {
1121			/*
1122			 * Ok, the block was released by user-space.
1123			 * Now let's open that block.
1124			 * opening a block also thaws the queue.
1125			 * Thawing is a side effect.
1126			 */
1127			prb_open_block(pkc, pbd);
1128		}
1129	}
1130
1131	smp_mb();
1132	curr = pkc->nxt_offset;
1133	pkc->skb = skb;
1134	end = (char *)pbd + pkc->kblk_size;
1135
1136	/* first try the current block */
1137	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1138		prb_fill_curr_block(curr, pkc, pbd, len);
1139		return (void *)curr;
1140	}
1141
1142	/* Ok, close the current block */
1143	prb_retire_current_block(pkc, po, 0);
1144
1145	/* Now, try to dispatch the next block */
1146	curr = (char *)prb_dispatch_next_block(pkc, po);
1147	if (curr) {
1148		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1149		prb_fill_curr_block(curr, pkc, pbd, len);
1150		return (void *)curr;
1151	}
1152
1153	/*
 1154	 * No free blocks are available. User-space hasn't caught up yet.
1155	 * Queue was just frozen and now this packet will get dropped.
1156	 */
1157	return NULL;
1158}
1159
1160static void *packet_current_rx_frame(struct packet_sock *po,
1161					    struct sk_buff *skb,
1162					    int status, unsigned int len)
1163{
1164	char *curr = NULL;
1165	switch (po->tp_version) {
1166	case TPACKET_V1:
1167	case TPACKET_V2:
1168		curr = packet_lookup_frame(po, &po->rx_ring,
1169					po->rx_ring.head, status);
1170		return curr;
1171	case TPACKET_V3:
1172		return __packet_lookup_frame_in_block(po, skb, len);
1173	default:
1174		WARN(1, "TPACKET version not supported\n");
1175		BUG();
1176		return NULL;
1177	}
1178}
1179
1180static void *prb_lookup_block(const struct packet_sock *po,
1181			      const struct packet_ring_buffer *rb,
1182			      unsigned int idx,
1183			      int status)
1184{
1185	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1186	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1187
1188	if (status != BLOCK_STATUS(pbd))
1189		return NULL;
1190	return pbd;
1191}
1192
1193static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1194{
1195	unsigned int prev;
1196	if (rb->prb_bdqc.kactive_blk_num)
1197		prev = rb->prb_bdqc.kactive_blk_num-1;
1198	else
1199		prev = rb->prb_bdqc.knum_blocks-1;
1200	return prev;
1201}
1202
1203/* Assumes caller has held the rx_queue.lock */
1204static void *__prb_previous_block(struct packet_sock *po,
1205					 struct packet_ring_buffer *rb,
1206					 int status)
1207{
1208	unsigned int previous = prb_previous_blk_num(rb);
1209	return prb_lookup_block(po, rb, previous, status);
1210}
1211
1212static void *packet_previous_rx_frame(struct packet_sock *po,
1213					     struct packet_ring_buffer *rb,
1214					     int status)
1215{
1216	if (po->tp_version <= TPACKET_V2)
1217		return packet_previous_frame(po, rb, status);
1218
1219	return __prb_previous_block(po, rb, status);
1220}
1221
1222static void packet_increment_rx_head(struct packet_sock *po,
1223					    struct packet_ring_buffer *rb)
1224{
1225	switch (po->tp_version) {
1226	case TPACKET_V1:
1227	case TPACKET_V2:
1228		return packet_increment_head(rb);
1229	case TPACKET_V3:
1230	default:
1231		WARN(1, "TPACKET version not supported.\n");
1232		BUG();
1233		return;
1234	}
1235}
1236
1237static void *packet_previous_frame(struct packet_sock *po,
1238		struct packet_ring_buffer *rb,
1239		int status)
1240{
1241	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1242	return packet_lookup_frame(po, rb, previous, status);
1243}
1244
1245static void packet_increment_head(struct packet_ring_buffer *buff)
1246{
1247	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1248}
1249
1250static void packet_inc_pending(struct packet_ring_buffer *rb)
1251{
1252	this_cpu_inc(*rb->pending_refcnt);
1253}
1254
1255static void packet_dec_pending(struct packet_ring_buffer *rb)
1256{
1257	this_cpu_dec(*rb->pending_refcnt);
1258}
1259
1260static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1261{
1262	unsigned int refcnt = 0;
1263	int cpu;
1264
1265	/* We don't use pending refcount in rx_ring. */
1266	if (rb->pending_refcnt == NULL)
1267		return 0;
1268
1269	for_each_possible_cpu(cpu)
1270		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1271
1272	return refcnt;
1273}
1274
1275static int packet_alloc_pending(struct packet_sock *po)
1276{
1277	po->rx_ring.pending_refcnt = NULL;
1278
1279	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1280	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1281		return -ENOBUFS;
1282
1283	return 0;
1284}
1285
1286static void packet_free_pending(struct packet_sock *po)
1287{
1288	free_percpu(po->tx_ring.pending_refcnt);
1289}
1290
1291#define ROOM_POW_OFF	2
1292#define ROOM_NONE	0x0
1293#define ROOM_LOW	0x1
1294#define ROOM_NORMAL	0x2
1295
1296static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1297{
1298	int idx, len;
1299
1300	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1301	idx = READ_ONCE(po->rx_ring.head);
1302	if (pow_off)
1303		idx += len >> pow_off;
1304	if (idx >= len)
1305		idx -= len;
1306	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1307}
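
/*
 * Example: with 256 frames in the rx ring and pow_off == ROOM_POW_OFF (2),
 * the lookup above probes the frame 256 >> 2 == 64 slots ahead of 'head'
 * (wrapping around) and reports room only if that frame still belongs to the
 * kernel, i.e. at least a quarter of the ring is free.
 */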
1308
1309static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1310{
1311	int idx, len;
1312
1313	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1314	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1315	if (pow_off)
1316		idx += len >> pow_off;
1317	if (idx >= len)
1318		idx -= len;
1319	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1320}
1321
1322static int __packet_rcv_has_room(const struct packet_sock *po,
1323				 const struct sk_buff *skb)
1324{
1325	const struct sock *sk = &po->sk;
1326	int ret = ROOM_NONE;
1327
1328	if (po->prot_hook.func != tpacket_rcv) {
1329		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1330		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1331				   - (skb ? skb->truesize : 0);
1332
1333		if (avail > (rcvbuf >> ROOM_POW_OFF))
1334			return ROOM_NORMAL;
1335		else if (avail > 0)
1336			return ROOM_LOW;
1337		else
1338			return ROOM_NONE;
1339	}
1340
1341	if (po->tp_version == TPACKET_V3) {
1342		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1343			ret = ROOM_NORMAL;
1344		else if (__tpacket_v3_has_room(po, 0))
1345			ret = ROOM_LOW;
1346	} else {
1347		if (__tpacket_has_room(po, ROOM_POW_OFF))
1348			ret = ROOM_NORMAL;
1349		else if (__tpacket_has_room(po, 0))
1350			ret = ROOM_LOW;
1351	}
1352
1353	return ret;
1354}
1355
1356static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1357{
1358	bool pressure;
1359	int ret;
1360
1361	ret = __packet_rcv_has_room(po, skb);
1362	pressure = ret != ROOM_NORMAL;
1363
1364	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
1365		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);
1366
1367	return ret;
1368}
1369
1370static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1371{
1372	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
1373	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1374		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
1375}
1376
1377static void packet_sock_destruct(struct sock *sk)
1378{
1379	skb_queue_purge(&sk->sk_error_queue);
1380
1381	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1382	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1383
1384	if (!sock_flag(sk, SOCK_DEAD)) {
1385		pr_err("Attempt to release alive packet socket: %p\n", sk);
1386		return;
1387	}
1388}
1389
1390static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1391{
1392	u32 *history = po->rollover->history;
1393	u32 victim, rxhash;
1394	int i, count = 0;
1395
1396	rxhash = skb_get_hash(skb);
1397	for (i = 0; i < ROLLOVER_HLEN; i++)
1398		if (READ_ONCE(history[i]) == rxhash)
1399			count++;
1400
1401	victim = get_random_u32_below(ROLLOVER_HLEN);
1402
1403	/* Avoid dirtying the cache line if possible */
1404	if (READ_ONCE(history[victim]) != rxhash)
1405		WRITE_ONCE(history[victim], rxhash);
1406
1407	return count > (ROLLOVER_HLEN >> 1);
1408}
1409
1410static unsigned int fanout_demux_hash(struct packet_fanout *f,
1411				      struct sk_buff *skb,
1412				      unsigned int num)
1413{
1414	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1415}
1416
1417static unsigned int fanout_demux_lb(struct packet_fanout *f,
1418				    struct sk_buff *skb,
1419				    unsigned int num)
1420{
1421	unsigned int val = atomic_inc_return(&f->rr_cur);
1422
1423	return val % num;
1424}
1425
1426static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1427				     struct sk_buff *skb,
1428				     unsigned int num)
1429{
1430	return smp_processor_id() % num;
1431}
1432
1433static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1434				     struct sk_buff *skb,
1435				     unsigned int num)
1436{
1437	return get_random_u32_below(num);
1438}
1439
1440static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1441					  struct sk_buff *skb,
1442					  unsigned int idx, bool try_self,
1443					  unsigned int num)
1444{
1445	struct packet_sock *po, *po_next, *po_skip = NULL;
1446	unsigned int i, j, room = ROOM_NONE;
1447
1448	po = pkt_sk(rcu_dereference(f->arr[idx]));
1449
1450	if (try_self) {
1451		room = packet_rcv_has_room(po, skb);
1452		if (room == ROOM_NORMAL ||
1453		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1454			return idx;
1455		po_skip = po;
1456	}
1457
1458	i = j = min_t(int, po->rollover->sock, num - 1);
1459	do {
1460		po_next = pkt_sk(rcu_dereference(f->arr[i]));
1461		if (po_next != po_skip &&
1462		    !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
1463		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1464			if (i != j)
1465				po->rollover->sock = i;
1466			atomic_long_inc(&po->rollover->num);
1467			if (room == ROOM_LOW)
1468				atomic_long_inc(&po->rollover->num_huge);
1469			return i;
1470		}
1471
1472		if (++i == num)
1473			i = 0;
1474	} while (i != j);
1475
1476	atomic_long_inc(&po->rollover->num_failed);
1477	return idx;
1478}
1479
1480static unsigned int fanout_demux_qm(struct packet_fanout *f,
1481				    struct sk_buff *skb,
1482				    unsigned int num)
1483{
1484	return skb_get_queue_mapping(skb) % num;
1485}
1486
1487static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1488				     struct sk_buff *skb,
1489				     unsigned int num)
1490{
1491	struct bpf_prog *prog;
1492	unsigned int ret = 0;
1493
1494	rcu_read_lock();
1495	prog = rcu_dereference(f->bpf_prog);
1496	if (prog)
1497		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1498	rcu_read_unlock();
1499
1500	return ret;
1501}
1502
1503static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1504{
1505	return f->flags & (flag >> 8);
1506}
1507
1508static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1509			     struct packet_type *pt, struct net_device *orig_dev)
1510{
1511	struct packet_fanout *f = pt->af_packet_priv;
1512	unsigned int num = READ_ONCE(f->num_members);
1513	struct net *net = read_pnet(&f->net);
1514	struct packet_sock *po;
1515	unsigned int idx;
1516
1517	if (!net_eq(dev_net(dev), net) || !num) {
1518		kfree_skb(skb);
1519		return 0;
1520	}
1521
1522	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1523		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1524		if (!skb)
1525			return 0;
1526	}
1527	switch (f->type) {
1528	case PACKET_FANOUT_HASH:
1529	default:
1530		idx = fanout_demux_hash(f, skb, num);
1531		break;
1532	case PACKET_FANOUT_LB:
1533		idx = fanout_demux_lb(f, skb, num);
1534		break;
1535	case PACKET_FANOUT_CPU:
1536		idx = fanout_demux_cpu(f, skb, num);
1537		break;
1538	case PACKET_FANOUT_RND:
1539		idx = fanout_demux_rnd(f, skb, num);
1540		break;
1541	case PACKET_FANOUT_QM:
1542		idx = fanout_demux_qm(f, skb, num);
1543		break;
1544	case PACKET_FANOUT_ROLLOVER:
1545		idx = fanout_demux_rollover(f, skb, 0, false, num);
1546		break;
1547	case PACKET_FANOUT_CBPF:
1548	case PACKET_FANOUT_EBPF:
1549		idx = fanout_demux_bpf(f, skb, num);
1550		break;
1551	}
1552
1553	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1554		idx = fanout_demux_rollover(f, skb, idx, true, num);
1555
1556	po = pkt_sk(rcu_dereference(f->arr[idx]));
1557	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1558}
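
/*
 * Userspace sketch (not kernel code): sockets join a fanout group with the
 * PACKET_FANOUT option; the low 16 bits carry the group id and the high 16
 * bits the mode (and flags) demultiplexed in the switch above:
 *
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);  // group_id: app-chosen
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * Member sockets of the same group then each receive a disjoint share of the
 * matching traffic according to the selected mode.
 */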
1559
1560DEFINE_MUTEX(fanout_mutex);
1561EXPORT_SYMBOL_GPL(fanout_mutex);
1562static LIST_HEAD(fanout_list);
1563static u16 fanout_next_id;
1564
1565static void __fanout_link(struct sock *sk, struct packet_sock *po)
1566{
1567	struct packet_fanout *f = po->fanout;
1568
1569	spin_lock(&f->lock);
1570	rcu_assign_pointer(f->arr[f->num_members], sk);
1571	smp_wmb();
1572	f->num_members++;
1573	if (f->num_members == 1)
1574		dev_add_pack(&f->prot_hook);
1575	spin_unlock(&f->lock);
1576}
1577
1578static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1579{
1580	struct packet_fanout *f = po->fanout;
1581	int i;
1582
1583	spin_lock(&f->lock);
1584	for (i = 0; i < f->num_members; i++) {
1585		if (rcu_dereference_protected(f->arr[i],
1586					      lockdep_is_held(&f->lock)) == sk)
1587			break;
1588	}
1589	BUG_ON(i >= f->num_members);
1590	rcu_assign_pointer(f->arr[i],
1591			   rcu_dereference_protected(f->arr[f->num_members - 1],
1592						     lockdep_is_held(&f->lock)));
1593	f->num_members--;
1594	if (f->num_members == 0)
1595		__dev_remove_pack(&f->prot_hook);
1596	spin_unlock(&f->lock);
1597}
1598
1599static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1600{
1601	if (sk->sk_family != PF_PACKET)
1602		return false;
1603
1604	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1605}
1606
1607static void fanout_init_data(struct packet_fanout *f)
1608{
1609	switch (f->type) {
1610	case PACKET_FANOUT_LB:
1611		atomic_set(&f->rr_cur, 0);
1612		break;
1613	case PACKET_FANOUT_CBPF:
1614	case PACKET_FANOUT_EBPF:
1615		RCU_INIT_POINTER(f->bpf_prog, NULL);
1616		break;
1617	}
1618}
1619
1620static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1621{
1622	struct bpf_prog *old;
1623
1624	spin_lock(&f->lock);
1625	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1626	rcu_assign_pointer(f->bpf_prog, new);
1627	spin_unlock(&f->lock);
1628
1629	if (old) {
1630		synchronize_net();
1631		bpf_prog_destroy(old);
1632	}
1633}
1634
1635static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1636				unsigned int len)
1637{
1638	struct bpf_prog *new;
1639	struct sock_fprog fprog;
1640	int ret;
1641
1642	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1643		return -EPERM;
1644
1645	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1646	if (ret)
1647		return ret;
1648
1649	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1650	if (ret)
1651		return ret;
1652
1653	__fanout_set_data_bpf(po->fanout, new);
1654	return 0;
1655}
1656
1657static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1658				unsigned int len)
1659{
1660	struct bpf_prog *new;
1661	u32 fd;
1662
1663	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1664		return -EPERM;
1665	if (len != sizeof(fd))
1666		return -EINVAL;
1667	if (copy_from_sockptr(&fd, data, len))
1668		return -EFAULT;
1669
1670	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1671	if (IS_ERR(new))
1672		return PTR_ERR(new);
1673
1674	__fanout_set_data_bpf(po->fanout, new);
1675	return 0;
1676}
1677
1678static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1679			   unsigned int len)
1680{
1681	switch (po->fanout->type) {
1682	case PACKET_FANOUT_CBPF:
1683		return fanout_set_data_cbpf(po, data, len);
1684	case PACKET_FANOUT_EBPF:
1685		return fanout_set_data_ebpf(po, data, len);
1686	default:
1687		return -EINVAL;
1688	}
1689}
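
/*
 * Userspace sketch (not kernel code): for PACKET_FANOUT_CBPF groups the demux
 * program is installed with PACKET_FANOUT_DATA as an ordinary classic-BPF
 * struct sock_fprog (insns/prog_len built elsewhere); PACKET_FANOUT_EBPF
 * instead takes the fd of an already-loaded BPF_PROG_TYPE_SOCKET_FILTER
 * program:
 *
 *	struct sock_fprog fprog = { .len = prog_len, .filter = insns };
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
 */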
1690
1691static void fanout_release_data(struct packet_fanout *f)
1692{
1693	switch (f->type) {
1694	case PACKET_FANOUT_CBPF:
1695	case PACKET_FANOUT_EBPF:
1696		__fanout_set_data_bpf(f, NULL);
1697	}
1698}
1699
1700static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1701{
1702	struct packet_fanout *f;
1703
1704	list_for_each_entry(f, &fanout_list, list) {
1705		if (f->id == candidate_id &&
1706		    read_pnet(&f->net) == sock_net(sk)) {
1707			return false;
1708		}
1709	}
1710	return true;
1711}
1712
1713static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1714{
1715	u16 id = fanout_next_id;
1716
1717	do {
1718		if (__fanout_id_is_free(sk, id)) {
1719			*new_id = id;
1720			fanout_next_id = id + 1;
1721			return true;
1722		}
1723
1724		id++;
1725	} while (id != fanout_next_id);
1726
1727	return false;
1728}
1729
1730static int fanout_add(struct sock *sk, struct fanout_args *args)
1731{
1732	struct packet_rollover *rollover = NULL;
1733	struct packet_sock *po = pkt_sk(sk);
1734	u16 type_flags = args->type_flags;
1735	struct packet_fanout *f, *match;
1736	u8 type = type_flags & 0xff;
1737	u8 flags = type_flags >> 8;
1738	u16 id = args->id;
1739	int err;
1740
1741	switch (type) {
1742	case PACKET_FANOUT_ROLLOVER:
1743		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1744			return -EINVAL;
1745		break;
1746	case PACKET_FANOUT_HASH:
1747	case PACKET_FANOUT_LB:
1748	case PACKET_FANOUT_CPU:
1749	case PACKET_FANOUT_RND:
1750	case PACKET_FANOUT_QM:
1751	case PACKET_FANOUT_CBPF:
1752	case PACKET_FANOUT_EBPF:
1753		break;
1754	default:
1755		return -EINVAL;
1756	}
1757
1758	mutex_lock(&fanout_mutex);
1759
1760	err = -EALREADY;
1761	if (po->fanout)
1762		goto out;
1763
1764	if (type == PACKET_FANOUT_ROLLOVER ||
1765	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1766		err = -ENOMEM;
1767		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1768		if (!rollover)
1769			goto out;
1770		atomic_long_set(&rollover->num, 0);
1771		atomic_long_set(&rollover->num_huge, 0);
1772		atomic_long_set(&rollover->num_failed, 0);
1773	}
1774
1775	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1776		if (id != 0) {
1777			err = -EINVAL;
1778			goto out;
1779		}
1780		if (!fanout_find_new_id(sk, &id)) {
1781			err = -ENOMEM;
1782			goto out;
1783		}
1784		/* ephemeral flag for the first socket in the group: drop it */
1785		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1786	}
1787
1788	match = NULL;
1789	list_for_each_entry(f, &fanout_list, list) {
1790		if (f->id == id &&
1791		    read_pnet(&f->net) == sock_net(sk)) {
1792			match = f;
1793			break;
1794		}
1795	}
1796	err = -EINVAL;
1797	if (match) {
1798		if (match->flags != flags)
1799			goto out;
1800		if (args->max_num_members &&
1801		    args->max_num_members != match->max_num_members)
1802			goto out;
1803	} else {
1804		if (args->max_num_members > PACKET_FANOUT_MAX)
1805			goto out;
1806		if (!args->max_num_members)
1807			/* legacy PACKET_FANOUT_MAX */
1808			args->max_num_members = 256;
1809		err = -ENOMEM;
1810		match = kvzalloc(struct_size(match, arr, args->max_num_members),
1811				 GFP_KERNEL);
1812		if (!match)
1813			goto out;
1814		write_pnet(&match->net, sock_net(sk));
1815		match->id = id;
1816		match->type = type;
1817		match->flags = flags;
1818		INIT_LIST_HEAD(&match->list);
1819		spin_lock_init(&match->lock);
1820		refcount_set(&match->sk_ref, 0);
1821		fanout_init_data(match);
1822		match->prot_hook.type = po->prot_hook.type;
1823		match->prot_hook.dev = po->prot_hook.dev;
1824		match->prot_hook.func = packet_rcv_fanout;
1825		match->prot_hook.af_packet_priv = match;
1826		match->prot_hook.af_packet_net = read_pnet(&match->net);
1827		match->prot_hook.id_match = match_fanout_group;
1828		match->max_num_members = args->max_num_members;
1829		match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
1830		list_add(&match->list, &fanout_list);
1831	}
1832	err = -EINVAL;
1833
1834	spin_lock(&po->bind_lock);
1835	if (po->num &&
1836	    match->type == type &&
1837	    match->prot_hook.type == po->prot_hook.type &&
1838	    match->prot_hook.dev == po->prot_hook.dev) {
1839		err = -ENOSPC;
1840		if (refcount_read(&match->sk_ref) < match->max_num_members) {
1841			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1842			WRITE_ONCE(po->fanout, match);
1843
1844			po->rollover = rollover;
1845			rollover = NULL;
1846			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1847			if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
1848				__dev_remove_pack(&po->prot_hook);
1849				__fanout_link(sk, po);
1850			}
1851			err = 0;
1852		}
1853	}
1854	spin_unlock(&po->bind_lock);
1855
1856	if (err && !refcount_read(&match->sk_ref)) {
1857		list_del(&match->list);
1858		kvfree(match);
1859	}
1860
1861out:
1862	kfree(rollover);
1863	mutex_unlock(&fanout_mutex);
1864	return err;
1865}
1866
1867/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1868 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1869 * It is the responsibility of the caller to call fanout_release_data() and
1870 * free the returned packet_fanout (after synchronize_net())
1871 */
1872static struct packet_fanout *fanout_release(struct sock *sk)
1873{
1874	struct packet_sock *po = pkt_sk(sk);
1875	struct packet_fanout *f;
1876
1877	mutex_lock(&fanout_mutex);
1878	f = po->fanout;
1879	if (f) {
1880		po->fanout = NULL;
1881
1882		if (refcount_dec_and_test(&f->sk_ref))
1883			list_del(&f->list);
1884		else
1885			f = NULL;
1886	}
1887	mutex_unlock(&fanout_mutex);
1888
1889	return f;
1890}
1891
1892static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1893					  struct sk_buff *skb)
1894{
1895	/* Earlier code assumed this would be a VLAN pkt, double-check
1896	 * this now that we have the actual packet in hand. We can only
1897	 * do this check on Ethernet devices.
1898	 */
1899	if (unlikely(dev->type != ARPHRD_ETHER))
1900		return false;
1901
1902	skb_reset_mac_header(skb);
1903	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1904}
1905
1906static const struct proto_ops packet_ops;
1907
1908static const struct proto_ops packet_ops_spkt;
1909
1910static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1911			   struct packet_type *pt, struct net_device *orig_dev)
1912{
1913	struct sock *sk;
1914	struct sockaddr_pkt *spkt;
1915
1916	/*
1917	 *	When we registered the protocol we saved the socket in the data
1918	 *	field for just this event.
1919	 */
1920
1921	sk = pt->af_packet_priv;
1922
1923	/*
1924	 *	Yank back the headers [hope the device set this
1925	 *	right or kerboom...]
1926	 *
1927	 *	Incoming packets have ll header pulled,
1928	 *	push it back.
1929	 *
1930	 *	For outgoing ones skb->data == skb_mac_header(skb)
1931	 *	so that this procedure is a no-op.
1932	 */
1933
1934	if (skb->pkt_type == PACKET_LOOPBACK)
1935		goto out;
1936
1937	if (!net_eq(dev_net(dev), sock_net(sk)))
1938		goto out;
1939
1940	skb = skb_share_check(skb, GFP_ATOMIC);
1941	if (skb == NULL)
1942		goto oom;
1943
1944	/* drop any routing info */
1945	skb_dst_drop(skb);
1946
1947	/* drop conntrack reference */
1948	nf_reset_ct(skb);
1949
1950	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1951
1952	skb_push(skb, skb->data - skb_mac_header(skb));
1953
1954	/*
1955	 *	The SOCK_PACKET socket receives _all_ frames.
1956	 */
1957
1958	spkt->spkt_family = dev->type;
1959	strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1960	spkt->spkt_protocol = skb->protocol;
1961
1962	/*
1963	 *	Charge the memory to the socket. This is done specifically
1964	 *	to prevent sockets from using up all the memory.
1965	 */
1966
1967	if (sock_queue_rcv_skb(sk, skb) == 0)
1968		return 0;
1969
1970out:
1971	kfree_skb(skb);
1972oom:
1973	return 0;
1974}
1975
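/* Fix up the skb on the transmit paths before it is handed to the device:
 * derive skb->protocol for SOCK_RAW frames that did not set it, move the
 * network header past any VLAN tags and probe the transport header.
 */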
1976static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1977{
1978	int depth;
1979
1980	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1981	    sock->type == SOCK_RAW) {
1982		skb_reset_mac_header(skb);
1983		skb->protocol = dev_parse_header_protocol(skb);
1984	}
1985
1986	/* Move network header to the right position for VLAN tagged packets */
1987	if (likely(skb->dev->type == ARPHRD_ETHER) &&
1988	    eth_type_vlan(skb->protocol) &&
1989	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
1990		skb_set_network_header(skb, depth);
1991
1992	skb_probe_transport_header(skb);
1993}
1994
1995/*
1996 *	Output a raw packet to a device layer. This bypasses all the other
1997 *	protocol layers and you must therefore supply it with a complete frame
1998 */
1999
2000static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
2001			       size_t len)
2002{
2003	struct sock *sk = sock->sk;
2004	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
2005	struct sk_buff *skb = NULL;
2006	struct net_device *dev;
2007	struct sockcm_cookie sockc;
2008	__be16 proto = 0;
2009	int err;
2010	int extra_len = 0;
2011
2012	/*
2013	 *	Get and verify the address.
2014	 */
2015
2016	if (saddr) {
2017		if (msg->msg_namelen < sizeof(struct sockaddr))
2018			return -EINVAL;
2019		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
2020			proto = saddr->spkt_protocol;
2021	} else
2022		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
2023
2024	/*
2025	 *	Find the device first to size check it
2026	 */
2027
2028	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
2029retry:
2030	rcu_read_lock();
2031	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
2032	err = -ENODEV;
2033	if (dev == NULL)
2034		goto out_unlock;
2035
2036	err = -ENETDOWN;
2037	if (!(dev->flags & IFF_UP))
2038		goto out_unlock;
2039
2040	/*
2041	 * You may not queue a frame bigger than the mtu. This is the lowest level
2042	 * raw protocol and you must do your own fragmentation at this level.
2043	 */
2044
2045	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2046		if (!netif_supports_nofcs(dev)) {
2047			err = -EPROTONOSUPPORT;
2048			goto out_unlock;
2049		}
2050		extra_len = 4; /* We're doing our own CRC */
2051	}
2052
2053	err = -EMSGSIZE;
2054	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
2055		goto out_unlock;
2056
2057	if (!skb) {
2058		size_t reserved = LL_RESERVED_SPACE(dev);
2059		int tlen = dev->needed_tailroom;
2060		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
2061
2062		rcu_read_unlock();
2063		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
2064		if (skb == NULL)
2065			return -ENOBUFS;
2066		/* FIXME: Save some space for broken drivers that write a hard
2067		 * header at transmission time by themselves. PPP is the notable
2068		 * one here. This should really be fixed at the driver level.
2069		 */
2070		skb_reserve(skb, reserved);
2071		skb_reset_network_header(skb);
2072
2073		/* Try to align data part correctly */
2074		if (hhlen) {
2075			skb->data -= hhlen;
2076			skb->tail -= hhlen;
2077			if (len < hhlen)
2078				skb_reset_network_header(skb);
2079		}
2080		err = memcpy_from_msg(skb_put(skb, len), msg, len);
2081		if (err)
2082			goto out_free;
2083		goto retry;
2084	}
2085
2086	if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
2087		err = -EINVAL;
2088		goto out_unlock;
2089	}
2090	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
2091	    !packet_extra_vlan_len_allowed(dev, skb)) {
2092		err = -EMSGSIZE;
2093		goto out_unlock;
2094	}
2095
2096	sockcm_init(&sockc, sk);
2097	if (msg->msg_controllen) {
2098		err = sock_cmsg_send(sk, msg, &sockc);
2099		if (unlikely(err))
2100			goto out_unlock;
2101	}
2102
2103	skb->protocol = proto;
2104	skb->dev = dev;
2105	skb->priority = READ_ONCE(sk->sk_priority);
2106	skb->mark = READ_ONCE(sk->sk_mark);
2107	skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
2108	skb_setup_tx_timestamp(skb, &sockc);
2109
2110	if (unlikely(extra_len == 4))
2111		skb->no_fcs = 1;
2112
2113	packet_parse_headers(skb, sock);
2114
2115	dev_queue_xmit(skb);
2116	rcu_read_unlock();
2117	return len;
2118
2119out_unlock:
2120	rcu_read_unlock();
2121out_free:
2122	kfree_skb(skb);
2123	return err;
2124}
2125
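/* Run the socket's attached BPF filter on the skb and return the number of
 * bytes to keep (the snap length); 0 means drop.  If no filter is attached,
 * @res is returned unchanged.
 */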
2126static unsigned int run_filter(struct sk_buff *skb,
2127			       const struct sock *sk,
2128			       unsigned int res)
2129{
2130	struct sk_filter *filter;
2131
2132	rcu_read_lock();
2133	filter = rcu_dereference(sk->sk_filter);
2134	if (filter != NULL)
2135		res = bpf_prog_run_clear_cb(filter->prog, skb);
2136	rcu_read_unlock();
2137
2138	return res;
2139}
2140
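/* Copy a virtio_net header describing the skb's checksum/GSO state to the
 * start of the user buffer, consuming vnet_hdr_sz bytes of the requested
 * length.
 */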
2141static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2142			   size_t *len, int vnet_hdr_sz)
2143{
2144	struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };
2145
2146	if (*len < vnet_hdr_sz)
2147		return -EINVAL;
2148	*len -= vnet_hdr_sz;
2149
2150	if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0))
2151		return -EINVAL;
2152
2153	return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
2154}
2155
2156/*
2157 * This function performs lazy skb cloning in the hope that most packets
2158 * are discarded by BPF.
2159 *
2160 * Note tricky part: we DO mangle shared skb! skb->data, skb->len
2161 * and skb->cb are mangled. It works because (and until) packets
2162 * falling here are owned by current CPU. Output packets are cloned
2163 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2164 * sequentially, so that if we return skb to original state on exit,
2165 * we will not harm anyone.
2166 */
2167
2168static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2169		      struct packet_type *pt, struct net_device *orig_dev)
2170{
2171	enum skb_drop_reason drop_reason = SKB_CONSUMED;
2172	struct sock *sk = NULL;
2173	struct sockaddr_ll *sll;
2174	struct packet_sock *po;
2175	u8 *skb_head = skb->data;
2176	int skb_len = skb->len;
2177	unsigned int snaplen, res;
2178
2179	if (skb->pkt_type == PACKET_LOOPBACK)
2180		goto drop;
2181
2182	sk = pt->af_packet_priv;
2183	po = pkt_sk(sk);
2184
2185	if (!net_eq(dev_net(dev), sock_net(sk)))
2186		goto drop;
2187
2188	skb->dev = dev;
2189
2190	if (dev_has_header(dev)) {
2191		/* The device has an explicit notion of ll header,
2192		 * exported to higher levels.
2193		 *
2194		 * Otherwise, the device hides details of its frame
2195		 * structure, so that the corresponding packet head is
2196		 * never delivered to the user.
2197		 */
2198		if (sk->sk_type != SOCK_DGRAM)
2199			skb_push(skb, skb->data - skb_mac_header(skb));
2200		else if (skb->pkt_type == PACKET_OUTGOING) {
2201			/* Special case: outgoing packets have ll header at head */
2202			skb_pull(skb, skb_network_offset(skb));
2203		}
2204	}
2205
2206	snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
2207
2208	res = run_filter(skb, sk, snaplen);
2209	if (!res)
2210		goto drop_n_restore;
2211	if (snaplen > res)
2212		snaplen = res;
2213
2214	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2215		goto drop_n_acct;
2216
2217	if (skb_shared(skb)) {
2218		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2219		if (nskb == NULL)
2220			goto drop_n_acct;
2221
2222		if (skb_head != skb->data) {
2223			skb->data = skb_head;
2224			skb->len = skb_len;
2225		}
2226		consume_skb(skb);
2227		skb = nskb;
2228	}
2229
2230	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2231
2232	sll = &PACKET_SKB_CB(skb)->sa.ll;
2233	sll->sll_hatype = dev->type;
2234	sll->sll_pkttype = skb->pkt_type;
2235	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2236		sll->sll_ifindex = orig_dev->ifindex;
2237	else
2238		sll->sll_ifindex = dev->ifindex;
2239
2240	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2241
2242	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2243	 * Use their space for storing the original skb length.
2244	 */
2245	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2246
2247	if (pskb_trim(skb, snaplen))
2248		goto drop_n_acct;
2249
2250	skb_set_owner_r(skb, sk);
2251	skb->dev = NULL;
2252	skb_dst_drop(skb);
2253
2254	/* drop conntrack reference */
2255	nf_reset_ct(skb);
2256
2257	spin_lock(&sk->sk_receive_queue.lock);
2258	po->stats.stats1.tp_packets++;
2259	sock_skb_set_dropcount(sk, skb);
2260	skb_clear_delivery_time(skb);
2261	__skb_queue_tail(&sk->sk_receive_queue, skb);
2262	spin_unlock(&sk->sk_receive_queue.lock);
2263	sk->sk_data_ready(sk);
2264	return 0;
2265
2266drop_n_acct:
2267	atomic_inc(&po->tp_drops);
2268	atomic_inc(&sk->sk_drops);
2269	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
2270
2271drop_n_restore:
2272	if (skb_head != skb->data && skb_shared(skb)) {
2273		skb->data = skb_head;
2274		skb->len = skb_len;
2275	}
2276drop:
2277	sk_skb_reason_drop(sk, skb, drop_reason);
2278	return 0;
2279}
2280
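/* Receive path for mmap()ed sockets: reserve the next frame (or block slot
 * for TPACKET_V3) in the RX ring, copy the packet and its metadata into it
 * and hand the frame to userspace by updating its status word.
 */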
2281static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2282		       struct packet_type *pt, struct net_device *orig_dev)
2283{
2284	enum skb_drop_reason drop_reason = SKB_CONSUMED;
2285	struct sock *sk = NULL;
2286	struct packet_sock *po;
2287	struct sockaddr_ll *sll;
2288	union tpacket_uhdr h;
2289	u8 *skb_head = skb->data;
2290	int skb_len = skb->len;
2291	unsigned int snaplen, res;
2292	unsigned long status = TP_STATUS_USER;
2293	unsigned short macoff, hdrlen;
2294	unsigned int netoff;
2295	struct sk_buff *copy_skb = NULL;
2296	struct timespec64 ts;
2297	__u32 ts_status;
2298	unsigned int slot_id = 0;
2299	int vnet_hdr_sz = 0;
2300
2301	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2302	 * We may add members to them up to the current aligned size without forcing
2303	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2304	 */
2305	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2306	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2307
2308	if (skb->pkt_type == PACKET_LOOPBACK)
2309		goto drop;
2310
2311	sk = pt->af_packet_priv;
2312	po = pkt_sk(sk);
2313
2314	if (!net_eq(dev_net(dev), sock_net(sk)))
2315		goto drop;
2316
2317	if (dev_has_header(dev)) {
2318		if (sk->sk_type != SOCK_DGRAM)
2319			skb_push(skb, skb->data - skb_mac_header(skb));
2320		else if (skb->pkt_type == PACKET_OUTGOING) {
2321			/* Special case: outgoing packets have ll header at head */
2322			skb_pull(skb, skb_network_offset(skb));
2323		}
2324	}
2325
2326	snaplen = skb_frags_readable(skb) ? skb->len : skb_headlen(skb);
2327
2328	res = run_filter(skb, sk, snaplen);
2329	if (!res)
2330		goto drop_n_restore;
2331
2332	/* If we are flooded, just give up */
2333	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2334		atomic_inc(&po->tp_drops);
2335		goto drop_n_restore;
2336	}
2337
2338	if (skb->ip_summed == CHECKSUM_PARTIAL)
2339		status |= TP_STATUS_CSUMNOTREADY;
2340	else if (skb->pkt_type != PACKET_OUTGOING &&
2341		 skb_csum_unnecessary(skb))
2342		status |= TP_STATUS_CSUM_VALID;
2343	if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
2344		status |= TP_STATUS_GSO_TCP;
2345
2346	if (snaplen > res)
2347		snaplen = res;
2348
2349	if (sk->sk_type == SOCK_DGRAM) {
2350		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2351				  po->tp_reserve;
2352	} else {
2353		unsigned int maclen = skb_network_offset(skb);
2354		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2355				       (maclen < 16 ? 16 : maclen)) +
2356				       po->tp_reserve;
2357		vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2358		if (vnet_hdr_sz)
2359			netoff += vnet_hdr_sz;
2360		macoff = netoff - maclen;
2361	}
2362	if (netoff > USHRT_MAX) {
2363		atomic_inc(&po->tp_drops);
2364		goto drop_n_restore;
2365	}
2366	if (po->tp_version <= TPACKET_V2) {
2367		if (macoff + snaplen > po->rx_ring.frame_size) {
2368			if (READ_ONCE(po->copy_thresh) &&
2369			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2370				if (skb_shared(skb)) {
2371					copy_skb = skb_clone(skb, GFP_ATOMIC);
2372				} else {
2373					copy_skb = skb_get(skb);
2374					skb_head = skb->data;
2375				}
2376				if (copy_skb) {
2377					memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2378					       sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2379					skb_set_owner_r(copy_skb, sk);
2380				}
2381			}
2382			snaplen = po->rx_ring.frame_size - macoff;
2383			if ((int)snaplen < 0) {
2384				snaplen = 0;
2385				vnet_hdr_sz = 0;
2386			}
2387		}
2388	} else if (unlikely(macoff + snaplen >
2389			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2390		u32 nval;
2391
2392		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2393		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2394			    snaplen, nval, macoff);
2395		snaplen = nval;
2396		if (unlikely((int)snaplen < 0)) {
2397			snaplen = 0;
2398			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2399			vnet_hdr_sz = 0;
2400		}
2401	}
2402	spin_lock(&sk->sk_receive_queue.lock);
2403	h.raw = packet_current_rx_frame(po, skb,
2404					TP_STATUS_KERNEL, (macoff+snaplen));
2405	if (!h.raw)
2406		goto drop_n_account;
2407
2408	if (po->tp_version <= TPACKET_V2) {
2409		slot_id = po->rx_ring.head;
2410		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2411			goto drop_n_account;
2412		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2413	}
2414
2415	if (vnet_hdr_sz &&
2416	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2417				    sizeof(struct virtio_net_hdr),
2418				    vio_le(), true, 0)) {
2419		if (po->tp_version == TPACKET_V3)
2420			prb_clear_blk_fill_status(&po->rx_ring);
2421		goto drop_n_account;
2422	}
2423
2424	if (po->tp_version <= TPACKET_V2) {
2425		packet_increment_rx_head(po, &po->rx_ring);
2426	/*
2427	 * LOSING will be reported until you read the stats,
2428	 * because it's COR - Clear On Read.
2429	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2430	 * at the packet level.
2431	 */
2432		if (atomic_read(&po->tp_drops))
2433			status |= TP_STATUS_LOSING;
2434	}
2435
2436	po->stats.stats1.tp_packets++;
2437	if (copy_skb) {
2438		status |= TP_STATUS_COPY;
2439		skb_clear_delivery_time(copy_skb);
2440		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2441	}
2442	spin_unlock(&sk->sk_receive_queue.lock);
2443
2444	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2445
2446	/* Always timestamp; prefer an existing software timestamp taken
2447	 * closer to the time of capture.
2448	 */
2449	ts_status = tpacket_get_timestamp(skb, &ts,
2450					  READ_ONCE(po->tp_tstamp) |
2451					  SOF_TIMESTAMPING_SOFTWARE);
2452	if (!ts_status)
2453		ktime_get_real_ts64(&ts);
2454
2455	status |= ts_status;
2456
2457	switch (po->tp_version) {
2458	case TPACKET_V1:
2459		h.h1->tp_len = skb->len;
2460		h.h1->tp_snaplen = snaplen;
2461		h.h1->tp_mac = macoff;
2462		h.h1->tp_net = netoff;
2463		h.h1->tp_sec = ts.tv_sec;
2464		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2465		hdrlen = sizeof(*h.h1);
2466		break;
2467	case TPACKET_V2:
2468		h.h2->tp_len = skb->len;
2469		h.h2->tp_snaplen = snaplen;
2470		h.h2->tp_mac = macoff;
2471		h.h2->tp_net = netoff;
2472		h.h2->tp_sec = ts.tv_sec;
2473		h.h2->tp_nsec = ts.tv_nsec;
2474		if (skb_vlan_tag_present(skb)) {
2475			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2476			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2477			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2478		} else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
2479			h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev);
2480			h.h2->tp_vlan_tpid = ntohs(skb->protocol);
2481			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2482		} else {
2483			h.h2->tp_vlan_tci = 0;
2484			h.h2->tp_vlan_tpid = 0;
2485		}
2486		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2487		hdrlen = sizeof(*h.h2);
2488		break;
2489	case TPACKET_V3:
2490		/* tp_next_offset and the vlan fields are already populated above,
2491		 * so DON'T clear them here.
2492		 */
2493		h.h3->tp_status |= status;
2494		h.h3->tp_len = skb->len;
2495		h.h3->tp_snaplen = snaplen;
2496		h.h3->tp_mac = macoff;
2497		h.h3->tp_net = netoff;
2498		h.h3->tp_sec  = ts.tv_sec;
2499		h.h3->tp_nsec = ts.tv_nsec;
2500		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2501		hdrlen = sizeof(*h.h3);
2502		break;
2503	default:
2504		BUG();
2505	}
2506
2507	sll = h.raw + TPACKET_ALIGN(hdrlen);
2508	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2509	sll->sll_family = AF_PACKET;
2510	sll->sll_hatype = dev->type;
2511	sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ?
2512		vlan_get_protocol_dgram(skb) : skb->protocol;
2513	sll->sll_pkttype = skb->pkt_type;
2514	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2515		sll->sll_ifindex = orig_dev->ifindex;
2516	else
2517		sll->sll_ifindex = dev->ifindex;
2518
2519	smp_mb();
2520
2521#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2522	if (po->tp_version <= TPACKET_V2) {
2523		u8 *start, *end;
2524
2525		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2526					macoff + snaplen);
2527
2528		for (start = h.raw; start < end; start += PAGE_SIZE)
2529			flush_dcache_page(pgv_to_page(start));
2530	}
2531	smp_wmb();
2532#endif
2533
2534	if (po->tp_version <= TPACKET_V2) {
2535		spin_lock(&sk->sk_receive_queue.lock);
2536		__packet_set_status(po, h.raw, status);
2537		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2538		spin_unlock(&sk->sk_receive_queue.lock);
2539		sk->sk_data_ready(sk);
2540	} else if (po->tp_version == TPACKET_V3) {
2541		prb_clear_blk_fill_status(&po->rx_ring);
2542	}
2543
2544drop_n_restore:
2545	if (skb_head != skb->data && skb_shared(skb)) {
2546		skb->data = skb_head;
2547		skb->len = skb_len;
2548	}
2549drop:
2550	sk_skb_reason_drop(sk, skb, drop_reason);
2551	return 0;
2552
2553drop_n_account:
2554	spin_unlock(&sk->sk_receive_queue.lock);
2555	atomic_inc(&po->tp_drops);
2556	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
2557
2558	sk->sk_data_ready(sk);
2559	sk_skb_reason_drop(sk, copy_skb, drop_reason);
2560	goto drop_n_restore;
2561}
2562
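/* skb destructor for TX ring frames: store the transmit timestamp if one was
 * requested, mark the ring slot TP_STATUS_AVAILABLE again and wake a sender
 * waiting in tpacket_snd().
 */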
2563static void tpacket_destruct_skb(struct sk_buff *skb)
2564{
2565	struct packet_sock *po = pkt_sk(skb->sk);
2566
2567	if (likely(po->tx_ring.pg_vec)) {
2568		void *ph;
2569		__u32 ts;
2570
2571		ph = skb_zcopy_get_nouarg(skb);
2572		packet_dec_pending(&po->tx_ring);
2573
2574		ts = __packet_set_timestamp(po, ph, skb);
2575		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2576
2577		complete(&po->skb_completion);
2578	}
2579
2580	sock_wfree(skb);
2581}
2582
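/* Sanity-check a user-supplied virtio_net header: grow hdr_len so that it
 * covers the checksum fields and reject headers that claim more data than
 * the packet actually holds.
 */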
2583static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2584{
2585	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2586	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2587	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2588	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2589		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2590			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2591			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2592
2593	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2594		return -EINVAL;
2595
2596	return 0;
2597}
2598
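/* Pull a virtio_net header of vnet_hdr_sz bytes off the front of the message
 * and validate it; on success the iterator points at the link-layer header.
 */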
2599static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2600				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
2601{
2602	int ret;
2603
2604	if (*len < vnet_hdr_sz)
2605		return -EINVAL;
2606	*len -= vnet_hdr_sz;
2607
2608	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2609		return -EFAULT;
2610
2611	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
2612	if (ret)
2613		return ret;
2614
2615	/* move iter to point to the start of mac header */
2616	if (vnet_hdr_sz != sizeof(struct virtio_net_hdr))
2617		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
2618
2619	return 0;
2620}
2621
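/* Build an skb for one TX ring frame: construct or copy the link-layer
 * header into the linear area, then attach the rest of the frame as page
 * fragments referencing the ring memory directly.
 */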
2622static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2623		void *frame, struct net_device *dev, void *data, int tp_len,
2624		__be16 proto, unsigned char *addr, int hlen, int copylen,
2625		const struct sockcm_cookie *sockc)
2626{
2627	union tpacket_uhdr ph;
2628	int to_write, offset, len, nr_frags, len_max;
2629	struct socket *sock = po->sk.sk_socket;
2630	struct page *page;
2631	int err;
2632
2633	ph.raw = frame;
2634
2635	skb->protocol = proto;
2636	skb->dev = dev;
2637	skb->priority = READ_ONCE(po->sk.sk_priority);
2638	skb->mark = READ_ONCE(po->sk.sk_mark);
2639	skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid);
2640	skb_setup_tx_timestamp(skb, sockc);
2641	skb_zcopy_set_nouarg(skb, ph.raw);
2642
2643	skb_reserve(skb, hlen);
2644	skb_reset_network_header(skb);
2645
2646	to_write = tp_len;
2647
2648	if (sock->type == SOCK_DGRAM) {
2649		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2650				NULL, tp_len);
2651		if (unlikely(err < 0))
2652			return -EINVAL;
2653	} else if (copylen) {
2654		int hdrlen = min_t(int, copylen, tp_len);
2655
2656		skb_push(skb, dev->hard_header_len);
2657		skb_put(skb, copylen - dev->hard_header_len);
2658		err = skb_store_bits(skb, 0, data, hdrlen);
2659		if (unlikely(err))
2660			return err;
2661		if (!dev_validate_header(dev, skb->data, hdrlen))
2662			return -EINVAL;
2663
2664		data += hdrlen;
2665		to_write -= hdrlen;
2666	}
2667
2668	offset = offset_in_page(data);
2669	len_max = PAGE_SIZE - offset;
2670	len = ((to_write > len_max) ? len_max : to_write);
2671
2672	skb->data_len = to_write;
2673	skb->len += to_write;
2674	skb->truesize += to_write;
2675	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2676
2677	while (likely(to_write)) {
2678		nr_frags = skb_shinfo(skb)->nr_frags;
2679
2680		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2681			pr_err("Packet exceeds the number of skb frags (%u)\n",
2682			       (unsigned int)MAX_SKB_FRAGS);
2683			return -EFAULT;
2684		}
2685
2686		page = pgv_to_page(data);
2687		data += len;
2688		flush_dcache_page(page);
2689		get_page(page);
2690		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2691		to_write -= len;
2692		offset = 0;
2693		len_max = PAGE_SIZE;
2694		len = ((to_write > len_max) ? len_max : to_write);
2695	}
2696
2697	packet_parse_headers(skb, sock);
2698
2699	return tp_len;
2700}
2701
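/* Validate one TX ring frame header for the ring version in use, honouring a
 * user-supplied data offset when PACKET_TX_HAS_OFF is set.  Points *data at
 * the packet data inside the frame and returns the packet length.
 */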
2702static int tpacket_parse_header(struct packet_sock *po, void *frame,
2703				int size_max, void **data)
2704{
2705	union tpacket_uhdr ph;
2706	int tp_len, off;
2707
2708	ph.raw = frame;
2709
2710	switch (po->tp_version) {
2711	case TPACKET_V3:
2712		if (ph.h3->tp_next_offset != 0) {
2713			pr_warn_once("variable sized slot not supported");
2714			return -EINVAL;
2715		}
2716		tp_len = ph.h3->tp_len;
2717		break;
2718	case TPACKET_V2:
2719		tp_len = ph.h2->tp_len;
2720		break;
2721	default:
2722		tp_len = ph.h1->tp_len;
2723		break;
2724	}
2725	if (unlikely(tp_len > size_max)) {
2726		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2727		return -EMSGSIZE;
2728	}
2729
2730	if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
2731		int off_min, off_max;
2732
2733		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2734		off_max = po->tx_ring.frame_size - tp_len;
2735		if (po->sk.sk_type == SOCK_DGRAM) {
2736			switch (po->tp_version) {
2737			case TPACKET_V3:
2738				off = ph.h3->tp_net;
2739				break;
2740			case TPACKET_V2:
2741				off = ph.h2->tp_net;
2742				break;
2743			default:
2744				off = ph.h1->tp_net;
2745				break;
2746			}
2747		} else {
2748			switch (po->tp_version) {
2749			case TPACKET_V3:
2750				off = ph.h3->tp_mac;
2751				break;
2752			case TPACKET_V2:
2753				off = ph.h2->tp_mac;
2754				break;
2755			default:
2756				off = ph.h1->tp_mac;
2757				break;
2758			}
2759		}
2760		if (unlikely((off < off_min) || (off_max < off)))
2761			return -EINVAL;
2762	} else {
2763		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2764	}
2765
2766	*data = frame + off;
2767	return tp_len;
2768}
2769
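/* Transmit loop for the TX ring: walk the ring, turn every frame marked
 * TP_STATUS_SEND_REQUEST into an skb and hand it to the device, optionally
 * waiting for in-flight frames to be released by tpacket_destruct_skb().
 */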
2770static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2771{
2772	struct sk_buff *skb = NULL;
2773	struct net_device *dev;
2774	struct virtio_net_hdr *vnet_hdr = NULL;
2775	struct sockcm_cookie sockc;
2776	__be16 proto;
2777	int err, reserve = 0;
2778	void *ph;
2779	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2780	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2781	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2782	unsigned char *addr = NULL;
2783	int tp_len, size_max;
2784	void *data;
2785	int len_sum = 0;
2786	int status = TP_STATUS_AVAILABLE;
2787	int hlen, tlen, copylen = 0;
2788	long timeo = 0;
2789
2790	mutex_lock(&po->pg_vec_lock);
2791
2792	/* The packet_sendmsg() check on tx_ring.pg_vec was lockless,
2793	 * so we need to confirm it under the protection of pg_vec_lock.
2794	 */
2795	if (unlikely(!po->tx_ring.pg_vec)) {
2796		err = -EBUSY;
2797		goto out;
2798	}
2799	if (likely(saddr == NULL)) {
2800		dev	= packet_cached_dev_get(po);
2801		proto	= READ_ONCE(po->num);
2802	} else {
2803		err = -EINVAL;
2804		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2805			goto out;
2806		if (msg->msg_namelen < (saddr->sll_halen
2807					+ offsetof(struct sockaddr_ll,
2808						sll_addr)))
2809			goto out;
2810		proto	= saddr->sll_protocol;
2811		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2812		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2813			if (dev && msg->msg_namelen < dev->addr_len +
2814				   offsetof(struct sockaddr_ll, sll_addr))
2815				goto out_put;
2816			addr = saddr->sll_addr;
2817		}
2818	}
2819
2820	err = -ENXIO;
2821	if (unlikely(dev == NULL))
2822		goto out;
2823	err = -ENETDOWN;
2824	if (unlikely(!(dev->flags & IFF_UP)))
2825		goto out_put;
2826
2827	sockcm_init(&sockc, &po->sk);
2828	if (msg->msg_controllen) {
2829		err = sock_cmsg_send(&po->sk, msg, &sockc);
2830		if (unlikely(err))
2831			goto out_put;
2832	}
2833
2834	if (po->sk.sk_socket->type == SOCK_RAW)
2835		reserve = dev->hard_header_len;
2836	size_max = po->tx_ring.frame_size
2837		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2838
2839	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
2840		size_max = dev->mtu + reserve + VLAN_HLEN;
2841
2842	reinit_completion(&po->skb_completion);
2843
2844	do {
2845		ph = packet_current_frame(po, &po->tx_ring,
2846					  TP_STATUS_SEND_REQUEST);
2847		if (unlikely(ph == NULL)) {
2848			if (need_wait && skb) {
2849				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2850				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2851				if (timeo <= 0) {
2852					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2853					goto out_put;
2854				}
2855			}
2856			/* check for additional frames */
2857			continue;
2858		}
2859
2860		skb = NULL;
2861		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2862		if (tp_len < 0)
2863			goto tpacket_error;
2864
2865		status = TP_STATUS_SEND_REQUEST;
2866		hlen = LL_RESERVED_SPACE(dev);
2867		tlen = dev->needed_tailroom;
2868		if (vnet_hdr_sz) {
2869			vnet_hdr = data;
2870			data += vnet_hdr_sz;
2871			tp_len -= vnet_hdr_sz;
2872			if (tp_len < 0 ||
2873			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2874				tp_len = -EINVAL;
2875				goto tpacket_error;
2876			}
2877			copylen = __virtio16_to_cpu(vio_le(),
2878						    vnet_hdr->hdr_len);
2879		}
2880		copylen = max_t(int, copylen, dev->hard_header_len);
2881		skb = sock_alloc_send_skb(&po->sk,
2882				hlen + tlen + sizeof(struct sockaddr_ll) +
2883				(copylen - dev->hard_header_len),
2884				!need_wait, &err);
2885
2886		if (unlikely(skb == NULL)) {
2887			/* we assume the socket was initially writeable ... */
2888			if (likely(len_sum > 0))
2889				err = len_sum;
2890			goto out_status;
2891		}
2892		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2893					  addr, hlen, copylen, &sockc);
2894		if (likely(tp_len >= 0) &&
2895		    tp_len > dev->mtu + reserve &&
2896		    !vnet_hdr_sz &&
2897		    !packet_extra_vlan_len_allowed(dev, skb))
2898			tp_len = -EMSGSIZE;
2899
2900		if (unlikely(tp_len < 0)) {
2901tpacket_error:
2902			if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
2903				__packet_set_status(po, ph,
2904						TP_STATUS_AVAILABLE);
2905				packet_increment_head(&po->tx_ring);
2906				kfree_skb(skb);
2907				continue;
2908			} else {
2909				status = TP_STATUS_WRONG_FORMAT;
2910				err = tp_len;
2911				goto out_status;
2912			}
2913		}
2914
2915		if (vnet_hdr_sz) {
2916			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2917				tp_len = -EINVAL;
2918				goto tpacket_error;
2919			}
2920			virtio_net_hdr_set_proto(skb, vnet_hdr);
2921		}
2922
2923		skb->destructor = tpacket_destruct_skb;
2924		__packet_set_status(po, ph, TP_STATUS_SENDING);
2925		packet_inc_pending(&po->tx_ring);
2926
2927		status = TP_STATUS_SEND_REQUEST;
2928		err = packet_xmit(po, skb);
2929		if (unlikely(err != 0)) {
2930			if (err > 0)
2931				err = net_xmit_errno(err);
2932			if (err && __packet_get_status(po, ph) ==
2933				   TP_STATUS_AVAILABLE) {
2934				/* skb was destructed already */
2935				skb = NULL;
2936				goto out_status;
2937			}
2938			/*
2939			 * skb was dropped but not destructed yet;
2940			 * let's treat it like congestion or err < 0
2941			 */
2942			err = 0;
2943		}
2944		packet_increment_head(&po->tx_ring);
2945		len_sum += tp_len;
2946	} while (likely((ph != NULL) ||
2947		/* Note: packet_read_pending() might be slow if we have
2948		 * to call it, as it's a per-cpu variable, but in the fast path
2949		 * we already short-circuit the loop with the first
2950		 * condition, and luckily don't have to go that path
2951		 * anyway.
2952		 */
2953		 (need_wait && packet_read_pending(&po->tx_ring))));
2954
2955	err = len_sum;
2956	goto out_put;
2957
2958out_status:
2959	__packet_set_status(po, ph, status);
2960	kfree_skb(skb);
2961out_put:
2962	dev_put(dev);
2963out:
2964	mutex_unlock(&po->pg_vec_lock);
2965	return err;
2966}
2967
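/* Allocate an skb for packet_snd(): small packets are made entirely linear,
 * larger ones carry @linear bytes of linear data with the remainder in page
 * fragments.
 */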
2968static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2969				        size_t reserve, size_t len,
2970				        size_t linear, int noblock,
2971				        int *err)
2972{
2973	struct sk_buff *skb;
2974
2975	/* Under a page?  Don't bother with paged skb. */
2976	if (prepad + len < PAGE_SIZE || !linear)
2977		linear = len;
2978
2979	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
2980		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
2981	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2982				   err, PAGE_ALLOC_COSTLY_ORDER);
2983	if (!skb)
2984		return NULL;
2985
2986	skb_reserve(skb, reserve);
2987	skb_put(skb, linear);
2988	skb->data_len = len - linear;
2989	skb->len += len - linear;
2990
2991	return skb;
2992}
2993
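/* Non-ring transmit path: resolve the output device, build an skb from the
 * user message (optionally preceded by a virtio_net header), validate the
 * link-layer header for SOCK_RAW sockets and queue it for transmission.
 */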
2994static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2995{
2996	struct sock *sk = sock->sk;
2997	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2998	struct sk_buff *skb;
2999	struct net_device *dev;
3000	__be16 proto;
3001	unsigned char *addr = NULL;
3002	int err, reserve = 0;
3003	struct sockcm_cookie sockc;
3004	struct virtio_net_hdr vnet_hdr = { 0 };
3005	int offset = 0;
3006	struct packet_sock *po = pkt_sk(sk);
3007	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
3008	int hlen, tlen, linear;
3009	int extra_len = 0;
3010
3011	/*
3012	 *	Get and verify the address.
3013	 */
3014
3015	if (likely(saddr == NULL)) {
3016		dev	= packet_cached_dev_get(po);
3017		proto	= READ_ONCE(po->num);
3018	} else {
3019		err = -EINVAL;
3020		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
3021			goto out;
3022		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
3023			goto out;
3024		proto	= saddr->sll_protocol;
3025		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
3026		if (sock->type == SOCK_DGRAM) {
3027			if (dev && msg->msg_namelen < dev->addr_len +
3028				   offsetof(struct sockaddr_ll, sll_addr))
3029				goto out_unlock;
3030			addr = saddr->sll_addr;
3031		}
3032	}
3033
3034	err = -ENXIO;
3035	if (unlikely(dev == NULL))
3036		goto out_unlock;
3037	err = -ENETDOWN;
3038	if (unlikely(!(dev->flags & IFF_UP)))
3039		goto out_unlock;
3040
3041	sockcm_init(&sockc, sk);
3042	sockc.mark = READ_ONCE(sk->sk_mark);
3043	if (msg->msg_controllen) {
3044		err = sock_cmsg_send(sk, msg, &sockc);
3045		if (unlikely(err))
3046			goto out_unlock;
3047	}
3048
3049	if (sock->type == SOCK_RAW)
3050		reserve = dev->hard_header_len;
3051	if (vnet_hdr_sz) {
3052		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
3053		if (err)
3054			goto out_unlock;
3055	}
3056
3057	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
3058		if (!netif_supports_nofcs(dev)) {
3059			err = -EPROTONOSUPPORT;
3060			goto out_unlock;
3061		}
3062		extra_len = 4; /* We're doing our own CRC */
3063	}
3064
3065	err = -EMSGSIZE;
3066	if (!vnet_hdr.gso_type &&
3067	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
3068		goto out_unlock;
3069
3070	err = -ENOBUFS;
3071	hlen = LL_RESERVED_SPACE(dev);
3072	tlen = dev->needed_tailroom;
3073	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
3074	linear = max(linear, min_t(int, len, dev->hard_header_len));
3075	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3076			       msg->msg_flags & MSG_DONTWAIT, &err);
3077	if (skb == NULL)
3078		goto out_unlock;
3079
3080	skb_reset_network_header(skb);
3081
3082	err = -EINVAL;
3083	if (sock->type == SOCK_DGRAM) {
3084		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
3085		if (unlikely(offset < 0))
3086			goto out_free;
3087	} else if (reserve) {
3088		skb_reserve(skb, -reserve);
3089		if (len < reserve + sizeof(struct ipv6hdr) &&
3090		    dev->min_header_len != dev->hard_header_len)
3091			skb_reset_network_header(skb);
3092	}
3093
3094	/* Returns -EFAULT on error */
3095	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3096	if (err)
3097		goto out_free;
3098
3099	if ((sock->type == SOCK_RAW &&
3100	     !dev_validate_header(dev, skb->data, len)) || !skb->len) {
3101		err = -EINVAL;
3102		goto out_free;
3103	}
3104
3105	skb_setup_tx_timestamp(skb, &sockc);
3106
3107	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3108	    !packet_extra_vlan_len_allowed(dev, skb)) {
3109		err = -EMSGSIZE;
3110		goto out_free;
3111	}
3112
3113	skb->protocol = proto;
3114	skb->dev = dev;
3115	skb->priority = READ_ONCE(sk->sk_priority);
3116	skb->mark = sockc.mark;
3117	skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid);
3118
3119	if (unlikely(extra_len == 4))
3120		skb->no_fcs = 1;
3121
3122	packet_parse_headers(skb, sock);
3123
3124	if (vnet_hdr_sz) {
3125		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3126		if (err)
3127			goto out_free;
3128		len += vnet_hdr_sz;
3129		virtio_net_hdr_set_proto(skb, &vnet_hdr);
3130	}
3131
3132	err = packet_xmit(po, skb);
3133
3134	if (unlikely(err != 0)) {
3135		if (err > 0)
3136			err = net_xmit_errno(err);
3137		if (err)
3138			goto out_unlock;
3139	}
3140
3141	dev_put(dev);
3142
3143	return len;
3144
3145out_free:
3146	kfree_skb(skb);
3147out_unlock:
3148	dev_put(dev);
3149out:
3150	return err;
3151}
3152
3153static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3154{
3155	struct sock *sk = sock->sk;
3156	struct packet_sock *po = pkt_sk(sk);
3157
3158	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3159	 * tpacket_snd() will redo the check safely.
3160	 */
3161	if (data_race(po->tx_ring.pg_vec))
3162		return tpacket_snd(po, msg);
3163
3164	return packet_snd(sock, msg, len);
3165}
3166
3167/*
3168 *	Close a PACKET socket. This is fairly simple. We immediately go
3169 *	to 'closed' state and remove our protocol entry in the device list.
3170 */
3171
3172static int packet_release(struct socket *sock)
3173{
3174	struct sock *sk = sock->sk;
3175	struct packet_sock *po;
3176	struct packet_fanout *f;
3177	struct net *net;
3178	union tpacket_req_u req_u;
3179
3180	if (!sk)
3181		return 0;
3182
3183	net = sock_net(sk);
3184	po = pkt_sk(sk);
3185
3186	mutex_lock(&net->packet.sklist_lock);
3187	sk_del_node_init_rcu(sk);
3188	mutex_unlock(&net->packet.sklist_lock);
3189
3190	sock_prot_inuse_add(net, sk->sk_prot, -1);
3191
3192	spin_lock(&po->bind_lock);
3193	unregister_prot_hook(sk, false);
3194	packet_cached_dev_reset(po);
3195
3196	if (po->prot_hook.dev) {
3197		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3198		po->prot_hook.dev = NULL;
3199	}
3200	spin_unlock(&po->bind_lock);
3201
3202	packet_flush_mclist(sk);
3203
3204	lock_sock(sk);
3205	if (po->rx_ring.pg_vec) {
3206		memset(&req_u, 0, sizeof(req_u));
3207		packet_set_ring(sk, &req_u, 1, 0);
3208	}
3209
3210	if (po->tx_ring.pg_vec) {
3211		memset(&req_u, 0, sizeof(req_u));
3212		packet_set_ring(sk, &req_u, 1, 1);
3213	}
3214	release_sock(sk);
3215
3216	f = fanout_release(sk);
3217
3218	synchronize_net();
3219
3220	kfree(po->rollover);
3221	if (f) {
3222		fanout_release_data(f);
3223		kvfree(f);
3224	}
3225	/*
3226	 *	Now the socket is dead. No more input will appear.
3227	 */
3228	sock_orphan(sk);
3229	sock->sk = NULL;
3230
3231	/* Purge queues */
3232
3233	skb_queue_purge(&sk->sk_receive_queue);
3234	packet_free_pending(po);
3235
3236	sock_put(sk);
3237	return 0;
3238}
3239
3240/*
3241 *	Attach a packet hook.
3242 */
3243
3244static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3245			  __be16 proto)
3246{
3247	struct packet_sock *po = pkt_sk(sk);
3248	struct net_device *dev = NULL;
3249	bool unlisted = false;
3250	bool need_rehook;
3251	int ret = 0;
3252
3253	lock_sock(sk);
3254	spin_lock(&po->bind_lock);
3255	if (!proto)
3256		proto = po->num;
3257
3258	rcu_read_lock();
3259
3260	if (po->fanout) {
3261		ret = -EINVAL;
3262		goto out_unlock;
3263	}
3264
3265	if (name) {
3266		dev = dev_get_by_name_rcu(sock_net(sk), name);
3267		if (!dev) {
3268			ret = -ENODEV;
3269			goto out_unlock;
3270		}
3271	} else if (ifindex) {
3272		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3273		if (!dev) {
3274			ret = -ENODEV;
3275			goto out_unlock;
3276		}
3277	}
3278
3279	need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3280
3281	if (need_rehook) {
3282		dev_hold(dev);
3283		if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
3284			rcu_read_unlock();
3285			/* prevents packet_notifier() from calling
3286			 * register_prot_hook()
3287			 */
3288			WRITE_ONCE(po->num, 0);
3289			__unregister_prot_hook(sk, true);
3290			rcu_read_lock();
3291			if (dev)
3292				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3293								 dev->ifindex);
3294		}
3295
3296		BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
3297		WRITE_ONCE(po->num, proto);
3298		po->prot_hook.type = proto;
3299
3300		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3301
3302		if (unlikely(unlisted)) {
3303			po->prot_hook.dev = NULL;
3304			WRITE_ONCE(po->ifindex, -1);
3305			packet_cached_dev_reset(po);
3306		} else {
3307			netdev_hold(dev, &po->prot_hook.dev_tracker,
3308				    GFP_ATOMIC);
3309			po->prot_hook.dev = dev;
3310			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3311			packet_cached_dev_assign(po, dev);
3312		}
3313		dev_put(dev);
3314	}
3315
3316	if (proto == 0 || !need_rehook)
3317		goto out_unlock;
3318
3319	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3320		register_prot_hook(sk);
3321	} else {
3322		sk->sk_err = ENETDOWN;
3323		if (!sock_flag(sk, SOCK_DEAD))
3324			sk_error_report(sk);
3325	}
3326
3327out_unlock:
3328	rcu_read_unlock();
3329	spin_unlock(&po->bind_lock);
3330	release_sock(sk);
3331	return ret;
3332}
3333
3334/*
3335 *	Bind a packet socket to a device
3336 */
3337
3338static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3339			    int addr_len)
3340{
3341	struct sock *sk = sock->sk;
3342	char name[sizeof(uaddr->sa_data_min) + 1];
3343
3344	/*
3345	 *	Check legality
3346	 */
3347
3348	if (addr_len != sizeof(struct sockaddr))
3349		return -EINVAL;
3350	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3351	 * zero-terminated.
3352	 */
3353	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
3354	name[sizeof(uaddr->sa_data_min)] = 0;
3355
3356	return packet_do_bind(sk, name, 0, 0);
3357}
3358
3359static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3360{
3361	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3362	struct sock *sk = sock->sk;
3363
3364	/*
3365	 *	Check legality
3366	 */
3367
3368	if (addr_len < sizeof(struct sockaddr_ll))
3369		return -EINVAL;
3370	if (sll->sll_family != AF_PACKET)
3371		return -EINVAL;
3372
3373	return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
3374}
3375
3376static struct proto packet_proto = {
3377	.name	  = "PACKET",
3378	.owner	  = THIS_MODULE,
3379	.obj_size = sizeof(struct packet_sock),
3380};
3381
3382/*
3383 *	Create a packet of type SOCK_PACKET.
3384 */
3385
3386static int packet_create(struct net *net, struct socket *sock, int protocol,
3387			 int kern)
3388{
3389	struct sock *sk;
3390	struct packet_sock *po;
3391	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3392	int err;
3393
3394	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3395		return -EPERM;
3396	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3397	    sock->type != SOCK_PACKET)
3398		return -ESOCKTNOSUPPORT;
3399
3400	sock->state = SS_UNCONNECTED;
3401
3402	err = -ENOBUFS;
3403	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3404	if (sk == NULL)
3405		goto out;
3406
3407	sock->ops = &packet_ops;
3408	if (sock->type == SOCK_PACKET)
3409		sock->ops = &packet_ops_spkt;
3410
3411	po = pkt_sk(sk);
3412	err = packet_alloc_pending(po);
3413	if (err)
3414		goto out_sk_free;
3415
3416	sock_init_data(sock, sk);
3417
3418	init_completion(&po->skb_completion);
3419	sk->sk_family = PF_PACKET;
3420	po->num = proto;
3421
3422	packet_cached_dev_reset(po);
3423
3424	sk->sk_destruct = packet_sock_destruct;
3425
3426	/*
3427	 *	Attach a protocol block
3428	 */
3429
3430	spin_lock_init(&po->bind_lock);
3431	mutex_init(&po->pg_vec_lock);
3432	po->rollover = NULL;
3433	po->prot_hook.func = packet_rcv;
3434
3435	if (sock->type == SOCK_PACKET)
3436		po->prot_hook.func = packet_rcv_spkt;
3437
3438	po->prot_hook.af_packet_priv = sk;
3439	po->prot_hook.af_packet_net = sock_net(sk);
3440
3441	if (proto) {
3442		po->prot_hook.type = proto;
3443		__register_prot_hook(sk);
3444	}
3445
3446	mutex_lock(&net->packet.sklist_lock);
3447	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3448	mutex_unlock(&net->packet.sklist_lock);
3449
3450	sock_prot_inuse_add(net, &packet_proto, 1);
3451
3452	return 0;
3453out_sk_free:
3454	sk_free(sk);
3455out:
3456	return err;
3457}
3458
3459/*
3460 *	Pull a packet from our receive queue and hand it to the user.
3461 *	If necessary we block.
3462 */
3463
3464static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3465			  int flags)
3466{
3467	struct sock *sk = sock->sk;
3468	struct sk_buff *skb;
3469	int copied, err;
3470	int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz);
3471	unsigned int origlen = 0;
3472
3473	err = -EINVAL;
3474	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3475		goto out;
3476
3477#if 0
3478	/* What error should we return now? EUNATTACH? */
3479	if (pkt_sk(sk)->ifindex < 0)
3480		return -ENODEV;
3481#endif
3482
3483	if (flags & MSG_ERRQUEUE) {
3484		err = sock_recv_errqueue(sk, msg, len,
3485					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3486		goto out;
3487	}
3488
3489	/*
3490	 *	Call the generic datagram receiver. This handles all sorts
3491	 *	of horrible races and re-entrancy so we can forget about it
3492	 *	in the protocol layers.
3493	 *
3494	 *	Now it will return ENETDOWN if the device has just gone down,
3495	 *	but then it will block.
3496	 */
3497
3498	skb = skb_recv_datagram(sk, flags, &err);
3499
3500	/*
3501	 *	An error occurred so return it. Because skb_recv_datagram()
3502	 *	handles the blocking, we don't need to see or worry about
3503	 *	blocking retries.
3504	 */
3505
3506	if (skb == NULL)
3507		goto out;
3508
3509	packet_rcv_try_clear_pressure(pkt_sk(sk));
3510
3511	if (vnet_hdr_len) {
3512		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
3513		if (err)
3514			goto out_free;
3515	}
3516
3517	/* You lose any data beyond the buffer you gave. If it worries
3518	 * a user program, it can ask the device for its MTU
3519	 * anyway.
3520	 */
3521	copied = skb->len;
3522	if (copied > len) {
3523		copied = len;
3524		msg->msg_flags |= MSG_TRUNC;
3525	}
3526
3527	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3528	if (err)
3529		goto out_free;
3530
3531	if (sock->type != SOCK_PACKET) {
3532		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3533
3534		/* Original length was stored in sockaddr_ll fields */
3535		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3536		sll->sll_family = AF_PACKET;
3537		sll->sll_protocol = (sock->type == SOCK_DGRAM) ?
3538			vlan_get_protocol_dgram(skb) : skb->protocol;
3539	}
3540
3541	sock_recv_cmsgs(msg, sk, skb);
3542
3543	if (msg->msg_name) {
3544		const size_t max_len = min(sizeof(skb->cb),
3545					   sizeof(struct sockaddr_storage));
3546		int copy_len;
3547
3548		/* If the address length field is there to be filled
3549		 * in, we fill it in now.
3550		 */
3551		if (sock->type == SOCK_PACKET) {
3552			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3553			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3554			copy_len = msg->msg_namelen;
3555		} else {
3556			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3557
3558			msg->msg_namelen = sll->sll_halen +
3559				offsetof(struct sockaddr_ll, sll_addr);
3560			copy_len = msg->msg_namelen;
3561			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3562				memset(msg->msg_name +
3563				       offsetof(struct sockaddr_ll, sll_addr),
3564				       0, sizeof(sll->sll_addr));
3565				msg->msg_namelen = sizeof(struct sockaddr_ll);
3566			}
3567		}
3568		if (WARN_ON_ONCE(copy_len > max_len)) {
3569			copy_len = max_len;
3570			msg->msg_namelen = copy_len;
3571		}
3572		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3573	}
3574
3575	if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
3576		struct tpacket_auxdata aux;
3577
3578		aux.tp_status = TP_STATUS_USER;
3579		if (skb->ip_summed == CHECKSUM_PARTIAL)
3580			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3581		else if (skb->pkt_type != PACKET_OUTGOING &&
3582			 skb_csum_unnecessary(skb))
3583			aux.tp_status |= TP_STATUS_CSUM_VALID;
3584		if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
3585			aux.tp_status |= TP_STATUS_GSO_TCP;
3586
3587		aux.tp_len = origlen;
3588		aux.tp_snaplen = skb->len;
3589		aux.tp_mac = 0;
3590		aux.tp_net = skb_network_offset(skb);
3591		if (skb_vlan_tag_present(skb)) {
3592			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3593			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3594			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3595		} else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) {
3596			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3597			struct net_device *dev;
3598
3599			rcu_read_lock();
3600			dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex);
3601			if (dev) {
3602				aux.tp_vlan_tci = vlan_get_tci(skb, dev);
3603				aux.tp_vlan_tpid = ntohs(skb->protocol);
3604				aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3605			} else {
3606				aux.tp_vlan_tci = 0;
3607				aux.tp_vlan_tpid = 0;
3608			}
3609			rcu_read_unlock();
3610		} else {
3611			aux.tp_vlan_tci = 0;
3612			aux.tp_vlan_tpid = 0;
3613		}
3614		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3615	}
3616
3617	/*
3618	 *	Free or return the buffer as appropriate. Again this
3619	 *	hides all the races and re-entrancy issues from us.
3620	 */
3621	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3622
3623out_free:
3624	skb_free_datagram(sk, skb);
3625out:
3626	return err;
3627}
3628
3629static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3630			       int peer)
3631{
3632	struct net_device *dev;
3633	struct sock *sk	= sock->sk;
3634
3635	if (peer)
3636		return -EOPNOTSUPP;
3637
3638	uaddr->sa_family = AF_PACKET;
3639	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
3640	rcu_read_lock();
3641	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3642	if (dev)
3643		strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
3644	rcu_read_unlock();
3645
3646	return sizeof(*uaddr);
3647}
3648
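/* getsockname() handler: report the bound ifindex and protocol together with
 * the hardware type and address of the underlying device, if it still exists.
 */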
3649static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3650			  int peer)
3651{
3652	struct net_device *dev;
3653	struct sock *sk = sock->sk;
3654	struct packet_sock *po = pkt_sk(sk);
3655	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3656	int ifindex;
3657
3658	if (peer)
3659		return -EOPNOTSUPP;
3660
3661	ifindex = READ_ONCE(po->ifindex);
3662	sll->sll_family = AF_PACKET;
3663	sll->sll_ifindex = ifindex;
3664	sll->sll_protocol = READ_ONCE(po->num);
3665	sll->sll_pkttype = 0;
3666	rcu_read_lock();
3667	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3668	if (dev) {
3669		sll->sll_hatype = dev->type;
3670		sll->sll_halen = dev->addr_len;
3671
3672		/* Let __fortify_memcpy_chk() know the actual buffer size. */
3673		memcpy(((struct sockaddr_storage *)sll)->__data +
3674		       offsetof(struct sockaddr_ll, sll_addr) -
3675		       offsetofend(struct sockaddr_ll, sll_family),
3676		       dev->dev_addr, dev->addr_len);
3677	} else {
3678		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3679		sll->sll_halen = 0;
3680	}
3681	rcu_read_unlock();
3682
3683	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3684}
3685
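/* Apply (what > 0) or undo (what < 0) a packet_mclist entry on the device:
 * add/remove a multicast or unicast address, or adjust the promiscuity or
 * allmulti count.
 */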
3686static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3687			 int what)
3688{
3689	switch (i->type) {
3690	case PACKET_MR_MULTICAST:
3691		if (i->alen != dev->addr_len)
3692			return -EINVAL;
3693		if (what > 0)
3694			return dev_mc_add(dev, i->addr);
3695		else
3696			return dev_mc_del(dev, i->addr);
3697		break;
3698	case PACKET_MR_PROMISC:
3699		return dev_set_promiscuity(dev, what);
3700	case PACKET_MR_ALLMULTI:
3701		return dev_set_allmulti(dev, what);
3702	case PACKET_MR_UNICAST:
3703		if (i->alen != dev->addr_len)
3704			return -EINVAL;
3705		if (what > 0)
3706			return dev_uc_add(dev, i->addr);
3707		else
3708			return dev_uc_del(dev, i->addr);
3709		break;
3710	default:
3711		break;
3712	}
3713	return 0;
3714}
3715
3716static void packet_dev_mclist_delete(struct net_device *dev,
3717				     struct packet_mclist **mlp)
3718{
3719	struct packet_mclist *ml;
3720
3721	while ((ml = *mlp) != NULL) {
3722		if (ml->ifindex == dev->ifindex) {
3723			packet_dev_mc(dev, ml, -1);
3724			*mlp = ml->next;
3725			kfree(ml);
3726		} else
3727			mlp = &ml->next;
3728	}
3729}
3730
3731static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3732{
3733	struct packet_sock *po = pkt_sk(sk);
3734	struct packet_mclist *ml, *i;
3735	struct net_device *dev;
3736	int err;
3737
3738	rtnl_lock();
3739
3740	err = -ENODEV;
3741	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3742	if (!dev)
3743		goto done;
3744
3745	err = -EINVAL;
3746	if (mreq->mr_alen > dev->addr_len)
3747		goto done;
3748
3749	err = -ENOBUFS;
3750	i = kmalloc(sizeof(*i), GFP_KERNEL);
3751	if (i == NULL)
3752		goto done;
3753
3754	err = 0;
3755	for (ml = po->mclist; ml; ml = ml->next) {
3756		if (ml->ifindex == mreq->mr_ifindex &&
3757		    ml->type == mreq->mr_type &&
3758		    ml->alen == mreq->mr_alen &&
3759		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3760			ml->count++;
3761			/* Free the new element ... */
3762			kfree(i);
3763			goto done;
3764		}
3765	}
3766
3767	i->type = mreq->mr_type;
3768	i->ifindex = mreq->mr_ifindex;
3769	i->alen = mreq->mr_alen;
3770	memcpy(i->addr, mreq->mr_address, i->alen);
3771	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3772	i->count = 1;
3773	i->next = po->mclist;
3774	po->mclist = i;
3775	err = packet_dev_mc(dev, i, 1);
3776	if (err) {
3777		po->mclist = i->next;
3778		kfree(i);
3779	}
3780
3781done:
3782	rtnl_unlock();
3783	return err;
3784}
3785
3786static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3787{
3788	struct packet_mclist *ml, **mlp;
3789
3790	rtnl_lock();
3791
3792	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3793		if (ml->ifindex == mreq->mr_ifindex &&
3794		    ml->type == mreq->mr_type &&
3795		    ml->alen == mreq->mr_alen &&
3796		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3797			if (--ml->count == 0) {
3798				struct net_device *dev;
3799				*mlp = ml->next;
3800				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3801				if (dev)
3802					packet_dev_mc(dev, ml, -1);
3803				kfree(ml);
3804			}
3805			break;
3806		}
3807	}
3808	rtnl_unlock();
3809	return 0;
3810}
3811
3812static void packet_flush_mclist(struct sock *sk)
3813{
3814	struct packet_sock *po = pkt_sk(sk);
3815	struct packet_mclist *ml;
3816
3817	if (!po->mclist)
3818		return;
3819
3820	rtnl_lock();
3821	while ((ml = po->mclist) != NULL) {
3822		struct net_device *dev;
3823
3824		po->mclist = ml->next;
3825		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3826		if (dev != NULL)
3827			packet_dev_mc(dev, ml, -1);
3828		kfree(ml);
3829	}
3830	rtnl_unlock();
3831}
3832
3833static int
3834packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3835		  unsigned int optlen)
3836{
3837	struct sock *sk = sock->sk;
3838	struct packet_sock *po = pkt_sk(sk);
3839	int ret;
3840
3841	if (level != SOL_PACKET)
3842		return -ENOPROTOOPT;
3843
3844	switch (optname) {
3845	case PACKET_ADD_MEMBERSHIP:
3846	case PACKET_DROP_MEMBERSHIP:
3847	{
3848		struct packet_mreq_max mreq;
3849		int len = optlen;
3850		memset(&mreq, 0, sizeof(mreq));
3851		if (len < sizeof(struct packet_mreq))
3852			return -EINVAL;
3853		if (len > sizeof(mreq))
3854			len = sizeof(mreq);
3855		if (copy_from_sockptr(&mreq, optval, len))
3856			return -EFAULT;
3857		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3858			return -EINVAL;
3859		if (optname == PACKET_ADD_MEMBERSHIP)
3860			ret = packet_mc_add(sk, &mreq);
3861		else
3862			ret = packet_mc_drop(sk, &mreq);
3863		return ret;
3864	}
3865
3866	case PACKET_RX_RING:
3867	case PACKET_TX_RING:
3868	{
3869		union tpacket_req_u req_u;
3870
3871		ret = -EINVAL;
3872		lock_sock(sk);
3873		switch (po->tp_version) {
3874		case TPACKET_V1:
3875		case TPACKET_V2:
3876			if (optlen < sizeof(req_u.req))
3877				break;
3878			ret = copy_from_sockptr(&req_u.req, optval,
3879						sizeof(req_u.req)) ?
3880						-EINVAL : 0;
3881			break;
3882		case TPACKET_V3:
3883		default:
3884			if (optlen < sizeof(req_u.req3))
3885				break;
3886			ret = copy_from_sockptr(&req_u.req3, optval,
3887						sizeof(req_u.req3)) ?
3888						-EINVAL : 0;
3889			break;
3890		}
3891		if (!ret)
3892			ret = packet_set_ring(sk, &req_u, 0,
3893					      optname == PACKET_TX_RING);
3894		release_sock(sk);
3895		return ret;
3896	}
3897	case PACKET_COPY_THRESH:
3898	{
3899		int val;
3900
3901		if (optlen != sizeof(val))
3902			return -EINVAL;
3903		if (copy_from_sockptr(&val, optval, sizeof(val)))
3904			return -EFAULT;
3905
3906		WRITE_ONCE(pkt_sk(sk)->copy_thresh, val);
3907		return 0;
3908	}
3909	case PACKET_VERSION:
3910	{
3911		int val;
3912
3913		if (optlen != sizeof(val))
3914			return -EINVAL;
3915		if (copy_from_sockptr(&val, optval, sizeof(val)))
3916			return -EFAULT;
3917		switch (val) {
3918		case TPACKET_V1:
3919		case TPACKET_V2:
3920		case TPACKET_V3:
3921			break;
3922		default:
3923			return -EINVAL;
3924		}
3925		lock_sock(sk);
3926		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3927			ret = -EBUSY;
3928		} else {
3929			po->tp_version = val;
3930			ret = 0;
3931		}
3932		release_sock(sk);
3933		return ret;
3934	}
3935	case PACKET_RESERVE:
3936	{
3937		unsigned int val;
3938
3939		if (optlen != sizeof(val))
3940			return -EINVAL;
3941		if (copy_from_sockptr(&val, optval, sizeof(val)))
3942			return -EFAULT;
3943		if (val > INT_MAX)
3944			return -EINVAL;
3945		lock_sock(sk);
3946		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3947			ret = -EBUSY;
3948		} else {
3949			po->tp_reserve = val;
3950			ret = 0;
3951		}
3952		release_sock(sk);
3953		return ret;
3954	}
3955	case PACKET_LOSS:
3956	{
3957		unsigned int val;
3958
3959		if (optlen != sizeof(val))
3960			return -EINVAL;
3961		if (copy_from_sockptr(&val, optval, sizeof(val)))
3962			return -EFAULT;
3963
3964		lock_sock(sk);
3965		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3966			ret = -EBUSY;
3967		} else {
3968			packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3969			ret = 0;
3970		}
3971		release_sock(sk);
3972		return ret;
3973	}
3974	case PACKET_AUXDATA:
3975	{
3976		int val;
3977
3978		if (optlen < sizeof(val))
3979			return -EINVAL;
3980		if (copy_from_sockptr(&val, optval, sizeof(val)))
3981			return -EFAULT;
3982
3983		packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3984		return 0;
3985	}
3986	case PACKET_ORIGDEV:
3987	{
3988		int val;
3989
3990		if (optlen < sizeof(val))
3991			return -EINVAL;
3992		if (copy_from_sockptr(&val, optval, sizeof(val)))
3993			return -EFAULT;
3994
3995		packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
3996		return 0;
3997	}
3998	case PACKET_VNET_HDR:
3999	case PACKET_VNET_HDR_SZ:
4000	{
4001		int val, hdr_len;
4002
4003		if (sock->type != SOCK_RAW)
4004			return -EINVAL;
4005		if (optlen < sizeof(val))
4006			return -EINVAL;
4007		if (copy_from_sockptr(&val, optval, sizeof(val)))
4008			return -EFAULT;
4009
4010		if (optname == PACKET_VNET_HDR_SZ) {
4011			if (val && val != sizeof(struct virtio_net_hdr) &&
4012			    val != sizeof(struct virtio_net_hdr_mrg_rxbuf))
4013				return -EINVAL;
4014			hdr_len = val;
4015		} else {
4016			hdr_len = val ? sizeof(struct virtio_net_hdr) : 0;
4017		}
4018		lock_sock(sk);
4019		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
4020			ret = -EBUSY;
4021		} else {
4022			WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
4023			ret = 0;
4024		}
4025		release_sock(sk);
4026		return ret;
4027	}
4028	case PACKET_TIMESTAMP:
4029	{
4030		int val;
4031
4032		if (optlen != sizeof(val))
4033			return -EINVAL;
4034		if (copy_from_sockptr(&val, optval, sizeof(val)))
4035			return -EFAULT;
4036
4037		WRITE_ONCE(po->tp_tstamp, val);
4038		return 0;
4039	}
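	/*
	 * Illustrative sketch (userspace side): requesting hardware receive
	 * timestamps for ring frames; when no hardware timestamp is available
	 * the frame falls back to a software timestamp.
	 *
	 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
	 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
	 */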
4040	case PACKET_FANOUT:
4041	{
4042		struct fanout_args args = { 0 };
4043
4044		if (optlen != sizeof(int) && optlen != sizeof(args))
4045			return -EINVAL;
4046		if (copy_from_sockptr(&args, optval, optlen))
4047			return -EFAULT;
4048
4049		return fanout_add(sk, &args);
4050	}
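	/*
	 * Illustrative sketch (userspace side): each member socket joins the
	 * same fanout group; the group id (7 here) is an assumed value and
	 * the type selects flow-hash load balancing.
	 *
	 *	int arg = 7 | (PACKET_FANOUT_HASH << 16);
	 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
	 */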
4051	case PACKET_FANOUT_DATA:
4052	{
4053		/* Paired with the WRITE_ONCE() in fanout_add() */
4054		if (!READ_ONCE(po->fanout))
4055			return -EINVAL;
4056
4057		return fanout_set_data(po, optval, optlen);
4058	}
4059	case PACKET_IGNORE_OUTGOING:
4060	{
4061		int val;
4062
4063		if (optlen != sizeof(val))
4064			return -EINVAL;
4065		if (copy_from_sockptr(&val, optval, sizeof(val)))
4066			return -EFAULT;
4067		if (val < 0 || val > 1)
4068			return -EINVAL;
4069
4070		WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val);
4071		return 0;
4072	}
4073	case PACKET_TX_HAS_OFF:
4074	{
4075		unsigned int val;
4076
4077		if (optlen != sizeof(val))
4078			return -EINVAL;
4079		if (copy_from_sockptr(&val, optval, sizeof(val)))
4080			return -EFAULT;
4081
4082		lock_sock(sk);
4083		if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4084			packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
4085
4086		release_sock(sk);
4087		return 0;
4088	}
4089	case PACKET_QDISC_BYPASS:
4090	{
4091		int val;
4092
4093		if (optlen != sizeof(val))
4094			return -EINVAL;
4095		if (copy_from_sockptr(&val, optval, sizeof(val)))
4096			return -EFAULT;
4097
4098		packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
4099		return 0;
4100	}
4101	default:
4102		return -ENOPROTOOPT;
4103	}
4104}
4105
4106static int packet_getsockopt(struct socket *sock, int level, int optname,
4107			     char __user *optval, int __user *optlen)
4108{
4109	int len;
4110	int val, lv = sizeof(val);
4111	struct sock *sk = sock->sk;
4112	struct packet_sock *po = pkt_sk(sk);
4113	void *data = &val;
4114	union tpacket_stats_u st;
4115	struct tpacket_rollover_stats rstats;
4116	int drops;
4117
4118	if (level != SOL_PACKET)
4119		return -ENOPROTOOPT;
4120
4121	if (get_user(len, optlen))
4122		return -EFAULT;
4123
4124	if (len < 0)
4125		return -EINVAL;
4126
4127	switch (optname) {
4128	case PACKET_STATISTICS:
4129		spin_lock_bh(&sk->sk_receive_queue.lock);
4130		memcpy(&st, &po->stats, sizeof(st));
4131		memset(&po->stats, 0, sizeof(po->stats));
4132		spin_unlock_bh(&sk->sk_receive_queue.lock);
4133		drops = atomic_xchg(&po->tp_drops, 0);
4134
4135		if (po->tp_version == TPACKET_V3) {
4136			lv = sizeof(struct tpacket_stats_v3);
4137			st.stats3.tp_drops = drops;
4138			st.stats3.tp_packets += drops;
4139			data = &st.stats3;
4140		} else {
4141			lv = sizeof(struct tpacket_stats);
4142			st.stats1.tp_drops = drops;
4143			st.stats1.tp_packets += drops;
4144			data = &st.stats1;
4145		}
4146
4147		break;
4148	case PACKET_AUXDATA:
4149		val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4150		break;
4151	case PACKET_ORIGDEV:
4152		val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4153		break;
4154	case PACKET_VNET_HDR:
4155		val = !!READ_ONCE(po->vnet_hdr_sz);
4156		break;
4157	case PACKET_VNET_HDR_SZ:
4158		val = READ_ONCE(po->vnet_hdr_sz);
4159		break;
4160	case PACKET_COPY_THRESH:
4161		val = READ_ONCE(pkt_sk(sk)->copy_thresh);
4162		break;
4163	case PACKET_VERSION:
4164		val = po->tp_version;
4165		break;
4166	case PACKET_HDRLEN:
4167		if (len > sizeof(int))
4168			len = sizeof(int);
4169		if (len < sizeof(int))
4170			return -EINVAL;
4171		if (copy_from_user(&val, optval, len))
4172			return -EFAULT;
4173		switch (val) {
4174		case TPACKET_V1:
4175			val = sizeof(struct tpacket_hdr);
4176			break;
4177		case TPACKET_V2:
4178			val = sizeof(struct tpacket2_hdr);
4179			break;
4180		case TPACKET_V3:
4181			val = sizeof(struct tpacket3_hdr);
4182			break;
4183		default:
4184			return -EINVAL;
4185		}
4186		break;
4187	case PACKET_RESERVE:
4188		val = po->tp_reserve;
4189		break;
4190	case PACKET_LOSS:
4191		val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4192		break;
4193	case PACKET_TIMESTAMP:
4194		val = READ_ONCE(po->tp_tstamp);
4195		break;
4196	case PACKET_FANOUT:
4197		val = (po->fanout ?
4198		       ((u32)po->fanout->id |
4199			((u32)po->fanout->type << 16) |
4200			((u32)po->fanout->flags << 24)) :
4201		       0);
4202		break;
4203	case PACKET_IGNORE_OUTGOING:
4204		val = READ_ONCE(po->prot_hook.ignore_outgoing);
4205		break;
4206	case PACKET_ROLLOVER_STATS:
4207		if (!po->rollover)
4208			return -EINVAL;
4209		rstats.tp_all = atomic_long_read(&po->rollover->num);
4210		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4211		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4212		data = &rstats;
4213		lv = sizeof(rstats);
4214		break;
4215	case PACKET_TX_HAS_OFF:
4216		val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4217		break;
4218	case PACKET_QDISC_BYPASS:
4219		val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
4220		break;
4221	default:
4222		return -ENOPROTOOPT;
4223	}
4224
4225	if (len > lv)
4226		len = lv;
4227	if (put_user(len, optlen))
4228		return -EFAULT;
4229	if (copy_to_user(optval, data, len))
4230		return -EFAULT;
4231	return 0;
4232}
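/*
 * Illustrative sketch (userspace side): PACKET_STATISTICS reads and clears
 * the counters gathered above; TPACKET_V3 sockets receive the larger
 * struct tpacket_stats_v3 instead.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *	getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len);
 *	// st.tp_packets already includes st.tp_drops, as composed above
 */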
4233
4234static int packet_notifier(struct notifier_block *this,
4235			   unsigned long msg, void *ptr)
4236{
4237	struct sock *sk;
4238	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4239	struct net *net = dev_net(dev);
4240
4241	rcu_read_lock();
4242	sk_for_each_rcu(sk, &net->packet.sklist) {
4243		struct packet_sock *po = pkt_sk(sk);
4244
4245		switch (msg) {
4246		case NETDEV_UNREGISTER:
4247			if (po->mclist)
4248				packet_dev_mclist_delete(dev, &po->mclist);
4249			fallthrough;
4250
4251		case NETDEV_DOWN:
4252			if (dev->ifindex == po->ifindex) {
4253				spin_lock(&po->bind_lock);
4254				if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
4255					__unregister_prot_hook(sk, false);
4256					sk->sk_err = ENETDOWN;
4257					if (!sock_flag(sk, SOCK_DEAD))
4258						sk_error_report(sk);
4259				}
4260				if (msg == NETDEV_UNREGISTER) {
4261					packet_cached_dev_reset(po);
4262					WRITE_ONCE(po->ifindex, -1);
4263					netdev_put(po->prot_hook.dev,
4264						   &po->prot_hook.dev_tracker);
4265					po->prot_hook.dev = NULL;
4266				}
4267				spin_unlock(&po->bind_lock);
4268			}
4269			break;
4270		case NETDEV_UP:
4271			if (dev->ifindex == po->ifindex) {
4272				spin_lock(&po->bind_lock);
4273				if (po->num)
4274					register_prot_hook(sk);
4275				spin_unlock(&po->bind_lock);
4276			}
4277			break;
4278		}
4279	}
4280	rcu_read_unlock();
4281	return NOTIFY_DONE;
4282}
4283
4284
4285static int packet_ioctl(struct socket *sock, unsigned int cmd,
4286			unsigned long arg)
4287{
4288	struct sock *sk = sock->sk;
4289
4290	switch (cmd) {
4291	case SIOCOUTQ:
4292	{
4293		int amount = sk_wmem_alloc_get(sk);
4294
4295		return put_user(amount, (int __user *)arg);
4296	}
4297	case SIOCINQ:
4298	{
4299		struct sk_buff *skb;
4300		int amount = 0;
4301
4302		spin_lock_bh(&sk->sk_receive_queue.lock);
4303		skb = skb_peek(&sk->sk_receive_queue);
4304		if (skb)
4305			amount = skb->len;
4306		spin_unlock_bh(&sk->sk_receive_queue.lock);
4307		return put_user(amount, (int __user *)arg);
4308	}
4309#ifdef CONFIG_INET
4310	case SIOCADDRT:
4311	case SIOCDELRT:
4312	case SIOCDARP:
4313	case SIOCGARP:
4314	case SIOCSARP:
4315	case SIOCGIFADDR:
4316	case SIOCSIFADDR:
4317	case SIOCGIFBRDADDR:
4318	case SIOCSIFBRDADDR:
4319	case SIOCGIFNETMASK:
4320	case SIOCSIFNETMASK:
4321	case SIOCGIFDSTADDR:
4322	case SIOCSIFDSTADDR:
4323	case SIOCSIFFLAGS:
4324		return inet_dgram_ops.ioctl(sock, cmd, arg);
4325#endif
4326
4327	default:
4328		return -ENOIOCTLCMD;
4329	}
4330	return 0;
4331}
4332
4333static __poll_t packet_poll(struct file *file, struct socket *sock,
4334				poll_table *wait)
4335{
4336	struct sock *sk = sock->sk;
4337	struct packet_sock *po = pkt_sk(sk);
4338	__poll_t mask = datagram_poll(file, sock, wait);
4339
4340	spin_lock_bh(&sk->sk_receive_queue.lock);
4341	if (po->rx_ring.pg_vec) {
4342		if (!packet_previous_rx_frame(po, &po->rx_ring,
4343			TP_STATUS_KERNEL))
4344			mask |= EPOLLIN | EPOLLRDNORM;
4345	}
4346	packet_rcv_try_clear_pressure(po);
4347	spin_unlock_bh(&sk->sk_receive_queue.lock);
4348	spin_lock_bh(&sk->sk_write_queue.lock);
4349	if (po->tx_ring.pg_vec) {
4350		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4351			mask |= EPOLLOUT | EPOLLWRNORM;
4352	}
4353	spin_unlock_bh(&sk->sk_write_queue.lock);
4354	return mask;
4355}
4356
4357
4358/* Dirty? Well, I still have not found a better way to account
4359 * for user mmaps.
4360 */
4361
4362static void packet_mm_open(struct vm_area_struct *vma)
4363{
4364	struct file *file = vma->vm_file;
4365	struct socket *sock = file->private_data;
4366	struct sock *sk = sock->sk;
4367
4368	if (sk)
4369		atomic_long_inc(&pkt_sk(sk)->mapped);
4370}
4371
4372static void packet_mm_close(struct vm_area_struct *vma)
4373{
4374	struct file *file = vma->vm_file;
4375	struct socket *sock = file->private_data;
4376	struct sock *sk = sock->sk;
4377
4378	if (sk)
4379		atomic_long_dec(&pkt_sk(sk)->mapped);
4380}
4381
4382static const struct vm_operations_struct packet_mmap_ops = {
4383	.open	=	packet_mm_open,
4384	.close	=	packet_mm_close,
4385};
4386
4387static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4388			unsigned int len)
4389{
4390	int i;
4391
4392	for (i = 0; i < len; i++) {
4393		if (likely(pg_vec[i].buffer)) {
4394			if (is_vmalloc_addr(pg_vec[i].buffer))
4395				vfree(pg_vec[i].buffer);
4396			else
4397				free_pages((unsigned long)pg_vec[i].buffer,
4398					   order);
4399			pg_vec[i].buffer = NULL;
4400		}
4401	}
4402	kfree(pg_vec);
4403}
4404
4405static char *alloc_one_pg_vec_page(unsigned long order)
4406{
4407	char *buffer;
4408	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4409			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4410
4411	buffer = (char *) __get_free_pages(gfp_flags, order);
4412	if (buffer)
4413		return buffer;
4414
4415	/* __get_free_pages failed, fall back to vmalloc */
4416	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4417	if (buffer)
4418		return buffer;
4419
4420	/* vmalloc failed, let's dig into swap here */
4421	gfp_flags &= ~__GFP_NORETRY;
4422	buffer = (char *) __get_free_pages(gfp_flags, order);
4423	if (buffer)
4424		return buffer;
4425
4426	/* complete and utter failure */
4427	return NULL;
4428}
4429
4430static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4431{
4432	unsigned int block_nr = req->tp_block_nr;
4433	struct pgv *pg_vec;
4434	int i;
4435
4436	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4437	if (unlikely(!pg_vec))
4438		goto out;
4439
4440	for (i = 0; i < block_nr; i++) {
4441		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4442		if (unlikely(!pg_vec[i].buffer))
4443			goto out_free_pgvec;
4444	}
4445
4446out:
4447	return pg_vec;
4448
4449out_free_pgvec:
4450	free_pg_vec(pg_vec, order, block_nr);
4451	pg_vec = NULL;
4452	goto out;
4453}
4454
4455static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4456		int closing, int tx_ring)
4457{
4458	struct pgv *pg_vec = NULL;
4459	struct packet_sock *po = pkt_sk(sk);
4460	unsigned long *rx_owner_map = NULL;
4461	int was_running, order = 0;
4462	struct packet_ring_buffer *rb;
4463	struct sk_buff_head *rb_queue;
4464	__be16 num;
4465	int err;
4466	/* Added to keep code churn to a minimum */
4467	struct tpacket_req *req = &req_u->req;
4468
4469	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4470	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4471
4472	err = -EBUSY;
4473	if (!closing) {
4474		if (atomic_long_read(&po->mapped))
4475			goto out;
4476		if (packet_read_pending(rb))
4477			goto out;
4478	}
4479
4480	if (req->tp_block_nr) {
4481		unsigned int min_frame_size;
4482
4483		/* Sanity tests and some calculations */
4484		err = -EBUSY;
4485		if (unlikely(rb->pg_vec))
4486			goto out;
4487
4488		switch (po->tp_version) {
4489		case TPACKET_V1:
4490			po->tp_hdrlen = TPACKET_HDRLEN;
4491			break;
4492		case TPACKET_V2:
4493			po->tp_hdrlen = TPACKET2_HDRLEN;
4494			break;
4495		case TPACKET_V3:
4496			po->tp_hdrlen = TPACKET3_HDRLEN;
4497			break;
4498		}
4499
4500		err = -EINVAL;
4501		if (unlikely((int)req->tp_block_size <= 0))
4502			goto out;
4503		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4504			goto out;
4505		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4506		if (po->tp_version >= TPACKET_V3 &&
4507		    req->tp_block_size <
4508		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4509			goto out;
4510		if (unlikely(req->tp_frame_size < min_frame_size))
4511			goto out;
4512		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4513			goto out;
4514
4515		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4516		if (unlikely(rb->frames_per_block == 0))
4517			goto out;
4518		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4519			goto out;
4520		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4521					req->tp_frame_nr))
4522			goto out;
4523
4524		err = -ENOMEM;
4525		order = get_order(req->tp_block_size);
4526		pg_vec = alloc_pg_vec(req, order);
4527		if (unlikely(!pg_vec))
4528			goto out;
4529		switch (po->tp_version) {
4530		case TPACKET_V3:
4531			/* Block transmit is not supported yet */
4532			if (!tx_ring) {
4533				init_prb_bdqc(po, rb, pg_vec, req_u);
4534			} else {
4535				struct tpacket_req3 *req3 = &req_u->req3;
4536
4537				if (req3->tp_retire_blk_tov ||
4538				    req3->tp_sizeof_priv ||
4539				    req3->tp_feature_req_word) {
4540					err = -EINVAL;
4541					goto out_free_pg_vec;
4542				}
4543			}
4544			break;
4545		default:
4546			if (!tx_ring) {
4547				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4548					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4549				if (!rx_owner_map)
4550					goto out_free_pg_vec;
4551			}
4552			break;
4553		}
4554	}
4555	/* Done */
4556	else {
4557		err = -EINVAL;
4558		if (unlikely(req->tp_frame_nr))
4559			goto out;
4560	}
4561
4562
4563	/* Detach socket from network */
4564	spin_lock(&po->bind_lock);
4565	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
4566	num = po->num;
4567	if (was_running) {
4568		WRITE_ONCE(po->num, 0);
4569		__unregister_prot_hook(sk, false);
4570	}
4571	spin_unlock(&po->bind_lock);
4572
4573	synchronize_net();
4574
4575	err = -EBUSY;
4576	mutex_lock(&po->pg_vec_lock);
4577	if (closing || atomic_long_read(&po->mapped) == 0) {
4578		err = 0;
4579		spin_lock_bh(&rb_queue->lock);
4580		swap(rb->pg_vec, pg_vec);
4581		if (po->tp_version <= TPACKET_V2)
4582			swap(rb->rx_owner_map, rx_owner_map);
4583		rb->frame_max = (req->tp_frame_nr - 1);
4584		rb->head = 0;
4585		rb->frame_size = req->tp_frame_size;
4586		spin_unlock_bh(&rb_queue->lock);
4587
4588		swap(rb->pg_vec_order, order);
4589		swap(rb->pg_vec_len, req->tp_block_nr);
4590
4591		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4592		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4593						tpacket_rcv : packet_rcv;
4594		skb_queue_purge(rb_queue);
4595		if (atomic_long_read(&po->mapped))
4596			pr_err("packet_mmap: vma is busy: %ld\n",
4597			       atomic_long_read(&po->mapped));
4598	}
4599	mutex_unlock(&po->pg_vec_lock);
4600
4601	spin_lock(&po->bind_lock);
4602	if (was_running) {
4603		WRITE_ONCE(po->num, num);
4604		register_prot_hook(sk);
4605	}
4606	spin_unlock(&po->bind_lock);
4607	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4608		/* Because we don't support block-based V3 on tx-ring */
4609		if (!tx_ring)
4610			prb_shutdown_retire_blk_timer(po, rb_queue);
4611	}
4612
4613out_free_pg_vec:
4614	if (pg_vec) {
4615		bitmap_free(rx_owner_map);
4616		free_pg_vec(pg_vec, order, req->tp_block_nr);
4617	}
4618out:
4619	return err;
4620}
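/*
 * Worked example of the ring sanity checks above (assumed numbers): with
 * tp_block_size = 4096 and tp_frame_size = 2048, frames_per_block is 2, so
 * a request with tp_block_nr = 8 is only accepted when tp_frame_nr is
 * exactly 2 * 8 = 16; anything else fails with -EINVAL.
 */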
4621
4622static int packet_mmap(struct file *file, struct socket *sock,
4623		struct vm_area_struct *vma)
4624{
4625	struct sock *sk = sock->sk;
4626	struct packet_sock *po = pkt_sk(sk);
4627	unsigned long size, expected_size;
4628	struct packet_ring_buffer *rb;
4629	unsigned long start;
4630	int err = -EINVAL;
4631	int i;
4632
4633	if (vma->vm_pgoff)
4634		return -EINVAL;
4635
4636	mutex_lock(&po->pg_vec_lock);
4637
4638	expected_size = 0;
4639	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4640		if (rb->pg_vec) {
4641			expected_size += rb->pg_vec_len
4642						* rb->pg_vec_pages
4643						* PAGE_SIZE;
4644		}
4645	}
4646
4647	if (expected_size == 0)
4648		goto out;
4649
4650	size = vma->vm_end - vma->vm_start;
4651	if (size != expected_size)
4652		goto out;
4653
4654	start = vma->vm_start;
4655	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4656		if (rb->pg_vec == NULL)
4657			continue;
4658
4659		for (i = 0; i < rb->pg_vec_len; i++) {
4660			struct page *page;
4661			void *kaddr = rb->pg_vec[i].buffer;
4662			int pg_num;
4663
4664			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4665				page = pgv_to_page(kaddr);
4666				err = vm_insert_page(vma, start, page);
4667				if (unlikely(err))
4668					goto out;
4669				start += PAGE_SIZE;
4670				kaddr += PAGE_SIZE;
4671			}
4672		}
4673	}
4674
4675	atomic_long_inc(&po->mapped);
4676	vma->vm_ops = &packet_mmap_ops;
4677	err = 0;
4678
4679out:
4680	mutex_unlock(&po->pg_vec_lock);
4681	return err;
4682}
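/*
 * Illustrative sketch (userspace side): both rings are mapped with a single
 * mmap() whose length is the sum of the RX and TX ring sizes (RX first),
 * matching the expected_size computed above; req_rx/req_tx stand for the
 * values previously passed to PACKET_RX_RING/PACKET_TX_RING.
 *
 *	size_t rx_len = (size_t)req_rx.tp_block_size * req_rx.tp_block_nr;
 *	size_t tx_len = (size_t)req_tx.tp_block_size * req_tx.tp_block_nr;
 *	void *ring = mmap(NULL, rx_len + tx_len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 */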
4683
4684static const struct proto_ops packet_ops_spkt = {
4685	.family =	PF_PACKET,
4686	.owner =	THIS_MODULE,
4687	.release =	packet_release,
4688	.bind =		packet_bind_spkt,
4689	.connect =	sock_no_connect,
4690	.socketpair =	sock_no_socketpair,
4691	.accept =	sock_no_accept,
4692	.getname =	packet_getname_spkt,
4693	.poll =		datagram_poll,
4694	.ioctl =	packet_ioctl,
4695	.gettstamp =	sock_gettstamp,
4696	.listen =	sock_no_listen,
4697	.shutdown =	sock_no_shutdown,
4698	.sendmsg =	packet_sendmsg_spkt,
4699	.recvmsg =	packet_recvmsg,
4700	.mmap =		sock_no_mmap,
4701};
4702
4703static const struct proto_ops packet_ops = {
4704	.family =	PF_PACKET,
4705	.owner =	THIS_MODULE,
4706	.release =	packet_release,
4707	.bind =		packet_bind,
4708	.connect =	sock_no_connect,
4709	.socketpair =	sock_no_socketpair,
4710	.accept =	sock_no_accept,
4711	.getname =	packet_getname,
4712	.poll =		packet_poll,
4713	.ioctl =	packet_ioctl,
4714	.gettstamp =	sock_gettstamp,
4715	.listen =	sock_no_listen,
4716	.shutdown =	sock_no_shutdown,
4717	.setsockopt =	packet_setsockopt,
4718	.getsockopt =	packet_getsockopt,
4719	.sendmsg =	packet_sendmsg,
4720	.recvmsg =	packet_recvmsg,
4721	.mmap =		packet_mmap,
4722};
4723
4724static const struct net_proto_family packet_family_ops = {
4725	.family =	PF_PACKET,
4726	.create =	packet_create,
4727	.owner	=	THIS_MODULE,
4728};
4729
4730static struct notifier_block packet_netdev_notifier = {
4731	.notifier_call =	packet_notifier,
4732};
4733
4734#ifdef CONFIG_PROC_FS
4735
4736static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4737	__acquires(RCU)
4738{
4739	struct net *net = seq_file_net(seq);
4740
4741	rcu_read_lock();
4742	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4743}
4744
4745static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4746{
4747	struct net *net = seq_file_net(seq);
4748	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4749}
4750
4751static void packet_seq_stop(struct seq_file *seq, void *v)
4752	__releases(RCU)
4753{
4754	rcu_read_unlock();
4755}
4756
4757static int packet_seq_show(struct seq_file *seq, void *v)
4758{
4759	if (v == SEQ_START_TOKEN)
4760		seq_printf(seq,
4761			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
4762			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4763	else {
4764		struct sock *s = sk_entry(v);
4765		const struct packet_sock *po = pkt_sk(s);
4766
4767		seq_printf(seq,
4768			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4769			   s,
4770			   refcount_read(&s->sk_refcnt),
4771			   s->sk_type,
4772			   ntohs(READ_ONCE(po->num)),
4773			   READ_ONCE(po->ifindex),
4774			   packet_sock_flag(po, PACKET_SOCK_RUNNING),
4775			   atomic_read(&s->sk_rmem_alloc),
4776			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4777			   sock_i_ino(s));
4778	}
4779
4780	return 0;
4781}
4782
4783static const struct seq_operations packet_seq_ops = {
4784	.start	= packet_seq_start,
4785	.next	= packet_seq_next,
4786	.stop	= packet_seq_stop,
4787	.show	= packet_seq_show,
4788};
4789#endif
4790
4791static int __net_init packet_net_init(struct net *net)
4792{
4793	mutex_init(&net->packet.sklist_lock);
4794	INIT_HLIST_HEAD(&net->packet.sklist);
4795
4796#ifdef CONFIG_PROC_FS
4797	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4798			sizeof(struct seq_net_private)))
4799		return -ENOMEM;
4800#endif /* CONFIG_PROC_FS */
4801
4802	return 0;
4803}
4804
4805static void __net_exit packet_net_exit(struct net *net)
4806{
4807	remove_proc_entry("packet", net->proc_net);
4808	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4809}
4810
4811static struct pernet_operations packet_net_ops = {
4812	.init = packet_net_init,
4813	.exit = packet_net_exit,
4814};
4815
4816
4817static void __exit packet_exit(void)
4818{
4819	sock_unregister(PF_PACKET);
4820	proto_unregister(&packet_proto);
4821	unregister_netdevice_notifier(&packet_netdev_notifier);
4822	unregister_pernet_subsys(&packet_net_ops);
4823}
4824
4825static int __init packet_init(void)
4826{
4827	int rc;
4828
4829	rc = register_pernet_subsys(&packet_net_ops);
4830	if (rc)
4831		goto out;
4832	rc = register_netdevice_notifier(&packet_netdev_notifier);
4833	if (rc)
4834		goto out_pernet;
4835	rc = proto_register(&packet_proto, 0);
4836	if (rc)
4837		goto out_notifier;
4838	rc = sock_register(&packet_family_ops);
4839	if (rc)
4840		goto out_proto;
4841
4842	return 0;
4843
4844out_proto:
4845	proto_unregister(&packet_proto);
4846out_notifier:
4847	unregister_netdevice_notifier(&packet_netdev_notifier);
4848out_pernet:
4849	unregister_pernet_subsys(&packet_net_ops);
4850out:
4851	return rc;
4852}
4853
4854module_init(packet_init);
4855module_exit(packet_exit);
4856MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
4857MODULE_LICENSE("GPL");
4858MODULE_ALIAS_NETPROTO(PF_PACKET);
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#include <linux/types.h>
  50#include <linux/mm.h>
  51#include <linux/capability.h>
  52#include <linux/fcntl.h>
  53#include <linux/socket.h>
  54#include <linux/in.h>
  55#include <linux/inet.h>
  56#include <linux/netdevice.h>
  57#include <linux/if_packet.h>
  58#include <linux/wireless.h>
  59#include <linux/kernel.h>
  60#include <linux/kmod.h>
  61#include <linux/slab.h>
  62#include <linux/vmalloc.h>
  63#include <net/net_namespace.h>
  64#include <net/ip.h>
  65#include <net/protocol.h>
  66#include <linux/skbuff.h>
  67#include <net/sock.h>
  68#include <linux/errno.h>
  69#include <linux/timer.h>
  70#include <linux/uaccess.h>
  71#include <asm/ioctls.h>
  72#include <asm/page.h>
  73#include <asm/cacheflush.h>
  74#include <asm/io.h>
  75#include <linux/proc_fs.h>
  76#include <linux/seq_file.h>
  77#include <linux/poll.h>
  78#include <linux/module.h>
  79#include <linux/init.h>
  80#include <linux/mutex.h>
  81#include <linux/if_vlan.h>
  82#include <linux/virtio_net.h>
  83#include <linux/errqueue.h>
  84#include <linux/net_tstamp.h>
  85#include <linux/percpu.h>
  86#ifdef CONFIG_INET
  87#include <net/inet_common.h>
  88#endif
  89#include <linux/bpf.h>
  90#include <net/compat.h>
  91
  92#include "internal.h"
  93
  94/*
  95   Assumptions:
  96   - if device has no dev->hard_header routine, it adds and removes ll header
  97     inside itself. In this case ll header is invisible outside of device,
  98     but higher levels still should reserve dev->hard_header_len.
  99     Some devices are clever enough to reallocate the skb when the header
 100     will not fit into the reserved space (tunnels); other ones are silly
 101     (PPP).
 102   - packet socket receives packets with pulled ll header,
 103     so that SOCK_RAW should push it back.
 104
 105On receive:
 106-----------
 107
 108Incoming, dev->hard_header!=NULL
 109   mac_header -> ll header
 110   data       -> data
 111
 112Outgoing, dev->hard_header!=NULL
 113   mac_header -> ll header
 114   data       -> ll header
 115
 116Incoming, dev->hard_header==NULL
 117   mac_header -> UNKNOWN position. It is very likely that it points to the ll
 118		 header.  PPP does this, which is wrong, because it introduces
 119		 asymmetry between the rx and tx paths.
 120   data       -> data
 121
 122Outgoing, dev->hard_header==NULL
 123   mac_header -> data. ll header is still not built!
 124   data       -> data
 125
 126Summary
 127  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
 128
 129
 130On transmit:
 131------------
 132
 133dev->hard_header != NULL
 134   mac_header -> ll header
 135   data       -> ll header
 136
 137dev->hard_header == NULL (ll header is added by device, we cannot control it)
 138   mac_header -> data
 139   data       -> data
 140
 141   We should set nh.raw on output to the correct position,
 142   packet classifier depends on it.
 143 */
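/*
 * Illustrative consequence for userspace (sketch): a SOCK_RAW packet socket
 * delivers each frame starting at the link-layer header, while SOCK_DGRAM
 * delivers it starting at the network header.
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */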
 144
 145/* Private packet socket structures. */
 146
 147/* identical to struct packet_mreq except it has
 148 * a longer address field.
 149 */
 150struct packet_mreq_max {
 151	int		mr_ifindex;
 152	unsigned short	mr_type;
 153	unsigned short	mr_alen;
 154	unsigned char	mr_address[MAX_ADDR_LEN];
 155};
 156
 157union tpacket_uhdr {
 158	struct tpacket_hdr  *h1;
 159	struct tpacket2_hdr *h2;
 160	struct tpacket3_hdr *h3;
 161	void *raw;
 162};
 163
 164static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 165		int closing, int tx_ring);
 166
 167#define V3_ALIGNMENT	(8)
 168
 169#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 170
 171#define BLK_PLUS_PRIV(sz_of_priv) \
 172	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 173
 174#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 175#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 176#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 177#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 178#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 179#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 180#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 181
 182struct packet_sock;
 183static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 184		       struct packet_type *pt, struct net_device *orig_dev);
 185
 186static void *packet_previous_frame(struct packet_sock *po,
 187		struct packet_ring_buffer *rb,
 188		int status);
 189static void packet_increment_head(struct packet_ring_buffer *buff);
 190static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 191static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 192			struct packet_sock *);
 193static void prb_retire_current_block(struct tpacket_kbdq_core *,
 194		struct packet_sock *, unsigned int status);
 195static int prb_queue_frozen(struct tpacket_kbdq_core *);
 196static void prb_open_block(struct tpacket_kbdq_core *,
 197		struct tpacket_block_desc *);
 198static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 199static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 200static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 201static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 202		struct tpacket3_hdr *);
 203static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 204		struct tpacket3_hdr *);
 205static void packet_flush_mclist(struct sock *sk);
 206static u16 packet_pick_tx_queue(struct sk_buff *skb);
 207
 208struct packet_skb_cb {
 209	union {
 210		struct sockaddr_pkt pkt;
 211		union {
 212			/* Trick: alias skb original length with
 213			 * ll.sll_family and ll.protocol in order
 214			 * to save room.
 215			 */
 216			unsigned int origlen;
 217			struct sockaddr_ll ll;
 218		};
 219	} sa;
 220};
 221
 222#define vio_le() virtio_legacy_is_little_endian()
 223
 224#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 225
 226#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 227#define GET_PBLOCK_DESC(x, bid)	\
 228	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 229#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 230	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 231#define GET_NEXT_PRB_BLK_NUM(x) \
 232	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 233	((x)->kactive_blk_num+1) : 0)
 234
 235static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 236static void __fanout_link(struct sock *sk, struct packet_sock *po);
 237
 238static int packet_direct_xmit(struct sk_buff *skb)
 239{
 240	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 241}
 242
 243static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 244{
 245	struct net_device *dev;
 246
 247	rcu_read_lock();
 248	dev = rcu_dereference(po->cached_dev);
 249	if (likely(dev))
 250		dev_hold(dev);
 251	rcu_read_unlock();
 252
 253	return dev;
 254}
 255
 256static void packet_cached_dev_assign(struct packet_sock *po,
 257				     struct net_device *dev)
 258{
 259	rcu_assign_pointer(po->cached_dev, dev);
 260}
 261
 262static void packet_cached_dev_reset(struct packet_sock *po)
 263{
 264	RCU_INIT_POINTER(po->cached_dev, NULL);
 265}
 266
 267static bool packet_use_direct_xmit(const struct packet_sock *po)
 268{
 269	return po->xmit == packet_direct_xmit;
 270}
 271
 272static u16 packet_pick_tx_queue(struct sk_buff *skb)
 273{
 274	struct net_device *dev = skb->dev;
 275	const struct net_device_ops *ops = dev->netdev_ops;
 276	int cpu = raw_smp_processor_id();
 277	u16 queue_index;
 278
 279#ifdef CONFIG_XPS
 280	skb->sender_cpu = cpu + 1;
 281#endif
 282	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 283	if (ops->ndo_select_queue) {
 284		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 285		queue_index = netdev_cap_txqueue(dev, queue_index);
 286	} else {
 287		queue_index = netdev_pick_tx(dev, skb, NULL);
 288	}
 289
 290	return queue_index;
 291}
 292
 293/* __register_prot_hook must be invoked through register_prot_hook
 294 * or from a context in which asynchronous accesses to the packet
 295 * socket is not possible (packet_create()).
 296 */
 297static void __register_prot_hook(struct sock *sk)
 298{
 299	struct packet_sock *po = pkt_sk(sk);
 300
 301	if (!po->running) {
 302		if (po->fanout)
 303			__fanout_link(sk, po);
 304		else
 305			dev_add_pack(&po->prot_hook);
 306
 307		sock_hold(sk);
 308		po->running = 1;
 309	}
 310}
 311
 312static void register_prot_hook(struct sock *sk)
 313{
 314	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 315	__register_prot_hook(sk);
 316}
 317
 318/* If the sync parameter is true, we will temporarily drop
 319 * the po->bind_lock and do a synchronize_net to make sure no
 320 * asynchronous packet processing paths still refer to the elements
 321 * of po->prot_hook.  If the sync parameter is false, it is the
 322 * callers responsibility to take care of this.
 323 */
 324static void __unregister_prot_hook(struct sock *sk, bool sync)
 325{
 326	struct packet_sock *po = pkt_sk(sk);
 327
 328	lockdep_assert_held_once(&po->bind_lock);
 329
 330	po->running = 0;
 331
 332	if (po->fanout)
 333		__fanout_unlink(sk, po);
 334	else
 335		__dev_remove_pack(&po->prot_hook);
 336
 337	__sock_put(sk);
 338
 339	if (sync) {
 340		spin_unlock(&po->bind_lock);
 341		synchronize_net();
 342		spin_lock(&po->bind_lock);
 343	}
 344}
 345
 346static void unregister_prot_hook(struct sock *sk, bool sync)
 347{
 348	struct packet_sock *po = pkt_sk(sk);
 349
 350	if (po->running)
 351		__unregister_prot_hook(sk, sync);
 352}
 353
 354static inline struct page * __pure pgv_to_page(void *addr)
 355{
 356	if (is_vmalloc_addr(addr))
 357		return vmalloc_to_page(addr);
 358	return virt_to_page(addr);
 359}
 360
 361static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 362{
 363	union tpacket_uhdr h;
 364
 365	h.raw = frame;
 366	switch (po->tp_version) {
 367	case TPACKET_V1:
 368		h.h1->tp_status = status;
 369		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 370		break;
 371	case TPACKET_V2:
 372		h.h2->tp_status = status;
 373		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 374		break;
 375	case TPACKET_V3:
 376		h.h3->tp_status = status;
 377		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 378		break;
 379	default:
 380		WARN(1, "TPACKET version not supported.\n");
 381		BUG();
 382	}
 383
 384	smp_wmb();
 385}
 386
 387static int __packet_get_status(const struct packet_sock *po, void *frame)
 388{
 389	union tpacket_uhdr h;
 390
 391	smp_rmb();
 392
 393	h.raw = frame;
 394	switch (po->tp_version) {
 395	case TPACKET_V1:
 396		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 397		return h.h1->tp_status;
 398	case TPACKET_V2:
 399		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 400		return h.h2->tp_status;
 401	case TPACKET_V3:
 402		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 403		return h.h3->tp_status;
 404	default:
 405		WARN(1, "TPACKET version not supported.\n");
 406		BUG();
 407		return 0;
 408	}
 409}
 410
 411static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
 412				   unsigned int flags)
 413{
 414	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 415
 416	if (shhwtstamps &&
 417	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 418	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
 419		return TP_STATUS_TS_RAW_HARDWARE;
 420
 421	if (ktime_to_timespec_cond(skb->tstamp, ts))
 422		return TP_STATUS_TS_SOFTWARE;
 423
 424	return 0;
 425}
 426
 427static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 428				    struct sk_buff *skb)
 429{
 430	union tpacket_uhdr h;
 431	struct timespec ts;
 432	__u32 ts_status;
 433
 434	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 435		return 0;
 436
 437	h.raw = frame;
 438	switch (po->tp_version) {
 439	case TPACKET_V1:
 440		h.h1->tp_sec = ts.tv_sec;
 441		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 442		break;
 443	case TPACKET_V2:
 444		h.h2->tp_sec = ts.tv_sec;
 445		h.h2->tp_nsec = ts.tv_nsec;
 446		break;
 447	case TPACKET_V3:
 448		h.h3->tp_sec = ts.tv_sec;
 449		h.h3->tp_nsec = ts.tv_nsec;
 450		break;
 451	default:
 452		WARN(1, "TPACKET version not supported.\n");
 453		BUG();
 454	}
 455
 456	/* one flush is safe, as both fields always lie on the same cacheline */
 457	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 458	smp_wmb();
 459
 460	return ts_status;
 461}
 462
 463static void *packet_lookup_frame(const struct packet_sock *po,
 464				 const struct packet_ring_buffer *rb,
 465				 unsigned int position,
 466				 int status)
 467{
 468	unsigned int pg_vec_pos, frame_offset;
 469	union tpacket_uhdr h;
 470
 471	pg_vec_pos = position / rb->frames_per_block;
 472	frame_offset = position % rb->frames_per_block;
 473
 474	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 475		(frame_offset * rb->frame_size);
 476
 477	if (status != __packet_get_status(po, h.raw))
 478		return NULL;
 479
 480	return h.raw;
 481}
 482
 483static void *packet_current_frame(struct packet_sock *po,
 484		struct packet_ring_buffer *rb,
 485		int status)
 486{
 487	return packet_lookup_frame(po, rb, rb->head, status);
 488}
 489
 490static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 491{
 492	del_timer_sync(&pkc->retire_blk_timer);
 493}
 494
 495static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 496		struct sk_buff_head *rb_queue)
 497{
 498	struct tpacket_kbdq_core *pkc;
 499
 500	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 501
 502	spin_lock_bh(&rb_queue->lock);
 503	pkc->delete_blk_timer = 1;
 504	spin_unlock_bh(&rb_queue->lock);
 505
 506	prb_del_retire_blk_timer(pkc);
 507}
 508
 509static void prb_setup_retire_blk_timer(struct packet_sock *po)
 510{
 511	struct tpacket_kbdq_core *pkc;
 512
 513	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 514	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 515		    0);
 516	pkc->retire_blk_timer.expires = jiffies;
 517}
 518
 519static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 520				int blk_size_in_bytes)
 521{
 522	struct net_device *dev;
 523	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
 524	struct ethtool_link_ksettings ecmd;
 525	int err;
 526
 527	rtnl_lock();
 528	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 529	if (unlikely(!dev)) {
 530		rtnl_unlock();
 531		return DEFAULT_PRB_RETIRE_TOV;
 532	}
 533	err = __ethtool_get_link_ksettings(dev, &ecmd);
 534	rtnl_unlock();
 535	if (!err) {
 536		/*
 537		 * If the link speed is so slow you don't really
 538		 * need to worry about perf anyways
 539		 */
 540		if (ecmd.base.speed < SPEED_1000 ||
 541		    ecmd.base.speed == SPEED_UNKNOWN) {
 542			return DEFAULT_PRB_RETIRE_TOV;
 543		} else {
 544			msec = 1;
 545			div = ecmd.base.speed / 1000;
 546		}
 547	}
 548
 549	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 550
 551	if (div)
 552		mbits /= div;
 553
 554	tmo = mbits * msec;
 555
 556	if (div)
 557		return tmo+1;
 558	return tmo;
 559}
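/*
 * Worked example of the calculation above (assumed link speeds): for a
 * 1 MiB block on a 1 Gbit/s link, div = 1, msec = 1 and
 * mbits = (1048576 * 8) / (1024 * 1024) = 8, so tmo = 8 and the function
 * returns 9 ms; on a 10 Gbit/s link div = 10, the integer division makes
 * mbits 0 and the result is 1 ms.
 */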
 560
 561static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 562			union tpacket_req_u *req_u)
 563{
 564	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 565}
 566
 567static void init_prb_bdqc(struct packet_sock *po,
 568			struct packet_ring_buffer *rb,
 569			struct pgv *pg_vec,
 570			union tpacket_req_u *req_u)
 571{
 572	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 573	struct tpacket_block_desc *pbd;
 574
 575	memset(p1, 0x0, sizeof(*p1));
 576
 577	p1->knxt_seq_num = 1;
 578	p1->pkbdq = pg_vec;
 579	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 580	p1->pkblk_start	= pg_vec[0].buffer;
 581	p1->kblk_size = req_u->req3.tp_block_size;
 582	p1->knum_blocks	= req_u->req3.tp_block_nr;
 583	p1->hdrlen = po->tp_hdrlen;
 584	p1->version = po->tp_version;
 585	p1->last_kactive_blk_num = 0;
 586	po->stats.stats3.tp_freeze_q_cnt = 0;
 587	if (req_u->req3.tp_retire_blk_tov)
 588		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 589	else
 590		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 591						req_u->req3.tp_block_size);
 592	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 593	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 594
 595	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 596	prb_init_ft_ops(p1, req_u);
 597	prb_setup_retire_blk_timer(po);
 598	prb_open_block(p1, pbd);
 599}
 600
 601/*  Do NOT update the last_blk_num first.
 602 *  Assumes sk_buff_head lock is held.
 603 */
 604static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 605{
 606	mod_timer(&pkc->retire_blk_timer,
 607			jiffies + pkc->tov_in_jiffies);
 608	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 609}
 610
 611/*
 612 * Timer logic:
 613 * 1) We refresh the timer only when we open a block.
 614 *    By doing this we don't waste cycles refreshing the timer
 615 *	  on a packet-by-packet basis.
 616 *
 617 * With a 1MB block-size, on a 1Gbps line, it will take
 618 * i) ~8 ms to fill a block + ii) memcpy etc.
 619 * In this cut we are not accounting for the memcpy time.
 620 *
 621 * So, if the user sets the 'tmo' to 10ms then the timer
 622 * will never fire while the block is still getting filled
 623 * (which is what we want). However, the user could choose
 624 * to close a block early and that's fine.
 625 *
 626 * But when the timer does fire, we check whether or not to refresh it.
 627 * Since the tmo granularity is in msecs, it is not too expensive
 628 * to refresh the timer, let's say every '8' msecs.
 629 * Either the user can set the 'tmo' or we can derive it based on
 630 * a) line-speed and b) block-size.
 631 * prb_calc_retire_blk_tmo() calculates the tmo.
 632 *
 633 */
 634static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 635{
 636	struct packet_sock *po =
 637		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 638	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 639	unsigned int frozen;
 640	struct tpacket_block_desc *pbd;
 641
 642	spin_lock(&po->sk.sk_receive_queue.lock);
 643
 644	frozen = prb_queue_frozen(pkc);
 645	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 646
 647	if (unlikely(pkc->delete_blk_timer))
 648		goto out;
 649
 650	/* We only need to plug the race when the block is partially filled.
 651	 * tpacket_rcv:
 652	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 653	 *		copy_bits() is in progress ...
 654	 *		timer fires on other cpu:
 655	 *		we can't retire the current block because copy_bits
 656	 *		is in progress.
 657	 *
 658	 */
 659	if (BLOCK_NUM_PKTS(pbd)) {
 660		while (atomic_read(&pkc->blk_fill_in_prog)) {
 661			/* Waiting for skb_copy_bits to finish... */
 662			cpu_relax();
 663		}
 664	}
 665
 666	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 667		if (!frozen) {
 668			if (!BLOCK_NUM_PKTS(pbd)) {
 669				/* An empty block. Just refresh the timer. */
 670				goto refresh_timer;
 671			}
 672			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 673			if (!prb_dispatch_next_block(pkc, po))
 674				goto refresh_timer;
 675			else
 676				goto out;
 677		} else {
 678			/* Case 1. Queue was frozen because user-space was
 679			 *	   lagging behind.
 680			 */
 681			if (prb_curr_blk_in_use(pbd)) {
 682				/*
 683				 * Ok, user-space is still behind.
 684				 * So just refresh the timer.
 685				 */
 686				goto refresh_timer;
 687			} else {
 688			       /* Case 2. The queue was frozen, user-space caught up,
 689				* now the link went idle && the timer fired.
 690				* We don't have a block to close. So we open this
 691				* block and restart the timer.
 692				* Opening a block thaws the queue and restarts the timer.
 693				* Thawing/timer-refresh is a side effect.
 694				*/
 695				prb_open_block(pkc, pbd);
 696				goto out;
 697			}
 698		}
 699	}
 700
 701refresh_timer:
 702	_prb_refresh_rx_retire_blk_timer(pkc);
 703
 704out:
 705	spin_unlock(&po->sk.sk_receive_queue.lock);
 706}
 707
 708static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 709		struct tpacket_block_desc *pbd1, __u32 status)
 710{
 711	/* Flush everything minus the block header */
 712
 713#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 714	u8 *start, *end;
 715
 716	start = (u8 *)pbd1;
 717
 718	/* Skip the block header (we know the header WILL fit in 4K) */
 719	start += PAGE_SIZE;
 720
 721	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 722	for (; start < end; start += PAGE_SIZE)
 723		flush_dcache_page(pgv_to_page(start));
 724
 725	smp_wmb();
 726#endif
 727
 728	/* Now update the block status. */
 729
 730	BLOCK_STATUS(pbd1) = status;
 731
 732	/* Flush the block header */
 733
 734#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 735	start = (u8 *)pbd1;
 736	flush_dcache_page(pgv_to_page(start));
 737
 738	smp_wmb();
 739#endif
 740}
 741
 742/*
 743 * Side effect:
 744 *
 745 * 1) flush the block
 746 * 2) Increment active_blk_num
 747 *
 748 * Note:We DONT refresh the timer on purpose.
 749 *	Because almost always the next block will be opened.
 750 */
 751static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 752		struct tpacket_block_desc *pbd1,
 753		struct packet_sock *po, unsigned int stat)
 754{
 755	__u32 status = TP_STATUS_USER | stat;
 756
 757	struct tpacket3_hdr *last_pkt;
 758	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 759	struct sock *sk = &po->sk;
 760
 761	if (atomic_read(&po->tp_drops))
 762		status |= TP_STATUS_LOSING;
 763
 764	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 765	last_pkt->tp_next_offset = 0;
 766
 767	/* Get the ts of the last pkt */
 768	if (BLOCK_NUM_PKTS(pbd1)) {
 769		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 770		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 771	} else {
 772		/* Ok, we tmo'd - so get the current time.
 773		 *
 774		 * It shouldn't really happen as we don't close empty
 775		 * blocks. See prb_retire_rx_blk_timer_expired().
 776		 */
 777		struct timespec ts;
 778		getnstimeofday(&ts);
 779		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 780		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 781	}
 782
 783	smp_wmb();
 784
 785	/* Flush the block */
 786	prb_flush_block(pkc1, pbd1, status);
 787
 788	sk->sk_data_ready(sk);
 789
 790	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 791}
 792
 793static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 794{
 795	pkc->reset_pending_on_curr_blk = 0;
 796}
 797
 798/*
 799 * Side effect of opening a block:
 800 *
 801 * 1) prb_queue is thawed.
 802 * 2) retire_blk_timer is refreshed.
 803 *
 804 */
 805static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 806	struct tpacket_block_desc *pbd1)
 807{
 808	struct timespec ts;
 809	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 810
 811	smp_rmb();
 812
 813	/* We could have just memset this but we will lose the
 814	 * flexibility of making the priv area sticky
 815	 */
 816
 817	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 818	BLOCK_NUM_PKTS(pbd1) = 0;
 819	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 820
 821	getnstimeofday(&ts);
 822
 823	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 824	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 825
 826	pkc1->pkblk_start = (char *)pbd1;
 827	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 828
 829	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 830	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 831
 832	pbd1->version = pkc1->version;
 833	pkc1->prev = pkc1->nxt_offset;
 834	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 835
 836	prb_thaw_queue(pkc1);
 837	_prb_refresh_rx_retire_blk_timer(pkc1);
 838
 839	smp_wmb();
 840}
 841
 842/*
 843 * Queue freeze logic:
 844 * 1) Assume tp_block_nr = 8 blocks.
 845 * 2) At time 't0', user opens Rx ring.
 846 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 847 * 4) user-space is either sleeping or processing block '0'.
 848 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 849 *    it will close block-7, loop around and try to fill block '0'.
 850 *    call-flow:
 851 *    __packet_lookup_frame_in_block
 852 *      prb_retire_current_block()
 853 *      prb_dispatch_next_block()
 854 *        |->(BLOCK_STATUS == USER) evaluates to true
 855 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 856 * 6) Now there are two cases:
 857 *    6.1) Link goes idle right after the queue is frozen.
 858 *         But remember, the last open_block() refreshed the timer.
 859 *         When this timer expires, it will refresh itself so that we can
 860 *         re-open block-0 in the near future.
 861 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 862 *         case and __packet_lookup_frame_in_block will check if block-0
 863 *         is free and can now be re-used.
 864 */
 865static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 866				  struct packet_sock *po)
 867{
 868	pkc->reset_pending_on_curr_blk = 1;
 869	po->stats.stats3.tp_freeze_q_cnt++;
 870}
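/*
 * Illustrative sketch (userspace side): the queue thaws again once the
 * block that caused the freeze is handed back, i.e. after the consumer has
 * walked its packets and cleared the block status.
 *
 *	struct tpacket_block_desc *pbd = block_addr;	// assumed mapping
 *	// ... process pbd->hdr.bh1.num_pkts packets ...
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;	// return to kernel
 */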
 871
 872#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 873
 874/*
 875 * If the next block is free then we will dispatch it
 876 * and return a good offset.
 877 * Else, we will freeze the queue.
 878 * So, caller must check the return value.
 879 */
 880static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 881		struct packet_sock *po)
 882{
 883	struct tpacket_block_desc *pbd;
 884
 885	smp_rmb();
 886
 887	/* 1. Get current block num */
 888	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 889
 890	/* 2. If this block is currently in_use then freeze the queue */
 891	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 892		prb_freeze_queue(pkc, po);
 893		return NULL;
 894	}
 895
 896	/*
 897	 * 3.
 898	 * open this block and return the offset where the first packet
 899	 * needs to get stored.
 900	 */
 901	prb_open_block(pkc, pbd);
 902	return (void *)pkc->nxt_offset;
 903}
 904
 905static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 906		struct packet_sock *po, unsigned int status)
 907{
 908	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 909
 910	/* retire/close the current block */
 911	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 912		/*
 913		 * Plug the case where copy_bits() is in progress on
 914		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 915		 * have space to copy the pkt in the current block and
 916		 * called prb_retire_current_block()
 917		 *
 918		 * We don't need to worry about the TMO case because
 919		 * the timer-handler already handled this case.
 920		 */
 921		if (!(status & TP_STATUS_BLK_TMO)) {
 922			while (atomic_read(&pkc->blk_fill_in_prog)) {
 923				/* Waiting for skb_copy_bits to finish... */
 924				cpu_relax();
 925			}
 926		}
 927		prb_close_block(pkc, pbd, po, status);
 928		return;
 929	}
 930}
 931
 932static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
 933{
 934	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 935}
 936
 937static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 938{
 939	return pkc->reset_pending_on_curr_blk;
 940}
 941
 942static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 943{
 944	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 945	atomic_dec(&pkc->blk_fill_in_prog);
 946}
 947
 948static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 949			struct tpacket3_hdr *ppd)
 950{
 951	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 952}
 953
 954static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 955			struct tpacket3_hdr *ppd)
 956{
 957	ppd->hv1.tp_rxhash = 0;
 958}
 959
 960static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 961			struct tpacket3_hdr *ppd)
 962{
 963	if (skb_vlan_tag_present(pkc->skb)) {
 964		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
 965		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
 966		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
 967	} else {
 968		ppd->hv1.tp_vlan_tci = 0;
 969		ppd->hv1.tp_vlan_tpid = 0;
 970		ppd->tp_status = TP_STATUS_AVAILABLE;
 971	}
 972}
 973
 974static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
 975			struct tpacket3_hdr *ppd)
 976{
 977	ppd->hv1.tp_padding = 0;
 978	prb_fill_vlan_info(pkc, ppd);
 979
 980	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
 981		prb_fill_rxhash(pkc, ppd);
 982	else
 983		prb_clear_rxhash(pkc, ppd);
 984}
 985
 986static void prb_fill_curr_block(char *curr,
 987				struct tpacket_kbdq_core *pkc,
 988				struct tpacket_block_desc *pbd,
 989				unsigned int len)
 990{
 991	struct tpacket3_hdr *ppd;
 992
 993	ppd  = (struct tpacket3_hdr *)curr;
 994	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
 995	pkc->prev = curr;
 996	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
 997	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
 998	BLOCK_NUM_PKTS(pbd) += 1;
 999	atomic_inc(&pkc->blk_fill_in_prog);
1000	prb_run_all_ft_ops(pkc, ppd);
1001}
1002
1003/* Assumes caller has the sk->rx_queue.lock */
1004static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1005					    struct sk_buff *skb,
1006					    unsigned int len
1007					    )
1008{
1009	struct tpacket_kbdq_core *pkc;
1010	struct tpacket_block_desc *pbd;
1011	char *curr, *end;
1012
1013	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1014	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1015
1016	/* Queue is frozen when user space is lagging behind */
1017	if (prb_queue_frozen(pkc)) {
1018		/*
1019		 * Check if the last block, which caused the queue to freeze,
1020		 * is still in use by user-space.
1021		 */
1022		if (prb_curr_blk_in_use(pbd)) {
1023			/* Can't record this packet */
1024			return NULL;
1025		} else {
1026			/*
1027			 * Ok, the block was released by user-space.
1028			 * Now let's open that block.
1029			 * Opening a block also thaws the queue;
1030			 * thawing is a side effect.
1031			 */
1032			prb_open_block(pkc, pbd);
1033		}
1034	}
1035
1036	smp_mb();
1037	curr = pkc->nxt_offset;
1038	pkc->skb = skb;
1039	end = (char *)pbd + pkc->kblk_size;
1040
1041	/* first try the current block */
1042	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1043		prb_fill_curr_block(curr, pkc, pbd, len);
1044		return (void *)curr;
1045	}
1046
1047	/* Ok, close the current block */
1048	prb_retire_current_block(pkc, po, 0);
1049
1050	/* Now, try to dispatch the next block */
1051	curr = (char *)prb_dispatch_next_block(pkc, po);
1052	if (curr) {
1053		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1054		prb_fill_curr_block(curr, pkc, pbd, len);
1055		return (void *)curr;
1056	}
1057
1058	/*
1059	 * No free blocks are available. user_space hasn't caught up yet.
1060	 * Queue was just frozen and now this packet will get dropped.
1061	 */
1062	return NULL;
1063}
1064
1065static void *packet_current_rx_frame(struct packet_sock *po,
1066					    struct sk_buff *skb,
1067					    int status, unsigned int len)
1068{
1069	char *curr = NULL;
1070	switch (po->tp_version) {
1071	case TPACKET_V1:
1072	case TPACKET_V2:
1073		curr = packet_lookup_frame(po, &po->rx_ring,
1074					po->rx_ring.head, status);
1075		return curr;
1076	case TPACKET_V3:
1077		return __packet_lookup_frame_in_block(po, skb, len);
1078	default:
1079		WARN(1, "TPACKET version not supported\n");
1080		BUG();
1081		return NULL;
1082	}
1083}
1084
1085static void *prb_lookup_block(const struct packet_sock *po,
1086			      const struct packet_ring_buffer *rb,
1087			      unsigned int idx,
1088			      int status)
1089{
1090	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1091	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1092
1093	if (status != BLOCK_STATUS(pbd))
1094		return NULL;
1095	return pbd;
1096}
1097
1098static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1099{
1100	unsigned int prev;
1101	if (rb->prb_bdqc.kactive_blk_num)
1102		prev = rb->prb_bdqc.kactive_blk_num-1;
1103	else
1104		prev = rb->prb_bdqc.knum_blocks-1;
1105	return prev;
1106}
1107
1108/* Assumes caller has held the rx_queue.lock */
1109static void *__prb_previous_block(struct packet_sock *po,
1110					 struct packet_ring_buffer *rb,
1111					 int status)
1112{
1113	unsigned int previous = prb_previous_blk_num(rb);
1114	return prb_lookup_block(po, rb, previous, status);
1115}
1116
1117static void *packet_previous_rx_frame(struct packet_sock *po,
1118					     struct packet_ring_buffer *rb,
1119					     int status)
1120{
1121	if (po->tp_version <= TPACKET_V2)
1122		return packet_previous_frame(po, rb, status);
1123
1124	return __prb_previous_block(po, rb, status);
1125}
1126
1127static void packet_increment_rx_head(struct packet_sock *po,
1128					    struct packet_ring_buffer *rb)
1129{
1130	switch (po->tp_version) {
1131	case TPACKET_V1:
1132	case TPACKET_V2:
1133		return packet_increment_head(rb);
1134	case TPACKET_V3:
1135	default:
1136		WARN(1, "TPACKET version not supported.\n");
1137		BUG();
1138		return;
1139	}
1140}
1141
1142static void *packet_previous_frame(struct packet_sock *po,
1143		struct packet_ring_buffer *rb,
1144		int status)
1145{
1146	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1147	return packet_lookup_frame(po, rb, previous, status);
1148}
1149
1150static void packet_increment_head(struct packet_ring_buffer *buff)
1151{
1152	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1153}
1154
1155static void packet_inc_pending(struct packet_ring_buffer *rb)
1156{
1157	this_cpu_inc(*rb->pending_refcnt);
1158}
1159
1160static void packet_dec_pending(struct packet_ring_buffer *rb)
1161{
1162	this_cpu_dec(*rb->pending_refcnt);
1163}
1164
1165static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1166{
1167	unsigned int refcnt = 0;
1168	int cpu;
1169
1170	/* We don't use pending refcount in rx_ring. */
1171	if (rb->pending_refcnt == NULL)
1172		return 0;
1173
1174	for_each_possible_cpu(cpu)
1175		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1176
1177	return refcnt;
1178}
1179
1180static int packet_alloc_pending(struct packet_sock *po)
1181{
1182	po->rx_ring.pending_refcnt = NULL;
1183
1184	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1185	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1186		return -ENOBUFS;
1187
1188	return 0;
1189}
1190
1191static void packet_free_pending(struct packet_sock *po)
1192{
1193	free_percpu(po->tx_ring.pending_refcnt);
1194}
1195
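/*
 * Receive-ring occupancy levels: __packet_rcv_has_room() reports one of
 * these, and the fanout rollover code uses them to decide whether a
 * member socket can still take traffic.
 */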
1196#define ROOM_POW_OFF	2
1197#define ROOM_NONE	0x0
1198#define ROOM_LOW	0x1
1199#define ROOM_NORMAL	0x2
1200
1201static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1202{
1203	int idx, len;
1204
1205	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1206	idx = READ_ONCE(po->rx_ring.head);
1207	if (pow_off)
1208		idx += len >> pow_off;
1209	if (idx >= len)
1210		idx -= len;
1211	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1212}
1213
1214static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1215{
1216	int idx, len;
1217
1218	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1219	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1220	if (pow_off)
1221		idx += len >> pow_off;
1222	if (idx >= len)
1223		idx -= len;
1224	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1225}
1226
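/*
 * Classify how much receive room is left.  For sockets that are not using
 * a ring (prot_hook.func != tpacket_rcv) this compares sk_rcvbuf with the
 * allocated receive memory; for ring sockets it probes the RX ring for a
 * free TP_STATUS_KERNEL frame (V1/V2) or block (V3).
 */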
1227static int __packet_rcv_has_room(const struct packet_sock *po,
1228				 const struct sk_buff *skb)
1229{
1230	const struct sock *sk = &po->sk;
1231	int ret = ROOM_NONE;
1232
1233	if (po->prot_hook.func != tpacket_rcv) {
1234		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1235		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1236				   - (skb ? skb->truesize : 0);
1237
1238		if (avail > (rcvbuf >> ROOM_POW_OFF))
1239			return ROOM_NORMAL;
1240		else if (avail > 0)
1241			return ROOM_LOW;
1242		else
1243			return ROOM_NONE;
1244	}
1245
1246	if (po->tp_version == TPACKET_V3) {
1247		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1248			ret = ROOM_NORMAL;
1249		else if (__tpacket_v3_has_room(po, 0))
1250			ret = ROOM_LOW;
1251	} else {
1252		if (__tpacket_has_room(po, ROOM_POW_OFF))
1253			ret = ROOM_NORMAL;
1254		else if (__tpacket_has_room(po, 0))
1255			ret = ROOM_LOW;
1256	}
1257
1258	return ret;
1259}
1260
1261static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1262{
1263	int pressure, ret;
1264
1265	ret = __packet_rcv_has_room(po, skb);
1266	pressure = ret != ROOM_NORMAL;
1267
1268	if (READ_ONCE(po->pressure) != pressure)
1269		WRITE_ONCE(po->pressure, pressure);
1270
1271	return ret;
1272}
1273
1274static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1275{
1276	if (READ_ONCE(po->pressure) &&
1277	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1278		WRITE_ONCE(po->pressure,  0);
1279}
1280
1281static void packet_sock_destruct(struct sock *sk)
1282{
1283	skb_queue_purge(&sk->sk_error_queue);
1284
1285	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1286	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1287
1288	if (!sock_flag(sk, SOCK_DEAD)) {
1289		pr_err("Attempt to release alive packet socket: %p\n", sk);
1290		return;
1291	}
1292
1293	sk_refcnt_debug_dec(sk);
1294}
1295
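/*
 * A flow counts as "huge" when its rxhash already fills more than half of
 * the rollover history window.  Under ROOM_LOW only such flows are rolled
 * over to another member; small flows stay on their current socket.
 */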
1296static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1297{
1298	u32 rxhash;
1299	int i, count = 0;
1300
1301	rxhash = skb_get_hash(skb);
1302	for (i = 0; i < ROLLOVER_HLEN; i++)
1303		if (po->rollover->history[i] == rxhash)
1304			count++;
1305
1306	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1307	return count > (ROLLOVER_HLEN >> 1);
1308}
1309
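/*
 * Fanout demultiplexers: each maps an incoming skb to a member index in
 * [0, num) according to the group's PACKET_FANOUT_* mode.
 */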
1310static unsigned int fanout_demux_hash(struct packet_fanout *f,
1311				      struct sk_buff *skb,
1312				      unsigned int num)
1313{
1314	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1315}
1316
1317static unsigned int fanout_demux_lb(struct packet_fanout *f,
1318				    struct sk_buff *skb,
1319				    unsigned int num)
1320{
1321	unsigned int val = atomic_inc_return(&f->rr_cur);
1322
1323	return val % num;
1324}
1325
1326static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1327				     struct sk_buff *skb,
1328				     unsigned int num)
1329{
1330	return smp_processor_id() % num;
1331}
1332
1333static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1334				     struct sk_buff *skb,
1335				     unsigned int num)
1336{
1337	return prandom_u32_max(num);
1338}
1339
1340static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1341					  struct sk_buff *skb,
1342					  unsigned int idx, bool try_self,
1343					  unsigned int num)
1344{
1345	struct packet_sock *po, *po_next, *po_skip = NULL;
1346	unsigned int i, j, room = ROOM_NONE;
1347
1348	po = pkt_sk(f->arr[idx]);
1349
1350	if (try_self) {
1351		room = packet_rcv_has_room(po, skb);
1352		if (room == ROOM_NORMAL ||
1353		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1354			return idx;
1355		po_skip = po;
1356	}
1357
1358	i = j = min_t(int, po->rollover->sock, num - 1);
1359	do {
1360		po_next = pkt_sk(f->arr[i]);
1361		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
1362		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1363			if (i != j)
1364				po->rollover->sock = i;
1365			atomic_long_inc(&po->rollover->num);
1366			if (room == ROOM_LOW)
1367				atomic_long_inc(&po->rollover->num_huge);
1368			return i;
1369		}
1370
1371		if (++i == num)
1372			i = 0;
1373	} while (i != j);
1374
1375	atomic_long_inc(&po->rollover->num_failed);
1376	return idx;
1377}
1378
1379static unsigned int fanout_demux_qm(struct packet_fanout *f,
1380				    struct sk_buff *skb,
1381				    unsigned int num)
1382{
1383	return skb_get_queue_mapping(skb) % num;
1384}
1385
1386static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1387				     struct sk_buff *skb,
1388				     unsigned int num)
1389{
1390	struct bpf_prog *prog;
1391	unsigned int ret = 0;
1392
1393	rcu_read_lock();
1394	prog = rcu_dereference(f->bpf_prog);
1395	if (prog)
1396		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1397	rcu_read_unlock();
1398
1399	return ret;
1400}
1401
1402static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1403{
1404	return f->flags & (flag >> 8);
1405}
1406
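/*
 * Shared prot_hook handler for a fanout group: optionally defragment the
 * packet, pick a member according to f->type, optionally roll over to a
 * less loaded member, then hand the skb to that socket's own handler.
 */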
1407static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1408			     struct packet_type *pt, struct net_device *orig_dev)
1409{
1410	struct packet_fanout *f = pt->af_packet_priv;
1411	unsigned int num = READ_ONCE(f->num_members);
1412	struct net *net = read_pnet(&f->net);
1413	struct packet_sock *po;
1414	unsigned int idx;
1415
1416	if (!net_eq(dev_net(dev), net) || !num) {
1417		kfree_skb(skb);
1418		return 0;
1419	}
1420
1421	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1422		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1423		if (!skb)
1424			return 0;
1425	}
1426	switch (f->type) {
1427	case PACKET_FANOUT_HASH:
1428	default:
1429		idx = fanout_demux_hash(f, skb, num);
1430		break;
1431	case PACKET_FANOUT_LB:
1432		idx = fanout_demux_lb(f, skb, num);
1433		break;
1434	case PACKET_FANOUT_CPU:
1435		idx = fanout_demux_cpu(f, skb, num);
1436		break;
1437	case PACKET_FANOUT_RND:
1438		idx = fanout_demux_rnd(f, skb, num);
1439		break;
1440	case PACKET_FANOUT_QM:
1441		idx = fanout_demux_qm(f, skb, num);
1442		break;
1443	case PACKET_FANOUT_ROLLOVER:
1444		idx = fanout_demux_rollover(f, skb, 0, false, num);
1445		break;
1446	case PACKET_FANOUT_CBPF:
1447	case PACKET_FANOUT_EBPF:
1448		idx = fanout_demux_bpf(f, skb, num);
1449		break;
1450	}
1451
1452	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1453		idx = fanout_demux_rollover(f, skb, idx, true, num);
1454
1455	po = pkt_sk(f->arr[idx]);
1456	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1457}
1458
1459DEFINE_MUTEX(fanout_mutex);
1460EXPORT_SYMBOL_GPL(fanout_mutex);
1461static LIST_HEAD(fanout_list);
1462static u16 fanout_next_id;
1463
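/*
 * Add sk to the group's member array under f->lock; the first member also
 * registers the group's shared prot_hook with the device layer.
 */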
1464static void __fanout_link(struct sock *sk, struct packet_sock *po)
1465{
1466	struct packet_fanout *f = po->fanout;
1467
1468	spin_lock(&f->lock);
1469	f->arr[f->num_members] = sk;
1470	smp_wmb();
1471	f->num_members++;
1472	if (f->num_members == 1)
1473		dev_add_pack(&f->prot_hook);
1474	spin_unlock(&f->lock);
1475}
1476
1477static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1478{
1479	struct packet_fanout *f = po->fanout;
1480	int i;
1481
1482	spin_lock(&f->lock);
1483	for (i = 0; i < f->num_members; i++) {
1484		if (f->arr[i] == sk)
1485			break;
1486	}
1487	BUG_ON(i >= f->num_members);
1488	f->arr[i] = f->arr[f->num_members - 1];
1489	f->num_members--;
1490	if (f->num_members == 0)
1491		__dev_remove_pack(&f->prot_hook);
1492	spin_unlock(&f->lock);
1493}
1494
1495static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1496{
1497	if (sk->sk_family != PF_PACKET)
1498		return false;
1499
1500	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1501}
1502
1503static void fanout_init_data(struct packet_fanout *f)
1504{
1505	switch (f->type) {
1506	case PACKET_FANOUT_LB:
1507		atomic_set(&f->rr_cur, 0);
1508		break;
1509	case PACKET_FANOUT_CBPF:
1510	case PACKET_FANOUT_EBPF:
1511		RCU_INIT_POINTER(f->bpf_prog, NULL);
1512		break;
1513	}
1514}
1515
1516static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1517{
1518	struct bpf_prog *old;
1519
1520	spin_lock(&f->lock);
1521	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1522	rcu_assign_pointer(f->bpf_prog, new);
1523	spin_unlock(&f->lock);
1524
1525	if (old) {
1526		synchronize_net();
1527		bpf_prog_destroy(old);
1528	}
1529}
1530
1531static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1532				unsigned int len)
1533{
1534	struct bpf_prog *new;
1535	struct sock_fprog fprog;
1536	int ret;
1537
1538	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1539		return -EPERM;
1540	if (len != sizeof(fprog))
1541		return -EINVAL;
1542	if (copy_from_user(&fprog, data, len))
1543		return -EFAULT;
1544
1545	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1546	if (ret)
1547		return ret;
1548
1549	__fanout_set_data_bpf(po->fanout, new);
1550	return 0;
1551}
1552
1553static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1554				unsigned int len)
1555{
1556	struct bpf_prog *new;
1557	u32 fd;
1558
1559	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1560		return -EPERM;
1561	if (len != sizeof(fd))
1562		return -EINVAL;
1563	if (copy_from_user(&fd, data, len))
1564		return -EFAULT;
1565
1566	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1567	if (IS_ERR(new))
1568		return PTR_ERR(new);
1569
1570	__fanout_set_data_bpf(po->fanout, new);
1571	return 0;
1572}
1573
1574static int fanout_set_data(struct packet_sock *po, char __user *data,
1575			   unsigned int len)
1576{
1577	switch (po->fanout->type) {
1578	case PACKET_FANOUT_CBPF:
1579		return fanout_set_data_cbpf(po, data, len);
1580	case PACKET_FANOUT_EBPF:
1581		return fanout_set_data_ebpf(po, data, len);
1582	default:
1583		return -EINVAL;
1584	}
1585}
1586
1587static void fanout_release_data(struct packet_fanout *f)
1588{
1589	switch (f->type) {
1590	case PACKET_FANOUT_CBPF:
1591	case PACKET_FANOUT_EBPF:
1592		__fanout_set_data_bpf(f, NULL);
1593	}
1594}
1595
1596static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1597{
1598	struct packet_fanout *f;
1599
1600	list_for_each_entry(f, &fanout_list, list) {
1601		if (f->id == candidate_id &&
1602		    read_pnet(&f->net) == sock_net(sk)) {
1603			return false;
1604		}
1605	}
1606	return true;
1607}
1608
1609static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1610{
1611	u16 id = fanout_next_id;
1612
1613	do {
1614		if (__fanout_id_is_free(sk, id)) {
1615			*new_id = id;
1616			fanout_next_id = id + 1;
1617			return true;
1618		}
1619
1620		id++;
1621	} while (id != fanout_next_id);
1622
1623	return false;
1624}
1625
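/*
 * Join (or create) a fanout group.  The first joiner allocates the group
 * and registers its shared prot_hook; later joiners must match the group's
 * type and flags, and membership is capped at PACKET_FANOUT_MAX.
 *
 * Illustrative userspace sketch (not taken from this file): the
 * PACKET_FANOUT socket option packs the group id into the low 16 bits and
 * the mode/flags into the high 16 bits, e.g.
 *
 *	int val = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 */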
1626static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1627{
1628	struct packet_rollover *rollover = NULL;
1629	struct packet_sock *po = pkt_sk(sk);
1630	struct packet_fanout *f, *match;
1631	u8 type = type_flags & 0xff;
1632	u8 flags = type_flags >> 8;
1633	int err;
1634
1635	switch (type) {
1636	case PACKET_FANOUT_ROLLOVER:
1637		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1638			return -EINVAL;
1639	case PACKET_FANOUT_HASH:
1640	case PACKET_FANOUT_LB:
1641	case PACKET_FANOUT_CPU:
1642	case PACKET_FANOUT_RND:
1643	case PACKET_FANOUT_QM:
1644	case PACKET_FANOUT_CBPF:
1645	case PACKET_FANOUT_EBPF:
1646		break;
1647	default:
1648		return -EINVAL;
1649	}
1650
1651	mutex_lock(&fanout_mutex);
1652
1653	err = -EALREADY;
1654	if (po->fanout)
1655		goto out;
1656
1657	if (type == PACKET_FANOUT_ROLLOVER ||
1658	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1659		err = -ENOMEM;
1660		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1661		if (!rollover)
1662			goto out;
1663		atomic_long_set(&rollover->num, 0);
1664		atomic_long_set(&rollover->num_huge, 0);
1665		atomic_long_set(&rollover->num_failed, 0);
1666	}
1667
1668	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1669		if (id != 0) {
1670			err = -EINVAL;
1671			goto out;
1672		}
1673		if (!fanout_find_new_id(sk, &id)) {
1674			err = -ENOMEM;
1675			goto out;
1676		}
1677		/* ephemeral flag for the first socket in the group: drop it */
1678		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1679	}
1680
1681	match = NULL;
1682	list_for_each_entry(f, &fanout_list, list) {
1683		if (f->id == id &&
1684		    read_pnet(&f->net) == sock_net(sk)) {
1685			match = f;
1686			break;
1687		}
1688	}
1689	err = -EINVAL;
1690	if (match && match->flags != flags)
1691		goto out;
1692	if (!match) {
1693		err = -ENOMEM;
1694		match = kzalloc(sizeof(*match), GFP_KERNEL);
1695		if (!match)
1696			goto out;
1697		write_pnet(&match->net, sock_net(sk));
1698		match->id = id;
1699		match->type = type;
1700		match->flags = flags;
1701		INIT_LIST_HEAD(&match->list);
1702		spin_lock_init(&match->lock);
1703		refcount_set(&match->sk_ref, 0);
1704		fanout_init_data(match);
1705		match->prot_hook.type = po->prot_hook.type;
1706		match->prot_hook.dev = po->prot_hook.dev;
1707		match->prot_hook.func = packet_rcv_fanout;
1708		match->prot_hook.af_packet_priv = match;
1709		match->prot_hook.id_match = match_fanout_group;
1710		list_add(&match->list, &fanout_list);
1711	}
1712	err = -EINVAL;
1713
1714	spin_lock(&po->bind_lock);
1715	if (po->running &&
1716	    match->type == type &&
1717	    match->prot_hook.type == po->prot_hook.type &&
1718	    match->prot_hook.dev == po->prot_hook.dev) {
1719		err = -ENOSPC;
1720		if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1721			__dev_remove_pack(&po->prot_hook);
1722			po->fanout = match;
1723			po->rollover = rollover;
1724			rollover = NULL;
1725			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1726			__fanout_link(sk, po);
1727			err = 0;
1728		}
1729	}
1730	spin_unlock(&po->bind_lock);
1731
1732	if (err && !refcount_read(&match->sk_ref)) {
1733		list_del(&match->list);
1734		kfree(match);
1735	}
1736
1737out:
1738	kfree(rollover);
1739	mutex_unlock(&fanout_mutex);
1740	return err;
1741}
1742
1743/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1744 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1745 * It is the responsibility of the caller to call fanout_release_data() and
1746 * free the returned packet_fanout (after synchronize_net())
1747 */
1748static struct packet_fanout *fanout_release(struct sock *sk)
1749{
1750	struct packet_sock *po = pkt_sk(sk);
1751	struct packet_fanout *f;
1752
1753	mutex_lock(&fanout_mutex);
1754	f = po->fanout;
1755	if (f) {
1756		po->fanout = NULL;
1757
1758		if (refcount_dec_and_test(&f->sk_ref))
1759			list_del(&f->list);
1760		else
1761			f = NULL;
1762	}
1763	mutex_unlock(&fanout_mutex);
1764
1765	return f;
1766}
1767
1768static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1769					  struct sk_buff *skb)
1770{
1771	/* Earlier code assumed this would be a VLAN pkt; double-check
1772	 * this now that we have the actual packet in hand. We can only
1773	 * do this check on Ethernet devices.
1774	 */
1775	if (unlikely(dev->type != ARPHRD_ETHER))
1776		return false;
1777
1778	skb_reset_mac_header(skb);
1779	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1780}
1781
1782static const struct proto_ops packet_ops;
1783
1784static const struct proto_ops packet_ops_spkt;
1785
1786static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1787			   struct packet_type *pt, struct net_device *orig_dev)
1788{
1789	struct sock *sk;
1790	struct sockaddr_pkt *spkt;
1791
1792	/*
1793	 *	When we registered the protocol we saved the socket in the data
1794	 *	field for just this event.
1795	 */
1796
1797	sk = pt->af_packet_priv;
1798
1799	/*
1800	 *	Yank back the headers [hope the device set this
1801	 *	right or kerboom...]
1802	 *
1803	 *	Incoming packets have ll header pulled,
1804	 *	push it back.
1805	 *
1806	 *	For outgoing ones skb->data == skb_mac_header(skb)
1807	 *	so that this procedure is noop.
1808	 */
1809
1810	if (skb->pkt_type == PACKET_LOOPBACK)
1811		goto out;
1812
1813	if (!net_eq(dev_net(dev), sock_net(sk)))
1814		goto out;
1815
1816	skb = skb_share_check(skb, GFP_ATOMIC);
1817	if (skb == NULL)
1818		goto oom;
1819
1820	/* drop any routing info */
1821	skb_dst_drop(skb);
1822
1823	/* drop conntrack reference */
1824	nf_reset_ct(skb);
1825
1826	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1827
1828	skb_push(skb, skb->data - skb_mac_header(skb));
1829
1830	/*
1831	 *	The SOCK_PACKET socket receives _all_ frames.
1832	 */
1833
1834	spkt->spkt_family = dev->type;
1835	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1836	spkt->spkt_protocol = skb->protocol;
1837
1838	/*
1839	 *	Charge the memory to the socket. This is done specifically
1840	 *	to prevent sockets from using up all the memory.
1841	 */
1842
1843	if (sock_queue_rcv_skb(sk, skb) == 0)
1844		return 0;
1845
1846out:
1847	kfree_skb(skb);
1848oom:
1849	return 0;
1850}
1851
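/*
 * For SOCK_RAW frames that carry no protocol (or ETH_P_ALL), derive it
 * from the link-layer header, then let the stack probe for the transport
 * header offset.
 */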
1852static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1853{
1854	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1855	    sock->type == SOCK_RAW) {
1856		skb_reset_mac_header(skb);
1857		skb->protocol = dev_parse_header_protocol(skb);
1858	}
1859
1860	skb_probe_transport_header(skb);
1861}
1862
1863/*
1864 *	Output a raw packet to a device layer. This bypasses all the other
1865 *	protocol layers and you must therefore supply it with a complete frame
1866 */
1867
1868static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1869			       size_t len)
1870{
1871	struct sock *sk = sock->sk;
1872	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1873	struct sk_buff *skb = NULL;
1874	struct net_device *dev;
1875	struct sockcm_cookie sockc;
1876	__be16 proto = 0;
1877	int err;
1878	int extra_len = 0;
1879
1880	/*
1881	 *	Get and verify the address.
1882	 */
1883
1884	if (saddr) {
1885		if (msg->msg_namelen < sizeof(struct sockaddr))
1886			return -EINVAL;
1887		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1888			proto = saddr->spkt_protocol;
1889	} else
1890		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1891
1892	/*
1893	 *	Find the device first to size check it
1894	 */
1895
1896	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1897retry:
1898	rcu_read_lock();
1899	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1900	err = -ENODEV;
1901	if (dev == NULL)
1902		goto out_unlock;
1903
1904	err = -ENETDOWN;
1905	if (!(dev->flags & IFF_UP))
1906		goto out_unlock;
1907
1908	/*
1909	 * You may not queue a frame bigger than the mtu. This is the lowest level
1910	 * raw protocol and you must do your own fragmentation at this level.
1911	 */
1912
1913	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1914		if (!netif_supports_nofcs(dev)) {
1915			err = -EPROTONOSUPPORT;
1916			goto out_unlock;
1917		}
1918		extra_len = 4; /* We're doing our own CRC */
1919	}
1920
1921	err = -EMSGSIZE;
1922	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1923		goto out_unlock;
1924
1925	if (!skb) {
1926		size_t reserved = LL_RESERVED_SPACE(dev);
1927		int tlen = dev->needed_tailroom;
1928		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1929
1930		rcu_read_unlock();
1931		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1932		if (skb == NULL)
1933			return -ENOBUFS;
1934		/* FIXME: Save some space for broken drivers that write a hard
1935		 * header at transmission time by themselves. PPP is the notable
1936		 * one here. This should really be fixed at the driver level.
1937		 */
1938		skb_reserve(skb, reserved);
1939		skb_reset_network_header(skb);
1940
1941		/* Try to align data part correctly */
1942		if (hhlen) {
1943			skb->data -= hhlen;
1944			skb->tail -= hhlen;
1945			if (len < hhlen)
1946				skb_reset_network_header(skb);
1947		}
1948		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1949		if (err)
1950			goto out_free;
1951		goto retry;
1952	}
1953
1954	if (!dev_validate_header(dev, skb->data, len)) {
1955		err = -EINVAL;
1956		goto out_unlock;
1957	}
1958	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1959	    !packet_extra_vlan_len_allowed(dev, skb)) {
1960		err = -EMSGSIZE;
1961		goto out_unlock;
1962	}
1963
1964	sockcm_init(&sockc, sk);
1965	if (msg->msg_controllen) {
1966		err = sock_cmsg_send(sk, msg, &sockc);
1967		if (unlikely(err))
1968			goto out_unlock;
1969	}
1970
1971	skb->protocol = proto;
1972	skb->dev = dev;
1973	skb->priority = sk->sk_priority;
1974	skb->mark = sk->sk_mark;
1975	skb->tstamp = sockc.transmit_time;
1976
1977	skb_setup_tx_timestamp(skb, sockc.tsflags);
1978
1979	if (unlikely(extra_len == 4))
1980		skb->no_fcs = 1;
1981
1982	packet_parse_headers(skb, sock);
1983
1984	dev_queue_xmit(skb);
1985	rcu_read_unlock();
1986	return len;
1987
1988out_unlock:
1989	rcu_read_unlock();
1990out_free:
1991	kfree_skb(skb);
1992	return err;
1993}
1994
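/*
 * Run the socket's attached BPF filter, if any.  The return value is the
 * snap length to keep; 0 means the packet is dropped.  Without a filter
 * the length passed in "res" is returned unchanged.
 */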
1995static unsigned int run_filter(struct sk_buff *skb,
1996			       const struct sock *sk,
1997			       unsigned int res)
1998{
1999	struct sk_filter *filter;
2000
2001	rcu_read_lock();
2002	filter = rcu_dereference(sk->sk_filter);
2003	if (filter != NULL)
2004		res = bpf_prog_run_clear_cb(filter->prog, skb);
2005	rcu_read_unlock();
2006
2007	return res;
2008}
2009
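/*
 * Prepend a virtio_net_hdr describing the skb to the receive message;
 * used for sockets that have a virtio_net header enabled.
 */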
2010static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2011			   size_t *len)
2012{
2013	struct virtio_net_hdr vnet_hdr;
2014
2015	if (*len < sizeof(vnet_hdr))
2016		return -EINVAL;
2017	*len -= sizeof(vnet_hdr);
2018
2019	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2020		return -EINVAL;
2021
2022	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2023}
2024
2025/*
2026 * This function does lazy skb cloning in the hope that most packets
2027 * are discarded by BPF.
2028 *
2029 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2030 * and skb->cb are mangled. It works because (and until) packets
2031 * falling here are owned by the current CPU. Output packets are cloned
2032 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2033 * sequentially, so if we return the skb to its original state on exit,
2034 * we will not harm anyone.
2035 */
2036
2037static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2038		      struct packet_type *pt, struct net_device *orig_dev)
2039{
2040	struct sock *sk;
2041	struct sockaddr_ll *sll;
2042	struct packet_sock *po;
2043	u8 *skb_head = skb->data;
2044	int skb_len = skb->len;
2045	unsigned int snaplen, res;
2046	bool is_drop_n_account = false;
2047
2048	if (skb->pkt_type == PACKET_LOOPBACK)
2049		goto drop;
2050
2051	sk = pt->af_packet_priv;
2052	po = pkt_sk(sk);
2053
2054	if (!net_eq(dev_net(dev), sock_net(sk)))
2055		goto drop;
2056
2057	skb->dev = dev;
2058
2059	if (dev->header_ops) {
2060		/* The device has an explicit notion of ll header,
2061		 * exported to higher levels.
2062		 *
2063		 * Otherwise, the device hides details of its frame
2064		 * structure, so that the corresponding packet header is
2065		 * never delivered to the user.
2066		 */
2067		if (sk->sk_type != SOCK_DGRAM)
2068			skb_push(skb, skb->data - skb_mac_header(skb));
2069		else if (skb->pkt_type == PACKET_OUTGOING) {
2070			/* Special case: outgoing packets have ll header at head */
2071			skb_pull(skb, skb_network_offset(skb));
2072		}
2073	}
2074
2075	snaplen = skb->len;
2076
2077	res = run_filter(skb, sk, snaplen);
2078	if (!res)
2079		goto drop_n_restore;
2080	if (snaplen > res)
2081		snaplen = res;
2082
2083	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2084		goto drop_n_acct;
2085
2086	if (skb_shared(skb)) {
2087		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2088		if (nskb == NULL)
2089			goto drop_n_acct;
2090
2091		if (skb_head != skb->data) {
2092			skb->data = skb_head;
2093			skb->len = skb_len;
2094		}
2095		consume_skb(skb);
2096		skb = nskb;
2097	}
2098
2099	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2100
2101	sll = &PACKET_SKB_CB(skb)->sa.ll;
2102	sll->sll_hatype = dev->type;
2103	sll->sll_pkttype = skb->pkt_type;
2104	if (unlikely(po->origdev))
2105		sll->sll_ifindex = orig_dev->ifindex;
2106	else
2107		sll->sll_ifindex = dev->ifindex;
2108
2109	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2110
2111	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2112	 * Use their space for storing the original skb length.
2113	 */
2114	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2115
2116	if (pskb_trim(skb, snaplen))
2117		goto drop_n_acct;
2118
2119	skb_set_owner_r(skb, sk);
2120	skb->dev = NULL;
2121	skb_dst_drop(skb);
2122
2123	/* drop conntrack reference */
2124	nf_reset_ct(skb);
2125
2126	spin_lock(&sk->sk_receive_queue.lock);
2127	po->stats.stats1.tp_packets++;
2128	sock_skb_set_dropcount(sk, skb);
2129	__skb_queue_tail(&sk->sk_receive_queue, skb);
2130	spin_unlock(&sk->sk_receive_queue.lock);
2131	sk->sk_data_ready(sk);
2132	return 0;
2133
2134drop_n_acct:
2135	is_drop_n_account = true;
2136	atomic_inc(&po->tp_drops);
2137	atomic_inc(&sk->sk_drops);
2138
2139drop_n_restore:
2140	if (skb_head != skb->data && skb_shared(skb)) {
2141		skb->data = skb_head;
2142		skb->len = skb_len;
2143	}
2144drop:
2145	if (!is_drop_n_account)
2146		consume_skb(skb);
2147	else
2148		kfree_skb(skb);
2149	return 0;
2150}
2151
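/*
 * Ring-buffer receive path (PACKET_RX_RING): reserve space in the mmap'ed
 * RX ring, copy the (possibly truncated) packet into it, fill the
 * version-specific tpacket header plus sockaddr_ll, and finally flip the
 * slot status so user space can consume the frame.
 */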
2152static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2153		       struct packet_type *pt, struct net_device *orig_dev)
2154{
2155	struct sock *sk;
2156	struct packet_sock *po;
2157	struct sockaddr_ll *sll;
2158	union tpacket_uhdr h;
2159	u8 *skb_head = skb->data;
2160	int skb_len = skb->len;
2161	unsigned int snaplen, res;
2162	unsigned long status = TP_STATUS_USER;
2163	unsigned short macoff, netoff, hdrlen;
2164	struct sk_buff *copy_skb = NULL;
2165	struct timespec ts;
2166	__u32 ts_status;
2167	bool is_drop_n_account = false;
2168	bool do_vnet = false;
2169
2170	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2171	 * We may add members to them up to the current aligned size without forcing
2172	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2173	 */
2174	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2175	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2176
2177	if (skb->pkt_type == PACKET_LOOPBACK)
2178		goto drop;
2179
2180	sk = pt->af_packet_priv;
2181	po = pkt_sk(sk);
2182
2183	if (!net_eq(dev_net(dev), sock_net(sk)))
2184		goto drop;
2185
2186	if (dev->header_ops) {
2187		if (sk->sk_type != SOCK_DGRAM)
2188			skb_push(skb, skb->data - skb_mac_header(skb));
2189		else if (skb->pkt_type == PACKET_OUTGOING) {
2190			/* Special case: outgoing packets have ll header at head */
2191			skb_pull(skb, skb_network_offset(skb));
2192		}
2193	}
2194
2195	snaplen = skb->len;
2196
2197	res = run_filter(skb, sk, snaplen);
2198	if (!res)
2199		goto drop_n_restore;
2200
2201	/* If we are flooded, just give up */
2202	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2203		atomic_inc(&po->tp_drops);
2204		goto drop_n_restore;
2205	}
2206
2207	if (skb->ip_summed == CHECKSUM_PARTIAL)
2208		status |= TP_STATUS_CSUMNOTREADY;
2209	else if (skb->pkt_type != PACKET_OUTGOING &&
2210		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2211		  skb_csum_unnecessary(skb)))
2212		status |= TP_STATUS_CSUM_VALID;
2213
2214	if (snaplen > res)
2215		snaplen = res;
2216
2217	if (sk->sk_type == SOCK_DGRAM) {
2218		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2219				  po->tp_reserve;
2220	} else {
2221		unsigned int maclen = skb_network_offset(skb);
2222		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2223				       (maclen < 16 ? 16 : maclen)) +
2224				       po->tp_reserve;
2225		if (po->has_vnet_hdr) {
2226			netoff += sizeof(struct virtio_net_hdr);
2227			do_vnet = true;
2228		}
2229		macoff = netoff - maclen;
2230	}
2231	if (po->tp_version <= TPACKET_V2) {
2232		if (macoff + snaplen > po->rx_ring.frame_size) {
2233			if (po->copy_thresh &&
2234			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2235				if (skb_shared(skb)) {
2236					copy_skb = skb_clone(skb, GFP_ATOMIC);
2237				} else {
2238					copy_skb = skb_get(skb);
2239					skb_head = skb->data;
2240				}
2241				if (copy_skb)
2242					skb_set_owner_r(copy_skb, sk);
2243			}
2244			snaplen = po->rx_ring.frame_size - macoff;
2245			if ((int)snaplen < 0) {
2246				snaplen = 0;
2247				do_vnet = false;
2248			}
2249		}
2250	} else if (unlikely(macoff + snaplen >
2251			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2252		u32 nval;
2253
2254		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2255		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2256			    snaplen, nval, macoff);
2257		snaplen = nval;
2258		if (unlikely((int)snaplen < 0)) {
2259			snaplen = 0;
2260			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2261			do_vnet = false;
2262		}
2263	}
2264	spin_lock(&sk->sk_receive_queue.lock);
2265	h.raw = packet_current_rx_frame(po, skb,
2266					TP_STATUS_KERNEL, (macoff+snaplen));
2267	if (!h.raw)
2268		goto drop_n_account;
2269	if (po->tp_version <= TPACKET_V2) {
2270		packet_increment_rx_head(po, &po->rx_ring);
2271	/*
2272	 * LOSING will be reported until you read the stats,
2273	 * because it's COR - Clear On Read.
2274	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2275	 * at the packet level.
2276	 */
2277		if (atomic_read(&po->tp_drops))
2278			status |= TP_STATUS_LOSING;
2279	}
2280
2281	if (do_vnet &&
2282	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2283				    sizeof(struct virtio_net_hdr),
2284				    vio_le(), true, 0))
2285		goto drop_n_account;
2286
2287	po->stats.stats1.tp_packets++;
2288	if (copy_skb) {
2289		status |= TP_STATUS_COPY;
2290		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2291	}
2292	spin_unlock(&sk->sk_receive_queue.lock);
2293
2294	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2295
2296	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2297		getnstimeofday(&ts);
2298
2299	status |= ts_status;
2300
2301	switch (po->tp_version) {
2302	case TPACKET_V1:
2303		h.h1->tp_len = skb->len;
2304		h.h1->tp_snaplen = snaplen;
2305		h.h1->tp_mac = macoff;
2306		h.h1->tp_net = netoff;
2307		h.h1->tp_sec = ts.tv_sec;
2308		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2309		hdrlen = sizeof(*h.h1);
2310		break;
2311	case TPACKET_V2:
2312		h.h2->tp_len = skb->len;
2313		h.h2->tp_snaplen = snaplen;
2314		h.h2->tp_mac = macoff;
2315		h.h2->tp_net = netoff;
2316		h.h2->tp_sec = ts.tv_sec;
2317		h.h2->tp_nsec = ts.tv_nsec;
2318		if (skb_vlan_tag_present(skb)) {
2319			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2320			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2321			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2322		} else {
2323			h.h2->tp_vlan_tci = 0;
2324			h.h2->tp_vlan_tpid = 0;
2325		}
2326		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2327		hdrlen = sizeof(*h.h2);
2328		break;
2329	case TPACKET_V3:
2330		/* tp_next_offset and the vlan fields are already populated above,
2331		 * so DON'T clear them here.
2332		 */
2333		h.h3->tp_status |= status;
2334		h.h3->tp_len = skb->len;
2335		h.h3->tp_snaplen = snaplen;
2336		h.h3->tp_mac = macoff;
2337		h.h3->tp_net = netoff;
2338		h.h3->tp_sec  = ts.tv_sec;
2339		h.h3->tp_nsec = ts.tv_nsec;
2340		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2341		hdrlen = sizeof(*h.h3);
2342		break;
2343	default:
2344		BUG();
2345	}
2346
2347	sll = h.raw + TPACKET_ALIGN(hdrlen);
2348	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2349	sll->sll_family = AF_PACKET;
2350	sll->sll_hatype = dev->type;
2351	sll->sll_protocol = skb->protocol;
2352	sll->sll_pkttype = skb->pkt_type;
2353	if (unlikely(po->origdev))
2354		sll->sll_ifindex = orig_dev->ifindex;
2355	else
2356		sll->sll_ifindex = dev->ifindex;
2357
2358	smp_mb();
2359
2360#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2361	if (po->tp_version <= TPACKET_V2) {
2362		u8 *start, *end;
2363
2364		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2365					macoff + snaplen);
2366
2367		for (start = h.raw; start < end; start += PAGE_SIZE)
2368			flush_dcache_page(pgv_to_page(start));
2369	}
2370	smp_wmb();
2371#endif
2372
2373	if (po->tp_version <= TPACKET_V2) {
2374		__packet_set_status(po, h.raw, status);
2375		sk->sk_data_ready(sk);
2376	} else {
2377		prb_clear_blk_fill_status(&po->rx_ring);
2378	}
2379
2380drop_n_restore:
2381	if (skb_head != skb->data && skb_shared(skb)) {
2382		skb->data = skb_head;
2383		skb->len = skb_len;
2384	}
2385drop:
2386	if (!is_drop_n_account)
2387		consume_skb(skb);
2388	else
2389		kfree_skb(skb);
2390	return 0;
2391
2392drop_n_account:
2393	spin_unlock(&sk->sk_receive_queue.lock);
2394	atomic_inc(&po->tp_drops);
2395	is_drop_n_account = true;
2396
2397	sk->sk_data_ready(sk);
2398	kfree_skb(copy_skb);
2399	goto drop_n_restore;
2400}
2401
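/*
 * skb destructor for TX-ring frames: stamp the transmit timestamp, mark
 * the ring slot TP_STATUS_AVAILABLE again and, once no frames remain
 * pending, wake up a sender blocked in tpacket_snd().
 */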
2402static void tpacket_destruct_skb(struct sk_buff *skb)
2403{
2404	struct packet_sock *po = pkt_sk(skb->sk);
2405
2406	if (likely(po->tx_ring.pg_vec)) {
2407		void *ph;
2408		__u32 ts;
2409
2410		ph = skb_zcopy_get_nouarg(skb);
2411		packet_dec_pending(&po->tx_ring);
2412
2413		ts = __packet_set_timestamp(po, ph, skb);
2414		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2415
2416		if (!packet_read_pending(&po->tx_ring))
2417			complete(&po->skb_completion);
2418	}
2419
2420	sock_wfree(skb);
2421}
2422
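/*
 * Sanity-check a user-supplied virtio_net_hdr: grow hdr_len so that it
 * covers the checksum start/offset if needed, and reject headers that
 * claim to be longer than the packet itself.
 */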
2423static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2424{
2425	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2426	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2427	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2428	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2429		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2430			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2431			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2432
2433	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2434		return -EINVAL;
2435
2436	return 0;
2437}
2438
2439static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2440				 struct virtio_net_hdr *vnet_hdr)
2441{
2442	if (*len < sizeof(*vnet_hdr))
2443		return -EINVAL;
2444	*len -= sizeof(*vnet_hdr);
2445
2446	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2447		return -EFAULT;
2448
2449	return __packet_snd_vnet_parse(vnet_hdr, *len);
2450}
2451
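/*
 * Build an skb for one TX-ring frame.  At most the link-layer header is
 * copied into the linear area; the payload is attached as page fragments
 * pointing straight into the ring, so the frame must stay untouched until
 * tpacket_destruct_skb() releases it.
 */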
2452static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2453		void *frame, struct net_device *dev, void *data, int tp_len,
2454		__be16 proto, unsigned char *addr, int hlen, int copylen,
2455		const struct sockcm_cookie *sockc)
2456{
2457	union tpacket_uhdr ph;
2458	int to_write, offset, len, nr_frags, len_max;
2459	struct socket *sock = po->sk.sk_socket;
2460	struct page *page;
2461	int err;
2462
2463	ph.raw = frame;
2464
2465	skb->protocol = proto;
2466	skb->dev = dev;
2467	skb->priority = po->sk.sk_priority;
2468	skb->mark = po->sk.sk_mark;
2469	skb->tstamp = sockc->transmit_time;
2470	skb_setup_tx_timestamp(skb, sockc->tsflags);
2471	skb_zcopy_set_nouarg(skb, ph.raw);
2472
2473	skb_reserve(skb, hlen);
2474	skb_reset_network_header(skb);
2475
2476	to_write = tp_len;
2477
2478	if (sock->type == SOCK_DGRAM) {
2479		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2480				NULL, tp_len);
2481		if (unlikely(err < 0))
2482			return -EINVAL;
2483	} else if (copylen) {
2484		int hdrlen = min_t(int, copylen, tp_len);
2485
2486		skb_push(skb, dev->hard_header_len);
2487		skb_put(skb, copylen - dev->hard_header_len);
2488		err = skb_store_bits(skb, 0, data, hdrlen);
2489		if (unlikely(err))
2490			return err;
2491		if (!dev_validate_header(dev, skb->data, hdrlen))
2492			return -EINVAL;
2493
2494		data += hdrlen;
2495		to_write -= hdrlen;
2496	}
2497
2498	offset = offset_in_page(data);
2499	len_max = PAGE_SIZE - offset;
2500	len = ((to_write > len_max) ? len_max : to_write);
2501
2502	skb->data_len = to_write;
2503	skb->len += to_write;
2504	skb->truesize += to_write;
2505	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2506
2507	while (likely(to_write)) {
2508		nr_frags = skb_shinfo(skb)->nr_frags;
2509
2510		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2511			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2512			       MAX_SKB_FRAGS);
2513			return -EFAULT;
2514		}
2515
2516		page = pgv_to_page(data);
2517		data += len;
2518		flush_dcache_page(page);
2519		get_page(page);
2520		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2521		to_write -= len;
2522		offset = 0;
2523		len_max = PAGE_SIZE;
2524		len = ((to_write > len_max) ? len_max : to_write);
2525	}
2526
2527	packet_parse_headers(skb, sock);
2528
2529	return tp_len;
2530}
2531
2532static int tpacket_parse_header(struct packet_sock *po, void *frame,
2533				int size_max, void **data)
2534{
2535	union tpacket_uhdr ph;
2536	int tp_len, off;
2537
2538	ph.raw = frame;
2539
2540	switch (po->tp_version) {
2541	case TPACKET_V3:
2542		if (ph.h3->tp_next_offset != 0) {
2543			pr_warn_once("variable sized slot not supported");
2544			return -EINVAL;
2545		}
2546		tp_len = ph.h3->tp_len;
2547		break;
2548	case TPACKET_V2:
2549		tp_len = ph.h2->tp_len;
2550		break;
2551	default:
2552		tp_len = ph.h1->tp_len;
2553		break;
2554	}
2555	if (unlikely(tp_len > size_max)) {
2556		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2557		return -EMSGSIZE;
2558	}
2559
2560	if (unlikely(po->tp_tx_has_off)) {
2561		int off_min, off_max;
2562
2563		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2564		off_max = po->tx_ring.frame_size - tp_len;
2565		if (po->sk.sk_type == SOCK_DGRAM) {
2566			switch (po->tp_version) {
2567			case TPACKET_V3:
2568				off = ph.h3->tp_net;
2569				break;
2570			case TPACKET_V2:
2571				off = ph.h2->tp_net;
2572				break;
2573			default:
2574				off = ph.h1->tp_net;
2575				break;
2576			}
2577		} else {
2578			switch (po->tp_version) {
2579			case TPACKET_V3:
2580				off = ph.h3->tp_mac;
2581				break;
2582			case TPACKET_V2:
2583				off = ph.h2->tp_mac;
2584				break;
2585			default:
2586				off = ph.h1->tp_mac;
2587				break;
2588			}
2589		}
2590		if (unlikely((off < off_min) || (off_max < off)))
2591			return -EINVAL;
2592	} else {
2593		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2594	}
2595
2596	*data = frame + off;
2597	return tp_len;
2598}
2599
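/*
 * PACKET_TX_RING send loop: walk the TX ring for frames marked
 * TP_STATUS_SEND_REQUEST, turn each into an skb and transmit it via
 * po->xmit(), waiting for completions when the caller did not pass
 * MSG_DONTWAIT.
 */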
2600static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2601{
2602	struct sk_buff *skb = NULL;
2603	struct net_device *dev;
2604	struct virtio_net_hdr *vnet_hdr = NULL;
2605	struct sockcm_cookie sockc;
2606	__be16 proto;
2607	int err, reserve = 0;
2608	void *ph;
2609	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2610	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2611	unsigned char *addr = NULL;
2612	int tp_len, size_max;
2613	void *data;
2614	int len_sum = 0;
2615	int status = TP_STATUS_AVAILABLE;
2616	int hlen, tlen, copylen = 0;
2617	long timeo = 0;
2618
2619	mutex_lock(&po->pg_vec_lock);
2620
2621	/* packet_sendmsg() check on tx_ring.pg_vec was lockless;
2622	 * we need to confirm it under protection of pg_vec_lock.
2623	 */
2624	if (unlikely(!po->tx_ring.pg_vec)) {
2625		err = -EBUSY;
2626		goto out;
2627	}
2628	if (likely(saddr == NULL)) {
2629		dev	= packet_cached_dev_get(po);
2630		proto	= po->num;
2631	} else {
2632		err = -EINVAL;
2633		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2634			goto out;
2635		if (msg->msg_namelen < (saddr->sll_halen
2636					+ offsetof(struct sockaddr_ll,
2637						sll_addr)))
2638			goto out;
2639		proto	= saddr->sll_protocol;
2640		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2641		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2642			if (dev && msg->msg_namelen < dev->addr_len +
2643				   offsetof(struct sockaddr_ll, sll_addr))
2644				goto out_put;
2645			addr = saddr->sll_addr;
2646		}
2647	}
2648
2649	err = -ENXIO;
2650	if (unlikely(dev == NULL))
2651		goto out;
2652	err = -ENETDOWN;
2653	if (unlikely(!(dev->flags & IFF_UP)))
2654		goto out_put;
2655
2656	sockcm_init(&sockc, &po->sk);
2657	if (msg->msg_controllen) {
2658		err = sock_cmsg_send(&po->sk, msg, &sockc);
2659		if (unlikely(err))
2660			goto out_put;
2661	}
2662
2663	if (po->sk.sk_socket->type == SOCK_RAW)
2664		reserve = dev->hard_header_len;
2665	size_max = po->tx_ring.frame_size
2666		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2667
2668	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2669		size_max = dev->mtu + reserve + VLAN_HLEN;
2670
2671	reinit_completion(&po->skb_completion);
2672
2673	do {
2674		ph = packet_current_frame(po, &po->tx_ring,
2675					  TP_STATUS_SEND_REQUEST);
2676		if (unlikely(ph == NULL)) {
2677			if (need_wait && skb) {
2678				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2679				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2680				if (timeo <= 0) {
2681					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2682					goto out_put;
2683				}
2684			}
2685			/* check for additional frames */
2686			continue;
2687		}
2688
2689		skb = NULL;
2690		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2691		if (tp_len < 0)
2692			goto tpacket_error;
2693
2694		status = TP_STATUS_SEND_REQUEST;
2695		hlen = LL_RESERVED_SPACE(dev);
2696		tlen = dev->needed_tailroom;
2697		if (po->has_vnet_hdr) {
2698			vnet_hdr = data;
2699			data += sizeof(*vnet_hdr);
2700			tp_len -= sizeof(*vnet_hdr);
2701			if (tp_len < 0 ||
2702			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2703				tp_len = -EINVAL;
2704				goto tpacket_error;
2705			}
2706			copylen = __virtio16_to_cpu(vio_le(),
2707						    vnet_hdr->hdr_len);
2708		}
2709		copylen = max_t(int, copylen, dev->hard_header_len);
2710		skb = sock_alloc_send_skb(&po->sk,
2711				hlen + tlen + sizeof(struct sockaddr_ll) +
2712				(copylen - dev->hard_header_len),
2713				!need_wait, &err);
2714
2715		if (unlikely(skb == NULL)) {
2716			/* we assume the socket was initially writeable ... */
2717			if (likely(len_sum > 0))
2718				err = len_sum;
2719			goto out_status;
2720		}
2721		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2722					  addr, hlen, copylen, &sockc);
2723		if (likely(tp_len >= 0) &&
2724		    tp_len > dev->mtu + reserve &&
2725		    !po->has_vnet_hdr &&
2726		    !packet_extra_vlan_len_allowed(dev, skb))
2727			tp_len = -EMSGSIZE;
2728
2729		if (unlikely(tp_len < 0)) {
2730tpacket_error:
2731			if (po->tp_loss) {
2732				__packet_set_status(po, ph,
2733						TP_STATUS_AVAILABLE);
2734				packet_increment_head(&po->tx_ring);
2735				kfree_skb(skb);
2736				continue;
2737			} else {
2738				status = TP_STATUS_WRONG_FORMAT;
2739				err = tp_len;
2740				goto out_status;
2741			}
2742		}
2743
2744		if (po->has_vnet_hdr) {
2745			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2746				tp_len = -EINVAL;
2747				goto tpacket_error;
2748			}
2749			virtio_net_hdr_set_proto(skb, vnet_hdr);
2750		}
2751
2752		skb->destructor = tpacket_destruct_skb;
2753		__packet_set_status(po, ph, TP_STATUS_SENDING);
2754		packet_inc_pending(&po->tx_ring);
2755
2756		status = TP_STATUS_SEND_REQUEST;
2757		err = po->xmit(skb);
2758		if (unlikely(err > 0)) {
2759			err = net_xmit_errno(err);
2760			if (err && __packet_get_status(po, ph) ==
2761				   TP_STATUS_AVAILABLE) {
2762				/* skb was destructed already */
2763				skb = NULL;
2764				goto out_status;
2765			}
2766			/*
2767			 * skb was dropped but not destructed yet;
2768			 * let's treat it like congestion or err < 0
2769			 */
2770			err = 0;
2771		}
2772		packet_increment_head(&po->tx_ring);
2773		len_sum += tp_len;
2774	} while (likely((ph != NULL) ||
2775		/* Note: packet_read_pending() might be slow if we have
2776		 * to call it, as it's a per_cpu variable, but in the fast path
2777		 * we already short-circuit the loop with the first
2778		 * condition, and luckily don't have to go that path
2779		 * anyway.
2780		 */
2781		 (need_wait && packet_read_pending(&po->tx_ring))));
2782
2783	err = len_sum;
2784	goto out_put;
2785
2786out_status:
2787	__packet_set_status(po, ph, status);
2788	kfree_skb(skb);
2789out_put:
2790	dev_put(dev);
2791out:
2792	mutex_unlock(&po->pg_vec_lock);
2793	return err;
2794}
2795
2796static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2797				        size_t reserve, size_t len,
2798				        size_t linear, int noblock,
2799				        int *err)
2800{
2801	struct sk_buff *skb;
2802
2803	/* Under a page?  Don't bother with paged skb. */
2804	if (prepad + len < PAGE_SIZE || !linear)
2805		linear = len;
2806
2807	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2808				   err, 0);
2809	if (!skb)
2810		return NULL;
2811
2812	skb_reserve(skb, reserve);
2813	skb_put(skb, linear);
2814	skb->data_len = len - linear;
2815	skb->len += len - linear;
2816
2817	return skb;
2818}
2819
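/*
 * Non-mmap sendmsg path: resolve the output device, allocate an skb large
 * enough for the frame (plus an optional virtio_net header), copy the
 * user data in and transmit via po->xmit().
 */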
2820static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2821{
2822	struct sock *sk = sock->sk;
2823	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2824	struct sk_buff *skb;
2825	struct net_device *dev;
2826	__be16 proto;
2827	unsigned char *addr = NULL;
2828	int err, reserve = 0;
2829	struct sockcm_cookie sockc;
2830	struct virtio_net_hdr vnet_hdr = { 0 };
2831	int offset = 0;
2832	struct packet_sock *po = pkt_sk(sk);
2833	bool has_vnet_hdr = false;
2834	int hlen, tlen, linear;
2835	int extra_len = 0;
2836
2837	/*
2838	 *	Get and verify the address.
2839	 */
2840
2841	if (likely(saddr == NULL)) {
2842		dev	= packet_cached_dev_get(po);
2843		proto	= po->num;
2844	} else {
2845		err = -EINVAL;
2846		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2847			goto out;
2848		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2849			goto out;
2850		proto	= saddr->sll_protocol;
2851		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2852		if (sock->type == SOCK_DGRAM) {
2853			if (dev && msg->msg_namelen < dev->addr_len +
2854				   offsetof(struct sockaddr_ll, sll_addr))
2855				goto out_unlock;
2856			addr = saddr->sll_addr;
2857		}
2858	}
2859
2860	err = -ENXIO;
2861	if (unlikely(dev == NULL))
2862		goto out_unlock;
2863	err = -ENETDOWN;
2864	if (unlikely(!(dev->flags & IFF_UP)))
2865		goto out_unlock;
2866
2867	sockcm_init(&sockc, sk);
2868	sockc.mark = sk->sk_mark;
2869	if (msg->msg_controllen) {
2870		err = sock_cmsg_send(sk, msg, &sockc);
2871		if (unlikely(err))
2872			goto out_unlock;
2873	}
2874
2875	if (sock->type == SOCK_RAW)
2876		reserve = dev->hard_header_len;
2877	if (po->has_vnet_hdr) {
2878		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2879		if (err)
2880			goto out_unlock;
2881		has_vnet_hdr = true;
2882	}
2883
2884	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2885		if (!netif_supports_nofcs(dev)) {
2886			err = -EPROTONOSUPPORT;
2887			goto out_unlock;
2888		}
2889		extra_len = 4; /* We're doing our own CRC */
2890	}
2891
2892	err = -EMSGSIZE;
2893	if (!vnet_hdr.gso_type &&
2894	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2895		goto out_unlock;
2896
2897	err = -ENOBUFS;
2898	hlen = LL_RESERVED_SPACE(dev);
2899	tlen = dev->needed_tailroom;
2900	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2901	linear = max(linear, min_t(int, len, dev->hard_header_len));
2902	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2903			       msg->msg_flags & MSG_DONTWAIT, &err);
2904	if (skb == NULL)
2905		goto out_unlock;
2906
2907	skb_reset_network_header(skb);
2908
2909	err = -EINVAL;
2910	if (sock->type == SOCK_DGRAM) {
2911		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2912		if (unlikely(offset < 0))
2913			goto out_free;
2914	} else if (reserve) {
2915		skb_reserve(skb, -reserve);
2916		if (len < reserve + sizeof(struct ipv6hdr) &&
2917		    dev->min_header_len != dev->hard_header_len)
2918			skb_reset_network_header(skb);
2919	}
2920
2921	/* Returns -EFAULT on error */
2922	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2923	if (err)
2924		goto out_free;
2925
2926	if (sock->type == SOCK_RAW &&
2927	    !dev_validate_header(dev, skb->data, len)) {
2928		err = -EINVAL;
2929		goto out_free;
2930	}
2931
2932	skb_setup_tx_timestamp(skb, sockc.tsflags);
2933
2934	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2935	    !packet_extra_vlan_len_allowed(dev, skb)) {
2936		err = -EMSGSIZE;
2937		goto out_free;
2938	}
2939
2940	skb->protocol = proto;
2941	skb->dev = dev;
2942	skb->priority = sk->sk_priority;
2943	skb->mark = sockc.mark;
2944	skb->tstamp = sockc.transmit_time;
2945
2946	if (has_vnet_hdr) {
2947		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2948		if (err)
2949			goto out_free;
2950		len += sizeof(vnet_hdr);
2951		virtio_net_hdr_set_proto(skb, &vnet_hdr);
2952	}
2953
2954	packet_parse_headers(skb, sock);
2955
2956	if (unlikely(extra_len == 4))
2957		skb->no_fcs = 1;
2958
2959	err = po->xmit(skb);
2960	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2961		goto out_unlock;
2962
2963	dev_put(dev);
2964
2965	return len;
2966
2967out_free:
2968	kfree_skb(skb);
2969out_unlock:
2970	if (dev)
2971		dev_put(dev);
2972out:
2973	return err;
2974}
2975
2976static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2977{
2978	struct sock *sk = sock->sk;
2979	struct packet_sock *po = pkt_sk(sk);
2980
2981	if (po->tx_ring.pg_vec)
2982		return tpacket_snd(po, msg);
2983	else
2984		return packet_snd(sock, msg, len);
2985}
2986
2987/*
2988 *	Close a PACKET socket. This is fairly simple. We immediately go
2989 *	to 'closed' state and remove our protocol entry in the device list.
2990 */
2991
2992static int packet_release(struct socket *sock)
2993{
2994	struct sock *sk = sock->sk;
2995	struct packet_sock *po;
2996	struct packet_fanout *f;
2997	struct net *net;
2998	union tpacket_req_u req_u;
2999
3000	if (!sk)
3001		return 0;
3002
3003	net = sock_net(sk);
3004	po = pkt_sk(sk);
3005
3006	mutex_lock(&net->packet.sklist_lock);
3007	sk_del_node_init_rcu(sk);
3008	mutex_unlock(&net->packet.sklist_lock);
3009
3010	preempt_disable();
3011	sock_prot_inuse_add(net, sk->sk_prot, -1);
3012	preempt_enable();
3013
3014	spin_lock(&po->bind_lock);
3015	unregister_prot_hook(sk, false);
3016	packet_cached_dev_reset(po);
3017
3018	if (po->prot_hook.dev) {
3019		dev_put(po->prot_hook.dev);
3020		po->prot_hook.dev = NULL;
3021	}
3022	spin_unlock(&po->bind_lock);
3023
3024	packet_flush_mclist(sk);
3025
3026	lock_sock(sk);
3027	if (po->rx_ring.pg_vec) {
3028		memset(&req_u, 0, sizeof(req_u));
3029		packet_set_ring(sk, &req_u, 1, 0);
3030	}
3031
3032	if (po->tx_ring.pg_vec) {
3033		memset(&req_u, 0, sizeof(req_u));
3034		packet_set_ring(sk, &req_u, 1, 1);
3035	}
3036	release_sock(sk);
3037
3038	f = fanout_release(sk);
3039
3040	synchronize_net();
3041
3042	kfree(po->rollover);
3043	if (f) {
3044		fanout_release_data(f);
3045		kfree(f);
3046	}
3047	/*
3048	 *	Now the socket is dead. No more input will appear.
3049	 */
3050	sock_orphan(sk);
3051	sock->sk = NULL;
3052
3053	/* Purge queues */
3054
3055	skb_queue_purge(&sk->sk_receive_queue);
3056	packet_free_pending(po);
3057	sk_refcnt_debug_release(sk);
3058
3059	sock_put(sk);
3060	return 0;
3061}
3062
3063/*
3064 *	Attach a packet hook.
3065 */
3066
3067static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3068			  __be16 proto)
3069{
3070	struct packet_sock *po = pkt_sk(sk);
3071	struct net_device *dev_curr;
3072	__be16 proto_curr;
3073	bool need_rehook;
3074	struct net_device *dev = NULL;
3075	int ret = 0;
3076	bool unlisted = false;
3077
3078	lock_sock(sk);
3079	spin_lock(&po->bind_lock);
3080	rcu_read_lock();
3081
3082	if (po->fanout) {
3083		ret = -EINVAL;
3084		goto out_unlock;
3085	}
3086
3087	if (name) {
3088		dev = dev_get_by_name_rcu(sock_net(sk), name);
3089		if (!dev) {
3090			ret = -ENODEV;
3091			goto out_unlock;
3092		}
3093	} else if (ifindex) {
3094		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3095		if (!dev) {
3096			ret = -ENODEV;
3097			goto out_unlock;
3098		}
3099	}
3100
3101	if (dev)
3102		dev_hold(dev);
3103
3104	proto_curr = po->prot_hook.type;
3105	dev_curr = po->prot_hook.dev;
3106
3107	need_rehook = proto_curr != proto || dev_curr != dev;
3108
3109	if (need_rehook) {
3110		if (po->running) {
3111			rcu_read_unlock();
3112			/* prevents packet_notifier() from calling
3113			 * register_prot_hook()
3114			 */
3115			po->num = 0;
3116			__unregister_prot_hook(sk, true);
3117			rcu_read_lock();
3118			dev_curr = po->prot_hook.dev;
3119			if (dev)
3120				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3121								 dev->ifindex);
3122		}
3123
3124		BUG_ON(po->running);
3125		po->num = proto;
3126		po->prot_hook.type = proto;
3127
3128		if (unlikely(unlisted)) {
3129			dev_put(dev);
3130			po->prot_hook.dev = NULL;
3131			po->ifindex = -1;
3132			packet_cached_dev_reset(po);
3133		} else {
3134			po->prot_hook.dev = dev;
3135			po->ifindex = dev ? dev->ifindex : 0;
3136			packet_cached_dev_assign(po, dev);
3137		}
3138	}
3139	if (dev_curr)
3140		dev_put(dev_curr);
3141
3142	if (proto == 0 || !need_rehook)
3143		goto out_unlock;
3144
3145	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3146		register_prot_hook(sk);
3147	} else {
3148		sk->sk_err = ENETDOWN;
3149		if (!sock_flag(sk, SOCK_DEAD))
3150			sk->sk_error_report(sk);
3151	}
3152
3153out_unlock:
3154	rcu_read_unlock();
3155	spin_unlock(&po->bind_lock);
3156	release_sock(sk);
3157	return ret;
3158}
3159
3160/*
3161 *	Bind a packet socket to a device
3162 */
3163
3164static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3165			    int addr_len)
3166{
3167	struct sock *sk = sock->sk;
3168	char name[sizeof(uaddr->sa_data) + 1];
3169
3170	/*
3171	 *	Check legality
3172	 */
3173
3174	if (addr_len != sizeof(struct sockaddr))
3175		return -EINVAL;
3176	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3177	 * zero-terminated.
3178	 */
3179	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3180	name[sizeof(uaddr->sa_data)] = 0;
3181
3182	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3183}
3184
3185static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3186{
3187	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3188	struct sock *sk = sock->sk;
3189
3190	/*
3191	 *	Check legality
3192	 */
3193
3194	if (addr_len < sizeof(struct sockaddr_ll))
3195		return -EINVAL;
3196	if (sll->sll_family != AF_PACKET)
3197		return -EINVAL;
3198
3199	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3200			      sll->sll_protocol ? : pkt_sk(sk)->num);
3201}
3202
3203static struct proto packet_proto = {
3204	.name	  = "PACKET",
3205	.owner	  = THIS_MODULE,
3206	.obj_size = sizeof(struct packet_sock),
3207};
3208
3209/*
3210 *	Create a packet socket.
3211 */
3212
3213static int packet_create(struct net *net, struct socket *sock, int protocol,
3214			 int kern)
3215{
3216	struct sock *sk;
3217	struct packet_sock *po;
3218	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3219	int err;
3220
3221	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3222		return -EPERM;
3223	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3224	    sock->type != SOCK_PACKET)
3225		return -ESOCKTNOSUPPORT;
3226
3227	sock->state = SS_UNCONNECTED;
3228
3229	err = -ENOBUFS;
3230	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3231	if (sk == NULL)
3232		goto out;
3233
3234	sock->ops = &packet_ops;
3235	if (sock->type == SOCK_PACKET)
3236		sock->ops = &packet_ops_spkt;
3237
3238	sock_init_data(sock, sk);
3239
3240	po = pkt_sk(sk);
3241	init_completion(&po->skb_completion);
3242	sk->sk_family = PF_PACKET;
3243	po->num = proto;
3244	po->xmit = dev_queue_xmit;
3245
3246	err = packet_alloc_pending(po);
3247	if (err)
3248		goto out2;
3249
3250	packet_cached_dev_reset(po);
3251
3252	sk->sk_destruct = packet_sock_destruct;
3253	sk_refcnt_debug_inc(sk);
3254
3255	/*
3256	 *	Attach a protocol block
3257	 */
3258
3259	spin_lock_init(&po->bind_lock);
3260	mutex_init(&po->pg_vec_lock);
3261	po->rollover = NULL;
3262	po->prot_hook.func = packet_rcv;
3263
3264	if (sock->type == SOCK_PACKET)
3265		po->prot_hook.func = packet_rcv_spkt;
3266
3267	po->prot_hook.af_packet_priv = sk;
3268
3269	if (proto) {
3270		po->prot_hook.type = proto;
3271		__register_prot_hook(sk);
3272	}
3273
3274	mutex_lock(&net->packet.sklist_lock);
3275	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3276	mutex_unlock(&net->packet.sklist_lock);
3277
3278	preempt_disable();
3279	sock_prot_inuse_add(net, &packet_proto, 1);
3280	preempt_enable();
3281
3282	return 0;
3283out2:
3284	sk_free(sk);
3285out:
3286	return err;
3287}
3288
3289/*
3290 *	Pull a packet from our receive queue and hand it to the user.
3291 *	If necessary we block.
3292 */
3293
3294static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3295			  int flags)
3296{
3297	struct sock *sk = sock->sk;
3298	struct sk_buff *skb;
3299	int copied, err;
3300	int vnet_hdr_len = 0;
3301	unsigned int origlen = 0;
3302
3303	err = -EINVAL;
3304	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3305		goto out;
3306
3307#if 0
3308	/* What error should we return now? EUNATTACH? */
3309	if (pkt_sk(sk)->ifindex < 0)
3310		return -ENODEV;
3311#endif
3312
3313	if (flags & MSG_ERRQUEUE) {
3314		err = sock_recv_errqueue(sk, msg, len,
3315					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3316		goto out;
3317	}
3318
3319	/*
3320	 *	Call the generic datagram receiver. This handles all sorts
3321	 *	of horrible races and re-entrancy so we can forget about it
3322	 *	in the protocol layers.
3323	 *
3324	 *	Now it will return ENETDOWN if the device has just gone down,
3325	 *	but then it will block.
3326	 */
3327
3328	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3329
3330	/*
3331	 *	If an error occurred, return it. Because skb_recv_datagram()
3332	 *	handles the blocking, we don't need to see or worry about
3333	 *	blocking retries.
3334	 */
3335
3336	if (skb == NULL)
3337		goto out;
3338
3339	packet_rcv_try_clear_pressure(pkt_sk(sk));
3340
3341	if (pkt_sk(sk)->has_vnet_hdr) {
3342		err = packet_rcv_vnet(msg, skb, &len);
3343		if (err)
3344			goto out_free;
3345		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3346	}
3347
3348	/* You lose any data beyond the buffer you gave. If this worries
3349	 * a user program, it can ask the device for its MTU
3350	 * anyway.
3351	 */
3352	copied = skb->len;
3353	if (copied > len) {
3354		copied = len;
3355		msg->msg_flags |= MSG_TRUNC;
3356	}
3357
3358	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3359	if (err)
3360		goto out_free;
3361
3362	if (sock->type != SOCK_PACKET) {
3363		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3364
3365		/* Original length was stored in sockaddr_ll fields */
3366		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3367		sll->sll_family = AF_PACKET;
3368		sll->sll_protocol = skb->protocol;
3369	}
3370
3371	sock_recv_ts_and_drops(msg, sk, skb);
3372
3373	if (msg->msg_name) {
3374		int copy_len;
3375
3376		/* If the address length field is there to be filled
3377		 * in, we fill it in now.
3378		 */
3379		if (sock->type == SOCK_PACKET) {
3380			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3381			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3382			copy_len = msg->msg_namelen;
3383		} else {
3384			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3385
3386			msg->msg_namelen = sll->sll_halen +
3387				offsetof(struct sockaddr_ll, sll_addr);
3388			copy_len = msg->msg_namelen;
3389			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3390				memset(msg->msg_name +
3391				       offsetof(struct sockaddr_ll, sll_addr),
3392				       0, sizeof(sll->sll_addr));
3393				msg->msg_namelen = sizeof(struct sockaddr_ll);
3394			}
3395		}
3396		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3397	}
3398
3399	if (pkt_sk(sk)->auxdata) {
3400		struct tpacket_auxdata aux;
3401
3402		aux.tp_status = TP_STATUS_USER;
3403		if (skb->ip_summed == CHECKSUM_PARTIAL)
3404			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3405		else if (skb->pkt_type != PACKET_OUTGOING &&
3406			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3407			  skb_csum_unnecessary(skb)))
3408			aux.tp_status |= TP_STATUS_CSUM_VALID;
3409
3410		aux.tp_len = origlen;
3411		aux.tp_snaplen = skb->len;
3412		aux.tp_mac = 0;
3413		aux.tp_net = skb_network_offset(skb);
3414		if (skb_vlan_tag_present(skb)) {
3415			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3416			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3417			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3418		} else {
3419			aux.tp_vlan_tci = 0;
3420			aux.tp_vlan_tpid = 0;
3421		}
3422		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3423	}
3424
3425	/*
3426	 *	Free or return the buffer as appropriate. Again this
3427	 *	hides all the races and re-entrancy issues from us.
3428	 */
3429	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3430
3431out_free:
3432	skb_free_datagram(sk, skb);
3433out:
3434	return err;
3435}
3436
3437static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3438			       int peer)
3439{
3440	struct net_device *dev;
3441	struct sock *sk	= sock->sk;
3442
3443	if (peer)
3444		return -EOPNOTSUPP;
3445
3446	uaddr->sa_family = AF_PACKET;
3447	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3448	rcu_read_lock();
3449	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3450	if (dev)
3451		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3452	rcu_read_unlock();
3453
3454	return sizeof(*uaddr);
3455}
3456
3457static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3458			  int peer)
3459{
3460	struct net_device *dev;
3461	struct sock *sk = sock->sk;
3462	struct packet_sock *po = pkt_sk(sk);
3463	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3464
3465	if (peer)
3466		return -EOPNOTSUPP;
3467
3468	sll->sll_family = AF_PACKET;
3469	sll->sll_ifindex = po->ifindex;
3470	sll->sll_protocol = po->num;
3471	sll->sll_pkttype = 0;
3472	rcu_read_lock();
3473	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3474	if (dev) {
3475		sll->sll_hatype = dev->type;
3476		sll->sll_halen = dev->addr_len;
3477		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3478	} else {
3479		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3480		sll->sll_halen = 0;
3481	}
3482	rcu_read_unlock();
3483
3484	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3485}
3486
3487static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3488			 int what)
3489{
3490	switch (i->type) {
3491	case PACKET_MR_MULTICAST:
3492		if (i->alen != dev->addr_len)
3493			return -EINVAL;
3494		if (what > 0)
3495			return dev_mc_add(dev, i->addr);
3496		else
3497			return dev_mc_del(dev, i->addr);
3498		break;
3499	case PACKET_MR_PROMISC:
3500		return dev_set_promiscuity(dev, what);
3501	case PACKET_MR_ALLMULTI:
3502		return dev_set_allmulti(dev, what);
3503	case PACKET_MR_UNICAST:
3504		if (i->alen != dev->addr_len)
3505			return -EINVAL;
3506		if (what > 0)
3507			return dev_uc_add(dev, i->addr);
3508		else
3509			return dev_uc_del(dev, i->addr);
3510		break;
3511	default:
3512		break;
3513	}
3514	return 0;
3515}
3516
3517static void packet_dev_mclist_delete(struct net_device *dev,
3518				     struct packet_mclist **mlp)
3519{
3520	struct packet_mclist *ml;
3521
3522	while ((ml = *mlp) != NULL) {
3523		if (ml->ifindex == dev->ifindex) {
3524			packet_dev_mc(dev, ml, -1);
3525			*mlp = ml->next;
3526			kfree(ml);
3527		} else
3528			mlp = &ml->next;
3529	}
3530}
3531
3532static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3533{
3534	struct packet_sock *po = pkt_sk(sk);
3535	struct packet_mclist *ml, *i;
3536	struct net_device *dev;
3537	int err;
3538
3539	rtnl_lock();
3540
3541	err = -ENODEV;
3542	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3543	if (!dev)
3544		goto done;
3545
3546	err = -EINVAL;
3547	if (mreq->mr_alen > dev->addr_len)
3548		goto done;
3549
3550	err = -ENOBUFS;
3551	i = kmalloc(sizeof(*i), GFP_KERNEL);
3552	if (i == NULL)
3553		goto done;
3554
3555	err = 0;
3556	for (ml = po->mclist; ml; ml = ml->next) {
3557		if (ml->ifindex == mreq->mr_ifindex &&
3558		    ml->type == mreq->mr_type &&
3559		    ml->alen == mreq->mr_alen &&
3560		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3561			ml->count++;
3562			/* Free the new element ... */
3563			kfree(i);
3564			goto done;
3565		}
3566	}
3567
3568	i->type = mreq->mr_type;
3569	i->ifindex = mreq->mr_ifindex;
3570	i->alen = mreq->mr_alen;
3571	memcpy(i->addr, mreq->mr_address, i->alen);
3572	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3573	i->count = 1;
3574	i->next = po->mclist;
3575	po->mclist = i;
3576	err = packet_dev_mc(dev, i, 1);
3577	if (err) {
3578		po->mclist = i->next;
3579		kfree(i);
3580	}
3581
3582done:
3583	rtnl_unlock();
3584	return err;
3585}
3586
3587static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3588{
3589	struct packet_mclist *ml, **mlp;
3590
3591	rtnl_lock();
3592
3593	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3594		if (ml->ifindex == mreq->mr_ifindex &&
3595		    ml->type == mreq->mr_type &&
3596		    ml->alen == mreq->mr_alen &&
3597		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3598			if (--ml->count == 0) {
3599				struct net_device *dev;
3600				*mlp = ml->next;
3601				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3602				if (dev)
3603					packet_dev_mc(dev, ml, -1);
3604				kfree(ml);
3605			}
3606			break;
3607		}
3608	}
3609	rtnl_unlock();
3610	return 0;
3611}
3612
3613static void packet_flush_mclist(struct sock *sk)
3614{
3615	struct packet_sock *po = pkt_sk(sk);
3616	struct packet_mclist *ml;
3617
3618	if (!po->mclist)
3619		return;
3620
3621	rtnl_lock();
3622	while ((ml = po->mclist) != NULL) {
3623		struct net_device *dev;
3624
3625		po->mclist = ml->next;
3626		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3627		if (dev != NULL)
3628			packet_dev_mc(dev, ml, -1);
3629		kfree(ml);
3630	}
3631	rtnl_unlock();
3632}
3633
3634static int
3635packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3636{
3637	struct sock *sk = sock->sk;
3638	struct packet_sock *po = pkt_sk(sk);
3639	int ret;
3640
3641	if (level != SOL_PACKET)
3642		return -ENOPROTOOPT;
3643
3644	switch (optname) {
3645	case PACKET_ADD_MEMBERSHIP:
3646	case PACKET_DROP_MEMBERSHIP:
3647	{
3648		struct packet_mreq_max mreq;
3649		int len = optlen;
3650		memset(&mreq, 0, sizeof(mreq));
3651		if (len < sizeof(struct packet_mreq))
3652			return -EINVAL;
3653		if (len > sizeof(mreq))
3654			len = sizeof(mreq);
3655		if (copy_from_user(&mreq, optval, len))
3656			return -EFAULT;
3657		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3658			return -EINVAL;
3659		if (optname == PACKET_ADD_MEMBERSHIP)
3660			ret = packet_mc_add(sk, &mreq);
3661		else
3662			ret = packet_mc_drop(sk, &mreq);
3663		return ret;
3664	}
3665
3666	case PACKET_RX_RING:
3667	case PACKET_TX_RING:
3668	{
3669		union tpacket_req_u req_u;
3670		int len;
3671
3672		lock_sock(sk);
3673		switch (po->tp_version) {
3674		case TPACKET_V1:
3675		case TPACKET_V2:
3676			len = sizeof(req_u.req);
3677			break;
3678		case TPACKET_V3:
3679		default:
3680			len = sizeof(req_u.req3);
3681			break;
3682		}
3683		if (optlen < len) {
3684			ret = -EINVAL;
3685		} else {
3686			if (copy_from_user(&req_u.req, optval, len))
3687				ret = -EFAULT;
3688			else
3689				ret = packet_set_ring(sk, &req_u, 0,
3690						    optname == PACKET_TX_RING);
3691		}
3692		release_sock(sk);
3693		return ret;
3694	}
3695	case PACKET_COPY_THRESH:
3696	{
3697		int val;
3698
3699		if (optlen != sizeof(val))
3700			return -EINVAL;
3701		if (copy_from_user(&val, optval, sizeof(val)))
3702			return -EFAULT;
3703
3704		pkt_sk(sk)->copy_thresh = val;
3705		return 0;
3706	}
3707	case PACKET_VERSION:
3708	{
3709		int val;
3710
3711		if (optlen != sizeof(val))
3712			return -EINVAL;
3713		if (copy_from_user(&val, optval, sizeof(val)))
3714			return -EFAULT;
3715		switch (val) {
3716		case TPACKET_V1:
3717		case TPACKET_V2:
3718		case TPACKET_V3:
3719			break;
3720		default:
3721			return -EINVAL;
3722		}
3723		lock_sock(sk);
3724		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3725			ret = -EBUSY;
3726		} else {
3727			po->tp_version = val;
3728			ret = 0;
3729		}
3730		release_sock(sk);
3731		return ret;
3732	}
3733	case PACKET_RESERVE:
3734	{
3735		unsigned int val;
3736
3737		if (optlen != sizeof(val))
3738			return -EINVAL;
3739		if (copy_from_user(&val, optval, sizeof(val)))
3740			return -EFAULT;
3741		if (val > INT_MAX)
3742			return -EINVAL;
3743		lock_sock(sk);
3744		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3745			ret = -EBUSY;
3746		} else {
3747			po->tp_reserve = val;
3748			ret = 0;
3749		}
3750		release_sock(sk);
3751		return ret;
3752	}
3753	case PACKET_LOSS:
3754	{
3755		unsigned int val;
3756
3757		if (optlen != sizeof(val))
3758			return -EINVAL;
3759		if (copy_from_user(&val, optval, sizeof(val)))
3760			return -EFAULT;
3761
3762		lock_sock(sk);
3763		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3764			ret = -EBUSY;
3765		} else {
3766			po->tp_loss = !!val;
3767			ret = 0;
3768		}
3769		release_sock(sk);
3770		return ret;
3771	}
3772	case PACKET_AUXDATA:
3773	{
3774		int val;
3775
3776		if (optlen < sizeof(val))
3777			return -EINVAL;
3778		if (copy_from_user(&val, optval, sizeof(val)))
3779			return -EFAULT;
3780
3781		lock_sock(sk);
3782		po->auxdata = !!val;
3783		release_sock(sk);
3784		return 0;
3785	}
3786	case PACKET_ORIGDEV:
3787	{
3788		int val;
3789
3790		if (optlen < sizeof(val))
3791			return -EINVAL;
3792		if (copy_from_user(&val, optval, sizeof(val)))
3793			return -EFAULT;
3794
3795		lock_sock(sk);
3796		po->origdev = !!val;
3797		release_sock(sk);
3798		return 0;
3799	}
3800	case PACKET_VNET_HDR:
3801	{
3802		int val;
3803
3804		if (sock->type != SOCK_RAW)
3805			return -EINVAL;
3806		if (optlen < sizeof(val))
3807			return -EINVAL;
3808		if (copy_from_user(&val, optval, sizeof(val)))
3809			return -EFAULT;
3810
3811		lock_sock(sk);
3812		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3813			ret = -EBUSY;
3814		} else {
3815			po->has_vnet_hdr = !!val;
3816			ret = 0;
3817		}
3818		release_sock(sk);
3819		return ret;
3820	}
3821	case PACKET_TIMESTAMP:
3822	{
3823		int val;
3824
3825		if (optlen != sizeof(val))
3826			return -EINVAL;
3827		if (copy_from_user(&val, optval, sizeof(val)))
3828			return -EFAULT;
3829
3830		po->tp_tstamp = val;
3831		return 0;
3832	}
3833	case PACKET_FANOUT:
3834	{
3835		int val;
3836
3837		if (optlen != sizeof(val))
3838			return -EINVAL;
3839		if (copy_from_user(&val, optval, sizeof(val)))
3840			return -EFAULT;
3841
3842		return fanout_add(sk, val & 0xffff, val >> 16);
3843	}
3844	case PACKET_FANOUT_DATA:
3845	{
3846		if (!po->fanout)
3847			return -EINVAL;
3848
3849		return fanout_set_data(po, optval, optlen);
3850	}
3851	case PACKET_IGNORE_OUTGOING:
3852	{
3853		int val;
3854
3855		if (optlen != sizeof(val))
3856			return -EINVAL;
3857		if (copy_from_user(&val, optval, sizeof(val)))
3858			return -EFAULT;
3859		if (val < 0 || val > 1)
3860			return -EINVAL;
3861
3862		po->prot_hook.ignore_outgoing = !!val;
3863		return 0;
3864	}
3865	case PACKET_TX_HAS_OFF:
3866	{
3867		unsigned int val;
3868
3869		if (optlen != sizeof(val))
3870			return -EINVAL;
3871		if (copy_from_user(&val, optval, sizeof(val)))
3872			return -EFAULT;
3873
3874		lock_sock(sk);
3875		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3876			ret = -EBUSY;
3877		} else {
3878			po->tp_tx_has_off = !!val;
3879			ret = 0;
3880		}
3881		release_sock(sk);
3882		return ret;
3883	}
3884	case PACKET_QDISC_BYPASS:
3885	{
3886		int val;
3887
3888		if (optlen != sizeof(val))
3889			return -EINVAL;
3890		if (copy_from_user(&val, optval, sizeof(val)))
3891			return -EFAULT;
3892
3893		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3894		return 0;
3895	}
3896	default:
3897		return -ENOPROTOOPT;
3898	}
3899}
3900
3901static int packet_getsockopt(struct socket *sock, int level, int optname,
3902			     char __user *optval, int __user *optlen)
3903{
3904	int len;
3905	int val, lv = sizeof(val);
3906	struct sock *sk = sock->sk;
3907	struct packet_sock *po = pkt_sk(sk);
3908	void *data = &val;
3909	union tpacket_stats_u st;
3910	struct tpacket_rollover_stats rstats;
3911	int drops;
3912
3913	if (level != SOL_PACKET)
3914		return -ENOPROTOOPT;
3915
3916	if (get_user(len, optlen))
3917		return -EFAULT;
3918
3919	if (len < 0)
3920		return -EINVAL;
3921
3922	switch (optname) {
3923	case PACKET_STATISTICS:
3924		spin_lock_bh(&sk->sk_receive_queue.lock);
3925		memcpy(&st, &po->stats, sizeof(st));
3926		memset(&po->stats, 0, sizeof(po->stats));
3927		spin_unlock_bh(&sk->sk_receive_queue.lock);
3928		drops = atomic_xchg(&po->tp_drops, 0);
3929
3930		if (po->tp_version == TPACKET_V3) {
3931			lv = sizeof(struct tpacket_stats_v3);
3932			st.stats3.tp_drops = drops;
3933			st.stats3.tp_packets += drops;
3934			data = &st.stats3;
3935		} else {
3936			lv = sizeof(struct tpacket_stats);
3937			st.stats1.tp_drops = drops;
3938			st.stats1.tp_packets += drops;
3939			data = &st.stats1;
3940		}
3941
3942		break;
3943	case PACKET_AUXDATA:
3944		val = po->auxdata;
3945		break;
3946	case PACKET_ORIGDEV:
3947		val = po->origdev;
3948		break;
3949	case PACKET_VNET_HDR:
3950		val = po->has_vnet_hdr;
3951		break;
3952	case PACKET_VERSION:
3953		val = po->tp_version;
3954		break;
3955	case PACKET_HDRLEN:
3956		if (len > sizeof(int))
3957			len = sizeof(int);
3958		if (len < sizeof(int))
3959			return -EINVAL;
3960		if (copy_from_user(&val, optval, len))
3961			return -EFAULT;
3962		switch (val) {
3963		case TPACKET_V1:
3964			val = sizeof(struct tpacket_hdr);
3965			break;
3966		case TPACKET_V2:
3967			val = sizeof(struct tpacket2_hdr);
3968			break;
3969		case TPACKET_V3:
3970			val = sizeof(struct tpacket3_hdr);
3971			break;
3972		default:
3973			return -EINVAL;
3974		}
3975		break;
3976	case PACKET_RESERVE:
3977		val = po->tp_reserve;
3978		break;
3979	case PACKET_LOSS:
3980		val = po->tp_loss;
3981		break;
3982	case PACKET_TIMESTAMP:
3983		val = po->tp_tstamp;
3984		break;
3985	case PACKET_FANOUT:
3986		val = (po->fanout ?
3987		       ((u32)po->fanout->id |
3988			((u32)po->fanout->type << 16) |
3989			((u32)po->fanout->flags << 24)) :
3990		       0);
3991		break;
3992	case PACKET_IGNORE_OUTGOING:
3993		val = po->prot_hook.ignore_outgoing;
3994		break;
3995	case PACKET_ROLLOVER_STATS:
3996		if (!po->rollover)
3997			return -EINVAL;
3998		rstats.tp_all = atomic_long_read(&po->rollover->num);
3999		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4000		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4001		data = &rstats;
4002		lv = sizeof(rstats);
4003		break;
4004	case PACKET_TX_HAS_OFF:
4005		val = po->tp_tx_has_off;
4006		break;
4007	case PACKET_QDISC_BYPASS:
4008		val = packet_use_direct_xmit(po);
4009		break;
4010	default:
4011		return -ENOPROTOOPT;
4012	}
4013
4014	if (len > lv)
4015		len = lv;
4016	if (put_user(len, optlen))
4017		return -EFAULT;
4018	if (copy_to_user(optval, data, len))
4019		return -EFAULT;
4020	return 0;
4021}
4022
4023
4024#ifdef CONFIG_COMPAT
4025static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
4026				    char __user *optval, unsigned int optlen)
4027{
4028	struct packet_sock *po = pkt_sk(sock->sk);
4029
4030	if (level != SOL_PACKET)
4031		return -ENOPROTOOPT;
4032
4033	if (optname == PACKET_FANOUT_DATA &&
4034	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
4035		optval = (char __user *)get_compat_bpf_fprog(optval);
4036		if (!optval)
4037			return -EFAULT;
4038		optlen = sizeof(struct sock_fprog);
4039	}
4040
4041	return packet_setsockopt(sock, level, optname, optval, optlen);
4042}
4043#endif
4044
4045static int packet_notifier(struct notifier_block *this,
4046			   unsigned long msg, void *ptr)
4047{
4048	struct sock *sk;
4049	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4050	struct net *net = dev_net(dev);
4051
4052	rcu_read_lock();
4053	sk_for_each_rcu(sk, &net->packet.sklist) {
4054		struct packet_sock *po = pkt_sk(sk);
4055
4056		switch (msg) {
4057		case NETDEV_UNREGISTER:
4058			if (po->mclist)
4059				packet_dev_mclist_delete(dev, &po->mclist);
4060			/* fallthrough */
4061
4062		case NETDEV_DOWN:
4063			if (dev->ifindex == po->ifindex) {
4064				spin_lock(&po->bind_lock);
4065				if (po->running) {
4066					__unregister_prot_hook(sk, false);
4067					sk->sk_err = ENETDOWN;
4068					if (!sock_flag(sk, SOCK_DEAD))
4069						sk->sk_error_report(sk);
4070				}
4071				if (msg == NETDEV_UNREGISTER) {
4072					packet_cached_dev_reset(po);
4073					po->ifindex = -1;
4074					if (po->prot_hook.dev)
4075						dev_put(po->prot_hook.dev);
4076					po->prot_hook.dev = NULL;
4077				}
4078				spin_unlock(&po->bind_lock);
4079			}
4080			break;
4081		case NETDEV_UP:
4082			if (dev->ifindex == po->ifindex) {
4083				spin_lock(&po->bind_lock);
4084				if (po->num)
4085					register_prot_hook(sk);
4086				spin_unlock(&po->bind_lock);
4087			}
4088			break;
4089		}
4090	}
4091	rcu_read_unlock();
4092	return NOTIFY_DONE;
4093}
4094
4095
4096static int packet_ioctl(struct socket *sock, unsigned int cmd,
4097			unsigned long arg)
4098{
4099	struct sock *sk = sock->sk;
4100
4101	switch (cmd) {
4102	case SIOCOUTQ:
4103	{
4104		int amount = sk_wmem_alloc_get(sk);
4105
4106		return put_user(amount, (int __user *)arg);
4107	}
4108	case SIOCINQ:
4109	{
4110		struct sk_buff *skb;
4111		int amount = 0;
4112
4113		spin_lock_bh(&sk->sk_receive_queue.lock);
4114		skb = skb_peek(&sk->sk_receive_queue);
4115		if (skb)
4116			amount = skb->len;
4117		spin_unlock_bh(&sk->sk_receive_queue.lock);
4118		return put_user(amount, (int __user *)arg);
4119	}
4120#ifdef CONFIG_INET
4121	case SIOCADDRT:
4122	case SIOCDELRT:
4123	case SIOCDARP:
4124	case SIOCGARP:
4125	case SIOCSARP:
4126	case SIOCGIFADDR:
4127	case SIOCSIFADDR:
4128	case SIOCGIFBRDADDR:
4129	case SIOCSIFBRDADDR:
4130	case SIOCGIFNETMASK:
4131	case SIOCSIFNETMASK:
4132	case SIOCGIFDSTADDR:
4133	case SIOCSIFDSTADDR:
4134	case SIOCSIFFLAGS:
4135		return inet_dgram_ops.ioctl(sock, cmd, arg);
4136#endif
4137
4138	default:
4139		return -ENOIOCTLCMD;
4140	}
4141	return 0;
4142}
4143
4144static __poll_t packet_poll(struct file *file, struct socket *sock,
4145				poll_table *wait)
4146{
4147	struct sock *sk = sock->sk;
4148	struct packet_sock *po = pkt_sk(sk);
4149	__poll_t mask = datagram_poll(file, sock, wait);
4150
4151	spin_lock_bh(&sk->sk_receive_queue.lock);
4152	if (po->rx_ring.pg_vec) {
4153		if (!packet_previous_rx_frame(po, &po->rx_ring,
4154			TP_STATUS_KERNEL))
4155			mask |= EPOLLIN | EPOLLRDNORM;
4156	}
4157	packet_rcv_try_clear_pressure(po);
4158	spin_unlock_bh(&sk->sk_receive_queue.lock);
4159	spin_lock_bh(&sk->sk_write_queue.lock);
4160	if (po->tx_ring.pg_vec) {
4161		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4162			mask |= EPOLLOUT | EPOLLWRNORM;
4163	}
4164	spin_unlock_bh(&sk->sk_write_queue.lock);
4165	return mask;
4166}
4167
4168
4169/* Dirty? Well, I still have not learned a better way to account
4170 * for user mmaps.
4171 */
4172
4173static void packet_mm_open(struct vm_area_struct *vma)
4174{
4175	struct file *file = vma->vm_file;
4176	struct socket *sock = file->private_data;
4177	struct sock *sk = sock->sk;
4178
4179	if (sk)
4180		atomic_inc(&pkt_sk(sk)->mapped);
4181}
4182
4183static void packet_mm_close(struct vm_area_struct *vma)
4184{
4185	struct file *file = vma->vm_file;
4186	struct socket *sock = file->private_data;
4187	struct sock *sk = sock->sk;
4188
4189	if (sk)
4190		atomic_dec(&pkt_sk(sk)->mapped);
4191}
4192
4193static const struct vm_operations_struct packet_mmap_ops = {
4194	.open	=	packet_mm_open,
4195	.close	=	packet_mm_close,
4196};
4197
4198static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4199			unsigned int len)
4200{
4201	int i;
4202
4203	for (i = 0; i < len; i++) {
4204		if (likely(pg_vec[i].buffer)) {
4205			if (is_vmalloc_addr(pg_vec[i].buffer))
4206				vfree(pg_vec[i].buffer);
4207			else
4208				free_pages((unsigned long)pg_vec[i].buffer,
4209					   order);
4210			pg_vec[i].buffer = NULL;
4211		}
4212	}
4213	kfree(pg_vec);
4214}
4215
4216static char *alloc_one_pg_vec_page(unsigned long order)
4217{
4218	char *buffer;
4219	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4220			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4221
4222	buffer = (char *) __get_free_pages(gfp_flags, order);
4223	if (buffer)
4224		return buffer;
4225
4226	/* __get_free_pages failed, fall back to vmalloc */
4227	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4228	if (buffer)
4229		return buffer;
4230
4231	/* vmalloc failed, let's dig into swap here */
4232	gfp_flags &= ~__GFP_NORETRY;
4233	buffer = (char *) __get_free_pages(gfp_flags, order);
4234	if (buffer)
4235		return buffer;
4236
4237	/* complete and utter failure */
4238	return NULL;
4239}
4240
4241static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4242{
4243	unsigned int block_nr = req->tp_block_nr;
4244	struct pgv *pg_vec;
4245	int i;
4246
4247	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4248	if (unlikely(!pg_vec))
4249		goto out;
4250
4251	for (i = 0; i < block_nr; i++) {
4252		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4253		if (unlikely(!pg_vec[i].buffer))
4254			goto out_free_pgvec;
4255	}
4256
4257out:
4258	return pg_vec;
4259
4260out_free_pgvec:
4261	free_pg_vec(pg_vec, order, block_nr);
4262	pg_vec = NULL;
4263	goto out;
4264}
4265
4266static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4267		int closing, int tx_ring)
4268{
4269	struct pgv *pg_vec = NULL;
4270	struct packet_sock *po = pkt_sk(sk);
4271	int was_running, order = 0;
4272	struct packet_ring_buffer *rb;
4273	struct sk_buff_head *rb_queue;
4274	__be16 num;
4275	int err = -EINVAL;
4276	/* Alias added to keep code churn minimal */
4277	struct tpacket_req *req = &req_u->req;
4278
4279	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4280	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4281
4282	err = -EBUSY;
4283	if (!closing) {
4284		if (atomic_read(&po->mapped))
4285			goto out;
4286		if (packet_read_pending(rb))
4287			goto out;
4288	}
4289
4290	if (req->tp_block_nr) {
4291		unsigned int min_frame_size;
4292
4293		/* Sanity tests and some calculations */
4294		err = -EBUSY;
4295		if (unlikely(rb->pg_vec))
4296			goto out;
4297
4298		switch (po->tp_version) {
4299		case TPACKET_V1:
4300			po->tp_hdrlen = TPACKET_HDRLEN;
4301			break;
4302		case TPACKET_V2:
4303			po->tp_hdrlen = TPACKET2_HDRLEN;
4304			break;
4305		case TPACKET_V3:
4306			po->tp_hdrlen = TPACKET3_HDRLEN;
4307			break;
4308		}
4309
4310		err = -EINVAL;
4311		if (unlikely((int)req->tp_block_size <= 0))
4312			goto out;
4313		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4314			goto out;
4315		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4316		if (po->tp_version >= TPACKET_V3 &&
4317		    req->tp_block_size <
4318		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4319			goto out;
4320		if (unlikely(req->tp_frame_size < min_frame_size))
4321			goto out;
4322		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4323			goto out;
4324
4325		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4326		if (unlikely(rb->frames_per_block == 0))
4327			goto out;
4328		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4329			goto out;
4330		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4331					req->tp_frame_nr))
4332			goto out;
4333
4334		err = -ENOMEM;
4335		order = get_order(req->tp_block_size);
4336		pg_vec = alloc_pg_vec(req, order);
4337		if (unlikely(!pg_vec))
4338			goto out;
4339		switch (po->tp_version) {
4340		case TPACKET_V3:
4341			/* Block transmit is not supported yet */
4342			if (!tx_ring) {
4343				init_prb_bdqc(po, rb, pg_vec, req_u);
4344			} else {
4345				struct tpacket_req3 *req3 = &req_u->req3;
4346
4347				if (req3->tp_retire_blk_tov ||
4348				    req3->tp_sizeof_priv ||
4349				    req3->tp_feature_req_word) {
4350					err = -EINVAL;
4351					goto out_free_pg_vec;
4352				}
4353			}
4354			break;
4355		default:
4356			break;
4357		}
4358	}
4359	/* Done */
4360	else {
4361		err = -EINVAL;
4362		if (unlikely(req->tp_frame_nr))
4363			goto out;
4364	}
4365
4366
4367	/* Detach socket from network */
4368	spin_lock(&po->bind_lock);
4369	was_running = po->running;
4370	num = po->num;
4371	if (was_running) {
4372		po->num = 0;
4373		__unregister_prot_hook(sk, false);
4374	}
4375	spin_unlock(&po->bind_lock);
4376
4377	synchronize_net();
4378
4379	err = -EBUSY;
4380	mutex_lock(&po->pg_vec_lock);
4381	if (closing || atomic_read(&po->mapped) == 0) {
4382		err = 0;
4383		spin_lock_bh(&rb_queue->lock);
4384		swap(rb->pg_vec, pg_vec);
4385		rb->frame_max = (req->tp_frame_nr - 1);
4386		rb->head = 0;
4387		rb->frame_size = req->tp_frame_size;
4388		spin_unlock_bh(&rb_queue->lock);
4389
4390		swap(rb->pg_vec_order, order);
4391		swap(rb->pg_vec_len, req->tp_block_nr);
4392
4393		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4394		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4395						tpacket_rcv : packet_rcv;
4396		skb_queue_purge(rb_queue);
4397		if (atomic_read(&po->mapped))
4398			pr_err("packet_mmap: vma is busy: %d\n",
4399			       atomic_read(&po->mapped));
4400	}
4401	mutex_unlock(&po->pg_vec_lock);
4402
4403	spin_lock(&po->bind_lock);
4404	if (was_running) {
4405		po->num = num;
4406		register_prot_hook(sk);
4407	}
4408	spin_unlock(&po->bind_lock);
4409	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4410		/* Because we don't support block-based V3 on tx-ring */
4411		if (!tx_ring)
4412			prb_shutdown_retire_blk_timer(po, rb_queue);
4413	}
4414
4415out_free_pg_vec:
4416	if (pg_vec)
4417		free_pg_vec(pg_vec, order, req->tp_block_nr);
4418out:
4419	return err;
4420}
4421
4422static int packet_mmap(struct file *file, struct socket *sock,
4423		struct vm_area_struct *vma)
4424{
4425	struct sock *sk = sock->sk;
4426	struct packet_sock *po = pkt_sk(sk);
4427	unsigned long size, expected_size;
4428	struct packet_ring_buffer *rb;
4429	unsigned long start;
4430	int err = -EINVAL;
4431	int i;
4432
4433	if (vma->vm_pgoff)
4434		return -EINVAL;
4435
4436	mutex_lock(&po->pg_vec_lock);
4437
4438	expected_size = 0;
4439	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4440		if (rb->pg_vec) {
4441			expected_size += rb->pg_vec_len
4442						* rb->pg_vec_pages
4443						* PAGE_SIZE;
4444		}
4445	}
4446
4447	if (expected_size == 0)
4448		goto out;
4449
4450	size = vma->vm_end - vma->vm_start;
4451	if (size != expected_size)
4452		goto out;
4453
4454	start = vma->vm_start;
4455	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4456		if (rb->pg_vec == NULL)
4457			continue;
4458
4459		for (i = 0; i < rb->pg_vec_len; i++) {
4460			struct page *page;
4461			void *kaddr = rb->pg_vec[i].buffer;
4462			int pg_num;
4463
4464			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4465				page = pgv_to_page(kaddr);
4466				err = vm_insert_page(vma, start, page);
4467				if (unlikely(err))
4468					goto out;
4469				start += PAGE_SIZE;
4470				kaddr += PAGE_SIZE;
4471			}
4472		}
4473	}
4474
4475	atomic_inc(&po->mapped);
4476	vma->vm_ops = &packet_mmap_ops;
4477	err = 0;
4478
4479out:
4480	mutex_unlock(&po->pg_vec_lock);
4481	return err;
4482}
4483
4484static const struct proto_ops packet_ops_spkt = {
4485	.family =	PF_PACKET,
4486	.owner =	THIS_MODULE,
4487	.release =	packet_release,
4488	.bind =		packet_bind_spkt,
4489	.connect =	sock_no_connect,
4490	.socketpair =	sock_no_socketpair,
4491	.accept =	sock_no_accept,
4492	.getname =	packet_getname_spkt,
4493	.poll =		datagram_poll,
4494	.ioctl =	packet_ioctl,
4495	.gettstamp =	sock_gettstamp,
4496	.listen =	sock_no_listen,
4497	.shutdown =	sock_no_shutdown,
4498	.setsockopt =	sock_no_setsockopt,
4499	.getsockopt =	sock_no_getsockopt,
4500	.sendmsg =	packet_sendmsg_spkt,
4501	.recvmsg =	packet_recvmsg,
4502	.mmap =		sock_no_mmap,
4503	.sendpage =	sock_no_sendpage,
4504};
4505
4506static const struct proto_ops packet_ops = {
4507	.family =	PF_PACKET,
4508	.owner =	THIS_MODULE,
4509	.release =	packet_release,
4510	.bind =		packet_bind,
4511	.connect =	sock_no_connect,
4512	.socketpair =	sock_no_socketpair,
4513	.accept =	sock_no_accept,
4514	.getname =	packet_getname,
4515	.poll =		packet_poll,
4516	.ioctl =	packet_ioctl,
4517	.gettstamp =	sock_gettstamp,
4518	.listen =	sock_no_listen,
4519	.shutdown =	sock_no_shutdown,
4520	.setsockopt =	packet_setsockopt,
4521	.getsockopt =	packet_getsockopt,
4522#ifdef CONFIG_COMPAT
4523	.compat_setsockopt = compat_packet_setsockopt,
4524#endif
4525	.sendmsg =	packet_sendmsg,
4526	.recvmsg =	packet_recvmsg,
4527	.mmap =		packet_mmap,
4528	.sendpage =	sock_no_sendpage,
4529};
4530
4531static const struct net_proto_family packet_family_ops = {
4532	.family =	PF_PACKET,
4533	.create =	packet_create,
4534	.owner	=	THIS_MODULE,
4535};
4536
4537static struct notifier_block packet_netdev_notifier = {
4538	.notifier_call =	packet_notifier,
4539};
4540
4541#ifdef CONFIG_PROC_FS
4542
4543static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4544	__acquires(RCU)
4545{
4546	struct net *net = seq_file_net(seq);
4547
4548	rcu_read_lock();
4549	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4550}
4551
4552static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4553{
4554	struct net *net = seq_file_net(seq);
4555	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4556}
4557
4558static void packet_seq_stop(struct seq_file *seq, void *v)
4559	__releases(RCU)
4560{
4561	rcu_read_unlock();
4562}
4563
4564static int packet_seq_show(struct seq_file *seq, void *v)
4565{
4566	if (v == SEQ_START_TOKEN)
4567		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4568	else {
4569		struct sock *s = sk_entry(v);
4570		const struct packet_sock *po = pkt_sk(s);
4571
4572		seq_printf(seq,
4573			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4574			   s,
4575			   refcount_read(&s->sk_refcnt),
4576			   s->sk_type,
4577			   ntohs(po->num),
4578			   po->ifindex,
4579			   po->running,
4580			   atomic_read(&s->sk_rmem_alloc),
4581			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4582			   sock_i_ino(s));
4583	}
4584
4585	return 0;
4586}
4587
4588static const struct seq_operations packet_seq_ops = {
4589	.start	= packet_seq_start,
4590	.next	= packet_seq_next,
4591	.stop	= packet_seq_stop,
4592	.show	= packet_seq_show,
4593};
4594#endif
4595
4596static int __net_init packet_net_init(struct net *net)
4597{
4598	mutex_init(&net->packet.sklist_lock);
4599	INIT_HLIST_HEAD(&net->packet.sklist);
4600
4601	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4602			sizeof(struct seq_net_private)))
4603		return -ENOMEM;
4604
4605	return 0;
4606}
4607
4608static void __net_exit packet_net_exit(struct net *net)
4609{
4610	remove_proc_entry("packet", net->proc_net);
4611	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4612}
4613
4614static struct pernet_operations packet_net_ops = {
4615	.init = packet_net_init,
4616	.exit = packet_net_exit,
4617};
4618
4619
4620static void __exit packet_exit(void)
4621{
4622	unregister_netdevice_notifier(&packet_netdev_notifier);
4623	unregister_pernet_subsys(&packet_net_ops);
4624	sock_unregister(PF_PACKET);
4625	proto_unregister(&packet_proto);
4626}
4627
4628static int __init packet_init(void)
4629{
4630	int rc;
4631
4632	rc = proto_register(&packet_proto, 0);
4633	if (rc)
4634		goto out;
4635	rc = sock_register(&packet_family_ops);
4636	if (rc)
4637		goto out_proto;
4638	rc = register_pernet_subsys(&packet_net_ops);
4639	if (rc)
4640		goto out_sock;
4641	rc = register_netdevice_notifier(&packet_netdev_notifier);
4642	if (rc)
4643		goto out_pernet;
4644
4645	return 0;
4646
4647out_pernet:
4648	unregister_pernet_subsys(&packet_net_ops);
4649out_sock:
4650	sock_unregister(PF_PACKET);
4651out_proto:
4652	proto_unregister(&packet_proto);
4653out:
4654	return rc;
4655}
4656
4657module_init(packet_init);
4658module_exit(packet_exit);
4659MODULE_LICENSE("GPL");
4660MODULE_ALIAS_NETPROTO(PF_PACKET);