   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   4 *		operating system.  INET is implemented using the  BSD Socket
   5 *		interface as the means of communication with the user level.
   6 *
   7 *		PACKET - implements raw packet sockets.
   8 *
   9 * Authors:	Ross Biro
  10 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  11 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  12 *
  13 * Fixes:
  14 *		Alan Cox	:	verify_area() now used correctly
  15 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  16 *		Alan Cox	:	tidied skbuff lists.
  17 *		Alan Cox	:	Now uses generic datagram routines I
  18 *					added. Also fixed the peek/read crash
  19 *					from all old Linux datagram code.
  20 *		Alan Cox	:	Uses the improved datagram code.
  21 *		Alan Cox	:	Added NULL's for socket options.
  22 *		Alan Cox	:	Re-commented the code.
  23 *		Alan Cox	:	Use new kernel side addressing
  24 *		Rob Janssen	:	Correct MTU usage.
  25 *		Dave Platt	:	Counter leaks caused by incorrect
  26 *					interrupt locking and some slightly
  27 *					dubious gcc output. Can you read
  28 *					compiler: it said _VOLATILE_
  29 *	Richard Kooijman	:	Timestamp fixes.
  30 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  31 *		Alan Cox	:	sendmsg/recvmsg support.
  32 *		Alan Cox	:	Protocol setting support
  33 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  34 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  35 *	Michal Ostrowski        :       Module initialization cleanup.
  36 *         Ulises Alonso        :       Frame number limit removal and
  37 *                                      packet_set_ring memory leak.
  38 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  39 *					The convention is that longer addresses
  40 *					will simply extend the hardware address
  41 *					byte arrays at the end of sockaddr_ll
  42 *					and packet_mreq.
  43 *		Johann Baudy	:	Added TX RING.
  44 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  45 *					layer.
  46 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  47 */
  48
  49#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  50
  51#include <linux/ethtool.h>
  52#include <linux/filter.h>
  53#include <linux/types.h>
  54#include <linux/mm.h>
  55#include <linux/capability.h>
  56#include <linux/fcntl.h>
  57#include <linux/socket.h>
  58#include <linux/in.h>
  59#include <linux/inet.h>
  60#include <linux/netdevice.h>
  61#include <linux/if_packet.h>
  62#include <linux/wireless.h>
  63#include <linux/kernel.h>
  64#include <linux/kmod.h>
  65#include <linux/slab.h>
  66#include <linux/vmalloc.h>
  67#include <net/net_namespace.h>
  68#include <net/ip.h>
  69#include <net/protocol.h>
  70#include <linux/skbuff.h>
  71#include <net/sock.h>
  72#include <linux/errno.h>
  73#include <linux/timer.h>
  74#include <linux/uaccess.h>
  75#include <asm/ioctls.h>
  76#include <asm/page.h>
  77#include <asm/cacheflush.h>
  78#include <asm/io.h>
  79#include <linux/proc_fs.h>
  80#include <linux/seq_file.h>
  81#include <linux/poll.h>
  82#include <linux/module.h>
  83#include <linux/init.h>
  84#include <linux/mutex.h>
  85#include <linux/if_vlan.h>
  86#include <linux/virtio_net.h>
  87#include <linux/errqueue.h>
  88#include <linux/net_tstamp.h>
  89#include <linux/percpu.h>
  90#ifdef CONFIG_INET
  91#include <net/inet_common.h>
  92#endif
  93#include <linux/bpf.h>
  94#include <net/compat.h>
  95#include <linux/netfilter_netdev.h>
  96
  97#include "internal.h"
  98
  99/*
 100   Assumptions:
 101   - If the device has no dev->header_ops->create, there is no LL header
 102     visible above the device. In this case, its hard_header_len should be 0.
 103     The device may prepend its own header internally. In this case, its
 104     needed_headroom should be set to the space needed for it to add its
 105     internal header.
 106     For example, a WiFi driver pretending to be an Ethernet driver should
 107     set its hard_header_len to be the Ethernet header length, and set its
 108     needed_headroom to be (the real WiFi header length - the fake Ethernet
 109     header length).
 110   - Packet sockets receive packets with the ll header already pulled,
 111     so the SOCK_RAW path must push it back.
 112
 113On receive:
 114-----------
 115
 116Incoming, dev_has_header(dev) == true
 117   mac_header -> ll header
 118   data       -> data
 119
 120Outgoing, dev_has_header(dev) == true
 121   mac_header -> ll header
 122   data       -> ll header
 123
 124Incoming, dev_has_header(dev) == false
 125   mac_header -> data
 126     However drivers often make it point to the ll header.
 127     This is incorrect because the ll header should be invisible to us.
 128   data       -> data
 129
 130Outgoing, dev_has_header(dev) == false
 131   mac_header -> data. ll header is invisible to us.
 132   data       -> data
 133
 134In summary
 135  If dev_has_header(dev) == false we are unable to restore the ll header,
 136    because it is invisible to us.
 137
 138
 139On transmit:
 140------------
 141
 142dev_has_header(dev) == true
 143   mac_header -> ll header
 144   data       -> ll header
 145
 146dev_has_header(dev) == false (ll header is invisible to us)
 147   mac_header -> data
 148   data       -> data
 149
 150   We should set network_header on output to the correct position;
 151   the packet classifier depends on it.
 152 */
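/*
 * Illustrative user-space sketch (not part of the kernel): the header
 * positions above are what determine what user space sees.  A SOCK_RAW
 * packet socket is handed frames including the link-layer header, while
 * SOCK_DGRAM delivers the payload with a cooked sockaddr_ll describing
 * the removed header.  "eth0" below is a hypothetical device name.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *	#include <net/if.h>
 *	#include <arpa/inet.h>
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(raw, (struct sockaddr *)&sll, sizeof(sll));
 *
 * recvfrom() on 'raw' then returns data starting at the link-layer header;
 * on 'dgrm' it starts at the network header.
 */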
 153
 154/* Private packet socket structures. */
 155
 156/* identical to struct packet_mreq except it has
 157 * a longer address field.
 158 */
 159struct packet_mreq_max {
 160	int		mr_ifindex;
 161	unsigned short	mr_type;
 162	unsigned short	mr_alen;
 163	unsigned char	mr_address[MAX_ADDR_LEN];
 164};
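/*
 * For reference, a user-space sketch (not kernel code): packet_mreq_max is
 * the kernel-side superset of the uapi struct packet_mreq used with
 * PACKET_ADD_MEMBERSHIP / PACKET_DROP_MEMBERSHIP.  "eth0" and fd are
 * hypothetical.  E.g. putting an interface into promiscuous mode for this
 * socket only:
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <net/if.h>
 *
 *	struct packet_mreq mreq;
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = if_nametoindex("eth0");
 *	mreq.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * Longer hardware addresses simply extend mr_address[], which is why the
 * kernel copy uses MAX_ADDR_LEN.
 */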
 165
 166union tpacket_uhdr {
 167	struct tpacket_hdr  *h1;
 168	struct tpacket2_hdr *h2;
 169	struct tpacket3_hdr *h3;
 170	void *raw;
 171};
 172
 173static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 174		int closing, int tx_ring);
 175
 176#define V3_ALIGNMENT	(8)
 177
 178#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 179
 180#define BLK_PLUS_PRIV(sz_of_priv) \
 181	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 182
 183#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 184#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 185#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 186#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 187#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 188#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
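/*
 * Worked example (illustrative): every TPACKET_V3 offset is rounded up to
 * V3_ALIGNMENT (8 bytes).  With tp_sizeof_priv == 13,
 * BLK_PLUS_PRIV(13) == BLK_HDR_LEN + ALIGN(13, 8) == BLK_HDR_LEN + 16,
 * so the first packet in a block starts 16 bytes after the (aligned)
 * block descriptor.
 */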
 189
 190struct packet_sock;
 191static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 192		       struct packet_type *pt, struct net_device *orig_dev);
 193
 194static void *packet_previous_frame(struct packet_sock *po,
 195		struct packet_ring_buffer *rb,
 196		int status);
 197static void packet_increment_head(struct packet_ring_buffer *buff);
 198static int prb_curr_blk_in_use(struct tpacket_block_desc *);
 199static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 200			struct packet_sock *);
 201static void prb_retire_current_block(struct tpacket_kbdq_core *,
 202		struct packet_sock *, unsigned int status);
 203static int prb_queue_frozen(struct tpacket_kbdq_core *);
 204static void prb_open_block(struct tpacket_kbdq_core *,
 205		struct tpacket_block_desc *);
 206static void prb_retire_rx_blk_timer_expired(struct timer_list *);
 207static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 208static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 209static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 210		struct tpacket3_hdr *);
 211static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 212		struct tpacket3_hdr *);
 213static void packet_flush_mclist(struct sock *sk);
 214static u16 packet_pick_tx_queue(struct sk_buff *skb);
 215
 216struct packet_skb_cb {
 217	union {
 218		struct sockaddr_pkt pkt;
 219		union {
 220			/* Trick: alias skb original length with
 221			 * ll.sll_family and ll.protocol in order
 222			 * to save room.
 223			 */
 224			unsigned int origlen;
 225			struct sockaddr_ll ll;
 226		};
 227	} sa;
 228};
 229
 230#define vio_le() virtio_legacy_is_little_endian()
 231
 232#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 233
 234#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 235#define GET_PBLOCK_DESC(x, bid)	\
 236	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 237#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 238	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 239#define GET_NEXT_PRB_BLK_NUM(x) \
 240	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 241	((x)->kactive_blk_num+1) : 0)
 242
 243static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 244static void __fanout_link(struct sock *sk, struct packet_sock *po);
 245
 246#ifdef CONFIG_NETFILTER_EGRESS
 247static noinline struct sk_buff *nf_hook_direct_egress(struct sk_buff *skb)
 248{
 249	struct sk_buff *next, *head = NULL, *tail;
 250	int rc;
 251
 252	rcu_read_lock();
 253	for (; skb != NULL; skb = next) {
 254		next = skb->next;
 255		skb_mark_not_on_list(skb);
 256
 257		if (!nf_hook_egress(skb, &rc, skb->dev))
 258			continue;
 259
 260		if (!head)
 261			head = skb;
 262		else
 263			tail->next = skb;
 264
 265		tail = skb;
 266	}
 267	rcu_read_unlock();
 268
 269	return head;
 270}
 271#endif
 272
 273static int packet_xmit(const struct packet_sock *po, struct sk_buff *skb)
 274{
 275	if (!packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS))
 276		return dev_queue_xmit(skb);
 277
 278#ifdef CONFIG_NETFILTER_EGRESS
 279	if (nf_hook_egress_active()) {
 280		skb = nf_hook_direct_egress(skb);
 281		if (!skb)
 282			return NET_XMIT_DROP;
 283	}
 284#endif
 285	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
 286}
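/*
 * User-space side of the QDISC_BYPASS branch above, as a hedged sketch
 * (fd is a hypothetical packet socket): setting PACKET_QDISC_BYPASS makes
 * TX from this socket go through dev_direct_xmit() instead of the qdisc
 * layer.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	int one = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &one, sizeof(one));
 *
 * Packets sent this way are invisible to traffic shaping and may be
 * silently dropped if the device queue is busy.
 */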
 287
 288static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 289{
 290	struct net_device *dev;
 291
 292	rcu_read_lock();
 293	dev = rcu_dereference(po->cached_dev);
 294	dev_hold(dev);
 295	rcu_read_unlock();
 296
 297	return dev;
 298}
 299
 300static void packet_cached_dev_assign(struct packet_sock *po,
 301				     struct net_device *dev)
 302{
 303	rcu_assign_pointer(po->cached_dev, dev);
 304}
 305
 306static void packet_cached_dev_reset(struct packet_sock *po)
 307{
 308	RCU_INIT_POINTER(po->cached_dev, NULL);
 309}
 310
 311static u16 packet_pick_tx_queue(struct sk_buff *skb)
 312{
 313	struct net_device *dev = skb->dev;
 314	const struct net_device_ops *ops = dev->netdev_ops;
 315	int cpu = raw_smp_processor_id();
 316	u16 queue_index;
 317
 318#ifdef CONFIG_XPS
 319	skb->sender_cpu = cpu + 1;
 320#endif
 321	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 322	if (ops->ndo_select_queue) {
 323		queue_index = ops->ndo_select_queue(dev, skb, NULL);
 324		queue_index = netdev_cap_txqueue(dev, queue_index);
 325	} else {
 326		queue_index = netdev_pick_tx(dev, skb, NULL);
 327	}
 328
 329	return queue_index;
 330}
 331
 332/* __register_prot_hook must be invoked through register_prot_hook
 333 * or from a context in which asynchronous accesses to the packet
 334 * socket is not possible (packet_create()).
 335 */
 336static void __register_prot_hook(struct sock *sk)
 337{
 338	struct packet_sock *po = pkt_sk(sk);
 339
 340	if (!packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
 341		if (po->fanout)
 342			__fanout_link(sk, po);
 343		else
 344			dev_add_pack(&po->prot_hook);
 345
 346		sock_hold(sk);
 347		packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 1);
 348	}
 349}
 350
 351static void register_prot_hook(struct sock *sk)
 352{
 353	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
 354	__register_prot_hook(sk);
 355}
 356
 357/* If the sync parameter is true, we will temporarily drop
 358 * the po->bind_lock and do a synchronize_net to make sure no
 359 * asynchronous packet processing paths still refer to the elements
 360 * of po->prot_hook.  If the sync parameter is false, it is the
 361 * callers responsibility to take care of this.
 362 */
 363static void __unregister_prot_hook(struct sock *sk, bool sync)
 364{
 365	struct packet_sock *po = pkt_sk(sk);
 366
 367	lockdep_assert_held_once(&po->bind_lock);
 368
 369	packet_sock_flag_set(po, PACKET_SOCK_RUNNING, 0);
 370
 371	if (po->fanout)
 372		__fanout_unlink(sk, po);
 373	else
 374		__dev_remove_pack(&po->prot_hook);
 375
 376	__sock_put(sk);
 377
 378	if (sync) {
 379		spin_unlock(&po->bind_lock);
 380		synchronize_net();
 381		spin_lock(&po->bind_lock);
 382	}
 383}
 384
 385static void unregister_prot_hook(struct sock *sk, bool sync)
 386{
 387	struct packet_sock *po = pkt_sk(sk);
 388
 389	if (packet_sock_flag(po, PACKET_SOCK_RUNNING))
 390		__unregister_prot_hook(sk, sync);
 391}
 392
 393static inline struct page * __pure pgv_to_page(void *addr)
 394{
 395	if (is_vmalloc_addr(addr))
 396		return vmalloc_to_page(addr);
 397	return virt_to_page(addr);
 398}
 399
 400static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 401{
 402	union tpacket_uhdr h;
 403
 404	/* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */
 405
 406	h.raw = frame;
 407	switch (po->tp_version) {
 408	case TPACKET_V1:
 409		WRITE_ONCE(h.h1->tp_status, status);
 410		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 411		break;
 412	case TPACKET_V2:
 413		WRITE_ONCE(h.h2->tp_status, status);
 414		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 415		break;
 416	case TPACKET_V3:
 417		WRITE_ONCE(h.h3->tp_status, status);
 418		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 419		break;
 420	default:
 421		WARN(1, "TPACKET version not supported.\n");
 422		BUG();
 423	}
 424
 425	smp_wmb();
 426}
 427
 428static int __packet_get_status(const struct packet_sock *po, void *frame)
 429{
 430	union tpacket_uhdr h;
 431
 432	smp_rmb();
 433
 434	/* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */
 435
 436	h.raw = frame;
 437	switch (po->tp_version) {
 438	case TPACKET_V1:
 439		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 440		return READ_ONCE(h.h1->tp_status);
 441	case TPACKET_V2:
 442		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 443		return READ_ONCE(h.h2->tp_status);
 444	case TPACKET_V3:
 445		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
 446		return READ_ONCE(h.h3->tp_status);
 447	default:
 448		WARN(1, "TPACKET version not supported.\n");
 449		BUG();
 450		return 0;
 451	}
 452}
 453
 454static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
 455				   unsigned int flags)
 456{
 457	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 458
 459	if (shhwtstamps &&
 460	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 461	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
 462		return TP_STATUS_TS_RAW_HARDWARE;
 463
 464	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
 465	    ktime_to_timespec64_cond(skb_tstamp(skb), ts))
 466		return TP_STATUS_TS_SOFTWARE;
 467
 468	return 0;
 469}
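/*
 * Which timestamp lands in the ring is selected by po->tp_tstamp, set from
 * user space via PACKET_TIMESTAMP.  A sketch (fd is hypothetical; the
 * SOF_TIMESTAMPING_* constants come from <linux/net_tstamp.h>, and
 * SOF_TIMESTAMPING_SOFTWARE can be used instead of RAW_HARDWARE):
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * The TP_STATUS_TS_* bit returned in tp_status tells user space which
 * clock actually supplied tp_sec/tp_nsec.
 */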
 470
 471static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 472				    struct sk_buff *skb)
 473{
 474	union tpacket_uhdr h;
 475	struct timespec64 ts;
 476	__u32 ts_status;
 477
 478	if (!(ts_status = tpacket_get_timestamp(skb, &ts, READ_ONCE(po->tp_tstamp))))
 479		return 0;
 480
 481	h.raw = frame;
 482	/*
 483	 * versions 1 through 3 overflow the timestamps in y2106, since they
 484	 * all store the seconds in a 32-bit unsigned integer.
 485	 * If we create a version 4, that should have a 64-bit timestamp,
 486	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
 487	 * nanoseconds.
 488	 */
 489	switch (po->tp_version) {
 490	case TPACKET_V1:
 491		h.h1->tp_sec = ts.tv_sec;
 492		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 493		break;
 494	case TPACKET_V2:
 495		h.h2->tp_sec = ts.tv_sec;
 496		h.h2->tp_nsec = ts.tv_nsec;
 497		break;
 498	case TPACKET_V3:
 499		h.h3->tp_sec = ts.tv_sec;
 500		h.h3->tp_nsec = ts.tv_nsec;
 501		break;
 502	default:
 503		WARN(1, "TPACKET version not supported.\n");
 504		BUG();
 505	}
 506
 507	/* one flush is safe, as both fields always lie on the same cacheline */
 508	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 509	smp_wmb();
 510
 511	return ts_status;
 512}
 513
 514static void *packet_lookup_frame(const struct packet_sock *po,
 515				 const struct packet_ring_buffer *rb,
 516				 unsigned int position,
 517				 int status)
 518{
 519	unsigned int pg_vec_pos, frame_offset;
 520	union tpacket_uhdr h;
 521
 522	pg_vec_pos = position / rb->frames_per_block;
 523	frame_offset = position % rb->frames_per_block;
 524
 525	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 526		(frame_offset * rb->frame_size);
 527
 528	if (status != __packet_get_status(po, h.raw))
 529		return NULL;
 530
 531	return h.raw;
 532}
 533
 534static void *packet_current_frame(struct packet_sock *po,
 535		struct packet_ring_buffer *rb,
 536		int status)
 537{
 538	return packet_lookup_frame(po, rb, rb->head, status);
 539}
 540
 541static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 542{
 543	del_timer_sync(&pkc->retire_blk_timer);
 544}
 545
 546static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 547		struct sk_buff_head *rb_queue)
 548{
 549	struct tpacket_kbdq_core *pkc;
 550
 551	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 552
 553	spin_lock_bh(&rb_queue->lock);
 554	pkc->delete_blk_timer = 1;
 555	spin_unlock_bh(&rb_queue->lock);
 556
 557	prb_del_retire_blk_timer(pkc);
 558}
 559
 560static void prb_setup_retire_blk_timer(struct packet_sock *po)
 561{
 562	struct tpacket_kbdq_core *pkc;
 563
 564	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 565	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
 566		    0);
 567	pkc->retire_blk_timer.expires = jiffies;
 568}
 569
 570static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 571				int blk_size_in_bytes)
 572{
 573	struct net_device *dev;
 574	unsigned int mbits, div;
 575	struct ethtool_link_ksettings ecmd;
 576	int err;
 577
 578	rtnl_lock();
 579	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 580	if (unlikely(!dev)) {
 581		rtnl_unlock();
 582		return DEFAULT_PRB_RETIRE_TOV;
 583	}
 584	err = __ethtool_get_link_ksettings(dev, &ecmd);
 585	rtnl_unlock();
 586	if (err)
 587		return DEFAULT_PRB_RETIRE_TOV;
 588
 589	/* If the link speed is so slow you don't really
 590	 * need to worry about perf anyways
 591	 */
 592	if (ecmd.base.speed < SPEED_1000 ||
 593	    ecmd.base.speed == SPEED_UNKNOWN)
 594		return DEFAULT_PRB_RETIRE_TOV;
 595
 596	div = ecmd.base.speed / 1000;
 597	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 598
 599	if (div)
 600		mbits /= div;
 601
 602	if (div)
 603		return mbits + 1;
 604	return mbits;
 605}
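/*
 * Worked example of the heuristic above (illustrative numbers): with
 * tp_block_size = 1 MiB, mbits = (1048576 * 8) / (1024 * 1024) = 8, i.e.
 * the block holds 8 Mbit of traffic, which takes roughly 8 ms to fill at
 * 1 Gb/s.  There div = 1, so the function returns 8 + 1 = 9 ms of retire
 * timeout (the fill time plus slack).  At 10 Gb/s, div = 10, mbits becomes
 * 8 / 10 = 0 and the result is 0 + 1 = 1 ms.  Links slower than 1 Gb/s
 * simply use DEFAULT_PRB_RETIRE_TOV.
 */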
 606
 607static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 608			union tpacket_req_u *req_u)
 609{
 610	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 611}
 612
 613static void init_prb_bdqc(struct packet_sock *po,
 614			struct packet_ring_buffer *rb,
 615			struct pgv *pg_vec,
 616			union tpacket_req_u *req_u)
 617{
 618	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 619	struct tpacket_block_desc *pbd;
 620
 621	memset(p1, 0x0, sizeof(*p1));
 622
 623	p1->knxt_seq_num = 1;
 624	p1->pkbdq = pg_vec;
 625	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 626	p1->pkblk_start	= pg_vec[0].buffer;
 627	p1->kblk_size = req_u->req3.tp_block_size;
 628	p1->knum_blocks	= req_u->req3.tp_block_nr;
 629	p1->hdrlen = po->tp_hdrlen;
 630	p1->version = po->tp_version;
 631	p1->last_kactive_blk_num = 0;
 632	po->stats.stats3.tp_freeze_q_cnt = 0;
 633	if (req_u->req3.tp_retire_blk_tov)
 634		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 635	else
 636		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 637						req_u->req3.tp_block_size);
 638	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 639	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 640	rwlock_init(&p1->blk_fill_in_prog_lock);
 641
 642	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 643	prb_init_ft_ops(p1, req_u);
 644	prb_setup_retire_blk_timer(po);
 645	prb_open_block(p1, pbd);
 646}
 647
 648/*  Do NOT update the last_blk_num first.
 649 *  Assumes sk_buff_head lock is held.
 650 */
 651static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 652{
 653	mod_timer(&pkc->retire_blk_timer,
 654			jiffies + pkc->tov_in_jiffies);
 655	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 656}
 657
 658/*
 659 * Timer logic:
 660 * 1) We refresh the timer only when we open a block.
 661 *    By doing this we don't waste cycles refreshing the timer
 662 *    on a packet-by-packet basis.
 663 *
 664 * With a 1MB block-size, on a 1Gbps line, it will take
 665 * i) ~8 ms to fill a block + ii) memcpy etc.
 666 * In this cut we are not accounting for the memcpy time.
 667 *
 668 * So, if the user sets the 'tmo' to 10ms then the timer
 669 * will never fire while the block is still getting filled
 670 * (which is what we want). However, the user could choose
 671 * to close a block early and that's fine.
 672 *
 673 * But when the timer does fire, we check whether or not to refresh it.
 674 * Since the tmo granularity is in msecs, it is not too expensive
 675 * to refresh the timer, let's say every '8' msecs.
 676 * Either the user can set the 'tmo' or we can derive it based on
 677 * a) line-speed and b) block-size.
 678 * prb_calc_retire_blk_tmo() calculates the tmo.
 679 *
 680 */
 681static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
 682{
 683	struct packet_sock *po =
 684		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
 685	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 686	unsigned int frozen;
 687	struct tpacket_block_desc *pbd;
 688
 689	spin_lock(&po->sk.sk_receive_queue.lock);
 690
 691	frozen = prb_queue_frozen(pkc);
 692	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 693
 694	if (unlikely(pkc->delete_blk_timer))
 695		goto out;
 696
 697	/* We only need to plug the race when the block is partially filled.
 698	 * tpacket_rcv:
 699	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 700	 *		copy_bits() is in progress ...
 701	 *		timer fires on other cpu:
 702	 *		we can't retire the current block because copy_bits
 703	 *		is in progress.
 704	 *
 705	 */
 706	if (BLOCK_NUM_PKTS(pbd)) {
 707		/* Waiting for skb_copy_bits to finish... */
 708		write_lock(&pkc->blk_fill_in_prog_lock);
 709		write_unlock(&pkc->blk_fill_in_prog_lock);
 710	}
 711
 712	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 713		if (!frozen) {
 714			if (!BLOCK_NUM_PKTS(pbd)) {
 715				/* An empty block. Just refresh the timer. */
 716				goto refresh_timer;
 717			}
 718			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 719			if (!prb_dispatch_next_block(pkc, po))
 720				goto refresh_timer;
 721			else
 722				goto out;
 723		} else {
 724			/* Case 1. Queue was frozen because user-space was
 725			 *	   lagging behind.
 726			 */
 727			if (prb_curr_blk_in_use(pbd)) {
 728				/*
 729				 * Ok, user-space is still behind.
 730				 * So just refresh the timer.
 731				 */
 732				goto refresh_timer;
 733			} else {
 734			       /* Case 2. The queue was frozen, user-space caught up,
 735				* now the link went idle && the timer fired.
 736				* We don't have a block to close. So we open this
 737				* block and restart the timer.
 738				* Opening a block thaws the queue and restarts the timer.
 739				* Thawing/timer-refresh is a side effect.
 740				*/
 741				prb_open_block(pkc, pbd);
 742				goto out;
 743			}
 744		}
 745	}
 746
 747refresh_timer:
 748	_prb_refresh_rx_retire_blk_timer(pkc);
 749
 750out:
 751	spin_unlock(&po->sk.sk_receive_queue.lock);
 752}
 753
 754static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 755		struct tpacket_block_desc *pbd1, __u32 status)
 756{
 757	/* Flush everything minus the block header */
 758
 759#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 760	u8 *start, *end;
 761
 762	start = (u8 *)pbd1;
 763
 764	/* Skip the block header (we know the header WILL fit in 4K) */
 765	start += PAGE_SIZE;
 766
 767	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 768	for (; start < end; start += PAGE_SIZE)
 769		flush_dcache_page(pgv_to_page(start));
 770
 771	smp_wmb();
 772#endif
 773
 774	/* Now update the block status. */
 775
 776	BLOCK_STATUS(pbd1) = status;
 777
 778	/* Flush the block header */
 779
 780#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 781	start = (u8 *)pbd1;
 782	flush_dcache_page(pgv_to_page(start));
 783
 784	smp_wmb();
 785#endif
 786}
 787
 788/*
 789 * Side effect:
 790 *
 791 * 1) flush the block
 792 * 2) Increment active_blk_num
 793 *
 794 * Note:We DONT refresh the timer on purpose.
 795 *	Because almost always the next block will be opened.
 796 */
 797static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 798		struct tpacket_block_desc *pbd1,
 799		struct packet_sock *po, unsigned int stat)
 800{
 801	__u32 status = TP_STATUS_USER | stat;
 802
 803	struct tpacket3_hdr *last_pkt;
 804	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 805	struct sock *sk = &po->sk;
 806
 807	if (atomic_read(&po->tp_drops))
 808		status |= TP_STATUS_LOSING;
 809
 810	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 811	last_pkt->tp_next_offset = 0;
 812
 813	/* Get the ts of the last pkt */
 814	if (BLOCK_NUM_PKTS(pbd1)) {
 815		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 816		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 817	} else {
 818		/* Ok, we tmo'd - so get the current time.
 819		 *
 820		 * It shouldn't really happen as we don't close empty
 821		 * blocks. See prb_retire_rx_blk_timer_expired().
 822		 */
 823		struct timespec64 ts;
 824		ktime_get_real_ts64(&ts);
 825		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 826		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 827	}
 828
 829	smp_wmb();
 830
 831	/* Flush the block */
 832	prb_flush_block(pkc1, pbd1, status);
 833
 834	sk->sk_data_ready(sk);
 835
 836	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 837}
 838
 839static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 840{
 841	pkc->reset_pending_on_curr_blk = 0;
 842}
 843
 844/*
 845 * Side effect of opening a block:
 846 *
 847 * 1) prb_queue is thawed.
 848 * 2) retire_blk_timer is refreshed.
 849 *
 850 */
 851static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 852	struct tpacket_block_desc *pbd1)
 853{
 854	struct timespec64 ts;
 855	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 856
 857	smp_rmb();
 858
 859	/* We could have just memset this but we will lose the
 860	 * flexibility of making the priv area sticky
 861	 */
 862
 863	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 864	BLOCK_NUM_PKTS(pbd1) = 0;
 865	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 866
 867	ktime_get_real_ts64(&ts);
 868
 869	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 870	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 871
 872	pkc1->pkblk_start = (char *)pbd1;
 873	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 874
 875	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 876	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 877
 878	pbd1->version = pkc1->version;
 879	pkc1->prev = pkc1->nxt_offset;
 880	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 881
 882	prb_thaw_queue(pkc1);
 883	_prb_refresh_rx_retire_blk_timer(pkc1);
 884
 885	smp_wmb();
 886}
 887
 888/*
 889 * Queue freeze logic:
 890 * 1) Assume tp_block_nr = 8 blocks.
 891 * 2) At time 't0', user opens Rx ring.
 892 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 893 * 4) user-space is either sleeping or processing block '0'.
 894 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 895 *    it will close block-7, loop around and try to fill block '0'.
 896 *    call-flow:
 897 *    __packet_lookup_frame_in_block
 898 *      prb_retire_current_block()
 899 *      prb_dispatch_next_block()
 900 *        |->(BLOCK_STATUS == USER) evaluates to true
 901 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 902 * 6) Now there are two cases:
 903 *    6.1) Link goes idle right after the queue is frozen.
 904 *         But remember, the last open_block() refreshed the timer.
 905 *         When this timer expires, it will refresh itself so that we can
 906 *         re-open block-0 in near future.
 907 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 908 *         case and __packet_lookup_frame_in_block will check if block-0
 909 *         is free and can now be re-used.
 910 */
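/*
 * The user-space half of the handshake described above, as a hedged sketch
 * along the lines of Documentation/networking/packet_mmap.rst.  'ring',
 * 'req' (a struct tpacket_req3), 'pfd' and 'blk_num' are assumed to have
 * been set up elsewhere; real code would also issue a memory barrier when
 * releasing the block.
 *
 *	struct tpacket_block_desc *pbd =
 *		(void *)((char *)ring + blk_num * req.tp_block_size);
 *
 *	poll(&pfd, 1, -1);
 *	if (pbd->hdr.bh1.block_status & TP_STATUS_USER) {
 *		struct tpacket3_hdr *ppd = (void *)((char *)pbd +
 *				pbd->hdr.bh1.offset_to_first_pkt);
 *		for (unsigned int i = 0; i < pbd->hdr.bh1.num_pkts; i++) {
 *			... consume ppd->tp_snaplen bytes at
 *			    (char *)ppd + ppd->tp_mac ...
 *			ppd = (void *)((char *)ppd + ppd->tp_next_offset);
 *		}
 *		pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 *	}
 *
 * If user space is slow to flip block_status back to TP_STATUS_KERNEL,
 * prb_dispatch_next_block() finds the block still in TP_STATUS_USER and
 * freezes the queue, exactly as in step 5.1 above.
 */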
 911static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 912				  struct packet_sock *po)
 913{
 914	pkc->reset_pending_on_curr_blk = 1;
 915	po->stats.stats3.tp_freeze_q_cnt++;
 916}
 917
 918#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 919
 920/*
 921 * If the next block is free then we will dispatch it
 922 * and return a good offset.
 923 * Else, we will freeze the queue.
 924 * So, caller must check the return value.
 925 */
 926static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 927		struct packet_sock *po)
 928{
 929	struct tpacket_block_desc *pbd;
 930
 931	smp_rmb();
 932
 933	/* 1. Get current block num */
 934	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 935
 936	/* 2. If this block is currently in_use then freeze the queue */
 937	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 938		prb_freeze_queue(pkc, po);
 939		return NULL;
 940	}
 941
 942	/*
 943	 * 3.
 944	 * open this block and return the offset where the first packet
 945	 * needs to get stored.
 946	 */
 947	prb_open_block(pkc, pbd);
 948	return (void *)pkc->nxt_offset;
 949}
 950
 951static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 952		struct packet_sock *po, unsigned int status)
 953{
 954	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 955
 956	/* retire/close the current block */
 957	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 958		/*
 959		 * Plug the case where copy_bits() is in progress on
 960		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 961		 * have space to copy the pkt in the current block and
 962		 * called prb_retire_current_block()
 963		 *
 964		 * We don't need to worry about the TMO case because
 965		 * the timer-handler already handled this case.
 966		 */
 967		if (!(status & TP_STATUS_BLK_TMO)) {
 968			/* Waiting for skb_copy_bits to finish... */
 969			write_lock(&pkc->blk_fill_in_prog_lock);
 970			write_unlock(&pkc->blk_fill_in_prog_lock);
 971		}
 972		prb_close_block(pkc, pbd, po, status);
 973		return;
 974	}
 975}
 976
 977static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
 978{
 979	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 980}
 981
 982static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 983{
 984	return pkc->reset_pending_on_curr_blk;
 985}
 986
 987static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 988	__releases(&pkc->blk_fill_in_prog_lock)
 989{
 990	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 991
 992	read_unlock(&pkc->blk_fill_in_prog_lock);
 993}
 994
 995static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 996			struct tpacket3_hdr *ppd)
 997{
 998	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 999}
1000
1001static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
1002			struct tpacket3_hdr *ppd)
1003{
1004	ppd->hv1.tp_rxhash = 0;
1005}
1006
1007static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
1008			struct tpacket3_hdr *ppd)
1009{
1010	if (skb_vlan_tag_present(pkc->skb)) {
1011		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1012		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1013		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1014	} else {
1015		ppd->hv1.tp_vlan_tci = 0;
1016		ppd->hv1.tp_vlan_tpid = 0;
1017		ppd->tp_status = TP_STATUS_AVAILABLE;
1018	}
1019}
1020
1021static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1022			struct tpacket3_hdr *ppd)
1023{
1024	ppd->hv1.tp_padding = 0;
1025	prb_fill_vlan_info(pkc, ppd);
1026
1027	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1028		prb_fill_rxhash(pkc, ppd);
1029	else
1030		prb_clear_rxhash(pkc, ppd);
1031}
1032
1033static void prb_fill_curr_block(char *curr,
1034				struct tpacket_kbdq_core *pkc,
1035				struct tpacket_block_desc *pbd,
1036				unsigned int len)
1037	__acquires(&pkc->blk_fill_in_prog_lock)
1038{
1039	struct tpacket3_hdr *ppd;
1040
1041	ppd  = (struct tpacket3_hdr *)curr;
1042	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1043	pkc->prev = curr;
1044	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1045	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1046	BLOCK_NUM_PKTS(pbd) += 1;
1047	read_lock(&pkc->blk_fill_in_prog_lock);
1048	prb_run_all_ft_ops(pkc, ppd);
1049}
1050
1051/* Assumes caller has the sk->rx_queue.lock */
1052static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1053					    struct sk_buff *skb,
1054					    unsigned int len
1055					    )
1056{
1057	struct tpacket_kbdq_core *pkc;
1058	struct tpacket_block_desc *pbd;
1059	char *curr, *end;
1060
1061	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1062	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1063
1064	/* Queue is frozen when user space is lagging behind */
1065	if (prb_queue_frozen(pkc)) {
1066		/*
1067		 * Check if the last block, which caused the queue to freeze,
1068		 * is still in use by user-space.
1069		 */
1070		if (prb_curr_blk_in_use(pbd)) {
1071			/* Can't record this packet */
1072			return NULL;
1073		} else {
1074			/*
1075			 * Ok, the block was released by user-space.
1076			 * Now let's open that block.
1077			 * opening a block also thaws the queue.
1078			 * Thawing is a side effect.
1079			 */
1080			prb_open_block(pkc, pbd);
1081		}
1082	}
1083
1084	smp_mb();
1085	curr = pkc->nxt_offset;
1086	pkc->skb = skb;
1087	end = (char *)pbd + pkc->kblk_size;
1088
1089	/* first try the current block */
1090	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1091		prb_fill_curr_block(curr, pkc, pbd, len);
1092		return (void *)curr;
1093	}
1094
1095	/* Ok, close the current block */
1096	prb_retire_current_block(pkc, po, 0);
1097
1098	/* Now, try to dispatch the next block */
1099	curr = (char *)prb_dispatch_next_block(pkc, po);
1100	if (curr) {
1101		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1102		prb_fill_curr_block(curr, pkc, pbd, len);
1103		return (void *)curr;
1104	}
1105
1106	/*
1107	 * No free blocks are available. User-space hasn't caught up yet.
1108	 * Queue was just frozen and now this packet will get dropped.
1109	 */
1110	return NULL;
1111}
1112
1113static void *packet_current_rx_frame(struct packet_sock *po,
1114					    struct sk_buff *skb,
1115					    int status, unsigned int len)
1116{
1117	char *curr = NULL;
1118	switch (po->tp_version) {
1119	case TPACKET_V1:
1120	case TPACKET_V2:
1121		curr = packet_lookup_frame(po, &po->rx_ring,
1122					po->rx_ring.head, status);
1123		return curr;
1124	case TPACKET_V3:
1125		return __packet_lookup_frame_in_block(po, skb, len);
1126	default:
1127		WARN(1, "TPACKET version not supported\n");
1128		BUG();
1129		return NULL;
1130	}
1131}
1132
1133static void *prb_lookup_block(const struct packet_sock *po,
1134			      const struct packet_ring_buffer *rb,
1135			      unsigned int idx,
1136			      int status)
1137{
1138	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1139	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1140
1141	if (status != BLOCK_STATUS(pbd))
1142		return NULL;
1143	return pbd;
1144}
1145
1146static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1147{
1148	unsigned int prev;
1149	if (rb->prb_bdqc.kactive_blk_num)
1150		prev = rb->prb_bdqc.kactive_blk_num-1;
1151	else
1152		prev = rb->prb_bdqc.knum_blocks-1;
1153	return prev;
1154}
1155
1156/* Assumes caller has held the rx_queue.lock */
1157static void *__prb_previous_block(struct packet_sock *po,
1158					 struct packet_ring_buffer *rb,
1159					 int status)
1160{
1161	unsigned int previous = prb_previous_blk_num(rb);
1162	return prb_lookup_block(po, rb, previous, status);
1163}
1164
1165static void *packet_previous_rx_frame(struct packet_sock *po,
1166					     struct packet_ring_buffer *rb,
1167					     int status)
1168{
1169	if (po->tp_version <= TPACKET_V2)
1170		return packet_previous_frame(po, rb, status);
1171
1172	return __prb_previous_block(po, rb, status);
1173}
1174
1175static void packet_increment_rx_head(struct packet_sock *po,
1176					    struct packet_ring_buffer *rb)
1177{
1178	switch (po->tp_version) {
1179	case TPACKET_V1:
1180	case TPACKET_V2:
1181		return packet_increment_head(rb);
1182	case TPACKET_V3:
1183	default:
1184		WARN(1, "TPACKET version not supported.\n");
1185		BUG();
1186		return;
1187	}
1188}
1189
1190static void *packet_previous_frame(struct packet_sock *po,
1191		struct packet_ring_buffer *rb,
1192		int status)
1193{
1194	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1195	return packet_lookup_frame(po, rb, previous, status);
1196}
1197
1198static void packet_increment_head(struct packet_ring_buffer *buff)
1199{
1200	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1201}
1202
1203static void packet_inc_pending(struct packet_ring_buffer *rb)
1204{
1205	this_cpu_inc(*rb->pending_refcnt);
1206}
1207
1208static void packet_dec_pending(struct packet_ring_buffer *rb)
1209{
1210	this_cpu_dec(*rb->pending_refcnt);
1211}
1212
1213static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1214{
1215	unsigned int refcnt = 0;
1216	int cpu;
1217
1218	/* We don't use pending refcount in rx_ring. */
1219	if (rb->pending_refcnt == NULL)
1220		return 0;
1221
1222	for_each_possible_cpu(cpu)
1223		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1224
1225	return refcnt;
1226}
1227
1228static int packet_alloc_pending(struct packet_sock *po)
1229{
1230	po->rx_ring.pending_refcnt = NULL;
1231
1232	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1233	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1234		return -ENOBUFS;
1235
1236	return 0;
1237}
1238
1239static void packet_free_pending(struct packet_sock *po)
1240{
1241	free_percpu(po->tx_ring.pending_refcnt);
1242}
1243
1244#define ROOM_POW_OFF	2
1245#define ROOM_NONE	0x0
1246#define ROOM_LOW	0x1
1247#define ROOM_NORMAL	0x2
1248
1249static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
1250{
1251	int idx, len;
1252
1253	len = READ_ONCE(po->rx_ring.frame_max) + 1;
1254	idx = READ_ONCE(po->rx_ring.head);
1255	if (pow_off)
1256		idx += len >> pow_off;
1257	if (idx >= len)
1258		idx -= len;
1259	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1260}
1261
1262static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
1263{
1264	int idx, len;
1265
1266	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
1267	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
1268	if (pow_off)
1269		idx += len >> pow_off;
1270	if (idx >= len)
1271		idx -= len;
1272	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1273}
1274
1275static int __packet_rcv_has_room(const struct packet_sock *po,
1276				 const struct sk_buff *skb)
1277{
1278	const struct sock *sk = &po->sk;
1279	int ret = ROOM_NONE;
1280
1281	if (po->prot_hook.func != tpacket_rcv) {
1282		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
1283		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1284				   - (skb ? skb->truesize : 0);
1285
1286		if (avail > (rcvbuf >> ROOM_POW_OFF))
1287			return ROOM_NORMAL;
1288		else if (avail > 0)
1289			return ROOM_LOW;
1290		else
1291			return ROOM_NONE;
1292	}
1293
1294	if (po->tp_version == TPACKET_V3) {
1295		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1296			ret = ROOM_NORMAL;
1297		else if (__tpacket_v3_has_room(po, 0))
1298			ret = ROOM_LOW;
1299	} else {
1300		if (__tpacket_has_room(po, ROOM_POW_OFF))
1301			ret = ROOM_NORMAL;
1302		else if (__tpacket_has_room(po, 0))
1303			ret = ROOM_LOW;
1304	}
1305
1306	return ret;
1307}
1308
1309static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1310{
1311	bool pressure;
1312	int ret;
1313
1314	ret = __packet_rcv_has_room(po, skb);
1315	pressure = ret != ROOM_NORMAL;
1316
1317	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) != pressure)
1318		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, pressure);
1319
1320	return ret;
1321}
1322
1323static void packet_rcv_try_clear_pressure(struct packet_sock *po)
1324{
1325	if (packet_sock_flag(po, PACKET_SOCK_PRESSURE) &&
1326	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
1327		packet_sock_flag_set(po, PACKET_SOCK_PRESSURE, false);
1328}
1329
1330static void packet_sock_destruct(struct sock *sk)
1331{
1332	skb_queue_purge(&sk->sk_error_queue);
1333
1334	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1335	WARN_ON(refcount_read(&sk->sk_wmem_alloc));
1336
1337	if (!sock_flag(sk, SOCK_DEAD)) {
1338		pr_err("Attempt to release alive packet socket: %p\n", sk);
1339		return;
1340	}
1341}
1342
1343static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1344{
1345	u32 *history = po->rollover->history;
1346	u32 victim, rxhash;
1347	int i, count = 0;
1348
1349	rxhash = skb_get_hash(skb);
1350	for (i = 0; i < ROLLOVER_HLEN; i++)
1351		if (READ_ONCE(history[i]) == rxhash)
1352			count++;
1353
1354	victim = get_random_u32_below(ROLLOVER_HLEN);
1355
1356	/* Avoid dirtying the cache line if possible */
1357	if (READ_ONCE(history[victim]) != rxhash)
1358		WRITE_ONCE(history[victim], rxhash);
1359
1360	return count > (ROLLOVER_HLEN >> 1);
1361}
1362
1363static unsigned int fanout_demux_hash(struct packet_fanout *f,
1364				      struct sk_buff *skb,
1365				      unsigned int num)
1366{
1367	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1368}
1369
1370static unsigned int fanout_demux_lb(struct packet_fanout *f,
1371				    struct sk_buff *skb,
1372				    unsigned int num)
1373{
1374	unsigned int val = atomic_inc_return(&f->rr_cur);
1375
1376	return val % num;
1377}
1378
1379static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1380				     struct sk_buff *skb,
1381				     unsigned int num)
1382{
1383	return smp_processor_id() % num;
1384}
1385
1386static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1387				     struct sk_buff *skb,
1388				     unsigned int num)
1389{
1390	return get_random_u32_below(num);
1391}
1392
1393static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1394					  struct sk_buff *skb,
1395					  unsigned int idx, bool try_self,
1396					  unsigned int num)
1397{
1398	struct packet_sock *po, *po_next, *po_skip = NULL;
1399	unsigned int i, j, room = ROOM_NONE;
1400
1401	po = pkt_sk(rcu_dereference(f->arr[idx]));
1402
1403	if (try_self) {
1404		room = packet_rcv_has_room(po, skb);
1405		if (room == ROOM_NORMAL ||
1406		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1407			return idx;
1408		po_skip = po;
1409	}
1410
1411	i = j = min_t(int, po->rollover->sock, num - 1);
1412	do {
1413		po_next = pkt_sk(rcu_dereference(f->arr[i]));
1414		if (po_next != po_skip &&
1415		    !packet_sock_flag(po_next, PACKET_SOCK_PRESSURE) &&
1416		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1417			if (i != j)
1418				po->rollover->sock = i;
1419			atomic_long_inc(&po->rollover->num);
1420			if (room == ROOM_LOW)
1421				atomic_long_inc(&po->rollover->num_huge);
1422			return i;
1423		}
1424
1425		if (++i == num)
1426			i = 0;
1427	} while (i != j);
1428
1429	atomic_long_inc(&po->rollover->num_failed);
1430	return idx;
1431}
1432
1433static unsigned int fanout_demux_qm(struct packet_fanout *f,
1434				    struct sk_buff *skb,
1435				    unsigned int num)
1436{
1437	return skb_get_queue_mapping(skb) % num;
1438}
1439
1440static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1441				     struct sk_buff *skb,
1442				     unsigned int num)
1443{
1444	struct bpf_prog *prog;
1445	unsigned int ret = 0;
1446
1447	rcu_read_lock();
1448	prog = rcu_dereference(f->bpf_prog);
1449	if (prog)
1450		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1451	rcu_read_unlock();
1452
1453	return ret;
1454}
1455
1456static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1457{
1458	return f->flags & (flag >> 8);
1459}
1460
1461static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1462			     struct packet_type *pt, struct net_device *orig_dev)
1463{
1464	struct packet_fanout *f = pt->af_packet_priv;
1465	unsigned int num = READ_ONCE(f->num_members);
1466	struct net *net = read_pnet(&f->net);
1467	struct packet_sock *po;
1468	unsigned int idx;
1469
1470	if (!net_eq(dev_net(dev), net) || !num) {
1471		kfree_skb(skb);
1472		return 0;
1473	}
1474
1475	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1476		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1477		if (!skb)
1478			return 0;
1479	}
1480	switch (f->type) {
1481	case PACKET_FANOUT_HASH:
1482	default:
1483		idx = fanout_demux_hash(f, skb, num);
1484		break;
1485	case PACKET_FANOUT_LB:
1486		idx = fanout_demux_lb(f, skb, num);
1487		break;
1488	case PACKET_FANOUT_CPU:
1489		idx = fanout_demux_cpu(f, skb, num);
1490		break;
1491	case PACKET_FANOUT_RND:
1492		idx = fanout_demux_rnd(f, skb, num);
1493		break;
1494	case PACKET_FANOUT_QM:
1495		idx = fanout_demux_qm(f, skb, num);
1496		break;
1497	case PACKET_FANOUT_ROLLOVER:
1498		idx = fanout_demux_rollover(f, skb, 0, false, num);
1499		break;
1500	case PACKET_FANOUT_CBPF:
1501	case PACKET_FANOUT_EBPF:
1502		idx = fanout_demux_bpf(f, skb, num);
1503		break;
1504	}
1505
1506	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1507		idx = fanout_demux_rollover(f, skb, idx, true, num);
1508
1509	po = pkt_sk(rcu_dereference(f->arr[idx]));
1510	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1511}
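/*
 * Joining a fanout group from user space, sketched per packet(7) (fd and
 * group_id are hypothetical; group_id is a caller-chosen 16-bit value):
 * each member socket issues PACKET_FANOUT with the same group id, and the
 * demux mode in the upper bits selects one of the handlers above.
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	int arg = group_id | (PACKET_FANOUT_HASH << 16);
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 *
 * Every socket that joins with the same id and compatible flags becomes an
 * entry in f->arr[], and packet_rcv_fanout() steers each received packet
 * to exactly one of them.
 */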
1512
1513DEFINE_MUTEX(fanout_mutex);
1514EXPORT_SYMBOL_GPL(fanout_mutex);
1515static LIST_HEAD(fanout_list);
1516static u16 fanout_next_id;
1517
1518static void __fanout_link(struct sock *sk, struct packet_sock *po)
1519{
1520	struct packet_fanout *f = po->fanout;
1521
1522	spin_lock(&f->lock);
1523	rcu_assign_pointer(f->arr[f->num_members], sk);
1524	smp_wmb();
1525	f->num_members++;
1526	if (f->num_members == 1)
1527		dev_add_pack(&f->prot_hook);
1528	spin_unlock(&f->lock);
1529}
1530
1531static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1532{
1533	struct packet_fanout *f = po->fanout;
1534	int i;
1535
1536	spin_lock(&f->lock);
1537	for (i = 0; i < f->num_members; i++) {
1538		if (rcu_dereference_protected(f->arr[i],
1539					      lockdep_is_held(&f->lock)) == sk)
1540			break;
1541	}
1542	BUG_ON(i >= f->num_members);
1543	rcu_assign_pointer(f->arr[i],
1544			   rcu_dereference_protected(f->arr[f->num_members - 1],
1545						     lockdep_is_held(&f->lock)));
1546	f->num_members--;
1547	if (f->num_members == 0)
1548		__dev_remove_pack(&f->prot_hook);
1549	spin_unlock(&f->lock);
1550}
1551
1552static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1553{
1554	if (sk->sk_family != PF_PACKET)
1555		return false;
1556
1557	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1558}
1559
1560static void fanout_init_data(struct packet_fanout *f)
1561{
1562	switch (f->type) {
1563	case PACKET_FANOUT_LB:
1564		atomic_set(&f->rr_cur, 0);
1565		break;
1566	case PACKET_FANOUT_CBPF:
1567	case PACKET_FANOUT_EBPF:
1568		RCU_INIT_POINTER(f->bpf_prog, NULL);
1569		break;
1570	}
1571}
1572
1573static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1574{
1575	struct bpf_prog *old;
1576
1577	spin_lock(&f->lock);
1578	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1579	rcu_assign_pointer(f->bpf_prog, new);
1580	spin_unlock(&f->lock);
1581
1582	if (old) {
1583		synchronize_net();
1584		bpf_prog_destroy(old);
1585	}
1586}
1587
1588static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
1589				unsigned int len)
1590{
1591	struct bpf_prog *new;
1592	struct sock_fprog fprog;
1593	int ret;
1594
1595	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1596		return -EPERM;
1597
1598	ret = copy_bpf_fprog_from_user(&fprog, data, len);
1599	if (ret)
1600		return ret;
1601
1602	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1603	if (ret)
1604		return ret;
1605
1606	__fanout_set_data_bpf(po->fanout, new);
1607	return 0;
1608}
1609
1610static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
1611				unsigned int len)
1612{
1613	struct bpf_prog *new;
1614	u32 fd;
1615
1616	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1617		return -EPERM;
1618	if (len != sizeof(fd))
1619		return -EINVAL;
1620	if (copy_from_sockptr(&fd, data, len))
1621		return -EFAULT;
1622
1623	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1624	if (IS_ERR(new))
1625		return PTR_ERR(new);
1626
1627	__fanout_set_data_bpf(po->fanout, new);
1628	return 0;
1629}
1630
1631static int fanout_set_data(struct packet_sock *po, sockptr_t data,
1632			   unsigned int len)
1633{
1634	switch (po->fanout->type) {
1635	case PACKET_FANOUT_CBPF:
1636		return fanout_set_data_cbpf(po, data, len);
1637	case PACKET_FANOUT_EBPF:
1638		return fanout_set_data_ebpf(po, data, len);
1639	default:
1640		return -EINVAL;
1641	}
1642}
1643
1644static void fanout_release_data(struct packet_fanout *f)
1645{
1646	switch (f->type) {
1647	case PACKET_FANOUT_CBPF:
1648	case PACKET_FANOUT_EBPF:
1649		__fanout_set_data_bpf(f, NULL);
1650	}
1651}
1652
1653static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
1654{
1655	struct packet_fanout *f;
1656
1657	list_for_each_entry(f, &fanout_list, list) {
1658		if (f->id == candidate_id &&
1659		    read_pnet(&f->net) == sock_net(sk)) {
1660			return false;
1661		}
1662	}
1663	return true;
1664}
1665
1666static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
1667{
1668	u16 id = fanout_next_id;
1669
1670	do {
1671		if (__fanout_id_is_free(sk, id)) {
1672			*new_id = id;
1673			fanout_next_id = id + 1;
1674			return true;
1675		}
1676
1677		id++;
1678	} while (id != fanout_next_id);
1679
1680	return false;
1681}
1682
1683static int fanout_add(struct sock *sk, struct fanout_args *args)
1684{
1685	struct packet_rollover *rollover = NULL;
1686	struct packet_sock *po = pkt_sk(sk);
1687	u16 type_flags = args->type_flags;
1688	struct packet_fanout *f, *match;
1689	u8 type = type_flags & 0xff;
1690	u8 flags = type_flags >> 8;
1691	u16 id = args->id;
1692	int err;
1693
1694	switch (type) {
1695	case PACKET_FANOUT_ROLLOVER:
1696		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1697			return -EINVAL;
1698		break;
1699	case PACKET_FANOUT_HASH:
1700	case PACKET_FANOUT_LB:
1701	case PACKET_FANOUT_CPU:
1702	case PACKET_FANOUT_RND:
1703	case PACKET_FANOUT_QM:
1704	case PACKET_FANOUT_CBPF:
1705	case PACKET_FANOUT_EBPF:
1706		break;
1707	default:
1708		return -EINVAL;
1709	}
1710
1711	mutex_lock(&fanout_mutex);
1712
1713	err = -EALREADY;
1714	if (po->fanout)
1715		goto out;
1716
1717	if (type == PACKET_FANOUT_ROLLOVER ||
1718	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1719		err = -ENOMEM;
1720		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1721		if (!rollover)
1722			goto out;
1723		atomic_long_set(&rollover->num, 0);
1724		atomic_long_set(&rollover->num_huge, 0);
1725		atomic_long_set(&rollover->num_failed, 0);
1726	}
1727
1728	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
1729		if (id != 0) {
1730			err = -EINVAL;
1731			goto out;
1732		}
1733		if (!fanout_find_new_id(sk, &id)) {
1734			err = -ENOMEM;
1735			goto out;
1736		}
1737		/* ephemeral flag for the first socket in the group: drop it */
1738		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
1739	}
1740
1741	match = NULL;
1742	list_for_each_entry(f, &fanout_list, list) {
1743		if (f->id == id &&
1744		    read_pnet(&f->net) == sock_net(sk)) {
1745			match = f;
1746			break;
1747		}
1748	}
1749	err = -EINVAL;
1750	if (match) {
1751		if (match->flags != flags)
1752			goto out;
1753		if (args->max_num_members &&
1754		    args->max_num_members != match->max_num_members)
1755			goto out;
1756	} else {
1757		if (args->max_num_members > PACKET_FANOUT_MAX)
1758			goto out;
1759		if (!args->max_num_members)
1760			/* legacy PACKET_FANOUT_MAX */
1761			args->max_num_members = 256;
1762		err = -ENOMEM;
1763		match = kvzalloc(struct_size(match, arr, args->max_num_members),
1764				 GFP_KERNEL);
1765		if (!match)
1766			goto out;
1767		write_pnet(&match->net, sock_net(sk));
1768		match->id = id;
1769		match->type = type;
1770		match->flags = flags;
1771		INIT_LIST_HEAD(&match->list);
1772		spin_lock_init(&match->lock);
1773		refcount_set(&match->sk_ref, 0);
1774		fanout_init_data(match);
1775		match->prot_hook.type = po->prot_hook.type;
1776		match->prot_hook.dev = po->prot_hook.dev;
1777		match->prot_hook.func = packet_rcv_fanout;
1778		match->prot_hook.af_packet_priv = match;
1779		match->prot_hook.af_packet_net = read_pnet(&match->net);
1780		match->prot_hook.id_match = match_fanout_group;
1781		match->max_num_members = args->max_num_members;
1782		match->prot_hook.ignore_outgoing = type_flags & PACKET_FANOUT_FLAG_IGNORE_OUTGOING;
1783		list_add(&match->list, &fanout_list);
1784	}
1785	err = -EINVAL;
1786
1787	spin_lock(&po->bind_lock);
1788	if (packet_sock_flag(po, PACKET_SOCK_RUNNING) &&
1789	    match->type == type &&
1790	    match->prot_hook.type == po->prot_hook.type &&
1791	    match->prot_hook.dev == po->prot_hook.dev) {
1792		err = -ENOSPC;
1793		if (refcount_read(&match->sk_ref) < match->max_num_members) {
1794			__dev_remove_pack(&po->prot_hook);
1795
1796			/* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */
1797			WRITE_ONCE(po->fanout, match);
1798
1799			po->rollover = rollover;
1800			rollover = NULL;
1801			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
1802			__fanout_link(sk, po);
1803			err = 0;
1804		}
1805	}
1806	spin_unlock(&po->bind_lock);
1807
1808	if (err && !refcount_read(&match->sk_ref)) {
1809		list_del(&match->list);
1810		kvfree(match);
1811	}
1812
1813out:
1814	kfree(rollover);
1815	mutex_unlock(&fanout_mutex);
1816	return err;
1817}
1818
1819/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1820 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1821 * It is the responsibility of the caller to call fanout_release_data() and
1822 * free the returned packet_fanout (after synchronize_net())
1823 */
1824static struct packet_fanout *fanout_release(struct sock *sk)
1825{
1826	struct packet_sock *po = pkt_sk(sk);
1827	struct packet_fanout *f;
1828
1829	mutex_lock(&fanout_mutex);
1830	f = po->fanout;
1831	if (f) {
1832		po->fanout = NULL;
1833
1834		if (refcount_dec_and_test(&f->sk_ref))
1835			list_del(&f->list);
1836		else
1837			f = NULL;
1838	}
1839	mutex_unlock(&fanout_mutex);
1840
1841	return f;
1842}
1843
1844static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1845					  struct sk_buff *skb)
1846{
1847	/* Earlier code assumed this would be a VLAN pkt, double-check
1848	 * this now that we have the actual packet in hand. We can only
1849	 * do this check on Ethernet devices.
1850	 */
1851	if (unlikely(dev->type != ARPHRD_ETHER))
1852		return false;
1853
1854	skb_reset_mac_header(skb);
1855	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1856}
1857
1858static const struct proto_ops packet_ops;
1859
1860static const struct proto_ops packet_ops_spkt;
1861
1862static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1863			   struct packet_type *pt, struct net_device *orig_dev)
1864{
1865	struct sock *sk;
1866	struct sockaddr_pkt *spkt;
1867
1868	/*
1869	 *	When we registered the protocol we saved the socket in the data
1870	 *	field for just this event.
1871	 */
1872
1873	sk = pt->af_packet_priv;
1874
1875	/*
1876	 *	Yank back the headers [hope the device set this
1877	 *	right or kerboom...]
1878	 *
1879	 *	Incoming packets have ll header pulled,
1880	 *	push it back.
1881	 *
1882	 *	For outgoing ones skb->data == skb_mac_header(skb)
1883	 *	so that this procedure is noop.
1884	 */
1885
1886	if (skb->pkt_type == PACKET_LOOPBACK)
1887		goto out;
1888
1889	if (!net_eq(dev_net(dev), sock_net(sk)))
1890		goto out;
1891
1892	skb = skb_share_check(skb, GFP_ATOMIC);
1893	if (skb == NULL)
1894		goto oom;
1895
1896	/* drop any routing info */
1897	skb_dst_drop(skb);
1898
1899	/* drop conntrack reference */
1900	nf_reset_ct(skb);
1901
1902	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1903
1904	skb_push(skb, skb->data - skb_mac_header(skb));
1905
1906	/*
1907	 *	The SOCK_PACKET socket receives _all_ frames.
1908	 */
1909
1910	spkt->spkt_family = dev->type;
1911	strscpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1912	spkt->spkt_protocol = skb->protocol;
1913
1914	/*
1915	 *	Charge the memory to the socket. This is done specifically
1916	 *	to prevent a socket from using up all the memory.
1917	 */
1918
1919	if (sock_queue_rcv_skb(sk, skb) == 0)
1920		return 0;
1921
1922out:
1923	kfree_skb(skb);
1924oom:
1925	return 0;
1926}
1927
1928static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
1929{
1930	int depth;
1931
1932	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
1933	    sock->type == SOCK_RAW) {
1934		skb_reset_mac_header(skb);
1935		skb->protocol = dev_parse_header_protocol(skb);
1936	}
1937
1938	/* Move network header to the right position for VLAN tagged packets */
1939	if (likely(skb->dev->type == ARPHRD_ETHER) &&
1940	    eth_type_vlan(skb->protocol) &&
1941	    vlan_get_protocol_and_depth(skb, skb->protocol, &depth) != 0)
1942		skb_set_network_header(skb, depth);
1943
1944	skb_probe_transport_header(skb);
1945}
1946
1947/*
1948 *	Output a raw packet to a device layer. This bypasses all the other
1949 *	protocol layers and you must therefore supply it with a complete frame
1950 */
1951
1952static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1953			       size_t len)
1954{
1955	struct sock *sk = sock->sk;
1956	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1957	struct sk_buff *skb = NULL;
1958	struct net_device *dev;
1959	struct sockcm_cookie sockc;
1960	__be16 proto = 0;
1961	int err;
1962	int extra_len = 0;
1963
1964	/*
1965	 *	Get and verify the address.
1966	 */
1967
1968	if (saddr) {
1969		if (msg->msg_namelen < sizeof(struct sockaddr))
1970			return -EINVAL;
1971		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1972			proto = saddr->spkt_protocol;
1973	} else
1974		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1975
1976	/*
1977	 *	Find the device first to size check it
1978	 */
1979
1980	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1981retry:
1982	rcu_read_lock();
1983	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1984	err = -ENODEV;
1985	if (dev == NULL)
1986		goto out_unlock;
1987
1988	err = -ENETDOWN;
1989	if (!(dev->flags & IFF_UP))
1990		goto out_unlock;
1991
1992	/*
1993	 * You may not queue a frame bigger than the mtu. This is the lowest level
1994	 * raw protocol and you must do your own fragmentation at this level.
1995	 */
1996
1997	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1998		if (!netif_supports_nofcs(dev)) {
1999			err = -EPROTONOSUPPORT;
2000			goto out_unlock;
2001		}
2002		extra_len = 4; /* We're doing our own CRC */
2003	}
2004
2005	err = -EMSGSIZE;
2006	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
2007		goto out_unlock;
2008
2009	if (!skb) {
2010		size_t reserved = LL_RESERVED_SPACE(dev);
2011		int tlen = dev->needed_tailroom;
2012		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
2013
2014		rcu_read_unlock();
2015		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
2016		if (skb == NULL)
2017			return -ENOBUFS;
2018		/* FIXME: Save some space for broken drivers that write a hard
2019		 * header at transmission time by themselves. PPP is the notable
2020		 * one here. This should really be fixed at the driver level.
2021		 */
2022		skb_reserve(skb, reserved);
2023		skb_reset_network_header(skb);
2024
2025		/* Try to align data part correctly */
2026		if (hhlen) {
2027			skb->data -= hhlen;
2028			skb->tail -= hhlen;
2029			if (len < hhlen)
2030				skb_reset_network_header(skb);
2031		}
2032		err = memcpy_from_msg(skb_put(skb, len), msg, len);
2033		if (err)
2034			goto out_free;
2035		goto retry;
2036	}
2037
2038	if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
2039		err = -EINVAL;
2040		goto out_unlock;
2041	}
2042	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
2043	    !packet_extra_vlan_len_allowed(dev, skb)) {
2044		err = -EMSGSIZE;
2045		goto out_unlock;
2046	}
2047
2048	sockcm_init(&sockc, sk);
2049	if (msg->msg_controllen) {
2050		err = sock_cmsg_send(sk, msg, &sockc);
2051		if (unlikely(err))
2052			goto out_unlock;
2053	}
2054
2055	skb->protocol = proto;
2056	skb->dev = dev;
2057	skb->priority = READ_ONCE(sk->sk_priority);
2058	skb->mark = READ_ONCE(sk->sk_mark);
2059	skb->tstamp = sockc.transmit_time;
2060
2061	skb_setup_tx_timestamp(skb, sockc.tsflags);
2062
2063	if (unlikely(extra_len == 4))
2064		skb->no_fcs = 1;
2065
2066	packet_parse_headers(skb, sock);
2067
2068	dev_queue_xmit(skb);
2069	rcu_read_unlock();
2070	return len;
2071
2072out_unlock:
2073	rcu_read_unlock();
2074out_free:
2075	kfree_skb(skb);
2076	return err;
2077}
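
/* A hedged userspace counterpart of packet_sendmsg_spkt() above: sending
 * one complete link-layer frame on an (obsolete) SOCK_PACKET socket with
 * sendto() and a struct sockaddr_pkt naming the device. 'frame' and
 * 'frame_len' are assumed to hold a fully built frame.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	static int spkt_send(int fd, const char *ifname,
 *			     const void *frame, size_t frame_len)
 *	{
 *		struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *
 *		strncpy((char *)spkt.spkt_device, ifname,
 *			sizeof(spkt.spkt_device) - 1);
 *		spkt.spkt_protocol = htons(ETH_P_IP);
 *		return sendto(fd, frame, frame_len, 0,
 *			      (struct sockaddr *)&spkt, sizeof(spkt));
 *	}
 */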
2078
2079static unsigned int run_filter(struct sk_buff *skb,
2080			       const struct sock *sk,
2081			       unsigned int res)
2082{
2083	struct sk_filter *filter;
2084
2085	rcu_read_lock();
2086	filter = rcu_dereference(sk->sk_filter);
2087	if (filter != NULL)
2088		res = bpf_prog_run_clear_cb(filter->prog, skb);
2089	rcu_read_unlock();
2090
2091	return res;
2092}
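
/* run_filter() runs whatever program userspace attached to the socket; a
 * result of 0 makes packet_rcv()/tpacket_rcv() below drop the packet, and
 * a smaller value truncates the capture (snaplen). A hedged sketch of
 * attaching a trivial "accept everything" classic BPF filter:
 *
 *	#include <sys/socket.h>
 *	#include <linux/filter.h>
 *
 *	static int attach_accept_all(int fd)
 *	{
 *		struct sock_filter code[] = {
 *			{ BPF_RET | BPF_K, 0, 0, 0xffffffff },
 *		};
 *		struct sock_fprog prog = {
 *			.len	= 1,
 *			.filter	= code,
 *		};
 *
 *		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
 *				  &prog, sizeof(prog));
 *	}
 */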
2093
2094static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2095			   size_t *len, int vnet_hdr_sz)
2096{
2097	struct virtio_net_hdr_mrg_rxbuf vnet_hdr = { .num_buffers = 0 };
2098
2099	if (*len < vnet_hdr_sz)
2100		return -EINVAL;
2101	*len -= vnet_hdr_sz;
2102
2103	if (virtio_net_hdr_from_skb(skb, (struct virtio_net_hdr *)&vnet_hdr, vio_le(), true, 0))
2104		return -EINVAL;
2105
2106	return memcpy_to_msg(msg, (void *)&vnet_hdr, vnet_hdr_sz);
2107}
2108
2109/*
2110 * This function does lazy skb cloning in the hope that most packets
2111 * are discarded by BPF.
2112 *
2113 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2114 * and skb->cb are mangled. It works because (and as long as) packets
2115 * falling here are owned by the current CPU. Output packets are cloned
2116 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2117 * sequentially, so if we return the skb to its original state on exit,
2118 * we will not harm anyone.
2119 */
2120
2121static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2122		      struct packet_type *pt, struct net_device *orig_dev)
2123{
2124	enum skb_drop_reason drop_reason = SKB_CONSUMED;
2125	struct sock *sk;
2126	struct sockaddr_ll *sll;
2127	struct packet_sock *po;
2128	u8 *skb_head = skb->data;
2129	int skb_len = skb->len;
2130	unsigned int snaplen, res;
2131
2132	if (skb->pkt_type == PACKET_LOOPBACK)
2133		goto drop;
2134
2135	sk = pt->af_packet_priv;
2136	po = pkt_sk(sk);
2137
2138	if (!net_eq(dev_net(dev), sock_net(sk)))
2139		goto drop;
2140
2141	skb->dev = dev;
2142
2143	if (dev_has_header(dev)) {
2144		/* The device has an explicit notion of ll header,
2145		 * exported to higher levels.
2146		 *
2147		 * Otherwise, the device hides the details of its frame
2148		 * structure, so that the corresponding packet header is
2149		 * never delivered to the user.
2150		 */
2151		if (sk->sk_type != SOCK_DGRAM)
2152			skb_push(skb, skb->data - skb_mac_header(skb));
2153		else if (skb->pkt_type == PACKET_OUTGOING) {
2154			/* Special case: outgoing packets have ll header at head */
2155			skb_pull(skb, skb_network_offset(skb));
2156		}
2157	}
2158
2159	snaplen = skb->len;
2160
2161	res = run_filter(skb, sk, snaplen);
2162	if (!res)
2163		goto drop_n_restore;
2164	if (snaplen > res)
2165		snaplen = res;
2166
2167	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2168		goto drop_n_acct;
2169
2170	if (skb_shared(skb)) {
2171		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2172		if (nskb == NULL)
2173			goto drop_n_acct;
2174
2175		if (skb_head != skb->data) {
2176			skb->data = skb_head;
2177			skb->len = skb_len;
2178		}
2179		consume_skb(skb);
2180		skb = nskb;
2181	}
2182
2183	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2184
2185	sll = &PACKET_SKB_CB(skb)->sa.ll;
2186	sll->sll_hatype = dev->type;
2187	sll->sll_pkttype = skb->pkt_type;
2188	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2189		sll->sll_ifindex = orig_dev->ifindex;
2190	else
2191		sll->sll_ifindex = dev->ifindex;
2192
2193	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2194
2195	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2196	 * Use their space for storing the original skb length.
2197	 */
2198	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2199
2200	if (pskb_trim(skb, snaplen))
2201		goto drop_n_acct;
2202
2203	skb_set_owner_r(skb, sk);
2204	skb->dev = NULL;
2205	skb_dst_drop(skb);
2206
2207	/* drop conntrack reference */
2208	nf_reset_ct(skb);
2209
2210	spin_lock(&sk->sk_receive_queue.lock);
2211	po->stats.stats1.tp_packets++;
2212	sock_skb_set_dropcount(sk, skb);
2213	skb_clear_delivery_time(skb);
2214	__skb_queue_tail(&sk->sk_receive_queue, skb);
2215	spin_unlock(&sk->sk_receive_queue.lock);
2216	sk->sk_data_ready(sk);
2217	return 0;
2218
2219drop_n_acct:
2220	atomic_inc(&po->tp_drops);
2221	atomic_inc(&sk->sk_drops);
2222	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
2223
2224drop_n_restore:
2225	if (skb_head != skb->data && skb_shared(skb)) {
2226		skb->data = skb_head;
2227		skb->len = skb_len;
2228	}
2229drop:
2230	kfree_skb_reason(skb, drop_reason);
2231	return 0;
2232}
2233
2234static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2235		       struct packet_type *pt, struct net_device *orig_dev)
2236{
2237	enum skb_drop_reason drop_reason = SKB_CONSUMED;
2238	struct sock *sk;
2239	struct packet_sock *po;
2240	struct sockaddr_ll *sll;
2241	union tpacket_uhdr h;
2242	u8 *skb_head = skb->data;
2243	int skb_len = skb->len;
2244	unsigned int snaplen, res;
2245	unsigned long status = TP_STATUS_USER;
2246	unsigned short macoff, hdrlen;
2247	unsigned int netoff;
2248	struct sk_buff *copy_skb = NULL;
2249	struct timespec64 ts;
2250	__u32 ts_status;
2251	unsigned int slot_id = 0;
2252	int vnet_hdr_sz = 0;
2253
2254	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2255	 * We may add members to them up to the current aligned size without forcing
2256	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2257	 */
2258	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2259	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2260
2261	if (skb->pkt_type == PACKET_LOOPBACK)
2262		goto drop;
2263
2264	sk = pt->af_packet_priv;
2265	po = pkt_sk(sk);
2266
2267	if (!net_eq(dev_net(dev), sock_net(sk)))
2268		goto drop;
2269
2270	if (dev_has_header(dev)) {
2271		if (sk->sk_type != SOCK_DGRAM)
2272			skb_push(skb, skb->data - skb_mac_header(skb));
2273		else if (skb->pkt_type == PACKET_OUTGOING) {
2274			/* Special case: outgoing packets have ll header at head */
2275			skb_pull(skb, skb_network_offset(skb));
2276		}
2277	}
2278
2279	snaplen = skb->len;
2280
2281	res = run_filter(skb, sk, snaplen);
2282	if (!res)
2283		goto drop_n_restore;
2284
2285	/* If we are flooded, just give up */
2286	if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2287		atomic_inc(&po->tp_drops);
2288		goto drop_n_restore;
2289	}
2290
2291	if (skb->ip_summed == CHECKSUM_PARTIAL)
2292		status |= TP_STATUS_CSUMNOTREADY;
2293	else if (skb->pkt_type != PACKET_OUTGOING &&
2294		 skb_csum_unnecessary(skb))
2295		status |= TP_STATUS_CSUM_VALID;
2296	if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
2297		status |= TP_STATUS_GSO_TCP;
2298
2299	if (snaplen > res)
2300		snaplen = res;
2301
2302	if (sk->sk_type == SOCK_DGRAM) {
2303		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2304				  po->tp_reserve;
2305	} else {
2306		unsigned int maclen = skb_network_offset(skb);
2307		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2308				       (maclen < 16 ? 16 : maclen)) +
2309				       po->tp_reserve;
2310		vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2311		if (vnet_hdr_sz)
2312			netoff += vnet_hdr_sz;
2313		macoff = netoff - maclen;
2314	}
2315	if (netoff > USHRT_MAX) {
2316		atomic_inc(&po->tp_drops);
2317		goto drop_n_restore;
2318	}
2319	if (po->tp_version <= TPACKET_V2) {
2320		if (macoff + snaplen > po->rx_ring.frame_size) {
2321			if (po->copy_thresh &&
2322			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2323				if (skb_shared(skb)) {
2324					copy_skb = skb_clone(skb, GFP_ATOMIC);
2325				} else {
2326					copy_skb = skb_get(skb);
2327					skb_head = skb->data;
2328				}
2329				if (copy_skb) {
2330					memset(&PACKET_SKB_CB(copy_skb)->sa.ll, 0,
2331					       sizeof(PACKET_SKB_CB(copy_skb)->sa.ll));
2332					skb_set_owner_r(copy_skb, sk);
2333				}
2334			}
2335			snaplen = po->rx_ring.frame_size - macoff;
2336			if ((int)snaplen < 0) {
2337				snaplen = 0;
2338				vnet_hdr_sz = 0;
2339			}
2340		}
2341	} else if (unlikely(macoff + snaplen >
2342			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2343		u32 nval;
2344
2345		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2346		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2347			    snaplen, nval, macoff);
2348		snaplen = nval;
2349		if (unlikely((int)snaplen < 0)) {
2350			snaplen = 0;
2351			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2352			vnet_hdr_sz = 0;
2353		}
2354	}
2355	spin_lock(&sk->sk_receive_queue.lock);
2356	h.raw = packet_current_rx_frame(po, skb,
2357					TP_STATUS_KERNEL, (macoff+snaplen));
2358	if (!h.raw)
2359		goto drop_n_account;
2360
2361	if (po->tp_version <= TPACKET_V2) {
2362		slot_id = po->rx_ring.head;
2363		if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2364			goto drop_n_account;
2365		__set_bit(slot_id, po->rx_ring.rx_owner_map);
2366	}
2367
2368	if (vnet_hdr_sz &&
2369	    virtio_net_hdr_from_skb(skb, h.raw + macoff -
2370				    sizeof(struct virtio_net_hdr),
2371				    vio_le(), true, 0)) {
2372		if (po->tp_version == TPACKET_V3)
2373			prb_clear_blk_fill_status(&po->rx_ring);
2374		goto drop_n_account;
2375	}
2376
2377	if (po->tp_version <= TPACKET_V2) {
2378		packet_increment_rx_head(po, &po->rx_ring);
2379	/*
2380	 * LOSING will be reported until you read the stats,
2381	 * because it's COR - Clear On Read.
2382	 * Anyway, this is done for V1/V2 only, as V3 doesn't need it
2383	 * at the packet level.
2384	 */
2385		if (atomic_read(&po->tp_drops))
2386			status |= TP_STATUS_LOSING;
2387	}
2388
2389	po->stats.stats1.tp_packets++;
2390	if (copy_skb) {
2391		status |= TP_STATUS_COPY;
2392		skb_clear_delivery_time(copy_skb);
2393		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2394	}
2395	spin_unlock(&sk->sk_receive_queue.lock);
2396
2397	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2398
2399	/* Always timestamp; prefer an existing software timestamp taken
2400	 * closer to the time of capture.
2401	 */
2402	ts_status = tpacket_get_timestamp(skb, &ts,
2403					  READ_ONCE(po->tp_tstamp) |
2404					  SOF_TIMESTAMPING_SOFTWARE);
2405	if (!ts_status)
2406		ktime_get_real_ts64(&ts);
2407
2408	status |= ts_status;
2409
2410	switch (po->tp_version) {
2411	case TPACKET_V1:
2412		h.h1->tp_len = skb->len;
2413		h.h1->tp_snaplen = snaplen;
2414		h.h1->tp_mac = macoff;
2415		h.h1->tp_net = netoff;
2416		h.h1->tp_sec = ts.tv_sec;
2417		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2418		hdrlen = sizeof(*h.h1);
2419		break;
2420	case TPACKET_V2:
2421		h.h2->tp_len = skb->len;
2422		h.h2->tp_snaplen = snaplen;
2423		h.h2->tp_mac = macoff;
2424		h.h2->tp_net = netoff;
2425		h.h2->tp_sec = ts.tv_sec;
2426		h.h2->tp_nsec = ts.tv_nsec;
2427		if (skb_vlan_tag_present(skb)) {
2428			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2429			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2430			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2431		} else {
2432			h.h2->tp_vlan_tci = 0;
2433			h.h2->tp_vlan_tpid = 0;
2434		}
2435		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2436		hdrlen = sizeof(*h.h2);
2437		break;
2438	case TPACKET_V3:
2439		/* tp_next_offset and vlan are already populated above,
2440		 * so don't clear those fields here.
2441		 */
2442		h.h3->tp_status |= status;
2443		h.h3->tp_len = skb->len;
2444		h.h3->tp_snaplen = snaplen;
2445		h.h3->tp_mac = macoff;
2446		h.h3->tp_net = netoff;
2447		h.h3->tp_sec  = ts.tv_sec;
2448		h.h3->tp_nsec = ts.tv_nsec;
2449		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2450		hdrlen = sizeof(*h.h3);
2451		break;
2452	default:
2453		BUG();
2454	}
2455
2456	sll = h.raw + TPACKET_ALIGN(hdrlen);
2457	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2458	sll->sll_family = AF_PACKET;
2459	sll->sll_hatype = dev->type;
2460	sll->sll_protocol = skb->protocol;
2461	sll->sll_pkttype = skb->pkt_type;
2462	if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV)))
2463		sll->sll_ifindex = orig_dev->ifindex;
2464	else
2465		sll->sll_ifindex = dev->ifindex;
2466
2467	smp_mb();
2468
2469#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2470	if (po->tp_version <= TPACKET_V2) {
2471		u8 *start, *end;
2472
2473		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2474					macoff + snaplen);
2475
2476		for (start = h.raw; start < end; start += PAGE_SIZE)
2477			flush_dcache_page(pgv_to_page(start));
2478	}
2479	smp_wmb();
2480#endif
2481
2482	if (po->tp_version <= TPACKET_V2) {
2483		spin_lock(&sk->sk_receive_queue.lock);
2484		__packet_set_status(po, h.raw, status);
2485		__clear_bit(slot_id, po->rx_ring.rx_owner_map);
2486		spin_unlock(&sk->sk_receive_queue.lock);
2487		sk->sk_data_ready(sk);
2488	} else if (po->tp_version == TPACKET_V3) {
2489		prb_clear_blk_fill_status(&po->rx_ring);
2490	}
2491
2492drop_n_restore:
2493	if (skb_head != skb->data && skb_shared(skb)) {
2494		skb->data = skb_head;
2495		skb->len = skb_len;
2496	}
2497drop:
2498	kfree_skb_reason(skb, drop_reason);
2499	return 0;
2500
2501drop_n_account:
2502	spin_unlock(&sk->sk_receive_queue.lock);
2503	atomic_inc(&po->tp_drops);
2504	drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR;
2505
2506	sk->sk_data_ready(sk);
2507	kfree_skb_reason(copy_skb, drop_reason);
2508	goto drop_n_restore;
2509}
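
/* The consumer side of the RX ring that tpacket_rcv() above fills, as a
 * hedged userspace sketch for TPACKET_V2 (memory barriers and error
 * handling omitted). 'ring' is assumed to be the mmap()ed area and
 * 'frame_nr'/'frame_size' the values from the struct tpacket_req passed
 * to PACKET_RX_RING.
 *
 *	#include <stddef.h>
 *	#include <poll.h>
 *	#include <linux/if_packet.h>
 *
 *	static void rx_ring_loop(int fd, void *ring,
 *				 unsigned int frame_nr, unsigned int frame_size)
 *	{
 *		unsigned int i = 0;
 *
 *		for (;;) {
 *			struct tpacket2_hdr *hdr = (struct tpacket2_hdr *)
 *				((char *)ring + (size_t)i * frame_size);
 *
 *			if (!(hdr->tp_status & TP_STATUS_USER)) {
 *				struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *				poll(&pfd, 1, -1);
 *				continue;
 *			}
 *			// frame data starts at (char *)hdr + hdr->tp_mac
 *			hdr->tp_status = TP_STATUS_KERNEL;	// hand the slot back
 *			i = (i + 1) % frame_nr;
 *		}
 *	}
 */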
2510
2511static void tpacket_destruct_skb(struct sk_buff *skb)
2512{
2513	struct packet_sock *po = pkt_sk(skb->sk);
2514
2515	if (likely(po->tx_ring.pg_vec)) {
2516		void *ph;
2517		__u32 ts;
2518
2519		ph = skb_zcopy_get_nouarg(skb);
2520		packet_dec_pending(&po->tx_ring);
2521
2522		ts = __packet_set_timestamp(po, ph, skb);
2523		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2524
2525		if (!packet_read_pending(&po->tx_ring))
2526			complete(&po->skb_completion);
2527	}
2528
2529	sock_wfree(skb);
2530}
2531
2532static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2533{
2534	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2535	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2536	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2537	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2538		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2539			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2540			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2541
2542	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2543		return -EINVAL;
2544
2545	return 0;
2546}
2547
2548static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2549				 struct virtio_net_hdr *vnet_hdr, int vnet_hdr_sz)
2550{
2551	int ret;
2552
2553	if (*len < vnet_hdr_sz)
2554		return -EINVAL;
2555	*len -= vnet_hdr_sz;
2556
2557	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2558		return -EFAULT;
2559
2560	ret = __packet_snd_vnet_parse(vnet_hdr, *len);
2561	if (ret)
2562		return ret;
2563
2564	/* move iter to point to the start of mac header */
2565	if (vnet_hdr_sz != sizeof(struct virtio_net_hdr))
2566		iov_iter_advance(&msg->msg_iter, vnet_hdr_sz - sizeof(struct virtio_net_hdr));
2567
2568	return 0;
2569}
2570
2571static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2572		void *frame, struct net_device *dev, void *data, int tp_len,
2573		__be16 proto, unsigned char *addr, int hlen, int copylen,
2574		const struct sockcm_cookie *sockc)
2575{
2576	union tpacket_uhdr ph;
2577	int to_write, offset, len, nr_frags, len_max;
2578	struct socket *sock = po->sk.sk_socket;
2579	struct page *page;
2580	int err;
2581
2582	ph.raw = frame;
2583
2584	skb->protocol = proto;
2585	skb->dev = dev;
2586	skb->priority = READ_ONCE(po->sk.sk_priority);
2587	skb->mark = READ_ONCE(po->sk.sk_mark);
2588	skb->tstamp = sockc->transmit_time;
2589	skb_setup_tx_timestamp(skb, sockc->tsflags);
2590	skb_zcopy_set_nouarg(skb, ph.raw);
2591
2592	skb_reserve(skb, hlen);
2593	skb_reset_network_header(skb);
2594
2595	to_write = tp_len;
2596
2597	if (sock->type == SOCK_DGRAM) {
2598		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2599				NULL, tp_len);
2600		if (unlikely(err < 0))
2601			return -EINVAL;
2602	} else if (copylen) {
2603		int hdrlen = min_t(int, copylen, tp_len);
2604
2605		skb_push(skb, dev->hard_header_len);
2606		skb_put(skb, copylen - dev->hard_header_len);
2607		err = skb_store_bits(skb, 0, data, hdrlen);
2608		if (unlikely(err))
2609			return err;
2610		if (!dev_validate_header(dev, skb->data, hdrlen))
2611			return -EINVAL;
2612
2613		data += hdrlen;
2614		to_write -= hdrlen;
2615	}
2616
2617	offset = offset_in_page(data);
2618	len_max = PAGE_SIZE - offset;
2619	len = ((to_write > len_max) ? len_max : to_write);
2620
2621	skb->data_len = to_write;
2622	skb->len += to_write;
2623	skb->truesize += to_write;
2624	refcount_add(to_write, &po->sk.sk_wmem_alloc);
2625
2626	while (likely(to_write)) {
2627		nr_frags = skb_shinfo(skb)->nr_frags;
2628
2629		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2630			pr_err("Packet exceeds the number of skb frags (%u)\n",
2631			       (unsigned int)MAX_SKB_FRAGS);
2632			return -EFAULT;
2633		}
2634
2635		page = pgv_to_page(data);
2636		data += len;
2637		flush_dcache_page(page);
2638		get_page(page);
2639		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2640		to_write -= len;
2641		offset = 0;
2642		len_max = PAGE_SIZE;
2643		len = ((to_write > len_max) ? len_max : to_write);
2644	}
2645
2646	packet_parse_headers(skb, sock);
2647
2648	return tp_len;
2649}
2650
2651static int tpacket_parse_header(struct packet_sock *po, void *frame,
2652				int size_max, void **data)
2653{
2654	union tpacket_uhdr ph;
2655	int tp_len, off;
2656
2657	ph.raw = frame;
2658
2659	switch (po->tp_version) {
2660	case TPACKET_V3:
2661		if (ph.h3->tp_next_offset != 0) {
2662			pr_warn_once("variable sized slot not supported");
2663			return -EINVAL;
2664		}
2665		tp_len = ph.h3->tp_len;
2666		break;
2667	case TPACKET_V2:
2668		tp_len = ph.h2->tp_len;
2669		break;
2670	default:
2671		tp_len = ph.h1->tp_len;
2672		break;
2673	}
2674	if (unlikely(tp_len > size_max)) {
2675		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2676		return -EMSGSIZE;
2677	}
2678
2679	if (unlikely(packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF))) {
2680		int off_min, off_max;
2681
2682		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2683		off_max = po->tx_ring.frame_size - tp_len;
2684		if (po->sk.sk_type == SOCK_DGRAM) {
2685			switch (po->tp_version) {
2686			case TPACKET_V3:
2687				off = ph.h3->tp_net;
2688				break;
2689			case TPACKET_V2:
2690				off = ph.h2->tp_net;
2691				break;
2692			default:
2693				off = ph.h1->tp_net;
2694				break;
2695			}
2696		} else {
2697			switch (po->tp_version) {
2698			case TPACKET_V3:
2699				off = ph.h3->tp_mac;
2700				break;
2701			case TPACKET_V2:
2702				off = ph.h2->tp_mac;
2703				break;
2704			default:
2705				off = ph.h1->tp_mac;
2706				break;
2707			}
2708		}
2709		if (unlikely((off < off_min) || (off_max < off)))
2710			return -EINVAL;
2711	} else {
2712		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2713	}
2714
2715	*data = frame + off;
2716	return tp_len;
2717}
2718
2719static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2720{
2721	struct sk_buff *skb = NULL;
2722	struct net_device *dev;
2723	struct virtio_net_hdr *vnet_hdr = NULL;
2724	struct sockcm_cookie sockc;
2725	__be16 proto;
2726	int err, reserve = 0;
2727	void *ph;
2728	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2729	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2730	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2731	unsigned char *addr = NULL;
2732	int tp_len, size_max;
2733	void *data;
2734	int len_sum = 0;
2735	int status = TP_STATUS_AVAILABLE;
2736	int hlen, tlen, copylen = 0;
2737	long timeo = 0;
2738
2739	mutex_lock(&po->pg_vec_lock);
2740
2741	/* The packet_sendmsg() check on tx_ring.pg_vec was lockless,
2742	 * so we need to confirm it under the protection of pg_vec_lock.
2743	 */
2744	if (unlikely(!po->tx_ring.pg_vec)) {
2745		err = -EBUSY;
2746		goto out;
2747	}
2748	if (likely(saddr == NULL)) {
2749		dev	= packet_cached_dev_get(po);
2750		proto	= READ_ONCE(po->num);
2751	} else {
2752		err = -EINVAL;
2753		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2754			goto out;
2755		if (msg->msg_namelen < (saddr->sll_halen
2756					+ offsetof(struct sockaddr_ll,
2757						sll_addr)))
2758			goto out;
2759		proto	= saddr->sll_protocol;
2760		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2761		if (po->sk.sk_socket->type == SOCK_DGRAM) {
2762			if (dev && msg->msg_namelen < dev->addr_len +
2763				   offsetof(struct sockaddr_ll, sll_addr))
2764				goto out_put;
2765			addr = saddr->sll_addr;
2766		}
2767	}
2768
2769	err = -ENXIO;
2770	if (unlikely(dev == NULL))
2771		goto out;
2772	err = -ENETDOWN;
2773	if (unlikely(!(dev->flags & IFF_UP)))
2774		goto out_put;
2775
2776	sockcm_init(&sockc, &po->sk);
2777	if (msg->msg_controllen) {
2778		err = sock_cmsg_send(&po->sk, msg, &sockc);
2779		if (unlikely(err))
2780			goto out_put;
2781	}
2782
2783	if (po->sk.sk_socket->type == SOCK_RAW)
2784		reserve = dev->hard_header_len;
2785	size_max = po->tx_ring.frame_size
2786		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2787
2788	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !vnet_hdr_sz)
2789		size_max = dev->mtu + reserve + VLAN_HLEN;
2790
2791	reinit_completion(&po->skb_completion);
2792
2793	do {
2794		ph = packet_current_frame(po, &po->tx_ring,
2795					  TP_STATUS_SEND_REQUEST);
2796		if (unlikely(ph == NULL)) {
2797			if (need_wait && skb) {
2798				timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2799				timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2800				if (timeo <= 0) {
2801					err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2802					goto out_put;
2803				}
2804			}
2805			/* check for additional frames */
2806			continue;
2807		}
2808
2809		skb = NULL;
2810		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2811		if (tp_len < 0)
2812			goto tpacket_error;
2813
2814		status = TP_STATUS_SEND_REQUEST;
2815		hlen = LL_RESERVED_SPACE(dev);
2816		tlen = dev->needed_tailroom;
2817		if (vnet_hdr_sz) {
2818			vnet_hdr = data;
2819			data += vnet_hdr_sz;
2820			tp_len -= vnet_hdr_sz;
2821			if (tp_len < 0 ||
2822			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2823				tp_len = -EINVAL;
2824				goto tpacket_error;
2825			}
2826			copylen = __virtio16_to_cpu(vio_le(),
2827						    vnet_hdr->hdr_len);
2828		}
2829		copylen = max_t(int, copylen, dev->hard_header_len);
2830		skb = sock_alloc_send_skb(&po->sk,
2831				hlen + tlen + sizeof(struct sockaddr_ll) +
2832				(copylen - dev->hard_header_len),
2833				!need_wait, &err);
2834
2835		if (unlikely(skb == NULL)) {
2836			/* we assume the socket was initially writeable ... */
2837			if (likely(len_sum > 0))
2838				err = len_sum;
2839			goto out_status;
2840		}
2841		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2842					  addr, hlen, copylen, &sockc);
2843		if (likely(tp_len >= 0) &&
2844		    tp_len > dev->mtu + reserve &&
2845		    !vnet_hdr_sz &&
2846		    !packet_extra_vlan_len_allowed(dev, skb))
2847			tp_len = -EMSGSIZE;
2848
2849		if (unlikely(tp_len < 0)) {
2850tpacket_error:
2851			if (packet_sock_flag(po, PACKET_SOCK_TP_LOSS)) {
2852				__packet_set_status(po, ph,
2853						TP_STATUS_AVAILABLE);
2854				packet_increment_head(&po->tx_ring);
2855				kfree_skb(skb);
2856				continue;
2857			} else {
2858				status = TP_STATUS_WRONG_FORMAT;
2859				err = tp_len;
2860				goto out_status;
2861			}
2862		}
2863
2864		if (vnet_hdr_sz) {
2865			if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2866				tp_len = -EINVAL;
2867				goto tpacket_error;
2868			}
2869			virtio_net_hdr_set_proto(skb, vnet_hdr);
2870		}
2871
2872		skb->destructor = tpacket_destruct_skb;
2873		__packet_set_status(po, ph, TP_STATUS_SENDING);
2874		packet_inc_pending(&po->tx_ring);
2875
2876		status = TP_STATUS_SEND_REQUEST;
2877		err = packet_xmit(po, skb);
2878		if (unlikely(err != 0)) {
2879			if (err > 0)
2880				err = net_xmit_errno(err);
2881			if (err && __packet_get_status(po, ph) ==
2882				   TP_STATUS_AVAILABLE) {
2883				/* skb was destructed already */
2884				skb = NULL;
2885				goto out_status;
2886			}
2887			/*
2888			 * skb was dropped but not destructed yet;
2889			 * let's treat it like congestion or err < 0
2890			 */
2891			err = 0;
2892		}
2893		packet_increment_head(&po->tx_ring);
2894		len_sum += tp_len;
2895	} while (likely((ph != NULL) ||
2896		/* Note: packet_read_pending() might be slow if we have
2897		 * to call it, as it's a per-CPU variable, but in the
2898		 * fast path we already short-circuit the loop with the
2899		 * first condition and luckily don't have to go down that
2900		 * path anyway.
2901		 */
2902		 (need_wait && packet_read_pending(&po->tx_ring))));
2903
2904	err = len_sum;
2905	goto out_put;
2906
2907out_status:
2908	__packet_set_status(po, ph, status);
2909	kfree_skb(skb);
2910out_put:
2911	dev_put(dev);
2912out:
2913	mutex_unlock(&po->pg_vec_lock);
2914	return err;
2915}
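
/* Producer-side sketch of the TX ring drained by tpacket_snd() above, for
 * TPACKET_V2 on a SOCK_RAW socket (hedged; 'slot' points into the area
 * mmap()ed after PACKET_TX_RING, and 'frame' holds a complete link-layer
 * frame). The payload goes tp_hdrlen - sizeof(struct sockaddr_ll) past the
 * slot start, which is where tpacket_parse_header() above expects it.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	static int tx_one_frame(int fd, void *slot,
 *				const void *frame, unsigned int len)
 *	{
 *		struct tpacket2_hdr *hdr = slot;
 *		void *data = (char *)slot + TPACKET2_HDRLEN -
 *			     sizeof(struct sockaddr_ll);
 *
 *		if (hdr->tp_status != TP_STATUS_AVAILABLE)
 *			return -1;	// slot still owned by the kernel
 *		memcpy(data, frame, len);
 *		hdr->tp_len = len;
 *		hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *		return send(fd, NULL, 0, 0);	// kick the kernel
 *	}
 */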
2916
2917static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2918				        size_t reserve, size_t len,
2919				        size_t linear, int noblock,
2920				        int *err)
2921{
2922	struct sk_buff *skb;
2923
2924	/* Under a page?  Don't bother with paged skb. */
2925	if (prepad + len < PAGE_SIZE || !linear)
2926		linear = len;
2927
2928	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
2929		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
2930	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2931				   err, PAGE_ALLOC_COSTLY_ORDER);
2932	if (!skb)
2933		return NULL;
2934
2935	skb_reserve(skb, reserve);
2936	skb_put(skb, linear);
2937	skb->data_len = len - linear;
2938	skb->len += len - linear;
2939
2940	return skb;
2941}
2942
2943static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2944{
2945	struct sock *sk = sock->sk;
2946	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2947	struct sk_buff *skb;
2948	struct net_device *dev;
2949	__be16 proto;
2950	unsigned char *addr = NULL;
2951	int err, reserve = 0;
2952	struct sockcm_cookie sockc;
2953	struct virtio_net_hdr vnet_hdr = { 0 };
2954	int offset = 0;
2955	struct packet_sock *po = pkt_sk(sk);
2956	int vnet_hdr_sz = READ_ONCE(po->vnet_hdr_sz);
2957	int hlen, tlen, linear;
2958	int extra_len = 0;
2959
2960	/*
2961	 *	Get and verify the address.
2962	 */
2963
2964	if (likely(saddr == NULL)) {
2965		dev	= packet_cached_dev_get(po);
2966		proto	= READ_ONCE(po->num);
2967	} else {
2968		err = -EINVAL;
2969		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2970			goto out;
2971		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2972			goto out;
2973		proto	= saddr->sll_protocol;
2974		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2975		if (sock->type == SOCK_DGRAM) {
2976			if (dev && msg->msg_namelen < dev->addr_len +
2977				   offsetof(struct sockaddr_ll, sll_addr))
2978				goto out_unlock;
2979			addr = saddr->sll_addr;
2980		}
2981	}
2982
2983	err = -ENXIO;
2984	if (unlikely(dev == NULL))
2985		goto out_unlock;
2986	err = -ENETDOWN;
2987	if (unlikely(!(dev->flags & IFF_UP)))
2988		goto out_unlock;
2989
2990	sockcm_init(&sockc, sk);
2991	sockc.mark = READ_ONCE(sk->sk_mark);
2992	if (msg->msg_controllen) {
2993		err = sock_cmsg_send(sk, msg, &sockc);
2994		if (unlikely(err))
2995			goto out_unlock;
2996	}
2997
2998	if (sock->type == SOCK_RAW)
2999		reserve = dev->hard_header_len;
3000	if (vnet_hdr_sz) {
3001		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr, vnet_hdr_sz);
3002		if (err)
3003			goto out_unlock;
3004	}
3005
3006	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
3007		if (!netif_supports_nofcs(dev)) {
3008			err = -EPROTONOSUPPORT;
3009			goto out_unlock;
3010		}
3011		extra_len = 4; /* We're doing our own CRC */
3012	}
3013
3014	err = -EMSGSIZE;
3015	if (!vnet_hdr.gso_type &&
3016	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
3017		goto out_unlock;
3018
3019	err = -ENOBUFS;
3020	hlen = LL_RESERVED_SPACE(dev);
3021	tlen = dev->needed_tailroom;
3022	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
3023	linear = max(linear, min_t(int, len, dev->hard_header_len));
3024	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
3025			       msg->msg_flags & MSG_DONTWAIT, &err);
3026	if (skb == NULL)
3027		goto out_unlock;
3028
3029	skb_reset_network_header(skb);
3030
3031	err = -EINVAL;
3032	if (sock->type == SOCK_DGRAM) {
3033		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
3034		if (unlikely(offset < 0))
3035			goto out_free;
3036	} else if (reserve) {
3037		skb_reserve(skb, -reserve);
3038		if (len < reserve + sizeof(struct ipv6hdr) &&
3039		    dev->min_header_len != dev->hard_header_len)
3040			skb_reset_network_header(skb);
3041	}
3042
3043	/* Returns -EFAULT on error */
3044	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
3045	if (err)
3046		goto out_free;
3047
3048	if ((sock->type == SOCK_RAW &&
3049	     !dev_validate_header(dev, skb->data, len)) || !skb->len) {
3050		err = -EINVAL;
3051		goto out_free;
3052	}
3053
3054	skb_setup_tx_timestamp(skb, sockc.tsflags);
3055
3056	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
3057	    !packet_extra_vlan_len_allowed(dev, skb)) {
3058		err = -EMSGSIZE;
3059		goto out_free;
3060	}
3061
3062	skb->protocol = proto;
3063	skb->dev = dev;
3064	skb->priority = READ_ONCE(sk->sk_priority);
3065	skb->mark = sockc.mark;
3066	skb->tstamp = sockc.transmit_time;
3067
3068	if (unlikely(extra_len == 4))
3069		skb->no_fcs = 1;
3070
3071	packet_parse_headers(skb, sock);
3072
3073	if (vnet_hdr_sz) {
3074		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3075		if (err)
3076			goto out_free;
3077		len += vnet_hdr_sz;
3078		virtio_net_hdr_set_proto(skb, &vnet_hdr);
3079	}
3080
3081	err = packet_xmit(po, skb);
3082
3083	if (unlikely(err != 0)) {
3084		if (err > 0)
3085			err = net_xmit_errno(err);
3086		if (err)
3087			goto out_unlock;
3088	}
3089
3090	dev_put(dev);
3091
3092	return len;
3093
3094out_free:
3095	kfree_skb(skb);
3096out_unlock:
3097	dev_put(dev);
3098out:
3099	return err;
3100}
3101
3102static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3103{
3104	struct sock *sk = sock->sk;
3105	struct packet_sock *po = pkt_sk(sk);
3106
3107	/* Reading tx_ring.pg_vec without holding pg_vec_lock is racy.
3108	 * tpacket_snd() will redo the check safely.
3109	 */
3110	if (data_race(po->tx_ring.pg_vec))
3111		return tpacket_snd(po, msg);
3112
3113	return packet_snd(sock, msg, len);
3114}
3115
3116/*
3117 *	Close a PACKET socket. This is fairly simple. We immediately go
3118 *	to 'closed' state and remove our protocol entry in the device list.
3119 */
3120
3121static int packet_release(struct socket *sock)
3122{
3123	struct sock *sk = sock->sk;
3124	struct packet_sock *po;
3125	struct packet_fanout *f;
3126	struct net *net;
3127	union tpacket_req_u req_u;
3128
3129	if (!sk)
3130		return 0;
3131
3132	net = sock_net(sk);
3133	po = pkt_sk(sk);
3134
3135	mutex_lock(&net->packet.sklist_lock);
3136	sk_del_node_init_rcu(sk);
3137	mutex_unlock(&net->packet.sklist_lock);
3138
3139	sock_prot_inuse_add(net, sk->sk_prot, -1);
3140
3141	spin_lock(&po->bind_lock);
3142	unregister_prot_hook(sk, false);
3143	packet_cached_dev_reset(po);
3144
3145	if (po->prot_hook.dev) {
3146		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3147		po->prot_hook.dev = NULL;
3148	}
3149	spin_unlock(&po->bind_lock);
3150
3151	packet_flush_mclist(sk);
3152
3153	lock_sock(sk);
3154	if (po->rx_ring.pg_vec) {
3155		memset(&req_u, 0, sizeof(req_u));
3156		packet_set_ring(sk, &req_u, 1, 0);
3157	}
3158
3159	if (po->tx_ring.pg_vec) {
3160		memset(&req_u, 0, sizeof(req_u));
3161		packet_set_ring(sk, &req_u, 1, 1);
3162	}
3163	release_sock(sk);
3164
3165	f = fanout_release(sk);
3166
3167	synchronize_net();
3168
3169	kfree(po->rollover);
3170	if (f) {
3171		fanout_release_data(f);
3172		kvfree(f);
3173	}
3174	/*
3175	 *	Now the socket is dead. No more input will appear.
3176	 */
3177	sock_orphan(sk);
3178	sock->sk = NULL;
3179
3180	/* Purge queues */
3181
3182	skb_queue_purge(&sk->sk_receive_queue);
3183	packet_free_pending(po);
3184
3185	sock_put(sk);
3186	return 0;
3187}
3188
3189/*
3190 *	Attach a packet hook.
3191 */
3192
3193static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3194			  __be16 proto)
3195{
3196	struct packet_sock *po = pkt_sk(sk);
3197	struct net_device *dev = NULL;
3198	bool unlisted = false;
3199	bool need_rehook;
3200	int ret = 0;
3201
3202	lock_sock(sk);
3203	spin_lock(&po->bind_lock);
3204	if (!proto)
3205		proto = po->num;
3206
3207	rcu_read_lock();
3208
3209	if (po->fanout) {
3210		ret = -EINVAL;
3211		goto out_unlock;
3212	}
3213
3214	if (name) {
3215		dev = dev_get_by_name_rcu(sock_net(sk), name);
3216		if (!dev) {
3217			ret = -ENODEV;
3218			goto out_unlock;
3219		}
3220	} else if (ifindex) {
3221		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3222		if (!dev) {
3223			ret = -ENODEV;
3224			goto out_unlock;
3225		}
3226	}
3227
3228	need_rehook = po->prot_hook.type != proto || po->prot_hook.dev != dev;
3229
3230	if (need_rehook) {
3231		dev_hold(dev);
3232		if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
3233			rcu_read_unlock();
3234			/* prevents packet_notifier() from calling
3235			 * register_prot_hook()
3236			 */
3237			WRITE_ONCE(po->num, 0);
3238			__unregister_prot_hook(sk, true);
3239			rcu_read_lock();
3240			if (dev)
3241				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3242								 dev->ifindex);
3243		}
3244
3245		BUG_ON(packet_sock_flag(po, PACKET_SOCK_RUNNING));
3246		WRITE_ONCE(po->num, proto);
3247		po->prot_hook.type = proto;
3248
3249		netdev_put(po->prot_hook.dev, &po->prot_hook.dev_tracker);
3250
3251		if (unlikely(unlisted)) {
3252			po->prot_hook.dev = NULL;
3253			WRITE_ONCE(po->ifindex, -1);
3254			packet_cached_dev_reset(po);
3255		} else {
3256			netdev_hold(dev, &po->prot_hook.dev_tracker,
3257				    GFP_ATOMIC);
3258			po->prot_hook.dev = dev;
3259			WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
3260			packet_cached_dev_assign(po, dev);
3261		}
3262		dev_put(dev);
3263	}
3264
3265	if (proto == 0 || !need_rehook)
3266		goto out_unlock;
3267
3268	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3269		register_prot_hook(sk);
3270	} else {
3271		sk->sk_err = ENETDOWN;
3272		if (!sock_flag(sk, SOCK_DEAD))
3273			sk_error_report(sk);
3274	}
3275
3276out_unlock:
3277	rcu_read_unlock();
3278	spin_unlock(&po->bind_lock);
3279	release_sock(sk);
3280	return ret;
3281}
3282
3283/*
3284 *	Bind a packet socket to a device
3285 */
3286
3287static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3288			    int addr_len)
3289{
3290	struct sock *sk = sock->sk;
3291	char name[sizeof(uaddr->sa_data_min) + 1];
3292
3293	/*
3294	 *	Check legality
3295	 */
3296
3297	if (addr_len != sizeof(struct sockaddr))
3298		return -EINVAL;
3299	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3300	 * zero-terminated.
3301	 */
3302	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data_min));
3303	name[sizeof(uaddr->sa_data_min)] = 0;
3304
3305	return packet_do_bind(sk, name, 0, 0);
3306}
3307
3308static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3309{
3310	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3311	struct sock *sk = sock->sk;
3312
3313	/*
3314	 *	Check legality
3315	 */
3316
3317	if (addr_len < sizeof(struct sockaddr_ll))
3318		return -EINVAL;
3319	if (sll->sll_family != AF_PACKET)
3320		return -EINVAL;
3321
3322	return packet_do_bind(sk, NULL, sll->sll_ifindex, sll->sll_protocol);
3323}
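
/* A hedged userspace sketch of the bind() handled above: attach an
 * AF_PACKET socket to one interface for every protocol. 'ifindex' is
 * assumed to come from if_nametoindex().
 *
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/if_ether.h>
 *
 *	static int bind_to_ifindex(int fd, int ifindex)
 *	{
 *		struct sockaddr_ll sll = {
 *			.sll_family	= AF_PACKET,
 *			.sll_protocol	= htons(ETH_P_ALL),
 *			.sll_ifindex	= ifindex,
 *		};
 *
 *		return bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 *	}
 */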
3324
3325static struct proto packet_proto = {
3326	.name	  = "PACKET",
3327	.owner	  = THIS_MODULE,
3328	.obj_size = sizeof(struct packet_sock),
3329};
3330
3331/*
3332 *	Create a packet of type SOCK_PACKET.
3333 */
3334
3335static int packet_create(struct net *net, struct socket *sock, int protocol,
3336			 int kern)
3337{
3338	struct sock *sk;
3339	struct packet_sock *po;
3340	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3341	int err;
3342
3343	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3344		return -EPERM;
3345	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3346	    sock->type != SOCK_PACKET)
3347		return -ESOCKTNOSUPPORT;
3348
3349	sock->state = SS_UNCONNECTED;
3350
3351	err = -ENOBUFS;
3352	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3353	if (sk == NULL)
3354		goto out;
3355
3356	sock->ops = &packet_ops;
3357	if (sock->type == SOCK_PACKET)
3358		sock->ops = &packet_ops_spkt;
3359
3360	sock_init_data(sock, sk);
3361
3362	po = pkt_sk(sk);
3363	init_completion(&po->skb_completion);
3364	sk->sk_family = PF_PACKET;
3365	po->num = proto;
3366
3367	err = packet_alloc_pending(po);
3368	if (err)
3369		goto out2;
3370
3371	packet_cached_dev_reset(po);
3372
3373	sk->sk_destruct = packet_sock_destruct;
3374
3375	/*
3376	 *	Attach a protocol block
3377	 */
3378
3379	spin_lock_init(&po->bind_lock);
3380	mutex_init(&po->pg_vec_lock);
3381	po->rollover = NULL;
3382	po->prot_hook.func = packet_rcv;
3383
3384	if (sock->type == SOCK_PACKET)
3385		po->prot_hook.func = packet_rcv_spkt;
3386
3387	po->prot_hook.af_packet_priv = sk;
3388	po->prot_hook.af_packet_net = sock_net(sk);
3389
3390	if (proto) {
3391		po->prot_hook.type = proto;
3392		__register_prot_hook(sk);
3393	}
3394
3395	mutex_lock(&net->packet.sklist_lock);
3396	sk_add_node_tail_rcu(sk, &net->packet.sklist);
3397	mutex_unlock(&net->packet.sklist_lock);
3398
3399	sock_prot_inuse_add(net, &packet_proto, 1);
3400
3401	return 0;
3402out2:
3403	sk_free(sk);
3404out:
3405	return err;
3406}
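
/* The userspace call that lands in packet_create() above is a plain
 * socket() call; a hedged sketch (CAP_NET_RAW is required, as checked
 * above):
 *
 *	#include <stdio.h>
 *	#include <sys/socket.h>
 *	#include <netinet/in.h>
 *	#include <linux/if_ether.h>
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	if (fd < 0)
 *		perror("socket");
 */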
3407
3408/*
3409 *	Pull a packet from our receive queue and hand it to the user.
3410 *	If necessary we block.
3411 */
3412
3413static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3414			  int flags)
3415{
3416	struct sock *sk = sock->sk;
3417	struct sk_buff *skb;
3418	int copied, err;
3419	int vnet_hdr_len = READ_ONCE(pkt_sk(sk)->vnet_hdr_sz);
3420	unsigned int origlen = 0;
3421
3422	err = -EINVAL;
3423	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3424		goto out;
3425
3426#if 0
3427	/* What error should we return now? EUNATTACH? */
3428	if (pkt_sk(sk)->ifindex < 0)
3429		return -ENODEV;
3430#endif
3431
3432	if (flags & MSG_ERRQUEUE) {
3433		err = sock_recv_errqueue(sk, msg, len,
3434					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3435		goto out;
3436	}
3437
3438	/*
3439	 *	Call the generic datagram receiver. This handles all sorts
3440	 *	of horrible races and re-entrancy so we can forget about it
3441	 *	in the protocol layers.
3442	 *
3443	 *	Now it will return ENETDOWN if the device has just gone down,
3444	 *	but then it will block.
3445	 */
3446
3447	skb = skb_recv_datagram(sk, flags, &err);
3448
3449	/*
3450	 *	If an error occurred, return it. Because skb_recv_datagram()
3451	 *	handles the blocking, we don't need to see or worry about
3452	 *	blocking retries.
3453	 */
3454
3455	if (skb == NULL)
3456		goto out;
3457
3458	packet_rcv_try_clear_pressure(pkt_sk(sk));
3459
3460	if (vnet_hdr_len) {
3461		err = packet_rcv_vnet(msg, skb, &len, vnet_hdr_len);
3462		if (err)
3463			goto out_free;
3464	}
3465
3466	/* You lose any data beyond the buffer you gave. If that worries
3467	 * a user program, it can ask the device for its MTU
3468	 * anyway.
3469	 */
3470	copied = skb->len;
3471	if (copied > len) {
3472		copied = len;
3473		msg->msg_flags |= MSG_TRUNC;
3474	}
3475
3476	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3477	if (err)
3478		goto out_free;
3479
3480	if (sock->type != SOCK_PACKET) {
3481		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3482
3483		/* Original length was stored in sockaddr_ll fields */
3484		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3485		sll->sll_family = AF_PACKET;
3486		sll->sll_protocol = skb->protocol;
3487	}
3488
3489	sock_recv_cmsgs(msg, sk, skb);
3490
3491	if (msg->msg_name) {
3492		const size_t max_len = min(sizeof(skb->cb),
3493					   sizeof(struct sockaddr_storage));
3494		int copy_len;
3495
3496		/* If the address length field is there to be filled
3497		 * in, we fill it in now.
3498		 */
3499		if (sock->type == SOCK_PACKET) {
3500			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3501			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3502			copy_len = msg->msg_namelen;
3503		} else {
3504			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3505
3506			msg->msg_namelen = sll->sll_halen +
3507				offsetof(struct sockaddr_ll, sll_addr);
3508			copy_len = msg->msg_namelen;
3509			if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3510				memset(msg->msg_name +
3511				       offsetof(struct sockaddr_ll, sll_addr),
3512				       0, sizeof(sll->sll_addr));
3513				msg->msg_namelen = sizeof(struct sockaddr_ll);
3514			}
3515		}
3516		if (WARN_ON_ONCE(copy_len > max_len)) {
3517			copy_len = max_len;
3518			msg->msg_namelen = copy_len;
3519		}
3520		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3521	}
3522
3523	if (packet_sock_flag(pkt_sk(sk), PACKET_SOCK_AUXDATA)) {
3524		struct tpacket_auxdata aux;
3525
3526		aux.tp_status = TP_STATUS_USER;
3527		if (skb->ip_summed == CHECKSUM_PARTIAL)
3528			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3529		else if (skb->pkt_type != PACKET_OUTGOING &&
3530			 skb_csum_unnecessary(skb))
3531			aux.tp_status |= TP_STATUS_CSUM_VALID;
3532		if (skb_is_gso(skb) && skb_is_gso_tcp(skb))
3533			aux.tp_status |= TP_STATUS_GSO_TCP;
3534
3535		aux.tp_len = origlen;
3536		aux.tp_snaplen = skb->len;
3537		aux.tp_mac = 0;
3538		aux.tp_net = skb_network_offset(skb);
3539		if (skb_vlan_tag_present(skb)) {
3540			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3541			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3542			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3543		} else {
3544			aux.tp_vlan_tci = 0;
3545			aux.tp_vlan_tpid = 0;
3546		}
3547		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3548	}
3549
3550	/*
3551	 *	Free or return the buffer as appropriate. Again this
3552	 *	hides all the races and re-entrancy issues from us.
3553	 */
3554	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3555
3556out_free:
3557	skb_free_datagram(sk, skb);
3558out:
3559	return err;
3560}
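
/* A hedged userspace sketch of consuming the PACKET_AUXDATA control
 * message that packet_recvmsg() above emits once the option is enabled
 * with setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, ...):
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <sys/uio.h>
 *	#include <linux/if_packet.h>
 *
 *	static void recv_with_auxdata(int fd, void *buf, size_t buflen)
 *	{
 *		char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *		struct iovec iov = { .iov_base = buf, .iov_len = buflen };
 *		struct msghdr msg = {
 *			.msg_iov	= &iov,
 *			.msg_iovlen	= 1,
 *			.msg_control	= cbuf,
 *			.msg_controllen	= sizeof(cbuf),
 *		};
 *		struct cmsghdr *cmsg;
 *
 *		if (recvmsg(fd, &msg, 0) < 0)
 *			return;
 *		for (cmsg = CMSG_FIRSTHDR(&msg); cmsg;
 *		     cmsg = CMSG_NXTHDR(&msg, cmsg)) {
 *			if (cmsg->cmsg_level == SOL_PACKET &&
 *			    cmsg->cmsg_type == PACKET_AUXDATA) {
 *				struct tpacket_auxdata aux;
 *
 *				memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
 *				// aux.tp_snaplen, aux.tp_vlan_tci, ... as filled above
 *			}
 *		}
 *	}
 */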
3561
3562static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3563			       int peer)
3564{
3565	struct net_device *dev;
3566	struct sock *sk	= sock->sk;
3567
3568	if (peer)
3569		return -EOPNOTSUPP;
3570
3571	uaddr->sa_family = AF_PACKET;
3572	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data_min));
3573	rcu_read_lock();
3574	dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
3575	if (dev)
3576		strscpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data_min));
3577	rcu_read_unlock();
3578
3579	return sizeof(*uaddr);
3580}
3581
3582static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3583			  int peer)
3584{
3585	struct net_device *dev;
3586	struct sock *sk = sock->sk;
3587	struct packet_sock *po = pkt_sk(sk);
3588	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3589	int ifindex;
3590
3591	if (peer)
3592		return -EOPNOTSUPP;
3593
3594	ifindex = READ_ONCE(po->ifindex);
3595	sll->sll_family = AF_PACKET;
3596	sll->sll_ifindex = ifindex;
3597	sll->sll_protocol = READ_ONCE(po->num);
3598	sll->sll_pkttype = 0;
3599	rcu_read_lock();
3600	dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3601	if (dev) {
3602		sll->sll_hatype = dev->type;
3603		sll->sll_halen = dev->addr_len;
3604
3605		/* Let __fortify_memcpy_chk() know the actual buffer size. */
3606		memcpy(((struct sockaddr_storage *)sll)->__data +
3607		       offsetof(struct sockaddr_ll, sll_addr) -
3608		       offsetofend(struct sockaddr_ll, sll_family),
3609		       dev->dev_addr, dev->addr_len);
3610	} else {
3611		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3612		sll->sll_halen = 0;
3613	}
3614	rcu_read_unlock();
3615
3616	return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3617}
3618
3619static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3620			 int what)
3621{
3622	switch (i->type) {
3623	case PACKET_MR_MULTICAST:
3624		if (i->alen != dev->addr_len)
3625			return -EINVAL;
3626		if (what > 0)
3627			return dev_mc_add(dev, i->addr);
3628		else
3629			return dev_mc_del(dev, i->addr);
3630		break;
3631	case PACKET_MR_PROMISC:
3632		return dev_set_promiscuity(dev, what);
3633	case PACKET_MR_ALLMULTI:
3634		return dev_set_allmulti(dev, what);
3635	case PACKET_MR_UNICAST:
3636		if (i->alen != dev->addr_len)
3637			return -EINVAL;
3638		if (what > 0)
3639			return dev_uc_add(dev, i->addr);
3640		else
3641			return dev_uc_del(dev, i->addr);
3642		break;
3643	default:
3644		break;
3645	}
3646	return 0;
3647}
3648
3649static void packet_dev_mclist_delete(struct net_device *dev,
3650				     struct packet_mclist **mlp)
3651{
3652	struct packet_mclist *ml;
3653
3654	while ((ml = *mlp) != NULL) {
3655		if (ml->ifindex == dev->ifindex) {
3656			packet_dev_mc(dev, ml, -1);
3657			*mlp = ml->next;
3658			kfree(ml);
3659		} else
3660			mlp = &ml->next;
3661	}
3662}
3663
3664static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3665{
3666	struct packet_sock *po = pkt_sk(sk);
3667	struct packet_mclist *ml, *i;
3668	struct net_device *dev;
3669	int err;
3670
3671	rtnl_lock();
3672
3673	err = -ENODEV;
3674	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3675	if (!dev)
3676		goto done;
3677
3678	err = -EINVAL;
3679	if (mreq->mr_alen > dev->addr_len)
3680		goto done;
3681
3682	err = -ENOBUFS;
3683	i = kmalloc(sizeof(*i), GFP_KERNEL);
3684	if (i == NULL)
3685		goto done;
3686
3687	err = 0;
3688	for (ml = po->mclist; ml; ml = ml->next) {
3689		if (ml->ifindex == mreq->mr_ifindex &&
3690		    ml->type == mreq->mr_type &&
3691		    ml->alen == mreq->mr_alen &&
3692		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3693			ml->count++;
3694			/* Free the new element ... */
3695			kfree(i);
3696			goto done;
3697		}
3698	}
3699
3700	i->type = mreq->mr_type;
3701	i->ifindex = mreq->mr_ifindex;
3702	i->alen = mreq->mr_alen;
3703	memcpy(i->addr, mreq->mr_address, i->alen);
3704	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3705	i->count = 1;
3706	i->next = po->mclist;
3707	po->mclist = i;
3708	err = packet_dev_mc(dev, i, 1);
3709	if (err) {
3710		po->mclist = i->next;
3711		kfree(i);
3712	}
3713
3714done:
3715	rtnl_unlock();
3716	return err;
3717}
3718
3719static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3720{
3721	struct packet_mclist *ml, **mlp;
3722
3723	rtnl_lock();
3724
3725	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3726		if (ml->ifindex == mreq->mr_ifindex &&
3727		    ml->type == mreq->mr_type &&
3728		    ml->alen == mreq->mr_alen &&
3729		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3730			if (--ml->count == 0) {
3731				struct net_device *dev;
3732				*mlp = ml->next;
3733				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3734				if (dev)
3735					packet_dev_mc(dev, ml, -1);
3736				kfree(ml);
3737			}
3738			break;
3739		}
3740	}
3741	rtnl_unlock();
3742	return 0;
3743}
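
/* A hedged userspace sketch of the membership options handled by
 * packet_mc_add()/packet_mc_drop() above: putting an interface into
 * promiscuous mode via PACKET_ADD_MEMBERSHIP (refcounted by the kernel,
 * unlike flipping IFF_PROMISC by hand).
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	static int enable_promisc(int fd, int ifindex)
 *	{
 *		struct packet_mreq mreq = {
 *			.mr_ifindex	= ifindex,
 *			.mr_type	= PACKET_MR_PROMISC,
 *		};
 *
 *		return setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP,
 *				  &mreq, sizeof(mreq));
 *	}
 */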
3744
3745static void packet_flush_mclist(struct sock *sk)
3746{
3747	struct packet_sock *po = pkt_sk(sk);
3748	struct packet_mclist *ml;
3749
3750	if (!po->mclist)
3751		return;
3752
3753	rtnl_lock();
3754	while ((ml = po->mclist) != NULL) {
3755		struct net_device *dev;
3756
3757		po->mclist = ml->next;
3758		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3759		if (dev != NULL)
3760			packet_dev_mc(dev, ml, -1);
3761		kfree(ml);
3762	}
3763	rtnl_unlock();
3764}
3765
3766static int
3767packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3768		  unsigned int optlen)
3769{
3770	struct sock *sk = sock->sk;
3771	struct packet_sock *po = pkt_sk(sk);
3772	int ret;
3773
3774	if (level != SOL_PACKET)
3775		return -ENOPROTOOPT;
3776
3777	switch (optname) {
3778	case PACKET_ADD_MEMBERSHIP:
3779	case PACKET_DROP_MEMBERSHIP:
3780	{
3781		struct packet_mreq_max mreq;
3782		int len = optlen;
3783		memset(&mreq, 0, sizeof(mreq));
3784		if (len < sizeof(struct packet_mreq))
3785			return -EINVAL;
3786		if (len > sizeof(mreq))
3787			len = sizeof(mreq);
3788		if (copy_from_sockptr(&mreq, optval, len))
3789			return -EFAULT;
3790		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3791			return -EINVAL;
3792		if (optname == PACKET_ADD_MEMBERSHIP)
3793			ret = packet_mc_add(sk, &mreq);
3794		else
3795			ret = packet_mc_drop(sk, &mreq);
3796		return ret;
3797	}
3798
3799	case PACKET_RX_RING:
3800	case PACKET_TX_RING:
3801	{
3802		union tpacket_req_u req_u;
3803		int len;
3804
3805		lock_sock(sk);
3806		switch (po->tp_version) {
3807		case TPACKET_V1:
3808		case TPACKET_V2:
3809			len = sizeof(req_u.req);
3810			break;
3811		case TPACKET_V3:
3812		default:
3813			len = sizeof(req_u.req3);
3814			break;
3815		}
3816		if (optlen < len) {
3817			ret = -EINVAL;
3818		} else {
3819			if (copy_from_sockptr(&req_u.req, optval, len))
3820				ret = -EFAULT;
3821			else
3822				ret = packet_set_ring(sk, &req_u, 0,
3823						    optname == PACKET_TX_RING);
3824		}
3825		release_sock(sk);
3826		return ret;
3827	}
3828	case PACKET_COPY_THRESH:
3829	{
3830		int val;
3831
3832		if (optlen != sizeof(val))
3833			return -EINVAL;
3834		if (copy_from_sockptr(&val, optval, sizeof(val)))
3835			return -EFAULT;
3836
3837		pkt_sk(sk)->copy_thresh = val;
3838		return 0;
3839	}
3840	case PACKET_VERSION:
3841	{
3842		int val;
3843
3844		if (optlen != sizeof(val))
3845			return -EINVAL;
3846		if (copy_from_sockptr(&val, optval, sizeof(val)))
3847			return -EFAULT;
3848		switch (val) {
3849		case TPACKET_V1:
3850		case TPACKET_V2:
3851		case TPACKET_V3:
3852			break;
3853		default:
3854			return -EINVAL;
3855		}
3856		lock_sock(sk);
3857		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3858			ret = -EBUSY;
3859		} else {
3860			po->tp_version = val;
3861			ret = 0;
3862		}
3863		release_sock(sk);
3864		return ret;
3865	}
3866	case PACKET_RESERVE:
3867	{
3868		unsigned int val;
3869
3870		if (optlen != sizeof(val))
3871			return -EINVAL;
3872		if (copy_from_sockptr(&val, optval, sizeof(val)))
3873			return -EFAULT;
3874		if (val > INT_MAX)
3875			return -EINVAL;
3876		lock_sock(sk);
3877		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3878			ret = -EBUSY;
3879		} else {
3880			po->tp_reserve = val;
3881			ret = 0;
3882		}
3883		release_sock(sk);
3884		return ret;
3885	}
3886	case PACKET_LOSS:
3887	{
3888		unsigned int val;
3889
3890		if (optlen != sizeof(val))
3891			return -EINVAL;
3892		if (copy_from_sockptr(&val, optval, sizeof(val)))
3893			return -EFAULT;
3894
3895		lock_sock(sk);
3896		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3897			ret = -EBUSY;
3898		} else {
3899			packet_sock_flag_set(po, PACKET_SOCK_TP_LOSS, val);
3900			ret = 0;
3901		}
3902		release_sock(sk);
3903		return ret;
3904	}
3905	case PACKET_AUXDATA:
3906	{
3907		int val;
3908
3909		if (optlen < sizeof(val))
3910			return -EINVAL;
3911		if (copy_from_sockptr(&val, optval, sizeof(val)))
3912			return -EFAULT;
3913
3914		packet_sock_flag_set(po, PACKET_SOCK_AUXDATA, val);
3915		return 0;
3916	}
3917	case PACKET_ORIGDEV:
3918	{
3919		int val;
3920
3921		if (optlen < sizeof(val))
3922			return -EINVAL;
3923		if (copy_from_sockptr(&val, optval, sizeof(val)))
3924			return -EFAULT;
3925
3926		packet_sock_flag_set(po, PACKET_SOCK_ORIGDEV, val);
3927		return 0;
3928	}
3929	case PACKET_VNET_HDR:
3930	case PACKET_VNET_HDR_SZ:
3931	{
3932		int val, hdr_len;
3933
3934		if (sock->type != SOCK_RAW)
3935			return -EINVAL;
3936		if (optlen < sizeof(val))
3937			return -EINVAL;
3938		if (copy_from_sockptr(&val, optval, sizeof(val)))
3939			return -EFAULT;
3940
3941		if (optname == PACKET_VNET_HDR_SZ) {
3942			if (val && val != sizeof(struct virtio_net_hdr) &&
3943			    val != sizeof(struct virtio_net_hdr_mrg_rxbuf))
3944				return -EINVAL;
3945			hdr_len = val;
3946		} else {
3947			hdr_len = val ? sizeof(struct virtio_net_hdr) : 0;
3948		}
3949		lock_sock(sk);
3950		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3951			ret = -EBUSY;
3952		} else {
3953			WRITE_ONCE(po->vnet_hdr_sz, hdr_len);
3954			ret = 0;
3955		}
3956		release_sock(sk);
3957		return ret;
3958	}
3959	case PACKET_TIMESTAMP:
3960	{
3961		int val;
3962
3963		if (optlen != sizeof(val))
3964			return -EINVAL;
3965		if (copy_from_sockptr(&val, optval, sizeof(val)))
3966			return -EFAULT;
3967
3968		WRITE_ONCE(po->tp_tstamp, val);
3969		return 0;
3970	}
3971	case PACKET_FANOUT:
3972	{
3973		struct fanout_args args = { 0 };
3974
3975		if (optlen != sizeof(int) && optlen != sizeof(args))
3976			return -EINVAL;
3977		if (copy_from_sockptr(&args, optval, optlen))
3978			return -EFAULT;
3979
3980		return fanout_add(sk, &args);
3981	}
3982	case PACKET_FANOUT_DATA:
3983	{
3984		/* Paired with the WRITE_ONCE() in fanout_add() */
3985		if (!READ_ONCE(po->fanout))
3986			return -EINVAL;
3987
3988		return fanout_set_data(po, optval, optlen);
3989	}
3990	case PACKET_IGNORE_OUTGOING:
3991	{
3992		int val;
3993
3994		if (optlen != sizeof(val))
3995			return -EINVAL;
3996		if (copy_from_sockptr(&val, optval, sizeof(val)))
3997			return -EFAULT;
3998		if (val < 0 || val > 1)
3999			return -EINVAL;
4000
4001		po->prot_hook.ignore_outgoing = !!val;
4002		return 0;
4003	}
4004	case PACKET_TX_HAS_OFF:
4005	{
4006		unsigned int val;
4007
4008		if (optlen != sizeof(val))
4009			return -EINVAL;
4010		if (copy_from_sockptr(&val, optval, sizeof(val)))
4011			return -EFAULT;
4012
4013		lock_sock(sk);
4014		if (!po->rx_ring.pg_vec && !po->tx_ring.pg_vec)
4015			packet_sock_flag_set(po, PACKET_SOCK_TX_HAS_OFF, val);
4016
4017		release_sock(sk);
4018		return 0;
4019	}
4020	case PACKET_QDISC_BYPASS:
4021	{
4022		int val;
4023
4024		if (optlen != sizeof(val))
4025			return -EINVAL;
4026		if (copy_from_sockptr(&val, optval, sizeof(val)))
4027			return -EFAULT;
4028
4029		packet_sock_flag_set(po, PACKET_SOCK_QDISC_BYPASS, val);
4030		return 0;
4031	}
4032	default:
4033		return -ENOPROTOOPT;
4034	}
4035}
4036
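The option handlers above are driven from user space through setsockopt(SOL_PACKET, ...). As an illustrative sketch only (not part of this file; it assumes a process with CAP_NET_RAW and invented buffer sizes), a TPACKET_V3 receive ring could be requested like this. PACKET_VERSION must be set before PACKET_RX_RING, because the ring case above sizes the copied request from po->tp_version and packet_set_ring() interprets it accordingly:

#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static int setup_v3_rx_ring(void)
{
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
	int version = TPACKET_V3;
	struct tpacket_req3 req;

	if (fd < 0)
		return -1;

	/* Must precede PACKET_RX_RING: packet_set_ring() reads po->tp_version. */
	if (setsockopt(fd, SOL_PACKET, PACKET_VERSION, &version, sizeof(version)))
		goto err;

	memset(&req, 0, sizeof(req));
	req.tp_block_size = 1 << 22;            /* 4 MiB, page aligned */
	req.tp_frame_size = 1 << 11;            /* multiple of TPACKET_ALIGNMENT */
	req.tp_block_nr   = 64;
	req.tp_frame_nr   = (req.tp_block_size / req.tp_frame_size) * req.tp_block_nr;
	req.tp_retire_blk_tov = 60;             /* ms; 0 lets the kernel derive it */

	if (setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req)))
		goto err;
	return fd;
err:
	close(fd);
	return -1;
}
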
4037static int packet_getsockopt(struct socket *sock, int level, int optname,
4038			     char __user *optval, int __user *optlen)
4039{
4040	int len;
4041	int val, lv = sizeof(val);
4042	struct sock *sk = sock->sk;
4043	struct packet_sock *po = pkt_sk(sk);
4044	void *data = &val;
4045	union tpacket_stats_u st;
4046	struct tpacket_rollover_stats rstats;
4047	int drops;
4048
4049	if (level != SOL_PACKET)
4050		return -ENOPROTOOPT;
4051
4052	if (get_user(len, optlen))
4053		return -EFAULT;
4054
4055	if (len < 0)
4056		return -EINVAL;
4057
4058	switch (optname) {
4059	case PACKET_STATISTICS:
4060		spin_lock_bh(&sk->sk_receive_queue.lock);
4061		memcpy(&st, &po->stats, sizeof(st));
4062		memset(&po->stats, 0, sizeof(po->stats));
4063		spin_unlock_bh(&sk->sk_receive_queue.lock);
4064		drops = atomic_xchg(&po->tp_drops, 0);
4065
4066		if (po->tp_version == TPACKET_V3) {
4067			lv = sizeof(struct tpacket_stats_v3);
4068			st.stats3.tp_drops = drops;
4069			st.stats3.tp_packets += drops;
4070			data = &st.stats3;
4071		} else {
4072			lv = sizeof(struct tpacket_stats);
4073			st.stats1.tp_drops = drops;
4074			st.stats1.tp_packets += drops;
4075			data = &st.stats1;
4076		}
4077
4078		break;
4079	case PACKET_AUXDATA:
4080		val = packet_sock_flag(po, PACKET_SOCK_AUXDATA);
4081		break;
4082	case PACKET_ORIGDEV:
4083		val = packet_sock_flag(po, PACKET_SOCK_ORIGDEV);
4084		break;
4085	case PACKET_VNET_HDR:
4086		val = !!READ_ONCE(po->vnet_hdr_sz);
4087		break;
4088	case PACKET_VNET_HDR_SZ:
4089		val = READ_ONCE(po->vnet_hdr_sz);
4090		break;
4091	case PACKET_VERSION:
4092		val = po->tp_version;
4093		break;
4094	case PACKET_HDRLEN:
4095		if (len > sizeof(int))
4096			len = sizeof(int);
4097		if (len < sizeof(int))
4098			return -EINVAL;
4099		if (copy_from_user(&val, optval, len))
4100			return -EFAULT;
4101		switch (val) {
4102		case TPACKET_V1:
4103			val = sizeof(struct tpacket_hdr);
4104			break;
4105		case TPACKET_V2:
4106			val = sizeof(struct tpacket2_hdr);
4107			break;
4108		case TPACKET_V3:
4109			val = sizeof(struct tpacket3_hdr);
4110			break;
4111		default:
4112			return -EINVAL;
4113		}
4114		break;
4115	case PACKET_RESERVE:
4116		val = po->tp_reserve;
4117		break;
4118	case PACKET_LOSS:
4119		val = packet_sock_flag(po, PACKET_SOCK_TP_LOSS);
4120		break;
4121	case PACKET_TIMESTAMP:
4122		val = READ_ONCE(po->tp_tstamp);
4123		break;
4124	case PACKET_FANOUT:
4125		val = (po->fanout ?
4126		       ((u32)po->fanout->id |
4127			((u32)po->fanout->type << 16) |
4128			((u32)po->fanout->flags << 24)) :
4129		       0);
4130		break;
4131	case PACKET_IGNORE_OUTGOING:
4132		val = po->prot_hook.ignore_outgoing;
4133		break;
4134	case PACKET_ROLLOVER_STATS:
4135		if (!po->rollover)
4136			return -EINVAL;
4137		rstats.tp_all = atomic_long_read(&po->rollover->num);
4138		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4139		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4140		data = &rstats;
4141		lv = sizeof(rstats);
4142		break;
4143	case PACKET_TX_HAS_OFF:
4144		val = packet_sock_flag(po, PACKET_SOCK_TX_HAS_OFF);
4145		break;
4146	case PACKET_QDISC_BYPASS:
4147		val = packet_sock_flag(po, PACKET_SOCK_QDISC_BYPASS);
4148		break;
4149	default:
4150		return -ENOPROTOOPT;
4151	}
4152
4153	if (len > lv)
4154		len = lv;
4155	if (put_user(len, optlen))
4156		return -EFAULT;
4157	if (copy_to_user(optval, data, len))
4158		return -EFAULT;
4159	return 0;
4160}
4161
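As a hedged user-space counterpart of the PACKET_STATISTICS branch above (illustrative only, for a socket set to TPACKET_V3): the handler clears its counters on every read, so the returned numbers are deltas since the previous call.

#include <string.h>
#include <sys/socket.h>
#include <linux/if_packet.h>

/* Returns 0 on success; the kernel resets its counters on each read. */
static int read_v3_stats(int fd, struct tpacket_stats_v3 *stats)
{
	socklen_t len = sizeof(*stats);

	memset(stats, 0, sizeof(*stats));
	return getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, stats, &len);
}
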
4162static int packet_notifier(struct notifier_block *this,
4163			   unsigned long msg, void *ptr)
4164{
4165	struct sock *sk;
4166	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4167	struct net *net = dev_net(dev);
4168
4169	rcu_read_lock();
4170	sk_for_each_rcu(sk, &net->packet.sklist) {
4171		struct packet_sock *po = pkt_sk(sk);
4172
4173		switch (msg) {
4174		case NETDEV_UNREGISTER:
4175			if (po->mclist)
4176				packet_dev_mclist_delete(dev, &po->mclist);
4177			fallthrough;
4178
4179		case NETDEV_DOWN:
4180			if (dev->ifindex == po->ifindex) {
4181				spin_lock(&po->bind_lock);
4182				if (packet_sock_flag(po, PACKET_SOCK_RUNNING)) {
4183					__unregister_prot_hook(sk, false);
4184					sk->sk_err = ENETDOWN;
4185					if (!sock_flag(sk, SOCK_DEAD))
4186						sk_error_report(sk);
4187				}
4188				if (msg == NETDEV_UNREGISTER) {
4189					packet_cached_dev_reset(po);
4190					WRITE_ONCE(po->ifindex, -1);
4191					netdev_put(po->prot_hook.dev,
4192						   &po->prot_hook.dev_tracker);
4193					po->prot_hook.dev = NULL;
4194				}
4195				spin_unlock(&po->bind_lock);
4196			}
4197			break;
4198		case NETDEV_UP:
4199			if (dev->ifindex == po->ifindex) {
4200				spin_lock(&po->bind_lock);
4201				if (po->num)
4202					register_prot_hook(sk);
4203				spin_unlock(&po->bind_lock);
4204			}
4205			break;
4206		}
4207	}
4208	rcu_read_unlock();
4209	return NOTIFY_DONE;
4210}
4211
4212
4213static int packet_ioctl(struct socket *sock, unsigned int cmd,
4214			unsigned long arg)
4215{
4216	struct sock *sk = sock->sk;
4217
4218	switch (cmd) {
4219	case SIOCOUTQ:
4220	{
4221		int amount = sk_wmem_alloc_get(sk);
4222
4223		return put_user(amount, (int __user *)arg);
4224	}
4225	case SIOCINQ:
4226	{
4227		struct sk_buff *skb;
4228		int amount = 0;
4229
4230		spin_lock_bh(&sk->sk_receive_queue.lock);
4231		skb = skb_peek(&sk->sk_receive_queue);
4232		if (skb)
4233			amount = skb->len;
4234		spin_unlock_bh(&sk->sk_receive_queue.lock);
4235		return put_user(amount, (int __user *)arg);
4236	}
4237#ifdef CONFIG_INET
4238	case SIOCADDRT:
4239	case SIOCDELRT:
4240	case SIOCDARP:
4241	case SIOCGARP:
4242	case SIOCSARP:
4243	case SIOCGIFADDR:
4244	case SIOCSIFADDR:
4245	case SIOCGIFBRDADDR:
4246	case SIOCSIFBRDADDR:
4247	case SIOCGIFNETMASK:
4248	case SIOCSIFNETMASK:
4249	case SIOCGIFDSTADDR:
4250	case SIOCSIFDSTADDR:
4251	case SIOCSIFFLAGS:
4252		return inet_dgram_ops.ioctl(sock, cmd, arg);
4253#endif
4254
4255	default:
4256		return -ENOIOCTLCMD;
4257	}
4258	return 0;
4259}
4260
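packet_ioctl() above services the generic queue queries; a small illustrative sketch (not from this file) of asking how many bytes sit at the head of the receive queue via SIOCINQ:

#include <sys/ioctl.h>
#include <linux/sockios.h>

/* Length of the skb at the head of sk_receive_queue, or -1 on error. */
static int pending_rx_bytes(int fd)
{
	int amount = 0;

	if (ioctl(fd, SIOCINQ, &amount) < 0)
		return -1;
	return amount;
}
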
4261static __poll_t packet_poll(struct file *file, struct socket *sock,
4262				poll_table *wait)
4263{
4264	struct sock *sk = sock->sk;
4265	struct packet_sock *po = pkt_sk(sk);
4266	__poll_t mask = datagram_poll(file, sock, wait);
4267
4268	spin_lock_bh(&sk->sk_receive_queue.lock);
4269	if (po->rx_ring.pg_vec) {
4270		if (!packet_previous_rx_frame(po, &po->rx_ring,
4271			TP_STATUS_KERNEL))
4272			mask |= EPOLLIN | EPOLLRDNORM;
4273	}
4274	packet_rcv_try_clear_pressure(po);
 
4275	spin_unlock_bh(&sk->sk_receive_queue.lock);
4276	spin_lock_bh(&sk->sk_write_queue.lock);
4277	if (po->tx_ring.pg_vec) {
4278		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4279			mask |= EPOLLOUT | EPOLLWRNORM;
4280	}
4281	spin_unlock_bh(&sk->sk_write_queue.lock);
4282	return mask;
4283}
4284
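Because packet_poll() above ORs in EPOLLIN/EPOLLRDNORM when the previous RX frame (or block) is no longer owned by the kernel, a ring consumer can sleep in poll() instead of spinning on the status word. A minimal sketch, assuming the socket already has an RX ring:

#include <poll.h>

/* Block until packet_poll() reports a readable frame/block or an error. */
static int wait_for_rx(int fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd     = fd,
		.events = POLLIN | POLLRDNORM | POLLERR,
	};

	return poll(&pfd, 1, timeout_ms);
}
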
4285
4286/* Dirty? Well, I still have not found a better way to account
4287 * for user mmaps.
4288 */
4289
4290static void packet_mm_open(struct vm_area_struct *vma)
4291{
4292	struct file *file = vma->vm_file;
4293	struct socket *sock = file->private_data;
4294	struct sock *sk = sock->sk;
4295
4296	if (sk)
4297		atomic_long_inc(&pkt_sk(sk)->mapped);
4298}
4299
4300static void packet_mm_close(struct vm_area_struct *vma)
4301{
4302	struct file *file = vma->vm_file;
4303	struct socket *sock = file->private_data;
4304	struct sock *sk = sock->sk;
4305
4306	if (sk)
4307		atomic_long_dec(&pkt_sk(sk)->mapped);
4308}
4309
4310static const struct vm_operations_struct packet_mmap_ops = {
4311	.open	=	packet_mm_open,
4312	.close	=	packet_mm_close,
4313};
4314
4315static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4316			unsigned int len)
4317{
4318	int i;
4319
4320	for (i = 0; i < len; i++) {
4321		if (likely(pg_vec[i].buffer)) {
4322			if (is_vmalloc_addr(pg_vec[i].buffer))
4323				vfree(pg_vec[i].buffer);
4324			else
4325				free_pages((unsigned long)pg_vec[i].buffer,
4326					   order);
4327			pg_vec[i].buffer = NULL;
4328		}
4329	}
4330	kfree(pg_vec);
4331}
4332
4333static char *alloc_one_pg_vec_page(unsigned long order)
4334{
4335	char *buffer;
4336	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4337			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4338
4339	buffer = (char *) __get_free_pages(gfp_flags, order);
4340	if (buffer)
4341		return buffer;
4342
4343	/* __get_free_pages failed, fall back to vmalloc */
4344	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
4345	if (buffer)
4346		return buffer;
4347
4348	/* vmalloc failed, let's dig into swap here */
4349	gfp_flags &= ~__GFP_NORETRY;
4350	buffer = (char *) __get_free_pages(gfp_flags, order);
4351	if (buffer)
4352		return buffer;
4353
4354	/* complete and utter failure */
4355	return NULL;
4356}
4357
4358static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4359{
4360	unsigned int block_nr = req->tp_block_nr;
4361	struct pgv *pg_vec;
4362	int i;
4363
4364	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
4365	if (unlikely(!pg_vec))
4366		goto out;
4367
4368	for (i = 0; i < block_nr; i++) {
4369		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4370		if (unlikely(!pg_vec[i].buffer))
4371			goto out_free_pgvec;
4372	}
4373
4374out:
4375	return pg_vec;
4376
4377out_free_pgvec:
4378	free_pg_vec(pg_vec, order, block_nr);
4379	pg_vec = NULL;
4380	goto out;
4381}
4382
4383static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4384		int closing, int tx_ring)
4385{
4386	struct pgv *pg_vec = NULL;
4387	struct packet_sock *po = pkt_sk(sk);
4388	unsigned long *rx_owner_map = NULL;
4389	int was_running, order = 0;
4390	struct packet_ring_buffer *rb;
4391	struct sk_buff_head *rb_queue;
4392	__be16 num;
4393	int err;
4394	/* Added to avoid minimal code churn */
4395	struct tpacket_req *req = &req_u->req;
4396
4397	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4398	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4399
4400	err = -EBUSY;
4401	if (!closing) {
4402		if (atomic_long_read(&po->mapped))
4403			goto out;
4404		if (packet_read_pending(rb))
4405			goto out;
4406	}
4407
4408	if (req->tp_block_nr) {
4409		unsigned int min_frame_size;
4410
4411		/* Sanity tests and some calculations */
4412		err = -EBUSY;
4413		if (unlikely(rb->pg_vec))
4414			goto out;
4415
4416		switch (po->tp_version) {
4417		case TPACKET_V1:
4418			po->tp_hdrlen = TPACKET_HDRLEN;
4419			break;
4420		case TPACKET_V2:
4421			po->tp_hdrlen = TPACKET2_HDRLEN;
4422			break;
4423		case TPACKET_V3:
4424			po->tp_hdrlen = TPACKET3_HDRLEN;
4425			break;
4426		}
4427
4428		err = -EINVAL;
4429		if (unlikely((int)req->tp_block_size <= 0))
4430			goto out;
4431		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4432			goto out;
4433		min_frame_size = po->tp_hdrlen + po->tp_reserve;
4434		if (po->tp_version >= TPACKET_V3 &&
4435		    req->tp_block_size <
4436		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
4437			goto out;
4438		if (unlikely(req->tp_frame_size < min_frame_size))
 
4439			goto out;
4440		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4441			goto out;
4442
4443		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4444		if (unlikely(rb->frames_per_block == 0))
4445			goto out;
4446		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
4447			goto out;
4448		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4449					req->tp_frame_nr))
4450			goto out;
4451
4452		err = -ENOMEM;
4453		order = get_order(req->tp_block_size);
4454		pg_vec = alloc_pg_vec(req, order);
4455		if (unlikely(!pg_vec))
4456			goto out;
4457		switch (po->tp_version) {
4458		case TPACKET_V3:
4459			/* Block transmit is not supported yet */
4460			if (!tx_ring) {
4461				init_prb_bdqc(po, rb, pg_vec, req_u);
4462			} else {
4463				struct tpacket_req3 *req3 = &req_u->req3;
4464
4465				if (req3->tp_retire_blk_tov ||
4466				    req3->tp_sizeof_priv ||
4467				    req3->tp_feature_req_word) {
4468					err = -EINVAL;
4469					goto out_free_pg_vec;
4470				}
4471			}
4472			break;
4473		default:
4474			if (!tx_ring) {
4475				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
4476					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
4477				if (!rx_owner_map)
4478					goto out_free_pg_vec;
4479			}
4480			break;
4481		}
4482	}
4483	/* Done */
4484	else {
4485		err = -EINVAL;
4486		if (unlikely(req->tp_frame_nr))
4487			goto out;
4488	}
4489
4490
4491	/* Detach socket from network */
4492	spin_lock(&po->bind_lock);
4493	was_running = packet_sock_flag(po, PACKET_SOCK_RUNNING);
4494	num = po->num;
4495	if (was_running) {
4496		WRITE_ONCE(po->num, 0);
4497		__unregister_prot_hook(sk, false);
4498	}
4499	spin_unlock(&po->bind_lock);
4500
4501	synchronize_net();
4502
4503	err = -EBUSY;
4504	mutex_lock(&po->pg_vec_lock);
4505	if (closing || atomic_long_read(&po->mapped) == 0) {
4506		err = 0;
4507		spin_lock_bh(&rb_queue->lock);
4508		swap(rb->pg_vec, pg_vec);
4509		if (po->tp_version <= TPACKET_V2)
4510			swap(rb->rx_owner_map, rx_owner_map);
4511		rb->frame_max = (req->tp_frame_nr - 1);
4512		rb->head = 0;
4513		rb->frame_size = req->tp_frame_size;
4514		spin_unlock_bh(&rb_queue->lock);
4515
4516		swap(rb->pg_vec_order, order);
4517		swap(rb->pg_vec_len, req->tp_block_nr);
4518
4519		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4520		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4521						tpacket_rcv : packet_rcv;
4522		skb_queue_purge(rb_queue);
4523		if (atomic_long_read(&po->mapped))
4524			pr_err("packet_mmap: vma is busy: %ld\n",
4525			       atomic_long_read(&po->mapped));
4526	}
4527	mutex_unlock(&po->pg_vec_lock);
4528
4529	spin_lock(&po->bind_lock);
4530	if (was_running) {
4531		WRITE_ONCE(po->num, num);
4532		register_prot_hook(sk);
4533	}
4534	spin_unlock(&po->bind_lock);
4535	if (pg_vec && (po->tp_version > TPACKET_V2)) {
4536		/* Because we don't support block-based V3 on tx-ring */
4537		if (!tx_ring)
4538			prb_shutdown_retire_blk_timer(po, rb_queue);
4539	}
4540
4541out_free_pg_vec:
4542	if (pg_vec) {
4543		bitmap_free(rx_owner_map);
4544		free_pg_vec(pg_vec, order, req->tp_block_nr);
4545	}
4546out:
 
4547	return err;
4548}
4549
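The sanity tests in packet_set_ring() pin down the ring geometry: tp_block_size must be positive and page aligned, tp_frame_size must be at least tp_hdrlen + tp_reserve and a multiple of TPACKET_ALIGNMENT, and tp_frame_nr must equal frames_per_block * tp_block_nr. A user-space sketch that mirrors those checks before issuing the setsockopt (illustrative, not part of this file):

#include <stdbool.h>
#include <unistd.h>
#include <linux/if_packet.h>

static bool ring_geometry_ok(const struct tpacket_req *req)
{
	long page = sysconf(_SC_PAGESIZE);
	unsigned int fpb;

	if ((int)req->tp_block_size <= 0 || req->tp_block_size % page)
		return false;
	if (!req->tp_frame_size ||
	    (req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
		return false;
	fpb = req->tp_block_size / req->tp_frame_size;
	return fpb && fpb * req->tp_block_nr == req->tp_frame_nr;
}
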
4550static int packet_mmap(struct file *file, struct socket *sock,
4551		struct vm_area_struct *vma)
4552{
4553	struct sock *sk = sock->sk;
4554	struct packet_sock *po = pkt_sk(sk);
4555	unsigned long size, expected_size;
4556	struct packet_ring_buffer *rb;
4557	unsigned long start;
4558	int err = -EINVAL;
4559	int i;
4560
4561	if (vma->vm_pgoff)
4562		return -EINVAL;
4563
4564	mutex_lock(&po->pg_vec_lock);
4565
4566	expected_size = 0;
4567	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4568		if (rb->pg_vec) {
4569			expected_size += rb->pg_vec_len
4570						* rb->pg_vec_pages
4571						* PAGE_SIZE;
4572		}
4573	}
4574
4575	if (expected_size == 0)
4576		goto out;
4577
4578	size = vma->vm_end - vma->vm_start;
4579	if (size != expected_size)
4580		goto out;
4581
4582	start = vma->vm_start;
4583	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4584		if (rb->pg_vec == NULL)
4585			continue;
4586
4587		for (i = 0; i < rb->pg_vec_len; i++) {
4588			struct page *page;
4589			void *kaddr = rb->pg_vec[i].buffer;
4590			int pg_num;
4591
4592			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4593				page = pgv_to_page(kaddr);
4594				err = vm_insert_page(vma, start, page);
4595				if (unlikely(err))
4596					goto out;
4597				start += PAGE_SIZE;
4598				kaddr += PAGE_SIZE;
4599			}
4600		}
4601	}
4602
4603	atomic_long_inc(&po->mapped);
4604	vma->vm_ops = &packet_mmap_ops;
4605	err = 0;
4606
4607out:
4608	mutex_unlock(&po->pg_vec_lock);
4609	return err;
4610}
4611
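packet_mmap() above only accepts a zero pgoff and a length equal to the sum of both rings, so user space maps everything with a single call; the RX ring precedes the TX ring in that mapping. Illustrative sketch (rx_bytes/tx_bytes are the block_size * block_nr products of the requests used at setup):

#include <stddef.h>
#include <sys/mman.h>

static void *map_rings(int fd, size_t rx_bytes, size_t tx_bytes)
{
	void *ring = mmap(NULL, rx_bytes + tx_bytes, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 0);

	return ring == MAP_FAILED ? NULL : ring;
}
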
4612static const struct proto_ops packet_ops_spkt = {
4613	.family =	PF_PACKET,
4614	.owner =	THIS_MODULE,
4615	.release =	packet_release,
4616	.bind =		packet_bind_spkt,
4617	.connect =	sock_no_connect,
4618	.socketpair =	sock_no_socketpair,
4619	.accept =	sock_no_accept,
4620	.getname =	packet_getname_spkt,
4621	.poll =		datagram_poll,
4622	.ioctl =	packet_ioctl,
4623	.gettstamp =	sock_gettstamp,
4624	.listen =	sock_no_listen,
4625	.shutdown =	sock_no_shutdown,
 
 
4626	.sendmsg =	packet_sendmsg_spkt,
4627	.recvmsg =	packet_recvmsg,
4628	.mmap =		sock_no_mmap,
 
4629};
4630
4631static const struct proto_ops packet_ops = {
4632	.family =	PF_PACKET,
4633	.owner =	THIS_MODULE,
4634	.release =	packet_release,
4635	.bind =		packet_bind,
4636	.connect =	sock_no_connect,
4637	.socketpair =	sock_no_socketpair,
4638	.accept =	sock_no_accept,
4639	.getname =	packet_getname,
4640	.poll =		packet_poll,
4641	.ioctl =	packet_ioctl,
4642	.gettstamp =	sock_gettstamp,
4643	.listen =	sock_no_listen,
4644	.shutdown =	sock_no_shutdown,
4645	.setsockopt =	packet_setsockopt,
4646	.getsockopt =	packet_getsockopt,
4647	.sendmsg =	packet_sendmsg,
4648	.recvmsg =	packet_recvmsg,
4649	.mmap =		packet_mmap,
 
4650};
4651
4652static const struct net_proto_family packet_family_ops = {
4653	.family =	PF_PACKET,
4654	.create =	packet_create,
4655	.owner	=	THIS_MODULE,
4656};
4657
4658static struct notifier_block packet_netdev_notifier = {
4659	.notifier_call =	packet_notifier,
4660};
4661
4662#ifdef CONFIG_PROC_FS
4663
4664static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4665	__acquires(RCU)
4666{
4667	struct net *net = seq_file_net(seq);
4668
4669	rcu_read_lock();
4670	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4671}
4672
4673static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4674{
4675	struct net *net = seq_file_net(seq);
4676	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4677}
4678
4679static void packet_seq_stop(struct seq_file *seq, void *v)
4680	__releases(RCU)
4681{
4682	rcu_read_unlock();
4683}
4684
4685static int packet_seq_show(struct seq_file *seq, void *v)
4686{
4687	if (v == SEQ_START_TOKEN)
4688		seq_printf(seq,
4689			   "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
4690			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
4691	else {
4692		struct sock *s = sk_entry(v);
4693		const struct packet_sock *po = pkt_sk(s);
4694
4695		seq_printf(seq,
4696			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4697			   s,
4698			   refcount_read(&s->sk_refcnt),
4699			   s->sk_type,
4700			   ntohs(READ_ONCE(po->num)),
4701			   READ_ONCE(po->ifindex),
4702			   packet_sock_flag(po, PACKET_SOCK_RUNNING),
4703			   atomic_read(&s->sk_rmem_alloc),
4704			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4705			   sock_i_ino(s));
4706	}
4707
4708	return 0;
4709}
4710
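Given the format string in packet_seq_show(), a /proc/net/packet entry renders one line per socket. A hypothetical example (values invented for illustration; %pK is typically hashed or zeroed) might look like:

sk               RefCnt Type Proto  Iface R Rmem   User   Inode
0000000000000000 3      3    0003   2     1 0      1000   21845
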
4711static const struct seq_operations packet_seq_ops = {
4712	.start	= packet_seq_start,
4713	.next	= packet_seq_next,
4714	.stop	= packet_seq_stop,
4715	.show	= packet_seq_show,
4716};
4717#endif
4718
4719static int __net_init packet_net_init(struct net *net)
4720{
4721	mutex_init(&net->packet.sklist_lock);
4722	INIT_HLIST_HEAD(&net->packet.sklist);
4723
4724#ifdef CONFIG_PROC_FS
4725	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
4726			sizeof(struct seq_net_private)))
4727		return -ENOMEM;
4728#endif /* CONFIG_PROC_FS */
4729
4730	return 0;
4731}
4732
4733static void __net_exit packet_net_exit(struct net *net)
4734{
4735	remove_proc_entry("packet", net->proc_net);
4736	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
4737}
4738
4739static struct pernet_operations packet_net_ops = {
4740	.init = packet_net_init,
4741	.exit = packet_net_exit,
4742};
4743
4744
4745static void __exit packet_exit(void)
4746{
4747	sock_unregister(PF_PACKET);
4748	proto_unregister(&packet_proto);
4749	unregister_netdevice_notifier(&packet_netdev_notifier);
4750	unregister_pernet_subsys(&packet_net_ops);
 
 
4751}
4752
4753static int __init packet_init(void)
4754{
4755	int rc;
4756
4757	rc = register_pernet_subsys(&packet_net_ops);
4758	if (rc)
4759		goto out;
4760	rc = register_netdevice_notifier(&packet_netdev_notifier);
4761	if (rc)
4762		goto out_pernet;
4763	rc = proto_register(&packet_proto, 0);
4764	if (rc)
4765		goto out_notifier;
4766	rc = sock_register(&packet_family_ops);
4767	if (rc)
4768		goto out_proto;
4769
4770	return 0;
4771
4772out_proto:
4773	proto_unregister(&packet_proto);
4774out_notifier:
4775	unregister_netdevice_notifier(&packet_netdev_notifier);
4776out_pernet:
4777	unregister_pernet_subsys(&packet_net_ops);
4778out:
4779	return rc;
4780}
4781
4782module_init(packet_init);
4783module_exit(packet_exit);
4784MODULE_DESCRIPTION("Packet socket support (AF_PACKET)");
4785MODULE_LICENSE("GPL");
4786MODULE_ALIAS_NETPROTO(PF_PACKET);
v4.10.11
 
   1/*
   2 * INET		An implementation of the TCP/IP protocol suite for the LINUX
   3 *		operating system.  INET is implemented using the  BSD Socket
   4 *		interface as the means of communication with the user level.
   5 *
   6 *		PACKET - implements raw packet sockets.
   7 *
   8 * Authors:	Ross Biro
   9 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  10 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
  11 *
  12 * Fixes:
  13 *		Alan Cox	:	verify_area() now used correctly
  14 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
  15 *		Alan Cox	:	tidied skbuff lists.
  16 *		Alan Cox	:	Now uses generic datagram routines I
  17 *					added. Also fixed the peek/read crash
  18 *					from all old Linux datagram code.
  19 *		Alan Cox	:	Uses the improved datagram code.
  20 *		Alan Cox	:	Added NULL's for socket options.
  21 *		Alan Cox	:	Re-commented the code.
  22 *		Alan Cox	:	Use new kernel side addressing
  23 *		Rob Janssen	:	Correct MTU usage.
  24 *		Dave Platt	:	Counter leaks caused by incorrect
  25 *					interrupt locking and some slightly
  26 *					dubious gcc output. Can you read
  27 *					compiler: it said _VOLATILE_
  28 *	Richard Kooijman	:	Timestamp fixes.
  29 *		Alan Cox	:	New buffers. Use sk->mac.raw.
  30 *		Alan Cox	:	sendmsg/recvmsg support.
  31 *		Alan Cox	:	Protocol setting support
  32 *	Alexey Kuznetsov	:	Untied from IPv4 stack.
  33 *	Cyrus Durgin		:	Fixed kerneld for kmod.
  34 *	Michal Ostrowski        :       Module initialization cleanup.
  35 *         Ulises Alonso        :       Frame number limit removal and
  36 *                                      packet_set_ring memory leak.
  37 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
  38 *					The convention is that longer addresses
  39 *					will simply extend the hardware address
  40 *					byte arrays at the end of sockaddr_ll
  41 *					and packet_mreq.
  42 *		Johann Baudy	:	Added TX RING.
  43 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
  44 *					layer.
  45 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
  46 *
  47 *
  48 *		This program is free software; you can redistribute it and/or
  49 *		modify it under the terms of the GNU General Public License
  50 *		as published by the Free Software Foundation; either version
  51 *		2 of the License, or (at your option) any later version.
  52 *
  53 */
  54
  55#include <linux/types.h>
  56#include <linux/mm.h>
  57#include <linux/capability.h>
  58#include <linux/fcntl.h>
  59#include <linux/socket.h>
  60#include <linux/in.h>
  61#include <linux/inet.h>
  62#include <linux/netdevice.h>
  63#include <linux/if_packet.h>
  64#include <linux/wireless.h>
  65#include <linux/kernel.h>
  66#include <linux/kmod.h>
  67#include <linux/slab.h>
  68#include <linux/vmalloc.h>
  69#include <net/net_namespace.h>
  70#include <net/ip.h>
  71#include <net/protocol.h>
  72#include <linux/skbuff.h>
  73#include <net/sock.h>
  74#include <linux/errno.h>
  75#include <linux/timer.h>
  76#include <linux/uaccess.h>
  77#include <asm/ioctls.h>
  78#include <asm/page.h>
  79#include <asm/cacheflush.h>
  80#include <asm/io.h>
  81#include <linux/proc_fs.h>
  82#include <linux/seq_file.h>
  83#include <linux/poll.h>
  84#include <linux/module.h>
  85#include <linux/init.h>
  86#include <linux/mutex.h>
  87#include <linux/if_vlan.h>
  88#include <linux/virtio_net.h>
  89#include <linux/errqueue.h>
  90#include <linux/net_tstamp.h>
  91#include <linux/percpu.h>
  92#ifdef CONFIG_INET
  93#include <net/inet_common.h>
  94#endif
  95#include <linux/bpf.h>
  96#include <net/compat.h>
 
  97
  98#include "internal.h"
  99
 100/*
 101   Assumptions:
 102   - if device has no dev->hard_header routine, it adds and removes ll header
 103     inside itself. In this case ll header is invisible outside of device,
 104     but higher levels still should reserve dev->hard_header_len.
 105     Some devices are clever enough to reallocate the skb when the header
 106     will not fit in the reserved space (tunnels); other ones are silly
 107     (PPP).
 108   - a packet socket receives packets with the ll header already pulled,
 109     so SOCK_RAW should push it back.
 110
 111On receive:
 112-----------
 113
 114Incoming, dev->hard_header!=NULL
 115   mac_header -> ll header
 116   data       -> data
 117
 118Outgoing, dev->hard_header!=NULL
 119   mac_header -> ll header
 120   data       -> ll header
 121
 122Incoming, dev->hard_header==NULL
 123   mac_header -> UNKNOWN position. It is very likely that it points to the ll
 124		 header.  PPP does this, which is wrong because it introduces
 125		 asymmetry between the rx and tx paths.
 126   data       -> data
 127
 128Outgoing, dev->hard_header==NULL
 129   mac_header -> data. ll header is still not built!
 130   data       -> data
 131
 132Summary
 133  If dev->hard_header==NULL we are unlikely to restore a sensible ll header.
 134
 135
 136On transmit:
 137------------
 138
 139dev->hard_header != NULL
 140   mac_header -> ll header
 141   data       -> ll header
 142
 143dev->hard_header == NULL (ll header is added by device, we cannot control it)
 144   mac_header -> data
 145   data       -> data
 146
 147   We should set nh.raw on output to the correct position,
 148   packet classifier depends on it.
 149 */
 150
 151/* Private packet socket structures. */
 152
 153/* identical to struct packet_mreq except it has
 154 * a longer address field.
 155 */
 156struct packet_mreq_max {
 157	int		mr_ifindex;
 158	unsigned short	mr_type;
 159	unsigned short	mr_alen;
 160	unsigned char	mr_address[MAX_ADDR_LEN];
 161};
 162
 163union tpacket_uhdr {
 164	struct tpacket_hdr  *h1;
 165	struct tpacket2_hdr *h2;
 166	struct tpacket3_hdr *h3;
 167	void *raw;
 168};
 169
 170static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
 171		int closing, int tx_ring);
 172
 173#define V3_ALIGNMENT	(8)
 174
 175#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))
 176
 177#define BLK_PLUS_PRIV(sz_of_priv) \
 178	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))
 179
 180#define PGV_FROM_VMALLOC 1
 181
 182#define BLOCK_STATUS(x)	((x)->hdr.bh1.block_status)
 183#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
 184#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
 185#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
 186#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
 187#define BLOCK_O2PRIV(x)	((x)->offset_to_priv)
 188#define BLOCK_PRIV(x)		((void *)((char *)(x) + BLOCK_O2PRIV(x)))
 189
 190struct packet_sock;
 191static int tpacket_snd(struct packet_sock *po, struct msghdr *msg);
 192static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
 193		       struct packet_type *pt, struct net_device *orig_dev);
 194
 195static void *packet_previous_frame(struct packet_sock *po,
 196		struct packet_ring_buffer *rb,
 197		int status);
 198static void packet_increment_head(struct packet_ring_buffer *buff);
 199static int prb_curr_blk_in_use(struct tpacket_kbdq_core *,
 200			struct tpacket_block_desc *);
 201static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
 202			struct packet_sock *);
 203static void prb_retire_current_block(struct tpacket_kbdq_core *,
 204		struct packet_sock *, unsigned int status);
 205static int prb_queue_frozen(struct tpacket_kbdq_core *);
 206static void prb_open_block(struct tpacket_kbdq_core *,
 207		struct tpacket_block_desc *);
 208static void prb_retire_rx_blk_timer_expired(unsigned long);
 209static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
 210static void prb_init_blk_timer(struct packet_sock *,
 211		struct tpacket_kbdq_core *,
 212		void (*func) (unsigned long));
 213static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
 214static void prb_clear_rxhash(struct tpacket_kbdq_core *,
 215		struct tpacket3_hdr *);
 216static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
 217		struct tpacket3_hdr *);
 218static void packet_flush_mclist(struct sock *sk);
 
 219
 220struct packet_skb_cb {
 221	union {
 222		struct sockaddr_pkt pkt;
 223		union {
 224			/* Trick: alias skb original length with
 225			 * ll.sll_family and ll.protocol in order
 226			 * to save room.
 227			 */
 228			unsigned int origlen;
 229			struct sockaddr_ll ll;
 230		};
 231	} sa;
 232};
 233
 234#define vio_le() virtio_legacy_is_little_endian()
 235
 236#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 237
 238#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
 239#define GET_PBLOCK_DESC(x, bid)	\
 240	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
 241#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
 242	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
 243#define GET_NEXT_PRB_BLK_NUM(x) \
 244	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
 245	((x)->kactive_blk_num+1) : 0)
 246
 247static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
 248static void __fanout_link(struct sock *sk, struct packet_sock *po);
 249
 250static int packet_direct_xmit(struct sk_buff *skb)
 
 251{
 252	struct net_device *dev = skb->dev;
 253	struct sk_buff *orig_skb = skb;
 254	struct netdev_queue *txq;
 255	int ret = NETDEV_TX_BUSY;
 256
 257	if (unlikely(!netif_running(dev) ||
 258		     !netif_carrier_ok(dev)))
 259		goto drop;
 
 260
 261	skb = validate_xmit_skb_list(skb, dev);
 262	if (skb != orig_skb)
 263		goto drop;
 264
 265	txq = skb_get_tx_queue(dev, skb);
 266
 267	local_bh_disable();
 
 
 268
 269	HARD_TX_LOCK(dev, txq, smp_processor_id());
 270	if (!netif_xmit_frozen_or_drv_stopped(txq))
 271		ret = netdev_start_xmit(skb, dev, txq, false);
 272	HARD_TX_UNLOCK(dev, txq);
 273
 274	local_bh_enable();
 275
 276	if (!dev_xmit_complete(ret))
 277		kfree_skb(skb);
 278
 279	return ret;
 280drop:
 281	atomic_long_inc(&dev->tx_dropped);
 282	kfree_skb_list(skb);
 283	return NET_XMIT_DROP;
 284}
 285
 286static struct net_device *packet_cached_dev_get(struct packet_sock *po)
 287{
 288	struct net_device *dev;
 289
 290	rcu_read_lock();
 291	dev = rcu_dereference(po->cached_dev);
 292	if (likely(dev))
 293		dev_hold(dev);
 294	rcu_read_unlock();
 295
 296	return dev;
 297}
 298
 299static void packet_cached_dev_assign(struct packet_sock *po,
 300				     struct net_device *dev)
 301{
 302	rcu_assign_pointer(po->cached_dev, dev);
 303}
 304
 305static void packet_cached_dev_reset(struct packet_sock *po)
 306{
 307	RCU_INIT_POINTER(po->cached_dev, NULL);
 308}
 309
 310static bool packet_use_direct_xmit(const struct packet_sock *po)
 311{
 312	return po->xmit == packet_direct_xmit;
 313}
 314
 315static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 316{
 317	return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
 318}
 319
 320static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
 321{
 
 322	const struct net_device_ops *ops = dev->netdev_ops;
 
 323	u16 queue_index;
 324
 325	if (ops->ndo_select_queue) {
 326		queue_index = ops->ndo_select_queue(dev, skb, NULL,
 327						    __packet_pick_tx_queue);
 328		queue_index = netdev_cap_txqueue(dev, queue_index);
 329	} else {
 330		queue_index = __packet_pick_tx_queue(dev, skb);
 331	}
 332
 333	skb_set_queue_mapping(skb, queue_index);
 334}
 335
 336/* register_prot_hook must be invoked with the po->bind_lock held,
 337 * or from a context in which asynchronous accesses to the packet
 338 * socket is not possible (packet_create()).
 339 */
 340static void register_prot_hook(struct sock *sk)
 341{
 342	struct packet_sock *po = pkt_sk(sk);
 343
 344	if (!po->running) {
 345		if (po->fanout)
 346			__fanout_link(sk, po);
 347		else
 348			dev_add_pack(&po->prot_hook);
 349
 350		sock_hold(sk);
 351		po->running = 1;
 352	}
 353}
 354
 355/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
 356 * held.   If the sync parameter is true, we will temporarily drop
 357 * the po->bind_lock and do a synchronize_net to make sure no
 358 * asynchronous packet processing paths still refer to the elements
 359 * of po->prot_hook.  If the sync parameter is false, it is the
 360 * callers responsibility to take care of this.
 361 */
 362static void __unregister_prot_hook(struct sock *sk, bool sync)
 363{
 364	struct packet_sock *po = pkt_sk(sk);
 365
 366	po->running = 0;
 
 
 367
 368	if (po->fanout)
 369		__fanout_unlink(sk, po);
 370	else
 371		__dev_remove_pack(&po->prot_hook);
 372
 373	__sock_put(sk);
 374
 375	if (sync) {
 376		spin_unlock(&po->bind_lock);
 377		synchronize_net();
 378		spin_lock(&po->bind_lock);
 379	}
 380}
 381
 382static void unregister_prot_hook(struct sock *sk, bool sync)
 383{
 384	struct packet_sock *po = pkt_sk(sk);
 385
 386	if (po->running)
 387		__unregister_prot_hook(sk, sync);
 388}
 389
 390static inline struct page * __pure pgv_to_page(void *addr)
 391{
 392	if (is_vmalloc_addr(addr))
 393		return vmalloc_to_page(addr);
 394	return virt_to_page(addr);
 395}
 396
 397static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 398{
 399	union tpacket_uhdr h;
 400
 
 
 401	h.raw = frame;
 402	switch (po->tp_version) {
 403	case TPACKET_V1:
 404		h.h1->tp_status = status;
 405		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 406		break;
 407	case TPACKET_V2:
 408		h.h2->tp_status = status;
 409		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 410		break;
 411	case TPACKET_V3:
 412	default:
 413		WARN(1, "TPACKET version not supported.\n");
 414		BUG();
 415	}
 416
 417	smp_wmb();
 418}
 419
 420static int __packet_get_status(struct packet_sock *po, void *frame)
 421{
 422	union tpacket_uhdr h;
 423
 424	smp_rmb();
 425
 
 
 426	h.raw = frame;
 427	switch (po->tp_version) {
 428	case TPACKET_V1:
 429		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 430		return h.h1->tp_status;
 431	case TPACKET_V2:
 432		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 433		return h.h2->tp_status;
 434	case TPACKET_V3:
 
 
 435	default:
 436		WARN(1, "TPACKET version not supported.\n");
 437		BUG();
 438		return 0;
 439	}
 440}
 441
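The two helpers above implement the kernel half of the V1/V2 status handshake: the kernel fills a frame and then flips its status word to TP_STATUS_USER, and user space hands the frame back by storing TP_STATUS_KERNEL (the smp_wmb()/smp_rmb() pairs above order those accesses). A hedged user-space sketch of the other half ('h' points at a frame header inside the mmap()ed ring):

#include <linux/if_packet.h>

/* Callers should issue a read barrier after this returns true and before
 * touching the frame payload, mirroring the kernel's smp_rmb(). */
static int frame_ready(volatile struct tpacket2_hdr *h)
{
	return h->tp_status & TP_STATUS_USER;
}

static void frame_release(volatile struct tpacket2_hdr *h)
{
	__sync_synchronize();	/* finish reading the frame before handing it back */
	h->tp_status = TP_STATUS_KERNEL;
}
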
 442static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
 443				   unsigned int flags)
 444{
 445	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 446
 447	if (shhwtstamps &&
 448	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
 449	    ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
 450		return TP_STATUS_TS_RAW_HARDWARE;
 451
 452	if (ktime_to_timespec_cond(skb->tstamp, ts))
 
 453		return TP_STATUS_TS_SOFTWARE;
 454
 455	return 0;
 456}
 457
 458static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
 459				    struct sk_buff *skb)
 460{
 461	union tpacket_uhdr h;
 462	struct timespec ts;
 463	__u32 ts_status;
 464
 465	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
 466		return 0;
 467
 468	h.raw = frame;
 469	switch (po->tp_version) {
 470	case TPACKET_V1:
 471		h.h1->tp_sec = ts.tv_sec;
 472		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
 473		break;
 474	case TPACKET_V2:
 475		h.h2->tp_sec = ts.tv_sec;
 476		h.h2->tp_nsec = ts.tv_nsec;
 477		break;
 478	case TPACKET_V3:
 479	default:
 480		WARN(1, "TPACKET version not supported.\n");
 481		BUG();
 482	}
 483
 484	/* one flush is safe, as both fields always lie on the same cacheline */
 485	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
 486	smp_wmb();
 487
 488	return ts_status;
 489}
 490
 491static void *packet_lookup_frame(struct packet_sock *po,
 492		struct packet_ring_buffer *rb,
 493		unsigned int position,
 494		int status)
 495{
 496	unsigned int pg_vec_pos, frame_offset;
 497	union tpacket_uhdr h;
 498
 499	pg_vec_pos = position / rb->frames_per_block;
 500	frame_offset = position % rb->frames_per_block;
 501
 502	h.raw = rb->pg_vec[pg_vec_pos].buffer +
 503		(frame_offset * rb->frame_size);
 504
 505	if (status != __packet_get_status(po, h.raw))
 506		return NULL;
 507
 508	return h.raw;
 509}
 510
 511static void *packet_current_frame(struct packet_sock *po,
 512		struct packet_ring_buffer *rb,
 513		int status)
 514{
 515	return packet_lookup_frame(po, rb, rb->head, status);
 516}
 517
 518static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 519{
 520	del_timer_sync(&pkc->retire_blk_timer);
 521}
 522
 523static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
 524		struct sk_buff_head *rb_queue)
 525{
 526	struct tpacket_kbdq_core *pkc;
 527
 528	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 529
 530	spin_lock_bh(&rb_queue->lock);
 531	pkc->delete_blk_timer = 1;
 532	spin_unlock_bh(&rb_queue->lock);
 533
 534	prb_del_retire_blk_timer(pkc);
 535}
 536
 537static void prb_init_blk_timer(struct packet_sock *po,
 538		struct tpacket_kbdq_core *pkc,
 539		void (*func) (unsigned long))
 540{
 541	init_timer(&pkc->retire_blk_timer);
 542	pkc->retire_blk_timer.data = (long)po;
 543	pkc->retire_blk_timer.function = func;
 544	pkc->retire_blk_timer.expires = jiffies;
 545}
 546
 547static void prb_setup_retire_blk_timer(struct packet_sock *po)
 548{
 549	struct tpacket_kbdq_core *pkc;
 550
 551	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 552	prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
 
 
 553}
 554
 555static int prb_calc_retire_blk_tmo(struct packet_sock *po,
 556				int blk_size_in_bytes)
 557{
 558	struct net_device *dev;
 559	unsigned int mbits = 0, msec = 0, div = 0, tmo = 0;
 560	struct ethtool_link_ksettings ecmd;
 561	int err;
 562
 563	rtnl_lock();
 564	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
 565	if (unlikely(!dev)) {
 566		rtnl_unlock();
 567		return DEFAULT_PRB_RETIRE_TOV;
 568	}
 569	err = __ethtool_get_link_ksettings(dev, &ecmd);
 570	rtnl_unlock();
 571	if (!err) {
 572		/*
 573		 * If the link speed is so slow you don't really
 574		 * need to worry about perf anyway.
 575		 */
 576		if (ecmd.base.speed < SPEED_1000 ||
 577		    ecmd.base.speed == SPEED_UNKNOWN) {
 578			return DEFAULT_PRB_RETIRE_TOV;
 579		} else {
 580			msec = 1;
 581			div = ecmd.base.speed / 1000;
 582		}
 583	}
 584
 
 585	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);
 586
 587	if (div)
 588		mbits /= div;
 589
 590	tmo = mbits * msec;
 591
 592	if (div)
 593		return tmo+1;
 594	return tmo;
 595}
 596
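A worked example of the computation above, under the assumption of a 1 MiB block on a 1 Gb/s link: msec = 1 and div = 1000/1000 = 1, mbits = (1 MiB * 8) / (1024 * 1024) = 8, so tmo = 8 * 1 and the function returns tmo + 1 = 9 ms, which matches the "~8 ms to fill a block" estimate in the timer-logic comment further down. For links slower than 1 Gb/s (or with unknown speed) DEFAULT_PRB_RETIRE_TOV is returned instead.
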
 597static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
 598			union tpacket_req_u *req_u)
 599{
 600	p1->feature_req_word = req_u->req3.tp_feature_req_word;
 601}
 602
 603static void init_prb_bdqc(struct packet_sock *po,
 604			struct packet_ring_buffer *rb,
 605			struct pgv *pg_vec,
 606			union tpacket_req_u *req_u)
 607{
 608	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
 609	struct tpacket_block_desc *pbd;
 610
 611	memset(p1, 0x0, sizeof(*p1));
 612
 613	p1->knxt_seq_num = 1;
 614	p1->pkbdq = pg_vec;
 615	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
 616	p1->pkblk_start	= pg_vec[0].buffer;
 617	p1->kblk_size = req_u->req3.tp_block_size;
 618	p1->knum_blocks	= req_u->req3.tp_block_nr;
 619	p1->hdrlen = po->tp_hdrlen;
 620	p1->version = po->tp_version;
 621	p1->last_kactive_blk_num = 0;
 622	po->stats.stats3.tp_freeze_q_cnt = 0;
 623	if (req_u->req3.tp_retire_blk_tov)
 624		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
 625	else
 626		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
 627						req_u->req3.tp_block_size);
 628	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
 629	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
 
 630
 631	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
 632	prb_init_ft_ops(p1, req_u);
 633	prb_setup_retire_blk_timer(po);
 634	prb_open_block(p1, pbd);
 635}
 636
 637/*  Do NOT update the last_blk_num first.
 638 *  Assumes sk_buff_head lock is held.
 639 */
 640static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
 641{
 642	mod_timer(&pkc->retire_blk_timer,
 643			jiffies + pkc->tov_in_jiffies);
 644	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
 645}
 646
 647/*
 648 * Timer logic:
 649 * 1) We refresh the timer only when we open a block.
 650 *    By doing this we don't waste cycles refreshing the timer
 651 *	  on a packet-by-packet basis.
 652 *
 653 * With a 1MB block-size, on a 1Gbps line, it will take
 654 * i) ~8 ms to fill a block + ii) memcpy etc.
 655 * In this cut we are not accounting for the memcpy time.
 656 *
 657 * So, if the user sets the 'tmo' to 10ms then the timer
 658 * will never fire while the block is still getting filled
 659 * (which is what we want). However, the user could choose
 660 * to close a block early and that's fine.
 661 *
 662 * But when the timer does fire, we check whether or not to refresh it.
 663 * Since the tmo granularity is in msecs, it is not too expensive
 664 * to refresh the timer, let's say every '8' msecs.
 665 * Either the user can set the 'tmo' or we can derive it based on
 666 * a) line-speed and b) block-size.
 667 * prb_calc_retire_blk_tmo() calculates the tmo.
 668 *
 669 */
 670static void prb_retire_rx_blk_timer_expired(unsigned long data)
 671{
 672	struct packet_sock *po = (struct packet_sock *)data;
 
 673	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
 674	unsigned int frozen;
 675	struct tpacket_block_desc *pbd;
 676
 677	spin_lock(&po->sk.sk_receive_queue.lock);
 678
 679	frozen = prb_queue_frozen(pkc);
 680	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 681
 682	if (unlikely(pkc->delete_blk_timer))
 683		goto out;
 684
 685	/* We only need to plug the race when the block is partially filled.
 686	 * tpacket_rcv:
 687	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
 688	 *		copy_bits() is in progress ...
 689	 *		timer fires on other cpu:
 690	 *		we can't retire the current block because copy_bits
 691	 *		is in progress.
 692	 *
 693	 */
 694	if (BLOCK_NUM_PKTS(pbd)) {
 695		while (atomic_read(&pkc->blk_fill_in_prog)) {
 696			/* Waiting for skb_copy_bits to finish... */
 697			cpu_relax();
 698		}
 699	}
 700
 701	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
 702		if (!frozen) {
 703			if (!BLOCK_NUM_PKTS(pbd)) {
 704				/* An empty block. Just refresh the timer. */
 705				goto refresh_timer;
 706			}
 707			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
 708			if (!prb_dispatch_next_block(pkc, po))
 709				goto refresh_timer;
 710			else
 711				goto out;
 712		} else {
 713			/* Case 1. Queue was frozen because user-space was
 714			 *	   lagging behind.
 715			 */
 716			if (prb_curr_blk_in_use(pkc, pbd)) {
 717				/*
 718				 * Ok, user-space is still behind.
 719				 * So just refresh the timer.
 720				 */
 721				goto refresh_timer;
 722			} else {
 723			       /* Case 2. The queue was frozen, user-space caught up,
 724				* now the link went idle && the timer fired.
 725				* We don't have a block to close, so we open this
 726				* block and restart the timer.
 727				* Opening a block thaws the queue and restarts the timer.
 728				* Thawing/timer-refresh is a side effect.
 729				*/
 730				prb_open_block(pkc, pbd);
 731				goto out;
 732			}
 733		}
 734	}
 735
 736refresh_timer:
 737	_prb_refresh_rx_retire_blk_timer(pkc);
 738
 739out:
 740	spin_unlock(&po->sk.sk_receive_queue.lock);
 741}
 742
 743static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
 744		struct tpacket_block_desc *pbd1, __u32 status)
 745{
 746	/* Flush everything minus the block header */
 747
 748#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 749	u8 *start, *end;
 750
 751	start = (u8 *)pbd1;
 752
 753	/* Skip the block header (we know the header WILL fit in 4K) */
 754	start += PAGE_SIZE;
 755
 756	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
 757	for (; start < end; start += PAGE_SIZE)
 758		flush_dcache_page(pgv_to_page(start));
 759
 760	smp_wmb();
 761#endif
 762
 763	/* Now update the block status. */
 764
 765	BLOCK_STATUS(pbd1) = status;
 766
 767	/* Flush the block header */
 768
 769#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 770	start = (u8 *)pbd1;
 771	flush_dcache_page(pgv_to_page(start));
 772
 773	smp_wmb();
 774#endif
 775}
 776
 777/*
 778 * Side effect:
 779 *
 780 * 1) flush the block
 781 * 2) Increment active_blk_num
 782 *
 783 * Note:We DONT refresh the timer on purpose.
 784 *	Because almost always the next block will be opened.
 785 */
 786static void prb_close_block(struct tpacket_kbdq_core *pkc1,
 787		struct tpacket_block_desc *pbd1,
 788		struct packet_sock *po, unsigned int stat)
 789{
 790	__u32 status = TP_STATUS_USER | stat;
 791
 792	struct tpacket3_hdr *last_pkt;
 793	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 794	struct sock *sk = &po->sk;
 795
 796	if (po->stats.stats3.tp_drops)
 797		status |= TP_STATUS_LOSING;
 798
 799	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
 800	last_pkt->tp_next_offset = 0;
 801
 802	/* Get the ts of the last pkt */
 803	if (BLOCK_NUM_PKTS(pbd1)) {
 804		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
 805		h1->ts_last_pkt.ts_nsec	= last_pkt->tp_nsec;
 806	} else {
 807		/* Ok, we tmo'd - so get the current time.
 808		 *
 809		 * It shouldn't really happen as we don't close empty
 810		 * blocks. See prb_retire_rx_blk_timer_expired().
 811		 */
 812		struct timespec ts;
 813		getnstimeofday(&ts);
 814		h1->ts_last_pkt.ts_sec = ts.tv_sec;
 815		h1->ts_last_pkt.ts_nsec	= ts.tv_nsec;
 816	}
 817
 818	smp_wmb();
 819
 820	/* Flush the block */
 821	prb_flush_block(pkc1, pbd1, status);
 822
 823	sk->sk_data_ready(sk);
 824
 825	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
 826}
 827
 828static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
 829{
 830	pkc->reset_pending_on_curr_blk = 0;
 831}
 832
 833/*
 834 * Side effect of opening a block:
 835 *
 836 * 1) prb_queue is thawed.
 837 * 2) retire_blk_timer is refreshed.
 838 *
 839 */
 840static void prb_open_block(struct tpacket_kbdq_core *pkc1,
 841	struct tpacket_block_desc *pbd1)
 842{
 843	struct timespec ts;
 844	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
 845
 846	smp_rmb();
 847
 848	/* We could have just memset this but we will lose the
 849	 * flexibility of making the priv area sticky
 850	 */
 851
 852	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
 853	BLOCK_NUM_PKTS(pbd1) = 0;
 854	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 855
 856	getnstimeofday(&ts);
 857
 858	h1->ts_first_pkt.ts_sec = ts.tv_sec;
 859	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;
 860
 861	pkc1->pkblk_start = (char *)pbd1;
 862	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 863
 864	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
 865	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;
 866
 867	pbd1->version = pkc1->version;
 868	pkc1->prev = pkc1->nxt_offset;
 869	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;
 870
 871	prb_thaw_queue(pkc1);
 872	_prb_refresh_rx_retire_blk_timer(pkc1);
 873
 874	smp_wmb();
 875}
 876
 877/*
 878 * Queue freeze logic:
 879 * 1) Assume tp_block_nr = 8 blocks.
 880 * 2) At time 't0', user opens Rx ring.
 881 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 882 * 4) user-space is either sleeping or processing block '0'.
 883 * 5) tpacket_rcv is currently filling block '7', since there is no space left,
 884 *    it will close block-7,loop around and try to fill block '0'.
 885 *    call-flow:
 886 *    __packet_lookup_frame_in_block
 887 *      prb_retire_current_block()
 888 *      prb_dispatch_next_block()
 889 *        |->(BLOCK_STATUS == USER) evaluates to true
 890 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 891 * 6) Now there are two cases:
 892 *    6.1) Link goes idle right after the queue is frozen.
 893 *         But remember, the last open_block() refreshed the timer.
 894 *         When this timer expires, it will refresh itself so that we can
 895 *         re-open block-0 in near future.
 896 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 897 *         case and __packet_lookup_frame_in_block will check if block-0
 898 *         is free and can now be re-used.
 899 */
 900static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
 901				  struct packet_sock *po)
 902{
 903	pkc->reset_pending_on_curr_blk = 1;
 904	po->stats.stats3.tp_freeze_q_cnt++;
 905}
 906
 907#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))
 908
 909/*
 910 * If the next block is free then we will dispatch it
 911 * and return a good offset.
 912 * Else, we will freeze the queue.
 913 * So, caller must check the return value.
 914 */
 915static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
 916		struct packet_sock *po)
 917{
 918	struct tpacket_block_desc *pbd;
 919
 920	smp_rmb();
 921
 922	/* 1. Get current block num */
 923	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 924
 925	/* 2. If this block is currently in_use then freeze the queue */
 926	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
 927		prb_freeze_queue(pkc, po);
 928		return NULL;
 929	}
 930
 931	/*
 932	 * 3.
 933	 * open this block and return the offset where the first packet
 934	 * needs to get stored.
 935	 */
 936	prb_open_block(pkc, pbd);
 937	return (void *)pkc->nxt_offset;
 938}
 939
 940static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 941		struct packet_sock *po, unsigned int status)
 942{
 943	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
 944
 945	/* retire/close the current block */
 946	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
 947		/*
 948		 * Plug the case where copy_bits() is in progress on
 949		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
 950		 * have space to copy the pkt in the current block and
 951		 * called prb_retire_current_block()
 952		 *
 953		 * We don't need to worry about the TMO case because
 954		 * the timer-handler already handled this case.
 955		 */
 956		if (!(status & TP_STATUS_BLK_TMO)) {
 957			while (atomic_read(&pkc->blk_fill_in_prog)) {
 958				/* Waiting for skb_copy_bits to finish... */
 959				cpu_relax();
 960			}
 961		}
 962		prb_close_block(pkc, pbd, po, status);
 963		return;
 964	}
 965}
 966
 967static int prb_curr_blk_in_use(struct tpacket_kbdq_core *pkc,
 968				      struct tpacket_block_desc *pbd)
 969{
 970	return TP_STATUS_USER & BLOCK_STATUS(pbd);
 971}
 972
 973static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
 974{
 975	return pkc->reset_pending_on_curr_blk;
 976}
 977
 978static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
 
 979{
 980	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
 981	atomic_dec(&pkc->blk_fill_in_prog);
 
 982}
 983
 984static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
 985			struct tpacket3_hdr *ppd)
 986{
 987	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
 988}
 989
 990static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
 991			struct tpacket3_hdr *ppd)
 992{
 993	ppd->hv1.tp_rxhash = 0;
 994}
 995
 996static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
 997			struct tpacket3_hdr *ppd)
 998{
 999	if (skb_vlan_tag_present(pkc->skb)) {
1000		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
1001		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
1002		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
1003	} else {
1004		ppd->hv1.tp_vlan_tci = 0;
1005		ppd->hv1.tp_vlan_tpid = 0;
1006		ppd->tp_status = TP_STATUS_AVAILABLE;
1007	}
1008}
1009
1010static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
1011			struct tpacket3_hdr *ppd)
1012{
1013	ppd->hv1.tp_padding = 0;
1014	prb_fill_vlan_info(pkc, ppd);
1015
1016	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
1017		prb_fill_rxhash(pkc, ppd);
1018	else
1019		prb_clear_rxhash(pkc, ppd);
1020}
1021
1022static void prb_fill_curr_block(char *curr,
1023				struct tpacket_kbdq_core *pkc,
1024				struct tpacket_block_desc *pbd,
1025				unsigned int len)
 
1026{
1027	struct tpacket3_hdr *ppd;
1028
1029	ppd  = (struct tpacket3_hdr *)curr;
1030	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
1031	pkc->prev = curr;
1032	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
1033	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
1034	BLOCK_NUM_PKTS(pbd) += 1;
1035	atomic_inc(&pkc->blk_fill_in_prog);
1036	prb_run_all_ft_ops(pkc, ppd);
1037}
1038
1039/* Assumes caller has the sk->rx_queue.lock */
1040static void *__packet_lookup_frame_in_block(struct packet_sock *po,
1041					    struct sk_buff *skb,
1042						int status,
1043					    unsigned int len
1044					    )
1045{
1046	struct tpacket_kbdq_core *pkc;
1047	struct tpacket_block_desc *pbd;
1048	char *curr, *end;
1049
1050	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
1051	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1052
1053	/* Queue is frozen when user space is lagging behind */
1054	if (prb_queue_frozen(pkc)) {
1055		/*
1056		 * Check if the last block, which caused the queue to freeze,
1057		 * is still in_use by user-space.
1058		 */
1059		if (prb_curr_blk_in_use(pkc, pbd)) {
1060			/* Can't record this packet */
1061			return NULL;
1062		} else {
1063			/*
1064			 * Ok, the block was released by user-space.
1065			 * Now let's open that block.
1066			 * opening a block also thaws the queue.
1067			 * Thawing is a side effect.
1068			 */
1069			prb_open_block(pkc, pbd);
1070		}
1071	}
1072
1073	smp_mb();
1074	curr = pkc->nxt_offset;
1075	pkc->skb = skb;
1076	end = (char *)pbd + pkc->kblk_size;
1077
1078	/* first try the current block */
1079	if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
1080		prb_fill_curr_block(curr, pkc, pbd, len);
1081		return (void *)curr;
1082	}
1083
1084	/* Ok, close the current block */
1085	prb_retire_current_block(pkc, po, 0);
1086
1087	/* Now, try to dispatch the next block */
1088	curr = (char *)prb_dispatch_next_block(pkc, po);
1089	if (curr) {
1090		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
1091		prb_fill_curr_block(curr, pkc, pbd, len);
1092		return (void *)curr;
1093	}
1094
1095	/*
1096	 * No free blocks are available. user_space hasn't caught up yet.
1097	 * Queue was just frozen and now this packet will get dropped.
1098	 */
1099	return NULL;
1100}
1101
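__packet_lookup_frame_in_block() above chains the packets inside a block via tp_next_offset (filled in by prb_fill_curr_block()), so once a block's status shows TP_STATUS_USER, user space can walk it like this (illustrative sketch; 'pbd' points at a block descriptor in the mmap()ed ring):

#include <linux/if_packet.h>

static void walk_block(struct tpacket_block_desc *pbd,
		       void (*cb)(struct tpacket3_hdr *ppd))
{
	unsigned int i, num = pbd->hdr.bh1.num_pkts;
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)((char *)pbd +
				      pbd->hdr.bh1.offset_to_first_pkt);
	for (i = 0; i < num; i++) {
		cb(ppd);
		ppd = (struct tpacket3_hdr *)((char *)ppd + ppd->tp_next_offset);
	}
	/* afterwards the block is handed back by writing TP_STATUS_KERNEL
	 * into pbd->hdr.bh1.block_status */
}
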
1102static void *packet_current_rx_frame(struct packet_sock *po,
1103					    struct sk_buff *skb,
1104					    int status, unsigned int len)
1105{
1106	char *curr = NULL;
1107	switch (po->tp_version) {
1108	case TPACKET_V1:
1109	case TPACKET_V2:
1110		curr = packet_lookup_frame(po, &po->rx_ring,
1111					po->rx_ring.head, status);
1112		return curr;
1113	case TPACKET_V3:
1114		return __packet_lookup_frame_in_block(po, skb, status, len);
1115	default:
1116		WARN(1, "TPACKET version not supported\n");
1117		BUG();
1118		return NULL;
1119	}
1120}
1121
1122static void *prb_lookup_block(struct packet_sock *po,
1123				     struct packet_ring_buffer *rb,
1124				     unsigned int idx,
1125				     int status)
1126{
1127	struct tpacket_kbdq_core *pkc  = GET_PBDQC_FROM_RB(rb);
1128	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);
1129
1130	if (status != BLOCK_STATUS(pbd))
1131		return NULL;
1132	return pbd;
1133}
1134
1135static int prb_previous_blk_num(struct packet_ring_buffer *rb)
1136{
1137	unsigned int prev;
1138	if (rb->prb_bdqc.kactive_blk_num)
1139		prev = rb->prb_bdqc.kactive_blk_num-1;
1140	else
1141		prev = rb->prb_bdqc.knum_blocks-1;
1142	return prev;
1143}
1144
1145/* Assumes caller has held the rx_queue.lock */
1146static void *__prb_previous_block(struct packet_sock *po,
1147					 struct packet_ring_buffer *rb,
1148					 int status)
1149{
1150	unsigned int previous = prb_previous_blk_num(rb);
1151	return prb_lookup_block(po, rb, previous, status);
1152}
1153
1154static void *packet_previous_rx_frame(struct packet_sock *po,
1155					     struct packet_ring_buffer *rb,
1156					     int status)
1157{
1158	if (po->tp_version <= TPACKET_V2)
1159		return packet_previous_frame(po, rb, status);
1160
1161	return __prb_previous_block(po, rb, status);
1162}
1163
1164static void packet_increment_rx_head(struct packet_sock *po,
1165					    struct packet_ring_buffer *rb)
1166{
1167	switch (po->tp_version) {
1168	case TPACKET_V1:
1169	case TPACKET_V2:
1170		return packet_increment_head(rb);
1171	case TPACKET_V3:
1172	default:
1173		WARN(1, "TPACKET version not supported.\n");
1174		BUG();
1175		return;
1176	}
1177}
1178
1179static void *packet_previous_frame(struct packet_sock *po,
1180		struct packet_ring_buffer *rb,
1181		int status)
1182{
1183	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
1184	return packet_lookup_frame(po, rb, previous, status);
1185}
1186
1187static void packet_increment_head(struct packet_ring_buffer *buff)
1188{
1189	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
1190}
1191
1192static void packet_inc_pending(struct packet_ring_buffer *rb)
1193{
1194	this_cpu_inc(*rb->pending_refcnt);
1195}
1196
1197static void packet_dec_pending(struct packet_ring_buffer *rb)
1198{
1199	this_cpu_dec(*rb->pending_refcnt);
1200}
1201
1202static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
1203{
1204	unsigned int refcnt = 0;
1205	int cpu;
1206
1207	/* We don't use pending refcount in rx_ring. */
1208	if (rb->pending_refcnt == NULL)
1209		return 0;
1210
1211	for_each_possible_cpu(cpu)
1212		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);
1213
1214	return refcnt;
1215}
1216
1217static int packet_alloc_pending(struct packet_sock *po)
1218{
1219	po->rx_ring.pending_refcnt = NULL;
1220
1221	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
1222	if (unlikely(po->tx_ring.pending_refcnt == NULL))
1223		return -ENOBUFS;
1224
1225	return 0;
1226}
1227
1228static void packet_free_pending(struct packet_sock *po)
1229{
1230	free_percpu(po->tx_ring.pending_refcnt);
1231}
1232
1233#define ROOM_POW_OFF	2
1234#define ROOM_NONE	0x0
1235#define ROOM_LOW	0x1
1236#define ROOM_NORMAL	0x2
1237
1238static bool __tpacket_has_room(struct packet_sock *po, int pow_off)
1239{
1240	int idx, len;
1241
1242	len = po->rx_ring.frame_max + 1;
1243	idx = po->rx_ring.head;
1244	if (pow_off)
1245		idx += len >> pow_off;
1246	if (idx >= len)
1247		idx -= len;
1248	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1249}
1250
1251static bool __tpacket_v3_has_room(struct packet_sock *po, int pow_off)
1252{
1253	int idx, len;
1254
1255	len = po->rx_ring.prb_bdqc.knum_blocks;
1256	idx = po->rx_ring.prb_bdqc.kactive_blk_num;
1257	if (pow_off)
1258		idx += len >> pow_off;
1259	if (idx >= len)
1260		idx -= len;
1261	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
1262}
1263
1264static int __packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1265{
1266	struct sock *sk = &po->sk;
1267	int ret = ROOM_NONE;
1268
1269	if (po->prot_hook.func != tpacket_rcv) {
1270		int avail = sk->sk_rcvbuf - atomic_read(&sk->sk_rmem_alloc)
1271					  - (skb ? skb->truesize : 0);
1272		if (avail > (sk->sk_rcvbuf >> ROOM_POW_OFF))
1273			return ROOM_NORMAL;
1274		else if (avail > 0)
1275			return ROOM_LOW;
1276		else
1277			return ROOM_NONE;
1278	}
1279
1280	if (po->tp_version == TPACKET_V3) {
1281		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
1282			ret = ROOM_NORMAL;
1283		else if (__tpacket_v3_has_room(po, 0))
1284			ret = ROOM_LOW;
1285	} else {
1286		if (__tpacket_has_room(po, ROOM_POW_OFF))
1287			ret = ROOM_NORMAL;
1288		else if (__tpacket_has_room(po, 0))
1289			ret = ROOM_LOW;
1290	}
1291
1292	return ret;
1293}
1294
1295static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
1296{
 
1297	int ret;
1298	bool has_room;
1299
1300	spin_lock_bh(&po->sk.sk_receive_queue.lock);
1301	ret = __packet_rcv_has_room(po, skb);
1302	has_room = ret == ROOM_NORMAL;
1303	if (po->pressure == has_room)
1304		po->pressure = !has_room;
1305	spin_unlock_bh(&po->sk.sk_receive_queue.lock);
1306
1307	return ret;
1308}
1309
1310static void packet_sock_destruct(struct sock *sk)
1311{
1312	skb_queue_purge(&sk->sk_error_queue);
1313
1314	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
1315	WARN_ON(atomic_read(&sk->sk_wmem_alloc));
1316
1317	if (!sock_flag(sk, SOCK_DEAD)) {
1318		pr_err("Attempt to release alive packet socket: %p\n", sk);
1319		return;
1320	}
1321
1322	sk_refcnt_debug_dec(sk);
1323}
1324
1325static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
1326{
1327	u32 rxhash;
 
1328	int i, count = 0;
1329
1330	rxhash = skb_get_hash(skb);
1331	for (i = 0; i < ROLLOVER_HLEN; i++)
1332		if (po->rollover->history[i] == rxhash)
1333			count++;
1334
1335	po->rollover->history[prandom_u32() % ROLLOVER_HLEN] = rxhash;
1336	return count > (ROLLOVER_HLEN >> 1);
1337}
1338
1339static unsigned int fanout_demux_hash(struct packet_fanout *f,
1340				      struct sk_buff *skb,
1341				      unsigned int num)
1342{
1343	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
1344}
1345
1346static unsigned int fanout_demux_lb(struct packet_fanout *f,
1347				    struct sk_buff *skb,
1348				    unsigned int num)
1349{
1350	unsigned int val = atomic_inc_return(&f->rr_cur);
1351
1352	return val % num;
1353}
1354
1355static unsigned int fanout_demux_cpu(struct packet_fanout *f,
1356				     struct sk_buff *skb,
1357				     unsigned int num)
1358{
1359	return smp_processor_id() % num;
1360}
1361
1362static unsigned int fanout_demux_rnd(struct packet_fanout *f,
1363				     struct sk_buff *skb,
1364				     unsigned int num)
1365{
1366	return prandom_u32_max(num);
1367}
1368
1369static unsigned int fanout_demux_rollover(struct packet_fanout *f,
1370					  struct sk_buff *skb,
1371					  unsigned int idx, bool try_self,
1372					  unsigned int num)
1373{
1374	struct packet_sock *po, *po_next, *po_skip = NULL;
1375	unsigned int i, j, room = ROOM_NONE;
1376
1377	po = pkt_sk(f->arr[idx]);
1378
1379	if (try_self) {
1380		room = packet_rcv_has_room(po, skb);
1381		if (room == ROOM_NORMAL ||
1382		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
1383			return idx;
1384		po_skip = po;
1385	}
1386
1387	i = j = min_t(int, po->rollover->sock, num - 1);
1388	do {
1389		po_next = pkt_sk(f->arr[i]);
1390		if (po_next != po_skip && !po_next->pressure &&
1391		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
1392			if (i != j)
1393				po->rollover->sock = i;
1394			atomic_long_inc(&po->rollover->num);
1395			if (room == ROOM_LOW)
1396				atomic_long_inc(&po->rollover->num_huge);
1397			return i;
1398		}
1399
1400		if (++i == num)
1401			i = 0;
1402	} while (i != j);
1403
1404	atomic_long_inc(&po->rollover->num_failed);
1405	return idx;
1406}
1407
1408static unsigned int fanout_demux_qm(struct packet_fanout *f,
1409				    struct sk_buff *skb,
1410				    unsigned int num)
1411{
1412	return skb_get_queue_mapping(skb) % num;
1413}
1414
1415static unsigned int fanout_demux_bpf(struct packet_fanout *f,
1416				     struct sk_buff *skb,
1417				     unsigned int num)
1418{
1419	struct bpf_prog *prog;
1420	unsigned int ret = 0;
1421
1422	rcu_read_lock();
1423	prog = rcu_dereference(f->bpf_prog);
1424	if (prog)
1425		ret = bpf_prog_run_clear_cb(prog, skb) % num;
1426	rcu_read_unlock();
1427
1428	return ret;
1429}
1430
1431static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
1432{
1433	return f->flags & (flag >> 8);
1434}
1435
1436static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
1437			     struct packet_type *pt, struct net_device *orig_dev)
1438{
1439	struct packet_fanout *f = pt->af_packet_priv;
1440	unsigned int num = READ_ONCE(f->num_members);
1441	struct net *net = read_pnet(&f->net);
1442	struct packet_sock *po;
1443	unsigned int idx;
1444
1445	if (!net_eq(dev_net(dev), net) || !num) {
1446		kfree_skb(skb);
1447		return 0;
1448	}
1449
1450	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
1451		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
1452		if (!skb)
1453			return 0;
1454	}
1455	switch (f->type) {
1456	case PACKET_FANOUT_HASH:
1457	default:
1458		idx = fanout_demux_hash(f, skb, num);
1459		break;
1460	case PACKET_FANOUT_LB:
1461		idx = fanout_demux_lb(f, skb, num);
1462		break;
1463	case PACKET_FANOUT_CPU:
1464		idx = fanout_demux_cpu(f, skb, num);
1465		break;
1466	case PACKET_FANOUT_RND:
1467		idx = fanout_demux_rnd(f, skb, num);
1468		break;
1469	case PACKET_FANOUT_QM:
1470		idx = fanout_demux_qm(f, skb, num);
1471		break;
1472	case PACKET_FANOUT_ROLLOVER:
1473		idx = fanout_demux_rollover(f, skb, 0, false, num);
1474		break;
1475	case PACKET_FANOUT_CBPF:
1476	case PACKET_FANOUT_EBPF:
1477		idx = fanout_demux_bpf(f, skb, num);
1478		break;
1479	}
1480
1481	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
1482		idx = fanout_demux_rollover(f, skb, idx, true, num);
1483
1484	po = pkt_sk(f->arr[idx]);
1485	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
1486}
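/* Illustrative only: user-space joins the fanout group dispatched by
 * packet_rcv_fanout() above with a single setsockopt().  The low 16 bits of
 * the value carry the group id (42 below is an arbitrary example), the upper
 * 16 bits the mode and flags decoded by fanout_add().
 *
 *	int val = 42 | (PACKET_FANOUT_HASH << 16);
 *	// optionally: val |= PACKET_FANOUT_FLAG_DEFRAG << 16;
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 */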
1487
1488DEFINE_MUTEX(fanout_mutex);
1489EXPORT_SYMBOL_GPL(fanout_mutex);
1490static LIST_HEAD(fanout_list);
 
1491
1492static void __fanout_link(struct sock *sk, struct packet_sock *po)
1493{
1494	struct packet_fanout *f = po->fanout;
1495
1496	spin_lock(&f->lock);
1497	f->arr[f->num_members] = sk;
1498	smp_wmb();
1499	f->num_members++;
1500	if (f->num_members == 1)
1501		dev_add_pack(&f->prot_hook);
1502	spin_unlock(&f->lock);
1503}
1504
1505static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
1506{
1507	struct packet_fanout *f = po->fanout;
1508	int i;
1509
1510	spin_lock(&f->lock);
1511	for (i = 0; i < f->num_members; i++) {
1512		if (f->arr[i] == sk)
1513			break;
1514	}
1515	BUG_ON(i >= f->num_members);
1516	f->arr[i] = f->arr[f->num_members - 1];
1517	f->num_members--;
1518	if (f->num_members == 0)
1519		__dev_remove_pack(&f->prot_hook);
1520	spin_unlock(&f->lock);
1521}
1522
1523static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
1524{
1525	if (sk->sk_family != PF_PACKET)
1526		return false;
1527
1528	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
1529}
1530
1531static void fanout_init_data(struct packet_fanout *f)
1532{
1533	switch (f->type) {
1534	case PACKET_FANOUT_LB:
1535		atomic_set(&f->rr_cur, 0);
1536		break;
1537	case PACKET_FANOUT_CBPF:
1538	case PACKET_FANOUT_EBPF:
1539		RCU_INIT_POINTER(f->bpf_prog, NULL);
1540		break;
1541	}
1542}
1543
1544static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
1545{
1546	struct bpf_prog *old;
1547
1548	spin_lock(&f->lock);
1549	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
1550	rcu_assign_pointer(f->bpf_prog, new);
1551	spin_unlock(&f->lock);
1552
1553	if (old) {
1554		synchronize_net();
1555		bpf_prog_destroy(old);
1556	}
1557}
1558
1559static int fanout_set_data_cbpf(struct packet_sock *po, char __user *data,
1560				unsigned int len)
1561{
1562	struct bpf_prog *new;
1563	struct sock_fprog fprog;
1564	int ret;
1565
1566	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1567		return -EPERM;
1568	if (len != sizeof(fprog))
1569		return -EINVAL;
1570	if (copy_from_user(&fprog, data, len))
1571		return -EFAULT;
1572
1573	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
1574	if (ret)
1575		return ret;
1576
1577	__fanout_set_data_bpf(po->fanout, new);
1578	return 0;
1579}
1580
1581static int fanout_set_data_ebpf(struct packet_sock *po, char __user *data,
1582				unsigned int len)
1583{
1584	struct bpf_prog *new;
1585	u32 fd;
1586
1587	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
1588		return -EPERM;
1589	if (len != sizeof(fd))
1590		return -EINVAL;
1591	if (copy_from_user(&fd, data, len))
1592		return -EFAULT;
1593
1594	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
1595	if (IS_ERR(new))
1596		return PTR_ERR(new);
1597
1598	__fanout_set_data_bpf(po->fanout, new);
1599	return 0;
1600}
1601
1602static int fanout_set_data(struct packet_sock *po, char __user *data,
1603			   unsigned int len)
1604{
1605	switch (po->fanout->type) {
1606	case PACKET_FANOUT_CBPF:
1607		return fanout_set_data_cbpf(po, data, len);
1608	case PACKET_FANOUT_EBPF:
1609		return fanout_set_data_ebpf(po, data, len);
1610	default:
1611		return -EINVAL;
1612	}
1613}
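/* Illustrative only: for a PACKET_FANOUT_CBPF group, user-space loads the
 * classic BPF demux program through PACKET_FANOUT_DATA, which lands in
 * fanout_set_data_cbpf() above.  The trivial program below steers every
 * packet to member 0 (the program's return value is taken modulo the number
 * of members).
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 0 },	// always return 0
 *	};
 *	struct sock_fprog fprog = { .len = 1, .filter = code };
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA, &fprog, sizeof(fprog));
 */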
1614
1615static void fanout_release_data(struct packet_fanout *f)
1616{
1617	switch (f->type) {
1618	case PACKET_FANOUT_CBPF:
1619	case PACKET_FANOUT_EBPF:
1620		__fanout_set_data_bpf(f, NULL);
1621	}
1622}
1623
1624static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
1625{
1626	struct packet_rollover *rollover = NULL;
1627	struct packet_sock *po = pkt_sk(sk);
 
1628	struct packet_fanout *f, *match;
1629	u8 type = type_flags & 0xff;
1630	u8 flags = type_flags >> 8;
 
1631	int err;
1632
1633	switch (type) {
1634	case PACKET_FANOUT_ROLLOVER:
1635		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
1636			return -EINVAL;
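		/* fall through */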
1637	case PACKET_FANOUT_HASH:
1638	case PACKET_FANOUT_LB:
1639	case PACKET_FANOUT_CPU:
1640	case PACKET_FANOUT_RND:
1641	case PACKET_FANOUT_QM:
1642	case PACKET_FANOUT_CBPF:
1643	case PACKET_FANOUT_EBPF:
1644		break;
1645	default:
1646		return -EINVAL;
1647	}
1648
1649	mutex_lock(&fanout_mutex);
1650
1651	err = -EINVAL;
1652	if (!po->running)
1653		goto out;
1654
1655	err = -EALREADY;
1656	if (po->fanout)
1657		goto out;
1658
1659	if (type == PACKET_FANOUT_ROLLOVER ||
1660	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
1661		err = -ENOMEM;
1662		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
1663		if (!rollover)
1664			goto out;
1665		atomic_long_set(&rollover->num, 0);
1666		atomic_long_set(&rollover->num_huge, 0);
1667		atomic_long_set(&rollover->num_failed, 0);
1668		po->rollover = rollover;
1669	}
1670
1671	match = NULL;
1672	list_for_each_entry(f, &fanout_list, list) {
1673		if (f->id == id &&
1674		    read_pnet(&f->net) == sock_net(sk)) {
1675			match = f;
1676			break;
1677		}
1678	}
1679	err = -EINVAL;
1680	if (match && match->flags != flags)
1681		goto out;
1682	if (!match) {
1683		err = -ENOMEM;
1684		match = kzalloc(sizeof(*match), GFP_KERNEL);
1685		if (!match)
1686			goto out;
1687		write_pnet(&match->net, sock_net(sk));
1688		match->id = id;
1689		match->type = type;
1690		match->flags = flags;
1691		INIT_LIST_HEAD(&match->list);
1692		spin_lock_init(&match->lock);
1693		atomic_set(&match->sk_ref, 0);
1694		fanout_init_data(match);
1695		match->prot_hook.type = po->prot_hook.type;
1696		match->prot_hook.dev = po->prot_hook.dev;
1697		match->prot_hook.func = packet_rcv_fanout;
1698		match->prot_hook.af_packet_priv = match;
1699		match->prot_hook.id_match = match_fanout_group;
1700		list_add(&match->list, &fanout_list);
1701	}
1702	err = -EINVAL;
1703	if (match->type == type &&
1704	    match->prot_hook.type == po->prot_hook.type &&
1705	    match->prot_hook.dev == po->prot_hook.dev) {
1706		err = -ENOSPC;
1707		if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
1708			__dev_remove_pack(&po->prot_hook);
1709			po->fanout = match;
1710			atomic_inc(&match->sk_ref);
1711			__fanout_link(sk, po);
1712			err = 0;
1713		}
1714	}
1715out:
1716	if (err && rollover) {
1717		kfree(rollover);
1718		po->rollover = NULL;
1719	}
1720	mutex_unlock(&fanout_mutex);
1721	return err;
1722}
1723
1724/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
1725 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
1726 * It is the responsibility of the caller to call fanout_release_data() and
1727 * free the returned packet_fanout (after synchronize_net())
1728 */
1729static struct packet_fanout *fanout_release(struct sock *sk)
1730{
1731	struct packet_sock *po = pkt_sk(sk);
1732	struct packet_fanout *f;
1733
1734	mutex_lock(&fanout_mutex);
1735	f = po->fanout;
1736	if (f) {
1737		po->fanout = NULL;
1738
1739		if (atomic_dec_and_test(&f->sk_ref))
1740			list_del(&f->list);
1741		else
1742			f = NULL;
1743
1744		if (po->rollover)
1745			kfree_rcu(po->rollover, rcu);
1746	}
1747	mutex_unlock(&fanout_mutex);
1748
1749	return f;
1750}
1751
1752static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
1753					  struct sk_buff *skb)
1754{
1755	/* Earlier code assumed this would be a VLAN pkt, double-check
1756	 * this now that we have the actual packet in hand. We can only
1757	 * do this check on Ethernet devices.
1758	 */
1759	if (unlikely(dev->type != ARPHRD_ETHER))
1760		return false;
1761
1762	skb_reset_mac_header(skb);
1763	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
1764}
1765
1766static const struct proto_ops packet_ops;
1767
1768static const struct proto_ops packet_ops_spkt;
1769
1770static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
1771			   struct packet_type *pt, struct net_device *orig_dev)
1772{
1773	struct sock *sk;
1774	struct sockaddr_pkt *spkt;
1775
1776	/*
1777	 *	When we registered the protocol we saved the socket in the data
1778	 *	field for just this event.
1779	 */
1780
1781	sk = pt->af_packet_priv;
1782
1783	/*
1784	 *	Yank back the headers [hope the device set this
1785	 *	right or kerboom...]
1786	 *
1787	 *	Incoming packets have the ll header pulled;
1788	 *	push it back.
1789	 *
1790	 *	For outgoing ones skb->data == skb_mac_header(skb),
1791	 *	so this procedure is a no-op.
1792	 */
1793
1794	if (skb->pkt_type == PACKET_LOOPBACK)
1795		goto out;
1796
1797	if (!net_eq(dev_net(dev), sock_net(sk)))
1798		goto out;
1799
1800	skb = skb_share_check(skb, GFP_ATOMIC);
1801	if (skb == NULL)
1802		goto oom;
1803
1804	/* drop any routing info */
1805	skb_dst_drop(skb);
1806
1807	/* drop conntrack reference */
1808	nf_reset(skb);
1809
1810	spkt = &PACKET_SKB_CB(skb)->sa.pkt;
1811
1812	skb_push(skb, skb->data - skb_mac_header(skb));
1813
1814	/*
1815	 *	The SOCK_PACKET socket receives _all_ frames.
1816	 */
1817
1818	spkt->spkt_family = dev->type;
1819	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
1820	spkt->spkt_protocol = skb->protocol;
1821
1822	/*
1823	 *	Charge the memory to the socket. This is done specifically
1824	 *	to prevent sockets from using up all the memory.
1825	 */
1826
1827	if (sock_queue_rcv_skb(sk, skb) == 0)
1828		return 0;
1829
1830out:
1831	kfree_skb(skb);
1832oom:
1833	return 0;
1834}
1835
1836
1837/*
1838 *	Output a raw packet to a device layer. This bypasses all the other
1839 *	protocol layers and you must therefore supply it with a complete frame
1840 */
1841
1842static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
1843			       size_t len)
1844{
1845	struct sock *sk = sock->sk;
1846	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
1847	struct sk_buff *skb = NULL;
1848	struct net_device *dev;
1849	struct sockcm_cookie sockc;
1850	__be16 proto = 0;
1851	int err;
1852	int extra_len = 0;
1853
1854	/*
1855	 *	Get and verify the address.
1856	 */
1857
1858	if (saddr) {
1859		if (msg->msg_namelen < sizeof(struct sockaddr))
1860			return -EINVAL;
1861		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
1862			proto = saddr->spkt_protocol;
1863	} else
1864		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */
1865
1866	/*
1867	 *	Find the device first to size check it
1868	 */
1869
1870	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
1871retry:
1872	rcu_read_lock();
1873	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
1874	err = -ENODEV;
1875	if (dev == NULL)
1876		goto out_unlock;
1877
1878	err = -ENETDOWN;
1879	if (!(dev->flags & IFF_UP))
1880		goto out_unlock;
1881
1882	/*
1883	 * You may not queue a frame bigger than the MTU. This is the lowest level
1884	 * raw protocol and you must do your own fragmentation at this level.
1885	 */
1886
1887	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
1888		if (!netif_supports_nofcs(dev)) {
1889			err = -EPROTONOSUPPORT;
1890			goto out_unlock;
1891		}
1892		extra_len = 4; /* We're doing our own CRC */
1893	}
1894
1895	err = -EMSGSIZE;
1896	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
1897		goto out_unlock;
1898
1899	if (!skb) {
1900		size_t reserved = LL_RESERVED_SPACE(dev);
1901		int tlen = dev->needed_tailroom;
1902		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;
1903
1904		rcu_read_unlock();
1905		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
1906		if (skb == NULL)
1907			return -ENOBUFS;
1908		/* FIXME: Save some space for broken drivers that write a hard
1909		 * header at transmission time by themselves. PPP is the notable
1910		 * one here. This should really be fixed at the driver level.
1911		 */
1912		skb_reserve(skb, reserved);
1913		skb_reset_network_header(skb);
1914
1915		/* Try to align data part correctly */
1916		if (hhlen) {
1917			skb->data -= hhlen;
1918			skb->tail -= hhlen;
1919			if (len < hhlen)
1920				skb_reset_network_header(skb);
1921		}
1922		err = memcpy_from_msg(skb_put(skb, len), msg, len);
1923		if (err)
1924			goto out_free;
1925		goto retry;
1926	}
1927
1928	if (!dev_validate_header(dev, skb->data, len)) {
1929		err = -EINVAL;
1930		goto out_unlock;
1931	}
1932	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
1933	    !packet_extra_vlan_len_allowed(dev, skb)) {
1934		err = -EMSGSIZE;
1935		goto out_unlock;
1936	}
1937
1938	sockc.tsflags = sk->sk_tsflags;
1939	if (msg->msg_controllen) {
1940		err = sock_cmsg_send(sk, msg, &sockc);
1941		if (unlikely(err))
1942			goto out_unlock;
1943	}
1944
1945	skb->protocol = proto;
1946	skb->dev = dev;
1947	skb->priority = sk->sk_priority;
1948	skb->mark = sk->sk_mark;
 
1949
1950	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
1951
1952	if (unlikely(extra_len == 4))
1953		skb->no_fcs = 1;
1954
1955	skb_probe_transport_header(skb, 0);
1956
1957	dev_queue_xmit(skb);
1958	rcu_read_unlock();
1959	return len;
1960
1961out_unlock:
1962	rcu_read_unlock();
1963out_free:
1964	kfree_skb(skb);
1965	return err;
1966}
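/* Illustrative only: the legacy SOCK_PACKET transmit path above is driven
 * from user-space with a sockaddr_pkt that names the outgoing device; the
 * device name "eth0" and the frame buffer are placeholders.
 *
 *	struct sockaddr_pkt spkt = { .spkt_family = AF_PACKET };
 *	strncpy((char *)spkt.spkt_device, "eth0", sizeof(spkt.spkt_device));
 *	spkt.spkt_protocol = htons(ETH_P_IP);
 *	sendto(fd, frame, frame_len, 0,
 *	       (struct sockaddr *)&spkt, sizeof(spkt));
 */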
1967
1968static unsigned int run_filter(struct sk_buff *skb,
1969			       const struct sock *sk,
1970			       unsigned int res)
1971{
1972	struct sk_filter *filter;
1973
1974	rcu_read_lock();
1975	filter = rcu_dereference(sk->sk_filter);
1976	if (filter != NULL)
1977		res = bpf_prog_run_clear_cb(filter->prog, skb);
1978	rcu_read_unlock();
1979
1980	return res;
1981}
1982
1983static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
1984			   size_t *len)
1985{
1986	struct virtio_net_hdr vnet_hdr;
1987
1988	if (*len < sizeof(vnet_hdr))
1989		return -EINVAL;
1990	*len -= sizeof(vnet_hdr);
1991
1992	if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
1993		return -EINVAL;
1994
1995	return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
1996}
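/* Illustrative only: the virtio_net header copied to user-space above is
 * only present when the socket opted in beforehand, e.g.:
 *
 *	int on = 1;
 *	setsockopt(fd, SOL_PACKET, PACKET_VNET_HDR, &on, sizeof(on));
 */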
1997
1998/*
1999 * This function does lazy skb cloning in the hope that most packets
2000 * are discarded by BPF.
2001 *
2002 * Note the tricky part: we DO mangle shared skbs! skb->data, skb->len
2003 * and skb->cb are mangled. It works because (and until) packets
2004 * falling here are owned by the current CPU. Output packets are cloned
2005 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2006 * sequentially, so that if we return the skb to its original state on exit,
2007 * we will not harm anyone.
2008 */
2009
2010static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2011		      struct packet_type *pt, struct net_device *orig_dev)
2012{
 
2013	struct sock *sk;
2014	struct sockaddr_ll *sll;
2015	struct packet_sock *po;
2016	u8 *skb_head = skb->data;
2017	int skb_len = skb->len;
2018	unsigned int snaplen, res;
2019	bool is_drop_n_account = false;
2020
2021	if (skb->pkt_type == PACKET_LOOPBACK)
2022		goto drop;
2023
2024	sk = pt->af_packet_priv;
2025	po = pkt_sk(sk);
2026
2027	if (!net_eq(dev_net(dev), sock_net(sk)))
2028		goto drop;
2029
2030	skb->dev = dev;
2031
2032	if (dev->header_ops) {
2033		/* The device has an explicit notion of ll header,
2034		 * exported to higher levels.
2035		 *
2036		 * Otherwise, the device hides details of its frame
2037		 * structure, so that the corresponding packet head is
2038		 * never delivered to the user.
2039		 */
2040		if (sk->sk_type != SOCK_DGRAM)
2041			skb_push(skb, skb->data - skb_mac_header(skb));
2042		else if (skb->pkt_type == PACKET_OUTGOING) {
2043			/* Special case: outgoing packets have ll header at head */
2044			skb_pull(skb, skb_network_offset(skb));
2045		}
2046	}
2047
2048	snaplen = skb->len;
2049
2050	res = run_filter(skb, sk, snaplen);
2051	if (!res)
2052		goto drop_n_restore;
2053	if (snaplen > res)
2054		snaplen = res;
2055
2056	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2057		goto drop_n_acct;
2058
2059	if (skb_shared(skb)) {
2060		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2061		if (nskb == NULL)
2062			goto drop_n_acct;
2063
2064		if (skb_head != skb->data) {
2065			skb->data = skb_head;
2066			skb->len = skb_len;
2067		}
2068		consume_skb(skb);
2069		skb = nskb;
2070	}
2071
2072	sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2073
2074	sll = &PACKET_SKB_CB(skb)->sa.ll;
2075	sll->sll_hatype = dev->type;
2076	sll->sll_pkttype = skb->pkt_type;
2077	if (unlikely(po->origdev))
2078		sll->sll_ifindex = orig_dev->ifindex;
2079	else
2080		sll->sll_ifindex = dev->ifindex;
2081
2082	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2083
2084	/* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2085	 * Use their space for storing the original skb length.
2086	 */
2087	PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2088
2089	if (pskb_trim(skb, snaplen))
2090		goto drop_n_acct;
2091
2092	skb_set_owner_r(skb, sk);
2093	skb->dev = NULL;
2094	skb_dst_drop(skb);
2095
2096	/* drop conntrack reference */
2097	nf_reset(skb);
2098
2099	spin_lock(&sk->sk_receive_queue.lock);
2100	po->stats.stats1.tp_packets++;
2101	sock_skb_set_dropcount(sk, skb);
 
2102	__skb_queue_tail(&sk->sk_receive_queue, skb);
2103	spin_unlock(&sk->sk_receive_queue.lock);
2104	sk->sk_data_ready(sk);
2105	return 0;
2106
2107drop_n_acct:
2108	is_drop_n_account = true;
2109	spin_lock(&sk->sk_receive_queue.lock);
2110	po->stats.stats1.tp_drops++;
2111	atomic_inc(&sk->sk_drops);
2112	spin_unlock(&sk->sk_receive_queue.lock);
2113
2114drop_n_restore:
2115	if (skb_head != skb->data && skb_shared(skb)) {
2116		skb->data = skb_head;
2117		skb->len = skb_len;
2118	}
2119drop:
2120	if (!is_drop_n_account)
2121		consume_skb(skb);
2122	else
2123		kfree_skb(skb);
2124	return 0;
2125}
2126
2127static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2128		       struct packet_type *pt, struct net_device *orig_dev)
2129{
 
2130	struct sock *sk;
2131	struct packet_sock *po;
2132	struct sockaddr_ll *sll;
2133	union tpacket_uhdr h;
2134	u8 *skb_head = skb->data;
2135	int skb_len = skb->len;
2136	unsigned int snaplen, res;
2137	unsigned long status = TP_STATUS_USER;
2138	unsigned short macoff, netoff, hdrlen;
 
2139	struct sk_buff *copy_skb = NULL;
2140	struct timespec ts;
2141	__u32 ts_status;
2142	bool is_drop_n_account = false;
 
2143
2144	/* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2145	 * We may add members to them up to the current aligned size without forcing
2146	 * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2147	 */
2148	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2149	BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2150
2151	if (skb->pkt_type == PACKET_LOOPBACK)
2152		goto drop;
2153
2154	sk = pt->af_packet_priv;
2155	po = pkt_sk(sk);
2156
2157	if (!net_eq(dev_net(dev), sock_net(sk)))
2158		goto drop;
2159
2160	if (dev->header_ops) {
2161		if (sk->sk_type != SOCK_DGRAM)
2162			skb_push(skb, skb->data - skb_mac_header(skb));
2163		else if (skb->pkt_type == PACKET_OUTGOING) {
2164			/* Special case: outgoing packets have ll header at head */
2165			skb_pull(skb, skb_network_offset(skb));
2166		}
2167	}
2168
2169	snaplen = skb->len;
2170
2171	res = run_filter(skb, sk, snaplen);
2172	if (!res)
2173		goto drop_n_restore;
2174
2175	if (skb->ip_summed == CHECKSUM_PARTIAL)
2176		status |= TP_STATUS_CSUMNOTREADY;
2177	else if (skb->pkt_type != PACKET_OUTGOING &&
2178		 (skb->ip_summed == CHECKSUM_COMPLETE ||
2179		  skb_csum_unnecessary(skb)))
2180		status |= TP_STATUS_CSUM_VALID;
2181
2182	if (snaplen > res)
2183		snaplen = res;
2184
2185	if (sk->sk_type == SOCK_DGRAM) {
2186		macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2187				  po->tp_reserve;
2188	} else {
2189		unsigned int maclen = skb_network_offset(skb);
2190		netoff = TPACKET_ALIGN(po->tp_hdrlen +
2191				       (maclen < 16 ? 16 : maclen)) +
2192				       po->tp_reserve;
2193		if (po->has_vnet_hdr)
2194			netoff += sizeof(struct virtio_net_hdr);
 
2195		macoff = netoff - maclen;
2196	}
2197	if (po->tp_version <= TPACKET_V2) {
2198		if (macoff + snaplen > po->rx_ring.frame_size) {
2199			if (po->copy_thresh &&
2200			    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2201				if (skb_shared(skb)) {
2202					copy_skb = skb_clone(skb, GFP_ATOMIC);
2203				} else {
2204					copy_skb = skb_get(skb);
2205					skb_head = skb->data;
2206				}
2207				if (copy_skb)
2208					skb_set_owner_r(copy_skb, sk);
2209			}
2210			snaplen = po->rx_ring.frame_size - macoff;
2211			if ((int)snaplen < 0)
2212				snaplen = 0;
2213		}
2214	} else if (unlikely(macoff + snaplen >
2215			    GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2216		u32 nval;
2217
2218		nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2219		pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2220			    snaplen, nval, macoff);
2221		snaplen = nval;
2222		if (unlikely((int)snaplen < 0)) {
2223			snaplen = 0;
2224			macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
 
2225		}
2226	}
2227	spin_lock(&sk->sk_receive_queue.lock);
2228	h.raw = packet_current_rx_frame(po, skb,
2229					TP_STATUS_KERNEL, (macoff+snaplen));
2230	if (!h.raw)
2231		goto drop_n_account;
2232	if (po->tp_version <= TPACKET_V2) {
2233		packet_increment_rx_head(po, &po->rx_ring);
2234	/*
2235	 * LOSING will be reported till you read the stats,
2236	 * because it's COR - Clear On Read.
2237	 * Anyway, moving it for V1/V2 only as V3 doesn't need this
2238	 * at packet level.
2239	 */
2240		if (po->stats.stats1.tp_drops)
2241			status |= TP_STATUS_LOSING;
2242	}
 
2243	po->stats.stats1.tp_packets++;
2244	if (copy_skb) {
2245		status |= TP_STATUS_COPY;
 
2246		__skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2247	}
2248	spin_unlock(&sk->sk_receive_queue.lock);
2249
2250	if (po->has_vnet_hdr) {
2251		if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
2252					    sizeof(struct virtio_net_hdr),
2253					    vio_le(), true)) {
2254			spin_lock(&sk->sk_receive_queue.lock);
2255			goto drop_n_account;
2256		}
2257	}
2258
2259	skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2260
2261	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
2262		getnstimeofday(&ts);
2263
2264	status |= ts_status;
2265
2266	switch (po->tp_version) {
2267	case TPACKET_V1:
2268		h.h1->tp_len = skb->len;
2269		h.h1->tp_snaplen = snaplen;
2270		h.h1->tp_mac = macoff;
2271		h.h1->tp_net = netoff;
2272		h.h1->tp_sec = ts.tv_sec;
2273		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2274		hdrlen = sizeof(*h.h1);
2275		break;
2276	case TPACKET_V2:
2277		h.h2->tp_len = skb->len;
2278		h.h2->tp_snaplen = snaplen;
2279		h.h2->tp_mac = macoff;
2280		h.h2->tp_net = netoff;
2281		h.h2->tp_sec = ts.tv_sec;
2282		h.h2->tp_nsec = ts.tv_nsec;
2283		if (skb_vlan_tag_present(skb)) {
2284			h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2285			h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2286			status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2287		} else {
2288			h.h2->tp_vlan_tci = 0;
2289			h.h2->tp_vlan_tpid = 0;
2290		}
2291		memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2292		hdrlen = sizeof(*h.h2);
2293		break;
2294	case TPACKET_V3:
2295		/* tp_nxt_offset and vlan are already populated above,
2296		 * so DON'T clear those fields here.
2297		 */
2298		h.h3->tp_status |= status;
2299		h.h3->tp_len = skb->len;
2300		h.h3->tp_snaplen = snaplen;
2301		h.h3->tp_mac = macoff;
2302		h.h3->tp_net = netoff;
2303		h.h3->tp_sec  = ts.tv_sec;
2304		h.h3->tp_nsec = ts.tv_nsec;
2305		memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2306		hdrlen = sizeof(*h.h3);
2307		break;
2308	default:
2309		BUG();
2310	}
2311
2312	sll = h.raw + TPACKET_ALIGN(hdrlen);
2313	sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2314	sll->sll_family = AF_PACKET;
2315	sll->sll_hatype = dev->type;
2316	sll->sll_protocol = skb->protocol;
2317	sll->sll_pkttype = skb->pkt_type;
2318	if (unlikely(po->origdev))
2319		sll->sll_ifindex = orig_dev->ifindex;
2320	else
2321		sll->sll_ifindex = dev->ifindex;
2322
2323	smp_mb();
2324
2325#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2326	if (po->tp_version <= TPACKET_V2) {
2327		u8 *start, *end;
2328
2329		end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2330					macoff + snaplen);
2331
2332		for (start = h.raw; start < end; start += PAGE_SIZE)
2333			flush_dcache_page(pgv_to_page(start));
2334	}
2335	smp_wmb();
2336#endif
2337
2338	if (po->tp_version <= TPACKET_V2) {
2339		__packet_set_status(po, h.raw, status);
2340		sk->sk_data_ready(sk);
2341	} else {
2342		prb_clear_blk_fill_status(&po->rx_ring);
2343	}
2344
2345drop_n_restore:
2346	if (skb_head != skb->data && skb_shared(skb)) {
2347		skb->data = skb_head;
2348		skb->len = skb_len;
2349	}
2350drop:
2351	if (!is_drop_n_account)
2352		consume_skb(skb);
2353	else
2354		kfree_skb(skb);
2355	return 0;
2356
2357drop_n_account:
2358	is_drop_n_account = true;
2359	po->stats.stats1.tp_drops++;
2360	spin_unlock(&sk->sk_receive_queue.lock);
2361
2362	sk->sk_data_ready(sk);
2363	kfree_skb(copy_skb);
2364	goto drop_n_restore;
2365}
2366
2367static void tpacket_destruct_skb(struct sk_buff *skb)
2368{
2369	struct packet_sock *po = pkt_sk(skb->sk);
2370
2371	if (likely(po->tx_ring.pg_vec)) {
2372		void *ph;
2373		__u32 ts;
2374
2375		ph = skb_shinfo(skb)->destructor_arg;
2376		packet_dec_pending(&po->tx_ring);
2377
2378		ts = __packet_set_timestamp(po, ph, skb);
2379		__packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2380	}
2381
2382	sock_wfree(skb);
2383}
2384
2385static void tpacket_set_protocol(const struct net_device *dev,
2386				 struct sk_buff *skb)
2387{
2388	if (dev->type == ARPHRD_ETHER) {
2389		skb_reset_mac_header(skb);
2390		skb->protocol = eth_hdr(skb)->h_proto;
2391	}
2392}
2393
2394static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2395{
2396	if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2397	    (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2398	     __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2399	      __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2400		vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2401			 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2402			__virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2403
2404	if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2405		return -EINVAL;
2406
2407	return 0;
2408}
2409
2410static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2411				 struct virtio_net_hdr *vnet_hdr)
2412{
2413	if (*len < sizeof(*vnet_hdr))
2414		return -EINVAL;
2415	*len -= sizeof(*vnet_hdr);
2416
2417	if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2418		return -EFAULT;
2419
2420	return __packet_snd_vnet_parse(vnet_hdr, *len);
2421}
2422
2423static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2424		void *frame, struct net_device *dev, void *data, int tp_len,
2425		__be16 proto, unsigned char *addr, int hlen, int copylen,
2426		const struct sockcm_cookie *sockc)
2427{
2428	union tpacket_uhdr ph;
2429	int to_write, offset, len, nr_frags, len_max;
2430	struct socket *sock = po->sk.sk_socket;
2431	struct page *page;
2432	int err;
2433
2434	ph.raw = frame;
2435
2436	skb->protocol = proto;
2437	skb->dev = dev;
2438	skb->priority = po->sk.sk_priority;
2439	skb->mark = po->sk.sk_mark;
2440	sock_tx_timestamp(&po->sk, sockc->tsflags, &skb_shinfo(skb)->tx_flags);
2441	skb_shinfo(skb)->destructor_arg = ph.raw;
 
2442
2443	skb_reserve(skb, hlen);
2444	skb_reset_network_header(skb);
2445
2446	to_write = tp_len;
2447
2448	if (sock->type == SOCK_DGRAM) {
2449		err = dev_hard_header(skb, dev, ntohs(proto), addr,
2450				NULL, tp_len);
2451		if (unlikely(err < 0))
2452			return -EINVAL;
2453	} else if (copylen) {
2454		int hdrlen = min_t(int, copylen, tp_len);
2455
2456		skb_push(skb, dev->hard_header_len);
2457		skb_put(skb, copylen - dev->hard_header_len);
2458		err = skb_store_bits(skb, 0, data, hdrlen);
2459		if (unlikely(err))
2460			return err;
2461		if (!dev_validate_header(dev, skb->data, hdrlen))
2462			return -EINVAL;
2463		if (!skb->protocol)
2464			tpacket_set_protocol(dev, skb);
2465
2466		data += hdrlen;
2467		to_write -= hdrlen;
2468	}
2469
2470	offset = offset_in_page(data);
2471	len_max = PAGE_SIZE - offset;
2472	len = ((to_write > len_max) ? len_max : to_write);
2473
2474	skb->data_len = to_write;
2475	skb->len += to_write;
2476	skb->truesize += to_write;
2477	atomic_add(to_write, &po->sk.sk_wmem_alloc);
2478
2479	while (likely(to_write)) {
2480		nr_frags = skb_shinfo(skb)->nr_frags;
2481
2482		if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2483			pr_err("Packet exceeds the number of skb frags (%lu)\n",
2484			       MAX_SKB_FRAGS);
2485			return -EFAULT;
2486		}
2487
2488		page = pgv_to_page(data);
2489		data += len;
2490		flush_dcache_page(page);
2491		get_page(page);
2492		skb_fill_page_desc(skb, nr_frags, page, offset, len);
2493		to_write -= len;
2494		offset = 0;
2495		len_max = PAGE_SIZE;
2496		len = ((to_write > len_max) ? len_max : to_write);
2497	}
2498
2499	skb_probe_transport_header(skb, 0);
2500
2501	return tp_len;
2502}
2503
2504static int tpacket_parse_header(struct packet_sock *po, void *frame,
2505				int size_max, void **data)
2506{
2507	union tpacket_uhdr ph;
2508	int tp_len, off;
2509
2510	ph.raw = frame;
2511
2512	switch (po->tp_version) {
2513	case TPACKET_V2:
2514		tp_len = ph.h2->tp_len;
2515		break;
2516	default:
2517		tp_len = ph.h1->tp_len;
2518		break;
2519	}
2520	if (unlikely(tp_len > size_max)) {
2521		pr_err("packet size is too long (%d > %d)\n", tp_len, size_max);
2522		return -EMSGSIZE;
2523	}
2524
2525	if (unlikely(po->tp_tx_has_off)) {
2526		int off_min, off_max;
2527
2528		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2529		off_max = po->tx_ring.frame_size - tp_len;
2530		if (po->sk.sk_type == SOCK_DGRAM) {
2531			switch (po->tp_version) {
2532			case TPACKET_V2:
2533				off = ph.h2->tp_net;
2534				break;
2535			default:
2536				off = ph.h1->tp_net;
2537				break;
2538			}
2539		} else {
2540			switch (po->tp_version) {
2541			case TPACKET_V2:
2542				off = ph.h2->tp_mac;
2543				break;
2544			default:
2545				off = ph.h1->tp_mac;
2546				break;
2547			}
2548		}
2549		if (unlikely((off < off_min) || (off_max < off)))
2550			return -EINVAL;
2551	} else {
2552		off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2553	}
2554
2555	*data = frame + off;
2556	return tp_len;
2557}
2558
2559static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2560{
2561	struct sk_buff *skb;
2562	struct net_device *dev;
2563	struct virtio_net_hdr *vnet_hdr = NULL;
2564	struct sockcm_cookie sockc;
2565	__be16 proto;
2566	int err, reserve = 0;
2567	void *ph;
2568	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2569	bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2570	int tp_len, size_max;
2571	unsigned char *addr;
2572	void *data;
2573	int len_sum = 0;
2574	int status = TP_STATUS_AVAILABLE;
2575	int hlen, tlen, copylen = 0;
 
2576
2577	mutex_lock(&po->pg_vec_lock);
2578
2579	if (likely(saddr == NULL)) {
2580		dev	= packet_cached_dev_get(po);
2581		proto	= po->num;
2582		addr	= NULL;
2583	} else {
2584		err = -EINVAL;
2585		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2586			goto out;
2587		if (msg->msg_namelen < (saddr->sll_halen
2588					+ offsetof(struct sockaddr_ll,
2589						sll_addr)))
2590			goto out;
2591		proto	= saddr->sll_protocol;
2592		addr	= saddr->sll_addr;
2593		dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2594	}
2595
2596	sockc.tsflags = po->sk.sk_tsflags;
2597	if (msg->msg_controllen) {
2598		err = sock_cmsg_send(&po->sk, msg, &sockc);
2599		if (unlikely(err))
2600			goto out;
2601	}
2602
2603	err = -ENXIO;
2604	if (unlikely(dev == NULL))
2605		goto out;
2606	err = -ENETDOWN;
2607	if (unlikely(!(dev->flags & IFF_UP)))
2608		goto out_put;
2609
2610	if (po->sk.sk_socket->type == SOCK_RAW)
2611		reserve = dev->hard_header_len;
2612	size_max = po->tx_ring.frame_size
2613		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2614
2615	if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2616		size_max = dev->mtu + reserve + VLAN_HLEN;
2617
2618	do {
2619		ph = packet_current_frame(po, &po->tx_ring,
2620					  TP_STATUS_SEND_REQUEST);
2621		if (unlikely(ph == NULL)) {
2622			if (need_wait && need_resched())
2623				schedule();
2624			continue;
2625		}
2626
2627		skb = NULL;
2628		tp_len = tpacket_parse_header(po, ph, size_max, &data);
2629		if (tp_len < 0)
2630			goto tpacket_error;
2631
2632		status = TP_STATUS_SEND_REQUEST;
2633		hlen = LL_RESERVED_SPACE(dev);
2634		tlen = dev->needed_tailroom;
2635		if (po->has_vnet_hdr) {
2636			vnet_hdr = data;
2637			data += sizeof(*vnet_hdr);
2638			tp_len -= sizeof(*vnet_hdr);
2639			if (tp_len < 0 ||
2640			    __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2641				tp_len = -EINVAL;
2642				goto tpacket_error;
2643			}
2644			copylen = __virtio16_to_cpu(vio_le(),
2645						    vnet_hdr->hdr_len);
2646		}
2647		copylen = max_t(int, copylen, dev->hard_header_len);
2648		skb = sock_alloc_send_skb(&po->sk,
2649				hlen + tlen + sizeof(struct sockaddr_ll) +
2650				(copylen - dev->hard_header_len),
2651				!need_wait, &err);
2652
2653		if (unlikely(skb == NULL)) {
2654			/* we assume the socket was initially writeable ... */
2655			if (likely(len_sum > 0))
2656				err = len_sum;
2657			goto out_status;
2658		}
2659		tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2660					  addr, hlen, copylen, &sockc);
2661		if (likely(tp_len >= 0) &&
2662		    tp_len > dev->mtu + reserve &&
2663		    !po->has_vnet_hdr &&
2664		    !packet_extra_vlan_len_allowed(dev, skb))
2665			tp_len = -EMSGSIZE;
2666
2667		if (unlikely(tp_len < 0)) {
2668tpacket_error:
2669			if (po->tp_loss) {
2670				__packet_set_status(po, ph,
2671						TP_STATUS_AVAILABLE);
2672				packet_increment_head(&po->tx_ring);
2673				kfree_skb(skb);
2674				continue;
2675			} else {
2676				status = TP_STATUS_WRONG_FORMAT;
2677				err = tp_len;
2678				goto out_status;
2679			}
2680		}
2681
2682		if (po->has_vnet_hdr && virtio_net_hdr_to_skb(skb, vnet_hdr,
2683							      vio_le())) {
2684			tp_len = -EINVAL;
2685			goto tpacket_error;
2686		}
2687
2688		packet_pick_tx_queue(dev, skb);
2689
2690		skb->destructor = tpacket_destruct_skb;
2691		__packet_set_status(po, ph, TP_STATUS_SENDING);
2692		packet_inc_pending(&po->tx_ring);
2693
2694		status = TP_STATUS_SEND_REQUEST;
2695		err = po->xmit(skb);
2696		if (unlikely(err > 0)) {
2697			err = net_xmit_errno(err);
 
2698			if (err && __packet_get_status(po, ph) ==
2699				   TP_STATUS_AVAILABLE) {
2700				/* skb was destructed already */
2701				skb = NULL;
2702				goto out_status;
2703			}
2704			/*
2705			 * skb was dropped but not destructed yet;
2706			 * let's treat it like congestion or err < 0
2707			 */
2708			err = 0;
2709		}
2710		packet_increment_head(&po->tx_ring);
2711		len_sum += tp_len;
2712	} while (likely((ph != NULL) ||
2713		/* Note: packet_read_pending() might be slow if we have
2714		 * to call it as it's a per-cpu variable, but in the fast path
2715		 * we already short-circuit the loop with the first
2716		 * condition, and luckily don't have to go that path
2717		 * anyway.
2718		 */
2719		 (need_wait && packet_read_pending(&po->tx_ring))));
2720
2721	err = len_sum;
2722	goto out_put;
2723
2724out_status:
2725	__packet_set_status(po, ph, status);
2726	kfree_skb(skb);
2727out_put:
2728	dev_put(dev);
2729out:
2730	mutex_unlock(&po->pg_vec_lock);
2731	return err;
2732}
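/* Illustrative only: a minimal user-space view of the TX ring consumed by
 * tpacket_snd() above, assuming a TPACKET_V2 ring already set up with
 * PACKET_TX_RING and mmap().  Each slot is filled, flagged
 * TP_STATUS_SEND_REQUEST and kicked with an empty send(); the kernel flips
 * the slot back to TP_STATUS_AVAILABLE in tpacket_destruct_skb().
 *
 *	struct tpacket2_hdr *hdr = frame;	// current mmap()ed TX slot
 *
 *	memcpy((char *)hdr + TPACKET2_HDRLEN - sizeof(struct sockaddr_ll),
 *	       pkt, pkt_len);
 *	hdr->tp_len = pkt_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);			// flush the ring
 */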
2733
2734static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2735				        size_t reserve, size_t len,
2736				        size_t linear, int noblock,
2737				        int *err)
2738{
2739	struct sk_buff *skb;
2740
2741	/* Under a page?  Don't bother with paged skb. */
2742	if (prepad + len < PAGE_SIZE || !linear)
2743		linear = len;
2744
2745	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2746				   err, 0);
2747	if (!skb)
2748		return NULL;
2749
2750	skb_reserve(skb, reserve);
2751	skb_put(skb, linear);
2752	skb->data_len = len - linear;
2753	skb->len += len - linear;
2754
2755	return skb;
2756}
2757
2758static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2759{
2760	struct sock *sk = sock->sk;
2761	DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2762	struct sk_buff *skb;
2763	struct net_device *dev;
2764	__be16 proto;
2765	unsigned char *addr;
2766	int err, reserve = 0;
2767	struct sockcm_cookie sockc;
2768	struct virtio_net_hdr vnet_hdr = { 0 };
2769	int offset = 0;
2770	struct packet_sock *po = pkt_sk(sk);
 
2771	int hlen, tlen, linear;
2772	int extra_len = 0;
2773
2774	/*
2775	 *	Get and verify the address.
2776	 */
2777
2778	if (likely(saddr == NULL)) {
2779		dev	= packet_cached_dev_get(po);
2780		proto	= po->num;
2781		addr	= NULL;
2782	} else {
2783		err = -EINVAL;
2784		if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2785			goto out;
2786		if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2787			goto out;
2788		proto	= saddr->sll_protocol;
2789		addr	= saddr->sll_addr;
2790		dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2791	}
2792
2793	err = -ENXIO;
2794	if (unlikely(dev == NULL))
2795		goto out_unlock;
2796	err = -ENETDOWN;
2797	if (unlikely(!(dev->flags & IFF_UP)))
2798		goto out_unlock;
2799
2800	sockc.tsflags = sk->sk_tsflags;
2801	sockc.mark = sk->sk_mark;
2802	if (msg->msg_controllen) {
2803		err = sock_cmsg_send(sk, msg, &sockc);
2804		if (unlikely(err))
2805			goto out_unlock;
2806	}
2807
2808	if (sock->type == SOCK_RAW)
2809		reserve = dev->hard_header_len;
2810	if (po->has_vnet_hdr) {
2811		err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2812		if (err)
2813			goto out_unlock;
2814	}
2815
2816	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2817		if (!netif_supports_nofcs(dev)) {
2818			err = -EPROTONOSUPPORT;
2819			goto out_unlock;
2820		}
2821		extra_len = 4; /* We're doing our own CRC */
2822	}
2823
2824	err = -EMSGSIZE;
2825	if (!vnet_hdr.gso_type &&
2826	    (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2827		goto out_unlock;
2828
2829	err = -ENOBUFS;
2830	hlen = LL_RESERVED_SPACE(dev);
2831	tlen = dev->needed_tailroom;
2832	linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2833	linear = max(linear, min_t(int, len, dev->hard_header_len));
2834	skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2835			       msg->msg_flags & MSG_DONTWAIT, &err);
2836	if (skb == NULL)
2837		goto out_unlock;
2838
2839	skb_set_network_header(skb, reserve);
2840
2841	err = -EINVAL;
2842	if (sock->type == SOCK_DGRAM) {
2843		offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2844		if (unlikely(offset < 0))
2845			goto out_free;
2846	}
2847
2848	/* Returns -EFAULT on error */
2849	err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2850	if (err)
2851		goto out_free;
2852
2853	if (sock->type == SOCK_RAW &&
2854	    !dev_validate_header(dev, skb->data, len)) {
2855		err = -EINVAL;
2856		goto out_free;
2857	}
2858
2859	sock_tx_timestamp(sk, sockc.tsflags, &skb_shinfo(skb)->tx_flags);
2860
2861	if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2862	    !packet_extra_vlan_len_allowed(dev, skb)) {
2863		err = -EMSGSIZE;
2864		goto out_free;
2865	}
2866
2867	skb->protocol = proto;
2868	skb->dev = dev;
2869	skb->priority = sk->sk_priority;
2870	skb->mark = sockc.mark;
2871
2872	packet_pick_tx_queue(dev, skb);
2873
2874	if (po->has_vnet_hdr) {
2875		err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
2876		if (err)
2877			goto out_free;
2878		len += sizeof(vnet_hdr);
 
2879	}
2880
2881	skb_probe_transport_header(skb, reserve);
2882
2883	if (unlikely(extra_len == 4))
2884		skb->no_fcs = 1;
2885
2886	err = po->xmit(skb);
2887	if (err > 0 && (err = net_xmit_errno(err)) != 0)
2888		goto out_unlock;
2889
2890	dev_put(dev);
2891
2892	return len;
2893
2894out_free:
2895	kfree_skb(skb);
2896out_unlock:
2897	if (dev)
2898		dev_put(dev);
2899out:
2900	return err;
2901}
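/* Illustrative only: the non-ring transmit path above is what a plain
 * sendto() on a SOCK_DGRAM packet socket exercises; the kernel then builds
 * the link-layer header from the sockaddr_ll destination.  The interface
 * name and MAC address below are placeholders.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_IP),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *		.sll_halen    = ETH_ALEN,
 *		.sll_addr     = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *	};
 *	sendto(fd, payload, payload_len, 0,
 *	       (struct sockaddr *)&sll, sizeof(sll));
 */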
2902
2903static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
2904{
2905	struct sock *sk = sock->sk;
2906	struct packet_sock *po = pkt_sk(sk);
2907
2908	if (po->tx_ring.pg_vec)
2909		return tpacket_snd(po, msg);
2910	else
2911		return packet_snd(sock, msg, len);
2912}
2913
2914/*
2915 *	Close a PACKET socket. This is fairly simple. We immediately go
2916 *	to 'closed' state and remove our protocol entry in the device list.
2917 */
2918
2919static int packet_release(struct socket *sock)
2920{
2921	struct sock *sk = sock->sk;
2922	struct packet_sock *po;
2923	struct packet_fanout *f;
2924	struct net *net;
2925	union tpacket_req_u req_u;
2926
2927	if (!sk)
2928		return 0;
2929
2930	net = sock_net(sk);
2931	po = pkt_sk(sk);
2932
2933	mutex_lock(&net->packet.sklist_lock);
2934	sk_del_node_init_rcu(sk);
2935	mutex_unlock(&net->packet.sklist_lock);
2936
2937	preempt_disable();
2938	sock_prot_inuse_add(net, sk->sk_prot, -1);
2939	preempt_enable();
2940
2941	spin_lock(&po->bind_lock);
2942	unregister_prot_hook(sk, false);
2943	packet_cached_dev_reset(po);
2944
2945	if (po->prot_hook.dev) {
2946		dev_put(po->prot_hook.dev);
2947		po->prot_hook.dev = NULL;
2948	}
2949	spin_unlock(&po->bind_lock);
2950
2951	packet_flush_mclist(sk);
2952
 
2953	if (po->rx_ring.pg_vec) {
2954		memset(&req_u, 0, sizeof(req_u));
2955		packet_set_ring(sk, &req_u, 1, 0);
2956	}
2957
2958	if (po->tx_ring.pg_vec) {
2959		memset(&req_u, 0, sizeof(req_u));
2960		packet_set_ring(sk, &req_u, 1, 1);
2961	}
 
2962
2963	f = fanout_release(sk);
2964
2965	synchronize_net();
2966
 
2967	if (f) {
2968		fanout_release_data(f);
2969		kfree(f);
2970	}
2971	/*
2972	 *	Now the socket is dead. No more input will appear.
2973	 */
2974	sock_orphan(sk);
2975	sock->sk = NULL;
2976
2977	/* Purge queues */
2978
2979	skb_queue_purge(&sk->sk_receive_queue);
2980	packet_free_pending(po);
2981	sk_refcnt_debug_release(sk);
2982
2983	sock_put(sk);
2984	return 0;
2985}
2986
2987/*
2988 *	Attach a packet hook.
2989 */
2990
2991static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2992			  __be16 proto)
2993{
2994	struct packet_sock *po = pkt_sk(sk);
2995	struct net_device *dev_curr;
2996	__be16 proto_curr;
2997	bool need_rehook;
2998	struct net_device *dev = NULL;
2999	int ret = 0;
3000	bool unlisted = false;
3001
3002	if (po->fanout)
3003		return -EINVAL;
3004
3005	lock_sock(sk);
3006	spin_lock(&po->bind_lock);
3007	rcu_read_lock();
3008
3009	if (name) {
3010		dev = dev_get_by_name_rcu(sock_net(sk), name);
3011		if (!dev) {
3012			ret = -ENODEV;
3013			goto out_unlock;
3014		}
3015	} else if (ifindex) {
3016		dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3017		if (!dev) {
3018			ret = -ENODEV;
3019			goto out_unlock;
3020		}
3021	}
3022
3023	if (dev)
3024		dev_hold(dev);
3025
3026	proto_curr = po->prot_hook.type;
3027	dev_curr = po->prot_hook.dev;
3028
3029	need_rehook = proto_curr != proto || dev_curr != dev;
3030
3031	if (need_rehook) {
3032		if (po->running) {
3033			rcu_read_unlock();
3034			__unregister_prot_hook(sk, true);
3035			rcu_read_lock();
3036			dev_curr = po->prot_hook.dev;
3037			if (dev)
3038				unlisted = !dev_get_by_index_rcu(sock_net(sk),
3039								 dev->ifindex);
3040		}
3041
3042		po->num = proto;
 
3043		po->prot_hook.type = proto;
3044
3045		if (unlikely(unlisted)) {
3046			dev_put(dev);
3047			po->prot_hook.dev = NULL;
3048			po->ifindex = -1;
3049			packet_cached_dev_reset(po);
3050		} else {
3051			po->prot_hook.dev = dev;
3052			po->ifindex = dev ? dev->ifindex : 0;
3053			packet_cached_dev_assign(po, dev);
3054		}
 
3055	}
3056	if (dev_curr)
3057		dev_put(dev_curr);
3058
3059	if (proto == 0 || !need_rehook)
3060		goto out_unlock;
3061
3062	if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3063		register_prot_hook(sk);
3064	} else {
3065		sk->sk_err = ENETDOWN;
3066		if (!sock_flag(sk, SOCK_DEAD))
3067			sk->sk_error_report(sk);
3068	}
3069
3070out_unlock:
3071	rcu_read_unlock();
3072	spin_unlock(&po->bind_lock);
3073	release_sock(sk);
3074	return ret;
3075}
3076
3077/*
3078 *	Bind a packet socket to a device
3079 */
3080
3081static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3082			    int addr_len)
3083{
3084	struct sock *sk = sock->sk;
3085	char name[sizeof(uaddr->sa_data) + 1];
3086
3087	/*
3088	 *	Check legality
3089	 */
3090
3091	if (addr_len != sizeof(struct sockaddr))
3092		return -EINVAL;
3093	/* uaddr->sa_data comes from userspace; it's not guaranteed to be
3094	 * zero-terminated.
3095	 */
3096	memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3097	name[sizeof(uaddr->sa_data)] = 0;
3098
3099	return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3100}
3101
3102static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3103{
3104	struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3105	struct sock *sk = sock->sk;
3106
3107	/*
3108	 *	Check legality
3109	 */
3110
3111	if (addr_len < sizeof(struct sockaddr_ll))
3112		return -EINVAL;
3113	if (sll->sll_family != AF_PACKET)
3114		return -EINVAL;
3115
3116	return packet_do_bind(sk, NULL, sll->sll_ifindex,
3117			      sll->sll_protocol ? : pkt_sk(sk)->num);
3118}
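/* Illustrative only: binding an AF_PACKET socket to one interface from
 * user-space reaches packet_do_bind() via packet_bind() above; "eth0" is a
 * placeholder interface name.
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = if_nametoindex("eth0"),
 *	};
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */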
3119
3120static struct proto packet_proto = {
3121	.name	  = "PACKET",
3122	.owner	  = THIS_MODULE,
3123	.obj_size = sizeof(struct packet_sock),
3124};
3125
3126/*
3127 *	Create a packet of type SOCK_PACKET.
3128 */
3129
3130static int packet_create(struct net *net, struct socket *sock, int protocol,
3131			 int kern)
3132{
3133	struct sock *sk;
3134	struct packet_sock *po;
3135	__be16 proto = (__force __be16)protocol; /* weird, but documented */
3136	int err;
3137
3138	if (!ns_capable(net->user_ns, CAP_NET_RAW))
3139		return -EPERM;
3140	if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3141	    sock->type != SOCK_PACKET)
3142		return -ESOCKTNOSUPPORT;
3143
3144	sock->state = SS_UNCONNECTED;
3145
3146	err = -ENOBUFS;
3147	sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3148	if (sk == NULL)
3149		goto out;
3150
3151	sock->ops = &packet_ops;
3152	if (sock->type == SOCK_PACKET)
3153		sock->ops = &packet_ops_spkt;
3154
3155	sock_init_data(sock, sk);
3156
3157	po = pkt_sk(sk);
 
3158	sk->sk_family = PF_PACKET;
3159	po->num = proto;
3160	po->xmit = dev_queue_xmit;
3161
3162	err = packet_alloc_pending(po);
3163	if (err)
3164		goto out2;
3165
3166	packet_cached_dev_reset(po);
3167
3168	sk->sk_destruct = packet_sock_destruct;
3169	sk_refcnt_debug_inc(sk);
3170
3171	/*
3172	 *	Attach a protocol block
3173	 */
3174
3175	spin_lock_init(&po->bind_lock);
3176	mutex_init(&po->pg_vec_lock);
3177	po->rollover = NULL;
3178	po->prot_hook.func = packet_rcv;
3179
3180	if (sock->type == SOCK_PACKET)
3181		po->prot_hook.func = packet_rcv_spkt;
3182
3183	po->prot_hook.af_packet_priv = sk;
3184
3185	if (proto) {
3186		po->prot_hook.type = proto;
3187		register_prot_hook(sk);
3188	}
3189
3190	mutex_lock(&net->packet.sklist_lock);
3191	sk_add_node_rcu(sk, &net->packet.sklist);
3192	mutex_unlock(&net->packet.sklist_lock);
3193
3194	preempt_disable();
3195	sock_prot_inuse_add(net, &packet_proto, 1);
3196	preempt_enable();
3197
3198	return 0;
3199out2:
3200	sk_free(sk);
3201out:
3202	return err;
3203}
3204
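/*
 * A minimal userspace sketch of reaching packet_create() through socket(2).
 * The protocol argument is already in network byte order (the "weird, but
 * documented" conversion above), and the caller is assumed to hold
 * CAP_NET_RAW.
 *
 *	int raw  = socket(AF_PACKET, SOCK_RAW,   htons(ETH_P_ALL));  // link-level frames
 *	int dgrm = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));   // cooked, no link header
 */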
3205/*
3206 *	Pull a packet from our receive queue and hand it to the user.
3207 *	If necessary we block.
3208 */
3209
3210static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3211			  int flags)
3212{
3213	struct sock *sk = sock->sk;
3214	struct sk_buff *skb;
3215	int copied, err;
3216	int vnet_hdr_len = 0;
3217	unsigned int origlen = 0;
3218
3219	err = -EINVAL;
3220	if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3221		goto out;
3222
3223#if 0
3224	/* What error should we return now? EUNATTACH? */
3225	if (pkt_sk(sk)->ifindex < 0)
3226		return -ENODEV;
3227#endif
3228
3229	if (flags & MSG_ERRQUEUE) {
3230		err = sock_recv_errqueue(sk, msg, len,
3231					 SOL_PACKET, PACKET_TX_TIMESTAMP);
3232		goto out;
3233	}
3234
3235	/*
3236	 *	Call the generic datagram receiver. This handles all sorts
3237	 *	of horrible races and re-entrancy so we can forget about it
3238	 *	in the protocol layers.
3239	 *
3240	 *	Now it will return ENETDOWN if the device has just gone down,
3241	 *	but then it will block.
3242	 */
3243
3244	skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3245
3246	/*
3247	 *	An error occurred, so return it. Because skb_recv_datagram()
3248	 *	handles the blocking for us, we don't need to see or worry
3249	 *	about blocking retries.
3250	 */
3251
3252	if (skb == NULL)
3253		goto out;
3254
3255	if (pkt_sk(sk)->pressure)
3256		packet_rcv_has_room(pkt_sk(sk), NULL);
3257
3258	if (pkt_sk(sk)->has_vnet_hdr) {
3259		err = packet_rcv_vnet(msg, skb, &len);
3260		if (err)
3261			goto out_free;
3262		vnet_hdr_len = sizeof(struct virtio_net_hdr);
3263	}
3264
3265	/* You lose any data beyond the buffer you gave. If it worries
3266	 * a user program, it can ask the device for its MTU
3267	 * anyway.
3268	 */
3269	copied = skb->len;
3270	if (copied > len) {
3271		copied = len;
3272		msg->msg_flags |= MSG_TRUNC;
3273	}
3274
3275	err = skb_copy_datagram_msg(skb, 0, msg, copied);
3276	if (err)
3277		goto out_free;
3278
3279	if (sock->type != SOCK_PACKET) {
3280		struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3281
3282		/* Original length was stored in sockaddr_ll fields */
3283		origlen = PACKET_SKB_CB(skb)->sa.origlen;
3284		sll->sll_family = AF_PACKET;
3285		sll->sll_protocol = skb->protocol;
3286	}
3287
3288	sock_recv_ts_and_drops(msg, sk, skb);
3289
3290	if (msg->msg_name) {
3291		/* If the address length field is there to be filled
3292		 * in, we fill it in now.
3293		 */
3294		if (sock->type == SOCK_PACKET) {
3295			__sockaddr_check_size(sizeof(struct sockaddr_pkt));
3296			msg->msg_namelen = sizeof(struct sockaddr_pkt);
3297		} else {
3298			struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3299
3300			msg->msg_namelen = sll->sll_halen +
3301				offsetof(struct sockaddr_ll, sll_addr);
3302		}
3303		memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa,
3304		       msg->msg_namelen);
3305	}
3306
3307	if (pkt_sk(sk)->auxdata) {
3308		struct tpacket_auxdata aux;
3309
3310		aux.tp_status = TP_STATUS_USER;
3311		if (skb->ip_summed == CHECKSUM_PARTIAL)
3312			aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3313		else if (skb->pkt_type != PACKET_OUTGOING &&
3314			 (skb->ip_summed == CHECKSUM_COMPLETE ||
3315			  skb_csum_unnecessary(skb)))
3316			aux.tp_status |= TP_STATUS_CSUM_VALID;
3317
3318		aux.tp_len = origlen;
3319		aux.tp_snaplen = skb->len;
3320		aux.tp_mac = 0;
3321		aux.tp_net = skb_network_offset(skb);
3322		if (skb_vlan_tag_present(skb)) {
3323			aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3324			aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3325			aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3326		} else {
3327			aux.tp_vlan_tci = 0;
3328			aux.tp_vlan_tpid = 0;
3329		}
3330		put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3331	}
3332
3333	/*
3334	 *	Free or return the buffer as appropriate. Again this
3335	 *	hides all the races and re-entrancy issues from us.
3336	 */
3337	err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3338
3339out_free:
3340	skb_free_datagram(sk, skb);
3341out:
3342	return err;
3343}
3344
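/*
 * A minimal userspace sketch of consuming the PACKET_AUXDATA control message
 * that packet_recvmsg() emits above; fd is assumed to be a bound AF_PACKET
 * socket and the buffer size is arbitrary.
 *
 *	int one = 1;
 *	char frame[2048];
 *	char cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = frame, .iov_len = sizeof(frame) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *c;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, &one, sizeof(one));
 *	recvmsg(fd, &msg, 0);
 *	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c)) {
 *		if (c->cmsg_level == SOL_PACKET && c->cmsg_type == PACKET_AUXDATA) {
 *			struct tpacket_auxdata *aux = (void *)CMSG_DATA(c);
 *			// aux->tp_len: wire length, aux->tp_snaplen: bytes received
 *		}
 *	}
 */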
3345static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3346			       int *uaddr_len, int peer)
3347{
3348	struct net_device *dev;
3349	struct sock *sk	= sock->sk;
3350
3351	if (peer)
3352		return -EOPNOTSUPP;
3353
3354	uaddr->sa_family = AF_PACKET;
3355	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3356	rcu_read_lock();
3357	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3358	if (dev)
3359		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3360	rcu_read_unlock();
3361	*uaddr_len = sizeof(*uaddr);
3362
3363	return 0;
3364}
3365
3366static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3367			  int *uaddr_len, int peer)
3368{
3369	struct net_device *dev;
3370	struct sock *sk = sock->sk;
3371	struct packet_sock *po = pkt_sk(sk);
3372	DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3373
3374	if (peer)
3375		return -EOPNOTSUPP;
3376
3377	sll->sll_family = AF_PACKET;
3378	sll->sll_ifindex = po->ifindex;
3379	sll->sll_protocol = po->num;
3380	sll->sll_pkttype = 0;
3381	rcu_read_lock();
3382	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3383	if (dev) {
3384		sll->sll_hatype = dev->type;
3385		sll->sll_halen = dev->addr_len;
3386		memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3387	} else {
3388		sll->sll_hatype = 0;	/* Bad: we have no ARPHRD_UNSPEC */
3389		sll->sll_halen = 0;
3390	}
3391	rcu_read_unlock();
3392	*uaddr_len = offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3393
3394	return 0;
3395}
3396
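/*
 * A minimal userspace sketch of reading the bound interface back through
 * getsockname(2), which is answered by packet_getname() above.
 *
 *	struct sockaddr_ll sll;
 *	socklen_t len = sizeof(sll);
 *
 *	if (getsockname(fd, (struct sockaddr *)&sll, &len) == 0)
 *		printf("ifindex %d, %u hardware address bytes\n",
 *		       sll.sll_ifindex, sll.sll_halen);
 */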
3397static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3398			 int what)
3399{
3400	switch (i->type) {
3401	case PACKET_MR_MULTICAST:
3402		if (i->alen != dev->addr_len)
3403			return -EINVAL;
3404		if (what > 0)
3405			return dev_mc_add(dev, i->addr);
3406		else
3407			return dev_mc_del(dev, i->addr);
3408		break;
3409	case PACKET_MR_PROMISC:
3410		return dev_set_promiscuity(dev, what);
3411	case PACKET_MR_ALLMULTI:
3412		return dev_set_allmulti(dev, what);
3413	case PACKET_MR_UNICAST:
3414		if (i->alen != dev->addr_len)
3415			return -EINVAL;
3416		if (what > 0)
3417			return dev_uc_add(dev, i->addr);
3418		else
3419			return dev_uc_del(dev, i->addr);
3420		break;
3421	default:
3422		break;
3423	}
3424	return 0;
3425}
3426
3427static void packet_dev_mclist_delete(struct net_device *dev,
3428				     struct packet_mclist **mlp)
3429{
3430	struct packet_mclist *ml;
3431
3432	while ((ml = *mlp) != NULL) {
3433		if (ml->ifindex == dev->ifindex) {
3434			packet_dev_mc(dev, ml, -1);
3435			*mlp = ml->next;
3436			kfree(ml);
3437		} else
3438			mlp = &ml->next;
3439	}
3440}
3441
3442static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3443{
3444	struct packet_sock *po = pkt_sk(sk);
3445	struct packet_mclist *ml, *i;
3446	struct net_device *dev;
3447	int err;
3448
3449	rtnl_lock();
3450
3451	err = -ENODEV;
3452	dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3453	if (!dev)
3454		goto done;
3455
3456	err = -EINVAL;
3457	if (mreq->mr_alen > dev->addr_len)
3458		goto done;
3459
3460	err = -ENOBUFS;
3461	i = kmalloc(sizeof(*i), GFP_KERNEL);
3462	if (i == NULL)
3463		goto done;
3464
3465	err = 0;
3466	for (ml = po->mclist; ml; ml = ml->next) {
3467		if (ml->ifindex == mreq->mr_ifindex &&
3468		    ml->type == mreq->mr_type &&
3469		    ml->alen == mreq->mr_alen &&
3470		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3471			ml->count++;
3472			/* Free the new element ... */
3473			kfree(i);
3474			goto done;
3475		}
3476	}
3477
3478	i->type = mreq->mr_type;
3479	i->ifindex = mreq->mr_ifindex;
3480	i->alen = mreq->mr_alen;
3481	memcpy(i->addr, mreq->mr_address, i->alen);
3482	memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3483	i->count = 1;
3484	i->next = po->mclist;
3485	po->mclist = i;
3486	err = packet_dev_mc(dev, i, 1);
3487	if (err) {
3488		po->mclist = i->next;
3489		kfree(i);
3490	}
3491
3492done:
3493	rtnl_unlock();
3494	return err;
3495}
3496
3497static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3498{
3499	struct packet_mclist *ml, **mlp;
3500
3501	rtnl_lock();
3502
3503	for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3504		if (ml->ifindex == mreq->mr_ifindex &&
3505		    ml->type == mreq->mr_type &&
3506		    ml->alen == mreq->mr_alen &&
3507		    memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3508			if (--ml->count == 0) {
3509				struct net_device *dev;
3510				*mlp = ml->next;
3511				dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3512				if (dev)
3513					packet_dev_mc(dev, ml, -1);
3514				kfree(ml);
3515			}
3516			break;
3517		}
3518	}
3519	rtnl_unlock();
3520	return 0;
3521}
3522
3523static void packet_flush_mclist(struct sock *sk)
3524{
3525	struct packet_sock *po = pkt_sk(sk);
3526	struct packet_mclist *ml;
3527
3528	if (!po->mclist)
3529		return;
3530
3531	rtnl_lock();
3532	while ((ml = po->mclist) != NULL) {
3533		struct net_device *dev;
3534
3535		po->mclist = ml->next;
3536		dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3537		if (dev != NULL)
3538			packet_dev_mc(dev, ml, -1);
3539		kfree(ml);
3540	}
3541	rtnl_unlock();
3542}
3543
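/*
 * A minimal userspace sketch of the struct packet_mreq that reaches
 * packet_mc_add()/packet_mc_drop() via PACKET_ADD_MEMBERSHIP and
 * PACKET_DROP_MEMBERSHIP; PACKET_MR_PROMISC needs no address, and the
 * interface name is an assumption for the example.
 *
 *	struct packet_mreq mreq;
 *
 *	memset(&mreq, 0, sizeof(mreq));
 *	mreq.mr_ifindex = if_nametoindex("eth0");
 *	mreq.mr_type    = PACKET_MR_PROMISC;
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *	...
 *	setsockopt(fd, SOL_PACKET, PACKET_DROP_MEMBERSHIP, &mreq, sizeof(mreq));
 */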
3544static int
3545packet_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
3546{
3547	struct sock *sk = sock->sk;
3548	struct packet_sock *po = pkt_sk(sk);
3549	int ret;
3550
3551	if (level != SOL_PACKET)
3552		return -ENOPROTOOPT;
3553
3554	switch (optname) {
3555	case PACKET_ADD_MEMBERSHIP:
3556	case PACKET_DROP_MEMBERSHIP:
3557	{
3558		struct packet_mreq_max mreq;
3559		int len = optlen;
3560		memset(&mreq, 0, sizeof(mreq));
3561		if (len < sizeof(struct packet_mreq))
3562			return -EINVAL;
3563		if (len > sizeof(mreq))
3564			len = sizeof(mreq);
3565		if (copy_from_user(&mreq, optval, len))
3566			return -EFAULT;
3567		if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3568			return -EINVAL;
3569		if (optname == PACKET_ADD_MEMBERSHIP)
3570			ret = packet_mc_add(sk, &mreq);
3571		else
3572			ret = packet_mc_drop(sk, &mreq);
3573		return ret;
3574	}
3575
3576	case PACKET_RX_RING:
3577	case PACKET_TX_RING:
3578	{
3579		union tpacket_req_u req_u;
3580		int len;
3581
3582		switch (po->tp_version) {
3583		case TPACKET_V1:
3584		case TPACKET_V2:
3585			len = sizeof(req_u.req);
3586			break;
3587		case TPACKET_V3:
3588		default:
3589			len = sizeof(req_u.req3);
3590			break;
3591		}
3592		if (optlen < len)
3593			return -EINVAL;
3594		if (copy_from_user(&req_u.req, optval, len))
3595			return -EFAULT;
3596		return packet_set_ring(sk, &req_u, 0,
3597			optname == PACKET_TX_RING);
3598	}
3599	case PACKET_COPY_THRESH:
3600	{
3601		int val;
3602
3603		if (optlen != sizeof(val))
3604			return -EINVAL;
3605		if (copy_from_user(&val, optval, sizeof(val)))
3606			return -EFAULT;
3607
3608		pkt_sk(sk)->copy_thresh = val;
3609		return 0;
3610	}
3611	case PACKET_VERSION:
3612	{
3613		int val;
3614
3615		if (optlen != sizeof(val))
3616			return -EINVAL;
3617		if (copy_from_user(&val, optval, sizeof(val)))
3618			return -EFAULT;
3619		switch (val) {
3620		case TPACKET_V1:
3621		case TPACKET_V2:
3622		case TPACKET_V3:
3623			break;
3624		default:
3625			return -EINVAL;
3626		}
3627		lock_sock(sk);
3628		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3629			ret = -EBUSY;
3630		} else {
3631			po->tp_version = val;
3632			ret = 0;
3633		}
3634		release_sock(sk);
3635		return ret;
3636	}
3637	case PACKET_RESERVE:
3638	{
3639		unsigned int val;
3640
3641		if (optlen != sizeof(val))
3642			return -EINVAL;
3643		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3644			return -EBUSY;
3645		if (copy_from_user(&val, optval, sizeof(val)))
3646			return -EFAULT;
3647		po->tp_reserve = val;
3648		return 0;
3649	}
3650	case PACKET_LOSS:
3651	{
3652		unsigned int val;
3653
3654		if (optlen != sizeof(val))
3655			return -EINVAL;
3656		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3657			return -EBUSY;
3658		if (copy_from_user(&val, optval, sizeof(val)))
3659			return -EFAULT;
3660		po->tp_loss = !!val;
3661		return 0;
3662	}
3663	case PACKET_AUXDATA:
3664	{
3665		int val;
3666
3667		if (optlen < sizeof(val))
3668			return -EINVAL;
3669		if (copy_from_user(&val, optval, sizeof(val)))
3670			return -EFAULT;
3671
3672		po->auxdata = !!val;
3673		return 0;
3674	}
3675	case PACKET_ORIGDEV:
3676	{
3677		int val;
3678
3679		if (optlen < sizeof(val))
3680			return -EINVAL;
3681		if (copy_from_user(&val, optval, sizeof(val)))
3682			return -EFAULT;
3683
3684		po->origdev = !!val;
3685		return 0;
3686	}
3687	case PACKET_VNET_HDR:
3688	{
3689		int val;
3690
3691		if (sock->type != SOCK_RAW)
3692			return -EINVAL;
3693		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3694			return -EBUSY;
3695		if (optlen < sizeof(val))
3696			return -EINVAL;
3697		if (copy_from_user(&val, optval, sizeof(val)))
3698			return -EFAULT;
3699
3700		po->has_vnet_hdr = !!val;
3701		return 0;
3702	}
3703	case PACKET_TIMESTAMP:
3704	{
3705		int val;
3706
3707		if (optlen != sizeof(val))
3708			return -EINVAL;
3709		if (copy_from_user(&val, optval, sizeof(val)))
3710			return -EFAULT;
3711
3712		po->tp_tstamp = val;
3713		return 0;
3714	}
3715	case PACKET_FANOUT:
3716	{
3717		int val;
3718
3719		if (optlen != sizeof(val))
3720			return -EINVAL;
3721		if (copy_from_user(&val, optval, sizeof(val)))
3722			return -EFAULT;
3723
3724		return fanout_add(sk, val & 0xffff, val >> 16);
3725	}
3726	case PACKET_FANOUT_DATA:
3727	{
3728		if (!po->fanout)
3729			return -EINVAL;
3730
3731		return fanout_set_data(po, optval, optlen);
3732	}
3733	case PACKET_TX_HAS_OFF:
3734	{
3735		unsigned int val;
3736
3737		if (optlen != sizeof(val))
3738			return -EINVAL;
3739		if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
3740			return -EBUSY;
3741		if (copy_from_user(&val, optval, sizeof(val)))
3742			return -EFAULT;
3743		po->tp_tx_has_off = !!val;
3744		return 0;
3745	}
3746	case PACKET_QDISC_BYPASS:
3747	{
3748		int val;
3749
3750		if (optlen != sizeof(val))
3751			return -EINVAL;
3752		if (copy_from_user(&val, optval, sizeof(val)))
3753			return -EFAULT;
3754
3755		po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3756		return 0;
3757	}
3758	default:
3759		return -ENOPROTOOPT;
3760	}
3761}
3762
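/*
 * A minimal userspace sketch of the PACKET_FANOUT encoding expected by
 * packet_setsockopt() above: the low 16 bits carry the group id, the high
 * 16 bits the fanout mode and flags. Group id 42 is an assumption.
 *
 *	int fanout = 42 | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &fanout, sizeof(fanout));
 */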
3763static int packet_getsockopt(struct socket *sock, int level, int optname,
3764			     char __user *optval, int __user *optlen)
3765{
3766	int len;
3767	int val, lv = sizeof(val);
3768	struct sock *sk = sock->sk;
3769	struct packet_sock *po = pkt_sk(sk);
3770	void *data = &val;
3771	union tpacket_stats_u st;
3772	struct tpacket_rollover_stats rstats;
3773
3774	if (level != SOL_PACKET)
3775		return -ENOPROTOOPT;
3776
3777	if (get_user(len, optlen))
3778		return -EFAULT;
3779
3780	if (len < 0)
3781		return -EINVAL;
3782
3783	switch (optname) {
3784	case PACKET_STATISTICS:
3785		spin_lock_bh(&sk->sk_receive_queue.lock);
3786		memcpy(&st, &po->stats, sizeof(st));
3787		memset(&po->stats, 0, sizeof(po->stats));
3788		spin_unlock_bh(&sk->sk_receive_queue.lock);
3789
3790		if (po->tp_version == TPACKET_V3) {
3791			lv = sizeof(struct tpacket_stats_v3);
3792			st.stats3.tp_packets += st.stats3.tp_drops;
3793			data = &st.stats3;
3794		} else {
3795			lv = sizeof(struct tpacket_stats);
3796			st.stats1.tp_packets += st.stats1.tp_drops;
3797			data = &st.stats1;
3798		}
3799
3800		break;
3801	case PACKET_AUXDATA:
3802		val = po->auxdata;
3803		break;
3804	case PACKET_ORIGDEV:
3805		val = po->origdev;
3806		break;
3807	case PACKET_VNET_HDR:
3808		val = po->has_vnet_hdr;
3809		break;
3810	case PACKET_VERSION:
3811		val = po->tp_version;
3812		break;
3813	case PACKET_HDRLEN:
3814		if (len > sizeof(int))
3815			len = sizeof(int);
3816		if (copy_from_user(&val, optval, len))
3817			return -EFAULT;
3818		switch (val) {
3819		case TPACKET_V1:
3820			val = sizeof(struct tpacket_hdr);
3821			break;
3822		case TPACKET_V2:
3823			val = sizeof(struct tpacket2_hdr);
3824			break;
3825		case TPACKET_V3:
3826			val = sizeof(struct tpacket3_hdr);
3827			break;
3828		default:
3829			return -EINVAL;
3830		}
3831		break;
3832	case PACKET_RESERVE:
3833		val = po->tp_reserve;
3834		break;
3835	case PACKET_LOSS:
3836		val = po->tp_loss;
3837		break;
3838	case PACKET_TIMESTAMP:
3839		val = po->tp_tstamp;
3840		break;
3841	case PACKET_FANOUT:
3842		val = (po->fanout ?
3843		       ((u32)po->fanout->id |
3844			((u32)po->fanout->type << 16) |
3845			((u32)po->fanout->flags << 24)) :
3846		       0);
3847		break;
3848	case PACKET_ROLLOVER_STATS:
3849		if (!po->rollover)
3850			return -EINVAL;
3851		rstats.tp_all = atomic_long_read(&po->rollover->num);
3852		rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
3853		rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
3854		data = &rstats;
3855		lv = sizeof(rstats);
3856		break;
3857	case PACKET_TX_HAS_OFF:
3858		val = po->tp_tx_has_off;
3859		break;
3860	case PACKET_QDISC_BYPASS:
3861		val = packet_use_direct_xmit(po);
3862		break;
3863	default:
3864		return -ENOPROTOOPT;
3865	}
3866
3867	if (len > lv)
3868		len = lv;
3869	if (put_user(len, optlen))
3870		return -EFAULT;
3871	if (copy_to_user(optval, data, len))
3872		return -EFAULT;
3873	return 0;
3874}
3875
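/*
 * A minimal userspace sketch of reading (and thereby resetting) the counters
 * returned by packet_getsockopt(PACKET_STATISTICS) above for TPACKET_V1/V2
 * sockets; note that tp_packets already includes tp_drops.
 *
 *	struct tpacket_stats st;
 *	socklen_t len = sizeof(st);
 *
 *	if (getsockopt(fd, SOL_PACKET, PACKET_STATISTICS, &st, &len) == 0)
 *		printf("%u packets, %u dropped\n", st.tp_packets, st.tp_drops);
 */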
3876
3877#ifdef CONFIG_COMPAT
3878static int compat_packet_setsockopt(struct socket *sock, int level, int optname,
3879				    char __user *optval, unsigned int optlen)
3880{
3881	struct packet_sock *po = pkt_sk(sock->sk);
3882
3883	if (level != SOL_PACKET)
3884		return -ENOPROTOOPT;
3885
3886	if (optname == PACKET_FANOUT_DATA &&
3887	    po->fanout && po->fanout->type == PACKET_FANOUT_CBPF) {
3888		optval = (char __user *)get_compat_bpf_fprog(optval);
3889		if (!optval)
3890			return -EFAULT;
3891		optlen = sizeof(struct sock_fprog);
3892	}
3893
3894	return packet_setsockopt(sock, level, optname, optval, optlen);
3895}
3896#endif
3897
3898static int packet_notifier(struct notifier_block *this,
3899			   unsigned long msg, void *ptr)
3900{
3901	struct sock *sk;
3902	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3903	struct net *net = dev_net(dev);
3904
3905	rcu_read_lock();
3906	sk_for_each_rcu(sk, &net->packet.sklist) {
3907		struct packet_sock *po = pkt_sk(sk);
3908
3909		switch (msg) {
3910		case NETDEV_UNREGISTER:
3911			if (po->mclist)
3912				packet_dev_mclist_delete(dev, &po->mclist);
3913			/* fallthrough */
3914
3915		case NETDEV_DOWN:
3916			if (dev->ifindex == po->ifindex) {
3917				spin_lock(&po->bind_lock);
3918				if (po->running) {
3919					__unregister_prot_hook(sk, false);
3920					sk->sk_err = ENETDOWN;
3921					if (!sock_flag(sk, SOCK_DEAD))
3922						sk->sk_error_report(sk);
3923				}
3924				if (msg == NETDEV_UNREGISTER) {
3925					packet_cached_dev_reset(po);
3926					po->ifindex = -1;
3927					if (po->prot_hook.dev)
3928						dev_put(po->prot_hook.dev);
3929					po->prot_hook.dev = NULL;
3930				}
3931				spin_unlock(&po->bind_lock);
3932			}
3933			break;
3934		case NETDEV_UP:
3935			if (dev->ifindex == po->ifindex) {
3936				spin_lock(&po->bind_lock);
3937				if (po->num)
3938					register_prot_hook(sk);
3939				spin_unlock(&po->bind_lock);
3940			}
3941			break;
3942		}
3943	}
3944	rcu_read_unlock();
3945	return NOTIFY_DONE;
3946}
3947
3948
3949static int packet_ioctl(struct socket *sock, unsigned int cmd,
3950			unsigned long arg)
3951{
3952	struct sock *sk = sock->sk;
3953
3954	switch (cmd) {
3955	case SIOCOUTQ:
3956	{
3957		int amount = sk_wmem_alloc_get(sk);
3958
3959		return put_user(amount, (int __user *)arg);
3960	}
3961	case SIOCINQ:
3962	{
3963		struct sk_buff *skb;
3964		int amount = 0;
3965
3966		spin_lock_bh(&sk->sk_receive_queue.lock);
3967		skb = skb_peek(&sk->sk_receive_queue);
3968		if (skb)
3969			amount = skb->len;
3970		spin_unlock_bh(&sk->sk_receive_queue.lock);
3971		return put_user(amount, (int __user *)arg);
3972	}
3973	case SIOCGSTAMP:
3974		return sock_get_timestamp(sk, (struct timeval __user *)arg);
3975	case SIOCGSTAMPNS:
3976		return sock_get_timestampns(sk, (struct timespec __user *)arg);
3977
3978#ifdef CONFIG_INET
3979	case SIOCADDRT:
3980	case SIOCDELRT:
3981	case SIOCDARP:
3982	case SIOCGARP:
3983	case SIOCSARP:
3984	case SIOCGIFADDR:
3985	case SIOCSIFADDR:
3986	case SIOCGIFBRDADDR:
3987	case SIOCSIFBRDADDR:
3988	case SIOCGIFNETMASK:
3989	case SIOCSIFNETMASK:
3990	case SIOCGIFDSTADDR:
3991	case SIOCSIFDSTADDR:
3992	case SIOCSIFFLAGS:
3993		return inet_dgram_ops.ioctl(sock, cmd, arg);
3994#endif
3995
3996	default:
3997		return -ENOIOCTLCMD;
3998	}
3999	return 0;
4000}
4001
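/*
 * A minimal userspace sketch of the queue ioctls handled by packet_ioctl()
 * above: SIOCINQ reports the length of the next queued frame, SIOCOUTQ the
 * bytes still sitting in the send queue.
 *
 *	int next_len, unsent;
 *
 *	ioctl(fd, SIOCINQ,  &next_len);
 *	ioctl(fd, SIOCOUTQ, &unsent);
 */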
4002static unsigned int packet_poll(struct file *file, struct socket *sock,
4003				poll_table *wait)
4004{
4005	struct sock *sk = sock->sk;
4006	struct packet_sock *po = pkt_sk(sk);
4007	unsigned int mask = datagram_poll(file, sock, wait);
4008
4009	spin_lock_bh(&sk->sk_receive_queue.lock);
4010	if (po->rx_ring.pg_vec) {
4011		if (!packet_previous_rx_frame(po, &po->rx_ring,
4012			TP_STATUS_KERNEL))
4013			mask |= POLLIN | POLLRDNORM;
4014	}
4015	if (po->pressure && __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
4016		po->pressure = 0;
4017	spin_unlock_bh(&sk->sk_receive_queue.lock);
4018	spin_lock_bh(&sk->sk_write_queue.lock);
4019	if (po->tx_ring.pg_vec) {
4020		if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4021			mask |= POLLOUT | POLLWRNORM;
4022	}
4023	spin_unlock_bh(&sk->sk_write_queue.lock);
4024	return mask;
4025}
4026
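/*
 * A minimal userspace sketch of waiting on a memory-mapped ring via poll(2),
 * which is serviced by packet_poll() above; frame ownership itself is still
 * signalled through the tp_status words in the ring.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);
 *	// now walk frames whose tp_status has TP_STATUS_USER set
 */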
4027
4028	/* Dirty? Well, I still have not learned a better way to account
4029 * for user mmaps.
4030 */
4031
4032static void packet_mm_open(struct vm_area_struct *vma)
4033{
4034	struct file *file = vma->vm_file;
4035	struct socket *sock = file->private_data;
4036	struct sock *sk = sock->sk;
4037
4038	if (sk)
4039		atomic_inc(&pkt_sk(sk)->mapped);
4040}
4041
4042static void packet_mm_close(struct vm_area_struct *vma)
4043{
4044	struct file *file = vma->vm_file;
4045	struct socket *sock = file->private_data;
4046	struct sock *sk = sock->sk;
4047
4048	if (sk)
4049		atomic_dec(&pkt_sk(sk)->mapped);
4050}
4051
4052static const struct vm_operations_struct packet_mmap_ops = {
4053	.open	=	packet_mm_open,
4054	.close	=	packet_mm_close,
4055};
4056
4057static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
4058			unsigned int len)
4059{
4060	int i;
4061
4062	for (i = 0; i < len; i++) {
4063		if (likely(pg_vec[i].buffer)) {
4064			if (is_vmalloc_addr(pg_vec[i].buffer))
4065				vfree(pg_vec[i].buffer);
4066			else
4067				free_pages((unsigned long)pg_vec[i].buffer,
4068					   order);
4069			pg_vec[i].buffer = NULL;
4070		}
4071	}
4072	kfree(pg_vec);
4073}
4074
4075static char *alloc_one_pg_vec_page(unsigned long order)
4076{
4077	char *buffer;
4078	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
4079			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
4080
4081	buffer = (char *) __get_free_pages(gfp_flags, order);
4082	if (buffer)
4083		return buffer;
4084
4085	/* __get_free_pages failed, fall back to vmalloc */
4086	buffer = vzalloc((1 << order) * PAGE_SIZE);
4087	if (buffer)
4088		return buffer;
4089
4090	/* vmalloc failed, let's dig into swap here */
4091	gfp_flags &= ~__GFP_NORETRY;
4092	buffer = (char *) __get_free_pages(gfp_flags, order);
4093	if (buffer)
4094		return buffer;
4095
4096	/* complete and utter failure */
4097	return NULL;
4098}
4099
4100static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
4101{
4102	unsigned int block_nr = req->tp_block_nr;
4103	struct pgv *pg_vec;
4104	int i;
4105
4106	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
4107	if (unlikely(!pg_vec))
4108		goto out;
4109
4110	for (i = 0; i < block_nr; i++) {
4111		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
4112		if (unlikely(!pg_vec[i].buffer))
4113			goto out_free_pgvec;
4114	}
4115
4116out:
4117	return pg_vec;
4118
4119out_free_pgvec:
4120	free_pg_vec(pg_vec, order, block_nr);
4121	pg_vec = NULL;
4122	goto out;
4123}
4124
4125static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
4126		int closing, int tx_ring)
4127{
4128	struct pgv *pg_vec = NULL;
4129	struct packet_sock *po = pkt_sk(sk);
4130	int was_running, order = 0;
4131	struct packet_ring_buffer *rb;
4132	struct sk_buff_head *rb_queue;
4133	__be16 num;
4134	int err = -EINVAL;
4135	/* Alias added to keep code churn minimal */
4136	struct tpacket_req *req = &req_u->req;
4137
4138	lock_sock(sk);
4139	/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
4140	if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
4141		net_warn_ratelimited("Tx-ring is not supported.\n");
4142		goto out;
4143	}
4144
4145	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
4146	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
4147
4148	err = -EBUSY;
4149	if (!closing) {
4150		if (atomic_read(&po->mapped))
4151			goto out;
4152		if (packet_read_pending(rb))
4153			goto out;
4154	}
4155
4156	if (req->tp_block_nr) {
4157		/* Sanity tests and some calculations */
4158		err = -EBUSY;
4159		if (unlikely(rb->pg_vec))
4160			goto out;
4161
4162		switch (po->tp_version) {
4163		case TPACKET_V1:
4164			po->tp_hdrlen = TPACKET_HDRLEN;
4165			break;
4166		case TPACKET_V2:
4167			po->tp_hdrlen = TPACKET2_HDRLEN;
4168			break;
4169		case TPACKET_V3:
4170			po->tp_hdrlen = TPACKET3_HDRLEN;
4171			break;
4172		}
4173
4174		err = -EINVAL;
4175		if (unlikely((int)req->tp_block_size <= 0))
4176			goto out;
4177		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
4178			goto out;
4179		if (po->tp_version >= TPACKET_V3 &&
4180		    req->tp_block_size <=
4181			  BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
4182			goto out;
4183		if (unlikely(req->tp_frame_size < po->tp_hdrlen +
4184					po->tp_reserve))
4185			goto out;
4186		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
4187			goto out;
4188
4189		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
4190		if (unlikely(rb->frames_per_block == 0))
4191			goto out;
4192		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
4193					req->tp_frame_nr))
4194			goto out;
4195
4196		err = -ENOMEM;
4197		order = get_order(req->tp_block_size);
4198		pg_vec = alloc_pg_vec(req, order);
4199		if (unlikely(!pg_vec))
4200			goto out;
4201		switch (po->tp_version) {
4202		case TPACKET_V3:
4203		/* Transmit path is not supported. We checked
4204		 * it above but just being paranoid
4205		 */
4206			if (!tx_ring)
4207				init_prb_bdqc(po, rb, pg_vec, req_u);
4208			break;
4209		default:
4210			break;
4211		}
4212	}
4213	/* Done */
4214	else {
4215		err = -EINVAL;
4216		if (unlikely(req->tp_frame_nr))
4217			goto out;
4218	}
4219
4220
4221	/* Detach socket from network */
4222	spin_lock(&po->bind_lock);
4223	was_running = po->running;
4224	num = po->num;
4225	if (was_running) {
4226		po->num = 0;
4227		__unregister_prot_hook(sk, false);
4228	}
4229	spin_unlock(&po->bind_lock);
4230
4231	synchronize_net();
4232
4233	err = -EBUSY;
4234	mutex_lock(&po->pg_vec_lock);
4235	if (closing || atomic_read(&po->mapped) == 0) {
4236		err = 0;
4237		spin_lock_bh(&rb_queue->lock);
4238		swap(rb->pg_vec, pg_vec);
4239		rb->frame_max = (req->tp_frame_nr - 1);
4240		rb->head = 0;
4241		rb->frame_size = req->tp_frame_size;
4242		spin_unlock_bh(&rb_queue->lock);
4243
4244		swap(rb->pg_vec_order, order);
4245		swap(rb->pg_vec_len, req->tp_block_nr);
4246
4247		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
4248		po->prot_hook.func = (po->rx_ring.pg_vec) ?
4249						tpacket_rcv : packet_rcv;
4250		skb_queue_purge(rb_queue);
4251		if (atomic_read(&po->mapped))
4252			pr_err("packet_mmap: vma is busy: %d\n",
4253			       atomic_read(&po->mapped));
4254	}
4255	mutex_unlock(&po->pg_vec_lock);
4256
4257	spin_lock(&po->bind_lock);
4258	if (was_running) {
4259		po->num = num;
4260		register_prot_hook(sk);
4261	}
4262	spin_unlock(&po->bind_lock);
4263	if (closing && (po->tp_version > TPACKET_V2)) {
4264		/* Because we don't support block-based V3 on tx-ring */
4265		if (!tx_ring)
4266			prb_shutdown_retire_blk_timer(po, rb_queue);
4267	}
4268
4269	if (pg_vec)
4270		free_pg_vec(pg_vec, order, req->tp_block_nr);
4271out:
4272	release_sock(sk);
4273	return err;
4274}
4275
4276static int packet_mmap(struct file *file, struct socket *sock,
4277		struct vm_area_struct *vma)
4278{
4279	struct sock *sk = sock->sk;
4280	struct packet_sock *po = pkt_sk(sk);
4281	unsigned long size, expected_size;
4282	struct packet_ring_buffer *rb;
4283	unsigned long start;
4284	int err = -EINVAL;
4285	int i;
4286
4287	if (vma->vm_pgoff)
4288		return -EINVAL;
4289
4290	mutex_lock(&po->pg_vec_lock);
4291
4292	expected_size = 0;
4293	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4294		if (rb->pg_vec) {
4295			expected_size += rb->pg_vec_len
4296						* rb->pg_vec_pages
4297						* PAGE_SIZE;
4298		}
4299	}
4300
4301	if (expected_size == 0)
4302		goto out;
4303
4304	size = vma->vm_end - vma->vm_start;
4305	if (size != expected_size)
4306		goto out;
4307
4308	start = vma->vm_start;
4309	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
4310		if (rb->pg_vec == NULL)
4311			continue;
4312
4313		for (i = 0; i < rb->pg_vec_len; i++) {
4314			struct page *page;
4315			void *kaddr = rb->pg_vec[i].buffer;
4316			int pg_num;
4317
4318			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
4319				page = pgv_to_page(kaddr);
4320				err = vm_insert_page(vma, start, page);
4321				if (unlikely(err))
4322					goto out;
4323				start += PAGE_SIZE;
4324				kaddr += PAGE_SIZE;
4325			}
4326		}
4327	}
4328
4329	atomic_inc(&po->mapped);
4330	vma->vm_ops = &packet_mmap_ops;
4331	err = 0;
4332
4333out:
4334	mutex_unlock(&po->pg_vec_lock);
4335	return err;
4336}
4337
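/*
 * A minimal userspace sketch of a TPACKET_V2 receive ring as validated by
 * packet_set_ring() and mapped by packet_mmap() above. The geometry (64
 * page-sized blocks, 2 KiB frames) is only an assumption that satisfies the
 * checks above: page-aligned blocks and frames_per_block * block_nr ==
 * frame_nr.
 *
 *	int ver = TPACKET_V2;
 *	struct tpacket_req req = {
 *		.tp_block_size = 4096,
 *		.tp_block_nr   = 64,
 *		.tp_frame_size = 2048,
 *		.tp_frame_nr   = 64 * (4096 / 2048),
 *	};
 *	size_t ring_size;
 *	void *ring;
 *	struct tpacket2_hdr *hdr;
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *	setsockopt(fd, SOL_PACKET, PACKET_RX_RING, &req, sizeof(req));
 *
 *	ring_size = (size_t)req.tp_block_size * req.tp_block_nr;
 *	ring = mmap(NULL, ring_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	hdr = ring;					// first frame
 *	if (hdr->tp_status & TP_STATUS_USER) {
 *		// consume the frame, then hand it back to the kernel:
 *		hdr->tp_status = TP_STATUS_KERNEL;
 *	}
 */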
4338static const struct proto_ops packet_ops_spkt = {
4339	.family =	PF_PACKET,
4340	.owner =	THIS_MODULE,
4341	.release =	packet_release,
4342	.bind =		packet_bind_spkt,
4343	.connect =	sock_no_connect,
4344	.socketpair =	sock_no_socketpair,
4345	.accept =	sock_no_accept,
4346	.getname =	packet_getname_spkt,
4347	.poll =		datagram_poll,
4348	.ioctl =	packet_ioctl,
4349	.listen =	sock_no_listen,
4350	.shutdown =	sock_no_shutdown,
4351	.setsockopt =	sock_no_setsockopt,
4352	.getsockopt =	sock_no_getsockopt,
4353	.sendmsg =	packet_sendmsg_spkt,
4354	.recvmsg =	packet_recvmsg,
4355	.mmap =		sock_no_mmap,
4356	.sendpage =	sock_no_sendpage,
4357};
4358
4359static const struct proto_ops packet_ops = {
4360	.family =	PF_PACKET,
4361	.owner =	THIS_MODULE,
4362	.release =	packet_release,
4363	.bind =		packet_bind,
4364	.connect =	sock_no_connect,
4365	.socketpair =	sock_no_socketpair,
4366	.accept =	sock_no_accept,
4367	.getname =	packet_getname,
4368	.poll =		packet_poll,
4369	.ioctl =	packet_ioctl,
4370	.listen =	sock_no_listen,
4371	.shutdown =	sock_no_shutdown,
4372	.setsockopt =	packet_setsockopt,
4373	.getsockopt =	packet_getsockopt,
4374#ifdef CONFIG_COMPAT
4375	.compat_setsockopt = compat_packet_setsockopt,
4376#endif
4377	.sendmsg =	packet_sendmsg,
4378	.recvmsg =	packet_recvmsg,
4379	.mmap =		packet_mmap,
4380	.sendpage =	sock_no_sendpage,
4381};
4382
4383static const struct net_proto_family packet_family_ops = {
4384	.family =	PF_PACKET,
4385	.create =	packet_create,
4386	.owner	=	THIS_MODULE,
4387};
4388
4389static struct notifier_block packet_netdev_notifier = {
4390	.notifier_call =	packet_notifier,
4391};
4392
4393#ifdef CONFIG_PROC_FS
4394
4395static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
4396	__acquires(RCU)
4397{
4398	struct net *net = seq_file_net(seq);
4399
4400	rcu_read_lock();
4401	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
4402}
4403
4404static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4405{
4406	struct net *net = seq_file_net(seq);
4407	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
4408}
4409
4410static void packet_seq_stop(struct seq_file *seq, void *v)
4411	__releases(RCU)
4412{
4413	rcu_read_unlock();
4414}
4415
4416static int packet_seq_show(struct seq_file *seq, void *v)
4417{
4418	if (v == SEQ_START_TOKEN)
4419		seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
4420	else {
4421		struct sock *s = sk_entry(v);
4422		const struct packet_sock *po = pkt_sk(s);
4423
4424		seq_printf(seq,
4425			   "%pK %-6d %-4d %04x   %-5d %1d %-6u %-6u %-6lu\n",
4426			   s,
4427			   atomic_read(&s->sk_refcnt),
4428			   s->sk_type,
4429			   ntohs(po->num),
4430			   po->ifindex,
4431			   po->running,
4432			   atomic_read(&s->sk_rmem_alloc),
4433			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
4434			   sock_i_ino(s));
4435	}
4436
4437	return 0;
4438}
4439
4440static const struct seq_operations packet_seq_ops = {
4441	.start	= packet_seq_start,
4442	.next	= packet_seq_next,
4443	.stop	= packet_seq_stop,
4444	.show	= packet_seq_show,
4445};
4446
4447static int packet_seq_open(struct inode *inode, struct file *file)
4448{
4449	return seq_open_net(inode, file, &packet_seq_ops,
4450			    sizeof(struct seq_net_private));
4451}
4452
4453static const struct file_operations packet_seq_fops = {
4454	.owner		= THIS_MODULE,
4455	.open		= packet_seq_open,
4456	.read		= seq_read,
4457	.llseek		= seq_lseek,
4458	.release	= seq_release_net,
4459};
4460
4461#endif
4462
4463static int __net_init packet_net_init(struct net *net)
4464{
4465	mutex_init(&net->packet.sklist_lock);
4466	INIT_HLIST_HEAD(&net->packet.sklist);
4467
4468	if (!proc_create("packet", 0, net->proc_net, &packet_seq_fops))
4469		return -ENOMEM;
4470
4471	return 0;
4472}
4473
4474static void __net_exit packet_net_exit(struct net *net)
4475{
4476	remove_proc_entry("packet", net->proc_net);
4477}
4478
4479static struct pernet_operations packet_net_ops = {
4480	.init = packet_net_init,
4481	.exit = packet_net_exit,
4482};
4483
4484
4485static void __exit packet_exit(void)
4486{
4487	unregister_netdevice_notifier(&packet_netdev_notifier);
4488	unregister_pernet_subsys(&packet_net_ops);
4489	sock_unregister(PF_PACKET);
4490	proto_unregister(&packet_proto);
4491}
4492
4493static int __init packet_init(void)
4494{
4495	int rc = proto_register(&packet_proto, 0);
4496
4497	if (rc != 0)
4498		goto out;
4499
4500	sock_register(&packet_family_ops);
4501	register_pernet_subsys(&packet_net_ops);
4502	register_netdevice_notifier(&packet_netdev_notifier);
4503out:
4504	return rc;
4505}
4506
4507module_init(packet_init);
4508module_exit(packet_exit);
4509MODULE_LICENSE("GPL");
4510MODULE_ALIAS_NETPROTO(PF_PACKET);