drivers/net/tun.c (Linux v6.8)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  TUN - Universal TUN/TAP device driver.
   4 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
   5 *
   6 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
   7 */
   8
   9/*
  10 *  Changes:
  11 *
  12 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  13 *    Add TUNSETLINK ioctl to set the link encapsulation
  14 *
  15 *  Mark Smith <markzzzsmith@yahoo.com.au>
  16 *    Use eth_random_addr() for tap MAC address.
  17 *
  18 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
  19 *    Fixes in packet dropping, queue length setting and queue wakeup.
  20 *    Increased default tx queue length.
  21 *    Added ethtool API.
  22 *    Minor cleanups
  23 *
  24 *  Daniel Podlejski <underley@underley.eu.org>
  25 *    Modifications for 2.3.99-pre5 kernel.
  26 */
  27
  28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  29
  30#define DRV_NAME	"tun"
  31#define DRV_VERSION	"1.6"
  32#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
  33#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
  34
  35#include <linux/module.h>
  36#include <linux/errno.h>
  37#include <linux/kernel.h>
  38#include <linux/sched/signal.h>
  39#include <linux/major.h>
  40#include <linux/slab.h>
  41#include <linux/poll.h>
  42#include <linux/fcntl.h>
  43#include <linux/init.h>
  44#include <linux/skbuff.h>
  45#include <linux/netdevice.h>
  46#include <linux/etherdevice.h>
  47#include <linux/miscdevice.h>
  48#include <linux/ethtool.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/compat.h>
  51#include <linux/if.h>
  52#include <linux/if_arp.h>
  53#include <linux/if_ether.h>
  54#include <linux/if_tun.h>
  55#include <linux/if_vlan.h>
  56#include <linux/crc32.h>
  57#include <linux/nsproxy.h>
  58#include <linux/virtio_net.h>
  59#include <linux/rcupdate.h>
  60#include <net/net_namespace.h>
  61#include <net/netns/generic.h>
  62#include <net/rtnetlink.h>
  63#include <net/sock.h>
  64#include <net/xdp.h>
  65#include <net/ip_tunnels.h>
  66#include <linux/seq_file.h>
  67#include <linux/uio.h>
  68#include <linux/skb_array.h>
  69#include <linux/bpf.h>
  70#include <linux/bpf_trace.h>
  71#include <linux/mutex.h>
  72#include <linux/ieee802154.h>
  73#include <linux/if_ltalk.h>
  74#include <uapi/linux/if_fddi.h>
  75#include <uapi/linux/if_hippi.h>
  76#include <uapi/linux/if_fc.h>
  77#include <net/ax25.h>
  78#include <net/rose.h>
  79#include <net/6lowpan.h>
  80
  81#include <linux/uaccess.h>
  82#include <linux/proc_fs.h>
  83
  84static void tun_default_link_ksettings(struct net_device *dev,
  85				       struct ethtool_link_ksettings *cmd);
  86
  87#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
  88
  89/* TUN device flags */
  90
   91/* IFF_ATTACH_QUEUE is never stored in device flags, so it is
   92 * overloaded to mean fasync when stored in tfile->flags.
   93 */
  94#define TUN_FASYNC	IFF_ATTACH_QUEUE
  95/* High bits in flags field are unused. */
  96#define TUN_VNET_LE     0x80000000
  97#define TUN_VNET_BE     0x40000000
  98
  99#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
 100		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
 101
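/* Editor's note: a minimal userspace sketch (not part of the driver) of how
 * the TUN_FEATURES bits above reach the kernel. They are the IFF_* flags a
 * caller may pass to TUNSETIFF; "tap0" is just an example name:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int fd = open("/dev/net/tun", O_RDWR);
 *	struct ifreq ifr;
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);	// creates or attaches to tap0
 */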
 102#define GOODCOPY_LEN 128
 103
 104#define FLT_EXACT_COUNT 8
 105struct tap_filter {
 106	unsigned int    count;    /* Number of addrs. Zero means disabled */
 107	u32             mask[2];  /* Mask of the hashed addrs */
 108	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 109};
 110
  111/* MAX_TAP_QUEUES 256 is chosen to allow the number of rx/tx queues to
  112 * equal the maximum number of vCPUs in a guest. */
 113#define MAX_TAP_QUEUES 256
 114#define MAX_TAP_FLOWS  4096
 115
 116#define TUN_FLOW_EXPIRE (3 * HZ)
 117
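/* Editor's note: a hedged sketch of how MAX_TAP_QUEUES is consumed. Every
 * additional fd that issues TUNSETIFF with IFF_MULTI_QUEUE and the same
 * name becomes another tun_file, i.e. another queue:
 *
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);	// example name
 *	ioctl(fd1, TUNSETIFF, &ifr);	// first open creates queue 0
 *	ioctl(fd2, TUNSETIFF, &ifr);	// second fd attaches as queue 1
 *	// a queue can later be detached and re-attached with TUNSETQUEUE
 */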
  118/* A tun_file connects an open character device to a tuntap netdevice. It
  119 * also contains all socket related structures (except sock_fprog and tap_filter)
  120 * so that it can serve as one transmit queue for the tuntap device. The
  121 * sock_fprog and tap_filter are kept in tun_struct since they are used for
  122 * filtering on the netdevice, not on a specific queue (at least I have not
  123 * seen a requirement for that).
 124 *
 125 * RCU usage:
 126 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 127 * other can only be read while rcu_read_lock or rtnl_lock is held.
 128 */
 129struct tun_file {
 130	struct sock sk;
 131	struct socket socket;
 132	struct tun_struct __rcu *tun;
 133	struct fasync_struct *fasync;
  134	/* only used for fasync */
 135	unsigned int flags;
 136	union {
 137		u16 queue_index;
 138		unsigned int ifindex;
 139	};
 140	struct napi_struct napi;
 141	bool napi_enabled;
 142	bool napi_frags_enabled;
 143	struct mutex napi_mutex;	/* Protects access to the above napi */
 144	struct list_head next;
 145	struct tun_struct *detached;
 146	struct ptr_ring tx_ring;
 147	struct xdp_rxq_info xdp_rxq;
 148};
 149
 150struct tun_page {
 151	struct page *page;
 152	int count;
 153};
 154
 155struct tun_flow_entry {
 156	struct hlist_node hash_link;
 157	struct rcu_head rcu;
 158	struct tun_struct *tun;
 159
 160	u32 rxhash;
 161	u32 rps_rxhash;
 162	int queue_index;
 163	unsigned long updated ____cacheline_aligned_in_smp;
 164};
 165
 166#define TUN_NUM_FLOW_ENTRIES 1024
 167#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
 168
 169struct tun_prog {
 170	struct rcu_head rcu;
 171	struct bpf_prog *prog;
 172};
 173
  174/* Since the socket was moved to tun_file, to preserve the behavior of a
  175 * persistent device, the socket filter, sndbuf and vnet header size are
  176 * restored when the file is attached to a persistent device.
  177 */
 178struct tun_struct {
 179	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
 180	unsigned int            numqueues;
 181	unsigned int 		flags;
 182	kuid_t			owner;
 183	kgid_t			group;
 184
 185	struct net_device	*dev;
 186	netdev_features_t	set_features;
 187#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 188			  NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)
 189
 190	int			align;
 191	int			vnet_hdr_sz;
 192	int			sndbuf;
 193	struct tap_filter	txflt;
 194	struct sock_fprog	fprog;
 195	/* protected by rtnl lock */
 196	bool			filter_attached;
 197	u32			msg_enable;
 198	spinlock_t lock;
 199	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
 200	struct timer_list flow_gc_timer;
 201	unsigned long ageing_time;
 202	unsigned int numdisabled;
 203	struct list_head disabled;
 204	void *security;
 205	u32 flow_count;
 206	u32 rx_batched;
 207	atomic_long_t rx_frame_errors;
 208	struct bpf_prog __rcu *xdp_prog;
 209	struct tun_prog __rcu *steering_prog;
 210	struct tun_prog __rcu *filter_prog;
 211	struct ethtool_link_ksettings link_ksettings;
 212	/* init args */
 213	struct file *file;
 214	struct ifreq *ifr;
 215};
 216
 217struct veth {
 218	__be16 h_vlan_proto;
 219	__be16 h_vlan_TCI;
 220};
 221
 222static void tun_flow_init(struct tun_struct *tun);
 223static void tun_flow_uninit(struct tun_struct *tun);
 224
 225static int tun_napi_receive(struct napi_struct *napi, int budget)
 226{
 227	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
 228	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
 229	struct sk_buff_head process_queue;
 230	struct sk_buff *skb;
 231	int received = 0;
 232
 233	__skb_queue_head_init(&process_queue);
 234
 235	spin_lock(&queue->lock);
 236	skb_queue_splice_tail_init(queue, &process_queue);
 237	spin_unlock(&queue->lock);
 238
 239	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
 240		napi_gro_receive(napi, skb);
 241		++received;
 242	}
 243
 244	if (!skb_queue_empty(&process_queue)) {
 245		spin_lock(&queue->lock);
 246		skb_queue_splice(&process_queue, queue);
 247		spin_unlock(&queue->lock);
 248	}
 249
 250	return received;
 251}
 252
 253static int tun_napi_poll(struct napi_struct *napi, int budget)
 254{
 255	unsigned int received;
 256
 257	received = tun_napi_receive(napi, budget);
 258
 259	if (received < budget)
 260		napi_complete_done(napi, received);
 261
 262	return received;
 263}
 264
 265static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
 266			  bool napi_en, bool napi_frags)
 267{
 268	tfile->napi_enabled = napi_en;
 269	tfile->napi_frags_enabled = napi_en && napi_frags;
 270	if (napi_en) {
 271		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
 272		napi_enable(&tfile->napi);
 273	}
 274}
 275
 276static void tun_napi_enable(struct tun_file *tfile)
 277{
 278	if (tfile->napi_enabled)
 279		napi_enable(&tfile->napi);
 280}
 281
 282static void tun_napi_disable(struct tun_file *tfile)
 283{
 284	if (tfile->napi_enabled)
 285		napi_disable(&tfile->napi);
 286}
 287
 288static void tun_napi_del(struct tun_file *tfile)
 289{
 290	if (tfile->napi_enabled)
 291		netif_napi_del(&tfile->napi);
 292}
 293
 294static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 295{
 296	return tfile->napi_frags_enabled;
 297}
 298
 299#ifdef CONFIG_TUN_VNET_CROSS_LE
 300static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 301{
 302	return tun->flags & TUN_VNET_BE ? false :
 303		virtio_legacy_is_little_endian();
 304}
 305
 306static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 307{
 308	int be = !!(tun->flags & TUN_VNET_BE);
 309
 310	if (put_user(be, argp))
 311		return -EFAULT;
 312
 313	return 0;
 314}
 315
 316static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 317{
 318	int be;
 319
 320	if (get_user(be, argp))
 321		return -EFAULT;
 322
 323	if (be)
 324		tun->flags |= TUN_VNET_BE;
 325	else
 326		tun->flags &= ~TUN_VNET_BE;
 327
 328	return 0;
 329}
 330#else
 331static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 332{
 333	return virtio_legacy_is_little_endian();
 334}
 335
 336static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 337{
 338	return -EINVAL;
 339}
 340
 341static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 342{
 343	return -EINVAL;
 344}
 345#endif /* CONFIG_TUN_VNET_CROSS_LE */
 346
 347static inline bool tun_is_little_endian(struct tun_struct *tun)
 348{
 349	return tun->flags & TUN_VNET_LE ||
 350		tun_legacy_is_little_endian(tun);
 351}
 352
 353static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
 354{
 355	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
 356}
 357
 358static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
 359{
 360	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
 361}
 362
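/* Editor's note: an illustration of how the endianness flags above are
 * driven from userspace (the ioctl handling itself appears later in this
 * file and is not shown in this excerpt). TUNSETVNETLE sets TUN_VNET_LE,
 * forcing little-endian vnet header fields, e.g. for a legacy virtio guest
 * on a big-endian host; the BE variants only work with
 * CONFIG_TUN_VNET_CROSS_LE, as the #ifdef above shows:
 *
 *	int on = 1;
 *	ioctl(fd, TUNSETVNETLE, &on);	// vnet hdr fields now little-endian
 *	ioctl(fd, TUNGETVNETLE, &on);	// read the current setting back
 */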
 363static inline u32 tun_hashfn(u32 rxhash)
 364{
 365	return rxhash & TUN_MASK_FLOW_ENTRIES;
 366}
 367
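/* Editor's note: TUN_NUM_FLOW_ENTRIES is a power of two, so the AND above
 * is an exact modulo, e.g. tun_hashfn(0x12345678) == 0x12345678 & 0x3ff ==
 * 0x278 == 0x12345678 % 1024.
 */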
 368static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 369{
 370	struct tun_flow_entry *e;
 371
 372	hlist_for_each_entry_rcu(e, head, hash_link) {
 373		if (e->rxhash == rxhash)
 374			return e;
 375	}
 376	return NULL;
 377}
 378
 379static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 380					      struct hlist_head *head,
 381					      u32 rxhash, u16 queue_index)
 382{
 383	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
 384
 385	if (e) {
 386		netif_info(tun, tx_queued, tun->dev,
 387			   "create flow: hash %u index %u\n",
 388			   rxhash, queue_index);
 389		e->updated = jiffies;
 390		e->rxhash = rxhash;
 391		e->rps_rxhash = 0;
 392		e->queue_index = queue_index;
 393		e->tun = tun;
 394		hlist_add_head_rcu(&e->hash_link, head);
 395		++tun->flow_count;
 396	}
 397	return e;
 398}
 399
 400static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 401{
 402	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
 403		   e->rxhash, e->queue_index);
 404	hlist_del_rcu(&e->hash_link);
 405	kfree_rcu(e, rcu);
 406	--tun->flow_count;
 407}
 408
 409static void tun_flow_flush(struct tun_struct *tun)
 410{
 411	int i;
 412
 413	spin_lock_bh(&tun->lock);
 414	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 415		struct tun_flow_entry *e;
 416		struct hlist_node *n;
 417
 418		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
 419			tun_flow_delete(tun, e);
 420	}
 421	spin_unlock_bh(&tun->lock);
 422}
 423
 424static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
 425{
 426	int i;
 427
 428	spin_lock_bh(&tun->lock);
 429	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 430		struct tun_flow_entry *e;
 431		struct hlist_node *n;
 432
 433		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 434			if (e->queue_index == queue_index)
 435				tun_flow_delete(tun, e);
 436		}
 437	}
 438	spin_unlock_bh(&tun->lock);
 439}
 440
 441static void tun_flow_cleanup(struct timer_list *t)
 442{
 443	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
 444	unsigned long delay = tun->ageing_time;
 445	unsigned long next_timer = jiffies + delay;
 446	unsigned long count = 0;
 447	int i;
 448
 449	spin_lock(&tun->lock);
 450	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 451		struct tun_flow_entry *e;
 452		struct hlist_node *n;
 453
 454		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 455			unsigned long this_timer;
 456
 457			this_timer = e->updated + delay;
 458			if (time_before_eq(this_timer, jiffies)) {
 459				tun_flow_delete(tun, e);
 460				continue;
 461			}
 462			count++;
 463			if (time_before(this_timer, next_timer))
 464				next_timer = this_timer;
 465		}
 466	}
 467
 468	if (count)
 469		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
 470	spin_unlock(&tun->lock);
 471}
 472
 473static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 474			    struct tun_file *tfile)
 475{
 476	struct hlist_head *head;
 477	struct tun_flow_entry *e;
 478	unsigned long delay = tun->ageing_time;
 479	u16 queue_index = tfile->queue_index;
 480
 481	head = &tun->flows[tun_hashfn(rxhash)];
 482
 483	rcu_read_lock();
 484
 485	e = tun_flow_find(head, rxhash);
 486	if (likely(e)) {
 487		/* TODO: keep queueing to old queue until it's empty? */
 488		if (READ_ONCE(e->queue_index) != queue_index)
 489			WRITE_ONCE(e->queue_index, queue_index);
 490		if (e->updated != jiffies)
 491			e->updated = jiffies;
 492		sock_rps_record_flow_hash(e->rps_rxhash);
 493	} else {
 494		spin_lock_bh(&tun->lock);
 495		if (!tun_flow_find(head, rxhash) &&
 496		    tun->flow_count < MAX_TAP_FLOWS)
 497			tun_flow_create(tun, head, rxhash, queue_index);
 498
 499		if (!timer_pending(&tun->flow_gc_timer))
 500			mod_timer(&tun->flow_gc_timer,
 501				  round_jiffies_up(jiffies + delay));
 502		spin_unlock_bh(&tun->lock);
 503	}
 504
 505	rcu_read_unlock();
 506}
 507
 508/* Save the hash received in the stack receive path and update the
 509 * flow_hash table accordingly.
 510 */
 511static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 512{
 513	if (unlikely(e->rps_rxhash != hash))
 514		e->rps_rxhash = hash;
 515}
 516
  517/* We try to identify a flow through its rxhash. The reason we do not
  518 * check the rxq number is that some NICs (e.g. the 82599) choose the
  519 * rxq based on the txq where the last packet of the flow was sent. As
  520 * the userspace application moves between processors, we may get a
  521 * different rxq number here.
  522 */
 523static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 524{
 525	struct tun_flow_entry *e;
 526	u32 txq = 0;
 527	u32 numqueues = 0;
 528
 529	numqueues = READ_ONCE(tun->numqueues);
 530
 531	txq = __skb_get_hash_symmetric(skb);
 532	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
 533	if (e) {
 534		tun_flow_save_rps_rxhash(e, txq);
 535		txq = e->queue_index;
 536	} else {
 537		/* use multiply and shift instead of expensive divide */
 538		txq = ((u64)txq * numqueues) >> 32;
 539	}
 540
 541	return txq;
 542}
 543
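/* Editor's note: the multiply-and-shift above maps a 32-bit hash uniformly
 * onto [0, numqueues) without a division. For example, with
 * txq = 0xc0000000 (three quarters of the 32-bit range) and numqueues = 8,
 * ((u64)0xc0000000 * 8) >> 32 == 6.
 */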
 544static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 545{
 546	struct tun_prog *prog;
 547	u32 numqueues;
 548	u16 ret = 0;
 549
 550	numqueues = READ_ONCE(tun->numqueues);
 551	if (!numqueues)
 552		return 0;
 553
 554	prog = rcu_dereference(tun->steering_prog);
 555	if (prog)
 556		ret = bpf_prog_run_clear_cb(prog->prog, skb);
 557
 558	return ret % numqueues;
 559}
 560
 561static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 562			    struct net_device *sb_dev)
 563{
 564	struct tun_struct *tun = netdev_priv(dev);
 565	u16 ret;
 566
 567	rcu_read_lock();
 568	if (rcu_dereference(tun->steering_prog))
 569		ret = tun_ebpf_select_queue(tun, skb);
 570	else
 571		ret = tun_automq_select_queue(tun, skb);
 572	rcu_read_unlock();
 573
 574	return ret;
 575}
 576
 577static inline bool tun_not_capable(struct tun_struct *tun)
 578{
 579	const struct cred *cred = current_cred();
 580	struct net *net = dev_net(tun->dev);
 581
 582	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
 583		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
 584		!ns_capable(net->user_ns, CAP_NET_ADMIN);
 585}
 586
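/* Editor's note: the owner/group compared above come from userspace via
 * TUNSETOWNER/TUNSETGROUP, typically combined with TUNSETPERSIST; a hedged
 * sketch, where uid 1000 is an arbitrary example:
 *
 *	ioctl(fd, TUNSETOWNER, 1000);	// later attaches need uid 1000
 *					// (or CAP_NET_ADMIN)
 *	ioctl(fd, TUNSETPERSIST, 1);	// keep device after fd is closed
 */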
 587static void tun_set_real_num_queues(struct tun_struct *tun)
 588{
 589	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
 590	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
 591}
 592
 593static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
 594{
 595	tfile->detached = tun;
 596	list_add_tail(&tfile->next, &tun->disabled);
 597	++tun->numdisabled;
 598}
 599
 600static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
 601{
 602	struct tun_struct *tun = tfile->detached;
 603
 604	tfile->detached = NULL;
 605	list_del_init(&tfile->next);
 606	--tun->numdisabled;
 607	return tun;
 608}
 609
 610void tun_ptr_free(void *ptr)
 611{
 612	if (!ptr)
 613		return;
 614	if (tun_is_xdp_frame(ptr)) {
 615		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 616
 617		xdp_return_frame(xdpf);
 618	} else {
 619		__skb_array_destroy_skb(ptr);
 620	}
 621}
 622EXPORT_SYMBOL_GPL(tun_ptr_free);
 623
 624static void tun_queue_purge(struct tun_file *tfile)
 625{
 626	void *ptr;
 627
 628	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
 629		tun_ptr_free(ptr);
 630
 631	skb_queue_purge(&tfile->sk.sk_write_queue);
 632	skb_queue_purge(&tfile->sk.sk_error_queue);
 633}
 634
 635static void __tun_detach(struct tun_file *tfile, bool clean)
 636{
 637	struct tun_file *ntfile;
 638	struct tun_struct *tun;
 639
 640	tun = rtnl_dereference(tfile->tun);
 641
 642	if (tun && clean) {
 643		if (!tfile->detached)
 644			tun_napi_disable(tfile);
 645		tun_napi_del(tfile);
 646	}
 647
 648	if (tun && !tfile->detached) {
 649		u16 index = tfile->queue_index;
 650		BUG_ON(index >= tun->numqueues);
 651
 652		rcu_assign_pointer(tun->tfiles[index],
 653				   tun->tfiles[tun->numqueues - 1]);
 654		ntfile = rtnl_dereference(tun->tfiles[index]);
 655		ntfile->queue_index = index;
 656		ntfile->xdp_rxq.queue_index = index;
 657		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
 658				   NULL);
 659
 660		--tun->numqueues;
 661		if (clean) {
 662			RCU_INIT_POINTER(tfile->tun, NULL);
 663			sock_put(&tfile->sk);
 664		} else {
 665			tun_disable_queue(tun, tfile);
 666			tun_napi_disable(tfile);
 667		}
 668
 669		synchronize_net();
 670		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
 671		/* Drop read queue */
 672		tun_queue_purge(tfile);
 673		tun_set_real_num_queues(tun);
 674	} else if (tfile->detached && clean) {
 675		tun = tun_enable_queue(tfile);
 676		sock_put(&tfile->sk);
 677	}
 678
 679	if (clean) {
 680		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
 681			netif_carrier_off(tun->dev);
 682
 683			if (!(tun->flags & IFF_PERSIST) &&
 684			    tun->dev->reg_state == NETREG_REGISTERED)
 685				unregister_netdevice(tun->dev);
 686		}
 687		if (tun)
 688			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 689		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
 690	}
 691}
 692
 693static void tun_detach(struct tun_file *tfile, bool clean)
 694{
 695	struct tun_struct *tun;
 696	struct net_device *dev;
 697
 698	rtnl_lock();
 699	tun = rtnl_dereference(tfile->tun);
 700	dev = tun ? tun->dev : NULL;
 701	__tun_detach(tfile, clean);
 702	if (dev)
 703		netdev_state_change(dev);
 704	rtnl_unlock();
 705
 706	if (clean)
 707		sock_put(&tfile->sk);
 708}
 709
 710static void tun_detach_all(struct net_device *dev)
 711{
 712	struct tun_struct *tun = netdev_priv(dev);
 713	struct tun_file *tfile, *tmp;
 714	int i, n = tun->numqueues;
 715
 716	for (i = 0; i < n; i++) {
 717		tfile = rtnl_dereference(tun->tfiles[i]);
 718		BUG_ON(!tfile);
 719		tun_napi_disable(tfile);
 720		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 721		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 722		RCU_INIT_POINTER(tfile->tun, NULL);
 723		--tun->numqueues;
 724	}
 725	list_for_each_entry(tfile, &tun->disabled, next) {
 726		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 727		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 728		RCU_INIT_POINTER(tfile->tun, NULL);
 729	}
 730	BUG_ON(tun->numqueues != 0);
 731
 732	synchronize_net();
 733	for (i = 0; i < n; i++) {
 734		tfile = rtnl_dereference(tun->tfiles[i]);
 735		tun_napi_del(tfile);
 736		/* Drop read queue */
 737		tun_queue_purge(tfile);
 738		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 739		sock_put(&tfile->sk);
 740	}
 741	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 742		tun_napi_del(tfile);
 743		tun_enable_queue(tfile);
 744		tun_queue_purge(tfile);
 745		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 746		sock_put(&tfile->sk);
 747	}
 748	BUG_ON(tun->numdisabled != 0);
 749
 750	if (tun->flags & IFF_PERSIST)
 751		module_put(THIS_MODULE);
 752}
 753
 754static int tun_attach(struct tun_struct *tun, struct file *file,
 755		      bool skip_filter, bool napi, bool napi_frags,
 756		      bool publish_tun)
 757{
 758	struct tun_file *tfile = file->private_data;
 759	struct net_device *dev = tun->dev;
 760	int err;
 761
 762	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
 763	if (err < 0)
 764		goto out;
 765
 766	err = -EINVAL;
 767	if (rtnl_dereference(tfile->tun) && !tfile->detached)
 768		goto out;
 769
 770	err = -EBUSY;
 771	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
 772		goto out;
 773
 774	err = -E2BIG;
 775	if (!tfile->detached &&
 776	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
 777		goto out;
 778
 779	err = 0;
 780
 781	/* Re-attach the filter to persist device */
 782	if (!skip_filter && (tun->filter_attached == true)) {
 783		lock_sock(tfile->socket.sk);
 784		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 785		release_sock(tfile->socket.sk);
 786		if (!err)
 787			goto out;
 788	}
 789
 790	if (!tfile->detached &&
 791	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
 792			    GFP_KERNEL, tun_ptr_free)) {
 793		err = -ENOMEM;
 794		goto out;
 795	}
 796
 797	tfile->queue_index = tun->numqueues;
 798	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
 799
 800	if (tfile->detached) {
 801		/* Re-attach detached tfile, updating XDP queue_index */
 802		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
 803
 804		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
 805			tfile->xdp_rxq.queue_index = tfile->queue_index;
 806	} else {
 807		/* Setup XDP RX-queue info, for new tfile getting attached */
 808		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
 809				       tun->dev, tfile->queue_index, 0);
 810		if (err < 0)
 811			goto out;
 812		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
 813						 MEM_TYPE_PAGE_SHARED, NULL);
 814		if (err < 0) {
 815			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 816			goto out;
 817		}
 818		err = 0;
 819	}
 820
 821	if (tfile->detached) {
 822		tun_enable_queue(tfile);
 823		tun_napi_enable(tfile);
 824	} else {
 825		sock_hold(&tfile->sk);
 826		tun_napi_init(tun, tfile, napi, napi_frags);
 827	}
 828
 829	if (rtnl_dereference(tun->xdp_prog))
 830		sock_set_flag(&tfile->sk, SOCK_XDP);
 831
  832	/* The device is allowed to go away first, so there is no need to
  833	 * hold an extra refcnt.
  834	 */
 835
 836	/* Publish tfile->tun and tun->tfiles only after we've fully
  837	 * initialized tfile; otherwise we risk using a half-initialized
  838	 * object.
 839	 */
 840	if (publish_tun)
 841		rcu_assign_pointer(tfile->tun, tun);
 842	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 843	tun->numqueues++;
 844	tun_set_real_num_queues(tun);
 845out:
 846	return err;
 847}
 848
 849static struct tun_struct *tun_get(struct tun_file *tfile)
 850{
 851	struct tun_struct *tun;
 852
 853	rcu_read_lock();
 854	tun = rcu_dereference(tfile->tun);
 855	if (tun)
 856		dev_hold(tun->dev);
 857	rcu_read_unlock();
 858
 859	return tun;
 860}
 861
 862static void tun_put(struct tun_struct *tun)
 863{
 864	dev_put(tun->dev);
 865}
 866
 867/* TAP filtering */
 868static void addr_hash_set(u32 *mask, const u8 *addr)
 869{
 870	int n = ether_crc(ETH_ALEN, addr) >> 26;
 871	mask[n >> 5] |= (1 << (n & 31));
 872}
 873
 874static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
 875{
 876	int n = ether_crc(ETH_ALEN, addr) >> 26;
 877	return mask[n >> 5] & (1 << (n & 31));
 878}
 879
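/* Editor's note: the two helpers above form a 64-bin multicast hash
 * filter. n = crc >> 26 keeps the top six CRC bits (0..63); n >> 5 picks
 * one of the two 32-bit mask words and n & 31 the bit inside it. A false
 * positive merely lets an extra multicast packet through.
 */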
 880static int update_filter(struct tap_filter *filter, void __user *arg)
 881{
 882	struct { u8 u[ETH_ALEN]; } *addr;
 883	struct tun_filter uf;
 884	int err, alen, n, nexact;
 885
 886	if (copy_from_user(&uf, arg, sizeof(uf)))
 887		return -EFAULT;
 888
 889	if (!uf.count) {
 890		/* Disabled */
 891		filter->count = 0;
 892		return 0;
 893	}
 894
 895	alen = ETH_ALEN * uf.count;
 896	addr = memdup_user(arg + sizeof(uf), alen);
 897	if (IS_ERR(addr))
 898		return PTR_ERR(addr);
 899
  900	/* The filter is updated without holding any locks, which is
  901	 * perfectly safe: we disable it first, and in the worst
  902	 * case we'll accept a few undesired packets. */
 903	filter->count = 0;
 904	wmb();
 905
 906	/* Use first set of addresses as an exact filter */
 907	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
 908		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
 909
 910	nexact = n;
 911
 912	/* Remaining multicast addresses are hashed,
 913	 * unicast will leave the filter disabled. */
 914	memset(filter->mask, 0, sizeof(filter->mask));
 915	for (; n < uf.count; n++) {
 916		if (!is_multicast_ether_addr(addr[n].u)) {
 917			err = 0; /* no filter */
 918			goto free_addr;
 919		}
 920		addr_hash_set(filter->mask, addr[n].u);
 921	}
 922
 923	/* For ALLMULTI just set the mask to all ones.
 924	 * This overrides the mask populated above. */
 925	if ((uf.flags & TUN_FLT_ALLMULTI))
 926		memset(filter->mask, ~0, sizeof(filter->mask));
 927
 928	/* Now enable the filter */
 929	wmb();
 930	filter->count = nexact;
 931
 932	/* Return the number of exact filters */
 933	err = nexact;
 934free_addr:
 935	kfree(addr);
 936	return err;
 937}
 938
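/* Editor's note: a hedged userspace sketch of feeding update_filter()
 * through TUNSETTXFILTER. struct tun_filter is followed in memory by
 * `count` six-byte addresses; `macs` is a hypothetical array of the MACs
 * to accept:
 *
 *	unsigned char buf[sizeof(struct tun_filter) + 2 * ETH_ALEN];
 *	struct tun_filter *f = (struct tun_filter *)buf;
 *	f->flags = 0;			// or TUN_FLT_ALLMULTI
 *	f->count = 2;
 *	memcpy(f->addr, macs, 2 * ETH_ALEN);
 *	ioctl(fd, TUNSETTXFILTER, f);
 */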
 939/* Returns: 0 - drop, !=0 - accept */
 940static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
 941{
  942	/* Cannot use eth_hdr(skb) here because skb_mac_header() is not set
  943	 * correctly at this point. */
 944	struct ethhdr *eh = (struct ethhdr *) skb->data;
 945	int i;
 946
 947	/* Exact match */
 948	for (i = 0; i < filter->count; i++)
 949		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
 950			return 1;
 951
 952	/* Inexact match (multicast only) */
 953	if (is_multicast_ether_addr(eh->h_dest))
 954		return addr_hash_test(filter->mask, eh->h_dest);
 955
 956	return 0;
 957}
 958
 959/*
 960 * Checks whether the packet is accepted or not.
 961 * Returns: 0 - drop, !=0 - accept
 962 */
 963static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 964{
 965	if (!filter->count)
 966		return 1;
 967
 968	return run_filter(filter, skb);
 969}
 970
 971/* Network device part of the driver */
 972
 973static const struct ethtool_ops tun_ethtool_ops;
 974
 975static int tun_net_init(struct net_device *dev)
 976{
 977	struct tun_struct *tun = netdev_priv(dev);
 978	struct ifreq *ifr = tun->ifr;
 979	int err;
 980
 981	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
 982	if (!dev->tstats)
 983		return -ENOMEM;
 984
 985	spin_lock_init(&tun->lock);
 986
 987	err = security_tun_dev_alloc_security(&tun->security);
 988	if (err < 0) {
 989		free_percpu(dev->tstats);
 990		return err;
 991	}
 992
 993	tun_flow_init(tun);
 994
 995	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
 996			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
 997			   NETIF_F_HW_VLAN_STAG_TX;
 998	dev->features = dev->hw_features | NETIF_F_LLTX;
 999	dev->vlan_features = dev->features &
1000			     ~(NETIF_F_HW_VLAN_CTAG_TX |
1001			       NETIF_F_HW_VLAN_STAG_TX);
1002
1003	tun->flags = (tun->flags & ~TUN_FEATURES) |
1004		      (ifr->ifr_flags & TUN_FEATURES);
1005
1006	INIT_LIST_HEAD(&tun->disabled);
1007	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
1008			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
1009	if (err < 0) {
1010		tun_flow_uninit(tun);
1011		security_tun_dev_free_security(tun->security);
1012		free_percpu(dev->tstats);
1013		return err;
1014	}
1015	return 0;
1016}
1017
1018/* Net device detach from fd. */
1019static void tun_net_uninit(struct net_device *dev)
1020{
1021	tun_detach_all(dev);
1022}
1023
1024/* Net device open. */
1025static int tun_net_open(struct net_device *dev)
1026{
1027	netif_tx_start_all_queues(dev);
1028
1029	return 0;
1030}
1031
1032/* Net device close. */
1033static int tun_net_close(struct net_device *dev)
1034{
1035	netif_tx_stop_all_queues(dev);
1036	return 0;
1037}
1038
1039/* Net device start xmit */
1040static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1041{
1042#ifdef CONFIG_RPS
1043	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1044		/* Select queue was not called for the skbuff, so we extract the
1045		 * RPS hash and save it into the flow_table here.
1046		 */
1047		struct tun_flow_entry *e;
1048		__u32 rxhash;
1049
1050		rxhash = __skb_get_hash_symmetric(skb);
1051		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1052		if (e)
1053			tun_flow_save_rps_rxhash(e, rxhash);
1054	}
1055#endif
1056}
1057
1058static unsigned int run_ebpf_filter(struct tun_struct *tun,
1059				    struct sk_buff *skb,
1060				    int len)
1061{
1062	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1063
1064	if (prog)
1065		len = bpf_prog_run_clear_cb(prog->prog, skb);
1066
1067	return len;
1068}
1069
1070/* Net device start xmit */
1071static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1072{
1073	struct tun_struct *tun = netdev_priv(dev);
1074	enum skb_drop_reason drop_reason;
1075	int txq = skb->queue_mapping;
1076	struct netdev_queue *queue;
1077	struct tun_file *tfile;
1078	int len = skb->len;
1079
1080	rcu_read_lock();
1081	tfile = rcu_dereference(tun->tfiles[txq]);
1082
1083	/* Drop packet if interface is not attached */
1084	if (!tfile) {
1085		drop_reason = SKB_DROP_REASON_DEV_READY;
1086		goto drop;
1087	}
1088
1089	if (!rcu_dereference(tun->steering_prog))
1090		tun_automq_xmit(tun, skb);
1091
1092	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1093
1094	/* Drop if the filter does not like it.
1095	 * This is a noop if the filter is disabled.
 1096	 * Filter can be enabled only for TAP devices. */
1097	if (!check_filter(&tun->txflt, skb)) {
1098		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
1099		goto drop;
1100	}
1101
1102	if (tfile->socket.sk->sk_filter &&
1103	    sk_filter(tfile->socket.sk, skb)) {
1104		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1105		goto drop;
1106	}
1107
1108	len = run_ebpf_filter(tun, skb, len);
1109	if (len == 0) {
1110		drop_reason = SKB_DROP_REASON_TAP_FILTER;
1111		goto drop;
1112	}
1113
1114	if (pskb_trim(skb, len)) {
1115		drop_reason = SKB_DROP_REASON_NOMEM;
1116		goto drop;
1117	}
1118
1119	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
1120		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1121		goto drop;
1122	}
1123
1124	skb_tx_timestamp(skb);
1125
 1126	/* Orphan the skb - required as we might hang on to it
 1127	 * for an indefinite time.
 1128	 */
1129	skb_orphan(skb);
1130
1131	nf_reset_ct(skb);
1132
1133	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
1134		drop_reason = SKB_DROP_REASON_FULL_RING;
1135		goto drop;
1136	}
1137
 1138	/* NETIF_F_LLTX requires us to do our own update of trans_start */
1139	queue = netdev_get_tx_queue(dev, txq);
1140	txq_trans_cond_update(queue);
1141
1142	/* Notify and wake up reader process */
1143	if (tfile->flags & TUN_FASYNC)
1144		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1145	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1146
1147	rcu_read_unlock();
1148	return NETDEV_TX_OK;
1149
1150drop:
1151	dev_core_stats_tx_dropped_inc(dev);
1152	skb_tx_error(skb);
1153	kfree_skb_reason(skb, drop_reason);
1154	rcu_read_unlock();
1155	return NET_XMIT_DROP;
1156}
1157
1158static void tun_net_mclist(struct net_device *dev)
1159{
1160	/*
 1161	 * This callback is supposed to deal with the mc filter in the
 1162	 * _rx_ path and has nothing to do with the _tx_ path.
 1163	 * In the rx path we always accept everything userspace gives us.
1164	 */
1165}
1166
1167static netdev_features_t tun_net_fix_features(struct net_device *dev,
1168	netdev_features_t features)
1169{
1170	struct tun_struct *tun = netdev_priv(dev);
1171
1172	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1173}
1174
1175static void tun_set_headroom(struct net_device *dev, int new_hr)
1176{
1177	struct tun_struct *tun = netdev_priv(dev);
1178
1179	if (new_hr < NET_SKB_PAD)
1180		new_hr = NET_SKB_PAD;
1181
1182	tun->align = new_hr;
1183}
1184
1185static void
1186tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1187{
1188	struct tun_struct *tun = netdev_priv(dev);
1189
1190	dev_get_tstats64(dev, stats);
1191
1192	stats->rx_frame_errors +=
1193		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
1194}
1195
1196static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1197		       struct netlink_ext_ack *extack)
1198{
1199	struct tun_struct *tun = netdev_priv(dev);
1200	struct tun_file *tfile;
1201	struct bpf_prog *old_prog;
1202	int i;
1203
1204	old_prog = rtnl_dereference(tun->xdp_prog);
1205	rcu_assign_pointer(tun->xdp_prog, prog);
1206	if (old_prog)
1207		bpf_prog_put(old_prog);
1208
1209	for (i = 0; i < tun->numqueues; i++) {
1210		tfile = rtnl_dereference(tun->tfiles[i]);
1211		if (prog)
1212			sock_set_flag(&tfile->sk, SOCK_XDP);
1213		else
1214			sock_reset_flag(&tfile->sk, SOCK_XDP);
1215	}
1216	list_for_each_entry(tfile, &tun->disabled, next) {
1217		if (prog)
1218			sock_set_flag(&tfile->sk, SOCK_XDP);
1219		else
1220			sock_reset_flag(&tfile->sk, SOCK_XDP);
1221	}
1222
1223	return 0;
1224}
1225
1226static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1227{
1228	switch (xdp->command) {
1229	case XDP_SETUP_PROG:
1230		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1231	default:
1232		return -EINVAL;
1233	}
1234}
1235
1236static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1237{
1238	if (new_carrier) {
1239		struct tun_struct *tun = netdev_priv(dev);
1240
1241		if (!tun->numqueues)
1242			return -EPERM;
1243
1244		netif_carrier_on(dev);
1245	} else {
1246		netif_carrier_off(dev);
1247	}
1248	return 0;
1249}
1250
1251static const struct net_device_ops tun_netdev_ops = {
1252	.ndo_init		= tun_net_init,
1253	.ndo_uninit		= tun_net_uninit,
1254	.ndo_open		= tun_net_open,
1255	.ndo_stop		= tun_net_close,
1256	.ndo_start_xmit		= tun_net_xmit,
1257	.ndo_fix_features	= tun_net_fix_features,
1258	.ndo_select_queue	= tun_select_queue,
1259	.ndo_set_rx_headroom	= tun_set_headroom,
1260	.ndo_get_stats64	= tun_net_get_stats64,
1261	.ndo_change_carrier	= tun_net_change_carrier,
1262};
1263
1264static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1265{
1266	/* Notify and wake up reader process */
1267	if (tfile->flags & TUN_FASYNC)
1268		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1269	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1270}
1271
1272static int tun_xdp_xmit(struct net_device *dev, int n,
1273			struct xdp_frame **frames, u32 flags)
1274{
1275	struct tun_struct *tun = netdev_priv(dev);
1276	struct tun_file *tfile;
1277	u32 numqueues;
1278	int nxmit = 0;
1279	int i;
1280
1281	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1282		return -EINVAL;
1283
1284	rcu_read_lock();
1285
1286resample:
1287	numqueues = READ_ONCE(tun->numqueues);
1288	if (!numqueues) {
1289		rcu_read_unlock();
1290		return -ENXIO; /* Caller will free/return all frames */
1291	}
1292
1293	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1294					    numqueues]);
1295	if (unlikely(!tfile))
1296		goto resample;
1297
1298	spin_lock(&tfile->tx_ring.producer_lock);
1299	for (i = 0; i < n; i++) {
1300		struct xdp_frame *xdp = frames[i];
 1301		/* Encode the XDP flag into the lowest bit so the consumer can
 1302		 * distinguish an XDP frame from an sk_buff.
 1303		 */
1304		void *frame = tun_xdp_to_ptr(xdp);
1305
1306		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1307			dev_core_stats_tx_dropped_inc(dev);
1308			break;
1309		}
1310		nxmit++;
1311	}
1312	spin_unlock(&tfile->tx_ring.producer_lock);
1313
1314	if (flags & XDP_XMIT_FLUSH)
1315		__tun_xdp_flush_tfile(tfile);
1316
1317	rcu_read_unlock();
1318	return nxmit;
1319}
1320
1321static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1322{
1323	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
1324	int nxmit;
1325
1326	if (unlikely(!frame))
1327		return -EOVERFLOW;
1328
1329	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1330	if (!nxmit)
1331		xdp_return_frame_rx_napi(frame);
1332	return nxmit;
1333}
1334
1335static const struct net_device_ops tap_netdev_ops = {
1336	.ndo_init		= tun_net_init,
1337	.ndo_uninit		= tun_net_uninit,
1338	.ndo_open		= tun_net_open,
1339	.ndo_stop		= tun_net_close,
1340	.ndo_start_xmit		= tun_net_xmit,
1341	.ndo_fix_features	= tun_net_fix_features,
1342	.ndo_set_rx_mode	= tun_net_mclist,
1343	.ndo_set_mac_address	= eth_mac_addr,
1344	.ndo_validate_addr	= eth_validate_addr,
1345	.ndo_select_queue	= tun_select_queue,
1346	.ndo_features_check	= passthru_features_check,
1347	.ndo_set_rx_headroom	= tun_set_headroom,
1348	.ndo_get_stats64	= dev_get_tstats64,
1349	.ndo_bpf		= tun_xdp,
1350	.ndo_xdp_xmit		= tun_xdp_xmit,
1351	.ndo_change_carrier	= tun_net_change_carrier,
1352};
1353
1354static void tun_flow_init(struct tun_struct *tun)
1355{
1356	int i;
1357
1358	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1359		INIT_HLIST_HEAD(&tun->flows[i]);
1360
1361	tun->ageing_time = TUN_FLOW_EXPIRE;
1362	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1363	mod_timer(&tun->flow_gc_timer,
1364		  round_jiffies_up(jiffies + tun->ageing_time));
1365}
1366
1367static void tun_flow_uninit(struct tun_struct *tun)
1368{
1369	del_timer_sync(&tun->flow_gc_timer);
1370	tun_flow_flush(tun);
1371}
1372
1373#define MIN_MTU 68
1374#define MAX_MTU 65535
1375
1376/* Initialize net device. */
1377static void tun_net_initialize(struct net_device *dev)
1378{
1379	struct tun_struct *tun = netdev_priv(dev);
1380
1381	switch (tun->flags & TUN_TYPE_MASK) {
1382	case IFF_TUN:
1383		dev->netdev_ops = &tun_netdev_ops;
1384		dev->header_ops = &ip_tunnel_header_ops;
1385
1386		/* Point-to-Point TUN Device */
1387		dev->hard_header_len = 0;
1388		dev->addr_len = 0;
1389		dev->mtu = 1500;
1390
1391		/* Zero header length */
1392		dev->type = ARPHRD_NONE;
1393		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1394		break;
1395
1396	case IFF_TAP:
1397		dev->netdev_ops = &tap_netdev_ops;
1398		/* Ethernet TAP Device */
1399		ether_setup(dev);
1400		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1401		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1402
1403		eth_hw_addr_random(dev);
1404
1405		/* Currently tun does not support XDP, only tap does. */
1406		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
1407				    NETDEV_XDP_ACT_REDIRECT |
1408				    NETDEV_XDP_ACT_NDO_XMIT;
1409
1410		break;
1411	}
1412
1413	dev->min_mtu = MIN_MTU;
1414	dev->max_mtu = MAX_MTU - dev->hard_header_len;
1415}
1416
1417static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1418{
1419	struct sock *sk = tfile->socket.sk;
1420
1421	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1422}
1423
1424/* Character device part */
1425
1426/* Poll */
1427static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1428{
1429	struct tun_file *tfile = file->private_data;
1430	struct tun_struct *tun = tun_get(tfile);
1431	struct sock *sk;
1432	__poll_t mask = 0;
1433
1434	if (!tun)
1435		return EPOLLERR;
1436
1437	sk = tfile->socket.sk;
1438
1439	poll_wait(file, sk_sleep(sk), wait);
1440
1441	if (!ptr_ring_empty(&tfile->tx_ring))
1442		mask |= EPOLLIN | EPOLLRDNORM;
1443
 1444	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
 1445	 * guarantee that EPOLLOUT is raised either here or in
 1446	 * tun_sock_write_space(). Then the process can get a notification
 1447	 * after it writes to a down device and meets -EIO.
1448	 */
1449	if (tun_sock_writeable(tun, tfile) ||
1450	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1451	     tun_sock_writeable(tun, tfile)))
1452		mask |= EPOLLOUT | EPOLLWRNORM;
1453
1454	if (tun->dev->reg_state != NETREG_REGISTERED)
1455		mask = EPOLLERR;
1456
1457	tun_put(tun);
1458	return mask;
1459}
1460
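/* Editor's note: seen from userspace, the poll hook above behaves like a
 * socket: a reader can block until the tx ring has something to consume.
 * A minimal sketch:
 *
 *	struct pollfd pfd = { .fd = tunfd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(tunfd, pkt, sizeof(pkt));	// one packet per read
 */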
1461static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1462					    size_t len,
1463					    const struct iov_iter *it)
1464{
1465	struct sk_buff *skb;
1466	size_t linear;
1467	int err;
1468	int i;
1469
1470	if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
1471	    len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
1472		return ERR_PTR(-EMSGSIZE);
1473
1474	local_bh_disable();
1475	skb = napi_get_frags(&tfile->napi);
1476	local_bh_enable();
1477	if (!skb)
1478		return ERR_PTR(-ENOMEM);
1479
1480	linear = iov_iter_single_seg_count(it);
1481	err = __skb_grow(skb, linear);
1482	if (err)
1483		goto free;
1484
1485	skb->len = len;
1486	skb->data_len = len - linear;
1487	skb->truesize += skb->data_len;
1488
1489	for (i = 1; i < it->nr_segs; i++) {
 1490		const struct iovec *iov = iter_iov(it) + i;
1491		size_t fragsz = iov->iov_len;
1492		struct page *page;
1493		void *frag;
1494
1495		if (fragsz == 0 || fragsz > PAGE_SIZE) {
1496			err = -EINVAL;
1497			goto free;
1498		}
1499		frag = netdev_alloc_frag(fragsz);
1500		if (!frag) {
1501			err = -ENOMEM;
1502			goto free;
1503		}
1504		page = virt_to_head_page(frag);
1505		skb_fill_page_desc(skb, i - 1, page,
1506				   frag - page_address(page), fragsz);
1507	}
1508
1509	return skb;
1510free:
1511	/* frees skb and all frags allocated with napi_alloc_frag() */
1512	napi_free_frags(&tfile->napi);
1513	return ERR_PTR(err);
1514}
1515
1516/* prepad is the amount to reserve at front.  len is length after that.
1517 * linear is a hint as to how much to copy (usually headers). */
1518static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1519				     size_t prepad, size_t len,
1520				     size_t linear, int noblock)
1521{
1522	struct sock *sk = tfile->socket.sk;
1523	struct sk_buff *skb;
1524	int err;
1525
1526	/* Under a page?  Don't bother with paged skb. */
1527	if (prepad + len < PAGE_SIZE)
1528		linear = len;
1529
1530	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
1531		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
1532	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1533				   &err, PAGE_ALLOC_COSTLY_ORDER);
1534	if (!skb)
1535		return ERR_PTR(err);
1536
1537	skb_reserve(skb, prepad);
1538	skb_put(skb, linear);
1539	skb->data_len = len - linear;
1540	skb->len += len - linear;
1541
1542	return skb;
1543}
1544
1545static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1546			   struct sk_buff *skb, int more)
1547{
1548	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1549	struct sk_buff_head process_queue;
1550	u32 rx_batched = tun->rx_batched;
1551	bool rcv = false;
1552
1553	if (!rx_batched || (!more && skb_queue_empty(queue))) {
1554		local_bh_disable();
1555		skb_record_rx_queue(skb, tfile->queue_index);
1556		netif_receive_skb(skb);
1557		local_bh_enable();
1558		return;
1559	}
1560
1561	spin_lock(&queue->lock);
1562	if (!more || skb_queue_len(queue) == rx_batched) {
1563		__skb_queue_head_init(&process_queue);
1564		skb_queue_splice_tail_init(queue, &process_queue);
1565		rcv = true;
1566	} else {
1567		__skb_queue_tail(queue, skb);
1568	}
1569	spin_unlock(&queue->lock);
1570
1571	if (rcv) {
1572		struct sk_buff *nskb;
1573
1574		local_bh_disable();
1575		while ((nskb = __skb_dequeue(&process_queue))) {
1576			skb_record_rx_queue(nskb, tfile->queue_index);
1577			netif_receive_skb(nskb);
1578		}
1579		skb_record_rx_queue(skb, tfile->queue_index);
1580		netif_receive_skb(skb);
1581		local_bh_enable();
1582	}
1583}
1584
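/* Editor's note: tun->rx_batched used above defaults to 0 (no batching);
 * it is set through the driver's ethtool coalescing hook (defined later in
 * this file, not shown in this excerpt), i.e. roughly
 * `ethtool -C <dev> rx-frames N` from userspace, allowing held-back skbs
 * to be flushed into the stack in one batch.
 */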
1585static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1586			      int len, int noblock, bool zerocopy)
1587{
1588	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1589		return false;
1590
1591	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1592		return false;
1593
1594	if (!noblock)
1595		return false;
1596
1597	if (zerocopy)
1598		return false;
1599
1600	if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
1601	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1602		return false;
1603
1604	return true;
1605}
1606
1607static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1608				       struct page_frag *alloc_frag, char *buf,
1609				       int buflen, int len, int pad)
1610{
1611	struct sk_buff *skb = build_skb(buf, buflen);
1612
1613	if (!skb)
1614		return ERR_PTR(-ENOMEM);
1615
1616	skb_reserve(skb, pad);
1617	skb_put(skb, len);
1618	skb_set_owner_w(skb, tfile->socket.sk);
1619
1620	get_page(alloc_frag->page);
1621	alloc_frag->offset += buflen;
1622
1623	return skb;
1624}
1625
1626static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1627		       struct xdp_buff *xdp, u32 act)
1628{
1629	int err;
1630
1631	switch (act) {
1632	case XDP_REDIRECT:
1633		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1634		if (err) {
1635			dev_core_stats_rx_dropped_inc(tun->dev);
1636			return err;
1637		}
1638		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1639		break;
1640	case XDP_TX:
1641		err = tun_xdp_tx(tun->dev, xdp);
1642		if (err < 0) {
1643			dev_core_stats_rx_dropped_inc(tun->dev);
1644			return err;
1645		}
1646		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1647		break;
1648	case XDP_PASS:
1649		break;
1650	default:
1651		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
1652		fallthrough;
1653	case XDP_ABORTED:
1654		trace_xdp_exception(tun->dev, xdp_prog, act);
1655		fallthrough;
1656	case XDP_DROP:
1657		dev_core_stats_rx_dropped_inc(tun->dev);
1658		break;
1659	}
1660
1661	return act;
1662}
1663
1664static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1665				     struct tun_file *tfile,
1666				     struct iov_iter *from,
1667				     struct virtio_net_hdr *hdr,
1668				     int len, int *skb_xdp)
1669{
1670	struct page_frag *alloc_frag = &current->task_frag;
1671	struct bpf_prog *xdp_prog;
1672	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1673	char *buf;
1674	size_t copied;
1675	int pad = TUN_RX_PAD;
1676	int err = 0;
1677
1678	rcu_read_lock();
1679	xdp_prog = rcu_dereference(tun->xdp_prog);
1680	if (xdp_prog)
1681		pad += XDP_PACKET_HEADROOM;
1682	buflen += SKB_DATA_ALIGN(len + pad);
1683	rcu_read_unlock();
1684
1685	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1686	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1687		return ERR_PTR(-ENOMEM);
1688
1689	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1690	copied = copy_page_from_iter(alloc_frag->page,
1691				     alloc_frag->offset + pad,
1692				     len, from);
1693	if (copied != len)
1694		return ERR_PTR(-EFAULT);
1695
 1696	/* There's a small window in which XDP may be set after the check
 1697	 * of xdp_prog above; this should be rare and, for simplicity,
 1698	 * we do XDP on the skb in case the headroom is not enough.
 1699	 */
1700	if (hdr->gso_type || !xdp_prog) {
1701		*skb_xdp = 1;
1702		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1703				       pad);
1704	}
1705
1706	*skb_xdp = 0;
1707
1708	local_bh_disable();
1709	rcu_read_lock();
1710	xdp_prog = rcu_dereference(tun->xdp_prog);
1711	if (xdp_prog) {
1712		struct xdp_buff xdp;
1713		u32 act;
1714
1715		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
1716		xdp_prepare_buff(&xdp, buf, pad, len, false);
1717
1718		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1719		if (act == XDP_REDIRECT || act == XDP_TX) {
1720			get_page(alloc_frag->page);
1721			alloc_frag->offset += buflen;
1722		}
1723		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1724		if (err < 0) {
1725			if (act == XDP_REDIRECT || act == XDP_TX)
1726				put_page(alloc_frag->page);
1727			goto out;
1728		}
1729
1730		if (err == XDP_REDIRECT)
1731			xdp_do_flush();
1732		if (err != XDP_PASS)
1733			goto out;
1734
1735		pad = xdp.data - xdp.data_hard_start;
1736		len = xdp.data_end - xdp.data;
1737	}
1738	rcu_read_unlock();
1739	local_bh_enable();
1740
1741	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1742
1743out:
1744	rcu_read_unlock();
1745	local_bh_enable();
1746	return NULL;
1747}
1748
1749/* Get packet from user space buffer */
1750static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1751			    void *msg_control, struct iov_iter *from,
1752			    int noblock, bool more)
1753{
1754	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1755	struct sk_buff *skb;
1756	size_t total_len = iov_iter_count(from);
1757	size_t len = total_len, align = tun->align, linear;
1758	struct virtio_net_hdr gso = { 0 };
1759	int good_linear;
1760	int copylen;
1761	bool zerocopy = false;
1762	int err;
1763	u32 rxhash = 0;
1764	int skb_xdp = 1;
1765	bool frags = tun_napi_frags_enabled(tfile);
1766	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1767
1768	if (!(tun->flags & IFF_NO_PI)) {
1769		if (len < sizeof(pi))
1770			return -EINVAL;
1771		len -= sizeof(pi);
1772
1773		if (!copy_from_iter_full(&pi, sizeof(pi), from))
1774			return -EFAULT;
1775	}
1776
1777	if (tun->flags & IFF_VNET_HDR) {
1778		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1779
1780		if (len < vnet_hdr_sz)
1781			return -EINVAL;
1782		len -= vnet_hdr_sz;
1783
1784		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1785			return -EFAULT;
1786
1787		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1788		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1789			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1790
1791		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1792			return -EINVAL;
1793		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1794	}
1795
1796	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1797		align += NET_IP_ALIGN;
1798		if (unlikely(len < ETH_HLEN ||
1799			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1800			return -EINVAL;
1801	}
1802
1803	good_linear = SKB_MAX_HEAD(align);
1804
1805	if (msg_control) {
1806		struct iov_iter i = *from;
1807
 1808		/* There are 256 bytes to be copied into the skb, so there is
 1809		 * enough room to expand the skb head in case it is needed.
1810		 * The rest of the buffer is mapped from userspace.
1811		 */
1812		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1813		if (copylen > good_linear)
1814			copylen = good_linear;
1815		linear = copylen;
1816		iov_iter_advance(&i, copylen);
1817		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1818			zerocopy = true;
1819	}
1820
1821	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
 1822		/* For packets that are not easy to process here
 1823		 * (e.g. GSO or jumbo packets), XDP is done later, after
 1824		 * the skb has been created, by the generic XDP routine.
 1825		 */
1826		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1827		err = PTR_ERR_OR_ZERO(skb);
1828		if (err)
1829			goto drop;
1830		if (!skb)
1831			return total_len;
1832	} else {
1833		if (!zerocopy) {
1834			copylen = len;
1835			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1836				linear = good_linear;
1837			else
1838				linear = tun16_to_cpu(tun, gso.hdr_len);
1839		}
1840
1841		if (frags) {
1842			mutex_lock(&tfile->napi_mutex);
1843			skb = tun_napi_alloc_frags(tfile, copylen, from);
1844			/* tun_napi_alloc_frags() enforces a layout for the skb.
1845			 * If zerocopy is enabled, then this layout will be
1846			 * overwritten by zerocopy_sg_from_iter().
1847			 */
1848			zerocopy = false;
1849		} else {
1850			if (!linear)
1851				linear = min_t(size_t, good_linear, copylen);
1852
1853			skb = tun_alloc_skb(tfile, align, copylen, linear,
1854					    noblock);
1855		}
1856
1857		err = PTR_ERR_OR_ZERO(skb);
1858		if (err)
1859			goto drop;
1860
1861		if (zerocopy)
1862			err = zerocopy_sg_from_iter(skb, from);
1863		else
1864			err = skb_copy_datagram_from_iter(skb, 0, from, len);
1865
1866		if (err) {
1867			err = -EFAULT;
1868			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1869			goto drop;
1870		}
1871	}
1872
1873	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1874		atomic_long_inc(&tun->rx_frame_errors);
1875		err = -EINVAL;
1876		goto free_skb;
1877	}
1878
1879	switch (tun->flags & TUN_TYPE_MASK) {
1880	case IFF_TUN:
1881		if (tun->flags & IFF_NO_PI) {
1882			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1883
1884			switch (ip_version) {
1885			case 4:
1886				pi.proto = htons(ETH_P_IP);
1887				break;
1888			case 6:
1889				pi.proto = htons(ETH_P_IPV6);
1890				break;
1891			default:
1892				err = -EINVAL;
1893				goto drop;
1894			}
1895		}
1896
1897		skb_reset_mac_header(skb);
1898		skb->protocol = pi.proto;
1899		skb->dev = tun->dev;
1900		break;
1901	case IFF_TAP:
1902		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
1903			err = -ENOMEM;
1904			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
1905			goto drop;
1906		}
1907		skb->protocol = eth_type_trans(skb, tun->dev);
1908		break;
1909	}
1910
1911	/* copy skb_ubuf_info for callback when skb has no error */
1912	if (zerocopy) {
1913		skb_zcopy_init(skb, msg_control);
1914	} else if (msg_control) {
1915		struct ubuf_info *uarg = msg_control;
1916		uarg->callback(NULL, uarg, false);
1917	}
1918
1919	skb_reset_network_header(skb);
1920	skb_probe_transport_header(skb);
1921	skb_record_rx_queue(skb, tfile->queue_index);
1922
1923	if (skb_xdp) {
1924		struct bpf_prog *xdp_prog;
1925		int ret;
1926
1927		local_bh_disable();
1928		rcu_read_lock();
1929		xdp_prog = rcu_dereference(tun->xdp_prog);
1930		if (xdp_prog) {
1931			ret = do_xdp_generic(xdp_prog, skb);
1932			if (ret != XDP_PASS) {
1933				rcu_read_unlock();
1934				local_bh_enable();
1935				goto unlock_frags;
1936			}
1937		}
1938		rcu_read_unlock();
1939		local_bh_enable();
1940	}
1941
1942	/* Compute the costly rx hash only if needed for flow updates.
 1943	 * There is a very small possibility of out-of-order delivery while a
 1944	 * flow is switching queues; it is not worth optimizing for.
1945	 */
1946	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1947	    !tfile->detached)
1948		rxhash = __skb_get_hash_symmetric(skb);
1949
1950	rcu_read_lock();
1951	if (unlikely(!(tun->dev->flags & IFF_UP))) {
1952		err = -EIO;
1953		rcu_read_unlock();
1954		drop_reason = SKB_DROP_REASON_DEV_READY;
1955		goto drop;
1956	}
1957
1958	if (frags) {
1959		u32 headlen;
1960
1961		/* Exercise flow dissector code path. */
1962		skb_push(skb, ETH_HLEN);
1963		headlen = eth_get_headlen(tun->dev, skb->data,
1964					  skb_headlen(skb));
1965
1966		if (unlikely(headlen > skb_headlen(skb))) {
1967			WARN_ON_ONCE(1);
1968			err = -ENOMEM;
1969			dev_core_stats_rx_dropped_inc(tun->dev);
1970napi_busy:
1971			napi_free_frags(&tfile->napi);
1972			rcu_read_unlock();
1973			mutex_unlock(&tfile->napi_mutex);
1974			return err;
1975		}
1976
1977		if (likely(napi_schedule_prep(&tfile->napi))) {
1978			local_bh_disable();
1979			napi_gro_frags(&tfile->napi);
1980			napi_complete(&tfile->napi);
1981			local_bh_enable();
1982		} else {
1983			err = -EBUSY;
1984			goto napi_busy;
1985		}
1986		mutex_unlock(&tfile->napi_mutex);
1987	} else if (tfile->napi_enabled) {
1988		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1989		int queue_len;
1990
1991		spin_lock_bh(&queue->lock);
1992
1993		if (unlikely(tfile->detached)) {
1994			spin_unlock_bh(&queue->lock);
1995			rcu_read_unlock();
1996			err = -EBUSY;
1997			goto free_skb;
1998		}
1999
2000		__skb_queue_tail(queue, skb);
2001		queue_len = skb_queue_len(queue);
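    		/* Note: the queue was locked with spin_lock_bh(); the plain
    		 * spin_unlock() here keeps BH disabled until the
    		 * local_bh_enable() below, covering the napi_schedule() call.
    		 */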
2002		spin_unlock(&queue->lock);
2003
2004		if (!more || queue_len > NAPI_POLL_WEIGHT)
2005			napi_schedule(&tfile->napi);
2006
2007		local_bh_enable();
2008	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
2009		tun_rx_batched(tun, tfile, skb, more);
2010	} else {
2011		netif_rx(skb);
2012	}
2013	rcu_read_unlock();
2014
2015	preempt_disable();
2016	dev_sw_netstats_rx_add(tun->dev, len);
2017	preempt_enable();
2018
2019	if (rxhash)
2020		tun_flow_update(tun, rxhash, tfile);
2021
2022	return total_len;
2023
2024drop:
2025	if (err != -EAGAIN)
2026		dev_core_stats_rx_dropped_inc(tun->dev);
2027
2028free_skb:
2029	if (!IS_ERR_OR_NULL(skb))
2030		kfree_skb_reason(skb, drop_reason);
2031
2032unlock_frags:
2033	if (frags) {
2034		tfile->napi.skb = NULL;
2035		mutex_unlock(&tfile->napi_mutex);
2036	}
2037
2038	return err ?: total_len;
2039}
2040
2041static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2042{
2043	struct file *file = iocb->ki_filp;
2044	struct tun_file *tfile = file->private_data;
2045	struct tun_struct *tun = tun_get(tfile);
2046	ssize_t result;
2047	int noblock = 0;
2048
2049	if (!tun)
2050		return -EBADFD;
2051
2052	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2053		noblock = 1;
2054
2055	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2056
2057	tun_put(tun);
2058	return result;
2059}
2060
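    /* Copy an XDP frame out to the user buffer.  XDP frames carry no GSO
     * metadata, so with IFF_VNET_HDR enabled a zeroed virtio_net_hdr is
     * written ahead of the frame data.
     */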
2061static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2062				struct tun_file *tfile,
2063				struct xdp_frame *xdp_frame,
2064				struct iov_iter *iter)
2065{
2066	int vnet_hdr_sz = 0;
2067	size_t size = xdp_frame->len;
2068	size_t ret;
2069
2070	if (tun->flags & IFF_VNET_HDR) {
2071		struct virtio_net_hdr gso = { 0 };
2072
2073		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2074		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2075			return -EINVAL;
2076		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2077			     sizeof(gso)))
2078			return -EFAULT;
2079		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2080	}
2081
2082	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2083
2084	preempt_disable();
2085	dev_sw_netstats_tx_add(tun->dev, 1, ret);
2086	preempt_enable();
2087
2088	return ret;
2089}
2090
2091/* Put packet to the user space buffer */
2092static ssize_t tun_put_user(struct tun_struct *tun,
2093			    struct tun_file *tfile,
2094			    struct sk_buff *skb,
2095			    struct iov_iter *iter)
2096{
2097	struct tun_pi pi = { 0, skb->protocol };
2098	ssize_t total;
2099	int vlan_offset = 0;
2100	int vlan_hlen = 0;
2101	int vnet_hdr_sz = 0;
2102
2103	if (skb_vlan_tag_present(skb))
2104		vlan_hlen = VLAN_HLEN;
2105
2106	if (tun->flags & IFF_VNET_HDR)
2107		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2108
2109	total = skb->len + vlan_hlen + vnet_hdr_sz;
2110
2111	if (!(tun->flags & IFF_NO_PI)) {
2112		if (iov_iter_count(iter) < sizeof(pi))
2113			return -EINVAL;
2114
2115		total += sizeof(pi);
2116		if (iov_iter_count(iter) < total) {
2117			/* Packet will be stripped */
2118			pi.flags |= TUN_PKT_STRIP;
2119		}
2120
2121		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2122			return -EFAULT;
2123	}
2124
2125	if (vnet_hdr_sz) {
2126		struct virtio_net_hdr gso;
2127
2128		if (iov_iter_count(iter) < vnet_hdr_sz)
2129			return -EINVAL;
2130
2131		if (virtio_net_hdr_from_skb(skb, &gso,
2132					    tun_is_little_endian(tun), true,
2133					    vlan_hlen)) {
2134			struct skb_shared_info *sinfo = skb_shinfo(skb);
2135
2136			pr_err("unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
2137			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2138			       tun16_to_cpu(tun, gso.hdr_len));
2139			print_hex_dump(KERN_ERR, "tun: ",
2140				       DUMP_PREFIX_NONE,
2141				       16, 1, skb->head,
2142				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2143			WARN_ON_ONCE(1);
2144			return -EINVAL;
2145		}
2146
2147		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2148			return -EFAULT;
2149
2150		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2151	}
2152
2153	if (vlan_hlen) {
2154		int ret;
2155		struct veth veth;
2156
2157		veth.h_vlan_proto = skb->vlan_proto;
2158		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2159
2160		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2161
2162		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2163		if (ret || !iov_iter_count(iter))
2164			goto done;
2165
2166		ret = copy_to_iter(&veth, sizeof(veth), iter);
2167		if (ret != sizeof(veth) || !iov_iter_count(iter))
2168			goto done;
2169	}
2170
2171	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2172
2173done:
2174	/* Caller is in process context; disable preemption for the per-CPU stats update. */
2175	preempt_disable();
2176	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2177	preempt_enable();
2178
2179	return total;
2180}
2181
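    /* Consume one pointer from the queue's tx_ring.  When the ring is
     * empty and blocking is allowed, sleep interruptibly on the socket
     * wait queue until a producer queues something, a signal arrives or
     * the socket is shut down.
     */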
2182static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2183{
2184	DECLARE_WAITQUEUE(wait, current);
2185	void *ptr = NULL;
2186	int error = 0;
2187
2188	ptr = ptr_ring_consume(&tfile->tx_ring);
2189	if (ptr)
2190		goto out;
2191	if (noblock) {
2192		error = -EAGAIN;
2193		goto out;
2194	}
2195
2196	add_wait_queue(&tfile->socket.wq.wait, &wait);
2197
2198	while (1) {
2199		set_current_state(TASK_INTERRUPTIBLE);
2200		ptr = ptr_ring_consume(&tfile->tx_ring);
2201		if (ptr)
2202			break;
2203		if (signal_pending(current)) {
2204			error = -ERESTARTSYS;
2205			break;
2206		}
2207		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2208			error = -EFAULT;
2209			break;
2210		}
2211
2212		schedule();
2213	}
2214
2215	__set_current_state(TASK_RUNNING);
2216	remove_wait_queue(&tfile->socket.wq.wait, &wait);
2217
2218out:
2219	*err = error;
2220	return ptr;
2221}
2222
2223static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2224			   struct iov_iter *to,
2225			   int noblock, void *ptr)
2226{
2227	ssize_t ret;
2228	int err;
2229
2230	if (!iov_iter_count(to)) {
2231		tun_ptr_free(ptr);
2232		return 0;
2233	}
2234
2235	if (!ptr) {
2236		/* Read frames from ring */
2237		ptr = tun_ring_recv(tfile, noblock, &err);
2238		if (!ptr)
2239			return err;
2240	}
2241
2242	if (tun_is_xdp_frame(ptr)) {
2243		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2244
2245		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2246		xdp_return_frame(xdpf);
2247	} else {
2248		struct sk_buff *skb = ptr;
2249
2250		ret = tun_put_user(tun, tfile, skb, to);
2251		if (unlikely(ret < 0))
2252			kfree_skb(skb);
2253		else
2254			consume_skb(skb);
2255	}
2256
2257	return ret;
2258}
2259
2260static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2261{
2262	struct file *file = iocb->ki_filp;
2263	struct tun_file *tfile = file->private_data;
2264	struct tun_struct *tun = tun_get(tfile);
2265	ssize_t len = iov_iter_count(to), ret;
2266	int noblock = 0;
2267
2268	if (!tun)
2269		return -EBADFD;
2270
2271	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2272		noblock = 1;
2273
2274	ret = tun_do_read(tun, tfile, to, noblock, NULL);
2275	ret = min_t(ssize_t, ret, len);
2276	if (ret > 0)
2277		iocb->ki_pos = ret;
2278	tun_put(tun);
2279	return ret;
2280}
2281
2282static void tun_prog_free(struct rcu_head *rcu)
2283{
2284	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2285
2286	bpf_prog_destroy(prog->prog);
2287	kfree(prog);
2288}
2289
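    /* Publish a new steering/filter program under tun->lock; the old
     * program, if any, is freed only after an RCU grace period since the
     * data path dereferences it under rcu_read_lock().
     */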
2290static int __tun_set_ebpf(struct tun_struct *tun,
2291			  struct tun_prog __rcu **prog_p,
2292			  struct bpf_prog *prog)
2293{
2294	struct tun_prog *old, *new = NULL;
2295
2296	if (prog) {
2297		new = kmalloc(sizeof(*new), GFP_KERNEL);
2298		if (!new)
2299			return -ENOMEM;
2300		new->prog = prog;
2301	}
2302
2303	spin_lock_bh(&tun->lock);
2304	old = rcu_dereference_protected(*prog_p,
2305					lockdep_is_held(&tun->lock));
2306	rcu_assign_pointer(*prog_p, new);
2307	spin_unlock_bh(&tun->lock);
2308
2309	if (old)
2310		call_rcu(&old->rcu, tun_prog_free);
2311
2312	return 0;
2313}
2314
2315static void tun_free_netdev(struct net_device *dev)
2316{
2317	struct tun_struct *tun = netdev_priv(dev);
2318
2319	BUG_ON(!(list_empty(&tun->disabled)));
2320
2321	free_percpu(dev->tstats);
2322	tun_flow_uninit(tun);
2323	security_tun_dev_free_security(tun->security);
2324	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2325	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
2326}
2327
2328static void tun_setup(struct net_device *dev)
2329{
2330	struct tun_struct *tun = netdev_priv(dev);
2331
2332	tun->owner = INVALID_UID;
2333	tun->group = INVALID_GID;
2334	tun_default_link_ksettings(dev, &tun->link_ksettings);
2335
2336	dev->ethtool_ops = &tun_ethtool_ops;
2337	dev->needs_free_netdev = true;
2338	dev->priv_destructor = tun_free_netdev;
2339	/* We prefer our own queue length */
2340	dev->tx_queue_len = TUN_READQ_SIZE;
2341}
2342
2343/* Trivial set of netlink ops to allow deleting a tun or tap
2344 * device with netlink.
2345 */
2346static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2347			struct netlink_ext_ack *extack)
2348{
2349	NL_SET_ERR_MSG(extack,
2350		       "tun/tap creation via rtnetlink is not supported.");
2351	return -EOPNOTSUPP;
2352}
2353
2354static size_t tun_get_size(const struct net_device *dev)
2355{
2356	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2357	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2358
2359	return nla_total_size(sizeof(uid_t)) + /* OWNER */
2360	       nla_total_size(sizeof(gid_t)) + /* GROUP */
2361	       nla_total_size(sizeof(u8)) + /* TYPE */
2362	       nla_total_size(sizeof(u8)) + /* PI */
2363	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
2364	       nla_total_size(sizeof(u8)) + /* PERSIST */
2365	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2366	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2367	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2368	       0;
2369}
2370
2371static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2372{
2373	struct tun_struct *tun = netdev_priv(dev);
2374
2375	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2376		goto nla_put_failure;
2377	if (uid_valid(tun->owner) &&
2378	    nla_put_u32(skb, IFLA_TUN_OWNER,
2379			from_kuid_munged(current_user_ns(), tun->owner)))
2380		goto nla_put_failure;
2381	if (gid_valid(tun->group) &&
2382	    nla_put_u32(skb, IFLA_TUN_GROUP,
2383			from_kgid_munged(current_user_ns(), tun->group)))
2384		goto nla_put_failure;
2385	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2386		goto nla_put_failure;
2387	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2388		goto nla_put_failure;
2389	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2390		goto nla_put_failure;
2391	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2392		       !!(tun->flags & IFF_MULTI_QUEUE)))
2393		goto nla_put_failure;
2394	if (tun->flags & IFF_MULTI_QUEUE) {
2395		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2396			goto nla_put_failure;
2397		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2398				tun->numdisabled))
2399			goto nla_put_failure;
2400	}
2401
2402	return 0;
2403
2404nla_put_failure:
2405	return -EMSGSIZE;
2406}
2407
2408static struct rtnl_link_ops tun_link_ops __read_mostly = {
2409	.kind		= DRV_NAME,
2410	.priv_size	= sizeof(struct tun_struct),
2411	.setup		= tun_setup,
2412	.validate	= tun_validate,
2413	.get_size       = tun_get_size,
2414	.fill_info      = tun_fill_info,
2415};
2416
2417static void tun_sock_write_space(struct sock *sk)
2418{
2419	struct tun_file *tfile;
2420	wait_queue_head_t *wqueue;
2421
2422	if (!sock_writeable(sk))
2423		return;
2424
2425	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2426		return;
2427
2428	wqueue = sk_sleep(sk);
2429	if (wqueue && waitqueue_active(wqueue))
2430		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2431						EPOLLWRNORM | EPOLLWRBAND);
2432
2433	tfile = container_of(sk, struct tun_file, sk);
2434	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2435}
2436
2437static void tun_put_page(struct tun_page *tpage)
2438{
2439	if (tpage->page)
2440		__page_frag_cache_drain(tpage->page, tpage->count);
2441}
2442
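    /* Process one xdp_buff from a TUN_MSG_PTR batch: run the attached
     * XDP program if any, and on XDP_PASS (or when no program is
     * attached) build an skb around the buffer and hand it to the
     * stack.  Returns 1 when the skb was queued for NAPI so the caller
     * knows to schedule the poller.
     */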
2443static int tun_xdp_one(struct tun_struct *tun,
2444		       struct tun_file *tfile,
2445		       struct xdp_buff *xdp, int *flush,
2446		       struct tun_page *tpage)
2447{
2448	unsigned int datasize = xdp->data_end - xdp->data;
2449	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2450	struct virtio_net_hdr *gso = &hdr->gso;
2451	struct bpf_prog *xdp_prog;
2452	struct sk_buff *skb = NULL;
2453	struct sk_buff_head *queue;
2454	u32 rxhash = 0, act;
2455	int buflen = hdr->buflen;
2456	int ret = 0;
2457	bool skb_xdp = false;
2458	struct page *page;
2459
2460	xdp_prog = rcu_dereference(tun->xdp_prog);
2461	if (xdp_prog) {
2462		if (gso->gso_type) {
2463			skb_xdp = true;
2464			goto build;
2465		}
2466
2467		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2468		xdp_set_data_meta_invalid(xdp);
2469
2470		act = bpf_prog_run_xdp(xdp_prog, xdp);
2471		ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2472		if (ret < 0) {
2473			put_page(virt_to_head_page(xdp->data));
2474			return ret;
2475		}
2476
2477		switch (ret) {
2478		case XDP_REDIRECT:
2479			*flush = true;
2480			fallthrough;
2481		case XDP_TX:
2482			return 0;
2483		case XDP_PASS:
2484			break;
2485		default:
2486			page = virt_to_head_page(xdp->data);
2487			if (tpage->page == page) {
2488				++tpage->count;
2489			} else {
2490				tun_put_page(tpage);
2491				tpage->page = page;
2492				tpage->count = 1;
2493			}
2494			return 0;
2495		}
2496	}
2497
2498build:
2499	skb = build_skb(xdp->data_hard_start, buflen);
2500	if (!skb) {
2501		ret = -ENOMEM;
2502		goto out;
2503	}
2504
2505	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2506	skb_put(skb, xdp->data_end - xdp->data);
2507
2508	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2509		atomic_long_inc(&tun->rx_frame_errors);
2510		kfree_skb(skb);
2511		ret = -EINVAL;
2512		goto out;
2513	}
2514
2515	skb->protocol = eth_type_trans(skb, tun->dev);
2516	skb_reset_network_header(skb);
2517	skb_probe_transport_header(skb);
2518	skb_record_rx_queue(skb, tfile->queue_index);
2519
2520	if (skb_xdp) {
2521		ret = do_xdp_generic(xdp_prog, skb);
2522		if (ret != XDP_PASS) {
2523			ret = 0;
2524			goto out;
2525		}
2526	}
2527
2528	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2529	    !tfile->detached)
2530		rxhash = __skb_get_hash_symmetric(skb);
2531
2532	if (tfile->napi_enabled) {
2533		queue = &tfile->sk.sk_write_queue;
2534		spin_lock(&queue->lock);
2535
2536		if (unlikely(tfile->detached)) {
2537			spin_unlock(&queue->lock);
2538			kfree_skb(skb);
2539			return -EBUSY;
2540		}
2541
2542		__skb_queue_tail(queue, skb);
2543		spin_unlock(&queue->lock);
2544		ret = 1;
2545	} else {
2546		netif_receive_skb(skb);
2547		ret = 0;
2548	}
2549
2550	/* No need to disable preemption here since this function is
2551	 * always called with bh disabled
2552	 */
2553	dev_sw_netstats_rx_add(tun->dev, datasize);
2554
2555	if (rxhash)
2556		tun_flow_update(tun, rxhash, tfile);
2557
2558out:
2559	return ret;
2560}
2561
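    /* sendmsg() on the tun socket, used mainly by vhost-net.  A control
     * block of type TUN_MSG_PTR carries a batch of pre-built xdp_buffs;
     * everything else goes through the regular tun_get_user() path.
     */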
2562static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2563{
2564	int ret, i;
2565	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2566	struct tun_struct *tun = tun_get(tfile);
2567	struct tun_msg_ctl *ctl = m->msg_control;
2568	struct xdp_buff *xdp;
2569
2570	if (!tun)
2571		return -EBADFD;
2572
2573	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
2574	    ctl && ctl->type == TUN_MSG_PTR) {
2575		struct tun_page tpage;
2576		int n = ctl->num;
2577		int flush = 0, queued = 0;
2578
2579		memset(&tpage, 0, sizeof(tpage));
2580
2581		local_bh_disable();
2582		rcu_read_lock();
2583
2584		for (i = 0; i < n; i++) {
2585			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2586			ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2587			if (ret > 0)
2588				queued += ret;
2589		}
2590
2591		if (flush)
2592			xdp_do_flush();
2593
2594		if (tfile->napi_enabled && queued > 0)
2595			napi_schedule(&tfile->napi);
2596
2597		rcu_read_unlock();
2598		local_bh_enable();
2599
2600		tun_put_page(&tpage);
2601
2602		ret = total_len;
2603		goto out;
2604	}
2605
2606	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2607			   m->msg_flags & MSG_DONTWAIT,
2608			   m->msg_flags & MSG_MORE);
2609out:
2610	tun_put(tun);
2611	return ret;
2612}
2613
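    /* recvmsg() on the tun socket.  msg_control may already carry a
     * pointer pulled from the ring by the caller (as vhost-net does);
     * otherwise tun_do_read() consumes the ring itself.
     */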
2614static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2615		       int flags)
2616{
2617	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2618	struct tun_struct *tun = tun_get(tfile);
2619	void *ptr = m->msg_control;
2620	int ret;
2621
2622	if (!tun) {
2623		ret = -EBADFD;
2624		goto out_free;
2625	}
2626
2627	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2628		ret = -EINVAL;
2629		goto out_put_tun;
2630	}
2631	if (flags & MSG_ERRQUEUE) {
2632		ret = sock_recv_errqueue(sock->sk, m, total_len,
2633					 SOL_PACKET, TUN_TX_TIMESTAMP);
2634		goto out;
2635	}
2636	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2637	if (ret > (ssize_t)total_len) {
2638		m->msg_flags |= MSG_TRUNC;
2639		ret = flags & MSG_TRUNC ? ret : total_len;
2640	}
2641out:
2642	tun_put(tun);
2643	return ret;
2644
2645out_put_tun:
2646	tun_put(tun);
2647out_free:
2648	tun_ptr_free(ptr);
2649	return ret;
2650}
2651
2652static int tun_ptr_peek_len(void *ptr)
2653{
2654	if (likely(ptr)) {
2655		if (tun_is_xdp_frame(ptr)) {
2656			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2657
2658			return xdpf->len;
2659		}
2660		return __skb_array_len_with_tag(ptr);
2661	} else {
2662		return 0;
2663	}
2664}
2665
2666static int tun_peek_len(struct socket *sock)
2667{
2668	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2669	struct tun_struct *tun;
2670	int ret = 0;
2671
2672	tun = tun_get(tfile);
2673	if (!tun)
2674		return 0;
2675
2676	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2677	tun_put(tun);
2678
2679	return ret;
2680}
2681
2682/* Ops structure to mimic raw sockets with tun */
2683static const struct proto_ops tun_socket_ops = {
2684	.peek_len = tun_peek_len,
2685	.sendmsg = tun_sendmsg,
2686	.recvmsg = tun_recvmsg,
2687};
2688
2689static struct proto tun_proto = {
2690	.name		= "tun",
2691	.owner		= THIS_MODULE,
2692	.obj_size	= sizeof(struct tun_file),
2693};
2694
2695static int tun_flags(struct tun_struct *tun)
2696{
2697	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2698}
2699
2700static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
2701			      char *buf)
2702{
2703	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2704	return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
2705}
2706
2707static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
2708			  char *buf)
2709{
2710	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2711	return uid_valid(tun->owner) ?
2712		sysfs_emit(buf, "%u\n",
2713			   from_kuid_munged(current_user_ns(), tun->owner)) :
2714		sysfs_emit(buf, "-1\n");
2715}
2716
2717static ssize_t group_show(struct device *dev, struct device_attribute *attr,
2718			  char *buf)
2719{
2720	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2721	return gid_valid(tun->group) ?
2722		sysfs_emit(buf, "%u\n",
2723			   from_kgid_munged(current_user_ns(), tun->group)) :
2724		sysfs_emit(buf, "-1\n");
2725}
2726
2727static DEVICE_ATTR_RO(tun_flags);
2728static DEVICE_ATTR_RO(owner);
2729static DEVICE_ATTR_RO(group);
2730
2731static struct attribute *tun_dev_attrs[] = {
2732	&dev_attr_tun_flags.attr,
2733	&dev_attr_owner.attr,
2734	&dev_attr_group.attr,
2735	NULL
2736};
2737
2738static const struct attribute_group tun_attr_group = {
2739	.attrs = tun_dev_attrs
2740};
2741
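    /* TUNSETIFF backend: attach this file to an existing tun/tap device
     * by name, after permission and LSM checks, or create a new device.
     * Typical userspace usage (sketch):
     *
     *	struct ifreq ifr = { .ifr_flags = IFF_TUN | IFF_NO_PI };
     *	int fd = open("/dev/net/tun", O_RDWR);
     *
     *	strncpy(ifr.ifr_name, "tun0", IFNAMSIZ);
     *	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
     *		perror("TUNSETIFF");
     */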
2742static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2743{
2744	struct tun_struct *tun;
2745	struct tun_file *tfile = file->private_data;
2746	struct net_device *dev;
2747	int err;
2748
2749	if (tfile->detached)
2750		return -EINVAL;
2751
2752	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2753		if (!capable(CAP_NET_ADMIN))
2754			return -EPERM;
2755
2756		if (!(ifr->ifr_flags & IFF_NAPI) ||
2757		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2758			return -EINVAL;
2759	}
2760
2761	dev = __dev_get_by_name(net, ifr->ifr_name);
2762	if (dev) {
2763		if (ifr->ifr_flags & IFF_TUN_EXCL)
2764			return -EBUSY;
2765		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2766			tun = netdev_priv(dev);
2767		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2768			tun = netdev_priv(dev);
2769		else
2770			return -EINVAL;
2771
2772		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2773		    !!(tun->flags & IFF_MULTI_QUEUE))
2774			return -EINVAL;
2775
2776		if (tun_not_capable(tun))
2777			return -EPERM;
2778		err = security_tun_dev_open(tun->security);
2779		if (err < 0)
2780			return err;
2781
2782		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2783				 ifr->ifr_flags & IFF_NAPI,
2784				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2785		if (err < 0)
2786			return err;
2787
2788		if (tun->flags & IFF_MULTI_QUEUE &&
2789		    (tun->numqueues + tun->numdisabled > 1)) {
2790			/* One or more queues have already been attached, no
2791			 * need to initialize the device again.
2792			 */
2793			netdev_state_change(dev);
2794			return 0;
2795		}
2796
2797		tun->flags = (tun->flags & ~TUN_FEATURES) |
2798			      (ifr->ifr_flags & TUN_FEATURES);
2799
2800		netdev_state_change(dev);
2801	} else {
2802		char *name;
2803		unsigned long flags = 0;
2804		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2805			     MAX_TAP_QUEUES : 1;
2806
2807		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2808			return -EPERM;
2809		err = security_tun_dev_create();
2810		if (err < 0)
2811			return err;
2812
2813		/* Set dev type */
2814		if (ifr->ifr_flags & IFF_TUN) {
2815			/* TUN device */
2816			flags |= IFF_TUN;
2817			name = "tun%d";
2818		} else if (ifr->ifr_flags & IFF_TAP) {
2819			/* TAP device */
2820			flags |= IFF_TAP;
2821			name = "tap%d";
2822		} else {
2823			return -EINVAL;
    		}
2824
2825		if (*ifr->ifr_name)
2826			name = ifr->ifr_name;
2827
2828		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2829				       NET_NAME_UNKNOWN, tun_setup, queues,
2830				       queues);
2831
2832		if (!dev)
2833			return -ENOMEM;
2834
2835		dev_net_set(dev, net);
2836		dev->rtnl_link_ops = &tun_link_ops;
2837		dev->ifindex = tfile->ifindex;
2838		dev->sysfs_groups[0] = &tun_attr_group;
2839
2840		tun = netdev_priv(dev);
2841		tun->dev = dev;
2842		tun->flags = flags;
2843		tun->txflt.count = 0;
2844		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2845
2846		tun->align = NET_SKB_PAD;
2847		tun->filter_attached = false;
2848		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2849		tun->rx_batched = 0;
2850		RCU_INIT_POINTER(tun->steering_prog, NULL);
2851
2852		tun->ifr = ifr;
2853		tun->file = file;
2854
2855		tun_net_initialize(dev);
2856
2857		err = register_netdevice(tun->dev);
2858		if (err < 0) {
2859			free_netdev(dev);
2860			return err;
2861		}
2862		/* free_netdev() won't check refcnt; to avoid a race with
2863		 * dev_put() we must publish tun only after registration.
2864		 */
2865		rcu_assign_pointer(tfile->tun, tun);
2866	}
2867
2868	if (ifr->ifr_flags & IFF_NO_CARRIER)
2869		netif_carrier_off(tun->dev);
2870	else
2871		netif_carrier_on(tun->dev);
2872
2873	/* Make sure persistent devices do not get stuck in
2874	 * xoff state.
2875	 */
2876	if (netif_running(tun->dev))
2877		netif_tx_wake_all_queues(tun->dev);
2878
2879	strcpy(ifr->ifr_name, tun->dev->name);
2880	return 0;
2881}
2882
2883static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2884{
2885	strcpy(ifr->ifr_name, tun->dev->name);
2886
2887	ifr->ifr_flags = tun_flags(tun);
2888
2889}
2890
2891/* This is like a cut-down ethtool ops, except done via tun fd so no
2892 * privs required. */
2893static int set_offload(struct tun_struct *tun, unsigned long arg)
2894{
2895	netdev_features_t features = 0;
2896
2897	if (arg & TUN_F_CSUM) {
2898		features |= NETIF_F_HW_CSUM;
2899		arg &= ~TUN_F_CSUM;
2900
2901		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2902			if (arg & TUN_F_TSO_ECN) {
2903				features |= NETIF_F_TSO_ECN;
2904				arg &= ~TUN_F_TSO_ECN;
2905			}
2906			if (arg & TUN_F_TSO4)
2907				features |= NETIF_F_TSO;
2908			if (arg & TUN_F_TSO6)
2909				features |= NETIF_F_TSO6;
2910			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2911		}
2912
2913		arg &= ~TUN_F_UFO;
2914
2915		/* TODO: for now USO4 and USO6 are only accepted together */
2916		if (arg & TUN_F_USO4 && arg & TUN_F_USO6) {
2917			features |= NETIF_F_GSO_UDP_L4;
2918			arg &= ~(TUN_F_USO4 | TUN_F_USO6);
2919		}
2920	}
2921
2922	/* This gives the user a way to test for new features in the future
2923	 * by trying to set them. */
2924	if (arg)
2925		return -EINVAL;
2926
2927	tun->set_features = features;
2928	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2929	tun->dev->wanted_features |= features;
2930	netdev_update_features(tun->dev);
2931
2932	return 0;
2933}
2934
2935static void tun_detach_filter(struct tun_struct *tun, int n)
2936{
2937	int i;
2938	struct tun_file *tfile;
2939
2940	for (i = 0; i < n; i++) {
2941		tfile = rtnl_dereference(tun->tfiles[i]);
2942		lock_sock(tfile->socket.sk);
2943		sk_detach_filter(tfile->socket.sk);
2944		release_sock(tfile->socket.sk);
2945	}
2946
2947	tun->filter_attached = false;
2948}
2949
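    /* Attach the classic BPF filter in tun->fprog to every queue's
     * socket, unwinding the queues attached so far if one fails.
     */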
2950static int tun_attach_filter(struct tun_struct *tun)
2951{
2952	int i, ret = 0;
2953	struct tun_file *tfile;
2954
2955	for (i = 0; i < tun->numqueues; i++) {
2956		tfile = rtnl_dereference(tun->tfiles[i]);
2957		lock_sock(tfile->socket.sk);
2958		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2959		release_sock(tfile->socket.sk);
2960		if (ret) {
2961			tun_detach_filter(tun, i);
2962			return ret;
2963		}
2964	}
2965
2966	tun->filter_attached = true;
2967	return ret;
2968}
2969
2970static void tun_set_sndbuf(struct tun_struct *tun)
2971{
2972	struct tun_file *tfile;
2973	int i;
2974
2975	for (i = 0; i < tun->numqueues; i++) {
2976		tfile = rtnl_dereference(tun->tfiles[i]);
2977		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2978	}
2979}
2980
2981static int tun_set_queue(struct file *file, struct ifreq *ifr)
2982{
2983	struct tun_file *tfile = file->private_data;
2984	struct tun_struct *tun;
2985	int ret = 0;
2986
2987	rtnl_lock();
2988
2989	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2990		tun = tfile->detached;
2991		if (!tun) {
2992			ret = -EINVAL;
2993			goto unlock;
2994		}
2995		ret = security_tun_dev_attach_queue(tun->security);
2996		if (ret < 0)
2997			goto unlock;
2998		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2999				 tun->flags & IFF_NAPI_FRAGS, true);
3000	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
3001		tun = rtnl_dereference(tfile->tun);
3002		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
3003			ret = -EINVAL;
3004		else
3005			__tun_detach(tfile, false);
3006	} else {
3007		ret = -EINVAL;
    	}
3008
3009	if (ret >= 0)
3010		netdev_state_change(tun->dev);
3011
3012unlock:
3013	rtnl_unlock();
3014	return ret;
3015}
3016
3017static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3018			void __user *data)
3019{
3020	struct bpf_prog *prog;
3021	int fd;
3022
3023	if (copy_from_user(&fd, data, sizeof(fd)))
3024		return -EFAULT;
3025
3026	if (fd == -1) {
3027		prog = NULL;
3028	} else {
3029		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3030		if (IS_ERR(prog))
3031			return PTR_ERR(prog);
3032	}
3033
3034	return __tun_set_ebpf(tun, prog_p, prog);
3035}
3036
3037/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
3038static unsigned char tun_get_addr_len(unsigned short type)
3039{
3040	switch (type) {
3041	case ARPHRD_IP6GRE:
3042	case ARPHRD_TUNNEL6:
3043		return sizeof(struct in6_addr);
3044	case ARPHRD_IPGRE:
3045	case ARPHRD_TUNNEL:
3046	case ARPHRD_SIT:
3047		return 4;
3048	case ARPHRD_ETHER:
3049		return ETH_ALEN;
3050	case ARPHRD_IEEE802154:
3051	case ARPHRD_IEEE802154_MONITOR:
3052		return IEEE802154_EXTENDED_ADDR_LEN;
3053	case ARPHRD_PHONET_PIPE:
3054	case ARPHRD_PPP:
3055	case ARPHRD_NONE:
3056		return 0;
3057	case ARPHRD_6LOWPAN:
3058		return EUI64_ADDR_LEN;
3059	case ARPHRD_FDDI:
3060		return FDDI_K_ALEN;
3061	case ARPHRD_HIPPI:
3062		return HIPPI_ALEN;
3063	case ARPHRD_IEEE802:
3064		return FC_ALEN;
3065	case ARPHRD_ROSE:
3066		return ROSE_ADDR_LEN;
3067	case ARPHRD_NETROM:
3068		return AX25_ADDR_LEN;
3069	case ARPHRD_LOCALTLK:
3070		return LTALK_ALEN;
3071	default:
3072		return 0;
3073	}
3074}
3075
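    /* Common ioctl handler.  ifreq_len is sizeof(struct ifreq) for
     * native callers and sizeof(struct compat_ifreq) for the compat
     * path, so copies to and from user space never touch bytes beyond
     * the smaller compat layout.
     */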
3076static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3077			    unsigned long arg, int ifreq_len)
3078{
3079	struct tun_file *tfile = file->private_data;
3080	struct net *net = sock_net(&tfile->sk);
3081	struct tun_struct *tun;
3082	void __user *argp = (void __user *)arg;
3083	unsigned int carrier;
3084	struct ifreq ifr;
3085	kuid_t owner;
3086	kgid_t group;
3087	int ifindex;
3088	int sndbuf;
3089	int vnet_hdr_sz;
3090	int le;
3091	int ret;
3092	bool do_notify = false;
3093
3094	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3095	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3096		if (copy_from_user(&ifr, argp, ifreq_len))
3097			return -EFAULT;
3098	} else {
3099		memset(&ifr, 0, sizeof(ifr));
3100	}
3101	if (cmd == TUNGETFEATURES) {
3102		/* Currently this just means: "what IFF flags are valid?".
3103		 * This is needed because we never checked for invalid flags on
3104		 * TUNSETIFF.
3105		 */
3106		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
3107				TUN_FEATURES, (unsigned int __user *)argp);
3108	} else if (cmd == TUNSETQUEUE) {
3109		return tun_set_queue(file, &ifr);
3110	} else if (cmd == SIOCGSKNS) {
3111		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3112			return -EPERM;
3113		return open_related_ns(&net->ns, get_net_ns);
3114	}
3115
3116	rtnl_lock();
3117
3118	tun = tun_get(tfile);
3119	if (cmd == TUNSETIFF) {
3120		ret = -EEXIST;
3121		if (tun)
3122			goto unlock;
3123
3124		ifr.ifr_name[IFNAMSIZ-1] = '\0';
3125
3126		ret = tun_set_iff(net, file, &ifr);
3127
3128		if (ret)
3129			goto unlock;
3130
3131		if (copy_to_user(argp, &ifr, ifreq_len))
3132			ret = -EFAULT;
3133		goto unlock;
3134	}
3135	if (cmd == TUNSETIFINDEX) {
3136		ret = -EPERM;
3137		if (tun)
3138			goto unlock;
3139
3140		ret = -EFAULT;
3141		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3142			goto unlock;
3143		ret = -EINVAL;
3144		if (ifindex < 0)
3145			goto unlock;
3146		ret = 0;
3147		tfile->ifindex = ifindex;
3148		goto unlock;
3149	}
3150
3151	ret = -EBADFD;
3152	if (!tun)
3153		goto unlock;
3154
3155	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3156
3157	net = dev_net(tun->dev);
3158	ret = 0;
3159	switch (cmd) {
3160	case TUNGETIFF:
3161		tun_get_iff(tun, &ifr);
3162
3163		if (tfile->detached)
3164			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3165		if (!tfile->socket.sk->sk_filter)
3166			ifr.ifr_flags |= IFF_NOFILTER;
3167
3168		if (copy_to_user(argp, &ifr, ifreq_len))
3169			ret = -EFAULT;
3170		break;
3171
3172	case TUNSETNOCSUM:
3173		/* Disable/Enable checksum */
3174
3175		/* [unimplemented] */
3176		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3177			   arg ? "disabled" : "enabled");
3178		break;
3179
3180	case TUNSETPERSIST:
3181		/* Disable/Enable persist mode. Keep an extra reference to the
3182		 * module to prevent it from being unloaded while persistent.
3183		 */
3184		if (arg && !(tun->flags & IFF_PERSIST)) {
3185			tun->flags |= IFF_PERSIST;
3186			__module_get(THIS_MODULE);
3187			do_notify = true;
3188		}
3189		if (!arg && (tun->flags & IFF_PERSIST)) {
3190			tun->flags &= ~IFF_PERSIST;
3191			module_put(THIS_MODULE);
3192			do_notify = true;
3193		}
3194
3195		netif_info(tun, drv, tun->dev, "persist %s\n",
3196			   arg ? "enabled" : "disabled");
3197		break;
3198
3199	case TUNSETOWNER:
3200		/* Set owner of the device */
3201		owner = make_kuid(current_user_ns(), arg);
3202		if (!uid_valid(owner)) {
3203			ret = -EINVAL;
3204			break;
3205		}
3206		tun->owner = owner;
3207		do_notify = true;
3208		netif_info(tun, drv, tun->dev, "owner set to %u\n",
3209			   from_kuid(&init_user_ns, tun->owner));
3210		break;
3211
3212	case TUNSETGROUP:
3213		/* Set group of the device */
3214		group = make_kgid(current_user_ns(), arg);
3215		if (!gid_valid(group)) {
3216			ret = -EINVAL;
3217			break;
3218		}
3219		tun->group = group;
3220		do_notify = true;
3221		netif_info(tun, drv, tun->dev, "group set to %u\n",
3222			   from_kgid(&init_user_ns, tun->group));
3223		break;
3224
3225	case TUNSETLINK:
3226		/* Only allow setting the type when the interface is down */
3227		if (tun->dev->flags & IFF_UP) {
3228			netif_info(tun, drv, tun->dev,
3229				   "Linktype set failed because interface is up\n");
3230			ret = -EBUSY;
3231		} else {
3232			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
3233						       tun->dev);
3234			ret = notifier_to_errno(ret);
3235			if (ret) {
3236				netif_info(tun, drv, tun->dev,
3237					   "Refused to change device type\n");
3238				break;
3239			}
3240			tun->dev->type = (int) arg;
3241			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3242			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3243				   tun->dev->type);
3244			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
3245						 tun->dev);
3246		}
3247		break;
3248
3249	case TUNSETDEBUG:
3250		tun->msg_enable = (u32)arg;
3251		break;
3252
3253	case TUNSETOFFLOAD:
3254		ret = set_offload(tun, arg);
3255		break;
3256
3257	case TUNSETTXFILTER:
3258		/* Can be set only for TAPs */
3259		ret = -EINVAL;
3260		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3261			break;
3262		ret = update_filter(&tun->txflt, (void __user *)arg);
3263		break;
3264
3265	case SIOCGIFHWADDR:
3266		/* Get hw address */
3267		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3268		if (copy_to_user(argp, &ifr, ifreq_len))
3269			ret = -EFAULT;
3270		break;
3271
3272	case SIOCSIFHWADDR:
3273		/* Set hw address */
3274		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3275		break;
3276
3277	case TUNGETSNDBUF:
3278		sndbuf = tfile->socket.sk->sk_sndbuf;
3279		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3280			ret = -EFAULT;
3281		break;
3282
3283	case TUNSETSNDBUF:
3284		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3285			ret = -EFAULT;
3286			break;
3287		}
3288		if (sndbuf <= 0) {
3289			ret = -EINVAL;
3290			break;
3291		}
3292
3293		tun->sndbuf = sndbuf;
3294		tun_set_sndbuf(tun);
3295		break;
3296
3297	case TUNGETVNETHDRSZ:
3298		vnet_hdr_sz = tun->vnet_hdr_sz;
3299		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3300			ret = -EFAULT;
3301		break;
3302
3303	case TUNSETVNETHDRSZ:
3304		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3305			ret = -EFAULT;
3306			break;
3307		}
3308		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3309			ret = -EINVAL;
3310			break;
3311		}
3312
3313		tun->vnet_hdr_sz = vnet_hdr_sz;
3314		break;
3315
3316	case TUNGETVNETLE:
3317		le = !!(tun->flags & TUN_VNET_LE);
3318		if (put_user(le, (int __user *)argp))
3319			ret = -EFAULT;
3320		break;
3321
3322	case TUNSETVNETLE:
3323		if (get_user(le, (int __user *)argp)) {
3324			ret = -EFAULT;
3325			break;
3326		}
3327		if (le)
3328			tun->flags |= TUN_VNET_LE;
3329		else
3330			tun->flags &= ~TUN_VNET_LE;
3331		break;
3332
3333	case TUNGETVNETBE:
3334		ret = tun_get_vnet_be(tun, argp);
3335		break;
3336
3337	case TUNSETVNETBE:
3338		ret = tun_set_vnet_be(tun, argp);
3339		break;
3340
3341	case TUNATTACHFILTER:
3342		/* Can be set only for TAPs */
3343		ret = -EINVAL;
3344		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3345			break;
3346		ret = -EFAULT;
3347		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3348			break;
3349
3350		ret = tun_attach_filter(tun);
3351		break;
3352
3353	case TUNDETACHFILTER:
3354		/* Can be set only for TAPs */
3355		ret = -EINVAL;
3356		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3357			break;
3358		ret = 0;
3359		tun_detach_filter(tun, tun->numqueues);
3360		break;
3361
3362	case TUNGETFILTER:
3363		ret = -EINVAL;
3364		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3365			break;
3366		ret = -EFAULT;
3367		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3368			break;
3369		ret = 0;
3370		break;
3371
3372	case TUNSETSTEERINGEBPF:
3373		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3374		break;
3375
3376	case TUNSETFILTEREBPF:
3377		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3378		break;
3379
3380	case TUNSETCARRIER:
3381		ret = -EFAULT;
3382		if (copy_from_user(&carrier, argp, sizeof(carrier)))
3383			goto unlock;
3384
3385		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3386		break;
3387
3388	case TUNGETDEVNETNS:
3389		ret = -EPERM;
3390		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3391			goto unlock;
3392		ret = open_related_ns(&net->ns, get_net_ns);
3393		break;
3394
3395	default:
3396		ret = -EINVAL;
3397		break;
3398	}
3399
3400	if (do_notify)
3401		netdev_state_change(tun->dev);
3402
3403unlock:
3404	rtnl_unlock();
3405	if (tun)
3406		tun_put(tun);
3407	return ret;
3408}
3409
3410static long tun_chr_ioctl(struct file *file,
3411			  unsigned int cmd, unsigned long arg)
3412{
3413	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
3414}
3415
3416#ifdef CONFIG_COMPAT
3417static long tun_chr_compat_ioctl(struct file *file,
3418			 unsigned int cmd, unsigned long arg)
3419{
3420	switch (cmd) {
3421	case TUNSETIFF:
3422	case TUNGETIFF:
3423	case TUNSETTXFILTER:
3424	case TUNGETSNDBUF:
3425	case TUNSETSNDBUF:
3426	case SIOCGIFHWADDR:
3427	case SIOCSIFHWADDR:
3428		arg = (unsigned long)compat_ptr(arg);
3429		break;
3430	default:
3431		arg = (compat_ulong_t)arg;
3432		break;
3433	}
3434
3435	/*
3436	 * compat_ifreq is shorter than ifreq, so we must not access beyond
3437	 * the end of that structure. All fields that are used in this
3438	 * driver are compatible though, we don't need to convert the
3439	 * contents.
3440	 */
3441	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3442}
3443#endif /* CONFIG_COMPAT */
3444
3445static int tun_chr_fasync(int fd, struct file *file, int on)
3446{
3447	struct tun_file *tfile = file->private_data;
3448	int ret;
3449
3450	ret = fasync_helper(fd, file, on, &tfile->fasync);
3451	if (ret < 0)
3452		goto out;
3453
3454	if (on) {
3455		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3456		tfile->flags |= TUN_FASYNC;
3457	} else {
    		tfile->flags &= ~TUN_FASYNC;
    	}
3458	ret = 0;
3459out:
3460	return ret;
3461}
3462
3463static int tun_chr_open(struct inode *inode, struct file * file)
3464{
3465	struct net *net = current->nsproxy->net_ns;
3466	struct tun_file *tfile;
3467
3468	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3469					    &tun_proto, 0);
3470	if (!tfile)
3471		return -ENOMEM;
3472	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3473		sk_free(&tfile->sk);
3474		return -ENOMEM;
3475	}
3476
3477	mutex_init(&tfile->napi_mutex);
3478	RCU_INIT_POINTER(tfile->tun, NULL);
3479	tfile->flags = 0;
3480	tfile->ifindex = 0;
3481
3482	init_waitqueue_head(&tfile->socket.wq.wait);
3483
3484	tfile->socket.file = file;
3485	tfile->socket.ops = &tun_socket_ops;
3486
3487	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
3488
3489	tfile->sk.sk_write_space = tun_sock_write_space;
3490	tfile->sk.sk_sndbuf = INT_MAX;
3491
3492	file->private_data = tfile;
3493	INIT_LIST_HEAD(&tfile->next);
3494
3495	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3496
3497	/* tun groks IOCB_NOWAIT just fine, mark it as such */
3498	file->f_mode |= FMODE_NOWAIT;
3499	return 0;
3500}
3501
3502static int tun_chr_close(struct inode *inode, struct file *file)
3503{
3504	struct tun_file *tfile = file->private_data;
3505
3506	tun_detach(tfile, true);
3507
3508	return 0;
3509}
3510
3511#ifdef CONFIG_PROC_FS
3512static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3513{
3514	struct tun_file *tfile = file->private_data;
3515	struct tun_struct *tun;
3516	struct ifreq ifr;
3517
3518	memset(&ifr, 0, sizeof(ifr));
3519
3520	rtnl_lock();
3521	tun = tun_get(tfile);
3522	if (tun)
3523		tun_get_iff(tun, &ifr);
3524	rtnl_unlock();
3525
3526	if (tun)
3527		tun_put(tun);
3528
3529	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3530}
3531#endif
3532
3533static const struct file_operations tun_fops = {
3534	.owner	= THIS_MODULE,
3535	.llseek = no_llseek,
3536	.read_iter  = tun_chr_read_iter,
3537	.write_iter = tun_chr_write_iter,
3538	.poll	= tun_chr_poll,
3539	.unlocked_ioctl	= tun_chr_ioctl,
3540#ifdef CONFIG_COMPAT
3541	.compat_ioctl = tun_chr_compat_ioctl,
3542#endif
3543	.open	= tun_chr_open,
3544	.release = tun_chr_close,
3545	.fasync = tun_chr_fasync,
3546#ifdef CONFIG_PROC_FS
3547	.show_fdinfo = tun_chr_show_fdinfo,
3548#endif
3549};
3550
3551static struct miscdevice tun_miscdev = {
3552	.minor = TUN_MINOR,
3553	.name = "tun",
3554	.nodename = "net/tun",
3555	.fops = &tun_fops,
3556};
3557
3558/* ethtool interface */
3559
3560static void tun_default_link_ksettings(struct net_device *dev,
3561				       struct ethtool_link_ksettings *cmd)
3562{
3563	ethtool_link_ksettings_zero_link_mode(cmd, supported);
3564	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3565	cmd->base.speed		= SPEED_10000;
3566	cmd->base.duplex	= DUPLEX_FULL;
3567	cmd->base.port		= PORT_TP;
3568	cmd->base.phy_address	= 0;
3569	cmd->base.autoneg	= AUTONEG_DISABLE;
3570}
3571
3572static int tun_get_link_ksettings(struct net_device *dev,
3573				  struct ethtool_link_ksettings *cmd)
3574{
3575	struct tun_struct *tun = netdev_priv(dev);
3576
3577	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3578	return 0;
3579}
3580
3581static int tun_set_link_ksettings(struct net_device *dev,
3582				  const struct ethtool_link_ksettings *cmd)
3583{
3584	struct tun_struct *tun = netdev_priv(dev);
3585
3586	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3587	return 0;
3588}
3589
3590static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3591{
3592	struct tun_struct *tun = netdev_priv(dev);
3593
3594	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
3595	strscpy(info->version, DRV_VERSION, sizeof(info->version));
3596
3597	switch (tun->flags & TUN_TYPE_MASK) {
3598	case IFF_TUN:
3599		strscpy(info->bus_info, "tun", sizeof(info->bus_info));
3600		break;
3601	case IFF_TAP:
3602		strscpy(info->bus_info, "tap", sizeof(info->bus_info));
3603		break;
3604	}
3605}
3606
3607static u32 tun_get_msglevel(struct net_device *dev)
3608{
3609	struct tun_struct *tun = netdev_priv(dev);
3610
3611	return tun->msg_enable;
3612}
3613
3614static void tun_set_msglevel(struct net_device *dev, u32 value)
3615{
3616	struct tun_struct *tun = netdev_priv(dev);
3617
3618	tun->msg_enable = value;
3619}
3620
3621static int tun_get_coalesce(struct net_device *dev,
3622			    struct ethtool_coalesce *ec,
3623			    struct kernel_ethtool_coalesce *kernel_coal,
3624			    struct netlink_ext_ack *extack)
3625{
3626	struct tun_struct *tun = netdev_priv(dev);
3627
3628	ec->rx_max_coalesced_frames = tun->rx_batched;
3629
3630	return 0;
3631}
3632
3633static int tun_set_coalesce(struct net_device *dev,
3634			    struct ethtool_coalesce *ec,
3635			    struct kernel_ethtool_coalesce *kernel_coal,
3636			    struct netlink_ext_ack *extack)
3637{
3638	struct tun_struct *tun = netdev_priv(dev);
3639
3640	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3641		tun->rx_batched = NAPI_POLL_WEIGHT;
3642	else
3643		tun->rx_batched = ec->rx_max_coalesced_frames;
3644
3645	return 0;
3646}
3647
3648static const struct ethtool_ops tun_ethtool_ops = {
3649	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3650	.get_drvinfo	= tun_get_drvinfo,
3651	.get_msglevel	= tun_get_msglevel,
3652	.set_msglevel	= tun_set_msglevel,
3653	.get_link	= ethtool_op_get_link,
3654	.get_ts_info	= ethtool_op_get_ts_info,
3655	.get_coalesce   = tun_get_coalesce,
3656	.set_coalesce   = tun_set_coalesce,
3657	.get_link_ksettings = tun_get_link_ksettings,
3658	.set_link_ksettings = tun_set_link_ksettings,
3659};
3660
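    /* The device's tx_queue_len changed: resize the ptr_ring of every
     * attached and detached queue to match, dropping entries that no
     * longer fit via tun_ptr_free().
     */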
3661static int tun_queue_resize(struct tun_struct *tun)
3662{
3663	struct net_device *dev = tun->dev;
3664	struct tun_file *tfile;
3665	struct ptr_ring **rings;
3666	int n = tun->numqueues + tun->numdisabled;
3667	int ret, i;
3668
3669	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3670	if (!rings)
3671		return -ENOMEM;
3672
3673	for (i = 0; i < tun->numqueues; i++) {
3674		tfile = rtnl_dereference(tun->tfiles[i]);
3675		rings[i] = &tfile->tx_ring;
3676	}
3677	list_for_each_entry(tfile, &tun->disabled, next)
3678		rings[i++] = &tfile->tx_ring;
3679
3680	ret = ptr_ring_resize_multiple(rings, n,
3681				       dev->tx_queue_len, GFP_KERNEL,
3682				       tun_ptr_free);
3683
3684	kfree(rings);
3685	return ret;
3686}
3687
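    /* Netdevice notifier: resize the per-queue rings when the tx queue
     * length changes, and wake up writers when the device comes up.
     */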
3688static int tun_device_event(struct notifier_block *unused,
3689			    unsigned long event, void *ptr)
3690{
3691	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3692	struct tun_struct *tun = netdev_priv(dev);
3693	int i;
3694
3695	if (dev->rtnl_link_ops != &tun_link_ops)
3696		return NOTIFY_DONE;
3697
3698	switch (event) {
3699	case NETDEV_CHANGE_TX_QUEUE_LEN:
3700		if (tun_queue_resize(tun))
3701			return NOTIFY_BAD;
3702		break;
3703	case NETDEV_UP:
3704		for (i = 0; i < tun->numqueues; i++) {
3705			struct tun_file *tfile;
3706
3707			tfile = rtnl_dereference(tun->tfiles[i]);
3708			tfile->socket.sk->sk_write_space(tfile->socket.sk);
3709		}
3710		break;
3711	default:
3712		break;
3713	}
3714
3715	return NOTIFY_DONE;
3716}
3717
3718static struct notifier_block tun_notifier_block __read_mostly = {
3719	.notifier_call	= tun_device_event,
3720};
3721
3722static int __init tun_init(void)
3723{
3724	int ret = 0;
3725
3726	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3727
3728	ret = rtnl_link_register(&tun_link_ops);
3729	if (ret) {
3730		pr_err("Can't register link_ops\n");
3731		goto err_linkops;
3732	}
3733
3734	ret = misc_register(&tun_miscdev);
3735	if (ret) {
3736		pr_err("Can't register misc device %d\n", TUN_MINOR);
3737		goto err_misc;
3738	}
3739
3740	ret = register_netdevice_notifier(&tun_notifier_block);
3741	if (ret) {
3742		pr_err("Can't register netdevice notifier\n");
3743		goto err_notifier;
3744	}
3745
3746	return 0;
3747
3748err_notifier:
3749	misc_deregister(&tun_miscdev);
3750err_misc:
3751	rtnl_link_unregister(&tun_link_ops);
3752err_linkops:
3753	return ret;
3754}
3755
3756static void __exit tun_cleanup(void)
3757{
3758	misc_deregister(&tun_miscdev);
3759	rtnl_link_unregister(&tun_link_ops);
3760	unregister_netdevice_notifier(&tun_notifier_block);
3761}
3762
3763/* Get the underlying socket object from a tun file.  Returns an error unless
3764 * the file is attached to a device.  The returned object works like a packet
3765 * socket; it can be used for sock_sendmsg/sock_recvmsg.  The caller must hold
3766 * a reference to the file for as long as the socket is in use. */
3767struct socket *tun_get_socket(struct file *file)
3768{
3769	struct tun_file *tfile;
3770	if (file->f_op != &tun_fops)
3771		return ERR_PTR(-EINVAL);
3772	tfile = file->private_data;
3773	if (!tfile)
3774		return ERR_PTR(-EBADFD);
3775	return &tfile->socket;
3776}
3777EXPORT_SYMBOL_GPL(tun_get_socket);
3778
3779struct ptr_ring *tun_get_tx_ring(struct file *file)
3780{
3781	struct tun_file *tfile;
3782
3783	if (file->f_op != &tun_fops)
3784		return ERR_PTR(-EINVAL);
3785	tfile = file->private_data;
3786	if (!tfile)
3787		return ERR_PTR(-EBADFD);
3788	return &tfile->tx_ring;
3789}
3790EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3791
3792module_init(tun_init);
3793module_exit(tun_cleanup);
3794MODULE_DESCRIPTION(DRV_DESCRIPTION);
3795MODULE_AUTHOR(DRV_COPYRIGHT);
3796MODULE_LICENSE("GPL");
3797MODULE_ALIAS_MISCDEV(TUN_MINOR);
3798MODULE_ALIAS("devname:net/tun");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  TUN - Universal TUN/TAP device driver.
   4 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
   5 *
   6 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
   7 */
   8
   9/*
  10 *  Changes:
  11 *
  12 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  13 *    Add TUNSETLINK ioctl to set the link encapsulation
  14 *
  15 *  Mark Smith <markzzzsmith@yahoo.com.au>
  16 *    Use eth_random_addr() for tap MAC address.
  17 *
  18 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
  19 *    Fixes in packet dropping, queue length setting and queue wakeup.
  20 *    Increased default tx queue length.
  21 *    Added ethtool API.
  22 *    Minor cleanups
  23 *
  24 *  Daniel Podlejski <underley@underley.eu.org>
  25 *    Modifications for 2.3.99-pre5 kernel.
  26 */
  27
  28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  29
  30#define DRV_NAME	"tun"
  31#define DRV_VERSION	"1.6"
  32#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
  33#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
  34
  35#include <linux/module.h>
  36#include <linux/errno.h>
  37#include <linux/kernel.h>
  38#include <linux/sched/signal.h>
  39#include <linux/major.h>
  40#include <linux/slab.h>
  41#include <linux/poll.h>
  42#include <linux/fcntl.h>
  43#include <linux/init.h>
  44#include <linux/skbuff.h>
  45#include <linux/netdevice.h>
  46#include <linux/etherdevice.h>
  47#include <linux/miscdevice.h>
  48#include <linux/ethtool.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/compat.h>
  51#include <linux/if.h>
  52#include <linux/if_arp.h>
  53#include <linux/if_ether.h>
  54#include <linux/if_tun.h>
  55#include <linux/if_vlan.h>
  56#include <linux/crc32.h>
  57#include <linux/nsproxy.h>
  58#include <linux/virtio_net.h>
  59#include <linux/rcupdate.h>
  60#include <net/net_namespace.h>
  61#include <net/netns/generic.h>
  62#include <net/rtnetlink.h>
  63#include <net/sock.h>
  64#include <net/xdp.h>
 
  65#include <linux/seq_file.h>
  66#include <linux/uio.h>
  67#include <linux/skb_array.h>
  68#include <linux/bpf.h>
  69#include <linux/bpf_trace.h>
  70#include <linux/mutex.h>
 
 
 
 
 
 
 
 
  71
  72#include <linux/uaccess.h>
  73#include <linux/proc_fs.h>
  74
  75static void tun_default_link_ksettings(struct net_device *dev,
  76				       struct ethtool_link_ksettings *cmd);
  77
  78/* Uncomment to enable debugging */
  79/* #define TUN_DEBUG 1 */
  80
  81#ifdef TUN_DEBUG
  82static int debug;
  83
  84#define tun_debug(level, tun, fmt, args...)			\
  85do {								\
  86	if (tun->debug)						\
  87		netdev_printk(level, tun->dev, fmt, ##args);	\
  88} while (0)
  89#define DBG1(level, fmt, args...)				\
  90do {								\
  91	if (debug == 2)						\
  92		printk(level fmt, ##args);			\
  93} while (0)
  94#else
  95#define tun_debug(level, tun, fmt, args...)			\
  96do {								\
  97	if (0)							\
  98		netdev_printk(level, tun->dev, fmt, ##args);	\
  99} while (0)
 100#define DBG1(level, fmt, args...)				\
 101do {								\
 102	if (0)							\
 103		printk(level fmt, ##args);			\
 104} while (0)
 105#endif
 106
 107#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
 108
 109/* TUN device flags */
 110
 111/* IFF_ATTACH_QUEUE is never stored in device flags,
 112 * overload it to mean fasync when stored there.
 113 */
 114#define TUN_FASYNC	IFF_ATTACH_QUEUE
 115/* High bits in flags field are unused. */
 116#define TUN_VNET_LE     0x80000000
 117#define TUN_VNET_BE     0x40000000
 118
 119#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
 120		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
 121
 122#define GOODCOPY_LEN 128
 123
 124#define FLT_EXACT_COUNT 8
 125struct tap_filter {
 126	unsigned int    count;    /* Number of addrs. Zero means disabled */
 127	u32             mask[2];  /* Mask of the hashed addrs */
 128	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 129};
 130
 131/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 132 * to max number of VCPUs in guest. */
 133#define MAX_TAP_QUEUES 256
 134#define MAX_TAP_FLOWS  4096
 135
 136#define TUN_FLOW_EXPIRE (3 * HZ)
 137
 138struct tun_pcpu_stats {
 139	u64 rx_packets;
 140	u64 rx_bytes;
 141	u64 tx_packets;
 142	u64 tx_bytes;
 143	struct u64_stats_sync syncp;
 144	u32 rx_dropped;
 145	u32 tx_dropped;
 146	u32 rx_frame_errors;
 147};
 148
 149/* A tun_file connects an open character device to a tuntap netdevice. It
 150 * also contains all socket related structures (except sock_fprog and tap_filter)
 151 * to serve as one transmit queue for tuntap device. The sock_fprog and
 152 * tap_filter were kept in tun_struct since they were used for filtering for the
 153 * netdevice not for a specific queue (at least I didn't see the requirement for
 154 * this).
 155 *
 156 * RCU usage:
 157 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 158 * other can only be read while rcu_read_lock or rtnl_lock is held.
 159 */
 160struct tun_file {
 161	struct sock sk;
 162	struct socket socket;
 163	struct tun_struct __rcu *tun;
 164	struct fasync_struct *fasync;
 165	/* only used for fasnyc */
 166	unsigned int flags;
 167	union {
 168		u16 queue_index;
 169		unsigned int ifindex;
 170	};
 171	struct napi_struct napi;
 172	bool napi_enabled;
 173	bool napi_frags_enabled;
 174	struct mutex napi_mutex;	/* Protects access to the above napi */
 175	struct list_head next;
 176	struct tun_struct *detached;
 177	struct ptr_ring tx_ring;
 178	struct xdp_rxq_info xdp_rxq;
 179};
 180
 181struct tun_page {
 182	struct page *page;
 183	int count;
 184};
 185
 186struct tun_flow_entry {
 187	struct hlist_node hash_link;
 188	struct rcu_head rcu;
 189	struct tun_struct *tun;
 190
 191	u32 rxhash;
 192	u32 rps_rxhash;
 193	int queue_index;
 194	unsigned long updated ____cacheline_aligned_in_smp;
 195};
 196
 197#define TUN_NUM_FLOW_ENTRIES 1024
 198#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
 199
 200struct tun_prog {
 201	struct rcu_head rcu;
 202	struct bpf_prog *prog;
 203};
 204
 205/* Since the socket were moved to tun_file, to preserve the behavior of persist
 206 * device, socket filter, sndbuf and vnet header size were restore when the
 207 * file were attached to a persist device.
 208 */
 209struct tun_struct {
 210	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
 211	unsigned int            numqueues;
 212	unsigned int 		flags;
 213	kuid_t			owner;
 214	kgid_t			group;
 215
 216	struct net_device	*dev;
 217	netdev_features_t	set_features;
 218#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 219			  NETIF_F_TSO6)
 220
 221	int			align;
 222	int			vnet_hdr_sz;
 223	int			sndbuf;
 224	struct tap_filter	txflt;
 225	struct sock_fprog	fprog;
 226	/* protected by rtnl lock */
 227	bool			filter_attached;
 228#ifdef TUN_DEBUG
 229	int debug;
 230#endif
 231	spinlock_t lock;
 232	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
 233	struct timer_list flow_gc_timer;
 234	unsigned long ageing_time;
 235	unsigned int numdisabled;
 236	struct list_head disabled;
 237	void *security;
 238	u32 flow_count;
 239	u32 rx_batched;
 240	struct tun_pcpu_stats __percpu *pcpu_stats;
 241	struct bpf_prog __rcu *xdp_prog;
 242	struct tun_prog __rcu *steering_prog;
 243	struct tun_prog __rcu *filter_prog;
 244	struct ethtool_link_ksettings link_ksettings;
 
 
 
 245};
 246
 247struct veth {
 248	__be16 h_vlan_proto;
 249	__be16 h_vlan_TCI;
 250};
 251
 252bool tun_is_xdp_frame(void *ptr)
 253{
 254	return (unsigned long)ptr & TUN_XDP_FLAG;
 255}
 256EXPORT_SYMBOL(tun_is_xdp_frame);
 257
 258void *tun_xdp_to_ptr(void *ptr)
 259{
 260	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
 261}
 262EXPORT_SYMBOL(tun_xdp_to_ptr);
 263
 264void *tun_ptr_to_xdp(void *ptr)
 265{
 266	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
 267}
 268EXPORT_SYMBOL(tun_ptr_to_xdp);
 269
 270static int tun_napi_receive(struct napi_struct *napi, int budget)
 271{
 272	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
 273	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
 274	struct sk_buff_head process_queue;
 275	struct sk_buff *skb;
 276	int received = 0;
 277
 278	__skb_queue_head_init(&process_queue);
 279
 280	spin_lock(&queue->lock);
 281	skb_queue_splice_tail_init(queue, &process_queue);
 282	spin_unlock(&queue->lock);
 283
 284	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
 285		napi_gro_receive(napi, skb);
 286		++received;
 287	}
 288
 289	if (!skb_queue_empty(&process_queue)) {
 290		spin_lock(&queue->lock);
 291		skb_queue_splice(&process_queue, queue);
 292		spin_unlock(&queue->lock);
 293	}
 294
 295	return received;
 296}
 297
 298static int tun_napi_poll(struct napi_struct *napi, int budget)
 299{
 300	unsigned int received;
 301
 302	received = tun_napi_receive(napi, budget);
 303
 304	if (received < budget)
 305		napi_complete_done(napi, received);
 306
 307	return received;
 308}
 309
 310static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
 311			  bool napi_en, bool napi_frags)
 312{
 313	tfile->napi_enabled = napi_en;
 314	tfile->napi_frags_enabled = napi_en && napi_frags;
 315	if (napi_en) {
 316		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
 317			       NAPI_POLL_WEIGHT);
 318		napi_enable(&tfile->napi);
 319	}
 320}
 321
 322static void tun_napi_disable(struct tun_file *tfile)
 323{
 324	if (tfile->napi_enabled)
 325		napi_disable(&tfile->napi);
 326}
 327
 328static void tun_napi_del(struct tun_file *tfile)
 329{
 330	if (tfile->napi_enabled)
 331		netif_napi_del(&tfile->napi);
 332}
 333
 334static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 335{
 336	return tfile->napi_frags_enabled;
 337}
 338
 339#ifdef CONFIG_TUN_VNET_CROSS_LE
 340static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 341{
 342	return tun->flags & TUN_VNET_BE ? false :
 343		virtio_legacy_is_little_endian();
 344}
 345
 346static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 347{
 348	int be = !!(tun->flags & TUN_VNET_BE);
 349
 350	if (put_user(be, argp))
 351		return -EFAULT;
 352
 353	return 0;
 354}
 355
 356static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 357{
 358	int be;
 359
 360	if (get_user(be, argp))
 361		return -EFAULT;
 362
 363	if (be)
 364		tun->flags |= TUN_VNET_BE;
 365	else
 366		tun->flags &= ~TUN_VNET_BE;
 367
 368	return 0;
 369}
 370#else
 371static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 372{
 373	return virtio_legacy_is_little_endian();
 374}
 375
 376static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 377{
 378	return -EINVAL;
 379}
 380
 381static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 382{
 383	return -EINVAL;
 384}
 385#endif /* CONFIG_TUN_VNET_CROSS_LE */
 386
 387static inline bool tun_is_little_endian(struct tun_struct *tun)
 388{
 389	return tun->flags & TUN_VNET_LE ||
 390		tun_legacy_is_little_endian(tun);
 391}
 392
 393static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
 394{
 395	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
 396}
 397
 398static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
 399{
 400	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
 401}
 402
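/* Editor's note: the two helpers above select the byte order of virtio-net
 * header fields at run time: little-endian if userspace negotiated modern
 * virtio (TUN_VNET_LE), otherwise the legacy rule, optionally overridden
 * via TUNSETVNETBE under CONFIG_TUN_VNET_CROSS_LE.  A hedged userspace
 * sketch forcing the little-endian layout ("fd" is an illustrative open
 * /dev/net/tun descriptor created with IFF_VNET_HDR):
 *
 *	int le = 1;
 *
 *	if (ioctl(fd, TUNSETVNETLE, &le) < 0)
 *		perror("TUNSETVNETLE");
 */
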
 403static inline u32 tun_hashfn(u32 rxhash)
 404{
 405	return rxhash & TUN_MASK_FLOW_ENTRIES;
 406}
 407
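/* Editor's note: tun_hashfn() relies on TUN_NUM_FLOW_ENTRIES being a power
 * of two, so "rxhash & (N - 1)" equals "rxhash % N"; e.g. with N = 1024
 * the mask is 0x3ff and rxhash 0x12345 lands in bucket 0x345.  A
 * compile-time guard along these lines would catch a non-power-of-two
 * resize (sketch only, not present in this version of the driver):
 *
 *	BUILD_BUG_ON(TUN_NUM_FLOW_ENTRIES & TUN_MASK_FLOW_ENTRIES);
 */
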
 408static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 409{
 410	struct tun_flow_entry *e;
 411
 412	hlist_for_each_entry_rcu(e, head, hash_link) {
 413		if (e->rxhash == rxhash)
 414			return e;
 415	}
 416	return NULL;
 417}
 418
 419static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 420					      struct hlist_head *head,
 421					      u32 rxhash, u16 queue_index)
 422{
 423	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
 424
 425	if (e) {
 426		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
 427			  rxhash, queue_index);
 428		e->updated = jiffies;
 429		e->rxhash = rxhash;
 430		e->rps_rxhash = 0;
 431		e->queue_index = queue_index;
 432		e->tun = tun;
 433		hlist_add_head_rcu(&e->hash_link, head);
 434		++tun->flow_count;
 435	}
 436	return e;
 437}
 438
 439static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 440{
 441	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 442		  e->rxhash, e->queue_index);
 443	hlist_del_rcu(&e->hash_link);
 444	kfree_rcu(e, rcu);
 445	--tun->flow_count;
 446}
 447
 448static void tun_flow_flush(struct tun_struct *tun)
 449{
 450	int i;
 451
 452	spin_lock_bh(&tun->lock);
 453	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 454		struct tun_flow_entry *e;
 455		struct hlist_node *n;
 456
 457		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
 458			tun_flow_delete(tun, e);
 459	}
 460	spin_unlock_bh(&tun->lock);
 461}
 462
 463static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
 464{
 465	int i;
 466
 467	spin_lock_bh(&tun->lock);
 468	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 469		struct tun_flow_entry *e;
 470		struct hlist_node *n;
 471
 472		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 473			if (e->queue_index == queue_index)
 474				tun_flow_delete(tun, e);
 475		}
 476	}
 477	spin_unlock_bh(&tun->lock);
 478}
 479
 480static void tun_flow_cleanup(struct timer_list *t)
 481{
 482	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
 483	unsigned long delay = tun->ageing_time;
 484	unsigned long next_timer = jiffies + delay;
 485	unsigned long count = 0;
 486	int i;
 487
 488	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
 489
 490	spin_lock(&tun->lock);
 491	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 492		struct tun_flow_entry *e;
 493		struct hlist_node *n;
 494
 495		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 496			unsigned long this_timer;
 497
 498			this_timer = e->updated + delay;
 499			if (time_before_eq(this_timer, jiffies)) {
 500				tun_flow_delete(tun, e);
 501				continue;
 502			}
 503			count++;
 504			if (time_before(this_timer, next_timer))
 505				next_timer = this_timer;
 506		}
 507	}
 508
 509	if (count)
 510		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
 511	spin_unlock(&tun->lock);
 512}
 513
 514static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 515			    struct tun_file *tfile)
 516{
 517	struct hlist_head *head;
 518	struct tun_flow_entry *e;
 519	unsigned long delay = tun->ageing_time;
 520	u16 queue_index = tfile->queue_index;
 521
 522	head = &tun->flows[tun_hashfn(rxhash)];
 523
 524	rcu_read_lock();
 525
 526	e = tun_flow_find(head, rxhash);
 527	if (likely(e)) {
 528		/* TODO: keep queueing to old queue until it's empty? */
 529		if (READ_ONCE(e->queue_index) != queue_index)
 530			WRITE_ONCE(e->queue_index, queue_index);
 531		if (e->updated != jiffies)
 532			e->updated = jiffies;
 533		sock_rps_record_flow_hash(e->rps_rxhash);
 534	} else {
 535		spin_lock_bh(&tun->lock);
 536		if (!tun_flow_find(head, rxhash) &&
 537		    tun->flow_count < MAX_TAP_FLOWS)
 538			tun_flow_create(tun, head, rxhash, queue_index);
 539
 540		if (!timer_pending(&tun->flow_gc_timer))
 541			mod_timer(&tun->flow_gc_timer,
 542				  round_jiffies_up(jiffies + delay));
 543		spin_unlock_bh(&tun->lock);
 544	}
 545
 546	rcu_read_unlock();
 547}
 548
  549/*
  550 * Save the hash received in the stack receive path and update the
  551 * flow_hash table accordingly.
  552 */
 553static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 554{
 555	if (unlikely(e->rps_rxhash != hash))
 556		e->rps_rxhash = hash;
 557}
 558
  559/* We try to identify a flow through its rxhash. The reason we do not
  560 * check the rx queue number is that some cards (e.g. the 82599) choose
  561 * the rxq based on the txq where the last packet of the flow was sent.
  562 * As the userspace application moves between processors, we may get a
  563 * different rxq number here.
  564 */
 565static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 566{
 567	struct tun_flow_entry *e;
 568	u32 txq = 0;
 569	u32 numqueues = 0;
 570
 571	numqueues = READ_ONCE(tun->numqueues);
 572
 573	txq = __skb_get_hash_symmetric(skb);
 574	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
 575	if (e) {
 576		tun_flow_save_rps_rxhash(e, txq);
 577		txq = e->queue_index;
 578	} else {
 579		/* use multiply and shift instead of expensive divide */
 580		txq = ((u64)txq * numqueues) >> 32;
 581	}
 582
 583	return txq;
 584}
 585
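/* Editor's note: the "multiply and shift" above reduces a 32-bit hash into
 * [0, numqueues) without a division: ((u64)txq * numqueues) >> 32 treats
 * txq as a fraction of 2^32 and scales it by numqueues.  Worked example
 * (illustrative): with numqueues = 4 and txq = 0xc0000000 (three quarters
 * of the 32-bit range), ((u64)0xc0000000 * 4) >> 32 = 3, the last queue.
 */
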
 586static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 587{
 588	struct tun_prog *prog;
 589	u32 numqueues;
 590	u16 ret = 0;
 591
 592	numqueues = READ_ONCE(tun->numqueues);
 593	if (!numqueues)
 594		return 0;
 595
 596	prog = rcu_dereference(tun->steering_prog);
 597	if (prog)
 598		ret = bpf_prog_run_clear_cb(prog->prog, skb);
 599
 600	return ret % numqueues;
 601}
 602
 603static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 604			    struct net_device *sb_dev)
 605{
 606	struct tun_struct *tun = netdev_priv(dev);
 607	u16 ret;
 608
 609	rcu_read_lock();
 610	if (rcu_dereference(tun->steering_prog))
 611		ret = tun_ebpf_select_queue(tun, skb);
 612	else
 613		ret = tun_automq_select_queue(tun, skb);
 614	rcu_read_unlock();
 615
 616	return ret;
 617}
 618
 619static inline bool tun_not_capable(struct tun_struct *tun)
 620{
 621	const struct cred *cred = current_cred();
 622	struct net *net = dev_net(tun->dev);
 623
 624	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
 625		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
 626		!ns_capable(net->user_ns, CAP_NET_ADMIN);
 627}
 628
 629static void tun_set_real_num_queues(struct tun_struct *tun)
 630{
 631	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
 632	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
 633}
 634
 635static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
 636{
 637	tfile->detached = tun;
 638	list_add_tail(&tfile->next, &tun->disabled);
 639	++tun->numdisabled;
 640}
 641
 642static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
 643{
 644	struct tun_struct *tun = tfile->detached;
 645
 646	tfile->detached = NULL;
 647	list_del_init(&tfile->next);
 648	--tun->numdisabled;
 649	return tun;
 650}
 651
 652void tun_ptr_free(void *ptr)
 653{
 654	if (!ptr)
 655		return;
 656	if (tun_is_xdp_frame(ptr)) {
 657		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 658
 659		xdp_return_frame(xdpf);
 660	} else {
 661		__skb_array_destroy_skb(ptr);
 662	}
 663}
 664EXPORT_SYMBOL_GPL(tun_ptr_free);
 665
 666static void tun_queue_purge(struct tun_file *tfile)
 667{
 668	void *ptr;
 669
 670	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
 671		tun_ptr_free(ptr);
 672
 673	skb_queue_purge(&tfile->sk.sk_write_queue);
 674	skb_queue_purge(&tfile->sk.sk_error_queue);
 675}
 676
 677static void __tun_detach(struct tun_file *tfile, bool clean)
 678{
 679	struct tun_file *ntfile;
 680	struct tun_struct *tun;
 681
 682	tun = rtnl_dereference(tfile->tun);
 683
 684	if (tun && clean) {
 685		tun_napi_disable(tfile);
 686		tun_napi_del(tfile);
 687	}
 688
 689	if (tun && !tfile->detached) {
 690		u16 index = tfile->queue_index;
 691		BUG_ON(index >= tun->numqueues);
 692
 693		rcu_assign_pointer(tun->tfiles[index],
 694				   tun->tfiles[tun->numqueues - 1]);
 695		ntfile = rtnl_dereference(tun->tfiles[index]);
 696		ntfile->queue_index = index;
 697		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
 698				   NULL);
 699
 700		--tun->numqueues;
 701		if (clean) {
 702			RCU_INIT_POINTER(tfile->tun, NULL);
 703			sock_put(&tfile->sk);
 704		} else
 705			tun_disable_queue(tun, tfile);
 706
 707		synchronize_net();
 708		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
 709		/* Drop read queue */
 710		tun_queue_purge(tfile);
 711		tun_set_real_num_queues(tun);
 712	} else if (tfile->detached && clean) {
 713		tun = tun_enable_queue(tfile);
 714		sock_put(&tfile->sk);
 715	}
 716
 717	if (clean) {
 718		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
 719			netif_carrier_off(tun->dev);
 720
 721			if (!(tun->flags & IFF_PERSIST) &&
 722			    tun->dev->reg_state == NETREG_REGISTERED)
 723				unregister_netdevice(tun->dev);
 724		}
 725		if (tun)
 726			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 727		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
 728		sock_put(&tfile->sk);
 729	}
 730}
 731
 732static void tun_detach(struct tun_file *tfile, bool clean)
 733{
 734	struct tun_struct *tun;
 735	struct net_device *dev;
 736
 737	rtnl_lock();
 738	tun = rtnl_dereference(tfile->tun);
 739	dev = tun ? tun->dev : NULL;
 740	__tun_detach(tfile, clean);
 741	if (dev)
 742		netdev_state_change(dev);
 743	rtnl_unlock();
 744}
 745
 746static void tun_detach_all(struct net_device *dev)
 747{
 748	struct tun_struct *tun = netdev_priv(dev);
 749	struct tun_file *tfile, *tmp;
 750	int i, n = tun->numqueues;
 751
 752	for (i = 0; i < n; i++) {
 753		tfile = rtnl_dereference(tun->tfiles[i]);
 754		BUG_ON(!tfile);
 755		tun_napi_disable(tfile);
 756		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 757		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 758		RCU_INIT_POINTER(tfile->tun, NULL);
 759		--tun->numqueues;
 760	}
 761	list_for_each_entry(tfile, &tun->disabled, next) {
 762		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 763		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 764		RCU_INIT_POINTER(tfile->tun, NULL);
 765	}
 766	BUG_ON(tun->numqueues != 0);
 767
 768	synchronize_net();
 769	for (i = 0; i < n; i++) {
 770		tfile = rtnl_dereference(tun->tfiles[i]);
 771		tun_napi_del(tfile);
 772		/* Drop read queue */
 773		tun_queue_purge(tfile);
 774		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 775		sock_put(&tfile->sk);
 776	}
 777	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 778		tun_enable_queue(tfile);
 779		tun_queue_purge(tfile);
 780		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 781		sock_put(&tfile->sk);
 782	}
 783	BUG_ON(tun->numdisabled != 0);
 784
 785	if (tun->flags & IFF_PERSIST)
 786		module_put(THIS_MODULE);
 787}
 788
 789static int tun_attach(struct tun_struct *tun, struct file *file,
 790		      bool skip_filter, bool napi, bool napi_frags,
 791		      bool publish_tun)
 792{
 793	struct tun_file *tfile = file->private_data;
 794	struct net_device *dev = tun->dev;
 795	int err;
 796
 797	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
 798	if (err < 0)
 799		goto out;
 800
 801	err = -EINVAL;
 802	if (rtnl_dereference(tfile->tun) && !tfile->detached)
 803		goto out;
 804
 805	err = -EBUSY;
 806	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
 807		goto out;
 808
 809	err = -E2BIG;
 810	if (!tfile->detached &&
 811	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
 812		goto out;
 813
 814	err = 0;
 815
 816	/* Re-attach the filter to persist device */
 817	if (!skip_filter && (tun->filter_attached == true)) {
 818		lock_sock(tfile->socket.sk);
 819		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 820		release_sock(tfile->socket.sk);
  821		if (err < 0)
  822			goto out;
 823	}
 824
 825	if (!tfile->detached &&
 826	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
 827			    GFP_KERNEL, tun_ptr_free)) {
 828		err = -ENOMEM;
 829		goto out;
 830	}
 831
 832	tfile->queue_index = tun->numqueues;
 833	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
 834
 835	if (tfile->detached) {
 836		/* Re-attach detached tfile, updating XDP queue_index */
 837		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
 838
 839		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
 840			tfile->xdp_rxq.queue_index = tfile->queue_index;
 841	} else {
 842		/* Setup XDP RX-queue info, for new tfile getting attached */
 843		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
 844				       tun->dev, tfile->queue_index);
 845		if (err < 0)
 846			goto out;
 847		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
 848						 MEM_TYPE_PAGE_SHARED, NULL);
 849		if (err < 0) {
 850			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 851			goto out;
 852		}
 853		err = 0;
 854	}
 855
 856	if (tfile->detached) {
 857		tun_enable_queue(tfile);
 858	} else {
 859		sock_hold(&tfile->sk);
 860		tun_napi_init(tun, tfile, napi, napi_frags);
 861	}
 862
 863	if (rtnl_dereference(tun->xdp_prog))
 864		sock_set_flag(&tfile->sk, SOCK_XDP);
 865
 866	/* device is allowed to go away first, so no need to hold extra
 867	 * refcnt.
 868	 */
 869
 870	/* Publish tfile->tun and tun->tfiles only after we've fully
  871	 * initialized tfile; otherwise we risk using a half-initialized
 872	 * object.
 873	 */
 874	if (publish_tun)
 875		rcu_assign_pointer(tfile->tun, tun);
 876	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 877	tun->numqueues++;
 878	tun_set_real_num_queues(tun);
 879out:
 880	return err;
 881}
 882
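/* Editor's note: the tail of tun_attach() is the canonical RCU publish
 * pattern: fully initialise the object, then make it reachable with
 * rcu_assign_pointer(), which orders all earlier stores before the
 * pointer store.  Readers do the mirror image under rcu_read_lock(),
 * e.g. (condensed from tun_get() just below):
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);
 *	rcu_read_unlock();
 */
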
 883static struct tun_struct *tun_get(struct tun_file *tfile)
 884{
 885	struct tun_struct *tun;
 886
 887	rcu_read_lock();
 888	tun = rcu_dereference(tfile->tun);
 889	if (tun)
 890		dev_hold(tun->dev);
 891	rcu_read_unlock();
 892
 893	return tun;
 894}
 895
 896static void tun_put(struct tun_struct *tun)
 897{
 898	dev_put(tun->dev);
 899}
 900
 901/* TAP filtering */
 902static void addr_hash_set(u32 *mask, const u8 *addr)
 903{
 904	int n = ether_crc(ETH_ALEN, addr) >> 26;
 905	mask[n >> 5] |= (1 << (n & 31));
 906}
 907
 908static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
 909{
 910	int n = ether_crc(ETH_ALEN, addr) >> 26;
 911	return mask[n >> 5] & (1 << (n & 31));
 912}
 913
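/* Editor's note: addr_hash_set()/addr_hash_test() treat mask[2] as a
 * 64-bit Bloom-style bitmap.  ether_crc() returns a 32-bit CRC and
 * ">> 26" keeps its top six bits, an index n in [0, 63]; "n >> 5" then
 * picks one of the two u32 words and "n & 31" the bit inside it.  Worked
 * example: n = 37 sets bit 5 of mask[1].  False positives are possible,
 * false negatives are not, which is what a receive filter needs.
 */
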
 914static int update_filter(struct tap_filter *filter, void __user *arg)
 915{
 916	struct { u8 u[ETH_ALEN]; } *addr;
 917	struct tun_filter uf;
 918	int err, alen, n, nexact;
 919
 920	if (copy_from_user(&uf, arg, sizeof(uf)))
 921		return -EFAULT;
 922
 923	if (!uf.count) {
 924		/* Disabled */
 925		filter->count = 0;
 926		return 0;
 927	}
 928
 929	alen = ETH_ALEN * uf.count;
 930	addr = memdup_user(arg + sizeof(uf), alen);
 931	if (IS_ERR(addr))
 932		return PTR_ERR(addr);
 933
  934	/* The filter is updated without holding any locks, which is
  935	 * perfectly safe: we disable it first, and in the worst
  936	 * case we'll accept a few undesired packets. */
 937	filter->count = 0;
 938	wmb();
 939
 940	/* Use first set of addresses as an exact filter */
 941	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
 942		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
 943
 944	nexact = n;
 945
  946	/* Remaining multicast addresses are hashed;
  947	 * any unicast address leaves the filter disabled. */
 948	memset(filter->mask, 0, sizeof(filter->mask));
 949	for (; n < uf.count; n++) {
 950		if (!is_multicast_ether_addr(addr[n].u)) {
 951			err = 0; /* no filter */
 952			goto free_addr;
 953		}
 954		addr_hash_set(filter->mask, addr[n].u);
 955	}
 956
 957	/* For ALLMULTI just set the mask to all ones.
 958	 * This overrides the mask populated above. */
 959	if ((uf.flags & TUN_FLT_ALLMULTI))
 960		memset(filter->mask, ~0, sizeof(filter->mask));
 961
 962	/* Now enable the filter */
 963	wmb();
 964	filter->count = nexact;
 965
 966	/* Return the number of exact filters */
 967	err = nexact;
 968free_addr:
 969	kfree(addr);
 970	return err;
 971}
 972
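/* Editor's note: update_filter() is reached via the TUNSETTXFILTER ioctl.
 * A hedged userspace sketch installing one exact MAC plus the
 * all-multicast bit ("fd" and "mac" are illustrative; the buffer is a
 * struct tun_filter immediately followed by uf.count six-byte addresses):
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[1][ETH_ALEN];
 *	} req = { .uf = { .flags = TUN_FLT_ALLMULTI, .count = 1 } };
 *
 *	memcpy(req.addrs[0], mac, ETH_ALEN);
 *	if (ioctl(fd, TUNSETTXFILTER, &req) < 0)
 *		perror("TUNSETTXFILTER");
 */
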
 973/* Returns: 0 - drop, !=0 - accept */
 974static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
 975{
  976	/* Cannot use eth_hdr(skb) here because the skb mac header is
  977	 * not yet set at this point. */
 978	struct ethhdr *eh = (struct ethhdr *) skb->data;
 979	int i;
 980
 981	/* Exact match */
 982	for (i = 0; i < filter->count; i++)
 983		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
 984			return 1;
 985
 986	/* Inexact match (multicast only) */
 987	if (is_multicast_ether_addr(eh->h_dest))
 988		return addr_hash_test(filter->mask, eh->h_dest);
 989
 990	return 0;
 991}
 992
 993/*
 994 * Checks whether the packet is accepted or not.
 995 * Returns: 0 - drop, !=0 - accept
 996 */
 997static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 998{
 999	if (!filter->count)
1000		return 1;
1001
1002	return run_filter(filter, skb);
1003}
1004
1005/* Network device part of the driver */
1006
1007static const struct ethtool_ops tun_ethtool_ops;
1008
1009/* Net device detach from fd. */
1010static void tun_net_uninit(struct net_device *dev)
1011{
1012	tun_detach_all(dev);
1013}
1014
1015/* Net device open. */
1016static int tun_net_open(struct net_device *dev)
1017{
1018	netif_tx_start_all_queues(dev);
1019
1020	return 0;
1021}
1022
1023/* Net device close. */
1024static int tun_net_close(struct net_device *dev)
1025{
1026	netif_tx_stop_all_queues(dev);
1027	return 0;
1028}
1029
1030/* Net device start xmit */
1031static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1032{
1033#ifdef CONFIG_RPS
1034	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1035		/* Select queue was not called for the skbuff, so we extract the
1036		 * RPS hash and save it into the flow_table here.
1037		 */
1038		struct tun_flow_entry *e;
1039		__u32 rxhash;
1040
1041		rxhash = __skb_get_hash_symmetric(skb);
1042		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1043		if (e)
1044			tun_flow_save_rps_rxhash(e, rxhash);
1045	}
1046#endif
1047}
1048
1049static unsigned int run_ebpf_filter(struct tun_struct *tun,
1050				    struct sk_buff *skb,
1051				    int len)
1052{
1053	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1054
1055	if (prog)
1056		len = bpf_prog_run_clear_cb(prog->prog, skb);
1057
1058	return len;
1059}
1060
1061/* Net device start xmit */
1062static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1063{
1064	struct tun_struct *tun = netdev_priv(dev);
1065	int txq = skb->queue_mapping;
1066	struct tun_file *tfile;
1067	int len = skb->len;
1068
1069	rcu_read_lock();
1070	tfile = rcu_dereference(tun->tfiles[txq]);
1071
1072	/* Drop packet if interface is not attached */
1073	if (!tfile)
1074		goto drop;
1075
1076	if (!rcu_dereference(tun->steering_prog))
1077		tun_automq_xmit(tun, skb);
1078
1079	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
1080
1081	BUG_ON(!tfile);
1082
1083	/* Drop if the filter does not like it.
1084	 * This is a noop if the filter is disabled.
 1085	 * The filter can be enabled only for TAP devices. */
1086	if (!check_filter(&tun->txflt, skb))
1087		goto drop;
1088
1089	if (tfile->socket.sk->sk_filter &&
1090	    sk_filter(tfile->socket.sk, skb))
1091		goto drop;
1092
1093	len = run_ebpf_filter(tun, skb, len);
1094	if (len == 0 || pskb_trim(skb, len))
1095		goto drop;
1096
1097	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1098		goto drop;
1099
1100	skb_tx_timestamp(skb);
1101
1102	/* Orphan the skb - required as we might hang on to it
 1103	 * for an indefinite time.
1104	 */
1105	skb_orphan(skb);
1106
1107	nf_reset_ct(skb);
1108
1109	if (ptr_ring_produce(&tfile->tx_ring, skb))
1110		goto drop;
1111
1112	/* Notify and wake up reader process */
1113	if (tfile->flags & TUN_FASYNC)
1114		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1115	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1116
1117	rcu_read_unlock();
1118	return NETDEV_TX_OK;
1119
1120drop:
1121	this_cpu_inc(tun->pcpu_stats->tx_dropped);
1122	skb_tx_error(skb);
1123	kfree_skb(skb);
1124	rcu_read_unlock();
1125	return NET_XMIT_DROP;
1126}
1127
1128static void tun_net_mclist(struct net_device *dev)
1129{
1130	/*
 1131	 * This callback is supposed to deal with the mc filter in the
 1132	 * _rx_ path and has nothing to do with the _tx_ path.
 1133	 * In the rx path we always accept everything userspace gives us.
1134	 */
1135}
1136
1137static netdev_features_t tun_net_fix_features(struct net_device *dev,
1138	netdev_features_t features)
1139{
1140	struct tun_struct *tun = netdev_priv(dev);
1141
1142	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1143}
1144
1145static void tun_set_headroom(struct net_device *dev, int new_hr)
1146{
1147	struct tun_struct *tun = netdev_priv(dev);
1148
1149	if (new_hr < NET_SKB_PAD)
1150		new_hr = NET_SKB_PAD;
1151
1152	tun->align = new_hr;
1153}
1154
1155static void
1156tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1157{
1158	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1159	struct tun_struct *tun = netdev_priv(dev);
1160	struct tun_pcpu_stats *p;
1161	int i;
1162
1163	for_each_possible_cpu(i) {
1164		u64 rxpackets, rxbytes, txpackets, txbytes;
1165		unsigned int start;
1166
1167		p = per_cpu_ptr(tun->pcpu_stats, i);
1168		do {
1169			start = u64_stats_fetch_begin(&p->syncp);
1170			rxpackets	= p->rx_packets;
1171			rxbytes		= p->rx_bytes;
1172			txpackets	= p->tx_packets;
1173			txbytes		= p->tx_bytes;
1174		} while (u64_stats_fetch_retry(&p->syncp, start));
1175
1176		stats->rx_packets	+= rxpackets;
1177		stats->rx_bytes		+= rxbytes;
1178		stats->tx_packets	+= txpackets;
1179		stats->tx_bytes		+= txbytes;
1180
1181		/* u32 counters */
1182		rx_dropped	+= p->rx_dropped;
1183		rx_frame_errors	+= p->rx_frame_errors;
1184		tx_dropped	+= p->tx_dropped;
1185	}
1186	stats->rx_dropped  = rx_dropped;
1187	stats->rx_frame_errors = rx_frame_errors;
1188	stats->tx_dropped = tx_dropped;
1189}
1190
1191static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1192		       struct netlink_ext_ack *extack)
1193{
1194	struct tun_struct *tun = netdev_priv(dev);
1195	struct tun_file *tfile;
1196	struct bpf_prog *old_prog;
1197	int i;
1198
1199	old_prog = rtnl_dereference(tun->xdp_prog);
1200	rcu_assign_pointer(tun->xdp_prog, prog);
1201	if (old_prog)
1202		bpf_prog_put(old_prog);
1203
1204	for (i = 0; i < tun->numqueues; i++) {
1205		tfile = rtnl_dereference(tun->tfiles[i]);
1206		if (prog)
1207			sock_set_flag(&tfile->sk, SOCK_XDP);
1208		else
1209			sock_reset_flag(&tfile->sk, SOCK_XDP);
1210	}
1211	list_for_each_entry(tfile, &tun->disabled, next) {
1212		if (prog)
1213			sock_set_flag(&tfile->sk, SOCK_XDP);
1214		else
1215			sock_reset_flag(&tfile->sk, SOCK_XDP);
1216	}
1217
1218	return 0;
1219}
1220
1221static u32 tun_xdp_query(struct net_device *dev)
1222{
1223	struct tun_struct *tun = netdev_priv(dev);
1224	const struct bpf_prog *xdp_prog;
1225
1226	xdp_prog = rtnl_dereference(tun->xdp_prog);
1227	if (xdp_prog)
1228		return xdp_prog->aux->id;
1229
1230	return 0;
1231}
1232
1233static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1234{
1235	switch (xdp->command) {
1236	case XDP_SETUP_PROG:
1237		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1238	case XDP_QUERY_PROG:
1239		xdp->prog_id = tun_xdp_query(dev);
1240		return 0;
1241	default:
1242		return -EINVAL;
1243	}
1244}
1245
1246static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1247{
1248	if (new_carrier) {
1249		struct tun_struct *tun = netdev_priv(dev);
1250
1251		if (!tun->numqueues)
1252			return -EPERM;
1253
1254		netif_carrier_on(dev);
1255	} else {
1256		netif_carrier_off(dev);
1257	}
1258	return 0;
1259}
1260
1261static const struct net_device_ops tun_netdev_ops = {
1262	.ndo_uninit		= tun_net_uninit,
1263	.ndo_open		= tun_net_open,
1264	.ndo_stop		= tun_net_close,
1265	.ndo_start_xmit		= tun_net_xmit,
1266	.ndo_fix_features	= tun_net_fix_features,
1267	.ndo_select_queue	= tun_select_queue,
1268	.ndo_set_rx_headroom	= tun_set_headroom,
1269	.ndo_get_stats64	= tun_net_get_stats64,
1270	.ndo_change_carrier	= tun_net_change_carrier,
1271};
1272
1273static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1274{
1275	/* Notify and wake up reader process */
1276	if (tfile->flags & TUN_FASYNC)
1277		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1278	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1279}
1280
1281static int tun_xdp_xmit(struct net_device *dev, int n,
1282			struct xdp_frame **frames, u32 flags)
1283{
1284	struct tun_struct *tun = netdev_priv(dev);
1285	struct tun_file *tfile;
1286	u32 numqueues;
1287	int drops = 0;
1288	int cnt = n;
1289	int i;
1290
1291	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1292		return -EINVAL;
1293
1294	rcu_read_lock();
1295
1296resample:
1297	numqueues = READ_ONCE(tun->numqueues);
1298	if (!numqueues) {
1299		rcu_read_unlock();
1300		return -ENXIO; /* Caller will free/return all frames */
1301	}
1302
1303	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1304					    numqueues]);
1305	if (unlikely(!tfile))
1306		goto resample;
1307
1308	spin_lock(&tfile->tx_ring.producer_lock);
1309	for (i = 0; i < n; i++) {
1310		struct xdp_frame *xdp = frames[i];
 1311		/* Encode the XDP flag into the lowest bit so the consumer
 1312		 * can distinguish an XDP frame from an sk_buff.
 1313		 */
1314		void *frame = tun_xdp_to_ptr(xdp);
1315
1316		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1317			this_cpu_inc(tun->pcpu_stats->tx_dropped);
1318			xdp_return_frame_rx_napi(xdp);
1319			drops++;
1320		}
1321	}
1322	spin_unlock(&tfile->tx_ring.producer_lock);
1323
1324	if (flags & XDP_XMIT_FLUSH)
1325		__tun_xdp_flush_tfile(tfile);
1326
1327	rcu_read_unlock();
1328	return cnt - drops;
1329}
1330
1331static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1332{
1333	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
1334
1335	if (unlikely(!frame))
1336		return -EOVERFLOW;
1337
1338	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1339}
1340
1341static const struct net_device_ops tap_netdev_ops = {
1342	.ndo_uninit		= tun_net_uninit,
1343	.ndo_open		= tun_net_open,
1344	.ndo_stop		= tun_net_close,
1345	.ndo_start_xmit		= tun_net_xmit,
1346	.ndo_fix_features	= tun_net_fix_features,
1347	.ndo_set_rx_mode	= tun_net_mclist,
1348	.ndo_set_mac_address	= eth_mac_addr,
1349	.ndo_validate_addr	= eth_validate_addr,
1350	.ndo_select_queue	= tun_select_queue,
1351	.ndo_features_check	= passthru_features_check,
1352	.ndo_set_rx_headroom	= tun_set_headroom,
1353	.ndo_get_stats64	= tun_net_get_stats64,
1354	.ndo_bpf		= tun_xdp,
1355	.ndo_xdp_xmit		= tun_xdp_xmit,
1356	.ndo_change_carrier	= tun_net_change_carrier,
1357};
1358
1359static void tun_flow_init(struct tun_struct *tun)
1360{
1361	int i;
1362
1363	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1364		INIT_HLIST_HEAD(&tun->flows[i]);
1365
1366	tun->ageing_time = TUN_FLOW_EXPIRE;
1367	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1368	mod_timer(&tun->flow_gc_timer,
1369		  round_jiffies_up(jiffies + tun->ageing_time));
1370}
1371
1372static void tun_flow_uninit(struct tun_struct *tun)
1373{
1374	del_timer_sync(&tun->flow_gc_timer);
1375	tun_flow_flush(tun);
1376}
1377
1378#define MIN_MTU 68
1379#define MAX_MTU 65535
1380
1381/* Initialize net device. */
1382static void tun_net_init(struct net_device *dev)
1383{
1384	struct tun_struct *tun = netdev_priv(dev);
1385
1386	switch (tun->flags & TUN_TYPE_MASK) {
1387	case IFF_TUN:
1388		dev->netdev_ops = &tun_netdev_ops;
1389
1390		/* Point-to-Point TUN Device */
1391		dev->hard_header_len = 0;
1392		dev->addr_len = 0;
1393		dev->mtu = 1500;
1394
1395		/* Zero header length */
1396		dev->type = ARPHRD_NONE;
1397		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1398		break;
1399
1400	case IFF_TAP:
1401		dev->netdev_ops = &tap_netdev_ops;
1402		/* Ethernet TAP Device */
1403		ether_setup(dev);
1404		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1405		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1406
1407		eth_hw_addr_random(dev);
1408
1409		break;
1410	}
1411
1412	dev->min_mtu = MIN_MTU;
1413	dev->max_mtu = MAX_MTU - dev->hard_header_len;
1414}
1415
1416static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1417{
1418	struct sock *sk = tfile->socket.sk;
1419
1420	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1421}
1422
1423/* Character device part */
1424
1425/* Poll */
1426static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1427{
1428	struct tun_file *tfile = file->private_data;
1429	struct tun_struct *tun = tun_get(tfile);
1430	struct sock *sk;
1431	__poll_t mask = 0;
1432
1433	if (!tun)
1434		return EPOLLERR;
1435
1436	sk = tfile->socket.sk;
1437
1438	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
1439
1440	poll_wait(file, sk_sleep(sk), wait);
1441
1442	if (!ptr_ring_empty(&tfile->tx_ring))
1443		mask |= EPOLLIN | EPOLLRDNORM;
1444
1445	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
 1446	 * guarantee that EPOLLOUT is raised either here or by
 1447	 * tun_sock_write_space(). That way a process still gets a
 1448	 * notification after it writes to a down device and meets -EIO.
1449	 */
1450	if (tun_sock_writeable(tun, tfile) ||
1451	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1452	     tun_sock_writeable(tun, tfile)))
1453		mask |= EPOLLOUT | EPOLLWRNORM;
1454
1455	if (tun->dev->reg_state != NETREG_REGISTERED)
1456		mask = EPOLLERR;
1457
1458	tun_put(tun);
1459	return mask;
1460}
1461
1462static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1463					    size_t len,
1464					    const struct iov_iter *it)
1465{
1466	struct sk_buff *skb;
1467	size_t linear;
1468	int err;
1469	int i;
1470
1471	if (it->nr_segs > MAX_SKB_FRAGS + 1)
1472		return ERR_PTR(-ENOMEM);
1473
1474	local_bh_disable();
1475	skb = napi_get_frags(&tfile->napi);
1476	local_bh_enable();
1477	if (!skb)
1478		return ERR_PTR(-ENOMEM);
1479
1480	linear = iov_iter_single_seg_count(it);
1481	err = __skb_grow(skb, linear);
1482	if (err)
1483		goto free;
1484
1485	skb->len = len;
1486	skb->data_len = len - linear;
1487	skb->truesize += skb->data_len;
1488
1489	for (i = 1; i < it->nr_segs; i++) {
1490		size_t fragsz = it->iov[i].iov_len;
1491		struct page *page;
1492		void *frag;
1493
1494		if (fragsz == 0 || fragsz > PAGE_SIZE) {
1495			err = -EINVAL;
1496			goto free;
1497		}
1498		frag = netdev_alloc_frag(fragsz);
1499		if (!frag) {
1500			err = -ENOMEM;
1501			goto free;
1502		}
1503		page = virt_to_head_page(frag);
1504		skb_fill_page_desc(skb, i - 1, page,
1505				   frag - page_address(page), fragsz);
1506	}
1507
1508	return skb;
1509free:
1510	/* frees skb and all frags allocated with napi_alloc_frag() */
1511	napi_free_frags(&tfile->napi);
1512	return ERR_PTR(err);
1513}
1514
1515/* prepad is the amount to reserve at front.  len is length after that.
1516 * linear is a hint as to how much to copy (usually headers). */
1517static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1518				     size_t prepad, size_t len,
1519				     size_t linear, int noblock)
1520{
1521	struct sock *sk = tfile->socket.sk;
1522	struct sk_buff *skb;
1523	int err;
1524
1525	/* Under a page?  Don't bother with paged skb. */
1526	if (prepad + len < PAGE_SIZE || !linear)
1527		linear = len;
1528
1529	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1530				   &err, 0);
1531	if (!skb)
1532		return ERR_PTR(err);
1533
1534	skb_reserve(skb, prepad);
1535	skb_put(skb, linear);
1536	skb->data_len = len - linear;
1537	skb->len += len - linear;
1538
1539	return skb;
1540}
1541
1542static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1543			   struct sk_buff *skb, int more)
1544{
1545	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1546	struct sk_buff_head process_queue;
1547	u32 rx_batched = tun->rx_batched;
1548	bool rcv = false;
1549
1550	if (!rx_batched || (!more && skb_queue_empty(queue))) {
1551		local_bh_disable();
1552		skb_record_rx_queue(skb, tfile->queue_index);
1553		netif_receive_skb(skb);
1554		local_bh_enable();
1555		return;
1556	}
1557
1558	spin_lock(&queue->lock);
1559	if (!more || skb_queue_len(queue) == rx_batched) {
1560		__skb_queue_head_init(&process_queue);
1561		skb_queue_splice_tail_init(queue, &process_queue);
1562		rcv = true;
1563	} else {
1564		__skb_queue_tail(queue, skb);
1565	}
1566	spin_unlock(&queue->lock);
1567
1568	if (rcv) {
1569		struct sk_buff *nskb;
1570
1571		local_bh_disable();
1572		while ((nskb = __skb_dequeue(&process_queue))) {
1573			skb_record_rx_queue(nskb, tfile->queue_index);
1574			netif_receive_skb(nskb);
1575		}
1576		skb_record_rx_queue(skb, tfile->queue_index);
1577		netif_receive_skb(skb);
1578		local_bh_enable();
1579	}
1580}
1581
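/* Editor's note: tun->rx_batched above is configured through the ethtool
 * coalescing interface (the rx-frames handling in this driver's
 * ethtool_ops).  Hedged shell usage, assuming a device named tun0:
 *
 *	ethtool -C tun0 rx-frames 64
 *
 * With that set, back-to-back writes flagged "more" are queued and then
 * delivered to the stack in bursts instead of one netif_receive_skb()
 * call per packet.
 */
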
1582static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1583			      int len, int noblock, bool zerocopy)
1584{
1585	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1586		return false;
1587
1588	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1589		return false;
1590
1591	if (!noblock)
1592		return false;
1593
1594	if (zerocopy)
1595		return false;
1596
1597	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
1598	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1599		return false;
1600
1601	return true;
1602}
1603
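/* Editor's note: the size test at the end of tun_can_build_skb() mirrors
 * the build_skb() layout: headroom (TUN_RX_PAD, plus XDP_PACKET_HEADROOM
 * when an XDP program is attached), the frame itself, and the trailing
 * struct skb_shared_info must all fit within one PAGE_SIZE allocation.
 * Roughly, on a 4 KiB-page system a frame up to about PAGE_SIZE minus a
 * few hundred bytes of padding and shared-info takes this fast
 * single-copy path; anything larger falls back to tun_alloc_skb().
 */
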
1604static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1605				       struct page_frag *alloc_frag, char *buf,
1606				       int buflen, int len, int pad)
1607{
1608	struct sk_buff *skb = build_skb(buf, buflen);
1609
1610	if (!skb)
1611		return ERR_PTR(-ENOMEM);
1612
1613	skb_reserve(skb, pad);
1614	skb_put(skb, len);
1615	skb_set_owner_w(skb, tfile->socket.sk);
1616
1617	get_page(alloc_frag->page);
1618	alloc_frag->offset += buflen;
1619
1620	return skb;
1621}
1622
1623static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1624		       struct xdp_buff *xdp, u32 act)
1625{
1626	int err;
1627
1628	switch (act) {
1629	case XDP_REDIRECT:
1630		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1631		if (err)
1632			return err;
1633		break;
1634	case XDP_TX:
1635		err = tun_xdp_tx(tun->dev, xdp);
1636		if (err < 0)
1637			return err;
1638		break;
1639	case XDP_PASS:
1640		break;
1641	default:
1642		bpf_warn_invalid_xdp_action(act);
1643		/* fall through */
1644	case XDP_ABORTED:
1645		trace_xdp_exception(tun->dev, xdp_prog, act);
1646		/* fall through */
1647	case XDP_DROP:
1648		this_cpu_inc(tun->pcpu_stats->rx_dropped);
1649		break;
1650	}
1651
1652	return act;
1653}
1654
1655static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1656				     struct tun_file *tfile,
1657				     struct iov_iter *from,
1658				     struct virtio_net_hdr *hdr,
1659				     int len, int *skb_xdp)
1660{
1661	struct page_frag *alloc_frag = &current->task_frag;
1662	struct bpf_prog *xdp_prog;
1663	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1664	char *buf;
1665	size_t copied;
1666	int pad = TUN_RX_PAD;
1667	int err = 0;
1668
1669	rcu_read_lock();
1670	xdp_prog = rcu_dereference(tun->xdp_prog);
1671	if (xdp_prog)
1672		pad += XDP_PACKET_HEADROOM;
1673	buflen += SKB_DATA_ALIGN(len + pad);
1674	rcu_read_unlock();
1675
1676	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1677	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1678		return ERR_PTR(-ENOMEM);
1679
1680	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1681	copied = copy_page_from_iter(alloc_frag->page,
1682				     alloc_frag->offset + pad,
1683				     len, from);
1684	if (copied != len)
1685		return ERR_PTR(-EFAULT);
1686
 1687	/* There's a small window in which an XDP program may be attached
 1688	 * after the check of xdp_prog above; this should be rare, and for
 1689	 * simplicity we do XDP on the skb in case the headroom is not enough.
1690	 */
1691	if (hdr->gso_type || !xdp_prog) {
1692		*skb_xdp = 1;
1693		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1694				       pad);
1695	}
1696
1697	*skb_xdp = 0;
1698
1699	local_bh_disable();
1700	rcu_read_lock();
1701	xdp_prog = rcu_dereference(tun->xdp_prog);
1702	if (xdp_prog) {
1703		struct xdp_buff xdp;
1704		u32 act;
1705
1706		xdp.data_hard_start = buf;
1707		xdp.data = buf + pad;
1708		xdp_set_data_meta_invalid(&xdp);
1709		xdp.data_end = xdp.data + len;
1710		xdp.rxq = &tfile->xdp_rxq;
1711
1712		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1713		if (act == XDP_REDIRECT || act == XDP_TX) {
1714			get_page(alloc_frag->page);
1715			alloc_frag->offset += buflen;
1716		}
1717		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1718		if (err < 0)
1719			goto err_xdp;
1720		if (err == XDP_REDIRECT)
1721			xdp_do_flush_map();
1722		if (err != XDP_PASS)
1723			goto out;
1724
1725		pad = xdp.data - xdp.data_hard_start;
1726		len = xdp.data_end - xdp.data;
1727	}
1728	rcu_read_unlock();
1729	local_bh_enable();
1730
1731	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1732
1733err_xdp:
1734	put_page(alloc_frag->page);
1735out:
1736	rcu_read_unlock();
1737	local_bh_enable();
1738	return NULL;
1739}
1740
1741/* Get packet from user space buffer */
1742static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1743			    void *msg_control, struct iov_iter *from,
1744			    int noblock, bool more)
1745{
1746	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1747	struct sk_buff *skb;
1748	size_t total_len = iov_iter_count(from);
1749	size_t len = total_len, align = tun->align, linear;
1750	struct virtio_net_hdr gso = { 0 };
1751	struct tun_pcpu_stats *stats;
1752	int good_linear;
1753	int copylen;
1754	bool zerocopy = false;
1755	int err;
1756	u32 rxhash = 0;
1757	int skb_xdp = 1;
1758	bool frags = tun_napi_frags_enabled(tfile);
1759
1760	if (!(tun->flags & IFF_NO_PI)) {
1761		if (len < sizeof(pi))
1762			return -EINVAL;
1763		len -= sizeof(pi);
1764
1765		if (!copy_from_iter_full(&pi, sizeof(pi), from))
1766			return -EFAULT;
1767	}
1768
1769	if (tun->flags & IFF_VNET_HDR) {
1770		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1771
1772		if (len < vnet_hdr_sz)
1773			return -EINVAL;
1774		len -= vnet_hdr_sz;
1775
1776		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1777			return -EFAULT;
1778
1779		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1780		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1781			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1782
1783		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1784			return -EINVAL;
1785		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1786	}
1787
1788	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1789		align += NET_IP_ALIGN;
1790		if (unlikely(len < ETH_HLEN ||
1791			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1792			return -EINVAL;
1793	}
1794
1795	good_linear = SKB_MAX_HEAD(align);
1796
1797	if (msg_control) {
1798		struct iov_iter i = *from;
1799
 1800		/* Only a small, headers-sized amount is copied into the skb,
 1801		 * so there is enough room to expand the skb head if needed.
1802		 * The rest of the buffer is mapped from userspace.
1803		 */
1804		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1805		if (copylen > good_linear)
1806			copylen = good_linear;
1807		linear = copylen;
1808		iov_iter_advance(&i, copylen);
1809		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1810			zerocopy = true;
1811	}
1812
1813	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
 1814		/* Packets that are not easy to process here
 1815		 * (e.g. GSO or jumbo packets) are handled after the
 1816		 * skb is created, using the generic XDP routine.
1817		 */
1818		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1819		if (IS_ERR(skb)) {
1820			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1821			return PTR_ERR(skb);
1822		}
1823		if (!skb)
1824			return total_len;
1825	} else {
1826		if (!zerocopy) {
1827			copylen = len;
1828			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1829				linear = good_linear;
1830			else
1831				linear = tun16_to_cpu(tun, gso.hdr_len);
1832		}
1833
1834		if (frags) {
1835			mutex_lock(&tfile->napi_mutex);
1836			skb = tun_napi_alloc_frags(tfile, copylen, from);
1837			/* tun_napi_alloc_frags() enforces a layout for the skb.
1838			 * If zerocopy is enabled, then this layout will be
1839			 * overwritten by zerocopy_sg_from_iter().
1840			 */
1841			zerocopy = false;
1842		} else {
1843			skb = tun_alloc_skb(tfile, align, copylen, linear,
1844					    noblock);
1845		}
1846
1847		if (IS_ERR(skb)) {
1848			if (PTR_ERR(skb) != -EAGAIN)
1849				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1850			if (frags)
1851				mutex_unlock(&tfile->napi_mutex);
1852			return PTR_ERR(skb);
1853		}
1854
1855		if (zerocopy)
1856			err = zerocopy_sg_from_iter(skb, from);
1857		else
1858			err = skb_copy_datagram_from_iter(skb, 0, from, len);
1859
1860		if (err) {
1861			err = -EFAULT;
1862drop:
1863			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1864			kfree_skb(skb);
1865			if (frags) {
1866				tfile->napi.skb = NULL;
1867				mutex_unlock(&tfile->napi_mutex);
1868			}
1869
1870			return err;
1871		}
1872	}
1873
1874	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1875		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1876		kfree_skb(skb);
1877		if (frags) {
1878			tfile->napi.skb = NULL;
1879			mutex_unlock(&tfile->napi_mutex);
1880		}
1881
1882		return -EINVAL;
1883	}
1884
1885	switch (tun->flags & TUN_TYPE_MASK) {
1886	case IFF_TUN:
1887		if (tun->flags & IFF_NO_PI) {
1888			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1889
1890			switch (ip_version) {
1891			case 4:
1892				pi.proto = htons(ETH_P_IP);
1893				break;
1894			case 6:
1895				pi.proto = htons(ETH_P_IPV6);
1896				break;
1897			default:
1898				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1899				kfree_skb(skb);
1900				return -EINVAL;
1901			}
1902		}
1903
1904		skb_reset_mac_header(skb);
1905		skb->protocol = pi.proto;
1906		skb->dev = tun->dev;
1907		break;
1908	case IFF_TAP:
1909		if (!frags)
1910			skb->protocol = eth_type_trans(skb, tun->dev);
1911		break;
1912	}
1913
1914	/* copy skb_ubuf_info for callback when skb has no error */
1915	if (zerocopy) {
1916		skb_shinfo(skb)->destructor_arg = msg_control;
1917		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1918		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1919	} else if (msg_control) {
1920		struct ubuf_info *uarg = msg_control;
1921		uarg->callback(uarg, false);
1922	}
1923
1924	skb_reset_network_header(skb);
1925	skb_probe_transport_header(skb);
1926
1927	if (skb_xdp) {
1928		struct bpf_prog *xdp_prog;
1929		int ret;
1930
1931		local_bh_disable();
1932		rcu_read_lock();
1933		xdp_prog = rcu_dereference(tun->xdp_prog);
1934		if (xdp_prog) {
1935			ret = do_xdp_generic(xdp_prog, skb);
1936			if (ret != XDP_PASS) {
1937				rcu_read_unlock();
1938				local_bh_enable();
1939				return total_len;
1940			}
1941		}
1942		rcu_read_unlock();
1943		local_bh_enable();
1944	}
1945
1946	/* Compute the costly rx hash only if needed for flow updates.
 1947	 * There is a small possibility of out-of-order delivery while
 1948	 * switching queues, but it is not worth optimizing for.
1949	 */
1950	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1951	    !tfile->detached)
1952		rxhash = __skb_get_hash_symmetric(skb);
1953
1954	rcu_read_lock();
1955	if (unlikely(!(tun->dev->flags & IFF_UP))) {
1956		err = -EIO;
1957		rcu_read_unlock();
1958		goto drop;
1959	}
1960
1961	if (frags) {
1962		/* Exercise flow dissector code path. */
1963		u32 headlen = eth_get_headlen(tun->dev, skb->data,
1964					      skb_headlen(skb));
1965
1966		if (unlikely(headlen > skb_headlen(skb))) {
1967			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1968			napi_free_frags(&tfile->napi);
1969			rcu_read_unlock();
1970			mutex_unlock(&tfile->napi_mutex);
1971			WARN_ON(1);
1972			return -ENOMEM;
1973		}
1974
1975		local_bh_disable();
1976		napi_gro_frags(&tfile->napi);
1977		local_bh_enable();
1978		mutex_unlock(&tfile->napi_mutex);
1979	} else if (tfile->napi_enabled) {
1980		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1981		int queue_len;
1982
1983		spin_lock_bh(&queue->lock);
1984		__skb_queue_tail(queue, skb);
1985		queue_len = skb_queue_len(queue);
1986		spin_unlock(&queue->lock);
1987
1988		if (!more || queue_len > NAPI_POLL_WEIGHT)
1989			napi_schedule(&tfile->napi);
1990
1991		local_bh_enable();
1992	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
1993		tun_rx_batched(tun, tfile, skb, more);
1994	} else {
1995		netif_rx_ni(skb);
1996	}
1997	rcu_read_unlock();
1998
1999	stats = get_cpu_ptr(tun->pcpu_stats);
2000	u64_stats_update_begin(&stats->syncp);
2001	stats->rx_packets++;
2002	stats->rx_bytes += len;
2003	u64_stats_update_end(&stats->syncp);
2004	put_cpu_ptr(stats);
2005
2006	if (rxhash)
2007		tun_flow_update(tun, rxhash, tfile);
2008
2009	return total_len;
2010}
2011
2012static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2013{
2014	struct file *file = iocb->ki_filp;
2015	struct tun_file *tfile = file->private_data;
2016	struct tun_struct *tun = tun_get(tfile);
2017	ssize_t result;
2018
2019	if (!tun)
2020		return -EBADFD;
2021
2022	result = tun_get_user(tun, tfile, NULL, from,
2023			      file->f_flags & O_NONBLOCK, false);
2024
2025	tun_put(tun);
2026	return result;
2027}
2028
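/* Editor's note: tun_chr_write_iter() above is the write(2)/writev(2)
 * entry point, so injecting one packet from userspace looks like the
 * hedged sketch below (assumes the interface was created without
 * IFF_NO_PI, so each frame is prefixed by struct tun_pi; "pkt" and
 * "len" are illustrative):
 *
 *	struct tun_pi pi = { .flags = 0, .proto = htons(ETH_P_IP) };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &pi, .iov_len = sizeof(pi) },
 *		{ .iov_base = pkt, .iov_len = len },
 *	};
 *
 *	if (writev(fd, iov, 2) < 0)
 *		perror("writev");
 */
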
2029static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2030				struct tun_file *tfile,
2031				struct xdp_frame *xdp_frame,
2032				struct iov_iter *iter)
2033{
2034	int vnet_hdr_sz = 0;
2035	size_t size = xdp_frame->len;
2036	struct tun_pcpu_stats *stats;
2037	size_t ret;
2038
2039	if (tun->flags & IFF_VNET_HDR) {
2040		struct virtio_net_hdr gso = { 0 };
2041
2042		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2043		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2044			return -EINVAL;
2045		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2046			     sizeof(gso)))
2047			return -EFAULT;
2048		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2049	}
2050
2051	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2052
2053	stats = get_cpu_ptr(tun->pcpu_stats);
2054	u64_stats_update_begin(&stats->syncp);
2055	stats->tx_packets++;
2056	stats->tx_bytes += ret;
2057	u64_stats_update_end(&stats->syncp);
2058	put_cpu_ptr(tun->pcpu_stats);
2059
2060	return ret;
2061}
2062
2063/* Put packet to the user space buffer */
2064static ssize_t tun_put_user(struct tun_struct *tun,
2065			    struct tun_file *tfile,
2066			    struct sk_buff *skb,
2067			    struct iov_iter *iter)
2068{
2069	struct tun_pi pi = { 0, skb->protocol };
2070	struct tun_pcpu_stats *stats;
2071	ssize_t total;
2072	int vlan_offset = 0;
2073	int vlan_hlen = 0;
2074	int vnet_hdr_sz = 0;
2075
2076	if (skb_vlan_tag_present(skb))
2077		vlan_hlen = VLAN_HLEN;
2078
2079	if (tun->flags & IFF_VNET_HDR)
2080		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2081
2082	total = skb->len + vlan_hlen + vnet_hdr_sz;
2083
2084	if (!(tun->flags & IFF_NO_PI)) {
2085		if (iov_iter_count(iter) < sizeof(pi))
2086			return -EINVAL;
2087
2088		total += sizeof(pi);
2089		if (iov_iter_count(iter) < total) {
 2090			/* Packet will be stripped */
2091			pi.flags |= TUN_PKT_STRIP;
2092		}
2093
2094		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2095			return -EFAULT;
2096	}
2097
2098	if (vnet_hdr_sz) {
2099		struct virtio_net_hdr gso;
2100
2101		if (iov_iter_count(iter) < vnet_hdr_sz)
2102			return -EINVAL;
2103
2104		if (virtio_net_hdr_from_skb(skb, &gso,
2105					    tun_is_little_endian(tun), true,
2106					    vlan_hlen)) {
2107			struct skb_shared_info *sinfo = skb_shinfo(skb);
2108			pr_err("unexpected GSO type: "
2109			       "0x%x, gso_size %d, hdr_len %d\n",
2110			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2111			       tun16_to_cpu(tun, gso.hdr_len));
2112			print_hex_dump(KERN_ERR, "tun: ",
2113				       DUMP_PREFIX_NONE,
2114				       16, 1, skb->head,
2115				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2116			WARN_ON_ONCE(1);
2117			return -EINVAL;
2118		}
2119
2120		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2121			return -EFAULT;
2122
2123		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2124	}
2125
2126	if (vlan_hlen) {
2127		int ret;
2128		struct veth veth;
2129
2130		veth.h_vlan_proto = skb->vlan_proto;
2131		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2132
2133		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2134
2135		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2136		if (ret || !iov_iter_count(iter))
2137			goto done;
2138
2139		ret = copy_to_iter(&veth, sizeof(veth), iter);
2140		if (ret != sizeof(veth) || !iov_iter_count(iter))
2141			goto done;
2142	}
2143
2144	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2145
2146done:
 2147	/* caller is in process context */
2148	stats = get_cpu_ptr(tun->pcpu_stats);
2149	u64_stats_update_begin(&stats->syncp);
2150	stats->tx_packets++;
2151	stats->tx_bytes += skb->len + vlan_hlen;
2152	u64_stats_update_end(&stats->syncp);
2153	put_cpu_ptr(tun->pcpu_stats);
2154
2155	return total;
2156}
2157
2158static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2159{
2160	DECLARE_WAITQUEUE(wait, current);
2161	void *ptr = NULL;
2162	int error = 0;
2163
2164	ptr = ptr_ring_consume(&tfile->tx_ring);
2165	if (ptr)
2166		goto out;
2167	if (noblock) {
2168		error = -EAGAIN;
2169		goto out;
2170	}
2171
2172	add_wait_queue(&tfile->socket.wq.wait, &wait);
2173
2174	while (1) {
2175		set_current_state(TASK_INTERRUPTIBLE);
2176		ptr = ptr_ring_consume(&tfile->tx_ring);
2177		if (ptr)
2178			break;
2179		if (signal_pending(current)) {
2180			error = -ERESTARTSYS;
2181			break;
2182		}
2183		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2184			error = -EFAULT;
2185			break;
2186		}
2187
2188		schedule();
2189	}
2190
2191	__set_current_state(TASK_RUNNING);
2192	remove_wait_queue(&tfile->socket.wq.wait, &wait);
2193
2194out:
2195	*err = error;
2196	return ptr;
2197}
2198
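/* Editor's note: tun_ring_recv() above is the classic sleep/wake-up
 * pattern: register on the wait queue, set TASK_INTERRUPTIBLE, re-check
 * the condition, and only then schedule(), so a producer waking the queue
 * between the check and the sleep is never lost.  Condensed shape
 * (illustrative; "cond" stands for the ring-consume, signal and
 * shutdown checks):
 *
 *	add_wait_queue(&wq, &wait);
 *	for (;;) {
 *		set_current_state(TASK_INTERRUPTIBLE);
 *		if (cond)
 *			break;
 *		schedule();
 *	}
 *	__set_current_state(TASK_RUNNING);
 *	remove_wait_queue(&wq, &wait);
 */
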
2199static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2200			   struct iov_iter *to,
2201			   int noblock, void *ptr)
2202{
2203	ssize_t ret;
2204	int err;
2205
2206	tun_debug(KERN_INFO, tun, "tun_do_read\n");
2207
2208	if (!iov_iter_count(to)) {
2209		tun_ptr_free(ptr);
2210		return 0;
2211	}
2212
2213	if (!ptr) {
2214		/* Read frames from ring */
2215		ptr = tun_ring_recv(tfile, noblock, &err);
2216		if (!ptr)
2217			return err;
2218	}
2219
2220	if (tun_is_xdp_frame(ptr)) {
2221		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2222
2223		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2224		xdp_return_frame(xdpf);
2225	} else {
2226		struct sk_buff *skb = ptr;
2227
2228		ret = tun_put_user(tun, tfile, skb, to);
2229		if (unlikely(ret < 0))
2230			kfree_skb(skb);
2231		else
2232			consume_skb(skb);
2233	}
2234
2235	return ret;
2236}
2237
2238static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2239{
2240	struct file *file = iocb->ki_filp;
2241	struct tun_file *tfile = file->private_data;
2242	struct tun_struct *tun = tun_get(tfile);
2243	ssize_t len = iov_iter_count(to), ret;
2244
2245	if (!tun)
2246		return -EBADFD;
2247	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
2248	ret = min_t(ssize_t, ret, len);
2249	if (ret > 0)
2250		iocb->ki_pos = ret;
2251	tun_put(tun);
2252	return ret;
2253}
2254
2255static void tun_prog_free(struct rcu_head *rcu)
2256{
2257	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2258
2259	bpf_prog_destroy(prog->prog);
2260	kfree(prog);
2261}
2262
2263static int __tun_set_ebpf(struct tun_struct *tun,
2264			  struct tun_prog __rcu **prog_p,
2265			  struct bpf_prog *prog)
2266{
2267	struct tun_prog *old, *new = NULL;
2268
2269	if (prog) {
2270		new = kmalloc(sizeof(*new), GFP_KERNEL);
2271		if (!new)
2272			return -ENOMEM;
2273		new->prog = prog;
2274	}
2275
2276	spin_lock_bh(&tun->lock);
2277	old = rcu_dereference_protected(*prog_p,
2278					lockdep_is_held(&tun->lock));
2279	rcu_assign_pointer(*prog_p, new);
2280	spin_unlock_bh(&tun->lock);
2281
2282	if (old)
2283		call_rcu(&old->rcu, tun_prog_free);
2284
2285	return 0;
2286}
2287
2288static void tun_free_netdev(struct net_device *dev)
2289{
2290	struct tun_struct *tun = netdev_priv(dev);
2291
2292	BUG_ON(!(list_empty(&tun->disabled)));
2293	free_percpu(tun->pcpu_stats);
2294	tun_flow_uninit(tun);
2295	security_tun_dev_free_security(tun->security);
2296	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2297	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
2298}
2299
2300static void tun_setup(struct net_device *dev)
2301{
2302	struct tun_struct *tun = netdev_priv(dev);
2303
2304	tun->owner = INVALID_UID;
2305	tun->group = INVALID_GID;
2306	tun_default_link_ksettings(dev, &tun->link_ksettings);
2307
2308	dev->ethtool_ops = &tun_ethtool_ops;
2309	dev->needs_free_netdev = true;
2310	dev->priv_destructor = tun_free_netdev;
2311	/* We prefer our own queue length */
2312	dev->tx_queue_len = TUN_READQ_SIZE;
2313}
2314
2315/* Trivial set of netlink ops to allow deleting tun or tap
2316 * device with netlink.
2317 */
2318static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2319			struct netlink_ext_ack *extack)
2320{
2321	NL_SET_ERR_MSG(extack,
2322		       "tun/tap creation via rtnetlink is not supported.");
2323	return -EOPNOTSUPP;
2324}
2325
2326static size_t tun_get_size(const struct net_device *dev)
2327{
2328	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2329	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2330
2331	return nla_total_size(sizeof(uid_t)) + /* OWNER */
2332	       nla_total_size(sizeof(gid_t)) + /* GROUP */
2333	       nla_total_size(sizeof(u8)) + /* TYPE */
2334	       nla_total_size(sizeof(u8)) + /* PI */
2335	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
2336	       nla_total_size(sizeof(u8)) + /* PERSIST */
2337	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2338	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2339	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2340	       0;
2341}
2342
2343static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2344{
2345	struct tun_struct *tun = netdev_priv(dev);
2346
2347	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2348		goto nla_put_failure;
2349	if (uid_valid(tun->owner) &&
2350	    nla_put_u32(skb, IFLA_TUN_OWNER,
2351			from_kuid_munged(current_user_ns(), tun->owner)))
2352		goto nla_put_failure;
2353	if (gid_valid(tun->group) &&
2354	    nla_put_u32(skb, IFLA_TUN_GROUP,
2355			from_kgid_munged(current_user_ns(), tun->group)))
2356		goto nla_put_failure;
2357	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2358		goto nla_put_failure;
2359	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2360		goto nla_put_failure;
2361	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2362		goto nla_put_failure;
2363	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2364		       !!(tun->flags & IFF_MULTI_QUEUE)))
2365		goto nla_put_failure;
2366	if (tun->flags & IFF_MULTI_QUEUE) {
2367		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2368			goto nla_put_failure;
2369		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2370				tun->numdisabled))
2371			goto nla_put_failure;
2372	}
2373
2374	return 0;
2375
2376nla_put_failure:
2377	return -EMSGSIZE;
2378}
2379
2380static struct rtnl_link_ops tun_link_ops __read_mostly = {
2381	.kind		= DRV_NAME,
2382	.priv_size	= sizeof(struct tun_struct),
2383	.setup		= tun_setup,
2384	.validate	= tun_validate,
2385	.get_size       = tun_get_size,
2386	.fill_info      = tun_fill_info,
2387};
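
/* With these minimal ops registered, an existing tun/tap device can be
 * deleted over rtnetlink (e.g. "ip link delete tun0"), while creation
 * attempts are rejected by tun_validate() and must instead go through
 * the TUNSETIFF ioctl path implemented in tun_set_iff() below.
 */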
2388
2389static void tun_sock_write_space(struct sock *sk)
2390{
2391	struct tun_file *tfile;
2392	wait_queue_head_t *wqueue;
2393
2394	if (!sock_writeable(sk))
2395		return;
2396
2397	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2398		return;
2399
2400	wqueue = sk_sleep(sk);
2401	if (wqueue && waitqueue_active(wqueue))
2402		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2403						EPOLLWRNORM | EPOLLWRBAND);
2404
2405	tfile = container_of(sk, struct tun_file, sk);
2406	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2407}
2408
2409static void tun_put_page(struct tun_page *tpage)
2410{
2411	if (tpage->page)
2412		__page_frag_cache_drain(tpage->page, tpage->count);
2413}
2414
2415static int tun_xdp_one(struct tun_struct *tun,
2416		       struct tun_file *tfile,
2417		       struct xdp_buff *xdp, int *flush,
2418		       struct tun_page *tpage)
2419{
2420	unsigned int datasize = xdp->data_end - xdp->data;
2421	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2422	struct virtio_net_hdr *gso = &hdr->gso;
2423	struct tun_pcpu_stats *stats;
2424	struct bpf_prog *xdp_prog;
2425	struct sk_buff *skb = NULL;
2426	u32 rxhash = 0, act;
2427	int buflen = hdr->buflen;
2428	int err = 0;
2429	bool skb_xdp = false;
2430	struct page *page;
2431
2432	xdp_prog = rcu_dereference(tun->xdp_prog);
2433	if (xdp_prog) {
2434		if (gso->gso_type) {
2435			skb_xdp = true;
2436			goto build;
2437		}
2438		xdp_set_data_meta_invalid(xdp);
2439		xdp->rxq = &tfile->xdp_rxq;
2440
2441		act = bpf_prog_run_xdp(xdp_prog, xdp);
2442		err = tun_xdp_act(tun, xdp_prog, xdp, act);
2443		if (err < 0) {
2444			put_page(virt_to_head_page(xdp->data));
2445			return err;
2446		}
2447
2448		switch (err) {
2449		case XDP_REDIRECT:
2450			*flush = true;
2451			/* fall through */
2452		case XDP_TX:
2453			return 0;
2454		case XDP_PASS:
2455			break;
2456		default:
2457			page = virt_to_head_page(xdp->data);
2458			if (tpage->page == page) {
2459				++tpage->count;
2460			} else {
2461				tun_put_page(tpage);
2462				tpage->page = page;
2463				tpage->count = 1;
2464			}
2465			return 0;
2466		}
2467	}
2468
2469build:
2470	skb = build_skb(xdp->data_hard_start, buflen);
2471	if (!skb) {
2472		err = -ENOMEM;
2473		goto out;
2474	}
2475
2476	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2477	skb_put(skb, xdp->data_end - xdp->data);
2478
2479	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2480		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2481		kfree_skb(skb);
2482		err = -EINVAL;
2483		goto out;
2484	}
2485
2486	skb->protocol = eth_type_trans(skb, tun->dev);
2487	skb_reset_network_header(skb);
2488	skb_probe_transport_header(skb);
2489
2490	if (skb_xdp) {
2491		err = do_xdp_generic(xdp_prog, skb);
2492		if (err != XDP_PASS)
2493			goto out;
2494	}
2495
2496	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2497	    !tfile->detached)
2498		rxhash = __skb_get_hash_symmetric(skb);
2499
2500	skb_record_rx_queue(skb, tfile->queue_index);
2501	netif_receive_skb(skb);
2502
2503	/* No need for get_cpu_ptr() here since this function is
2504	 * always called with bh disabled
2505	 */
2506	stats = this_cpu_ptr(tun->pcpu_stats);
2507	u64_stats_update_begin(&stats->syncp);
2508	stats->rx_packets++;
2509	stats->rx_bytes += datasize;
2510	u64_stats_update_end(&stats->syncp);
2511
2512	if (rxhash)
2513		tun_flow_update(tun, rxhash, tfile);
2514
2515out:
2516	return err;
2517}
2518
2519static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2520{
2521	int ret, i;
2522	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2523	struct tun_struct *tun = tun_get(tfile);
2524	struct tun_msg_ctl *ctl = m->msg_control;
2525	struct xdp_buff *xdp;
2526
2527	if (!tun)
2528		return -EBADFD;
2529
2530	if (ctl && (ctl->type == TUN_MSG_PTR)) {
2531		struct tun_page tpage;
2532		int n = ctl->num;
2533		int flush = 0;
2534
2535		memset(&tpage, 0, sizeof(tpage));
2536
2537		local_bh_disable();
2538		rcu_read_lock();
2539
2540		for (i = 0; i < n; i++) {
2541			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2542			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2543		}
2544
2545		if (flush)
2546			xdp_do_flush_map();
2547
2548		rcu_read_unlock();
2549		local_bh_enable();
2550
2551		tun_put_page(&tpage);
2552
2553		ret = total_len;
2554		goto out;
2555	}
2556
2557	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2558			   m->msg_flags & MSG_DONTWAIT,
2559			   m->msg_flags & MSG_MORE);
2560out:
2561	tun_put(tun);
2562	return ret;
2563}
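
/* Hedged sketch (illustrative): how an in-kernel caller such as
 * vhost-net hands a batch of XDP buffers to tun_sendmsg() above via the
 * TUN_MSG_PTR control type. The helper name and calling context are
 * assumptions; the tun_msg_ctl layout matches its use in this file.
 */
#if 0	/* example only, never compiled */
static int example_send_xdp_batch(struct socket *tun_sock,
				  struct xdp_buff *bufs, unsigned short n)
{
	struct tun_msg_ctl ctl = {
		.type = TUN_MSG_PTR,
		.num  = n,
		.ptr  = bufs,
	};
	struct msghdr msg = {
		.msg_control = &ctl,
	};

	/* The batch path above ignores total_len and returns it as-is. */
	return tun_sock->ops->sendmsg(tun_sock, &msg, 0);
}
#endif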
2564
2565static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2566		       int flags)
2567{
2568	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2569	struct tun_struct *tun = tun_get(tfile);
2570	void *ptr = m->msg_control;
2571	int ret;
2572
2573	if (!tun) {
2574		ret = -EBADFD;
2575		goto out_free;
2576	}
2577
2578	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2579		ret = -EINVAL;
2580		goto out_put_tun;
2581	}
2582	if (flags & MSG_ERRQUEUE) {
2583		ret = sock_recv_errqueue(sock->sk, m, total_len,
2584					 SOL_PACKET, TUN_TX_TIMESTAMP);
2585		goto out;
2586	}
2587	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2588	if (ret > (ssize_t)total_len) {
2589		m->msg_flags |= MSG_TRUNC;
2590		ret = flags & MSG_TRUNC ? ret : total_len;
2591	}
2592out:
2593	tun_put(tun);
2594	return ret;
2595
2596out_put_tun:
2597	tun_put(tun);
2598out_free:
2599	tun_ptr_free(ptr);
2600	return ret;
2601}
2602
2603static int tun_ptr_peek_len(void *ptr)
2604{
2605	if (likely(ptr)) {
2606		if (tun_is_xdp_frame(ptr)) {
2607			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2608
2609			return xdpf->len;
2610		}
2611		return __skb_array_len_with_tag(ptr);
2612	} else {
2613		return 0;
2614	}
2615}
2616
2617static int tun_peek_len(struct socket *sock)
2618{
2619	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2620	struct tun_struct *tun;
2621	int ret = 0;
2622
2623	tun = tun_get(tfile);
2624	if (!tun)
2625		return 0;
2626
2627	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2628	tun_put(tun);
2629
2630	return ret;
2631}
2632
2633/* Ops structure to mimic raw sockets with tun */
2634static const struct proto_ops tun_socket_ops = {
2635	.peek_len = tun_peek_len,
2636	.sendmsg = tun_sendmsg,
2637	.recvmsg = tun_recvmsg,
2638};
2639
2640static struct proto tun_proto = {
2641	.name		= "tun",
2642	.owner		= THIS_MODULE,
2643	.obj_size	= sizeof(struct tun_file),
2644};
2645
2646static int tun_flags(struct tun_struct *tun)
2647{
2648	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2649}
2650
2651static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2652			      char *buf)
2653{
2654	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2655	return sprintf(buf, "0x%x\n", tun_flags(tun));
2656}
2657
2658static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2659			      char *buf)
2660{
2661	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2662	return uid_valid(tun->owner)?
2663		sprintf(buf, "%u\n",
2664			from_kuid_munged(current_user_ns(), tun->owner)):
2665		sprintf(buf, "-1\n");
2666}
2667
2668static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2669			      char *buf)
2670{
2671	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2672	return gid_valid(tun->group) ?
2673		sprintf(buf, "%u\n",
2674			from_kgid_munged(current_user_ns(), tun->group)):
2675		sprintf(buf, "-1\n");
2676}
2677
2678static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2679static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2680static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2681
2682static struct attribute *tun_dev_attrs[] = {
2683	&dev_attr_tun_flags.attr,
2684	&dev_attr_owner.attr,
2685	&dev_attr_group.attr,
2686	NULL
2687};
2688
2689static const struct attribute_group tun_attr_group = {
2690	.attrs = tun_dev_attrs
2691};
2692
2693static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2694{
2695	struct tun_struct *tun;
2696	struct tun_file *tfile = file->private_data;
2697	struct net_device *dev;
2698	int err;
2699
2700	if (tfile->detached)
2701		return -EINVAL;
2702
2703	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2704		if (!capable(CAP_NET_ADMIN))
2705			return -EPERM;
2706
2707		if (!(ifr->ifr_flags & IFF_NAPI) ||
2708		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2709			return -EINVAL;
2710	}
2711
2712	dev = __dev_get_by_name(net, ifr->ifr_name);
2713	if (dev) {
2714		if (ifr->ifr_flags & IFF_TUN_EXCL)
2715			return -EBUSY;
2716		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2717			tun = netdev_priv(dev);
2718		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2719			tun = netdev_priv(dev);
2720		else
2721			return -EINVAL;
2722
2723		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2724		    !!(tun->flags & IFF_MULTI_QUEUE))
2725			return -EINVAL;
2726
2727		if (tun_not_capable(tun))
2728			return -EPERM;
2729		err = security_tun_dev_open(tun->security);
2730		if (err < 0)
2731			return err;
2732
2733		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2734				 ifr->ifr_flags & IFF_NAPI,
2735				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2736		if (err < 0)
2737			return err;
2738
2739		if (tun->flags & IFF_MULTI_QUEUE &&
2740		    (tun->numqueues + tun->numdisabled > 1)) {
2741			/* One or more queues have already been attached; no need
2742			 * to initialize the device again.
2743			 */
2744			netdev_state_change(dev);
2745			return 0;
2746		}
2747
2748		tun->flags = (tun->flags & ~TUN_FEATURES) |
2749			      (ifr->ifr_flags & TUN_FEATURES);
2750
2751		netdev_state_change(dev);
2752	} else {
2753		char *name;
2754		unsigned long flags = 0;
2755		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2756			     MAX_TAP_QUEUES : 1;
2757
2758		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2759			return -EPERM;
2760		err = security_tun_dev_create();
2761		if (err < 0)
2762			return err;
2763
2764		/* Set dev type */
2765		if (ifr->ifr_flags & IFF_TUN) {
2766			/* TUN device */
2767			flags |= IFF_TUN;
2768			name = "tun%d";
2769		} else if (ifr->ifr_flags & IFF_TAP) {
2770			/* TAP device */
2771			flags |= IFF_TAP;
2772			name = "tap%d";
2773		} else
2774			return -EINVAL;
2775
2776		if (*ifr->ifr_name)
2777			name = ifr->ifr_name;
2778
2779		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2780				       NET_NAME_UNKNOWN, tun_setup, queues,
2781				       queues);
2782
2783		if (!dev)
2784			return -ENOMEM;
2785		err = dev_get_valid_name(net, dev, name);
2786		if (err < 0)
2787			goto err_free_dev;
2788
2789		dev_net_set(dev, net);
2790		dev->rtnl_link_ops = &tun_link_ops;
2791		dev->ifindex = tfile->ifindex;
2792		dev->sysfs_groups[0] = &tun_attr_group;
2793
2794		tun = netdev_priv(dev);
2795		tun->dev = dev;
2796		tun->flags = flags;
2797		tun->txflt.count = 0;
2798		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2799
2800		tun->align = NET_SKB_PAD;
2801		tun->filter_attached = false;
2802		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2803		tun->rx_batched = 0;
2804		RCU_INIT_POINTER(tun->steering_prog, NULL);
2805
2806		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2807		if (!tun->pcpu_stats) {
2808			err = -ENOMEM;
2809			goto err_free_dev;
2810		}
2811
2812		spin_lock_init(&tun->lock);
2813
2814		err = security_tun_dev_alloc_security(&tun->security);
2815		if (err < 0)
2816			goto err_free_stat;
2817
2818		tun_net_init(dev);
2819		tun_flow_init(tun);
2820
2821		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
2822				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
2823				   NETIF_F_HW_VLAN_STAG_TX;
2824		dev->features = dev->hw_features | NETIF_F_LLTX;
2825		dev->vlan_features = dev->features &
2826				     ~(NETIF_F_HW_VLAN_CTAG_TX |
2827				       NETIF_F_HW_VLAN_STAG_TX);
2828
2829		tun->flags = (tun->flags & ~TUN_FEATURES) |
2830			      (ifr->ifr_flags & TUN_FEATURES);
2831
2832		INIT_LIST_HEAD(&tun->disabled);
2833		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
2834				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2835		if (err < 0)
2836			goto err_free_flow;
2837
2838		err = register_netdevice(tun->dev);
2839		if (err < 0)
2840			goto err_detach;
2841		/* free_netdev() won't check refcnt; to avoid a race
2842		 * with dev_put() we need to publish tun after registration.
2843		 */
2844		rcu_assign_pointer(tfile->tun, tun);
2845	}
2846
2847	netif_carrier_on(tun->dev);
2848
2849	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
2850
2851	/* Make sure persistent devices do not get stuck in
2852	 * xoff state.
2853	 */
2854	if (netif_running(tun->dev))
2855		netif_tx_wake_all_queues(tun->dev);
2856
2857	strcpy(ifr->ifr_name, tun->dev->name);
2858	return 0;
2859
2860err_detach:
2861	tun_detach_all(dev);
2862	/* register_netdevice() already called tun_free_netdev() */
2863	goto err_free_dev;
2864
2865err_free_flow:
2866	tun_flow_uninit(tun);
2867	security_tun_dev_free_security(tun->security);
2868err_free_stat:
2869	free_percpu(tun->pcpu_stats);
2870err_free_dev:
2871	free_netdev(dev);
2872	return err;
2873}
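
/* Hedged userspace sketch of the TUNSETIFF path handled by
 * tun_set_iff() above. The flag combination is illustrative; pass an
 * empty name (or a "tun%d" template) to let the kernel pick one.
 */
#if 0	/* example only, never compiled */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_open_tun(char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no pkt info */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return -1;
	strcpy(name, ifr.ifr_name);	/* kernel may have chosen "tunN" */
	return fd;
}
#endif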
2874
2875static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2876{
2877	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
2878
2879	strcpy(ifr->ifr_name, tun->dev->name);
2880
2881	ifr->ifr_flags = tun_flags(tun);
2882
2883}
2884
2885/* This is like a cut-down set of ethtool ops, except done via the tun fd
2886 * so no privileges are required. */
2887static int set_offload(struct tun_struct *tun, unsigned long arg)
2888{
2889	netdev_features_t features = 0;
2890
2891	if (arg & TUN_F_CSUM) {
2892		features |= NETIF_F_HW_CSUM;
2893		arg &= ~TUN_F_CSUM;
2894
2895		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2896			if (arg & TUN_F_TSO_ECN) {
2897				features |= NETIF_F_TSO_ECN;
2898				arg &= ~TUN_F_TSO_ECN;
2899			}
2900			if (arg & TUN_F_TSO4)
2901				features |= NETIF_F_TSO;
2902			if (arg & TUN_F_TSO6)
2903				features |= NETIF_F_TSO6;
2904			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2905		}
2906
2907		arg &= ~TUN_F_UFO;
2908	}
2909
2910	/* This gives the user a way to test for new features in the future
2911	 * by trying to set them. */
2912	if (arg)
2913		return -EINVAL;
2914
2915	tun->set_features = features;
2916	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2917	tun->dev->wanted_features |= features;
2918	netdev_update_features(tun->dev);
2919
2920	return 0;
2921}
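
/* Hedged userspace sketch: enabling checksum and TSO offloads through
 * TUNSETOFFLOAD, matching the flag parsing in set_offload() above. The
 * flag combination is illustrative; an unknown flag fails with -EINVAL,
 * which is how userspace probes for new features.
 */
#if 0	/* example only, never compiled */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int example_enable_offloads(int tun_fd)
{
	/* TUNSETOFFLOAD takes the flags by value, not by pointer. */
	return ioctl(tun_fd, TUNSETOFFLOAD,
		     TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
}
#endif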
2922
2923static void tun_detach_filter(struct tun_struct *tun, int n)
2924{
2925	int i;
2926	struct tun_file *tfile;
2927
2928	for (i = 0; i < n; i++) {
2929		tfile = rtnl_dereference(tun->tfiles[i]);
2930		lock_sock(tfile->socket.sk);
2931		sk_detach_filter(tfile->socket.sk);
2932		release_sock(tfile->socket.sk);
2933	}
2934
2935	tun->filter_attached = false;
2936}
2937
2938static int tun_attach_filter(struct tun_struct *tun)
2939{
2940	int i, ret = 0;
2941	struct tun_file *tfile;
2942
2943	for (i = 0; i < tun->numqueues; i++) {
2944		tfile = rtnl_dereference(tun->tfiles[i]);
2945		lock_sock(tfile->socket.sk);
2946		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2947		release_sock(tfile->socket.sk);
2948		if (ret) {
2949			tun_detach_filter(tun, i);
2950			return ret;
2951		}
2952	}
2953
2954	tun->filter_attached = true;
2955	return ret;
2956}
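
/* Hedged userspace sketch: attaching a classic BPF filter to every
 * queue of a tap device with TUNATTACHFILTER (the ioctl case below
 * copies the sock_fprog and calls tun_attach_filter() above). The
 * one-instruction "accept everything" program is illustrative.
 */
#if 0	/* example only, never compiled */
#include <sys/ioctl.h>
#include <linux/filter.h>
#include <linux/if_tun.h>

static int example_attach_filter(int tap_fd)
{
	struct sock_filter accept_all =
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff);
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= &accept_all,
	};

	return ioctl(tap_fd, TUNATTACHFILTER, &fprog);
}
#endif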
2957
2958static void tun_set_sndbuf(struct tun_struct *tun)
2959{
2960	struct tun_file *tfile;
2961	int i;
2962
2963	for (i = 0; i < tun->numqueues; i++) {
2964		tfile = rtnl_dereference(tun->tfiles[i]);
2965		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2966	}
2967}
2968
2969static int tun_set_queue(struct file *file, struct ifreq *ifr)
2970{
2971	struct tun_file *tfile = file->private_data;
2972	struct tun_struct *tun;
2973	int ret = 0;
2974
2975	rtnl_lock();
2976
2977	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2978		tun = tfile->detached;
2979		if (!tun) {
2980			ret = -EINVAL;
2981			goto unlock;
2982		}
2983		ret = security_tun_dev_attach_queue(tun->security);
2984		if (ret < 0)
2985			goto unlock;
2986		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2987				 tun->flags & IFF_NAPI_FRAGS, true);
2988	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2989		tun = rtnl_dereference(tfile->tun);
2990		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2991			ret = -EINVAL;
2992		else
2993			__tun_detach(tfile, false);
2994	} else
2995		ret = -EINVAL;
2996
2997	if (ret >= 0)
2998		netdev_state_change(tun->dev);
2999
3000unlock:
3001	rtnl_unlock();
3002	return ret;
3003}
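
/* Hedged userspace sketch: temporarily parking one queue fd of a
 * multiqueue device and re-enabling it later, as handled by
 * tun_set_queue() above.
 */
#if 0	/* example only, never compiled */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int example_toggle_queue(int queue_fd, int attach)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = attach ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(queue_fd, TUNSETQUEUE, &ifr);
}
#endif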
3004
3005static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
3006			void __user *data)
3007{
3008	struct bpf_prog *prog;
3009	int fd;
3010
3011	if (copy_from_user(&fd, data, sizeof(fd)))
3012		return -EFAULT;
3013
3014	if (fd == -1) {
3015		prog = NULL;
3016	} else {
3017		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3018		if (IS_ERR(prog))
3019			return PTR_ERR(prog);
3020	}
3021
3022	return __tun_set_ebpf(tun, prog_p, prog);
3023}
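
/* Hedged userspace sketch: installing and then clearing a steering
 * program via TUNSETSTEERINGEBPF, as parsed by tun_set_ebpf() above.
 * prog_fd is assumed to be a loaded BPF_PROG_TYPE_SOCKET_FILTER
 * program; passing -1 restores the default automatic steering.
 */
#if 0	/* example only, never compiled */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int example_set_steering(int tun_fd, int prog_fd)
{
	if (ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd) < 0)
		return -1;
	prog_fd = -1;
	return ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
}
#endif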
3024
3025static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3026			    unsigned long arg, int ifreq_len)
3027{
3028	struct tun_file *tfile = file->private_data;
3029	struct net *net = sock_net(&tfile->sk);
3030	struct tun_struct *tun;
3031	void __user* argp = (void __user*)arg;
3032	unsigned int ifindex, carrier;
3033	struct ifreq ifr;
3034	kuid_t owner;
3035	kgid_t group;
3036	int sndbuf;
3037	int vnet_hdr_sz;
3038	int le;
3039	int ret;
3040	bool do_notify = false;
3041
3042	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3043	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3044		if (copy_from_user(&ifr, argp, ifreq_len))
3045			return -EFAULT;
3046	} else {
3047		memset(&ifr, 0, sizeof(ifr));
3048	}
3049	if (cmd == TUNGETFEATURES) {
3050		/* Currently this just means: "what IFF flags are valid?".
3051		 * This is needed because we never checked for invalid flags on
3052		 * TUNSETIFF.
3053		 */
3054		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3055				(unsigned int __user*)argp);
3056	} else if (cmd == TUNSETQUEUE) {
3057		return tun_set_queue(file, &ifr);
3058	} else if (cmd == SIOCGSKNS) {
3059		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3060			return -EPERM;
3061		return open_related_ns(&net->ns, get_net_ns);
3062	}
3063
3064	ret = 0;
3065	rtnl_lock();
3066
3067	tun = tun_get(tfile);
3068	if (cmd == TUNSETIFF) {
3069		ret = -EEXIST;
3070		if (tun)
3071			goto unlock;
3072
3073		ifr.ifr_name[IFNAMSIZ-1] = '\0';
3074
3075		ret = tun_set_iff(net, file, &ifr);
3076
3077		if (ret)
3078			goto unlock;
3079
3080		if (copy_to_user(argp, &ifr, ifreq_len))
3081			ret = -EFAULT;
3082		goto unlock;
3083	}
3084	if (cmd == TUNSETIFINDEX) {
3085		ret = -EPERM;
3086		if (tun)
3087			goto unlock;
3088
3089		ret = -EFAULT;
3090		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3091			goto unlock;
3092
3093		ret = 0;
3094		tfile->ifindex = ifindex;
3095		goto unlock;
3096	}
3097
3098	ret = -EBADFD;
3099	if (!tun)
3100		goto unlock;
3101
3102	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
3103
3104	net = dev_net(tun->dev);
3105	ret = 0;
3106	switch (cmd) {
3107	case TUNGETIFF:
3108		tun_get_iff(tun, &ifr);
3109
3110		if (tfile->detached)
3111			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3112		if (!tfile->socket.sk->sk_filter)
3113			ifr.ifr_flags |= IFF_NOFILTER;
3114
3115		if (copy_to_user(argp, &ifr, ifreq_len))
3116			ret = -EFAULT;
3117		break;
3118
3119	case TUNSETNOCSUM:
3120		/* Disable/Enable checksum */
3121
3122		/* [unimplemented] */
3123		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
3124			  arg ? "disabled" : "enabled");
3125		break;
3126
3127	case TUNSETPERSIST:
3128		/* Disable/Enable persist mode. Keep an extra reference to the
3129		 * module to prevent the module from being unloaded.
3130		 */
3131		if (arg && !(tun->flags & IFF_PERSIST)) {
3132			tun->flags |= IFF_PERSIST;
3133			__module_get(THIS_MODULE);
3134			do_notify = true;
3135		}
3136		if (!arg && (tun->flags & IFF_PERSIST)) {
3137			tun->flags &= ~IFF_PERSIST;
3138			module_put(THIS_MODULE);
3139			do_notify = true;
3140		}
3141
3142		tun_debug(KERN_INFO, tun, "persist %s\n",
3143			  arg ? "enabled" : "disabled");
3144		break;
3145
3146	case TUNSETOWNER:
3147		/* Set owner of the device */
3148		owner = make_kuid(current_user_ns(), arg);
3149		if (!uid_valid(owner)) {
3150			ret = -EINVAL;
3151			break;
3152		}
3153		tun->owner = owner;
3154		do_notify = true;
3155		tun_debug(KERN_INFO, tun, "owner set to %u\n",
3156			  from_kuid(&init_user_ns, tun->owner));
3157		break;
3158
3159	case TUNSETGROUP:
3160		/* Set group of the device */
3161		group = make_kgid(current_user_ns(), arg);
3162		if (!gid_valid(group)) {
3163			ret = -EINVAL;
3164			break;
3165		}
3166		tun->group = group;
3167		do_notify = true;
3168		tun_debug(KERN_INFO, tun, "group set to %u\n",
3169			  from_kgid(&init_user_ns, tun->group));
3170		break;
3171
3172	case TUNSETLINK:
3173		/* Only allow setting the type when the interface is down */
3174		if (tun->dev->flags & IFF_UP) {
3175			tun_debug(KERN_INFO, tun,
3176				  "Linktype set failed because interface is up\n");
3177			ret = -EBUSY;
3178		} else {
3179			tun->dev->type = (int) arg;
3180			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
3181				  tun->dev->type);
3182			ret = 0;
3183		}
3184		break;
3185
3186#ifdef TUN_DEBUG
3187	case TUNSETDEBUG:
3188		tun->debug = arg;
3189		break;
3190#endif
3191	case TUNSETOFFLOAD:
3192		ret = set_offload(tun, arg);
3193		break;
3194
3195	case TUNSETTXFILTER:
3196		/* Can be set only for TAPs */
3197		ret = -EINVAL;
3198		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3199			break;
3200		ret = update_filter(&tun->txflt, (void __user *)arg);
3201		break;
3202
3203	case SIOCGIFHWADDR:
3204		/* Get hw address */
3205		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3206		ifr.ifr_hwaddr.sa_family = tun->dev->type;
3207		if (copy_to_user(argp, &ifr, ifreq_len))
3208			ret = -EFAULT;
3209		break;
3210
3211	case SIOCSIFHWADDR:
3212		/* Set hw address */
3213		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
3214			  ifr.ifr_hwaddr.sa_data);
3215
3216		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
3217		break;
3218
3219	case TUNGETSNDBUF:
3220		sndbuf = tfile->socket.sk->sk_sndbuf;
3221		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3222			ret = -EFAULT;
3223		break;
3224
3225	case TUNSETSNDBUF:
3226		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3227			ret = -EFAULT;
3228			break;
3229		}
3230		if (sndbuf <= 0) {
3231			ret = -EINVAL;
3232			break;
3233		}
3234
3235		tun->sndbuf = sndbuf;
3236		tun_set_sndbuf(tun);
3237		break;
3238
3239	case TUNGETVNETHDRSZ:
3240		vnet_hdr_sz = tun->vnet_hdr_sz;
3241		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3242			ret = -EFAULT;
3243		break;
3244
3245	case TUNSETVNETHDRSZ:
3246		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3247			ret = -EFAULT;
3248			break;
3249		}
3250		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3251			ret = -EINVAL;
3252			break;
3253		}
3254
3255		tun->vnet_hdr_sz = vnet_hdr_sz;
3256		break;
3257
3258	case TUNGETVNETLE:
3259		le = !!(tun->flags & TUN_VNET_LE);
3260		if (put_user(le, (int __user *)argp))
3261			ret = -EFAULT;
3262		break;
3263
3264	case TUNSETVNETLE:
3265		if (get_user(le, (int __user *)argp)) {
3266			ret = -EFAULT;
3267			break;
3268		}
3269		if (le)
3270			tun->flags |= TUN_VNET_LE;
3271		else
3272			tun->flags &= ~TUN_VNET_LE;
3273		break;
3274
3275	case TUNGETVNETBE:
3276		ret = tun_get_vnet_be(tun, argp);
3277		break;
3278
3279	case TUNSETVNETBE:
3280		ret = tun_set_vnet_be(tun, argp);
3281		break;
3282
3283	case TUNATTACHFILTER:
3284		/* Can be set only for TAPs */
3285		ret = -EINVAL;
3286		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3287			break;
3288		ret = -EFAULT;
3289		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3290			break;
3291
3292		ret = tun_attach_filter(tun);
3293		break;
3294
3295	case TUNDETACHFILTER:
3296		/* Can be set only for TAPs */
3297		ret = -EINVAL;
3298		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3299			break;
3300		ret = 0;
3301		tun_detach_filter(tun, tun->numqueues);
3302		break;
3303
3304	case TUNGETFILTER:
3305		ret = -EINVAL;
3306		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3307			break;
3308		ret = -EFAULT;
3309		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3310			break;
3311		ret = 0;
3312		break;
3313
3314	case TUNSETSTEERINGEBPF:
3315		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3316		break;
3317
3318	case TUNSETFILTEREBPF:
3319		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3320		break;
3321
3322	case TUNSETCARRIER:
3323		ret = -EFAULT;
3324		if (copy_from_user(&carrier, argp, sizeof(carrier)))
3325			goto unlock;
3326
3327		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3328		break;
3329
3330	case TUNGETDEVNETNS:
3331		ret = -EPERM;
3332		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3333			goto unlock;
3334		ret = open_related_ns(&net->ns, get_net_ns);
3335		break;
3336
3337	default:
3338		ret = -EINVAL;
3339		break;
3340	}
3341
3342	if (do_notify)
3343		netdev_state_change(tun->dev);
3344
3345unlock:
3346	rtnl_unlock();
3347	if (tun)
3348		tun_put(tun);
3349	return ret;
3350}
3351
3352static long tun_chr_ioctl(struct file *file,
3353			  unsigned int cmd, unsigned long arg)
3354{
3355	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
3356}
3357
3358#ifdef CONFIG_COMPAT
3359static long tun_chr_compat_ioctl(struct file *file,
3360			 unsigned int cmd, unsigned long arg)
3361{
3362	switch (cmd) {
3363	case TUNSETIFF:
3364	case TUNGETIFF:
3365	case TUNSETTXFILTER:
3366	case TUNGETSNDBUF:
3367	case TUNSETSNDBUF:
3368	case SIOCGIFHWADDR:
3369	case SIOCSIFHWADDR:
3370		arg = (unsigned long)compat_ptr(arg);
3371		break;
3372	default:
3373		arg = (compat_ulong_t)arg;
3374		break;
3375	}
3376
3377	/*
3378	 * compat_ifreq is shorter than ifreq, so we must not access beyond
3379	 * the end of that structure. All fields that are used in this
3380	 * driver are compatible though, we don't need to convert the
3381	 * contents.
3382	 */
3383	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3384}
3385#endif /* CONFIG_COMPAT */
3386
3387static int tun_chr_fasync(int fd, struct file *file, int on)
3388{
3389	struct tun_file *tfile = file->private_data;
3390	int ret;
3391
3392	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
3393		goto out;
3394
3395	if (on) {
3396		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3397		tfile->flags |= TUN_FASYNC;
3398	} else
3399		tfile->flags &= ~TUN_FASYNC;
3400	ret = 0;
3401out:
3402	return ret;
3403}
3404
3405static int tun_chr_open(struct inode *inode, struct file * file)
3406{
3407	struct net *net = current->nsproxy->net_ns;
3408	struct tun_file *tfile;
3409
3410	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
3411
3412	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3413					    &tun_proto, 0);
3414	if (!tfile)
3415		return -ENOMEM;
3416	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3417		sk_free(&tfile->sk);
3418		return -ENOMEM;
3419	}
3420
3421	mutex_init(&tfile->napi_mutex);
3422	RCU_INIT_POINTER(tfile->tun, NULL);
3423	tfile->flags = 0;
3424	tfile->ifindex = 0;
3425
3426	init_waitqueue_head(&tfile->socket.wq.wait);
3427
3428	tfile->socket.file = file;
3429	tfile->socket.ops = &tun_socket_ops;
3430
3431	sock_init_data(&tfile->socket, &tfile->sk);
3432
3433	tfile->sk.sk_write_space = tun_sock_write_space;
3434	tfile->sk.sk_sndbuf = INT_MAX;
3435
3436	file->private_data = tfile;
3437	INIT_LIST_HEAD(&tfile->next);
3438
3439	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3440
3441	return 0;
3442}
3443
3444static int tun_chr_close(struct inode *inode, struct file *file)
3445{
3446	struct tun_file *tfile = file->private_data;
3447
3448	tun_detach(tfile, true);
3449
3450	return 0;
3451}
3452
3453#ifdef CONFIG_PROC_FS
3454static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3455{
3456	struct tun_file *tfile = file->private_data;
3457	struct tun_struct *tun;
3458	struct ifreq ifr;
3459
3460	memset(&ifr, 0, sizeof(ifr));
3461
3462	rtnl_lock();
3463	tun = tun_get(tfile);
3464	if (tun)
3465		tun_get_iff(tun, &ifr);
3466	rtnl_unlock();
3467
3468	if (tun)
3469		tun_put(tun);
3470
3471	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3472}
3473#endif
3474
3475static const struct file_operations tun_fops = {
3476	.owner	= THIS_MODULE,
3477	.llseek = no_llseek,
3478	.read_iter  = tun_chr_read_iter,
3479	.write_iter = tun_chr_write_iter,
3480	.poll	= tun_chr_poll,
3481	.unlocked_ioctl	= tun_chr_ioctl,
3482#ifdef CONFIG_COMPAT
3483	.compat_ioctl = tun_chr_compat_ioctl,
3484#endif
3485	.open	= tun_chr_open,
3486	.release = tun_chr_close,
3487	.fasync = tun_chr_fasync,
3488#ifdef CONFIG_PROC_FS
3489	.show_fdinfo = tun_chr_show_fdinfo,
3490#endif
3491};
3492
3493static struct miscdevice tun_miscdev = {
3494	.minor = TUN_MINOR,
3495	.name = "tun",
3496	.nodename = "net/tun",
3497	.fops = &tun_fops,
3498};
3499
3500/* ethtool interface */
3501
3502static void tun_default_link_ksettings(struct net_device *dev,
3503				       struct ethtool_link_ksettings *cmd)
3504{
3505	ethtool_link_ksettings_zero_link_mode(cmd, supported);
3506	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3507	cmd->base.speed		= SPEED_10;
3508	cmd->base.duplex	= DUPLEX_FULL;
3509	cmd->base.port		= PORT_TP;
3510	cmd->base.phy_address	= 0;
3511	cmd->base.autoneg	= AUTONEG_DISABLE;
3512}
3513
3514static int tun_get_link_ksettings(struct net_device *dev,
3515				  struct ethtool_link_ksettings *cmd)
3516{
3517	struct tun_struct *tun = netdev_priv(dev);
3518
3519	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3520	return 0;
3521}
3522
3523static int tun_set_link_ksettings(struct net_device *dev,
3524				  const struct ethtool_link_ksettings *cmd)
3525{
3526	struct tun_struct *tun = netdev_priv(dev);
3527
3528	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3529	return 0;
3530}
3531
3532static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3533{
3534	struct tun_struct *tun = netdev_priv(dev);
3535
3536	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
3537	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
3538
3539	switch (tun->flags & TUN_TYPE_MASK) {
3540	case IFF_TUN:
3541		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
3542		break;
3543	case IFF_TAP:
3544		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
3545		break;
3546	}
3547}
3548
3549static u32 tun_get_msglevel(struct net_device *dev)
3550{
3551#ifdef TUN_DEBUG
3552	struct tun_struct *tun = netdev_priv(dev);
3553	return tun->debug;
3554#else
3555	return -EOPNOTSUPP;
3556#endif
3557}
3558
3559static void tun_set_msglevel(struct net_device *dev, u32 value)
3560{
3561#ifdef TUN_DEBUG
3562	struct tun_struct *tun = netdev_priv(dev);
3563	tun->debug = value;
3564#endif
3565}
3566
3567static int tun_get_coalesce(struct net_device *dev,
3568			    struct ethtool_coalesce *ec)
3569{
3570	struct tun_struct *tun = netdev_priv(dev);
3571
3572	ec->rx_max_coalesced_frames = tun->rx_batched;
3573
3574	return 0;
3575}
3576
3577static int tun_set_coalesce(struct net_device *dev,
3578			    struct ethtool_coalesce *ec)
3579{
3580	struct tun_struct *tun = netdev_priv(dev);
3581
3582	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3583		tun->rx_batched = NAPI_POLL_WEIGHT;
3584	else
3585		tun->rx_batched = ec->rx_max_coalesced_frames;
3586
3587	return 0;
3588}
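
/* rx_batched is tuned with the standard coalescing interface, e.g.
 * (command and value illustrative):
 *
 *   ethtool -C tun0 rx-frames 32
 *
 * tun_set_coalesce() above clamps the value to NAPI_POLL_WEIGHT.
 */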
3589
3590static const struct ethtool_ops tun_ethtool_ops = {
3591	.get_drvinfo	= tun_get_drvinfo,
3592	.get_msglevel	= tun_get_msglevel,
3593	.set_msglevel	= tun_set_msglevel,
3594	.get_link	= ethtool_op_get_link,
3595	.get_ts_info	= ethtool_op_get_ts_info,
3596	.get_coalesce   = tun_get_coalesce,
3597	.set_coalesce   = tun_set_coalesce,
3598	.get_link_ksettings = tun_get_link_ksettings,
3599	.set_link_ksettings = tun_set_link_ksettings,
3600};
3601
3602static int tun_queue_resize(struct tun_struct *tun)
3603{
3604	struct net_device *dev = tun->dev;
3605	struct tun_file *tfile;
3606	struct ptr_ring **rings;
3607	int n = tun->numqueues + tun->numdisabled;
3608	int ret, i;
3609
3610	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3611	if (!rings)
3612		return -ENOMEM;
3613
3614	for (i = 0; i < tun->numqueues; i++) {
3615		tfile = rtnl_dereference(tun->tfiles[i]);
3616		rings[i] = &tfile->tx_ring;
3617	}
3618	list_for_each_entry(tfile, &tun->disabled, next)
3619		rings[i++] = &tfile->tx_ring;
3620
3621	ret = ptr_ring_resize_multiple(rings, n,
3622				       dev->tx_queue_len, GFP_KERNEL,
3623				       tun_ptr_free);
3624
3625	kfree(rings);
3626	return ret;
3627}
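
/* The resize above is driven by the NETDEV_CHANGE_TX_QUEUE_LEN notifier
 * below, i.e. by a userspace change such as (illustrative):
 *
 *   ip link set dev tun0 txqueuelen 1000
 *
 * which re-sizes every per-queue ptr_ring to the new length.
 */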
3628
3629static int tun_device_event(struct notifier_block *unused,
3630			    unsigned long event, void *ptr)
3631{
3632	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3633	struct tun_struct *tun = netdev_priv(dev);
3634	int i;
3635
3636	if (dev->rtnl_link_ops != &tun_link_ops)
3637		return NOTIFY_DONE;
3638
3639	switch (event) {
3640	case NETDEV_CHANGE_TX_QUEUE_LEN:
3641		if (tun_queue_resize(tun))
3642			return NOTIFY_BAD;
3643		break;
3644	case NETDEV_UP:
3645		for (i = 0; i < tun->numqueues; i++) {
3646			struct tun_file *tfile;
3647
3648			tfile = rtnl_dereference(tun->tfiles[i]);
3649			tfile->socket.sk->sk_write_space(tfile->socket.sk);
3650		}
3651		break;
3652	default:
3653		break;
3654	}
3655
3656	return NOTIFY_DONE;
3657}
3658
3659static struct notifier_block tun_notifier_block __read_mostly = {
3660	.notifier_call	= tun_device_event,
3661};
3662
3663static int __init tun_init(void)
3664{
3665	int ret = 0;
3666
3667	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3668
3669	ret = rtnl_link_register(&tun_link_ops);
3670	if (ret) {
3671		pr_err("Can't register link_ops\n");
3672		goto err_linkops;
3673	}
3674
3675	ret = misc_register(&tun_miscdev);
3676	if (ret) {
3677		pr_err("Can't register misc device %d\n", TUN_MINOR);
3678		goto err_misc;
3679	}
3680
3681	ret = register_netdevice_notifier(&tun_notifier_block);
3682	if (ret) {
3683		pr_err("Can't register netdevice notifier\n");
3684		goto err_notifier;
3685	}
3686
3687	return 0;
3688
3689err_notifier:
3690	misc_deregister(&tun_miscdev);
3691err_misc:
3692	rtnl_link_unregister(&tun_link_ops);
3693err_linkops:
3694	return ret;
3695}
3696
3697static void tun_cleanup(void)
3698{
3699	misc_deregister(&tun_miscdev);
3700	rtnl_link_unregister(&tun_link_ops);
3701	unregister_netdevice_notifier(&tun_notifier_block);
3702}
3703
3704/* Get an underlying socket object from tun file.  Returns error unless file is
3705 * attached to a device.  The returned object works like a packet socket, it
3706 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
3707 * holding a reference to the file for as long as the socket is in use. */
3708struct socket *tun_get_socket(struct file *file)
3709{
3710	struct tun_file *tfile;
3711	if (file->f_op != &tun_fops)
3712		return ERR_PTR(-EINVAL);
3713	tfile = file->private_data;
3714	if (!tfile)
3715		return ERR_PTR(-EBADFD);
3716	return &tfile->socket;
3717}
3718EXPORT_SYMBOL_GPL(tun_get_socket);
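
/* Hedged in-kernel sketch of the contract documented above: a caller
 * such as vhost-net resolves a tun fd to its socket. The helper name is
 * an assumption; the error handling mirrors tun_get_socket().
 */
#if 0	/* example only, never compiled */
static int example_peek_tun(struct file *file)
{
	struct socket *sock = tun_get_socket(file);

	if (IS_ERR(sock))
		return PTR_ERR(sock);
	/* Works like a packet socket; the caller must keep its reference
	 * on @file for as long as it uses @sock.
	 */
	return sock->ops->peek_len(sock);
}
#endif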
3719
3720struct ptr_ring *tun_get_tx_ring(struct file *file)
3721{
3722	struct tun_file *tfile;
3723
3724	if (file->f_op != &tun_fops)
3725		return ERR_PTR(-EINVAL);
3726	tfile = file->private_data;
3727	if (!tfile)
3728		return ERR_PTR(-EBADFD);
3729	return &tfile->tx_ring;
3730}
3731EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3732
3733module_init(tun_init);
3734module_exit(tun_cleanup);
3735MODULE_DESCRIPTION(DRV_DESCRIPTION);
3736MODULE_AUTHOR(DRV_COPYRIGHT);
3737MODULE_LICENSE("GPL");
3738MODULE_ALIAS_MISCDEV(TUN_MINOR);
3739MODULE_ALIAS("devname:net/tun");