   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  TUN - Universal TUN/TAP device driver.
   4 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
   5 *
   6 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
   7 */
   8
   9/*
  10 *  Changes:
  11 *
  12 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  13 *    Add TUNSETLINK ioctl to set the link encapsulation
  14 *
  15 *  Mark Smith <markzzzsmith@yahoo.com.au>
  16 *    Use eth_random_addr() for tap MAC address.
  17 *
  18 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
  19 *    Fixes in packet dropping, queue length setting and queue wakeup.
  20 *    Increased default tx queue length.
  21 *    Added ethtool API.
  22 *    Minor cleanups
  23 *
  24 *  Daniel Podlejski <underley@underley.eu.org>
  25 *    Modifications for 2.3.99-pre5 kernel.
  26 */
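/*
 * Illustrative userspace sketch (not part of this driver): the canonical
 * way to create a tun interface through the /dev/net/tun character
 * device.  The interface name passed by a caller and the flag choice
 * below are examples, not anything the driver mandates.
 */
#if 0	/* example only, never built with the driver */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tun_alloc_example(const char *name)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no tun_pi header */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* read()/write() on fd now move whole packets */
}
#endif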
  27
  28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  29
  30#define DRV_NAME	"tun"
  31#define DRV_VERSION	"1.6"
  32#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
  33#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
  34
  35#include <linux/module.h>
  36#include <linux/errno.h>
  37#include <linux/kernel.h>
  38#include <linux/sched/signal.h>
  39#include <linux/major.h>
  40#include <linux/slab.h>
  41#include <linux/poll.h>
  42#include <linux/fcntl.h>
  43#include <linux/init.h>
  44#include <linux/skbuff.h>
  45#include <linux/netdevice.h>
  46#include <linux/etherdevice.h>
  47#include <linux/miscdevice.h>
  48#include <linux/ethtool.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/compat.h>
  51#include <linux/if.h>
  52#include <linux/if_arp.h>
  53#include <linux/if_ether.h>
  54#include <linux/if_tun.h>
  55#include <linux/if_vlan.h>
  56#include <linux/crc32.h>
  57#include <linux/math.h>
  58#include <linux/nsproxy.h>
  59#include <linux/virtio_net.h>
  60#include <linux/rcupdate.h>
  61#include <net/net_namespace.h>
  62#include <net/netns/generic.h>
  63#include <net/rtnetlink.h>
  64#include <net/sock.h>
  65#include <net/xdp.h>
  66#include <net/ip_tunnels.h>
  67#include <linux/seq_file.h>
  68#include <linux/uio.h>
  69#include <linux/skb_array.h>
  70#include <linux/bpf.h>
  71#include <linux/bpf_trace.h>
  72#include <linux/mutex.h>
  73#include <linux/ieee802154.h>
  74#include <uapi/linux/if_ltalk.h>
  75#include <uapi/linux/if_fddi.h>
  76#include <uapi/linux/if_hippi.h>
  77#include <uapi/linux/if_fc.h>
  78#include <net/ax25.h>
  79#include <net/rose.h>
  80#include <net/6lowpan.h>
  81#include <net/rps.h>
  82
  83#include <linux/uaccess.h>
  84#include <linux/proc_fs.h>
  85
  86static void tun_default_link_ksettings(struct net_device *dev,
  87				       struct ethtool_link_ksettings *cmd);
  88
  89#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
  90
  91/* TUN device flags */
  92
  93/* IFF_ATTACH_QUEUE is never stored in device flags, so it is
  94 * overloaded to mean fasync when stored there.
  95 */
  96#define TUN_FASYNC	IFF_ATTACH_QUEUE
  97/* High bits in flags field are unused. */
  98#define TUN_VNET_LE     0x80000000
  99#define TUN_VNET_BE     0x40000000
 100
 101#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
 102		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
 103
 104#define GOODCOPY_LEN 128
 105
 106#define FLT_EXACT_COUNT 8
 107struct tap_filter {
 108	unsigned int    count;    /* Number of addrs. Zero means disabled */
 109	u32             mask[2];  /* Mask of the hashed addrs */
 110	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 111};
 112
 113/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 114 * to the max number of VCPUs in a guest. */
 115#define MAX_TAP_QUEUES 256
 116#define MAX_TAP_FLOWS  4096
 117
 118#define TUN_FLOW_EXPIRE (3 * HZ)
 119
 120/* A tun_file connects an open character device to a tuntap netdevice. It
 121 * also contains all socket related structures (except sock_fprog and tap_filter)
 122 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 123 * tap_filter are kept in tun_struct since they are used for filtering on the
 124 * netdevice, not on a specific queue (at least I didn't see the requirement for
 125 * this).
 126 *
 127 * RCU usage:
 128 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 129 * other can only be read while rcu_read_lock or rtnl_lock is held.
 130 */
 131struct tun_file {
 132	struct sock sk;
 133	struct socket socket;
 134	struct tun_struct __rcu *tun;
 135	struct fasync_struct *fasync;
 136	/* only used for fasync */
 137	unsigned int flags;
 138	union {
 139		u16 queue_index;
 140		unsigned int ifindex;
 141	};
 142	struct napi_struct napi;
 143	bool napi_enabled;
 144	bool napi_frags_enabled;
 145	struct mutex napi_mutex;	/* Protects access to the above napi */
 146	struct list_head next;
 147	struct tun_struct *detached;
 148	struct ptr_ring tx_ring;
 149	struct xdp_rxq_info xdp_rxq;
 150};
 151
 152struct tun_page {
 153	struct page *page;
 154	int count;
 155};
 156
 157struct tun_flow_entry {
 158	struct hlist_node hash_link;
 159	struct rcu_head rcu;
 160	struct tun_struct *tun;
 161
 162	u32 rxhash;
 163	u32 rps_rxhash;
 164	int queue_index;
 165	unsigned long updated ____cacheline_aligned_in_smp;
 166};
 167
 168#define TUN_NUM_FLOW_ENTRIES 1024
 169#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
 170
 171struct tun_prog {
 172	struct rcu_head rcu;
 173	struct bpf_prog *prog;
 174};
 175
 176/* Since the socket was moved to tun_file, to preserve the behavior of a
 177 * persistent device, the socket filter, sndbuf and vnet header size are
 178 * restored when the file is attached to a persistent device.
 179 */
 180struct tun_struct {
 181	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
 182	unsigned int            numqueues;
 183	unsigned int 		flags;
 184	kuid_t			owner;
 185	kgid_t			group;
 186
 187	struct net_device	*dev;
 188	netdev_features_t	set_features;
 189#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 190			  NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)
 191
 192	int			align;
 193	int			vnet_hdr_sz;
 194	int			sndbuf;
 195	struct tap_filter	txflt;
 196	struct sock_fprog	fprog;
 197	/* protected by rtnl lock */
 198	bool			filter_attached;
 199	u32			msg_enable;
 200	spinlock_t lock;
 201	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
 202	struct timer_list flow_gc_timer;
 203	unsigned long ageing_time;
 204	unsigned int numdisabled;
 205	struct list_head disabled;
 206	void *security;
 207	u32 flow_count;
 208	u32 rx_batched;
 209	atomic_long_t rx_frame_errors;
 210	struct bpf_prog __rcu *xdp_prog;
 211	struct tun_prog __rcu *steering_prog;
 212	struct tun_prog __rcu *filter_prog;
 213	struct ethtool_link_ksettings link_ksettings;
 214	/* init args */
 215	struct file *file;
 216	struct ifreq *ifr;
 217};
 218
 219struct veth {
 220	__be16 h_vlan_proto;
 221	__be16 h_vlan_TCI;
 222};
 223
 224static void tun_flow_init(struct tun_struct *tun);
 225static void tun_flow_uninit(struct tun_struct *tun);
 226
 227static int tun_napi_receive(struct napi_struct *napi, int budget)
 228{
 229	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
 230	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
 231	struct sk_buff_head process_queue;
 232	struct sk_buff *skb;
 233	int received = 0;
 234
 235	__skb_queue_head_init(&process_queue);
 236
 237	spin_lock(&queue->lock);
 238	skb_queue_splice_tail_init(queue, &process_queue);
 239	spin_unlock(&queue->lock);
 240
 241	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
 242		napi_gro_receive(napi, skb);
 243		++received;
 244	}
 245
 246	if (!skb_queue_empty(&process_queue)) {
 247		spin_lock(&queue->lock);
 248		skb_queue_splice(&process_queue, queue);
 249		spin_unlock(&queue->lock);
 250	}
 251
 252	return received;
 253}
 254
 255static int tun_napi_poll(struct napi_struct *napi, int budget)
 256{
 257	unsigned int received;
 258
 259	received = tun_napi_receive(napi, budget);
 260
 261	if (received < budget)
 262		napi_complete_done(napi, received);
 263
 264	return received;
 265}
 266
 267static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
 268			  bool napi_en, bool napi_frags)
 269{
 270	tfile->napi_enabled = napi_en;
 271	tfile->napi_frags_enabled = napi_en && napi_frags;
 272	if (napi_en) {
 273		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
 274		napi_enable(&tfile->napi);
 275	}
 276}
 277
 278static void tun_napi_enable(struct tun_file *tfile)
 279{
 280	if (tfile->napi_enabled)
 281		napi_enable(&tfile->napi);
 282}
 283
 284static void tun_napi_disable(struct tun_file *tfile)
 285{
 286	if (tfile->napi_enabled)
 287		napi_disable(&tfile->napi);
 288}
 289
 290static void tun_napi_del(struct tun_file *tfile)
 291{
 292	if (tfile->napi_enabled)
 293		netif_napi_del(&tfile->napi);
 294}
 295
 296static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 297{
 298	return tfile->napi_frags_enabled;
 299}
 300
 301#ifdef CONFIG_TUN_VNET_CROSS_LE
 302static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 303{
 304	return tun->flags & TUN_VNET_BE ? false :
 305		virtio_legacy_is_little_endian();
 306}
 307
 308static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 309{
 310	int be = !!(tun->flags & TUN_VNET_BE);
 311
 312	if (put_user(be, argp))
 313		return -EFAULT;
 314
 315	return 0;
 316}
 317
 318static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 319{
 320	int be;
 321
 322	if (get_user(be, argp))
 323		return -EFAULT;
 324
 325	if (be)
 326		tun->flags |= TUN_VNET_BE;
 327	else
 328		tun->flags &= ~TUN_VNET_BE;
 329
 330	return 0;
 331}
 332#else
 333static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 334{
 335	return virtio_legacy_is_little_endian();
 336}
 337
 338static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 339{
 340	return -EINVAL;
 341}
 342
 343static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 344{
 345	return -EINVAL;
 346}
 347#endif /* CONFIG_TUN_VNET_CROSS_LE */
 348
 349static inline bool tun_is_little_endian(struct tun_struct *tun)
 350{
 351	return tun->flags & TUN_VNET_LE ||
 352		tun_legacy_is_little_endian(tun);
 353}
 354
 355static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
 356{
 357	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
 358}
 359
 360static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
 361{
 362	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
 363}
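/*
 * Illustrative userspace sketch (assumption: "fd" is an open tun queue
 * created with IFF_VNET_HDR): pin the vnet header to little endian so
 * that tun_is_little_endian() above holds regardless of guest byte
 * order.
 */
#if 0	/* example only */
	int le = 1;

	if (ioctl(fd, TUNSETVNETLE, &le) < 0)
		perror("TUNSETVNETLE");
#endif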
 364
 365static inline u32 tun_hashfn(u32 rxhash)
 366{
 367	return rxhash & TUN_MASK_FLOW_ENTRIES;
 368}
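/* TUN_NUM_FLOW_ENTRIES is a power of two, so the masking above is
 * equivalent to rxhash % TUN_NUM_FLOW_ENTRIES without a division.
 */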
 369
 370static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 371{
 372	struct tun_flow_entry *e;
 373
 374	hlist_for_each_entry_rcu(e, head, hash_link) {
 375		if (e->rxhash == rxhash)
 376			return e;
 377	}
 378	return NULL;
 379}
 380
 381static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 382					      struct hlist_head *head,
 383					      u32 rxhash, u16 queue_index)
 384{
 385	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
 386
 387	if (e) {
 388		netif_info(tun, tx_queued, tun->dev,
 389			   "create flow: hash %u index %u\n",
 390			   rxhash, queue_index);
 391		e->updated = jiffies;
 392		e->rxhash = rxhash;
 393		e->rps_rxhash = 0;
 394		e->queue_index = queue_index;
 395		e->tun = tun;
 396		hlist_add_head_rcu(&e->hash_link, head);
 397		++tun->flow_count;
 398	}
 399	return e;
 400}
 401
 402static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 403{
 404	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
 405		   e->rxhash, e->queue_index);
 406	hlist_del_rcu(&e->hash_link);
 407	kfree_rcu(e, rcu);
 408	--tun->flow_count;
 409}
 410
 411static void tun_flow_flush(struct tun_struct *tun)
 412{
 413	int i;
 414
 415	spin_lock_bh(&tun->lock);
 416	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 417		struct tun_flow_entry *e;
 418		struct hlist_node *n;
 419
 420		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
 421			tun_flow_delete(tun, e);
 422	}
 423	spin_unlock_bh(&tun->lock);
 424}
 425
 426static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
 427{
 428	int i;
 429
 430	spin_lock_bh(&tun->lock);
 431	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 432		struct tun_flow_entry *e;
 433		struct hlist_node *n;
 434
 435		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 436			if (e->queue_index == queue_index)
 437				tun_flow_delete(tun, e);
 438		}
 439	}
 440	spin_unlock_bh(&tun->lock);
 441}
 442
 443static void tun_flow_cleanup(struct timer_list *t)
 444{
 445	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
 446	unsigned long delay = tun->ageing_time;
 447	unsigned long next_timer = jiffies + delay;
 448	unsigned long count = 0;
 449	int i;
 450
 451	spin_lock(&tun->lock);
 452	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 453		struct tun_flow_entry *e;
 454		struct hlist_node *n;
 455
 456		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 457			unsigned long this_timer;
 458
 459			this_timer = e->updated + delay;
 460			if (time_before_eq(this_timer, jiffies)) {
 461				tun_flow_delete(tun, e);
 462				continue;
 463			}
 464			count++;
 465			if (time_before(this_timer, next_timer))
 466				next_timer = this_timer;
 467		}
 468	}
 469
 470	if (count)
 471		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
 472	spin_unlock(&tun->lock);
 473}
 474
 475static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 476			    struct tun_file *tfile)
 477{
 478	struct hlist_head *head;
 479	struct tun_flow_entry *e;
 480	unsigned long delay = tun->ageing_time;
 481	u16 queue_index = tfile->queue_index;
 482
 483	head = &tun->flows[tun_hashfn(rxhash)];
 484
 485	rcu_read_lock();
 486
 487	e = tun_flow_find(head, rxhash);
 488	if (likely(e)) {
 489		/* TODO: keep queueing to old queue until it's empty? */
 490		if (READ_ONCE(e->queue_index) != queue_index)
 491			WRITE_ONCE(e->queue_index, queue_index);
 492		if (e->updated != jiffies)
 493			e->updated = jiffies;
 494		sock_rps_record_flow_hash(e->rps_rxhash);
 495	} else {
 496		spin_lock_bh(&tun->lock);
 497		if (!tun_flow_find(head, rxhash) &&
 498		    tun->flow_count < MAX_TAP_FLOWS)
 499			tun_flow_create(tun, head, rxhash, queue_index);
 500
 501		if (!timer_pending(&tun->flow_gc_timer))
 502			mod_timer(&tun->flow_gc_timer,
 503				  round_jiffies_up(jiffies + delay));
 504		spin_unlock_bh(&tun->lock);
 505	}
 506
 507	rcu_read_unlock();
 508}
 509
 510/* Save the hash received in the stack receive path and update the
 511 * flow_hash table accordingly.
 512 */
 513static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 514{
 515	if (unlikely(e->rps_rxhash != hash))
 516		e->rps_rxhash = hash;
 517}
 518
 519/* We try to identify a flow through its rxhash. The reason that
 520 * we do not check the rxq no. is that some cards (e.g. the 82599) choose
 521 * the rxq based on the txq where the last packet of the flow was sent. As
 522 * the userspace application moves between processors, we may get a
 523 * different rxq no. here.
 524 */
 525static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 526{
 527	struct tun_flow_entry *e;
 528	u32 txq, numqueues;
 529
 530	numqueues = READ_ONCE(tun->numqueues);
 531
 532	txq = __skb_get_hash_symmetric(skb);
 533	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
 534	if (e) {
 535		tun_flow_save_rps_rxhash(e, txq);
 536		txq = e->queue_index;
 537	} else {
 538		txq = reciprocal_scale(txq, numqueues);
 539	}
 540
 541	return txq;
 542}
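/*
 * reciprocal_scale() above maps a 32-bit hash onto [0, numqueues) without
 * a division.  Minimal sketch of the math behind the kernel helper:
 */
#if 0	/* example only */
static inline u32 example_reciprocal_scale(u32 val, u32 ep_ro)
{
	/* (val * ep_ro) >> 32 scales val into [0, ep_ro) */
	return (u32)(((u64)val * ep_ro) >> 32);
}
#endif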
 543
 544static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 545{
 546	struct tun_prog *prog;
 547	u32 numqueues;
 548	u16 ret = 0;
 549
 550	numqueues = READ_ONCE(tun->numqueues);
 551	if (!numqueues)
 552		return 0;
 553
 554	prog = rcu_dereference(tun->steering_prog);
 555	if (prog)
 556		ret = bpf_prog_run_clear_cb(prog->prog, skb);
 557
 558	return ret % numqueues;
 559}
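/*
 * Illustrative userspace sketch (assumption: "prog_fd" refers to a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program whose return value picks the queue,
 * as consumed by tun_ebpf_select_queue() above):
 */
#if 0	/* example only */
	if (ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd) < 0)
		perror("TUNSETSTEERINGEBPF");
#endif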
 560
 561static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 562			    struct net_device *sb_dev)
 563{
 564	struct tun_struct *tun = netdev_priv(dev);
 565	u16 ret;
 566
 567	rcu_read_lock();
 568	if (rcu_dereference(tun->steering_prog))
 569		ret = tun_ebpf_select_queue(tun, skb);
 570	else
 571		ret = tun_automq_select_queue(tun, skb);
 572	rcu_read_unlock();
 573
 574	return ret;
 575}
 576
 577static inline bool tun_not_capable(struct tun_struct *tun)
 578{
 579	const struct cred *cred = current_cred();
 580	struct net *net = dev_net(tun->dev);
 581
 582	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
 583		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
 584		!ns_capable(net->user_ns, CAP_NET_ADMIN);
 585}
 586
 587static void tun_set_real_num_queues(struct tun_struct *tun)
 588{
 589	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
 590	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
 591}
 592
 593static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
 594{
 595	tfile->detached = tun;
 596	list_add_tail(&tfile->next, &tun->disabled);
 597	++tun->numdisabled;
 598}
 599
 600static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
 601{
 602	struct tun_struct *tun = tfile->detached;
 603
 604	tfile->detached = NULL;
 605	list_del_init(&tfile->next);
 606	--tun->numdisabled;
 607	return tun;
 608}
 609
 610void tun_ptr_free(void *ptr)
 611{
 612	if (!ptr)
 613		return;
 614	if (tun_is_xdp_frame(ptr)) {
 615		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 616
 617		xdp_return_frame(xdpf);
 618	} else {
 619		__skb_array_destroy_skb(ptr);
 620	}
 621}
 622EXPORT_SYMBOL_GPL(tun_ptr_free);
 623
 624static void tun_queue_purge(struct tun_file *tfile)
 625{
 626	void *ptr;
 627
 628	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
 629		tun_ptr_free(ptr);
 630
 631	skb_queue_purge(&tfile->sk.sk_write_queue);
 632	skb_queue_purge(&tfile->sk.sk_error_queue);
 633}
 634
 635static void __tun_detach(struct tun_file *tfile, bool clean)
 636{
 637	struct tun_file *ntfile;
 638	struct tun_struct *tun;
 639
 640	tun = rtnl_dereference(tfile->tun);
 641
 642	if (tun && clean) {
 643		if (!tfile->detached)
 644			tun_napi_disable(tfile);
 645		tun_napi_del(tfile);
 646	}
 647
 648	if (tun && !tfile->detached) {
 649		u16 index = tfile->queue_index;
 650		BUG_ON(index >= tun->numqueues);
 651
 652		rcu_assign_pointer(tun->tfiles[index],
 653				   tun->tfiles[tun->numqueues - 1]);
 654		ntfile = rtnl_dereference(tun->tfiles[index]);
 655		ntfile->queue_index = index;
 656		ntfile->xdp_rxq.queue_index = index;
 657		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
 658				   NULL);
 659
 660		--tun->numqueues;
 661		if (clean) {
 662			RCU_INIT_POINTER(tfile->tun, NULL);
 663			sock_put(&tfile->sk);
 664		} else {
 665			tun_disable_queue(tun, tfile);
 666			tun_napi_disable(tfile);
 667		}
 668
 669		synchronize_net();
 670		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
 671		/* Drop read queue */
 672		tun_queue_purge(tfile);
 673		tun_set_real_num_queues(tun);
 674	} else if (tfile->detached && clean) {
 675		tun = tun_enable_queue(tfile);
 676		sock_put(&tfile->sk);
 677	}
 678
 679	if (clean) {
 680		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
 681			netif_carrier_off(tun->dev);
 682
 683			if (!(tun->flags & IFF_PERSIST) &&
 684			    tun->dev->reg_state == NETREG_REGISTERED)
 685				unregister_netdevice(tun->dev);
 686		}
 687		if (tun)
 688			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 689		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
 690	}
 691}
 692
 693static void tun_detach(struct tun_file *tfile, bool clean)
 694{
 695	struct tun_struct *tun;
 696	struct net_device *dev;
 697
 698	rtnl_lock();
 699	tun = rtnl_dereference(tfile->tun);
 700	dev = tun ? tun->dev : NULL;
 701	__tun_detach(tfile, clean);
 702	if (dev)
 703		netdev_state_change(dev);
 704	rtnl_unlock();
 705
 706	if (clean)
 707		sock_put(&tfile->sk);
 708}
 709
 710static void tun_detach_all(struct net_device *dev)
 711{
 712	struct tun_struct *tun = netdev_priv(dev);
 713	struct tun_file *tfile, *tmp;
 714	int i, n = tun->numqueues;
 715
 716	for (i = 0; i < n; i++) {
 717		tfile = rtnl_dereference(tun->tfiles[i]);
 718		BUG_ON(!tfile);
 719		tun_napi_disable(tfile);
 720		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 721		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 722		RCU_INIT_POINTER(tfile->tun, NULL);
 723		--tun->numqueues;
 724	}
 725	list_for_each_entry(tfile, &tun->disabled, next) {
 726		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 727		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 728		RCU_INIT_POINTER(tfile->tun, NULL);
 729	}
 730	BUG_ON(tun->numqueues != 0);
 731
 732	synchronize_net();
 733	for (i = 0; i < n; i++) {
 734		tfile = rtnl_dereference(tun->tfiles[i]);
 735		tun_napi_del(tfile);
 736		/* Drop read queue */
 737		tun_queue_purge(tfile);
 738		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 739		sock_put(&tfile->sk);
 740	}
 741	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 742		tun_napi_del(tfile);
 743		tun_enable_queue(tfile);
 744		tun_queue_purge(tfile);
 745		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 746		sock_put(&tfile->sk);
 747	}
 748	BUG_ON(tun->numdisabled != 0);
 749
 750	if (tun->flags & IFF_PERSIST)
 751		module_put(THIS_MODULE);
 752}
 753
 754static int tun_attach(struct tun_struct *tun, struct file *file,
 755		      bool skip_filter, bool napi, bool napi_frags,
 756		      bool publish_tun)
 757{
 758	struct tun_file *tfile = file->private_data;
 759	struct net_device *dev = tun->dev;
 760	int err;
 761
 762	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
 763	if (err < 0)
 764		goto out;
 765
 766	err = -EINVAL;
 767	if (rtnl_dereference(tfile->tun) && !tfile->detached)
 768		goto out;
 769
 770	err = -EBUSY;
 771	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
 772		goto out;
 773
 774	err = -E2BIG;
 775	if (!tfile->detached &&
 776	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
 777		goto out;
 778
 779	err = 0;
 780
 781	/* Re-attach the filter to the persistent device */
 782	if (!skip_filter && (tun->filter_attached == true)) {
 783		lock_sock(tfile->socket.sk);
 784		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 785		release_sock(tfile->socket.sk);
 786		if (!err)
 787			goto out;
 788	}
 789
 790	if (!tfile->detached &&
 791	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
 792			    GFP_KERNEL, tun_ptr_free)) {
 793		err = -ENOMEM;
 794		goto out;
 795	}
 796
 797	tfile->queue_index = tun->numqueues;
 798	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
 799
 800	if (tfile->detached) {
 801		/* Re-attach detached tfile, updating XDP queue_index */
 802		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
 803
 804		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
 805			tfile->xdp_rxq.queue_index = tfile->queue_index;
 806	} else {
 807		/* Setup XDP RX-queue info, for new tfile getting attached */
 808		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
 809				       tun->dev, tfile->queue_index, 0);
 810		if (err < 0)
 811			goto out;
 812		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
 813						 MEM_TYPE_PAGE_SHARED, NULL);
 814		if (err < 0) {
 815			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 816			goto out;
 817		}
 818		err = 0;
 819	}
 820
 821	if (tfile->detached) {
 822		tun_enable_queue(tfile);
 823		tun_napi_enable(tfile);
 824	} else {
 825		sock_hold(&tfile->sk);
 826		tun_napi_init(tun, tfile, napi, napi_frags);
 827	}
 828
 829	if (rtnl_dereference(tun->xdp_prog))
 830		sock_set_flag(&tfile->sk, SOCK_XDP);
 831
 832	/* device is allowed to go away first, so no need to hold extra
 833	 * refcnt.
 834	 */
 835
 836	/* Publish tfile->tun and tun->tfiles only after we've fully
 837	 * initialized tfile; otherwise we risk using half-initialized
 838	 * object.
 839	 */
 840	if (publish_tun)
 841		rcu_assign_pointer(tfile->tun, tun);
 842	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 843	tun->numqueues++;
 844	tun_set_real_num_queues(tun);
 845out:
 846	return err;
 847}
 848
 849static struct tun_struct *tun_get(struct tun_file *tfile)
 850{
 851	struct tun_struct *tun;
 852
 853	rcu_read_lock();
 854	tun = rcu_dereference(tfile->tun);
 855	if (tun)
 856		dev_hold(tun->dev);
 857	rcu_read_unlock();
 858
 859	return tun;
 860}
 861
 862static void tun_put(struct tun_struct *tun)
 863{
 864	dev_put(tun->dev);
 865}
 866
 867/* TAP filtering */
 868static void addr_hash_set(u32 *mask, const u8 *addr)
 869{
 870	int n = ether_crc(ETH_ALEN, addr) >> 26;
 871	mask[n >> 5] |= (1 << (n & 31));
 872}
 873
 874static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
 875{
 876	int n = ether_crc(ETH_ALEN, addr) >> 26;
 877	return mask[n >> 5] & (1 << (n & 31));
 878}
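/* The two helpers above hash a MAC address to a single bit in a 64-bit
 * mask: the top six CRC bits select bit n, mask[n >> 5] picks one of the
 * two u32 words, and (1 << (n & 31)) the bit within that word.
 */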
 879
 880static int update_filter(struct tap_filter *filter, void __user *arg)
 881{
 882	struct { u8 u[ETH_ALEN]; } *addr;
 883	struct tun_filter uf;
 884	int err, alen, n, nexact;
 885
 886	if (copy_from_user(&uf, arg, sizeof(uf)))
 887		return -EFAULT;
 888
 889	if (!uf.count) {
 890		/* Disabled */
 891		filter->count = 0;
 892		return 0;
 893	}
 894
 895	alen = ETH_ALEN * uf.count;
 896	addr = memdup_user(arg + sizeof(uf), alen);
 897	if (IS_ERR(addr))
 898		return PTR_ERR(addr);
 899
 900	/* The filter is updated without holding any locks, which is
 901	 * perfectly safe. We disable it first and in the worst
 902	 * case we'll accept a few undesired packets. */
 903	filter->count = 0;
 904	wmb();
 905
 906	/* Use first set of addresses as an exact filter */
 907	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
 908		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
 909
 910	nexact = n;
 911
 912	/* Remaining multicast addresses are hashed,
 913	 * unicast will leave the filter disabled. */
 914	memset(filter->mask, 0, sizeof(filter->mask));
 915	for (; n < uf.count; n++) {
 916		if (!is_multicast_ether_addr(addr[n].u)) {
 917			err = 0; /* no filter */
 918			goto free_addr;
 919		}
 920		addr_hash_set(filter->mask, addr[n].u);
 921	}
 922
 923	/* For ALLMULTI just set the mask to all ones.
 924	 * This overrides the mask populated above. */
 925	if ((uf.flags & TUN_FLT_ALLMULTI))
 926		memset(filter->mask, ~0, sizeof(filter->mask));
 927
 928	/* Now enable the filter */
 929	wmb();
 930	filter->count = nexact;
 931
 932	/* Return the number of exact filters */
 933	err = nexact;
 934free_addr:
 935	kfree(addr);
 936	return err;
 937}
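/*
 * Illustrative userspace sketch: install an exact filter for one MAC
 * address via TUNSETTXFILTER (TAP only).  The address is an arbitrary
 * locally administered example.
 */
#if 0	/* example only */
	char buf[sizeof(struct tun_filter) + ETH_ALEN];
	struct tun_filter *uf = (struct tun_filter *)buf;
	const __u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	uf->flags = 0;
	uf->count = 1;
	memcpy(uf->addr, mac, ETH_ALEN);

	if (ioctl(fd, TUNSETTXFILTER, uf) < 0)
		perror("TUNSETTXFILTER");
#endif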
 938
 939/* Returns: 0 - drop, !=0 - accept */
 940static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
 941{
 942	/* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
 943	 * at this point. */
 944	struct ethhdr *eh = (struct ethhdr *) skb->data;
 945	int i;
 946
 947	/* Exact match */
 948	for (i = 0; i < filter->count; i++)
 949		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
 950			return 1;
 951
 952	/* Inexact match (multicast only) */
 953	if (is_multicast_ether_addr(eh->h_dest))
 954		return addr_hash_test(filter->mask, eh->h_dest);
 955
 956	return 0;
 957}
 958
 959/*
 960 * Checks whether the packet is accepted or not.
 961 * Returns: 0 - drop, !=0 - accept
 962 */
 963static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 964{
 965	if (!filter->count)
 966		return 1;
 967
 968	return run_filter(filter, skb);
 969}
 970
 971/* Network device part of the driver */
 972
 973static const struct ethtool_ops tun_ethtool_ops;
 974
 975static int tun_net_init(struct net_device *dev)
 976{
 977	struct tun_struct *tun = netdev_priv(dev);
 978	struct ifreq *ifr = tun->ifr;
 979	int err;
 980
 981	spin_lock_init(&tun->lock);
 982
 983	err = security_tun_dev_alloc_security(&tun->security);
 984	if (err < 0)
 985		return err;
 986
 987	tun_flow_init(tun);
 988
 989	dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
 990	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
 991			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
 992			   NETIF_F_HW_VLAN_STAG_TX;
 993	dev->features = dev->hw_features;
 994	dev->vlan_features = dev->features &
 995			     ~(NETIF_F_HW_VLAN_CTAG_TX |
 996			       NETIF_F_HW_VLAN_STAG_TX);
 997	dev->lltx = true;
 998
 999	tun->flags = (tun->flags & ~TUN_FEATURES) |
1000		      (ifr->ifr_flags & TUN_FEATURES);
1001
1002	INIT_LIST_HEAD(&tun->disabled);
1003	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
1004			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
1005	if (err < 0) {
1006		tun_flow_uninit(tun);
1007		security_tun_dev_free_security(tun->security);
1008		return err;
1009	}
1010	return 0;
1011}
1012
1013/* Net device detach from fd. */
1014static void tun_net_uninit(struct net_device *dev)
1015{
1016	tun_detach_all(dev);
1017}
1018
1019/* Net device open. */
1020static int tun_net_open(struct net_device *dev)
1021{
1022	netif_tx_start_all_queues(dev);
1023
1024	return 0;
1025}
1026
1027/* Net device close. */
1028static int tun_net_close(struct net_device *dev)
1029{
1030	netif_tx_stop_all_queues(dev);
1031	return 0;
1032}
1033
1034/* Net device start xmit */
1035static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1036{
1037#ifdef CONFIG_RPS
1038	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1039		/* Select queue was not called for the skbuff, so we extract the
1040		 * RPS hash and save it into the flow_table here.
1041		 */
1042		struct tun_flow_entry *e;
1043		__u32 rxhash;
1044
1045		rxhash = __skb_get_hash_symmetric(skb);
1046		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1047		if (e)
1048			tun_flow_save_rps_rxhash(e, rxhash);
1049	}
1050#endif
1051}
1052
1053static unsigned int run_ebpf_filter(struct tun_struct *tun,
1054				    struct sk_buff *skb,
1055				    int len)
1056{
1057	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1058
1059	if (prog)
1060		len = bpf_prog_run_clear_cb(prog->prog, skb);
1061
1062	return len;
1063}
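/*
 * Illustrative userspace sketch (assumption: "prog_fd" refers to a loaded
 * BPF_PROG_TYPE_SOCKET_FILTER program): install the egress filter that
 * run_ebpf_filter() above consults; returning 0 drops the packet, any
 * other value trims it to that length.
 */
#if 0	/* example only */
	if (ioctl(fd, TUNSETFILTEREBPF, &prog_fd) < 0)
		perror("TUNSETFILTEREBPF");
#endif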
1064
1065/* Net device start xmit */
1066static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1067{
1068	struct tun_struct *tun = netdev_priv(dev);
1069	enum skb_drop_reason drop_reason;
1070	int txq = skb->queue_mapping;
1071	struct netdev_queue *queue;
1072	struct tun_file *tfile;
1073	int len = skb->len;
1074
1075	rcu_read_lock();
1076	tfile = rcu_dereference(tun->tfiles[txq]);
1077
1078	/* Drop packet if interface is not attached */
1079	if (!tfile) {
1080		drop_reason = SKB_DROP_REASON_DEV_READY;
1081		goto drop;
1082	}
1083
1084	if (!rcu_dereference(tun->steering_prog))
1085		tun_automq_xmit(tun, skb);
1086
1087	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1088
1089	/* Drop if the filter does not like it.
1090	 * This is a noop if the filter is disabled.
1091	 * Filter can be enabled only for the TAP devices. */
1092	if (!check_filter(&tun->txflt, skb)) {
1093		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
1094		goto drop;
1095	}
1096
1097	if (tfile->socket.sk->sk_filter &&
1098	    sk_filter(tfile->socket.sk, skb)) {
1099		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1100		goto drop;
1101	}
1102
1103	len = run_ebpf_filter(tun, skb, len);
1104	if (len == 0) {
1105		drop_reason = SKB_DROP_REASON_TAP_FILTER;
1106		goto drop;
1107	}
1108
1109	if (pskb_trim(skb, len)) {
1110		drop_reason = SKB_DROP_REASON_NOMEM;
1111		goto drop;
1112	}
1113
1114	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
1115		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1116		goto drop;
1117	}
1118
1119	skb_tx_timestamp(skb);
1120
1121	/* Orphan the skb - required as we might hang on to it
1122	 * for an indefinite time.
1123	 */
1124	skb_orphan(skb);
1125
1126	nf_reset_ct(skb);
1127
1128	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
1129		drop_reason = SKB_DROP_REASON_FULL_RING;
1130		goto drop;
1131	}
1132
1133	/* dev->lltx requires to do our own update of trans_start */
1134	queue = netdev_get_tx_queue(dev, txq);
1135	txq_trans_cond_update(queue);
1136
1137	/* Notify and wake up reader process */
1138	if (tfile->flags & TUN_FASYNC)
1139		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1140	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1141
1142	rcu_read_unlock();
1143	return NETDEV_TX_OK;
1144
1145drop:
1146	dev_core_stats_tx_dropped_inc(dev);
1147	skb_tx_error(skb);
1148	kfree_skb_reason(skb, drop_reason);
1149	rcu_read_unlock();
1150	return NET_XMIT_DROP;
1151}
1152
1153static void tun_net_mclist(struct net_device *dev)
1154{
1155	/*
1156	 * This callback is supposed to deal with the mc filter in the
1157	 * _rx_ path and has nothing to do with the _tx_ path.
1158	 * In the rx path we always accept everything userspace gives us.
1159	 */
1160}
1161
1162static netdev_features_t tun_net_fix_features(struct net_device *dev,
1163	netdev_features_t features)
1164{
1165	struct tun_struct *tun = netdev_priv(dev);
1166
1167	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1168}
1169
1170static void tun_set_headroom(struct net_device *dev, int new_hr)
1171{
1172	struct tun_struct *tun = netdev_priv(dev);
1173
1174	if (new_hr < NET_SKB_PAD)
1175		new_hr = NET_SKB_PAD;
1176
1177	tun->align = new_hr;
1178}
1179
1180static void
1181tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1182{
1183	struct tun_struct *tun = netdev_priv(dev);
1184
1185	dev_get_tstats64(dev, stats);
1186
1187	stats->rx_frame_errors +=
1188		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
1189}
1190
1191static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1192		       struct netlink_ext_ack *extack)
1193{
1194	struct tun_struct *tun = netdev_priv(dev);
1195	struct tun_file *tfile;
1196	struct bpf_prog *old_prog;
1197	int i;
1198
1199	old_prog = rtnl_dereference(tun->xdp_prog);
1200	rcu_assign_pointer(tun->xdp_prog, prog);
1201	if (old_prog)
1202		bpf_prog_put(old_prog);
1203
1204	for (i = 0; i < tun->numqueues; i++) {
1205		tfile = rtnl_dereference(tun->tfiles[i]);
1206		if (prog)
1207			sock_set_flag(&tfile->sk, SOCK_XDP);
1208		else
1209			sock_reset_flag(&tfile->sk, SOCK_XDP);
1210	}
1211	list_for_each_entry(tfile, &tun->disabled, next) {
1212		if (prog)
1213			sock_set_flag(&tfile->sk, SOCK_XDP);
1214		else
1215			sock_reset_flag(&tfile->sk, SOCK_XDP);
1216	}
1217
1218	return 0;
1219}
1220
1221static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1222{
1223	switch (xdp->command) {
1224	case XDP_SETUP_PROG:
1225		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1226	default:
1227		return -EINVAL;
1228	}
1229}
1230
1231static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1232{
1233	if (new_carrier) {
1234		struct tun_struct *tun = netdev_priv(dev);
1235
1236		if (!tun->numqueues)
1237			return -EPERM;
1238
1239		netif_carrier_on(dev);
1240	} else {
1241		netif_carrier_off(dev);
1242	}
1243	return 0;
1244}
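/*
 * Illustrative userspace sketch: toggle the link state from the queue fd;
 * the request ends up in tun_net_change_carrier() above.
 */
#if 0	/* example only */
	int carrier = 0;	/* 0: carrier off, 1: carrier on */

	if (ioctl(fd, TUNSETCARRIER, &carrier) < 0)
		perror("TUNSETCARRIER");
#endif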
1245
1246static const struct net_device_ops tun_netdev_ops = {
1247	.ndo_init		= tun_net_init,
1248	.ndo_uninit		= tun_net_uninit,
1249	.ndo_open		= tun_net_open,
1250	.ndo_stop		= tun_net_close,
1251	.ndo_start_xmit		= tun_net_xmit,
1252	.ndo_fix_features	= tun_net_fix_features,
1253	.ndo_select_queue	= tun_select_queue,
1254	.ndo_set_rx_headroom	= tun_set_headroom,
1255	.ndo_get_stats64	= tun_net_get_stats64,
1256	.ndo_change_carrier	= tun_net_change_carrier,
1257};
1258
1259static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1260{
1261	/* Notify and wake up reader process */
1262	if (tfile->flags & TUN_FASYNC)
1263		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1264	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1265}
1266
1267static int tun_xdp_xmit(struct net_device *dev, int n,
1268			struct xdp_frame **frames, u32 flags)
1269{
1270	struct tun_struct *tun = netdev_priv(dev);
1271	struct tun_file *tfile;
1272	u32 numqueues;
1273	int nxmit = 0;
1274	int i;
1275
1276	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1277		return -EINVAL;
1278
1279	rcu_read_lock();
1280
1281resample:
1282	numqueues = READ_ONCE(tun->numqueues);
1283	if (!numqueues) {
1284		rcu_read_unlock();
1285		return -ENXIO; /* Caller will free/return all frames */
1286	}
1287
1288	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1289					    numqueues]);
1290	if (unlikely(!tfile))
1291		goto resample;
1292
1293	spin_lock(&tfile->tx_ring.producer_lock);
1294	for (i = 0; i < n; i++) {
1295		struct xdp_frame *xdp = frames[i];
1296		/* Encode the XDP flag into the lowest bit so the consumer can
1297		 * tell an XDP frame apart from an sk_buff.
1298		 */
1299		void *frame = tun_xdp_to_ptr(xdp);
1300
1301		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1302			dev_core_stats_tx_dropped_inc(dev);
1303			break;
1304		}
1305		nxmit++;
1306	}
1307	spin_unlock(&tfile->tx_ring.producer_lock);
1308
1309	if (flags & XDP_XMIT_FLUSH)
1310		__tun_xdp_flush_tfile(tfile);
1311
1312	rcu_read_unlock();
1313	return nxmit;
1314}
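/*
 * How the pointer tagging used above works: xdp_frame pointers are at
 * least word aligned, so bit 0 is free to distinguish them from sk_buff
 * pointers sharing the same ptr_ring.  Sketch of the helpers from
 * <linux/if_tun.h> (tun_xdp_to_ptr() / tun_is_xdp_frame()):
 */
#if 0	/* example only */
#define EXAMPLE_XDP_FLAG 0x1UL

static inline void *example_xdp_to_ptr(struct xdp_frame *xdp)
{
	return (void *)((unsigned long)xdp | EXAMPLE_XDP_FLAG);
}

static inline bool example_is_xdp_frame(void *ptr)
{
	return (unsigned long)ptr & EXAMPLE_XDP_FLAG;
}
#endif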
1315
1316static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1317{
1318	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
1319	int nxmit;
1320
1321	if (unlikely(!frame))
1322		return -EOVERFLOW;
1323
1324	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1325	if (!nxmit)
1326		xdp_return_frame_rx_napi(frame);
1327	return nxmit;
1328}
1329
1330static const struct net_device_ops tap_netdev_ops = {
1331	.ndo_init		= tun_net_init,
1332	.ndo_uninit		= tun_net_uninit,
1333	.ndo_open		= tun_net_open,
1334	.ndo_stop		= tun_net_close,
1335	.ndo_start_xmit		= tun_net_xmit,
1336	.ndo_fix_features	= tun_net_fix_features,
1337	.ndo_set_rx_mode	= tun_net_mclist,
1338	.ndo_set_mac_address	= eth_mac_addr,
1339	.ndo_validate_addr	= eth_validate_addr,
1340	.ndo_select_queue	= tun_select_queue,
1341	.ndo_features_check	= passthru_features_check,
1342	.ndo_set_rx_headroom	= tun_set_headroom,
1343	.ndo_bpf		= tun_xdp,
1344	.ndo_xdp_xmit		= tun_xdp_xmit,
1345	.ndo_change_carrier	= tun_net_change_carrier,
1346};
1347
1348static void tun_flow_init(struct tun_struct *tun)
1349{
1350	int i;
1351
1352	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1353		INIT_HLIST_HEAD(&tun->flows[i]);
1354
1355	tun->ageing_time = TUN_FLOW_EXPIRE;
1356	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1357	mod_timer(&tun->flow_gc_timer,
1358		  round_jiffies_up(jiffies + tun->ageing_time));
1359}
1360
1361static void tun_flow_uninit(struct tun_struct *tun)
1362{
1363	del_timer_sync(&tun->flow_gc_timer);
1364	tun_flow_flush(tun);
1365}
1366
1367#define MIN_MTU 68
1368#define MAX_MTU 65535
1369
1370/* Initialize net device. */
1371static void tun_net_initialize(struct net_device *dev)
1372{
1373	struct tun_struct *tun = netdev_priv(dev);
1374
1375	switch (tun->flags & TUN_TYPE_MASK) {
1376	case IFF_TUN:
1377		dev->netdev_ops = &tun_netdev_ops;
1378		dev->header_ops = &ip_tunnel_header_ops;
1379
1380		/* Point-to-Point TUN Device */
1381		dev->hard_header_len = 0;
1382		dev->addr_len = 0;
1383		dev->mtu = 1500;
1384
1385		/* Zero header length */
1386		dev->type = ARPHRD_NONE;
1387		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1388		break;
1389
1390	case IFF_TAP:
1391		dev->netdev_ops = &tap_netdev_ops;
1392		/* Ethernet TAP Device */
1393		ether_setup(dev);
1394		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1395		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1396
1397		eth_hw_addr_random(dev);
1398
1399		/* Currently tun does not support XDP, only tap does. */
1400		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
1401				    NETDEV_XDP_ACT_REDIRECT |
1402				    NETDEV_XDP_ACT_NDO_XMIT;
1403
1404		break;
1405	}
1406
1407	dev->min_mtu = MIN_MTU;
1408	dev->max_mtu = MAX_MTU - dev->hard_header_len;
1409}
1410
1411static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1412{
1413	struct sock *sk = tfile->socket.sk;
1414
1415	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1416}
1417
1418/* Character device part */
1419
1420/* Poll */
1421static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1422{
1423	struct tun_file *tfile = file->private_data;
1424	struct tun_struct *tun = tun_get(tfile);
1425	struct sock *sk;
1426	__poll_t mask = 0;
1427
1428	if (!tun)
1429		return EPOLLERR;
1430
1431	sk = tfile->socket.sk;
1432
1433	poll_wait(file, sk_sleep(sk), wait);
1434
1435	if (!ptr_ring_empty(&tfile->tx_ring))
1436		mask |= EPOLLIN | EPOLLRDNORM;
1437
1438	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable, to
1439	 * guarantee that EPOLLOUT is raised either here or by
1440	 * tun_sock_write_space(). That way the process still gets a
1441	 * notification after it writes to a down device and meets -EIO.
1442	 */
1443	if (tun_sock_writeable(tun, tfile) ||
1444	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1445	     tun_sock_writeable(tun, tfile)))
1446		mask |= EPOLLOUT | EPOLLWRNORM;
1447
1448	if (tun->dev->reg_state != NETREG_REGISTERED)
1449		mask = EPOLLERR;
1450
1451	tun_put(tun);
1452	return mask;
1453}
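/*
 * Illustrative userspace sketch: block until the queue fd is readable,
 * mirroring the EPOLLIN path in tun_chr_poll() above.
 */
#if 0	/* example only */
	struct pollfd pfd = { .fd = fd, .events = POLLIN };
	char buf[2048];

	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = read(fd, buf, sizeof(buf));
		/* n is one whole packet, or -1 on error */
	}
#endif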
1454
1455static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1456					    size_t len,
1457					    const struct iov_iter *it)
1458{
1459	struct sk_buff *skb;
1460	size_t linear;
1461	int err;
1462	int i;
1463
1464	if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
1465	    len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
1466		return ERR_PTR(-EMSGSIZE);
1467
1468	local_bh_disable();
1469	skb = napi_get_frags(&tfile->napi);
1470	local_bh_enable();
1471	if (!skb)
1472		return ERR_PTR(-ENOMEM);
1473
1474	linear = iov_iter_single_seg_count(it);
1475	err = __skb_grow(skb, linear);
1476	if (err)
1477		goto free;
1478
1479	skb->len = len;
1480	skb->data_len = len - linear;
1481	skb->truesize += skb->data_len;
1482
1483	for (i = 1; i < it->nr_segs; i++) {
1484		const struct iovec *iov = iter_iov(it) + i;
1485		size_t fragsz = iov->iov_len;
1486		struct page *page;
1487		void *frag;
1488
1489		if (fragsz == 0 || fragsz > PAGE_SIZE) {
1490			err = -EINVAL;
1491			goto free;
1492		}
1493		frag = netdev_alloc_frag(fragsz);
1494		if (!frag) {
1495			err = -ENOMEM;
1496			goto free;
1497		}
1498		page = virt_to_head_page(frag);
1499		skb_fill_page_desc(skb, i - 1, page,
1500				   frag - page_address(page), fragsz);
1501	}
1502
1503	return skb;
1504free:
1505	/* frees skb and all frags allocated with napi_alloc_frag() */
1506	napi_free_frags(&tfile->napi);
1507	return ERR_PTR(err);
1508}
1509
1510/* prepad is the amount to reserve at front.  len is length after that.
1511 * linear is a hint as to how much to copy (usually headers). */
1512static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1513				     size_t prepad, size_t len,
1514				     size_t linear, int noblock)
1515{
1516	struct sock *sk = tfile->socket.sk;
1517	struct sk_buff *skb;
1518	int err;
1519
1520	/* Under a page?  Don't bother with paged skb. */
1521	if (prepad + len < PAGE_SIZE)
1522		linear = len;
1523
1524	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
1525		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
1526	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1527				   &err, PAGE_ALLOC_COSTLY_ORDER);
1528	if (!skb)
1529		return ERR_PTR(err);
1530
1531	skb_reserve(skb, prepad);
1532	skb_put(skb, linear);
1533	skb->data_len = len - linear;
1534	skb->len += len - linear;
1535
1536	return skb;
1537}
1538
1539static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1540			   struct sk_buff *skb, int more)
1541{
1542	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1543	struct sk_buff_head process_queue;
1544	u32 rx_batched = tun->rx_batched;
1545	bool rcv = false;
1546
1547	if (!rx_batched || (!more && skb_queue_empty(queue))) {
1548		local_bh_disable();
1549		skb_record_rx_queue(skb, tfile->queue_index);
1550		netif_receive_skb(skb);
1551		local_bh_enable();
1552		return;
1553	}
1554
1555	spin_lock(&queue->lock);
1556	if (!more || skb_queue_len(queue) == rx_batched) {
1557		__skb_queue_head_init(&process_queue);
1558		skb_queue_splice_tail_init(queue, &process_queue);
1559		rcv = true;
1560	} else {
1561		__skb_queue_tail(queue, skb);
1562	}
1563	spin_unlock(&queue->lock);
1564
1565	if (rcv) {
1566		struct sk_buff *nskb;
1567
1568		local_bh_disable();
1569		while ((nskb = __skb_dequeue(&process_queue))) {
1570			skb_record_rx_queue(nskb, tfile->queue_index);
1571			netif_receive_skb(nskb);
1572		}
1573		skb_record_rx_queue(skb, tfile->queue_index);
1574		netif_receive_skb(skb);
1575		local_bh_enable();
1576	}
1577}
1578
1579static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1580			      int len, int noblock, bool zerocopy)
1581{
1582	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1583		return false;
1584
1585	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1586		return false;
1587
1588	if (!noblock)
1589		return false;
1590
1591	if (zerocopy)
1592		return false;
1593
1594	if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
1595	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1596		return false;
1597
1598	return true;
1599}
1600
1601static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1602				       struct page_frag *alloc_frag, char *buf,
1603				       int buflen, int len, int pad)
1604{
1605	struct sk_buff *skb = build_skb(buf, buflen);
1606
1607	if (!skb)
1608		return ERR_PTR(-ENOMEM);
1609
1610	skb_reserve(skb, pad);
1611	skb_put(skb, len);
1612	skb_set_owner_w(skb, tfile->socket.sk);
1613
1614	get_page(alloc_frag->page);
1615	alloc_frag->offset += buflen;
1616
1617	return skb;
1618}
1619
1620static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1621		       struct xdp_buff *xdp, u32 act)
1622{
1623	int err;
1624
1625	switch (act) {
1626	case XDP_REDIRECT:
1627		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1628		if (err) {
1629			dev_core_stats_rx_dropped_inc(tun->dev);
1630			return err;
1631		}
1632		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1633		break;
1634	case XDP_TX:
1635		err = tun_xdp_tx(tun->dev, xdp);
1636		if (err < 0) {
1637			dev_core_stats_rx_dropped_inc(tun->dev);
1638			return err;
1639		}
1640		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1641		break;
1642	case XDP_PASS:
1643		break;
1644	default:
1645		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
1646		fallthrough;
1647	case XDP_ABORTED:
1648		trace_xdp_exception(tun->dev, xdp_prog, act);
1649		fallthrough;
1650	case XDP_DROP:
1651		dev_core_stats_rx_dropped_inc(tun->dev);
1652		break;
1653	}
1654
1655	return act;
1656}
1657
1658static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1659				     struct tun_file *tfile,
1660				     struct iov_iter *from,
1661				     struct virtio_net_hdr *hdr,
1662				     int len, int *skb_xdp)
1663{
1664	struct page_frag *alloc_frag = &current->task_frag;
1665	struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
1666	struct bpf_prog *xdp_prog;
1667	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1668	char *buf;
1669	size_t copied;
1670	int pad = TUN_RX_PAD;
1671	int err = 0;
1672
1673	rcu_read_lock();
1674	xdp_prog = rcu_dereference(tun->xdp_prog);
1675	if (xdp_prog)
1676		pad += XDP_PACKET_HEADROOM;
1677	buflen += SKB_DATA_ALIGN(len + pad);
1678	rcu_read_unlock();
1679
1680	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1681	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1682		return ERR_PTR(-ENOMEM);
1683
1684	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1685	copied = copy_page_from_iter(alloc_frag->page,
1686				     alloc_frag->offset + pad,
1687				     len, from);
1688	if (copied != len)
1689		return ERR_PTR(-EFAULT);
1690
1691	/* There's a small window in which XDP may be set after the check
1692	 * of xdp_prog above; this should be rare, and for simplicity
1693	 * we do XDP on the skb in case the headroom is not enough.
1694	 */
1695	if (hdr->gso_type || !xdp_prog) {
1696		*skb_xdp = 1;
1697		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1698				       pad);
1699	}
1700
1701	*skb_xdp = 0;
1702
1703	local_bh_disable();
1704	rcu_read_lock();
1705	bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
1706	xdp_prog = rcu_dereference(tun->xdp_prog);
1707	if (xdp_prog) {
1708		struct xdp_buff xdp;
1709		u32 act;
1710
1711		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
1712		xdp_prepare_buff(&xdp, buf, pad, len, false);
1713
1714		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1715		if (act == XDP_REDIRECT || act == XDP_TX) {
1716			get_page(alloc_frag->page);
1717			alloc_frag->offset += buflen;
1718		}
1719		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1720		if (err < 0) {
1721			if (act == XDP_REDIRECT || act == XDP_TX)
1722				put_page(alloc_frag->page);
1723			goto out;
1724		}
1725
1726		if (err == XDP_REDIRECT)
1727			xdp_do_flush();
1728		if (err != XDP_PASS)
1729			goto out;
1730
1731		pad = xdp.data - xdp.data_hard_start;
1732		len = xdp.data_end - xdp.data;
1733	}
1734	bpf_net_ctx_clear(bpf_net_ctx);
1735	rcu_read_unlock();
1736	local_bh_enable();
1737
1738	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1739
1740out:
1741	bpf_net_ctx_clear(bpf_net_ctx);
1742	rcu_read_unlock();
1743	local_bh_enable();
1744	return NULL;
1745}
1746
1747/* Get packet from user space buffer */
1748static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1749			    void *msg_control, struct iov_iter *from,
1750			    int noblock, bool more)
1751{
1752	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1753	struct sk_buff *skb;
1754	size_t total_len = iov_iter_count(from);
1755	size_t len = total_len, align = tun->align, linear;
1756	struct virtio_net_hdr gso = { 0 };
1757	int good_linear;
1758	int copylen;
1759	bool zerocopy = false;
1760	int err;
1761	u32 rxhash = 0;
1762	int skb_xdp = 1;
1763	bool frags = tun_napi_frags_enabled(tfile);
1764	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1765
1766	if (!(tun->flags & IFF_NO_PI)) {
1767		if (len < sizeof(pi))
1768			return -EINVAL;
1769		len -= sizeof(pi);
1770
1771		if (!copy_from_iter_full(&pi, sizeof(pi), from))
1772			return -EFAULT;
1773	}
1774
1775	if (tun->flags & IFF_VNET_HDR) {
1776		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1777
1778		if (len < vnet_hdr_sz)
1779			return -EINVAL;
1780		len -= vnet_hdr_sz;
1781
1782		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1783			return -EFAULT;
1784
1785		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1786		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1787			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1788
1789		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1790			return -EINVAL;
1791		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1792	}
1793
1794	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1795		align += NET_IP_ALIGN;
1796		if (unlikely(len < ETH_HLEN ||
1797			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1798			return -EINVAL;
1799	}
1800
1801	good_linear = SKB_MAX_HEAD(align);
1802
1803	if (msg_control) {
1804		struct iov_iter i = *from;
1805
1806		/* There are 256 bytes to be copied into the skb, so there is
1807		 * enough room to expand the skb head in case it is needed.
1808		 * The rest of the buffer is mapped from userspace.
1809		 */
1810		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1811		if (copylen > good_linear)
1812			copylen = good_linear;
1813		linear = copylen;
1814		iov_iter_advance(&i, copylen);
1815		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1816			zerocopy = true;
1817	}
1818
1819	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1820		/* For packets that are not easy to process
1821		 * (e.g. GSO or jumbo packets), we do XDP after the
1822		 * skb has been created with the generic XDP routine.
1823		 */
1824		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1825		err = PTR_ERR_OR_ZERO(skb);
1826		if (err)
1827			goto drop;
1828		if (!skb)
1829			return total_len;
1830	} else {
1831		if (!zerocopy) {
1832			copylen = len;
1833			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1834				linear = good_linear;
1835			else
1836				linear = tun16_to_cpu(tun, gso.hdr_len);
1837		}
1838
1839		if (frags) {
1840			mutex_lock(&tfile->napi_mutex);
1841			skb = tun_napi_alloc_frags(tfile, copylen, from);
1842			/* tun_napi_alloc_frags() enforces a layout for the skb.
1843			 * If zerocopy is enabled, then this layout will be
1844			 * overwritten by zerocopy_sg_from_iter().
1845			 */
1846			zerocopy = false;
1847		} else {
1848			if (!linear)
1849				linear = min_t(size_t, good_linear, copylen);
1850
1851			skb = tun_alloc_skb(tfile, align, copylen, linear,
1852					    noblock);
1853		}
1854
1855		err = PTR_ERR_OR_ZERO(skb);
1856		if (err)
1857			goto drop;
1858
1859		if (zerocopy)
1860			err = zerocopy_sg_from_iter(skb, from);
1861		else
1862			err = skb_copy_datagram_from_iter(skb, 0, from, len);
1863
1864		if (err) {
1865			err = -EFAULT;
1866			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1867			goto drop;
1868		}
1869	}
1870
1871	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1872		atomic_long_inc(&tun->rx_frame_errors);
1873		err = -EINVAL;
1874		goto free_skb;
1875	}
1876
1877	switch (tun->flags & TUN_TYPE_MASK) {
1878	case IFF_TUN:
1879		if (tun->flags & IFF_NO_PI) {
1880			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1881
1882			switch (ip_version) {
1883			case 4:
1884				pi.proto = htons(ETH_P_IP);
1885				break;
1886			case 6:
1887				pi.proto = htons(ETH_P_IPV6);
1888				break;
1889			default:
1890				err = -EINVAL;
1891				goto drop;
1892			}
1893		}
1894
1895		skb_reset_mac_header(skb);
1896		skb->protocol = pi.proto;
1897		skb->dev = tun->dev;
1898		break;
1899	case IFF_TAP:
1900		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
1901			err = -ENOMEM;
1902			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
1903			goto drop;
1904		}
1905		skb->protocol = eth_type_trans(skb, tun->dev);
1906		break;
1907	}
1908
1909	/* copy skb_ubuf_info for callback when skb has no error */
1910	if (zerocopy) {
1911		skb_zcopy_init(skb, msg_control);
1912	} else if (msg_control) {
1913		struct ubuf_info *uarg = msg_control;
1914		uarg->ops->complete(NULL, uarg, false);
1915	}
1916
1917	skb_reset_network_header(skb);
1918	skb_probe_transport_header(skb);
1919	skb_record_rx_queue(skb, tfile->queue_index);
1920
1921	if (skb_xdp) {
1922		struct bpf_prog *xdp_prog;
1923		int ret;
1924
1925		local_bh_disable();
1926		rcu_read_lock();
1927		xdp_prog = rcu_dereference(tun->xdp_prog);
1928		if (xdp_prog) {
1929			ret = do_xdp_generic(xdp_prog, &skb);
1930			if (ret != XDP_PASS) {
1931				rcu_read_unlock();
1932				local_bh_enable();
1933				goto unlock_frags;
1934			}
1935		}
1936		rcu_read_unlock();
1937		local_bh_enable();
1938	}
1939
1940	/* Compute the costly rx hash only if needed for flow updates.
1941	 * There is a very small possibility of out-of-order delivery during
1942	 * a queue switch, but that is not worth optimizing for.
1943	 */
1944	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1945	    !tfile->detached)
1946		rxhash = __skb_get_hash_symmetric(skb);
1947
1948	rcu_read_lock();
1949	if (unlikely(!(tun->dev->flags & IFF_UP))) {
1950		err = -EIO;
1951		rcu_read_unlock();
1952		drop_reason = SKB_DROP_REASON_DEV_READY;
1953		goto drop;
1954	}
1955
1956	if (frags) {
1957		u32 headlen;
1958
1959		/* Exercise flow dissector code path. */
1960		skb_push(skb, ETH_HLEN);
1961		headlen = eth_get_headlen(tun->dev, skb->data,
1962					  skb_headlen(skb));
1963
1964		if (unlikely(headlen > skb_headlen(skb))) {
1965			WARN_ON_ONCE(1);
1966			err = -ENOMEM;
1967			dev_core_stats_rx_dropped_inc(tun->dev);
1968napi_busy:
1969			napi_free_frags(&tfile->napi);
1970			rcu_read_unlock();
1971			mutex_unlock(&tfile->napi_mutex);
1972			return err;
1973		}
1974
1975		if (likely(napi_schedule_prep(&tfile->napi))) {
1976			local_bh_disable();
1977			napi_gro_frags(&tfile->napi);
1978			napi_complete(&tfile->napi);
1979			local_bh_enable();
1980		} else {
1981			err = -EBUSY;
1982			goto napi_busy;
1983		}
1984		mutex_unlock(&tfile->napi_mutex);
1985	} else if (tfile->napi_enabled) {
1986		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1987		int queue_len;
1988
1989		spin_lock_bh(&queue->lock);
1990
1991		if (unlikely(tfile->detached)) {
1992			spin_unlock_bh(&queue->lock);
1993			rcu_read_unlock();
1994			err = -EBUSY;
1995			goto free_skb;
1996		}
1997
1998		__skb_queue_tail(queue, skb);
1999		queue_len = skb_queue_len(queue);
2000		spin_unlock(&queue->lock);
2001
2002		if (!more || queue_len > NAPI_POLL_WEIGHT)
2003			napi_schedule(&tfile->napi);
2004
2005		local_bh_enable();
2006	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
2007		tun_rx_batched(tun, tfile, skb, more);
2008	} else {
2009		netif_rx(skb);
2010	}
2011	rcu_read_unlock();
2012
2013	preempt_disable();
2014	dev_sw_netstats_rx_add(tun->dev, len);
2015	preempt_enable();
2016
2017	if (rxhash)
2018		tun_flow_update(tun, rxhash, tfile);
2019
2020	return total_len;
2021
2022drop:
2023	if (err != -EAGAIN)
2024		dev_core_stats_rx_dropped_inc(tun->dev);
2025
2026free_skb:
2027	if (!IS_ERR_OR_NULL(skb))
2028		kfree_skb_reason(skb, drop_reason);
2029
2030unlock_frags:
2031	if (frags) {
2032		tfile->napi.skb = NULL;
2033		mutex_unlock(&tfile->napi_mutex);
2034	}
2035
2036	return err ?: total_len;
2037}
2038
2039static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2040{
2041	struct file *file = iocb->ki_filp;
2042	struct tun_file *tfile = file->private_data;
2043	struct tun_struct *tun = tun_get(tfile);
2044	ssize_t result;
2045	int noblock = 0;
2046
2047	if (!tun)
2048		return -EBADFD;
2049
2050	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2051		noblock = 1;
2052
2053	result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2054
2055	tun_put(tun);
2056	return result;
2057}
2058
2059static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2060				struct tun_file *tfile,
2061				struct xdp_frame *xdp_frame,
2062				struct iov_iter *iter)
2063{
2064	int vnet_hdr_sz = 0;
2065	size_t size = xdp_frame->len;
2066	size_t ret;
2067
2068	if (tun->flags & IFF_VNET_HDR) {
2069		struct virtio_net_hdr gso = { 0 };
2070
2071		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2072		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2073			return -EINVAL;
2074		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2075			     sizeof(gso)))
2076			return -EFAULT;
2077		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2078	}
2079
2080	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2081
2082	preempt_disable();
2083	dev_sw_netstats_tx_add(tun->dev, 1, ret);
2084	preempt_enable();
2085
2086	return ret;
2087}
2088
2089/* Put packet to the user space buffer */
2090static ssize_t tun_put_user(struct tun_struct *tun,
2091			    struct tun_file *tfile,
2092			    struct sk_buff *skb,
2093			    struct iov_iter *iter)
2094{
2095	struct tun_pi pi = { 0, skb->protocol };
2096	ssize_t total;
2097	int vlan_offset = 0;
2098	int vlan_hlen = 0;
2099	int vnet_hdr_sz = 0;
2100
2101	if (skb_vlan_tag_present(skb))
2102		vlan_hlen = VLAN_HLEN;
2103
2104	if (tun->flags & IFF_VNET_HDR)
2105		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2106
2107	total = skb->len + vlan_hlen + vnet_hdr_sz;
2108
2109	if (!(tun->flags & IFF_NO_PI)) {
2110		if (iov_iter_count(iter) < sizeof(pi))
2111			return -EINVAL;
2112
2113		total += sizeof(pi);
2114		if (iov_iter_count(iter) < total) {
2115			/* Packet will be stripped */
2116			pi.flags |= TUN_PKT_STRIP;
2117		}
2118
2119		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2120			return -EFAULT;
2121	}
2122
2123	if (vnet_hdr_sz) {
2124		struct virtio_net_hdr gso;
2125
2126		if (iov_iter_count(iter) < vnet_hdr_sz)
2127			return -EINVAL;
2128
2129		if (virtio_net_hdr_from_skb(skb, &gso,
2130					    tun_is_little_endian(tun), true,
2131					    vlan_hlen)) {
2132			struct skb_shared_info *sinfo = skb_shinfo(skb);
2133
2134			if (net_ratelimit()) {
2135				netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
2136					   sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2137					   tun16_to_cpu(tun, gso.hdr_len));
2138				print_hex_dump(KERN_ERR, "tun: ",
2139					       DUMP_PREFIX_NONE,
2140					       16, 1, skb->head,
2141					       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2142			}
2143			WARN_ON_ONCE(1);
2144			return -EINVAL;
2145		}
2146
2147		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2148			return -EFAULT;
2149
2150		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2151	}
2152
2153	if (vlan_hlen) {
2154		int ret;
2155		struct veth veth;
2156
2157		veth.h_vlan_proto = skb->vlan_proto;
2158		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2159
2160		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2161
2162		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2163		if (ret || !iov_iter_count(iter))
2164			goto done;
2165
2166		ret = copy_to_iter(&veth, sizeof(veth), iter);
2167		if (ret != sizeof(veth) || !iov_iter_count(iter))
2168			goto done;
2169	}
2170
2171	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2172
2173done:
2174	/* caller is in process context */
2175	preempt_disable();
2176	dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2177	preempt_enable();
2178
2179	return total;
2180}
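/*
 * Editor's sketch (userspace, not driver code): how a read() from a tun fd
 * is framed by tun_put_user() above when IFF_NO_PI is clear and
 * IFF_VNET_HDR is off: a struct tun_pi, then the packet bytes.  Buffer
 * size and error handling are illustrative assumptions.
 */
#include <linux/if_tun.h>
#include <unistd.h>

static void read_one_packet(int tun_fd)
{
	char buf[2048];
	struct tun_pi *pi = (struct tun_pi *)buf;
	ssize_t n = read(tun_fd, buf, sizeof(buf));

	if (n < (ssize_t)sizeof(*pi))
		return;
	if (pi->flags & TUN_PKT_STRIP)
		return;	/* buffer was too small; the packet tail was stripped */
	/* pi->proto is the EtherType (network byte order); payload follows */
}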
2181
2182static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2183{
2184	DECLARE_WAITQUEUE(wait, current);
2185	void *ptr = NULL;
2186	int error = 0;
2187
2188	ptr = ptr_ring_consume(&tfile->tx_ring);
2189	if (ptr)
2190		goto out;
2191	if (noblock) {
2192		error = -EAGAIN;
2193		goto out;
2194	}
2195
2196	add_wait_queue(&tfile->socket.wq.wait, &wait);
2197
2198	while (1) {
2199		set_current_state(TASK_INTERRUPTIBLE);
2200		ptr = ptr_ring_consume(&tfile->tx_ring);
2201		if (ptr)
2202			break;
2203		if (signal_pending(current)) {
2204			error = -ERESTARTSYS;
2205			break;
2206		}
2207		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2208			error = -EFAULT;
2209			break;
2210		}
2211
2212		schedule();
2213	}
2214
2215	__set_current_state(TASK_RUNNING);
2216	remove_wait_queue(&tfile->socket.wq.wait, &wait);
2217
2218out:
2219	*err = error;
2220	return ptr;
2221}
2222
2223static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2224			   struct iov_iter *to,
2225			   int noblock, void *ptr)
2226{
2227	ssize_t ret;
2228	int err;
2229
2230	if (!iov_iter_count(to)) {
2231		tun_ptr_free(ptr);
2232		return 0;
2233	}
2234
2235	if (!ptr) {
2236		/* Read frames from ring */
2237		ptr = tun_ring_recv(tfile, noblock, &err);
2238		if (!ptr)
2239			return err;
2240	}
2241
2242	if (tun_is_xdp_frame(ptr)) {
2243		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2244
2245		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2246		xdp_return_frame(xdpf);
2247	} else {
2248		struct sk_buff *skb = ptr;
2249
2250		ret = tun_put_user(tun, tfile, skb, to);
2251		if (unlikely(ret < 0))
2252			kfree_skb(skb);
2253		else
2254			consume_skb(skb);
2255	}
2256
2257	return ret;
2258}
2259
2260static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2261{
2262	struct file *file = iocb->ki_filp;
2263	struct tun_file *tfile = file->private_data;
2264	struct tun_struct *tun = tun_get(tfile);
2265	ssize_t len = iov_iter_count(to), ret;
2266	int noblock = 0;
2267
2268	if (!tun)
2269		return -EBADFD;
2270
2271	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2272		noblock = 1;
2273
2274	ret = tun_do_read(tun, tfile, to, noblock, NULL);
2275	ret = min_t(ssize_t, ret, len);
2276	if (ret > 0)
2277		iocb->ki_pos = ret;
2278	tun_put(tun);
2279	return ret;
2280}
2281
2282static void tun_prog_free(struct rcu_head *rcu)
2283{
2284	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2285
2286	bpf_prog_destroy(prog->prog);
2287	kfree(prog);
2288}
2289
2290static int __tun_set_ebpf(struct tun_struct *tun,
2291			  struct tun_prog __rcu **prog_p,
2292			  struct bpf_prog *prog)
2293{
2294	struct tun_prog *old, *new = NULL;
2295
2296	if (prog) {
2297		new = kmalloc(sizeof(*new), GFP_KERNEL);
2298		if (!new)
2299			return -ENOMEM;
2300		new->prog = prog;
2301	}
2302
2303	spin_lock_bh(&tun->lock);
2304	old = rcu_dereference_protected(*prog_p,
2305					lockdep_is_held(&tun->lock));
2306	rcu_assign_pointer(*prog_p, new);
2307	spin_unlock_bh(&tun->lock);
2308
2309	if (old)
2310		call_rcu(&old->rcu, tun_prog_free);
2311
2312	return 0;
2313}
2314
2315static void tun_free_netdev(struct net_device *dev)
2316{
2317	struct tun_struct *tun = netdev_priv(dev);
2318
2319	BUG_ON(!(list_empty(&tun->disabled)));
2320
2321	tun_flow_uninit(tun);
2322	security_tun_dev_free_security(tun->security);
2323	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2324	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
2325}
2326
2327static void tun_setup(struct net_device *dev)
2328{
2329	struct tun_struct *tun = netdev_priv(dev);
2330
2331	tun->owner = INVALID_UID;
2332	tun->group = INVALID_GID;
2333	tun_default_link_ksettings(dev, &tun->link_ksettings);
2334
2335	dev->ethtool_ops = &tun_ethtool_ops;
2336	dev->needs_free_netdev = true;
2337	dev->priv_destructor = tun_free_netdev;
2338	/* We prefer our own queue length */
2339	dev->tx_queue_len = TUN_READQ_SIZE;
2340}
2341
2342/* Trivial set of netlink ops to allow deleting a tun or tap
2343 * device via netlink.
2344 */
2345static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2346			struct netlink_ext_ack *extack)
2347{
2348	NL_SET_ERR_MSG(extack,
2349		       "tun/tap creation via rtnetlink is not supported.");
2350	return -EOPNOTSUPP;
2351}
2352
2353static size_t tun_get_size(const struct net_device *dev)
2354{
2355	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2356	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2357
2358	return nla_total_size(sizeof(uid_t)) + /* OWNER */
2359	       nla_total_size(sizeof(gid_t)) + /* GROUP */
2360	       nla_total_size(sizeof(u8)) + /* TYPE */
2361	       nla_total_size(sizeof(u8)) + /* PI */
2362	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
2363	       nla_total_size(sizeof(u8)) + /* PERSIST */
2364	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2365	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2366	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2367	       0;
2368}
2369
2370static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2371{
2372	struct tun_struct *tun = netdev_priv(dev);
2373
2374	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2375		goto nla_put_failure;
2376	if (uid_valid(tun->owner) &&
2377	    nla_put_u32(skb, IFLA_TUN_OWNER,
2378			from_kuid_munged(current_user_ns(), tun->owner)))
2379		goto nla_put_failure;
2380	if (gid_valid(tun->group) &&
2381	    nla_put_u32(skb, IFLA_TUN_GROUP,
2382			from_kgid_munged(current_user_ns(), tun->group)))
2383		goto nla_put_failure;
2384	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2385		goto nla_put_failure;
2386	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2387		goto nla_put_failure;
2388	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2389		goto nla_put_failure;
2390	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2391		       !!(tun->flags & IFF_MULTI_QUEUE)))
2392		goto nla_put_failure;
2393	if (tun->flags & IFF_MULTI_QUEUE) {
2394		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2395			goto nla_put_failure;
2396		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2397				tun->numdisabled))
2398			goto nla_put_failure;
2399	}
2400
2401	return 0;
2402
2403nla_put_failure:
2404	return -EMSGSIZE;
2405}
2406
2407static struct rtnl_link_ops tun_link_ops __read_mostly = {
2408	.kind		= DRV_NAME,
2409	.priv_size	= sizeof(struct tun_struct),
2410	.setup		= tun_setup,
2411	.validate	= tun_validate,
2412	.get_size       = tun_get_size,
2413	.fill_info      = tun_fill_info,
2414};
2415
2416static void tun_sock_write_space(struct sock *sk)
2417{
2418	struct tun_file *tfile;
2419	wait_queue_head_t *wqueue;
2420
2421	if (!sock_writeable(sk))
2422		return;
2423
2424	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2425		return;
2426
2427	wqueue = sk_sleep(sk);
2428	if (wqueue && waitqueue_active(wqueue))
2429		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2430						EPOLLWRNORM | EPOLLWRBAND);
2431
2432	tfile = container_of(sk, struct tun_file, sk);
2433	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2434}
2435
2436static void tun_put_page(struct tun_page *tpage)
2437{
2438	if (tpage->page)
2439		__page_frag_cache_drain(tpage->page, tpage->count);
2440}
2441
2442static int tun_xdp_one(struct tun_struct *tun,
2443		       struct tun_file *tfile,
2444		       struct xdp_buff *xdp, int *flush,
2445		       struct tun_page *tpage)
2446{
2447	unsigned int datasize = xdp->data_end - xdp->data;
2448	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2449	struct virtio_net_hdr *gso = &hdr->gso;
2450	struct bpf_prog *xdp_prog;
2451	struct sk_buff *skb = NULL;
2452	struct sk_buff_head *queue;
2453	u32 rxhash = 0, act;
2454	int buflen = hdr->buflen;
2455	int ret = 0;
2456	bool skb_xdp = false;
2457	struct page *page;
2458
2459	if (unlikely(datasize < ETH_HLEN))
2460		return -EINVAL;
2461
2462	xdp_prog = rcu_dereference(tun->xdp_prog);
2463	if (xdp_prog) {
2464		if (gso->gso_type) {
2465			skb_xdp = true;
2466			goto build;
2467		}
2468
2469		xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2470		xdp_set_data_meta_invalid(xdp);
2471
2472		act = bpf_prog_run_xdp(xdp_prog, xdp);
2473		ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2474		if (ret < 0) {
2475			put_page(virt_to_head_page(xdp->data));
2476			return ret;
2477		}
2478
2479		switch (ret) {
2480		case XDP_REDIRECT:
2481			*flush = true;
2482			fallthrough;
2483		case XDP_TX:
2484			return 0;
2485		case XDP_PASS:
2486			break;
2487		default:
2488			page = virt_to_head_page(xdp->data);
2489			if (tpage->page == page) {
2490				++tpage->count;
2491			} else {
2492				tun_put_page(tpage);
2493				tpage->page = page;
2494				tpage->count = 1;
2495			}
2496			return 0;
2497		}
2498	}
2499
2500build:
2501	skb = build_skb(xdp->data_hard_start, buflen);
2502	if (!skb) {
2503		ret = -ENOMEM;
2504		goto out;
2505	}
2506
2507	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2508	skb_put(skb, xdp->data_end - xdp->data);
2509
2510	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2511		atomic_long_inc(&tun->rx_frame_errors);
2512		kfree_skb(skb);
2513		ret = -EINVAL;
2514		goto out;
2515	}
2516
2517	skb->protocol = eth_type_trans(skb, tun->dev);
2518	skb_reset_network_header(skb);
2519	skb_probe_transport_header(skb);
2520	skb_record_rx_queue(skb, tfile->queue_index);
2521
2522	if (skb_xdp) {
2523		ret = do_xdp_generic(xdp_prog, &skb);
2524		if (ret != XDP_PASS) {
2525			ret = 0;
2526			goto out;
2527		}
2528	}
2529
2530	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2531	    !tfile->detached)
2532		rxhash = __skb_get_hash_symmetric(skb);
2533
2534	if (tfile->napi_enabled) {
2535		queue = &tfile->sk.sk_write_queue;
2536		spin_lock(&queue->lock);
2537
2538		if (unlikely(tfile->detached)) {
2539			spin_unlock(&queue->lock);
2540			kfree_skb(skb);
2541			return -EBUSY;
2542		}
2543
2544		__skb_queue_tail(queue, skb);
2545		spin_unlock(&queue->lock);
2546		ret = 1;
2547	} else {
2548		netif_receive_skb(skb);
2549		ret = 0;
2550	}
2551
2552	/* No need to disable preemption here since this function is
2553	 * always called with bh disabled
2554	 */
2555	dev_sw_netstats_rx_add(tun->dev, datasize);
2556
2557	if (rxhash)
2558		tun_flow_update(tun, rxhash, tfile);
2559
2560out:
2561	return ret;
2562}
2563
2564static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2565{
2566	int ret, i;
2567	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2568	struct tun_struct *tun = tun_get(tfile);
2569	struct tun_msg_ctl *ctl = m->msg_control;
2570	struct xdp_buff *xdp;
2571
2572	if (!tun)
2573		return -EBADFD;
2574
2575	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
2576	    ctl && ctl->type == TUN_MSG_PTR) {
2577		struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx;
2578		struct tun_page tpage;
2579		int n = ctl->num;
2580		int flush = 0, queued = 0;
2581
2582		memset(&tpage, 0, sizeof(tpage));
2583
2584		local_bh_disable();
2585		rcu_read_lock();
2586		bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx);
2587
2588		for (i = 0; i < n; i++) {
2589			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2590			ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2591			if (ret > 0)
2592				queued += ret;
2593		}
2594
2595		if (flush)
2596			xdp_do_flush();
2597
2598		if (tfile->napi_enabled && queued > 0)
2599			napi_schedule(&tfile->napi);
2600
2601		bpf_net_ctx_clear(bpf_net_ctx);
2602		rcu_read_unlock();
2603		local_bh_enable();
2604
2605		tun_put_page(&tpage);
2606
2607		ret = total_len;
2608		goto out;
2609	}
2610
2611	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2612			   m->msg_flags & MSG_DONTWAIT,
2613			   m->msg_flags & MSG_MORE);
2614out:
2615	tun_put(tun);
2616	return ret;
2617}
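/*
 * Editor's sketch of the TUN_MSG_PTR contract checked above: an in-kernel
 * caller (e.g. vhost-net) passes a struct tun_msg_ctl as msg_control
 * describing an array of xdp_buffs.  example_sendmsg_xdp() and its
 * arguments are illustrative, not driver API.
 */
static int example_sendmsg_xdp(struct socket *sock, struct xdp_buff *bufs,
			       unsigned short n)
{
	struct tun_msg_ctl ctl = {
		.type	= TUN_MSG_PTR,	/* batch of XDP buffers */
		.num	= n,
		.ptr	= bufs,
	};
	struct msghdr msg = {
		.msg_control	= &ctl,
		.msg_controllen	= sizeof(ctl),
	};

	return sock->ops->sendmsg(sock, &msg, 0);
}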
2618
2619static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2620		       int flags)
2621{
2622	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2623	struct tun_struct *tun = tun_get(tfile);
2624	void *ptr = m->msg_control;
2625	int ret;
2626
2627	if (!tun) {
2628		ret = -EBADFD;
2629		goto out_free;
2630	}
2631
2632	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2633		ret = -EINVAL;
2634		goto out_put_tun;
2635	}
2636	if (flags & MSG_ERRQUEUE) {
2637		ret = sock_recv_errqueue(sock->sk, m, total_len,
2638					 SOL_PACKET, TUN_TX_TIMESTAMP);
2639		goto out;
2640	}
2641	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2642	if (ret > (ssize_t)total_len) {
2643		m->msg_flags |= MSG_TRUNC;
2644		ret = flags & MSG_TRUNC ? ret : total_len;
2645	}
2646out:
2647	tun_put(tun);
2648	return ret;
2649
2650out_put_tun:
2651	tun_put(tun);
2652out_free:
2653	tun_ptr_free(ptr);
2654	return ret;
2655}
2656
2657static int tun_ptr_peek_len(void *ptr)
2658{
2659	if (likely(ptr)) {
2660		if (tun_is_xdp_frame(ptr)) {
2661			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2662
2663			return xdpf->len;
2664		}
2665		return __skb_array_len_with_tag(ptr);
2666	} else {
2667		return 0;
2668	}
2669}
2670
2671static int tun_peek_len(struct socket *sock)
2672{
2673	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2674	struct tun_struct *tun;
2675	int ret = 0;
2676
2677	tun = tun_get(tfile);
2678	if (!tun)
2679		return 0;
2680
2681	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2682	tun_put(tun);
2683
2684	return ret;
2685}
2686
2687/* Ops structure to mimic raw sockets with tun */
2688static const struct proto_ops tun_socket_ops = {
2689	.peek_len = tun_peek_len,
2690	.sendmsg = tun_sendmsg,
2691	.recvmsg = tun_recvmsg,
2692};
2693
2694static struct proto tun_proto = {
2695	.name		= "tun",
2696	.owner		= THIS_MODULE,
2697	.obj_size	= sizeof(struct tun_file),
2698};
2699
2700static int tun_flags(struct tun_struct *tun)
2701{
2702	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2703}
2704
2705static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
2706			      char *buf)
2707{
2708	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2709	return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
2710}
2711
2712static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
2713			  char *buf)
2714{
2715	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2716	return uid_valid(tun->owner)?
2717		sysfs_emit(buf, "%u\n",
2718			   from_kuid_munged(current_user_ns(), tun->owner)) :
2719		sysfs_emit(buf, "-1\n");
2720}
2721
2722static ssize_t group_show(struct device *dev, struct device_attribute *attr,
2723			  char *buf)
2724{
2725	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2726	return gid_valid(tun->group) ?
2727		sysfs_emit(buf, "%u\n",
2728			   from_kgid_munged(current_user_ns(), tun->group)) :
2729		sysfs_emit(buf, "-1\n");
2730}
2731
2732static DEVICE_ATTR_RO(tun_flags);
2733static DEVICE_ATTR_RO(owner);
2734static DEVICE_ATTR_RO(group);
2735
2736static struct attribute *tun_dev_attrs[] = {
2737	&dev_attr_tun_flags.attr,
2738	&dev_attr_owner.attr,
2739	&dev_attr_group.attr,
2740	NULL
2741};
2742
2743static const struct attribute_group tun_attr_group = {
2744	.attrs = tun_dev_attrs
2745};
2746
2747static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2748{
2749	struct tun_struct *tun;
2750	struct tun_file *tfile = file->private_data;
2751	struct net_device *dev;
2752	int err;
2753
2754	if (tfile->detached)
2755		return -EINVAL;
2756
2757	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2758		if (!capable(CAP_NET_ADMIN))
2759			return -EPERM;
2760
2761		if (!(ifr->ifr_flags & IFF_NAPI) ||
2762		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2763			return -EINVAL;
2764	}
2765
2766	dev = __dev_get_by_name(net, ifr->ifr_name);
2767	if (dev) {
2768		if (ifr->ifr_flags & IFF_TUN_EXCL)
2769			return -EBUSY;
2770		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2771			tun = netdev_priv(dev);
2772		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2773			tun = netdev_priv(dev);
2774		else
2775			return -EINVAL;
2776
2777		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2778		    !!(tun->flags & IFF_MULTI_QUEUE))
2779			return -EINVAL;
2780
2781		if (tun_not_capable(tun))
2782			return -EPERM;
2783		err = security_tun_dev_open(tun->security);
2784		if (err < 0)
2785			return err;
2786
2787		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2788				 ifr->ifr_flags & IFF_NAPI,
2789				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2790		if (err < 0)
2791			return err;
2792
2793		if (tun->flags & IFF_MULTI_QUEUE &&
2794		    (tun->numqueues + tun->numdisabled > 1)) {
2795			/* One or more queues have already been attached; no need
2796			 * to initialize the device again.
2797			 */
2798			netdev_state_change(dev);
2799			return 0;
2800		}
2801
2802		tun->flags = (tun->flags & ~TUN_FEATURES) |
2803			      (ifr->ifr_flags & TUN_FEATURES);
2804
2805		netdev_state_change(dev);
2806	} else {
2807		char *name;
2808		unsigned long flags = 0;
2809		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2810			     MAX_TAP_QUEUES : 1;
2811
2812		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2813			return -EPERM;
2814		err = security_tun_dev_create();
2815		if (err < 0)
2816			return err;
2817
2818		/* Set dev type */
2819		if (ifr->ifr_flags & IFF_TUN) {
2820			/* TUN device */
2821			flags |= IFF_TUN;
2822			name = "tun%d";
2823		} else if (ifr->ifr_flags & IFF_TAP) {
2824			/* TAP device */
2825			flags |= IFF_TAP;
2826			name = "tap%d";
2827		} else
2828			return -EINVAL;
2829
2830		if (*ifr->ifr_name)
2831			name = ifr->ifr_name;
2832
2833		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2834				       NET_NAME_UNKNOWN, tun_setup, queues,
2835				       queues);
2836
2837		if (!dev)
2838			return -ENOMEM;
2839
2840		dev_net_set(dev, net);
2841		dev->rtnl_link_ops = &tun_link_ops;
2842		dev->ifindex = tfile->ifindex;
2843		dev->sysfs_groups[0] = &tun_attr_group;
2844
2845		tun = netdev_priv(dev);
2846		tun->dev = dev;
2847		tun->flags = flags;
2848		tun->txflt.count = 0;
2849		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2850
2851		tun->align = NET_SKB_PAD;
2852		tun->filter_attached = false;
2853		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2854		tun->rx_batched = 0;
2855		RCU_INIT_POINTER(tun->steering_prog, NULL);
2856
2857		tun->ifr = ifr;
2858		tun->file = file;
2859
2860		tun_net_initialize(dev);
2861
2862		err = register_netdevice(tun->dev);
2863		if (err < 0) {
2864			free_netdev(dev);
2865			return err;
2866		}
2867		/* free_netdev() won't check the refcnt; to avoid a race with
2868		 * dev_put() we must publish tun only after registration.
2869		 */
2870		rcu_assign_pointer(tfile->tun, tun);
2871	}
2872
2873	if (ifr->ifr_flags & IFF_NO_CARRIER)
2874		netif_carrier_off(tun->dev);
2875	else
2876		netif_carrier_on(tun->dev);
2877
2878	/* Make sure persistent devices do not get stuck in
2879	 * xoff state.
2880	 */
2881	if (netif_running(tun->dev))
2882		netif_tx_wake_all_queues(tun->dev);
2883
2884	strcpy(ifr->ifr_name, tun->dev->name);
2885	return 0;
2886}
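/*
 * Editor's sketch (userspace, not driver code) of the TUNSETIFF handshake
 * implemented by tun_set_iff() above.  Passing "tun%d" (or an empty name)
 * lets the kernel pick the unit; error handling is trimmed and tun_alloc()
 * is an illustrative helper.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tun_alloc(char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* L3 device, no tun_pi header */
	strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)
		return -1;
	strcpy(name, ifr.ifr_name);	/* kernel reports the final name */
	return fd;
}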
2887
2888static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2889{
2890	strcpy(ifr->ifr_name, tun->dev->name);
2891
2892	ifr->ifr_flags = tun_flags(tun);
2893
2894}
2895
2896/* This is like a cut-down ethtool ops, except it is done via the tun fd,
2897 * so no privileges are required. */
2898static int set_offload(struct tun_struct *tun, unsigned long arg)
2899{
2900	netdev_features_t features = 0;
2901
2902	if (arg & TUN_F_CSUM) {
2903		features |= NETIF_F_HW_CSUM;
2904		arg &= ~TUN_F_CSUM;
2905
2906		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2907			if (arg & TUN_F_TSO_ECN) {
2908				features |= NETIF_F_TSO_ECN;
2909				arg &= ~TUN_F_TSO_ECN;
2910			}
2911			if (arg & TUN_F_TSO4)
2912				features |= NETIF_F_TSO;
2913			if (arg & TUN_F_TSO6)
2914				features |= NETIF_F_TSO6;
2915			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2916		}
2917
2918		arg &= ~TUN_F_UFO;
2919
2920		/* TODO: for now USO4 and USO6 must be enabled together */
2921		if (arg & TUN_F_USO4 && arg & TUN_F_USO6) {
2922			features |= NETIF_F_GSO_UDP_L4;
2923			arg &= ~(TUN_F_USO4 | TUN_F_USO6);
2924		}
2925	}
2926
2927	/* This gives the user a way to test for new features in the future
2928	 * by trying to set them. */
2929	if (arg)
2930		return -EINVAL;
2931
2932	tun->set_features = features;
2933	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2934	tun->dev->wanted_features |= features;
2935	netdev_update_features(tun->dev);
2936
2937	return 0;
2938}
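/*
 * Editor's sketch of driving set_offload() from userspace: TUNSETOFFLOAD
 * takes its flag word by value, and any unsupported bit fails the whole
 * ioctl with -EINVAL (the feature-probing trick noted above).  tun_fd is
 * assumed to be an attached tap fd.
 */
unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

if (ioctl(tun_fd, TUNSETOFFLOAD, offloads) < 0)
	/* kernel rejected at least one requested offload */;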
2939
2940static void tun_detach_filter(struct tun_struct *tun, int n)
2941{
2942	int i;
2943	struct tun_file *tfile;
2944
2945	for (i = 0; i < n; i++) {
2946		tfile = rtnl_dereference(tun->tfiles[i]);
2947		lock_sock(tfile->socket.sk);
2948		sk_detach_filter(tfile->socket.sk);
2949		release_sock(tfile->socket.sk);
2950	}
2951
2952	tun->filter_attached = false;
2953}
2954
2955static int tun_attach_filter(struct tun_struct *tun)
2956{
2957	int i, ret = 0;
2958	struct tun_file *tfile;
2959
2960	for (i = 0; i < tun->numqueues; i++) {
2961		tfile = rtnl_dereference(tun->tfiles[i]);
2962		lock_sock(tfile->socket.sk);
2963		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2964		release_sock(tfile->socket.sk);
2965		if (ret) {
2966			tun_detach_filter(tun, i);
2967			return ret;
2968		}
2969	}
2970
2971	tun->filter_attached = true;
2972	return ret;
2973}
2974
2975static void tun_set_sndbuf(struct tun_struct *tun)
2976{
2977	struct tun_file *tfile;
2978	int i;
2979
2980	for (i = 0; i < tun->numqueues; i++) {
2981		tfile = rtnl_dereference(tun->tfiles[i]);
2982		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2983	}
2984}
2985
2986static int tun_set_queue(struct file *file, struct ifreq *ifr)
2987{
2988	struct tun_file *tfile = file->private_data;
2989	struct tun_struct *tun;
2990	int ret = 0;
2991
2992	rtnl_lock();
2993
2994	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2995		tun = tfile->detached;
2996		if (!tun) {
2997			ret = -EINVAL;
2998			goto unlock;
2999		}
3000		ret = security_tun_dev_attach_queue(tun->security);
3001		if (ret < 0)
3002			goto unlock;
3003		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
3004				 tun->flags & IFF_NAPI_FRAGS, true);
3005	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
3006		tun = rtnl_dereference(tfile->tun);
3007		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
3008			ret = -EINVAL;
3009		else
3010			__tun_detach(tfile, false);
3011	} else
3012		ret = -EINVAL;
3013
3014	if (ret >= 0)
3015		netdev_state_change(tun->dev);
3016
3017unlock:
3018	rtnl_unlock();
3019	return ret;
3020}
3021
3022static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3023			void __user *data)
3024{
3025	struct bpf_prog *prog;
3026	int fd;
3027
3028	if (copy_from_user(&fd, data, sizeof(fd)))
3029		return -EFAULT;
3030
3031	if (fd == -1) {
3032		prog = NULL;
3033	} else {
3034		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3035		if (IS_ERR(prog))
3036			return PTR_ERR(prog);
3037	}
3038
3039	return __tun_set_ebpf(tun, prog_p, prog);
3040}
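/*
 * Editor's sketch matching tun_set_ebpf() above: TUNSETSTEERINGEBPF and
 * TUNSETFILTEREBPF both take a pointer to an int program fd, and fd == -1
 * detaches the current program.  tun_fd is assumed to exist.
 */
int prog_fd = -1;	/* or an fd for a BPF_PROG_TYPE_SOCKET_FILTER program */

if (ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd) < 0)
	/* handle error */;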
3041
3042/* Return the correct value for tun->dev->addr_len based on tun->dev->type. */
3043static unsigned char tun_get_addr_len(unsigned short type)
3044{
3045	switch (type) {
3046	case ARPHRD_IP6GRE:
3047	case ARPHRD_TUNNEL6:
3048		return sizeof(struct in6_addr);
3049	case ARPHRD_IPGRE:
3050	case ARPHRD_TUNNEL:
3051	case ARPHRD_SIT:
3052		return 4;
3053	case ARPHRD_ETHER:
3054		return ETH_ALEN;
3055	case ARPHRD_IEEE802154:
3056	case ARPHRD_IEEE802154_MONITOR:
3057		return IEEE802154_EXTENDED_ADDR_LEN;
3058	case ARPHRD_PHONET_PIPE:
3059	case ARPHRD_PPP:
3060	case ARPHRD_NONE:
3061		return 0;
3062	case ARPHRD_6LOWPAN:
3063		return EUI64_ADDR_LEN;
3064	case ARPHRD_FDDI:
3065		return FDDI_K_ALEN;
3066	case ARPHRD_HIPPI:
3067		return HIPPI_ALEN;
3068	case ARPHRD_IEEE802:
3069		return FC_ALEN;
3070	case ARPHRD_ROSE:
3071		return ROSE_ADDR_LEN;
3072	case ARPHRD_NETROM:
3073		return AX25_ADDR_LEN;
3074	case ARPHRD_LOCALTLK:
3075		return LTALK_ALEN;
3076	default:
3077		return 0;
3078	}
3079}
3080
3081static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3082			    unsigned long arg, int ifreq_len)
3083{
3084	struct tun_file *tfile = file->private_data;
3085	struct net *net = sock_net(&tfile->sk);
3086	struct tun_struct *tun;
3087	void __user* argp = (void __user*)arg;
3088	unsigned int carrier;
3089	struct ifreq ifr;
3090	kuid_t owner;
3091	kgid_t group;
3092	int ifindex;
3093	int sndbuf;
3094	int vnet_hdr_sz;
3095	int le;
3096	int ret;
3097	bool do_notify = false;
3098
3099	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3100	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3101		if (copy_from_user(&ifr, argp, ifreq_len))
3102			return -EFAULT;
3103	} else {
3104		memset(&ifr, 0, sizeof(ifr));
3105	}
3106	if (cmd == TUNGETFEATURES) {
3107		/* Currently this just means: "what IFF flags are valid?".
3108		 * This is needed because we never checked for invalid flags on
3109		 * TUNSETIFF.
3110		 */
3111		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
3112				TUN_FEATURES, (unsigned int __user*)argp);
3113	} else if (cmd == TUNSETQUEUE) {
3114		return tun_set_queue(file, &ifr);
3115	} else if (cmd == SIOCGSKNS) {
3116		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3117			return -EPERM;
3118		return open_related_ns(&net->ns, get_net_ns);
3119	}
3120
3121	rtnl_lock();
3122
3123	tun = tun_get(tfile);
3124	if (cmd == TUNSETIFF) {
3125		ret = -EEXIST;
3126		if (tun)
3127			goto unlock;
3128
3129		ifr.ifr_name[IFNAMSIZ-1] = '\0';
3130
3131		ret = tun_set_iff(net, file, &ifr);
3132
3133		if (ret)
3134			goto unlock;
3135
3136		if (copy_to_user(argp, &ifr, ifreq_len))
3137			ret = -EFAULT;
3138		goto unlock;
3139	}
3140	if (cmd == TUNSETIFINDEX) {
3141		ret = -EPERM;
3142		if (tun)
3143			goto unlock;
3144
3145		ret = -EFAULT;
3146		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3147			goto unlock;
3148		ret = -EINVAL;
3149		if (ifindex < 0)
3150			goto unlock;
3151		ret = 0;
3152		tfile->ifindex = ifindex;
3153		goto unlock;
3154	}
3155
3156	ret = -EBADFD;
3157	if (!tun)
3158		goto unlock;
3159
3160	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3161
3162	net = dev_net(tun->dev);
3163	ret = 0;
3164	switch (cmd) {
3165	case TUNGETIFF:
3166		tun_get_iff(tun, &ifr);
3167
3168		if (tfile->detached)
3169			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3170		if (!tfile->socket.sk->sk_filter)
3171			ifr.ifr_flags |= IFF_NOFILTER;
3172
3173		if (copy_to_user(argp, &ifr, ifreq_len))
3174			ret = -EFAULT;
3175		break;
3176
3177	case TUNSETNOCSUM:
3178		/* Disable/Enable checksum */
3179
3180		/* [unimplemented] */
3181		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3182			   arg ? "disabled" : "enabled");
3183		break;
3184
3185	case TUNSETPERSIST:
3186		/* Disable/Enable persist mode. Keep an extra reference to the
3187		 * module to prevent the module from being unloaded.
3188		 */
3189		if (arg && !(tun->flags & IFF_PERSIST)) {
3190			tun->flags |= IFF_PERSIST;
3191			__module_get(THIS_MODULE);
3192			do_notify = true;
3193		}
3194		if (!arg && (tun->flags & IFF_PERSIST)) {
3195			tun->flags &= ~IFF_PERSIST;
3196			module_put(THIS_MODULE);
3197			do_notify = true;
3198		}
3199
3200		netif_info(tun, drv, tun->dev, "persist %s\n",
3201			   arg ? "enabled" : "disabled");
3202		break;
3203
3204	case TUNSETOWNER:
3205		/* Set owner of the device */
3206		owner = make_kuid(current_user_ns(), arg);
3207		if (!uid_valid(owner)) {
3208			ret = -EINVAL;
3209			break;
3210		}
3211		tun->owner = owner;
3212		do_notify = true;
3213		netif_info(tun, drv, tun->dev, "owner set to %u\n",
3214			   from_kuid(&init_user_ns, tun->owner));
3215		break;
3216
3217	case TUNSETGROUP:
3218		/* Set group of the device */
3219		group = make_kgid(current_user_ns(), arg);
3220		if (!gid_valid(group)) {
3221			ret = -EINVAL;
3222			break;
3223		}
3224		tun->group = group;
3225		do_notify = true;
3226		netif_info(tun, drv, tun->dev, "group set to %u\n",
3227			   from_kgid(&init_user_ns, tun->group));
3228		break;
3229
3230	case TUNSETLINK:
3231		/* Only allow setting the type when the interface is down */
3232		if (tun->dev->flags & IFF_UP) {
3233			netif_info(tun, drv, tun->dev,
3234				   "Linktype set failed because interface is up\n");
3235			ret = -EBUSY;
3236		} else {
3237			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
3238						       tun->dev);
3239			ret = notifier_to_errno(ret);
3240			if (ret) {
3241				netif_info(tun, drv, tun->dev,
3242					   "Refused to change device type\n");
3243				break;
3244			}
3245			tun->dev->type = (int) arg;
3246			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3247			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3248				   tun->dev->type);
3249			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
3250						 tun->dev);
3251		}
3252		break;
3253
3254	case TUNSETDEBUG:
3255		tun->msg_enable = (u32)arg;
3256		break;
3257
3258	case TUNSETOFFLOAD:
3259		ret = set_offload(tun, arg);
3260		break;
3261
3262	case TUNSETTXFILTER:
3263		/* Can be set only for TAPs */
3264		ret = -EINVAL;
3265		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3266			break;
3267		ret = update_filter(&tun->txflt, (void __user *)arg);
3268		break;
3269
3270	case SIOCGIFHWADDR:
3271		/* Get hw address */
3272		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3273		if (copy_to_user(argp, &ifr, ifreq_len))
3274			ret = -EFAULT;
3275		break;
3276
3277	case SIOCSIFHWADDR:
3278		/* Set hw address */
3279		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3280		break;
3281
3282	case TUNGETSNDBUF:
3283		sndbuf = tfile->socket.sk->sk_sndbuf;
3284		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3285			ret = -EFAULT;
3286		break;
3287
3288	case TUNSETSNDBUF:
3289		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3290			ret = -EFAULT;
3291			break;
3292		}
3293		if (sndbuf <= 0) {
3294			ret = -EINVAL;
3295			break;
3296		}
3297
3298		tun->sndbuf = sndbuf;
3299		tun_set_sndbuf(tun);
3300		break;
3301
3302	case TUNGETVNETHDRSZ:
3303		vnet_hdr_sz = tun->vnet_hdr_sz;
3304		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3305			ret = -EFAULT;
3306		break;
3307
3308	case TUNSETVNETHDRSZ:
3309		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3310			ret = -EFAULT;
3311			break;
3312		}
3313		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3314			ret = -EINVAL;
3315			break;
3316		}
3317
3318		tun->vnet_hdr_sz = vnet_hdr_sz;
3319		break;
3320
3321	case TUNGETVNETLE:
3322		le = !!(tun->flags & TUN_VNET_LE);
3323		if (put_user(le, (int __user *)argp))
3324			ret = -EFAULT;
3325		break;
3326
3327	case TUNSETVNETLE:
3328		if (get_user(le, (int __user *)argp)) {
3329			ret = -EFAULT;
3330			break;
3331		}
3332		if (le)
3333			tun->flags |= TUN_VNET_LE;
3334		else
3335			tun->flags &= ~TUN_VNET_LE;
3336		break;
3337
3338	case TUNGETVNETBE:
3339		ret = tun_get_vnet_be(tun, argp);
3340		break;
3341
3342	case TUNSETVNETBE:
3343		ret = tun_set_vnet_be(tun, argp);
3344		break;
3345
3346	case TUNATTACHFILTER:
3347		/* Can be set only for TAPs */
3348		ret = -EINVAL;
3349		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3350			break;
3351		ret = -EFAULT;
3352		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3353			break;
3354
3355		ret = tun_attach_filter(tun);
3356		break;
3357
3358	case TUNDETACHFILTER:
3359		/* Can be set only for TAPs */
3360		ret = -EINVAL;
3361		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3362			break;
3363		ret = 0;
3364		tun_detach_filter(tun, tun->numqueues);
3365		break;
3366
3367	case TUNGETFILTER:
3368		ret = -EINVAL;
3369		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3370			break;
3371		ret = -EFAULT;
3372		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3373			break;
3374		ret = 0;
3375		break;
3376
3377	case TUNSETSTEERINGEBPF:
3378		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3379		break;
3380
3381	case TUNSETFILTEREBPF:
3382		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3383		break;
3384
3385	case TUNSETCARRIER:
3386		ret = -EFAULT;
3387		if (copy_from_user(&carrier, argp, sizeof(carrier)))
3388			goto unlock;
3389
3390		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3391		break;
3392
3393	case TUNGETDEVNETNS:
3394		ret = -EPERM;
3395		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3396			goto unlock;
3397		ret = open_related_ns(&net->ns, get_net_ns);
3398		break;
3399
3400	default:
3401		ret = -EINVAL;
3402		break;
3403	}
3404
3405	if (do_notify)
3406		netdev_state_change(tun->dev);
3407
3408unlock:
3409	rtnl_unlock();
3410	if (tun)
3411		tun_put(tun);
3412	return ret;
3413}
3414
3415static long tun_chr_ioctl(struct file *file,
3416			  unsigned int cmd, unsigned long arg)
3417{
3418	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
3419}
3420
3421#ifdef CONFIG_COMPAT
3422static long tun_chr_compat_ioctl(struct file *file,
3423			 unsigned int cmd, unsigned long arg)
3424{
3425	switch (cmd) {
3426	case TUNSETIFF:
3427	case TUNGETIFF:
3428	case TUNSETTXFILTER:
3429	case TUNGETSNDBUF:
3430	case TUNSETSNDBUF:
3431	case SIOCGIFHWADDR:
3432	case SIOCSIFHWADDR:
3433		arg = (unsigned long)compat_ptr(arg);
3434		break;
3435	default:
3436		arg = (compat_ulong_t)arg;
3437		break;
3438	}
3439
3440	/*
3441	 * compat_ifreq is shorter than ifreq, so we must not access beyond
3442	 * the end of that structure. All fields that are used in this
3443	 * driver are compatible though, we don't need to convert the
3444	 * contents.
3445	 */
3446	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3447}
3448#endif /* CONFIG_COMPAT */
3449
3450static int tun_chr_fasync(int fd, struct file *file, int on)
3451{
3452	struct tun_file *tfile = file->private_data;
3453	int ret;
3454
3455	if (on) {
3456		ret = file_f_owner_allocate(file);
3457		if (ret)
3458			goto out;
3459	}
3460
3461	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
3462		goto out;
3463
3464	if (on) {
3465		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3466		tfile->flags |= TUN_FASYNC;
3467	} else
3468		tfile->flags &= ~TUN_FASYNC;
3469	ret = 0;
3470out:
3471	return ret;
3472}
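/*
 * Editor's sketch (userspace, illustrative): enabling O_ASYNC on the tun
 * fd routes through tun_chr_fasync() above and arms SIGIO delivery for
 * read/write readiness.  tun_fd is assumed to be an open tun fd.
 */
#include <fcntl.h>
#include <unistd.h>

fcntl(tun_fd, F_SETOWN, getpid());
fcntl(tun_fd, F_SETFL, fcntl(tun_fd, F_GETFL) | O_ASYNC);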
3473
3474static int tun_chr_open(struct inode *inode, struct file * file)
3475{
3476	struct net *net = current->nsproxy->net_ns;
3477	struct tun_file *tfile;
3478
3479	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3480					    &tun_proto, 0);
3481	if (!tfile)
3482		return -ENOMEM;
3483	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3484		sk_free(&tfile->sk);
3485		return -ENOMEM;
3486	}
3487
3488	mutex_init(&tfile->napi_mutex);
3489	RCU_INIT_POINTER(tfile->tun, NULL);
3490	tfile->flags = 0;
3491	tfile->ifindex = 0;
3492
3493	init_waitqueue_head(&tfile->socket.wq.wait);
3494
3495	tfile->socket.file = file;
3496	tfile->socket.ops = &tun_socket_ops;
3497
3498	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
3499
3500	tfile->sk.sk_write_space = tun_sock_write_space;
3501	tfile->sk.sk_sndbuf = INT_MAX;
3502
3503	file->private_data = tfile;
3504	INIT_LIST_HEAD(&tfile->next);
3505
3506	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3507
3508	/* tun groks IOCB_NOWAIT just fine, mark it as such */
3509	file->f_mode |= FMODE_NOWAIT;
3510	return 0;
3511}
3512
3513static int tun_chr_close(struct inode *inode, struct file *file)
3514{
3515	struct tun_file *tfile = file->private_data;
3516
3517	tun_detach(tfile, true);
3518
3519	return 0;
3520}
3521
3522#ifdef CONFIG_PROC_FS
3523static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3524{
3525	struct tun_file *tfile = file->private_data;
3526	struct tun_struct *tun;
3527	struct ifreq ifr;
3528
3529	memset(&ifr, 0, sizeof(ifr));
3530
3531	rtnl_lock();
3532	tun = tun_get(tfile);
3533	if (tun)
3534		tun_get_iff(tun, &ifr);
3535	rtnl_unlock();
3536
3537	if (tun)
3538		tun_put(tun);
3539
3540	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3541}
3542#endif
3543
3544static const struct file_operations tun_fops = {
3545	.owner	= THIS_MODULE,
3546	.read_iter  = tun_chr_read_iter,
3547	.write_iter = tun_chr_write_iter,
3548	.poll	= tun_chr_poll,
3549	.unlocked_ioctl	= tun_chr_ioctl,
3550#ifdef CONFIG_COMPAT
3551	.compat_ioctl = tun_chr_compat_ioctl,
3552#endif
3553	.open	= tun_chr_open,
3554	.release = tun_chr_close,
3555	.fasync = tun_chr_fasync,
3556#ifdef CONFIG_PROC_FS
3557	.show_fdinfo = tun_chr_show_fdinfo,
3558#endif
3559};
3560
3561static struct miscdevice tun_miscdev = {
3562	.minor = TUN_MINOR,
3563	.name = "tun",
3564	.nodename = "net/tun",
3565	.fops = &tun_fops,
3566};
3567
3568/* ethtool interface */
3569
3570static void tun_default_link_ksettings(struct net_device *dev,
3571				       struct ethtool_link_ksettings *cmd)
3572{
3573	ethtool_link_ksettings_zero_link_mode(cmd, supported);
3574	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3575	cmd->base.speed		= SPEED_10000;
3576	cmd->base.duplex	= DUPLEX_FULL;
3577	cmd->base.port		= PORT_TP;
3578	cmd->base.phy_address	= 0;
3579	cmd->base.autoneg	= AUTONEG_DISABLE;
3580}
3581
3582static int tun_get_link_ksettings(struct net_device *dev,
3583				  struct ethtool_link_ksettings *cmd)
3584{
3585	struct tun_struct *tun = netdev_priv(dev);
3586
3587	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3588	return 0;
3589}
3590
3591static int tun_set_link_ksettings(struct net_device *dev,
3592				  const struct ethtool_link_ksettings *cmd)
3593{
3594	struct tun_struct *tun = netdev_priv(dev);
3595
3596	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3597	return 0;
3598}
3599
3600static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3601{
3602	struct tun_struct *tun = netdev_priv(dev);
3603
3604	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
3605	strscpy(info->version, DRV_VERSION, sizeof(info->version));
3606
3607	switch (tun->flags & TUN_TYPE_MASK) {
3608	case IFF_TUN:
3609		strscpy(info->bus_info, "tun", sizeof(info->bus_info));
3610		break;
3611	case IFF_TAP:
3612		strscpy(info->bus_info, "tap", sizeof(info->bus_info));
3613		break;
3614	}
3615}
3616
3617static u32 tun_get_msglevel(struct net_device *dev)
3618{
3619	struct tun_struct *tun = netdev_priv(dev);
3620
3621	return tun->msg_enable;
3622}
3623
3624static void tun_set_msglevel(struct net_device *dev, u32 value)
3625{
3626	struct tun_struct *tun = netdev_priv(dev);
3627
3628	tun->msg_enable = value;
3629}
3630
3631static int tun_get_coalesce(struct net_device *dev,
3632			    struct ethtool_coalesce *ec,
3633			    struct kernel_ethtool_coalesce *kernel_coal,
3634			    struct netlink_ext_ack *extack)
3635{
3636	struct tun_struct *tun = netdev_priv(dev);
3637
3638	ec->rx_max_coalesced_frames = tun->rx_batched;
3639
3640	return 0;
3641}
3642
3643static int tun_set_coalesce(struct net_device *dev,
3644			    struct ethtool_coalesce *ec,
3645			    struct kernel_ethtool_coalesce *kernel_coal,
3646			    struct netlink_ext_ack *extack)
3647{
3648	struct tun_struct *tun = netdev_priv(dev);
3649
3650	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3651		tun->rx_batched = NAPI_POLL_WEIGHT;
3652	else
3653		tun->rx_batched = ec->rx_max_coalesced_frames;
3654
3655	return 0;
3656}
3657
3658static void tun_get_channels(struct net_device *dev,
3659			     struct ethtool_channels *channels)
3660{
3661	struct tun_struct *tun = netdev_priv(dev);
3662
3663	channels->combined_count = tun->numqueues;
3664	channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
3665}
3666
3667static const struct ethtool_ops tun_ethtool_ops = {
3668	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3669	.get_drvinfo	= tun_get_drvinfo,
3670	.get_msglevel	= tun_get_msglevel,
3671	.set_msglevel	= tun_set_msglevel,
3672	.get_link	= ethtool_op_get_link,
3673	.get_channels   = tun_get_channels,
3674	.get_ts_info	= ethtool_op_get_ts_info,
3675	.get_coalesce   = tun_get_coalesce,
3676	.set_coalesce   = tun_set_coalesce,
3677	.get_link_ksettings = tun_get_link_ksettings,
3678	.set_link_ksettings = tun_set_link_ksettings,
3679};
3680
3681static int tun_queue_resize(struct tun_struct *tun)
3682{
3683	struct net_device *dev = tun->dev;
3684	struct tun_file *tfile;
3685	struct ptr_ring **rings;
3686	int n = tun->numqueues + tun->numdisabled;
3687	int ret, i;
3688
3689	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3690	if (!rings)
3691		return -ENOMEM;
3692
3693	for (i = 0; i < tun->numqueues; i++) {
3694		tfile = rtnl_dereference(tun->tfiles[i]);
3695		rings[i] = &tfile->tx_ring;
3696	}
3697	list_for_each_entry(tfile, &tun->disabled, next)
3698		rings[i++] = &tfile->tx_ring;
3699
3700	ret = ptr_ring_resize_multiple_bh(rings, n,
3701					  dev->tx_queue_len, GFP_KERNEL,
3702					  tun_ptr_free);
3703
3704	kfree(rings);
3705	return ret;
3706}
3707
3708static int tun_device_event(struct notifier_block *unused,
3709			    unsigned long event, void *ptr)
3710{
3711	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3712	struct tun_struct *tun = netdev_priv(dev);
3713	int i;
3714
3715	if (dev->rtnl_link_ops != &tun_link_ops)
3716		return NOTIFY_DONE;
3717
3718	switch (event) {
3719	case NETDEV_CHANGE_TX_QUEUE_LEN:
3720		if (tun_queue_resize(tun))
3721			return NOTIFY_BAD;
3722		break;
3723	case NETDEV_UP:
3724		for (i = 0; i < tun->numqueues; i++) {
3725			struct tun_file *tfile;
3726
3727			tfile = rtnl_dereference(tun->tfiles[i]);
3728			tfile->socket.sk->sk_write_space(tfile->socket.sk);
3729		}
3730		break;
3731	default:
3732		break;
3733	}
3734
3735	return NOTIFY_DONE;
3736}
3737
3738static struct notifier_block tun_notifier_block __read_mostly = {
3739	.notifier_call	= tun_device_event,
3740};
3741
3742static int __init tun_init(void)
3743{
3744	int ret = 0;
3745
3746	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3747
3748	ret = rtnl_link_register(&tun_link_ops);
3749	if (ret) {
3750		pr_err("Can't register link_ops\n");
3751		goto err_linkops;
3752	}
3753
3754	ret = misc_register(&tun_miscdev);
3755	if (ret) {
3756		pr_err("Can't register misc device %d\n", TUN_MINOR);
3757		goto err_misc;
3758	}
3759
3760	ret = register_netdevice_notifier(&tun_notifier_block);
3761	if (ret) {
3762		pr_err("Can't register netdevice notifier\n");
3763		goto err_notifier;
3764	}
3765
3766	return  0;
3767
3768err_notifier:
3769	misc_deregister(&tun_miscdev);
3770err_misc:
3771	rtnl_link_unregister(&tun_link_ops);
3772err_linkops:
3773	return ret;
3774}
3775
3776static void __exit tun_cleanup(void)
3777{
3778	misc_deregister(&tun_miscdev);
3779	rtnl_link_unregister(&tun_link_ops);
3780	unregister_netdevice_notifier(&tun_notifier_block);
3781}
3782
3783/* Get the underlying socket object from a tun file.  Returns an error
3784 * unless the file is attached to a device.  The returned object works like
3785 * a packet socket: it can be used for sock_sendmsg/sock_recvmsg.  The caller
3786 * must hold a reference to the file for as long as the socket is in use. */
3787struct socket *tun_get_socket(struct file *file)
3788{
3789	struct tun_file *tfile;
3790	if (file->f_op != &tun_fops)
3791		return ERR_PTR(-EINVAL);
3792	tfile = file->private_data;
3793	if (!tfile)
3794		return ERR_PTR(-EBADFD);
3795	return &tfile->socket;
3796}
3797EXPORT_SYMBOL_GPL(tun_get_socket);
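/*
 * Editor's sketch of in-kernel use of the helper above, in the style of
 * vhost-net (its main consumer); example_send() and its msg argument are
 * illustrative, not driver API.
 */
static int example_send(struct file *tun_file, struct msghdr *msg)
{
	struct socket *sock = tun_get_socket(tun_file);

	if (IS_ERR(sock))
		return PTR_ERR(sock);
	return sock_sendmsg(sock, msg);	/* behaves like a packet socket */
}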
3798
3799struct ptr_ring *tun_get_tx_ring(struct file *file)
3800{
3801	struct tun_file *tfile;
3802
3803	if (file->f_op != &tun_fops)
3804		return ERR_PTR(-EINVAL);
3805	tfile = file->private_data;
3806	if (!tfile)
3807		return ERR_PTR(-EBADFD);
3808	return &tfile->tx_ring;
3809}
3810EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3811
3812module_init(tun_init);
3813module_exit(tun_cleanup);
3814MODULE_DESCRIPTION(DRV_DESCRIPTION);
3815MODULE_AUTHOR(DRV_COPYRIGHT);
3816MODULE_LICENSE("GPL");
3817MODULE_ALIAS_MISCDEV(TUN_MINOR);
3818MODULE_ALIAS("devname:net/tun");
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  TUN - Universal TUN/TAP device driver.
   4 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
   5 *
   6 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
   7 */
   8
   9/*
  10 *  Changes:
  11 *
  12 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  13 *    Add TUNSETLINK ioctl to set the link encapsulation
  14 *
  15 *  Mark Smith <markzzzsmith@yahoo.com.au>
  16 *    Use eth_random_addr() for tap MAC address.
  17 *
  18 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
  19 *    Fixes in packet dropping, queue length setting and queue wakeup.
  20 *    Increased default tx queue length.
  21 *    Added ethtool API.
  22 *    Minor cleanups
  23 *
  24 *  Daniel Podlejski <underley@underley.eu.org>
  25 *    Modifications for 2.3.99-pre5 kernel.
  26 */
  27
  28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  29
  30#define DRV_NAME	"tun"
  31#define DRV_VERSION	"1.6"
  32#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
  33#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
  34
  35#include <linux/module.h>
  36#include <linux/errno.h>
  37#include <linux/kernel.h>
  38#include <linux/sched/signal.h>
  39#include <linux/major.h>
  40#include <linux/slab.h>
  41#include <linux/poll.h>
  42#include <linux/fcntl.h>
  43#include <linux/init.h>
  44#include <linux/skbuff.h>
  45#include <linux/netdevice.h>
  46#include <linux/etherdevice.h>
  47#include <linux/miscdevice.h>
  48#include <linux/ethtool.h>
  49#include <linux/rtnetlink.h>
  50#include <linux/compat.h>
  51#include <linux/if.h>
  52#include <linux/if_arp.h>
  53#include <linux/if_ether.h>
  54#include <linux/if_tun.h>
  55#include <linux/if_vlan.h>
  56#include <linux/crc32.h>
 
  57#include <linux/nsproxy.h>
  58#include <linux/virtio_net.h>
  59#include <linux/rcupdate.h>
  60#include <net/net_namespace.h>
  61#include <net/netns/generic.h>
  62#include <net/rtnetlink.h>
  63#include <net/sock.h>
  64#include <net/xdp.h>
 
  65#include <linux/seq_file.h>
  66#include <linux/uio.h>
  67#include <linux/skb_array.h>
  68#include <linux/bpf.h>
  69#include <linux/bpf_trace.h>
  70#include <linux/mutex.h>
 
 
 
 
 
 
 
 
 
  71
  72#include <linux/uaccess.h>
  73#include <linux/proc_fs.h>
  74
  75static void tun_default_link_ksettings(struct net_device *dev,
  76				       struct ethtool_link_ksettings *cmd);
  77
  78/* Uncomment to enable debugging */
  79/* #define TUN_DEBUG 1 */
  80
  81#ifdef TUN_DEBUG
  82static int debug;
  83
  84#define tun_debug(level, tun, fmt, args...)			\
  85do {								\
  86	if (tun->debug)						\
  87		netdev_printk(level, tun->dev, fmt, ##args);	\
  88} while (0)
  89#define DBG1(level, fmt, args...)				\
  90do {								\
  91	if (debug == 2)						\
  92		printk(level fmt, ##args);			\
  93} while (0)
  94#else
  95#define tun_debug(level, tun, fmt, args...)			\
  96do {								\
  97	if (0)							\
  98		netdev_printk(level, tun->dev, fmt, ##args);	\
  99} while (0)
 100#define DBG1(level, fmt, args...)				\
 101do {								\
 102	if (0)							\
 103		printk(level fmt, ##args);			\
 104} while (0)
 105#endif
 106
 107#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
 108
 109/* TUN device flags */
 110
 111/* IFF_ATTACH_QUEUE is never stored in device flags,
 112 * overload it to mean fasync when stored there.
 113 */
 114#define TUN_FASYNC	IFF_ATTACH_QUEUE
 115/* High bits in flags field are unused. */
 116#define TUN_VNET_LE     0x80000000
 117#define TUN_VNET_BE     0x40000000
 118
 119#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
 120		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
 121
 122#define GOODCOPY_LEN 128
 123
 124#define FLT_EXACT_COUNT 8
 125struct tap_filter {
 126	unsigned int    count;    /* Number of addrs. Zero means disabled */
 127	u32             mask[2];  /* Mask of the hashed addrs */
 128	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 129};
 130
 131/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 132 * to max number of VCPUs in guest. */
 133#define MAX_TAP_QUEUES 256
 134#define MAX_TAP_FLOWS  4096
 135
 136#define TUN_FLOW_EXPIRE (3 * HZ)
 137
 138struct tun_pcpu_stats {
 139	u64 rx_packets;
 140	u64 rx_bytes;
 141	u64 tx_packets;
 142	u64 tx_bytes;
 143	struct u64_stats_sync syncp;
 144	u32 rx_dropped;
 145	u32 tx_dropped;
 146	u32 rx_frame_errors;
 147};
 148
 149/* A tun_file connects an open character device to a tuntap netdevice. It
 150 * also contains all socket related structures (except sock_fprog and tap_filter)
 151 * to serve as one transmit queue for tuntap device. The sock_fprog and
 152 * tap_filter were kept in tun_struct since they were used for filtering for the
 153 * netdevice not for a specific queue (at least I didn't see the requirement for
 154 * this).
 155 *
 156 * RCU usage:
 157 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 158 * other can only be read while rcu_read_lock or rtnl_lock is held.
 159 */
 160struct tun_file {
 161	struct sock sk;
 162	struct socket socket;
 163	struct tun_struct __rcu *tun;
 164	struct fasync_struct *fasync;
 165	/* only used for fasnyc */
 166	unsigned int flags;
 167	union {
 168		u16 queue_index;
 169		unsigned int ifindex;
 170	};
 171	struct napi_struct napi;
 172	bool napi_enabled;
 173	bool napi_frags_enabled;
 174	struct mutex napi_mutex;	/* Protects access to the above napi */
 175	struct list_head next;
 176	struct tun_struct *detached;
 177	struct ptr_ring tx_ring;
 178	struct xdp_rxq_info xdp_rxq;
 179};
 180
 181struct tun_page {
 182	struct page *page;
 183	int count;
 184};
 185
 186struct tun_flow_entry {
 187	struct hlist_node hash_link;
 188	struct rcu_head rcu;
 189	struct tun_struct *tun;
 190
 191	u32 rxhash;
 192	u32 rps_rxhash;
 193	int queue_index;
 194	unsigned long updated ____cacheline_aligned_in_smp;
 195};
 196
 197#define TUN_NUM_FLOW_ENTRIES 1024
 198#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
 199
 200struct tun_prog {
 201	struct rcu_head rcu;
 202	struct bpf_prog *prog;
 203};
 204
 205/* Since the socket were moved to tun_file, to preserve the behavior of persist
 206 * device, socket filter, sndbuf and vnet header size were restore when the
 207 * file were attached to a persist device.
 208 */
 209struct tun_struct {
 210	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
 211	unsigned int            numqueues;
 212	unsigned int 		flags;
 213	kuid_t			owner;
 214	kgid_t			group;
 215
 216	struct net_device	*dev;
 217	netdev_features_t	set_features;
 218#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 219			  NETIF_F_TSO6)
 220
 221	int			align;
 222	int			vnet_hdr_sz;
 223	int			sndbuf;
 224	struct tap_filter	txflt;
 225	struct sock_fprog	fprog;
 226	/* protected by rtnl lock */
 227	bool			filter_attached;
 228#ifdef TUN_DEBUG
 229	int debug;
 230#endif
 231	spinlock_t lock;
 232	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
 233	struct timer_list flow_gc_timer;
 234	unsigned long ageing_time;
 235	unsigned int numdisabled;
 236	struct list_head disabled;
 237	void *security;
 238	u32 flow_count;
 239	u32 rx_batched;
 240	struct tun_pcpu_stats __percpu *pcpu_stats;
 241	struct bpf_prog __rcu *xdp_prog;
 242	struct tun_prog __rcu *steering_prog;
 243	struct tun_prog __rcu *filter_prog;
 244	struct ethtool_link_ksettings link_ksettings;
 
 
 
 245};
 246
 247struct veth {
 248	__be16 h_vlan_proto;
 249	__be16 h_vlan_TCI;
 250};
 251
 252bool tun_is_xdp_frame(void *ptr)
 253{
 254	return (unsigned long)ptr & TUN_XDP_FLAG;
 255}
 256EXPORT_SYMBOL(tun_is_xdp_frame);
 257
 258void *tun_xdp_to_ptr(void *ptr)
 259{
 260	return (void *)((unsigned long)ptr | TUN_XDP_FLAG);
 261}
 262EXPORT_SYMBOL(tun_xdp_to_ptr);
 263
 264void *tun_ptr_to_xdp(void *ptr)
 265{
 266	return (void *)((unsigned long)ptr & ~TUN_XDP_FLAG);
 267}
 268EXPORT_SYMBOL(tun_ptr_to_xdp);
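/* Illustrative example of the tagging round trip above (addresses are made
 * up): an xdp_frame at 0xffff888012345680 is stored in the ring as
 * 0xffff888012345681; tun_is_xdp_frame() tests the low bit and
 * tun_ptr_to_xdp() masks it off again. This works because sk_buff and
 * xdp_frame pointers are always at least word-aligned, so bit 0 is never
 * set in a genuine pointer.
 */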
 269
 270static int tun_napi_receive(struct napi_struct *napi, int budget)
 271{
 272	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
 273	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
 274	struct sk_buff_head process_queue;
 275	struct sk_buff *skb;
 276	int received = 0;
 277
 278	__skb_queue_head_init(&process_queue);
 279
 280	spin_lock(&queue->lock);
 281	skb_queue_splice_tail_init(queue, &process_queue);
 282	spin_unlock(&queue->lock);
 283
 284	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
 285		napi_gro_receive(napi, skb);
 286		++received;
 287	}
 288
 289	if (!skb_queue_empty(&process_queue)) {
 290		spin_lock(&queue->lock);
 291		skb_queue_splice(&process_queue, queue);
 292		spin_unlock(&queue->lock);
 293	}
 294
 295	return received;
 296}
 297
 298static int tun_napi_poll(struct napi_struct *napi, int budget)
 299{
 300	unsigned int received;
 301
 302	received = tun_napi_receive(napi, budget);
 303
 304	if (received < budget)
 305		napi_complete_done(napi, received);
 306
 307	return received;
 308}
 309
 310static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
 311			  bool napi_en, bool napi_frags)
 312{
 313	tfile->napi_enabled = napi_en;
 314	tfile->napi_frags_enabled = napi_en && napi_frags;
 315	if (napi_en) {
 316		netif_napi_add(tun->dev, &tfile->napi, tun_napi_poll,
 317			       NAPI_POLL_WEIGHT);
 318		napi_enable(&tfile->napi);
 319	}
 320}
 321
 322static void tun_napi_disable(struct tun_file *tfile)
 323{
 324	if (tfile->napi_enabled)
 325		napi_disable(&tfile->napi);
 326}
 327
 328static void tun_napi_del(struct tun_file *tfile)
 329{
 330	if (tfile->napi_enabled)
 331		netif_napi_del(&tfile->napi);
 332}
 333
 334static bool tun_napi_frags_enabled(const struct tun_file *tfile)
 335{
 336	return tfile->napi_frags_enabled;
 337}
 338
 339#ifdef CONFIG_TUN_VNET_CROSS_LE
 340static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 341{
 342	return tun->flags & TUN_VNET_BE ? false :
 343		virtio_legacy_is_little_endian();
 344}
 345
 346static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 347{
 348	int be = !!(tun->flags & TUN_VNET_BE);
 349
 350	if (put_user(be, argp))
 351		return -EFAULT;
 352
 353	return 0;
 354}
 355
 356static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 357{
 358	int be;
 359
 360	if (get_user(be, argp))
 361		return -EFAULT;
 362
 363	if (be)
 364		tun->flags |= TUN_VNET_BE;
 365	else
 366		tun->flags &= ~TUN_VNET_BE;
 367
 368	return 0;
 369}
 370#else
 371static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
 372{
 373	return virtio_legacy_is_little_endian();
 374}
 375
 376static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
 377{
 378	return -EINVAL;
 379}
 380
 381static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
 382{
 383	return -EINVAL;
 384}
 385#endif /* CONFIG_TUN_VNET_CROSS_LE */
 386
 387static inline bool tun_is_little_endian(struct tun_struct *tun)
 388{
 389	return tun->flags & TUN_VNET_LE ||
 390		tun_legacy_is_little_endian(tun);
 391}
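/* Summary of the endianness decision above (a sketch, assuming
 * CONFIG_TUN_VNET_CROSS_LE is enabled):
 *
 *	TUN_VNET_LE	TUN_VNET_BE	vnet header byte order
 *	set		-		little endian
 *	clear		set		big endian
 *	clear		clear		legacy virtio default
 *					(host-native byte order)
 */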
 392
 393static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
 394{
 395	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
 396}
 397
 398static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
 399{
 400	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
 401}
 402
 403static inline u32 tun_hashfn(u32 rxhash)
 404{
 405	return rxhash & TUN_MASK_FLOW_ENTRIES;
 406}
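/* Illustrative note: TUN_NUM_FLOW_ENTRIES is a power of two, so the mask
 * above is equivalent to rxhash % TUN_NUM_FLOW_ENTRIES, e.g.
 * 0x12345678 & 1023 == 0x278 == 0x12345678 % 1024.
 */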
 407
 408static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 409{
 410	struct tun_flow_entry *e;
 411
 412	hlist_for_each_entry_rcu(e, head, hash_link) {
 413		if (e->rxhash == rxhash)
 414			return e;
 415	}
 416	return NULL;
 417}
 418
 419static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 420					      struct hlist_head *head,
 421					      u32 rxhash, u16 queue_index)
 422{
 423	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
 424
 425	if (e) {
 426		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
 427			  rxhash, queue_index);
 428		e->updated = jiffies;
 429		e->rxhash = rxhash;
 430		e->rps_rxhash = 0;
 431		e->queue_index = queue_index;
 432		e->tun = tun;
 433		hlist_add_head_rcu(&e->hash_link, head);
 434		++tun->flow_count;
 435	}
 436	return e;
 437}
 438
 439static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 440{
 441	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 442		  e->rxhash, e->queue_index);
 443	hlist_del_rcu(&e->hash_link);
 444	kfree_rcu(e, rcu);
 445	--tun->flow_count;
 446}
 447
 448static void tun_flow_flush(struct tun_struct *tun)
 449{
 450	int i;
 451
 452	spin_lock_bh(&tun->lock);
 453	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 454		struct tun_flow_entry *e;
 455		struct hlist_node *n;
 456
 457		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
 458			tun_flow_delete(tun, e);
 459	}
 460	spin_unlock_bh(&tun->lock);
 461}
 462
 463static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
 464{
 465	int i;
 466
 467	spin_lock_bh(&tun->lock);
 468	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 469		struct tun_flow_entry *e;
 470		struct hlist_node *n;
 471
 472		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 473			if (e->queue_index == queue_index)
 474				tun_flow_delete(tun, e);
 475		}
 476	}
 477	spin_unlock_bh(&tun->lock);
 478}
 479
 480static void tun_flow_cleanup(struct timer_list *t)
 481{
 482	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
 483	unsigned long delay = tun->ageing_time;
 484	unsigned long next_timer = jiffies + delay;
 485	unsigned long count = 0;
 486	int i;
 487
 488	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
 489
 490	spin_lock(&tun->lock);
 491	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 492		struct tun_flow_entry *e;
 493		struct hlist_node *n;
 494
 495		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 496			unsigned long this_timer;
 497
 498			this_timer = e->updated + delay;
 499			if (time_before_eq(this_timer, jiffies)) {
 500				tun_flow_delete(tun, e);
 501				continue;
 502			}
 503			count++;
 504			if (time_before(this_timer, next_timer))
 505				next_timer = this_timer;
 506		}
 507	}
 508
 509	if (count)
 510		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
 511	spin_unlock(&tun->lock);
 512}
 513
 514static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 515			    struct tun_file *tfile)
 516{
 517	struct hlist_head *head;
 518	struct tun_flow_entry *e;
 519	unsigned long delay = tun->ageing_time;
 520	u16 queue_index = tfile->queue_index;
 521
 522	head = &tun->flows[tun_hashfn(rxhash)];
 523
 524	rcu_read_lock();
 525
 526	e = tun_flow_find(head, rxhash);
 527	if (likely(e)) {
 528		/* TODO: keep queueing to old queue until it's empty? */
 529		if (READ_ONCE(e->queue_index) != queue_index)
 530			WRITE_ONCE(e->queue_index, queue_index);
 531		if (e->updated != jiffies)
 532			e->updated = jiffies;
 533		sock_rps_record_flow_hash(e->rps_rxhash);
 534	} else {
 535		spin_lock_bh(&tun->lock);
 536		if (!tun_flow_find(head, rxhash) &&
 537		    tun->flow_count < MAX_TAP_FLOWS)
 538			tun_flow_create(tun, head, rxhash, queue_index);
 539
 540		if (!timer_pending(&tun->flow_gc_timer))
 541			mod_timer(&tun->flow_gc_timer,
 542				  round_jiffies_up(jiffies + delay));
 543		spin_unlock_bh(&tun->lock);
 544	}
 545
 546	rcu_read_unlock();
 547}
 548
  549/*
 550 * Save the hash received in the stack receive path and update the
 551 * flow_hash table accordingly.
 552 */
 553static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 554{
 555	if (unlikely(e->rps_rxhash != hash))
 556		e->rps_rxhash = hash;
 557}
 558
  559/* We try to identify a flow through its rxhash. The reason we do not
  560 * check the rxq number is that some cards (e.g. the 82599) choose the
  561 * rxq based on the txq from which the last packet of the flow was sent.
  562 * As the userspace application moves between processors, we may get a
  563 * different rxq number here.
 564 */
 565static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 566{
 567	struct tun_flow_entry *e;
 568	u32 txq = 0;
 569	u32 numqueues = 0;
 570
 571	numqueues = READ_ONCE(tun->numqueues);
 572
 573	txq = __skb_get_hash_symmetric(skb);
 574	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
 575	if (e) {
 576		tun_flow_save_rps_rxhash(e, txq);
 577		txq = e->queue_index;
 578	} else {
 579		/* use multiply and shift instead of expensive divide */
 580		txq = ((u64)txq * numqueues) >> 32;
 581	}
 582
 583	return txq;
 584}
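/* Worked example of the multiply-and-shift trick above: with
 * numqueues = 4 and a 32-bit hash txq = 0xc0000000,
 *
 *	((u64)0xc0000000 * 4) >> 32 = 0x300000000 >> 32 = 3
 *
 * i.e. the hash is scaled into [0, numqueues) without a division.
 */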
 585
 586static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
 587{
 588	struct tun_prog *prog;
 589	u32 numqueues;
 590	u16 ret = 0;
 591
 592	numqueues = READ_ONCE(tun->numqueues);
 593	if (!numqueues)
 594		return 0;
 595
 596	prog = rcu_dereference(tun->steering_prog);
 597	if (prog)
 598		ret = bpf_prog_run_clear_cb(prog->prog, skb);
 599
 600	return ret % numqueues;
 601}
 602
 603static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 604			    struct net_device *sb_dev)
 605{
 606	struct tun_struct *tun = netdev_priv(dev);
 607	u16 ret;
 608
 609	rcu_read_lock();
 610	if (rcu_dereference(tun->steering_prog))
 611		ret = tun_ebpf_select_queue(tun, skb);
 612	else
 613		ret = tun_automq_select_queue(tun, skb);
 614	rcu_read_unlock();
 615
 616	return ret;
 617}
 618
 619static inline bool tun_not_capable(struct tun_struct *tun)
 620{
 621	const struct cred *cred = current_cred();
 622	struct net *net = dev_net(tun->dev);
 623
 624	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
 625		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
 626		!ns_capable(net->user_ns, CAP_NET_ADMIN);
 627}
 628
 629static void tun_set_real_num_queues(struct tun_struct *tun)
 630{
 631	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
 632	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
 633}
 634
 635static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
 636{
 637	tfile->detached = tun;
 638	list_add_tail(&tfile->next, &tun->disabled);
 639	++tun->numdisabled;
 640}
 641
 642static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
 643{
 644	struct tun_struct *tun = tfile->detached;
 645
 646	tfile->detached = NULL;
 647	list_del_init(&tfile->next);
 648	--tun->numdisabled;
 649	return tun;
 650}
 651
 652void tun_ptr_free(void *ptr)
 653{
 654	if (!ptr)
 655		return;
 656	if (tun_is_xdp_frame(ptr)) {
 657		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
 658
 659		xdp_return_frame(xdpf);
 660	} else {
 661		__skb_array_destroy_skb(ptr);
 662	}
 663}
 664EXPORT_SYMBOL_GPL(tun_ptr_free);
 665
 666static void tun_queue_purge(struct tun_file *tfile)
 667{
 668	void *ptr;
 669
 670	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
 671		tun_ptr_free(ptr);
 672
 673	skb_queue_purge(&tfile->sk.sk_write_queue);
 674	skb_queue_purge(&tfile->sk.sk_error_queue);
 675}
 676
 677static void __tun_detach(struct tun_file *tfile, bool clean)
 678{
 679	struct tun_file *ntfile;
 680	struct tun_struct *tun;
 681
 682	tun = rtnl_dereference(tfile->tun);
 683
 684	if (tun && clean) {
 685		tun_napi_disable(tfile);
 686		tun_napi_del(tfile);
 687	}
 688
 689	if (tun && !tfile->detached) {
 690		u16 index = tfile->queue_index;
 691		BUG_ON(index >= tun->numqueues);
 692
 693		rcu_assign_pointer(tun->tfiles[index],
 694				   tun->tfiles[tun->numqueues - 1]);
 695		ntfile = rtnl_dereference(tun->tfiles[index]);
 696		ntfile->queue_index = index;
 697		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
 698				   NULL);
 699
 700		--tun->numqueues;
 701		if (clean) {
 702			RCU_INIT_POINTER(tfile->tun, NULL);
 703			sock_put(&tfile->sk);
 704		} else
 705			tun_disable_queue(tun, tfile);
 706
 707		synchronize_net();
 708		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
 709		/* Drop read queue */
 710		tun_queue_purge(tfile);
 711		tun_set_real_num_queues(tun);
 712	} else if (tfile->detached && clean) {
 713		tun = tun_enable_queue(tfile);
 714		sock_put(&tfile->sk);
 715	}
 716
 717	if (clean) {
 718		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
 719			netif_carrier_off(tun->dev);
 720
 721			if (!(tun->flags & IFF_PERSIST) &&
 722			    tun->dev->reg_state == NETREG_REGISTERED)
 723				unregister_netdevice(tun->dev);
 724		}
 725		if (tun)
 726			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 727		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
 728		sock_put(&tfile->sk);
 729	}
 730}
 731
 732static void tun_detach(struct tun_file *tfile, bool clean)
 733{
 734	struct tun_struct *tun;
 735	struct net_device *dev;
 736
 737	rtnl_lock();
 738	tun = rtnl_dereference(tfile->tun);
 739	dev = tun ? tun->dev : NULL;
 740	__tun_detach(tfile, clean);
 741	if (dev)
 742		netdev_state_change(dev);
 743	rtnl_unlock();
 744}
 745
 746static void tun_detach_all(struct net_device *dev)
 747{
 748	struct tun_struct *tun = netdev_priv(dev);
 749	struct tun_file *tfile, *tmp;
 750	int i, n = tun->numqueues;
 751
 752	for (i = 0; i < n; i++) {
 753		tfile = rtnl_dereference(tun->tfiles[i]);
 754		BUG_ON(!tfile);
 755		tun_napi_disable(tfile);
 756		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 757		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 758		RCU_INIT_POINTER(tfile->tun, NULL);
 759		--tun->numqueues;
 760	}
 761	list_for_each_entry(tfile, &tun->disabled, next) {
 762		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
 763		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
 764		RCU_INIT_POINTER(tfile->tun, NULL);
 765	}
 766	BUG_ON(tun->numqueues != 0);
 767
 768	synchronize_net();
 769	for (i = 0; i < n; i++) {
 770		tfile = rtnl_dereference(tun->tfiles[i]);
 771		tun_napi_del(tfile);
 772		/* Drop read queue */
 773		tun_queue_purge(tfile);
 774		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 775		sock_put(&tfile->sk);
 776	}
 777	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 778		tun_enable_queue(tfile);
 779		tun_queue_purge(tfile);
 780		xdp_rxq_info_unreg(&tfile->xdp_rxq);
 781		sock_put(&tfile->sk);
 782	}
 783	BUG_ON(tun->numdisabled != 0);
 784
 785	if (tun->flags & IFF_PERSIST)
 786		module_put(THIS_MODULE);
 787}
 788
 789static int tun_attach(struct tun_struct *tun, struct file *file,
 790		      bool skip_filter, bool napi, bool napi_frags,
 791		      bool publish_tun)
 792{
 793	struct tun_file *tfile = file->private_data;
 794	struct net_device *dev = tun->dev;
 795	int err;
 796
 797	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
 798	if (err < 0)
 799		goto out;
 800
 801	err = -EINVAL;
 802	if (rtnl_dereference(tfile->tun) && !tfile->detached)
 803		goto out;
 804
 805	err = -EBUSY;
 806	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
 807		goto out;
 808
 809	err = -E2BIG;
 810	if (!tfile->detached &&
 811	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
 812		goto out;
 813
 814	err = 0;
 815
 816	/* Re-attach the filter to persist device */
 817	if (!skip_filter && (tun->filter_attached == true)) {
 818		lock_sock(tfile->socket.sk);
 819		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
 820		release_sock(tfile->socket.sk);
  821		if (err < 0)
 822			goto out;
 823	}
 824
 825	if (!tfile->detached &&
 826	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
 827			    GFP_KERNEL, tun_ptr_free)) {
 828		err = -ENOMEM;
 829		goto out;
 830	}
 831
 832	tfile->queue_index = tun->numqueues;
 833	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
 834
 835	if (tfile->detached) {
 836		/* Re-attach detached tfile, updating XDP queue_index */
 837		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
 838
 839		if (tfile->xdp_rxq.queue_index    != tfile->queue_index)
 840			tfile->xdp_rxq.queue_index = tfile->queue_index;
 841	} else {
 842		/* Setup XDP RX-queue info, for new tfile getting attached */
 843		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
 844				       tun->dev, tfile->queue_index);
 845		if (err < 0)
 846			goto out;
 847		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
 848						 MEM_TYPE_PAGE_SHARED, NULL);
 849		if (err < 0) {
 850			xdp_rxq_info_unreg(&tfile->xdp_rxq);
 851			goto out;
 852		}
 853		err = 0;
 854	}
 855
 856	if (tfile->detached) {
 857		tun_enable_queue(tfile);
 858	} else {
 859		sock_hold(&tfile->sk);
 860		tun_napi_init(tun, tfile, napi, napi_frags);
 861	}
 862
 863	if (rtnl_dereference(tun->xdp_prog))
 864		sock_set_flag(&tfile->sk, SOCK_XDP);
 865
 866	/* device is allowed to go away first, so no need to hold extra
 867	 * refcnt.
 868	 */
 869
 870	/* Publish tfile->tun and tun->tfiles only after we've fully
 871	 * initialized tfile; otherwise we risk using half-initialized
 872	 * object.
 873	 */
 874	if (publish_tun)
 875		rcu_assign_pointer(tfile->tun, tun);
 876	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 877	tun->numqueues++;
 878	tun_set_real_num_queues(tun);
 879out:
 880	return err;
 881}
 882
 883static struct tun_struct *tun_get(struct tun_file *tfile)
 884{
 885	struct tun_struct *tun;
 886
 887	rcu_read_lock();
 888	tun = rcu_dereference(tfile->tun);
 889	if (tun)
 890		dev_hold(tun->dev);
 891	rcu_read_unlock();
 892
 893	return tun;
 894}
 895
 896static void tun_put(struct tun_struct *tun)
 897{
 898	dev_put(tun->dev);
 899}
 900
 901/* TAP filtering */
 902static void addr_hash_set(u32 *mask, const u8 *addr)
 903{
 904	int n = ether_crc(ETH_ALEN, addr) >> 26;
 905	mask[n >> 5] |= (1 << (n & 31));
 906}
 907
 908static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
 909{
 910	int n = ether_crc(ETH_ALEN, addr) >> 26;
 911	return mask[n >> 5] & (1 << (n & 31));
 912}
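/* Illustrative note on the arithmetic above: ether_crc() returns a 32-bit
 * CRC, and ">> 26" keeps its top six bits, giving a bucket n in [0, 63].
 * "n >> 5" then selects one of the two u32 words of the mask and
 * "n & 31" the bit within it; e.g. n = 37 maps to mask[1], bit 5.
 */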
 913
 914static int update_filter(struct tap_filter *filter, void __user *arg)
 915{
 916	struct { u8 u[ETH_ALEN]; } *addr;
 917	struct tun_filter uf;
 918	int err, alen, n, nexact;
 919
 920	if (copy_from_user(&uf, arg, sizeof(uf)))
 921		return -EFAULT;
 922
 923	if (!uf.count) {
 924		/* Disabled */
 925		filter->count = 0;
 926		return 0;
 927	}
 928
 929	alen = ETH_ALEN * uf.count;
 930	addr = memdup_user(arg + sizeof(uf), alen);
 931	if (IS_ERR(addr))
 932		return PTR_ERR(addr);
 933
  934	/* The filter is updated without holding any locks, which is
  935	 * perfectly safe: we disable it first, and in the worst
  936	 * case we'll accept a few undesired packets. */
 937	filter->count = 0;
 938	wmb();
 939
 940	/* Use first set of addresses as an exact filter */
 941	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
 942		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
 943
 944	nexact = n;
 945
  946	/* Remaining multicast addresses are hashed; a
  947	 * unicast address will leave the filter disabled. */
 948	memset(filter->mask, 0, sizeof(filter->mask));
 949	for (; n < uf.count; n++) {
 950		if (!is_multicast_ether_addr(addr[n].u)) {
 951			err = 0; /* no filter */
 952			goto free_addr;
 953		}
 954		addr_hash_set(filter->mask, addr[n].u);
 955	}
 956
 957	/* For ALLMULTI just set the mask to all ones.
 958	 * This overrides the mask populated above. */
 959	if ((uf.flags & TUN_FLT_ALLMULTI))
 960		memset(filter->mask, ~0, sizeof(filter->mask));
 961
 962	/* Now enable the filter */
 963	wmb();
 964	filter->count = nexact;
 965
 966	/* Return the number of exact filters */
 967	err = nexact;
 968free_addr:
 969	kfree(addr);
 970	return err;
 971}
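/* A minimal userspace sketch (illustrative only, error handling omitted)
 * of programming this filter through the TUNSETTXFILTER ioctl. With
 * count <= FLT_EXACT_COUNT both addresses land in the exact-match table;
 * only addresses beyond the first FLT_EXACT_COUNT are hashed, and those
 * must be multicast.
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = {
 *		.uf = { .flags = 0, .count = 2 },
 *		.addrs = {
 *			{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 },
 *			{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *		},
 *	};
 *
 *	ioctl(tap_fd, TUNSETTXFILTER, &req);
 */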
 972
 973/* Returns: 0 - drop, !=0 - accept */
 974static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
 975{
  976	/* Cannot use eth_hdr(skb) here because the skb's mac header is
  977	 * not set up correctly at this point. */
 978	struct ethhdr *eh = (struct ethhdr *) skb->data;
 979	int i;
 980
 981	/* Exact match */
 982	for (i = 0; i < filter->count; i++)
 983		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
 984			return 1;
 985
 986	/* Inexact match (multicast only) */
 987	if (is_multicast_ether_addr(eh->h_dest))
 988		return addr_hash_test(filter->mask, eh->h_dest);
 989
 990	return 0;
 991}
 992
 993/*
 994 * Checks whether the packet is accepted or not.
 995 * Returns: 0 - drop, !=0 - accept
 996 */
 997static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 998{
 999	if (!filter->count)
1000		return 1;
1001
1002	return run_filter(filter, skb);
1003}
1004
1005/* Network device part of the driver */
1006
1007static const struct ethtool_ops tun_ethtool_ops;
1008
1009/* Net device detach from fd. */
1010static void tun_net_uninit(struct net_device *dev)
1011{
1012	tun_detach_all(dev);
1013}
1014
1015/* Net device open. */
1016static int tun_net_open(struct net_device *dev)
1017{
1018	netif_tx_start_all_queues(dev);
1019
1020	return 0;
1021}
1022
1023/* Net device close. */
1024static int tun_net_close(struct net_device *dev)
1025{
1026	netif_tx_stop_all_queues(dev);
1027	return 0;
1028}
1029
1030/* Net device start xmit */
1031static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1032{
1033#ifdef CONFIG_RPS
1034	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1035		/* Select queue was not called for the skbuff, so we extract the
1036		 * RPS hash and save it into the flow_table here.
1037		 */
1038		struct tun_flow_entry *e;
1039		__u32 rxhash;
1040
1041		rxhash = __skb_get_hash_symmetric(skb);
1042		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1043		if (e)
1044			tun_flow_save_rps_rxhash(e, rxhash);
1045	}
1046#endif
1047}
1048
1049static unsigned int run_ebpf_filter(struct tun_struct *tun,
1050				    struct sk_buff *skb,
1051				    int len)
1052{
1053	struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1054
1055	if (prog)
1056		len = bpf_prog_run_clear_cb(prog->prog, skb);
1057
1058	return len;
1059}
1060
1061/* Net device start xmit */
1062static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1063{
1064	struct tun_struct *tun = netdev_priv(dev);
1065	int txq = skb->queue_mapping;
1066	struct tun_file *tfile;
1067	int len = skb->len;
1068
1069	rcu_read_lock();
1070	tfile = rcu_dereference(tun->tfiles[txq]);
1071
1072	/* Drop packet if interface is not attached */
1073	if (!tfile)
1074		goto drop;
1075
1076	if (!rcu_dereference(tun->steering_prog))
1077		tun_automq_xmit(tun, skb);
1078
1079	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
1080
1081	BUG_ON(!tfile);
1082
1083	/* Drop if the filter does not like it.
1084	 * This is a noop if the filter is disabled.
1085	 * Filter can be enabled only for the TAP devices. */
1086	if (!check_filter(&tun->txflt, skb))
1087		goto drop;
1088
1089	if (tfile->socket.sk->sk_filter &&
1090	    sk_filter(tfile->socket.sk, skb))
1091		goto drop;
1092
1093	len = run_ebpf_filter(tun, skb, len);
1094	if (len == 0 || pskb_trim(skb, len))
1095		goto drop;
1096
1097	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
1098		goto drop;
1099
1100	skb_tx_timestamp(skb);
1101
1102	/* Orphan the skb - required as we might hang on to it
1103	 * for indefinite time.
1104	 */
1105	skb_orphan(skb);
1106
1107	nf_reset_ct(skb);
1108
1109	if (ptr_ring_produce(&tfile->tx_ring, skb))
1110		goto drop;
1111
1112	/* Notify and wake up reader process */
1113	if (tfile->flags & TUN_FASYNC)
1114		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1115	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1116
1117	rcu_read_unlock();
1118	return NETDEV_TX_OK;
1119
1120drop:
1121	this_cpu_inc(tun->pcpu_stats->tx_dropped);
1122	skb_tx_error(skb);
1123	kfree_skb(skb);
1124	rcu_read_unlock();
1125	return NET_XMIT_DROP;
1126}
1127
1128static void tun_net_mclist(struct net_device *dev)
1129{
1130	/*
1131	 * This callback is supposed to deal with mc filter in
1132	 * _rx_ path and has nothing to do with the _tx_ path.
1133	 * In rx path we always accept everything userspace gives us.
1134	 */
1135}
1136
1137static netdev_features_t tun_net_fix_features(struct net_device *dev,
1138	netdev_features_t features)
1139{
1140	struct tun_struct *tun = netdev_priv(dev);
1141
1142	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1143}
1144
1145static void tun_set_headroom(struct net_device *dev, int new_hr)
1146{
1147	struct tun_struct *tun = netdev_priv(dev);
1148
1149	if (new_hr < NET_SKB_PAD)
1150		new_hr = NET_SKB_PAD;
1151
1152	tun->align = new_hr;
1153}
1154
1155static void
1156tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1157{
1158	u32 rx_dropped = 0, tx_dropped = 0, rx_frame_errors = 0;
1159	struct tun_struct *tun = netdev_priv(dev);
1160	struct tun_pcpu_stats *p;
1161	int i;
1162
1163	for_each_possible_cpu(i) {
1164		u64 rxpackets, rxbytes, txpackets, txbytes;
1165		unsigned int start;
1166
1167		p = per_cpu_ptr(tun->pcpu_stats, i);
1168		do {
1169			start = u64_stats_fetch_begin(&p->syncp);
1170			rxpackets	= p->rx_packets;
1171			rxbytes		= p->rx_bytes;
1172			txpackets	= p->tx_packets;
1173			txbytes		= p->tx_bytes;
1174		} while (u64_stats_fetch_retry(&p->syncp, start));
1175
1176		stats->rx_packets	+= rxpackets;
1177		stats->rx_bytes		+= rxbytes;
1178		stats->tx_packets	+= txpackets;
1179		stats->tx_bytes		+= txbytes;
1180
1181		/* u32 counters */
1182		rx_dropped	+= p->rx_dropped;
1183		rx_frame_errors	+= p->rx_frame_errors;
1184		tx_dropped	+= p->tx_dropped;
1185	}
1186	stats->rx_dropped  = rx_dropped;
1187	stats->rx_frame_errors = rx_frame_errors;
1188	stats->tx_dropped = tx_dropped;
1189}
1190
1191static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1192		       struct netlink_ext_ack *extack)
1193{
1194	struct tun_struct *tun = netdev_priv(dev);
1195	struct tun_file *tfile;
1196	struct bpf_prog *old_prog;
1197	int i;
1198
1199	old_prog = rtnl_dereference(tun->xdp_prog);
1200	rcu_assign_pointer(tun->xdp_prog, prog);
1201	if (old_prog)
1202		bpf_prog_put(old_prog);
1203
1204	for (i = 0; i < tun->numqueues; i++) {
1205		tfile = rtnl_dereference(tun->tfiles[i]);
1206		if (prog)
1207			sock_set_flag(&tfile->sk, SOCK_XDP);
1208		else
1209			sock_reset_flag(&tfile->sk, SOCK_XDP);
1210	}
1211	list_for_each_entry(tfile, &tun->disabled, next) {
1212		if (prog)
1213			sock_set_flag(&tfile->sk, SOCK_XDP);
1214		else
1215			sock_reset_flag(&tfile->sk, SOCK_XDP);
1216	}
1217
1218	return 0;
1219}
1220
1221static u32 tun_xdp_query(struct net_device *dev)
1222{
1223	struct tun_struct *tun = netdev_priv(dev);
1224	const struct bpf_prog *xdp_prog;
1225
1226	xdp_prog = rtnl_dereference(tun->xdp_prog);
1227	if (xdp_prog)
1228		return xdp_prog->aux->id;
1229
1230	return 0;
1231}
1232
1233static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1234{
1235	switch (xdp->command) {
1236	case XDP_SETUP_PROG:
1237		return tun_xdp_set(dev, xdp->prog, xdp->extack);
1238	case XDP_QUERY_PROG:
1239		xdp->prog_id = tun_xdp_query(dev);
1240		return 0;
1241	default:
1242		return -EINVAL;
1243	}
1244}
1245
1246static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1247{
1248	if (new_carrier) {
1249		struct tun_struct *tun = netdev_priv(dev);
1250
1251		if (!tun->numqueues)
1252			return -EPERM;
1253
1254		netif_carrier_on(dev);
1255	} else {
1256		netif_carrier_off(dev);
1257	}
1258	return 0;
1259}
1260
1261static const struct net_device_ops tun_netdev_ops = {
1262	.ndo_uninit		= tun_net_uninit,
1263	.ndo_open		= tun_net_open,
1264	.ndo_stop		= tun_net_close,
1265	.ndo_start_xmit		= tun_net_xmit,
1266	.ndo_fix_features	= tun_net_fix_features,
1267	.ndo_select_queue	= tun_select_queue,
1268	.ndo_set_rx_headroom	= tun_set_headroom,
1269	.ndo_get_stats64	= tun_net_get_stats64,
1270	.ndo_change_carrier	= tun_net_change_carrier,
1271};
1272
1273static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1274{
1275	/* Notify and wake up reader process */
1276	if (tfile->flags & TUN_FASYNC)
1277		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1278	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1279}
1280
1281static int tun_xdp_xmit(struct net_device *dev, int n,
1282			struct xdp_frame **frames, u32 flags)
1283{
1284	struct tun_struct *tun = netdev_priv(dev);
1285	struct tun_file *tfile;
1286	u32 numqueues;
1287	int drops = 0;
1288	int cnt = n;
1289	int i;
1290
1291	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1292		return -EINVAL;
1293
1294	rcu_read_lock();
1295
1296resample:
1297	numqueues = READ_ONCE(tun->numqueues);
1298	if (!numqueues) {
1299		rcu_read_unlock();
1300		return -ENXIO; /* Caller will free/return all frames */
1301	}
1302
1303	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1304					    numqueues]);
1305	if (unlikely(!tfile))
1306		goto resample;
1307
1308	spin_lock(&tfile->tx_ring.producer_lock);
1309	for (i = 0; i < n; i++) {
1310		struct xdp_frame *xdp = frames[i];
 1311		/* Encode the XDP flag into the lowest bit so the consumer can
 1312		 * distinguish an XDP buffer from an sk_buff.
1313		 */
1314		void *frame = tun_xdp_to_ptr(xdp);
1315
1316		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1317			this_cpu_inc(tun->pcpu_stats->tx_dropped);
1318			xdp_return_frame_rx_napi(xdp);
1319			drops++;
1320		}
1321	}
1322	spin_unlock(&tfile->tx_ring.producer_lock);
1323
1324	if (flags & XDP_XMIT_FLUSH)
1325		__tun_xdp_flush_tfile(tfile);
1326
1327	rcu_read_unlock();
1328	return cnt - drops;
1329}
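/* Illustrative note on the contract above: the return value is the number
 * of frames actually queued. Frames that could not be produced onto the
 * ring have already been freed via xdp_return_frame_rx_napi(), so for
 * n = 4 with one failed produce the caller sees 3.
 */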
1330
1331static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1332{
1333	struct xdp_frame *frame = convert_to_xdp_frame(xdp);
1334
1335	if (unlikely(!frame))
1336		return -EOVERFLOW;
1337
1338	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1339}
1340
1341static const struct net_device_ops tap_netdev_ops = {
1342	.ndo_uninit		= tun_net_uninit,
1343	.ndo_open		= tun_net_open,
1344	.ndo_stop		= tun_net_close,
1345	.ndo_start_xmit		= tun_net_xmit,
1346	.ndo_fix_features	= tun_net_fix_features,
1347	.ndo_set_rx_mode	= tun_net_mclist,
1348	.ndo_set_mac_address	= eth_mac_addr,
1349	.ndo_validate_addr	= eth_validate_addr,
1350	.ndo_select_queue	= tun_select_queue,
1351	.ndo_features_check	= passthru_features_check,
1352	.ndo_set_rx_headroom	= tun_set_headroom,
1353	.ndo_get_stats64	= tun_net_get_stats64,
1354	.ndo_bpf		= tun_xdp,
1355	.ndo_xdp_xmit		= tun_xdp_xmit,
1356	.ndo_change_carrier	= tun_net_change_carrier,
1357};
1358
1359static void tun_flow_init(struct tun_struct *tun)
1360{
1361	int i;
1362
1363	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1364		INIT_HLIST_HEAD(&tun->flows[i]);
1365
1366	tun->ageing_time = TUN_FLOW_EXPIRE;
1367	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1368	mod_timer(&tun->flow_gc_timer,
1369		  round_jiffies_up(jiffies + tun->ageing_time));
1370}
1371
1372static void tun_flow_uninit(struct tun_struct *tun)
1373{
1374	del_timer_sync(&tun->flow_gc_timer);
1375	tun_flow_flush(tun);
1376}
1377
1378#define MIN_MTU 68
1379#define MAX_MTU 65535
1380
1381/* Initialize net device. */
1382static void tun_net_init(struct net_device *dev)
1383{
1384	struct tun_struct *tun = netdev_priv(dev);
1385
1386	switch (tun->flags & TUN_TYPE_MASK) {
1387	case IFF_TUN:
1388		dev->netdev_ops = &tun_netdev_ops;
1389
1390		/* Point-to-Point TUN Device */
1391		dev->hard_header_len = 0;
1392		dev->addr_len = 0;
1393		dev->mtu = 1500;
1394
1395		/* Zero header length */
1396		dev->type = ARPHRD_NONE;
1397		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1398		break;
1399
1400	case IFF_TAP:
1401		dev->netdev_ops = &tap_netdev_ops;
1402		/* Ethernet TAP Device */
1403		ether_setup(dev);
1404		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1405		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1406
1407		eth_hw_addr_random(dev);
1408
1409		break;
1410	}
1411
1412	dev->min_mtu = MIN_MTU;
1413	dev->max_mtu = MAX_MTU - dev->hard_header_len;
1414}
1415
1416static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1417{
1418	struct sock *sk = tfile->socket.sk;
1419
1420	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1421}
1422
1423/* Character device part */
1424
1425/* Poll */
1426static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1427{
1428	struct tun_file *tfile = file->private_data;
1429	struct tun_struct *tun = tun_get(tfile);
1430	struct sock *sk;
1431	__poll_t mask = 0;
1432
1433	if (!tun)
1434		return EPOLLERR;
1435
1436	sk = tfile->socket.sk;
1437
1438	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
1439
1440	poll_wait(file, sk_sleep(sk), wait);
1441
1442	if (!ptr_ring_empty(&tfile->tx_ring))
1443		mask |= EPOLLIN | EPOLLRDNORM;
1444
 1445	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable, to
 1446	 * guarantee that EPOLLOUT is raised either here or by
 1447	 * tun_sock_write_space(). That way the process still gets a
 1448	 * notification after it writes to a down device and meets -EIO.
1449	 */
1450	if (tun_sock_writeable(tun, tfile) ||
1451	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1452	     tun_sock_writeable(tun, tfile)))
1453		mask |= EPOLLOUT | EPOLLWRNORM;
1454
1455	if (tun->dev->reg_state != NETREG_REGISTERED)
1456		mask = EPOLLERR;
1457
1458	tun_put(tun);
1459	return mask;
1460}
1461
1462static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1463					    size_t len,
1464					    const struct iov_iter *it)
1465{
1466	struct sk_buff *skb;
1467	size_t linear;
1468	int err;
1469	int i;
1470
1471	if (it->nr_segs > MAX_SKB_FRAGS + 1)
1472		return ERR_PTR(-ENOMEM);
1473
1474	local_bh_disable();
1475	skb = napi_get_frags(&tfile->napi);
1476	local_bh_enable();
1477	if (!skb)
1478		return ERR_PTR(-ENOMEM);
1479
1480	linear = iov_iter_single_seg_count(it);
1481	err = __skb_grow(skb, linear);
1482	if (err)
1483		goto free;
1484
1485	skb->len = len;
1486	skb->data_len = len - linear;
1487	skb->truesize += skb->data_len;
1488
1489	for (i = 1; i < it->nr_segs; i++) {
1490		size_t fragsz = it->iov[i].iov_len;
1491		struct page *page;
1492		void *frag;
1493
1494		if (fragsz == 0 || fragsz > PAGE_SIZE) {
1495			err = -EINVAL;
1496			goto free;
1497		}
1498		frag = netdev_alloc_frag(fragsz);
1499		if (!frag) {
1500			err = -ENOMEM;
1501			goto free;
1502		}
1503		page = virt_to_head_page(frag);
1504		skb_fill_page_desc(skb, i - 1, page,
1505				   frag - page_address(page), fragsz);
1506	}
1507
1508	return skb;
1509free:
1510	/* frees skb and all frags allocated with napi_alloc_frag() */
1511	napi_free_frags(&tfile->napi);
1512	return ERR_PTR(err);
1513}
1514
1515/* prepad is the amount to reserve at front.  len is length after that.
1516 * linear is a hint as to how much to copy (usually headers). */
1517static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1518				     size_t prepad, size_t len,
1519				     size_t linear, int noblock)
1520{
1521	struct sock *sk = tfile->socket.sk;
1522	struct sk_buff *skb;
1523	int err;
1524
1525	/* Under a page?  Don't bother with paged skb. */
1526	if (prepad + len < PAGE_SIZE || !linear)
1527		linear = len;
1528
1529	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1530				   &err, 0);
1531	if (!skb)
1532		return ERR_PTR(err);
1533
1534	skb_reserve(skb, prepad);
1535	skb_put(skb, linear);
1536	skb->data_len = len - linear;
1537	skb->len += len - linear;
1538
1539	return skb;
1540}
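/* Illustrative example (4K pages assumed): for prepad = 0, len = 5000 and
 * linear = 256, the test above leaves linear at 256, so the skb gets a
 * 256-byte linear head plus 4744 bytes of paged data; for len = 1000 the
 * first branch fires and the whole packet becomes linear.
 */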
1541
1542static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1543			   struct sk_buff *skb, int more)
1544{
1545	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1546	struct sk_buff_head process_queue;
1547	u32 rx_batched = tun->rx_batched;
1548	bool rcv = false;
1549
1550	if (!rx_batched || (!more && skb_queue_empty(queue))) {
1551		local_bh_disable();
1552		skb_record_rx_queue(skb, tfile->queue_index);
1553		netif_receive_skb(skb);
1554		local_bh_enable();
1555		return;
1556	}
1557
1558	spin_lock(&queue->lock);
1559	if (!more || skb_queue_len(queue) == rx_batched) {
1560		__skb_queue_head_init(&process_queue);
1561		skb_queue_splice_tail_init(queue, &process_queue);
1562		rcv = true;
1563	} else {
1564		__skb_queue_tail(queue, skb);
1565	}
1566	spin_unlock(&queue->lock);
1567
1568	if (rcv) {
1569		struct sk_buff *nskb;
1570
1571		local_bh_disable();
1572		while ((nskb = __skb_dequeue(&process_queue))) {
1573			skb_record_rx_queue(nskb, tfile->queue_index);
1574			netif_receive_skb(nskb);
1575		}
1576		skb_record_rx_queue(skb, tfile->queue_index);
1577		netif_receive_skb(skb);
1578		local_bh_enable();
1579	}
1580}
1581
1582static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1583			      int len, int noblock, bool zerocopy)
1584{
1585	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1586		return false;
1587
1588	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1589		return false;
1590
1591	if (!noblock)
1592		return false;
1593
1594	if (zerocopy)
1595		return false;
1596
1597	if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
1598	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1599		return false;
1600
1601	return true;
1602}
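/* Illustrative arithmetic for the final check above (4K pages assumed):
 * a 1500-byte frame plus TUN_RX_PAD and the aligned skb_shared_info fits
 * within one page, so the single-page fast path is usable; a 9000-byte
 * jumbo frame does not fit and falls back to the ordinary skb path.
 */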
1603
1604static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1605				       struct page_frag *alloc_frag, char *buf,
1606				       int buflen, int len, int pad)
1607{
1608	struct sk_buff *skb = build_skb(buf, buflen);
1609
1610	if (!skb)
1611		return ERR_PTR(-ENOMEM);
1612
1613	skb_reserve(skb, pad);
1614	skb_put(skb, len);
1615	skb_set_owner_w(skb, tfile->socket.sk);
1616
1617	get_page(alloc_frag->page);
1618	alloc_frag->offset += buflen;
1619
1620	return skb;
1621}
1622
1623static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1624		       struct xdp_buff *xdp, u32 act)
1625{
1626	int err;
1627
1628	switch (act) {
1629	case XDP_REDIRECT:
1630		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1631		if (err)
1632			return err;
1633		break;
1634	case XDP_TX:
1635		err = tun_xdp_tx(tun->dev, xdp);
1636		if (err < 0)
1637			return err;
1638		break;
1639	case XDP_PASS:
1640		break;
1641	default:
1642		bpf_warn_invalid_xdp_action(act);
1643		/* fall through */
1644	case XDP_ABORTED:
1645		trace_xdp_exception(tun->dev, xdp_prog, act);
1646		/* fall through */
1647	case XDP_DROP:
1648		this_cpu_inc(tun->pcpu_stats->rx_dropped);
1649		break;
1650	}
1651
1652	return act;
1653}
1654
1655static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1656				     struct tun_file *tfile,
1657				     struct iov_iter *from,
1658				     struct virtio_net_hdr *hdr,
1659				     int len, int *skb_xdp)
1660{
1661	struct page_frag *alloc_frag = &current->task_frag;
1662	struct bpf_prog *xdp_prog;
1663	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1664	char *buf;
1665	size_t copied;
1666	int pad = TUN_RX_PAD;
1667	int err = 0;
1668
1669	rcu_read_lock();
1670	xdp_prog = rcu_dereference(tun->xdp_prog);
1671	if (xdp_prog)
1672		pad += XDP_PACKET_HEADROOM;
1673	buflen += SKB_DATA_ALIGN(len + pad);
1674	rcu_read_unlock();
1675
1676	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1677	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1678		return ERR_PTR(-ENOMEM);
1679
1680	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1681	copied = copy_page_from_iter(alloc_frag->page,
1682				     alloc_frag->offset + pad,
1683				     len, from);
1684	if (copied != len)
1685		return ERR_PTR(-EFAULT);
1686
 1687	/* There's a small window in which an XDP program may be set after
 1688	 * the check of xdp_prog above. This should be rare, and for
 1689	 * simplicity we do XDP on the skb in case the headroom is not enough.
1690	 */
1691	if (hdr->gso_type || !xdp_prog) {
1692		*skb_xdp = 1;
1693		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1694				       pad);
1695	}
1696
1697	*skb_xdp = 0;
1698
1699	local_bh_disable();
1700	rcu_read_lock();
1701	xdp_prog = rcu_dereference(tun->xdp_prog);
1702	if (xdp_prog) {
1703		struct xdp_buff xdp;
1704		u32 act;
1705
1706		xdp.data_hard_start = buf;
1707		xdp.data = buf + pad;
1708		xdp_set_data_meta_invalid(&xdp);
1709		xdp.data_end = xdp.data + len;
1710		xdp.rxq = &tfile->xdp_rxq;
1711
1712		act = bpf_prog_run_xdp(xdp_prog, &xdp);
1713		if (act == XDP_REDIRECT || act == XDP_TX) {
1714			get_page(alloc_frag->page);
1715			alloc_frag->offset += buflen;
1716		}
1717		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1718		if (err < 0)
1719			goto err_xdp;
1720		if (err == XDP_REDIRECT)
1721			xdp_do_flush_map();
1722		if (err != XDP_PASS)
1723			goto out;
1724
1725		pad = xdp.data - xdp.data_hard_start;
1726		len = xdp.data_end - xdp.data;
1727	}
1728	rcu_read_unlock();
1729	local_bh_enable();
1730
1731	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1732
1733err_xdp:
1734	put_page(alloc_frag->page);
1735out:
1736	rcu_read_unlock();
1737	local_bh_enable();
1738	return NULL;
1739}
1740
1741/* Get packet from user space buffer */
1742static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1743			    void *msg_control, struct iov_iter *from,
1744			    int noblock, bool more)
1745{
1746	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1747	struct sk_buff *skb;
1748	size_t total_len = iov_iter_count(from);
1749	size_t len = total_len, align = tun->align, linear;
1750	struct virtio_net_hdr gso = { 0 };
1751	struct tun_pcpu_stats *stats;
1752	int good_linear;
1753	int copylen;
1754	bool zerocopy = false;
1755	int err;
1756	u32 rxhash = 0;
1757	int skb_xdp = 1;
1758	bool frags = tun_napi_frags_enabled(tfile);
1759
1760	if (!(tun->flags & IFF_NO_PI)) {
1761		if (len < sizeof(pi))
1762			return -EINVAL;
1763		len -= sizeof(pi);
1764
1765		if (!copy_from_iter_full(&pi, sizeof(pi), from))
1766			return -EFAULT;
1767	}
1768
1769	if (tun->flags & IFF_VNET_HDR) {
1770		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1771
1772		if (len < vnet_hdr_sz)
1773			return -EINVAL;
1774		len -= vnet_hdr_sz;
1775
1776		if (!copy_from_iter_full(&gso, sizeof(gso), from))
1777			return -EFAULT;
1778
1779		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1780		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1781			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
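		/* Example (illustrative): for TCP over IPv4, csum_start = 34
		 * and csum_offset = 16 place the 2-byte checksum field at
		 * bytes 50..51, so hdr_len is raised to at least 52 to keep
		 * the whole field inside the declared header.
		 */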
1782
1783		if (tun16_to_cpu(tun, gso.hdr_len) > len)
1784			return -EINVAL;
1785		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1786	}
1787
1788	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1789		align += NET_IP_ALIGN;
1790		if (unlikely(len < ETH_HLEN ||
1791			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1792			return -EINVAL;
1793	}
1794
1795	good_linear = SKB_MAX_HEAD(align);
1796
1797	if (msg_control) {
1798		struct iov_iter i = *from;
1799
 1800		/* There are 256 bytes to be copied into the skb, so there is
 1801		 * enough room to expand the skb head in case it is needed.
1802		 * The rest of the buffer is mapped from userspace.
1803		 */
1804		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1805		if (copylen > good_linear)
1806			copylen = good_linear;
1807		linear = copylen;
1808		iov_iter_advance(&i, copylen);
1809		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1810			zerocopy = true;
1811	}
1812
1813	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
 1814		/* For packets that are not easy to process here
 1815		 * (e.g. GSO or jumbo packets), we do XDP after the
 1816		 * skb has been created, via the generic XDP routine.
1817		 */
1818		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1819		if (IS_ERR(skb)) {
1820			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1821			return PTR_ERR(skb);
1822		}
1823		if (!skb)
1824			return total_len;
1825	} else {
1826		if (!zerocopy) {
1827			copylen = len;
1828			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1829				linear = good_linear;
1830			else
1831				linear = tun16_to_cpu(tun, gso.hdr_len);
1832		}
1833
1834		if (frags) {
1835			mutex_lock(&tfile->napi_mutex);
1836			skb = tun_napi_alloc_frags(tfile, copylen, from);
1837			/* tun_napi_alloc_frags() enforces a layout for the skb.
1838			 * If zerocopy is enabled, then this layout will be
1839			 * overwritten by zerocopy_sg_from_iter().
1840			 */
1841			zerocopy = false;
1842		} else {
1843			skb = tun_alloc_skb(tfile, align, copylen, linear,
1844					    noblock);
1845		}
1846
1847		if (IS_ERR(skb)) {
1848			if (PTR_ERR(skb) != -EAGAIN)
1849				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1850			if (frags)
1851				mutex_unlock(&tfile->napi_mutex);
1852			return PTR_ERR(skb);
1853		}
1854
1855		if (zerocopy)
1856			err = zerocopy_sg_from_iter(skb, from);
1857		else
1858			err = skb_copy_datagram_from_iter(skb, 0, from, len);
1859
1860		if (err) {
1861			err = -EFAULT;
1862drop:
1863			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1864			kfree_skb(skb);
1865			if (frags) {
1866				tfile->napi.skb = NULL;
1867				mutex_unlock(&tfile->napi_mutex);
1868			}
1869
1870			return err;
1871		}
1872	}
1873
1874	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1875		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
1876		kfree_skb(skb);
1877		if (frags) {
1878			tfile->napi.skb = NULL;
1879			mutex_unlock(&tfile->napi_mutex);
1880		}
1881
1882		return -EINVAL;
1883	}
1884
1885	switch (tun->flags & TUN_TYPE_MASK) {
1886	case IFF_TUN:
1887		if (tun->flags & IFF_NO_PI) {
1888			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1889
1890			switch (ip_version) {
1891			case 4:
1892				pi.proto = htons(ETH_P_IP);
1893				break;
1894			case 6:
1895				pi.proto = htons(ETH_P_IPV6);
1896				break;
1897			default:
1898				this_cpu_inc(tun->pcpu_stats->rx_dropped);
1899				kfree_skb(skb);
1900				return -EINVAL;
1901			}
1902		}
1903
1904		skb_reset_mac_header(skb);
1905		skb->protocol = pi.proto;
1906		skb->dev = tun->dev;
1907		break;
1908	case IFF_TAP:
1909		if (!frags)
1910			skb->protocol = eth_type_trans(skb, tun->dev);
1911		break;
1912	}
1913
1914	/* copy skb_ubuf_info for callback when skb has no error */
1915	if (zerocopy) {
1916		skb_shinfo(skb)->destructor_arg = msg_control;
1917		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1918		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1919	} else if (msg_control) {
1920		struct ubuf_info *uarg = msg_control;
1921		uarg->callback(uarg, false);
1922	}
1923
1924	skb_reset_network_header(skb);
1925	skb_probe_transport_header(skb);
1926
1927	if (skb_xdp) {
1928		struct bpf_prog *xdp_prog;
1929		int ret;
1930
1931		local_bh_disable();
1932		rcu_read_lock();
1933		xdp_prog = rcu_dereference(tun->xdp_prog);
1934		if (xdp_prog) {
1935			ret = do_xdp_generic(xdp_prog, skb);
1936			if (ret != XDP_PASS) {
1937				rcu_read_unlock();
1938				local_bh_enable();
1939				return total_len;
1940			}
1941		}
1942		rcu_read_unlock();
1943		local_bh_enable();
1944	}
1945
 1946	/* Compute the costly rx hash only if needed for flow updates.
 1947	 * There is a very small possibility of out-of-order delivery during
 1948	 * queue switching; it is not worth optimizing for.
1949	 */
1950	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1951	    !tfile->detached)
1952		rxhash = __skb_get_hash_symmetric(skb);
1953
1954	rcu_read_lock();
1955	if (unlikely(!(tun->dev->flags & IFF_UP))) {
1956		err = -EIO;
1957		rcu_read_unlock();
1958		goto drop;
1959	}
1960
1961	if (frags) {
1962		/* Exercise flow dissector code path. */
1963		u32 headlen = eth_get_headlen(tun->dev, skb->data,
1964					      skb_headlen(skb));
1965
1966		if (unlikely(headlen > skb_headlen(skb))) {
1967			this_cpu_inc(tun->pcpu_stats->rx_dropped);
1968			napi_free_frags(&tfile->napi);
1969			rcu_read_unlock();
1970			mutex_unlock(&tfile->napi_mutex);
1971			WARN_ON(1);
1972			return -ENOMEM;
1973		}
1974
1975		local_bh_disable();
1976		napi_gro_frags(&tfile->napi);
1977		local_bh_enable();
1978		mutex_unlock(&tfile->napi_mutex);
1979	} else if (tfile->napi_enabled) {
1980		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1981		int queue_len;
1982
1983		spin_lock_bh(&queue->lock);
1984		__skb_queue_tail(queue, skb);
1985		queue_len = skb_queue_len(queue);
1986		spin_unlock(&queue->lock);
1987
1988		if (!more || queue_len > NAPI_POLL_WEIGHT)
1989			napi_schedule(&tfile->napi);
1990
1991		local_bh_enable();
1992	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
1993		tun_rx_batched(tun, tfile, skb, more);
1994	} else {
1995		netif_rx_ni(skb);
1996	}
1997	rcu_read_unlock();
1998
1999	stats = get_cpu_ptr(tun->pcpu_stats);
2000	u64_stats_update_begin(&stats->syncp);
2001	stats->rx_packets++;
2002	stats->rx_bytes += len;
2003	u64_stats_update_end(&stats->syncp);
2004	put_cpu_ptr(stats);
2005
2006	if (rxhash)
2007		tun_flow_update(tun, rxhash, tfile);
2008
2009	return total_len;
2010}
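/* Illustrative userspace view of the write path above: for a device with
 * IFF_VNET_HDR and without IFF_NO_PI, each write() must carry
 *
 *	[struct tun_pi][vnet_hdr_sz bytes of virtio_net_hdr][packet data]
 *
 * With IFF_NO_PI the tun_pi header is omitted, and without IFF_VNET_HDR
 * the virtio header is omitted.
 */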
2011
2012static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2013{
2014	struct file *file = iocb->ki_filp;
2015	struct tun_file *tfile = file->private_data;
2016	struct tun_struct *tun = tun_get(tfile);
2017	ssize_t result;
2018
2019	if (!tun)
2020		return -EBADFD;
2021
2022	result = tun_get_user(tun, tfile, NULL, from,
2023			      file->f_flags & O_NONBLOCK, false);
2024
2025	tun_put(tun);
2026	return result;
2027}
2028
2029static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2030				struct tun_file *tfile,
2031				struct xdp_frame *xdp_frame,
2032				struct iov_iter *iter)
2033{
2034	int vnet_hdr_sz = 0;
2035	size_t size = xdp_frame->len;
2036	struct tun_pcpu_stats *stats;
2037	size_t ret;
2038
2039	if (tun->flags & IFF_VNET_HDR) {
2040		struct virtio_net_hdr gso = { 0 };
2041
2042		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2043		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2044			return -EINVAL;
2045		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2046			     sizeof(gso)))
2047			return -EFAULT;
2048		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2049	}
2050
2051	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2052
2053	stats = get_cpu_ptr(tun->pcpu_stats);
2054	u64_stats_update_begin(&stats->syncp);
2055	stats->tx_packets++;
2056	stats->tx_bytes += ret;
2057	u64_stats_update_end(&stats->syncp);
2058	put_cpu_ptr(tun->pcpu_stats);
2059
2060	return ret;
2061}
2062
2063/* Put packet to the user space buffer */
2064static ssize_t tun_put_user(struct tun_struct *tun,
2065			    struct tun_file *tfile,
2066			    struct sk_buff *skb,
2067			    struct iov_iter *iter)
2068{
2069	struct tun_pi pi = { 0, skb->protocol };
2070	struct tun_pcpu_stats *stats;
2071	ssize_t total;
2072	int vlan_offset = 0;
2073	int vlan_hlen = 0;
2074	int vnet_hdr_sz = 0;
2075
2076	if (skb_vlan_tag_present(skb))
2077		vlan_hlen = VLAN_HLEN;
2078
2079	if (tun->flags & IFF_VNET_HDR)
2080		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2081
2082	total = skb->len + vlan_hlen + vnet_hdr_sz;
2083
2084	if (!(tun->flags & IFF_NO_PI)) {
2085		if (iov_iter_count(iter) < sizeof(pi))
2086			return -EINVAL;
2087
2088		total += sizeof(pi);
2089		if (iov_iter_count(iter) < total) {
 2090			/* Packet will be stripped */
2091			pi.flags |= TUN_PKT_STRIP;
2092		}
2093
2094		if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2095			return -EFAULT;
2096	}
2097
2098	if (vnet_hdr_sz) {
2099		struct virtio_net_hdr gso;
2100
2101		if (iov_iter_count(iter) < vnet_hdr_sz)
2102			return -EINVAL;
2103
2104		if (virtio_net_hdr_from_skb(skb, &gso,
2105					    tun_is_little_endian(tun), true,
2106					    vlan_hlen)) {
2107			struct skb_shared_info *sinfo = skb_shinfo(skb);
2108			pr_err("unexpected GSO type: "
2109			       "0x%x, gso_size %d, hdr_len %d\n",
2110			       sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2111			       tun16_to_cpu(tun, gso.hdr_len));
2112			print_hex_dump(KERN_ERR, "tun: ",
2113				       DUMP_PREFIX_NONE,
2114				       16, 1, skb->head,
2115				       min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2116			WARN_ON_ONCE(1);
2117			return -EINVAL;
2118		}
2119
2120		if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2121			return -EFAULT;
2122
2123		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2124	}
2125
2126	if (vlan_hlen) {
2127		int ret;
2128		struct veth veth;
2129
2130		veth.h_vlan_proto = skb->vlan_proto;
2131		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2132
2133		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2134
2135		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2136		if (ret || !iov_iter_count(iter))
2137			goto done;
2138
2139		ret = copy_to_iter(&veth, sizeof(veth), iter);
2140		if (ret != sizeof(veth) || !iov_iter_count(iter))
2141			goto done;
2142	}
2143
2144	skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2145
2146done:
 2147	/* caller is in process context */
2148	stats = get_cpu_ptr(tun->pcpu_stats);
2149	u64_stats_update_begin(&stats->syncp);
2150	stats->tx_packets++;
2151	stats->tx_bytes += skb->len + vlan_hlen;
2152	u64_stats_update_end(&stats->syncp);
2153	put_cpu_ptr(tun->pcpu_stats);
2154
2155	return total;
2156}
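/* Illustrative note on the read path above: if the reader's buffer is too
 * small, the packet is truncated rather than failed, and TUN_PKT_STRIP is
 * set in pi.flags so that userspace can detect the truncation (visible
 * only when proto info is enabled, i.e. without IFF_NO_PI).
 */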
2157
2158static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2159{
2160	DECLARE_WAITQUEUE(wait, current);
2161	void *ptr = NULL;
2162	int error = 0;
2163
2164	ptr = ptr_ring_consume(&tfile->tx_ring);
2165	if (ptr)
2166		goto out;
2167	if (noblock) {
2168		error = -EAGAIN;
2169		goto out;
2170	}
2171
2172	add_wait_queue(&tfile->socket.wq.wait, &wait);
2173
2174	while (1) {
2175		set_current_state(TASK_INTERRUPTIBLE);
2176		ptr = ptr_ring_consume(&tfile->tx_ring);
2177		if (ptr)
2178			break;
2179		if (signal_pending(current)) {
2180			error = -ERESTARTSYS;
2181			break;
2182		}
2183		if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2184			error = -EFAULT;
2185			break;
2186		}
2187
2188		schedule();
2189	}
2190
2191	__set_current_state(TASK_RUNNING);
2192	remove_wait_queue(&tfile->socket.wq.wait, &wait);
2193
2194out:
2195	*err = error;
2196	return ptr;
2197}
2198
2199static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2200			   struct iov_iter *to,
2201			   int noblock, void *ptr)
2202{
2203	ssize_t ret;
2204	int err;
2205
2206	tun_debug(KERN_INFO, tun, "tun_do_read\n");
2207
2208	if (!iov_iter_count(to)) {
2209		tun_ptr_free(ptr);
2210		return 0;
2211	}
2212
2213	if (!ptr) {
2214		/* Read frames from ring */
2215		ptr = tun_ring_recv(tfile, noblock, &err);
2216		if (!ptr)
2217			return err;
2218	}
2219
2220	if (tun_is_xdp_frame(ptr)) {
2221		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2222
2223		ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2224		xdp_return_frame(xdpf);
2225	} else {
2226		struct sk_buff *skb = ptr;
2227
2228		ret = tun_put_user(tun, tfile, skb, to);
2229		if (unlikely(ret < 0))
2230			kfree_skb(skb);
2231		else
2232			consume_skb(skb);
2233	}
2234
2235	return ret;
2236}
2237
2238static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2239{
2240	struct file *file = iocb->ki_filp;
2241	struct tun_file *tfile = file->private_data;
2242	struct tun_struct *tun = tun_get(tfile);
2243	ssize_t len = iov_iter_count(to), ret;
2244
2245	if (!tun)
2246		return -EBADFD;
2247	ret = tun_do_read(tun, tfile, to, file->f_flags & O_NONBLOCK, NULL);
2248	ret = min_t(ssize_t, ret, len);
2249	if (ret > 0)
2250		iocb->ki_pos = ret;
2251	tun_put(tun);
2252	return ret;
2253}
2254
2255static void tun_prog_free(struct rcu_head *rcu)
2256{
2257	struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2258
2259	bpf_prog_destroy(prog->prog);
2260	kfree(prog);
2261}
2262
2263static int __tun_set_ebpf(struct tun_struct *tun,
2264			  struct tun_prog __rcu **prog_p,
2265			  struct bpf_prog *prog)
2266{
2267	struct tun_prog *old, *new = NULL;
2268
2269	if (prog) {
2270		new = kmalloc(sizeof(*new), GFP_KERNEL);
2271		if (!new)
2272			return -ENOMEM;
2273		new->prog = prog;
2274	}
2275
2276	spin_lock_bh(&tun->lock);
2277	old = rcu_dereference_protected(*prog_p,
2278					lockdep_is_held(&tun->lock));
2279	rcu_assign_pointer(*prog_p, new);
2280	spin_unlock_bh(&tun->lock);
2281
2282	if (old)
2283		call_rcu(&old->rcu, tun_prog_free);
2284
2285	return 0;
2286}
2287
2288static void tun_free_netdev(struct net_device *dev)
2289{
2290	struct tun_struct *tun = netdev_priv(dev);
2291
2292	BUG_ON(!(list_empty(&tun->disabled)));
2293	free_percpu(tun->pcpu_stats);
2294	tun_flow_uninit(tun);
2295	security_tun_dev_free_security(tun->security);
2296	__tun_set_ebpf(tun, &tun->steering_prog, NULL);
2297	__tun_set_ebpf(tun, &tun->filter_prog, NULL);
2298}
2299
2300static void tun_setup(struct net_device *dev)
2301{
2302	struct tun_struct *tun = netdev_priv(dev);
2303
2304	tun->owner = INVALID_UID;
2305	tun->group = INVALID_GID;
2306	tun_default_link_ksettings(dev, &tun->link_ksettings);
2307
2308	dev->ethtool_ops = &tun_ethtool_ops;
2309	dev->needs_free_netdev = true;
2310	dev->priv_destructor = tun_free_netdev;
2311	/* We prefer our own queue length */
2312	dev->tx_queue_len = TUN_READQ_SIZE;
2313}
2314
2315/* Trivial set of netlink ops to allow deleting tun or tap
2316 * device with netlink.
2317 */
2318static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2319			struct netlink_ext_ack *extack)
2320{
2321	NL_SET_ERR_MSG(extack,
2322		       "tun/tap creation via rtnetlink is not supported.");
2323	return -EOPNOTSUPP;
2324}
2325
2326static size_t tun_get_size(const struct net_device *dev)
2327{
2328	BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2329	BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2330
2331	return nla_total_size(sizeof(uid_t)) + /* OWNER */
2332	       nla_total_size(sizeof(gid_t)) + /* GROUP */
2333	       nla_total_size(sizeof(u8)) + /* TYPE */
2334	       nla_total_size(sizeof(u8)) + /* PI */
2335	       nla_total_size(sizeof(u8)) + /* VNET_HDR */
2336	       nla_total_size(sizeof(u8)) + /* PERSIST */
2337	       nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2338	       nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2339	       nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2340	       0;
2341}
2342
2343static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2344{
2345	struct tun_struct *tun = netdev_priv(dev);
2346
2347	if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2348		goto nla_put_failure;
2349	if (uid_valid(tun->owner) &&
2350	    nla_put_u32(skb, IFLA_TUN_OWNER,
2351			from_kuid_munged(current_user_ns(), tun->owner)))
2352		goto nla_put_failure;
2353	if (gid_valid(tun->group) &&
2354	    nla_put_u32(skb, IFLA_TUN_GROUP,
2355			from_kgid_munged(current_user_ns(), tun->group)))
2356		goto nla_put_failure;
2357	if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2358		goto nla_put_failure;
2359	if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2360		goto nla_put_failure;
2361	if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2362		goto nla_put_failure;
2363	if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2364		       !!(tun->flags & IFF_MULTI_QUEUE)))
2365		goto nla_put_failure;
2366	if (tun->flags & IFF_MULTI_QUEUE) {
2367		if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2368			goto nla_put_failure;
2369		if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2370				tun->numdisabled))
2371			goto nla_put_failure;
2372	}
2373
2374	return 0;
2375
2376nla_put_failure:
2377	return -EMSGSIZE;
2378}
2379
2380static struct rtnl_link_ops tun_link_ops __read_mostly = {
2381	.kind		= DRV_NAME,
2382	.priv_size	= sizeof(struct tun_struct),
2383	.setup		= tun_setup,
2384	.validate	= tun_validate,
2385	.get_size       = tun_get_size,
2386	.fill_info      = tun_fill_info,
2387};
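
/* Editorial example: with tun_link_ops registered, a persistent device can
 * be removed through rtnetlink without reopening /dev/net/tun, e.g.:
 *
 *	# ip link delete tap0
 *
 * Creation via rtnetlink is deliberately rejected by tun_validate() with
 * -EOPNOTSUPP; devices are only created through TUNSETIFF.
 */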
2388
2389static void tun_sock_write_space(struct sock *sk)
2390{
2391	struct tun_file *tfile;
2392	wait_queue_head_t *wqueue;
2393
2394	if (!sock_writeable(sk))
2395		return;
2396
2397	if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2398		return;
2399
2400	wqueue = sk_sleep(sk);
2401	if (wqueue && waitqueue_active(wqueue))
2402		wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2403						EPOLLWRNORM | EPOLLWRBAND);
2404
2405	tfile = container_of(sk, struct tun_file, sk);
2406	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2407}
2408
2409static void tun_put_page(struct tun_page *tpage)
2410{
2411	if (tpage->page)
2412		__page_frag_cache_drain(tpage->page, tpage->count);
2413}
2414
2415static int tun_xdp_one(struct tun_struct *tun,
2416		       struct tun_file *tfile,
2417		       struct xdp_buff *xdp, int *flush,
2418		       struct tun_page *tpage)
2419{
2420	unsigned int datasize = xdp->data_end - xdp->data;
2421	struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2422	struct virtio_net_hdr *gso = &hdr->gso;
2423	struct tun_pcpu_stats *stats;
2424	struct bpf_prog *xdp_prog;
2425	struct sk_buff *skb = NULL;
2426	u32 rxhash = 0, act;
2427	int buflen = hdr->buflen;
2428	int err = 0;
2429	bool skb_xdp = false;
2430	struct page *page;
2431
2432	xdp_prog = rcu_dereference(tun->xdp_prog);
2433	if (xdp_prog) {
2434		if (gso->gso_type) {
2435			skb_xdp = true;
2436			goto build;
2437		}
2438		xdp_set_data_meta_invalid(xdp);
2439		xdp->rxq = &tfile->xdp_rxq;
2440
2441		act = bpf_prog_run_xdp(xdp_prog, xdp);
2442		err = tun_xdp_act(tun, xdp_prog, xdp, act);
2443		if (err < 0) {
2444			put_page(virt_to_head_page(xdp->data));
2445			return err;
2446		}
2447
2448		switch (err) {
2449		case XDP_REDIRECT:
2450			*flush = true;
2451			/* fall through */
2452		case XDP_TX:
2453			return 0;
2454		case XDP_PASS:
2455			break;
2456		default:
2457			page = virt_to_head_page(xdp->data);
2458			if (tpage->page == page) {
2459				++tpage->count;
2460			} else {
2461				tun_put_page(tpage);
2462				tpage->page = page;
2463				tpage->count = 1;
2464			}
2465			return 0;
2466		}
2467	}
2468
2469build:
2470	skb = build_skb(xdp->data_hard_start, buflen);
2471	if (!skb) {
2472		err = -ENOMEM;
2473		goto out;
2474	}
2475
2476	skb_reserve(skb, xdp->data - xdp->data_hard_start);
2477	skb_put(skb, xdp->data_end - xdp->data);
2478
2479	if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2480		this_cpu_inc(tun->pcpu_stats->rx_frame_errors);
2481		kfree_skb(skb);
2482		err = -EINVAL;
2483		goto out;
2484	}
2485
2486	skb->protocol = eth_type_trans(skb, tun->dev);
2487	skb_reset_network_header(skb);
2488	skb_probe_transport_header(skb);
2489
2490	if (skb_xdp) {
2491		err = do_xdp_generic(xdp_prog, skb);
2492		if (err != XDP_PASS)
2493			goto out;
2494	}
2495
2496	if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2497	    !tfile->detached)
2498		rxhash = __skb_get_hash_symmetric(skb);
2499
2500	skb_record_rx_queue(skb, tfile->queue_index);
2501	netif_receive_skb(skb);
2502
2503	/* No need for get_cpu_ptr() here since this function is
2504	 * always called with bh disabled
2505	 */
2506	stats = this_cpu_ptr(tun->pcpu_stats);
2507	u64_stats_update_begin(&stats->syncp);
2508	stats->rx_packets++;
2509	stats->rx_bytes += datasize;
2510	u64_stats_update_end(&stats->syncp);
2511
2512	if (rxhash)
2513		tun_flow_update(tun, rxhash, tfile);
2514
2515out:
2516	return err;
2517}
2518
2519static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2520{
2521	int ret, i;
2522	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2523	struct tun_struct *tun = tun_get(tfile);
2524	struct tun_msg_ctl *ctl = m->msg_control;
2525	struct xdp_buff *xdp;
2526
2527	if (!tun)
2528		return -EBADFD;
2529
2530	if (ctl && (ctl->type == TUN_MSG_PTR)) {
2531		struct tun_page tpage;
2532		int n = ctl->num;
2533		int flush = 0;
2534
2535		memset(&tpage, 0, sizeof(tpage));
2536
2537		local_bh_disable();
2538		rcu_read_lock();
2539
2540		for (i = 0; i < n; i++) {
2541			xdp = &((struct xdp_buff *)ctl->ptr)[i];
2542			tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2543		}
2544
2545		if (flush)
2546			xdp_do_flush_map();
2547
2548		rcu_read_unlock();
2549		local_bh_enable();
2550
2551		tun_put_page(&tpage);
2552
2553		ret = total_len;
2554		goto out;
2555	}
2556
2557	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2558			   m->msg_flags & MSG_DONTWAIT,
2559			   m->msg_flags & MSG_MORE);
2560out:
2561	tun_put(tun);
2562	return ret;
2563}
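
/* Editorial note: the TUN_MSG_PTR branch above is the batched XDP path for
 * in-kernel callers (e.g. vhost-net), which pass an array of ctl->num
 * struct xdp_buff entries through ctl->ptr. Writes from userspace never
 * carry a msg_control of this type and always go through tun_get_user().
 */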
2564
2565static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2566		       int flags)
2567{
2568	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2569	struct tun_struct *tun = tun_get(tfile);
2570	void *ptr = m->msg_control;
2571	int ret;
2572
2573	if (!tun) {
2574		ret = -EBADFD;
2575		goto out_free;
2576	}
2577
2578	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2579		ret = -EINVAL;
2580		goto out_put_tun;
2581	}
2582	if (flags & MSG_ERRQUEUE) {
2583		ret = sock_recv_errqueue(sock->sk, m, total_len,
2584					 SOL_PACKET, TUN_TX_TIMESTAMP);
2585		goto out;
2586	}
2587	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2588	if (ret > (ssize_t)total_len) {
2589		m->msg_flags |= MSG_TRUNC;
2590		ret = flags & MSG_TRUNC ? ret : total_len;
2591	}
2592out:
2593	tun_put(tun);
2594	return ret;
2595
2596out_put_tun:
2597	tun_put(tun);
2598out_free:
2599	tun_ptr_free(ptr);
2600	return ret;
2601}
2602
2603static int tun_ptr_peek_len(void *ptr)
2604{
2605	if (likely(ptr)) {
2606		if (tun_is_xdp_frame(ptr)) {
2607			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2608
2609			return xdpf->len;
2610		}
2611		return __skb_array_len_with_tag(ptr);
2612	} else {
2613		return 0;
2614	}
2615}
2616
2617static int tun_peek_len(struct socket *sock)
2618{
2619	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2620	struct tun_struct *tun;
2621	int ret = 0;
2622
2623	tun = tun_get(tfile);
2624	if (!tun)
2625		return 0;
2626
2627	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2628	tun_put(tun);
2629
2630	return ret;
2631}
2632
2633/* Ops structure to mimic raw sockets with tun */
2634static const struct proto_ops tun_socket_ops = {
2635	.peek_len = tun_peek_len,
2636	.sendmsg = tun_sendmsg,
2637	.recvmsg = tun_recvmsg,
2638};
2639
2640static struct proto tun_proto = {
2641	.name		= "tun",
2642	.owner		= THIS_MODULE,
2643	.obj_size	= sizeof(struct tun_file),
2644};
2645
2646static int tun_flags(struct tun_struct *tun)
2647{
2648	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2649}
2650
2651static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
2652			      char *buf)
2653{
2654	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2655	return sprintf(buf, "0x%x\n", tun_flags(tun));
2656}
2657
2658static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
2659			      char *buf)
2660{
2661	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2662	return uid_valid(tun->owner) ?
2663		sprintf(buf, "%u\n",
2664			from_kuid_munged(current_user_ns(), tun->owner)) :
2665		sprintf(buf, "-1\n");
2666}
2667
2668static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
2669			      char *buf)
2670{
2671	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2672	return gid_valid(tun->group) ?
2673		sprintf(buf, "%u\n",
2674			from_kgid_munged(current_user_ns(), tun->group)) :
2675		sprintf(buf, "-1\n");
2676}
2677
2678static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
2679static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
2680static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
2681
2682static struct attribute *tun_dev_attrs[] = {
2683	&dev_attr_tun_flags.attr,
2684	&dev_attr_owner.attr,
2685	&dev_attr_group.attr,
2686	NULL
2687};
2688
2689static const struct attribute_group tun_attr_group = {
2690	.attrs = tun_dev_attrs
2691};
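
/* Usage sketch (editorial; paths assume the standard netdev sysfs layout):
 * these attributes let the flags and ownership of a persistent interface be
 * inspected without holding an fd, e.g.:
 *
 *	$ cat /sys/class/net/tap0/tun_flags
 *	0x1002			(IFF_TAP | IFF_NO_PI, as an example)
 *	$ cat /sys/class/net/tap0/owner
 *	1000
 */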
2692
2693static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2694{
2695	struct tun_struct *tun;
2696	struct tun_file *tfile = file->private_data;
2697	struct net_device *dev;
2698	int err;
2699
2700	if (tfile->detached)
2701		return -EINVAL;
2702
2703	if ((ifr->ifr_flags & IFF_NAPI_FRAGS)) {
2704		if (!capable(CAP_NET_ADMIN))
2705			return -EPERM;
2706
2707		if (!(ifr->ifr_flags & IFF_NAPI) ||
2708		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2709			return -EINVAL;
2710	}
2711
2712	dev = __dev_get_by_name(net, ifr->ifr_name);
2713	if (dev) {
2714		if (ifr->ifr_flags & IFF_TUN_EXCL)
2715			return -EBUSY;
2716		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2717			tun = netdev_priv(dev);
2718		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2719			tun = netdev_priv(dev);
2720		else
2721			return -EINVAL;
2722
2723		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2724		    !!(tun->flags & IFF_MULTI_QUEUE))
2725			return -EINVAL;
2726
2727		if (tun_not_capable(tun))
2728			return -EPERM;
2729		err = security_tun_dev_open(tun->security);
2730		if (err < 0)
2731			return err;
2732
2733		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2734				 ifr->ifr_flags & IFF_NAPI,
2735				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2736		if (err < 0)
2737			return err;
2738
2739		if (tun->flags & IFF_MULTI_QUEUE &&
2740		    (tun->numqueues + tun->numdisabled > 1)) {
2741			/* One or more queues have already been attached; no need
2742			 * to initialize the device again.
2743			 */
2744			netdev_state_change(dev);
2745			return 0;
2746		}
2747
2748		tun->flags = (tun->flags & ~TUN_FEATURES) |
2749			      (ifr->ifr_flags & TUN_FEATURES);
2750
2751		netdev_state_change(dev);
2752	} else {
2753		char *name;
2754		unsigned long flags = 0;
2755		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2756			     MAX_TAP_QUEUES : 1;
2757
2758		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2759			return -EPERM;
2760		err = security_tun_dev_create();
2761		if (err < 0)
2762			return err;
2763
2764		/* Set dev type */
2765		if (ifr->ifr_flags & IFF_TUN) {
2766			/* TUN device */
2767			flags |= IFF_TUN;
2768			name = "tun%d";
2769		} else if (ifr->ifr_flags & IFF_TAP) {
2770			/* TAP device */
2771			flags |= IFF_TAP;
2772			name = "tap%d";
2773		} else
2774			return -EINVAL;
2775
2776		if (*ifr->ifr_name)
2777			name = ifr->ifr_name;
2778
2779		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2780				       NET_NAME_UNKNOWN, tun_setup, queues,
2781				       queues);
2782
2783		if (!dev)
2784			return -ENOMEM;
2785		err = dev_get_valid_name(net, dev, name);
2786		if (err < 0)
2787			goto err_free_dev;
2788
2789		dev_net_set(dev, net);
2790		dev->rtnl_link_ops = &tun_link_ops;
2791		dev->ifindex = tfile->ifindex;
2792		dev->sysfs_groups[0] = &tun_attr_group;
2793
2794		tun = netdev_priv(dev);
2795		tun->dev = dev;
2796		tun->flags = flags;
2797		tun->txflt.count = 0;
2798		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2799
2800		tun->align = NET_SKB_PAD;
2801		tun->filter_attached = false;
2802		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2803		tun->rx_batched = 0;
2804		RCU_INIT_POINTER(tun->steering_prog, NULL);
2805
2806		tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
2807		if (!tun->pcpu_stats) {
2808			err = -ENOMEM;
2809			goto err_free_dev;
2810		}
2811
2812		spin_lock_init(&tun->lock);
2813
2814		err = security_tun_dev_alloc_security(&tun->security);
2815		if (err < 0)
2816			goto err_free_stat;
2817
2818		tun_net_init(dev);
2819		tun_flow_init(tun);
2820
2821		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
2822				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
2823				   NETIF_F_HW_VLAN_STAG_TX;
2824		dev->features = dev->hw_features | NETIF_F_LLTX;
2825		dev->vlan_features = dev->features &
2826				     ~(NETIF_F_HW_VLAN_CTAG_TX |
2827				       NETIF_F_HW_VLAN_STAG_TX);
2828
2829		tun->flags = (tun->flags & ~TUN_FEATURES) |
2830			      (ifr->ifr_flags & TUN_FEATURES);
2831
2832		INIT_LIST_HEAD(&tun->disabled);
2833		err = tun_attach(tun, file, false, ifr->ifr_flags & IFF_NAPI,
2834				 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
2835		if (err < 0)
2836			goto err_free_flow;
2837
2838		err = register_netdevice(tun->dev);
2839		if (err < 0)
2840			goto err_detach;
2841		/* free_netdev() won't check the refcnt; to avoid a race
2842		 * with dev_put() we must publish tun only after registration.
2843		 */
2844		rcu_assign_pointer(tfile->tun, tun);
2845	}
2846
2847	netif_carrier_on(tun->dev);
2848
2849	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
2850
2851	/* Make sure persistent devices do not get stuck in
2852	 * xoff state.
2853	 */
2854	if (netif_running(tun->dev))
2855		netif_tx_wake_all_queues(tun->dev);
2856
2857	strcpy(ifr->ifr_name, tun->dev->name);
2858	return 0;
2859
2860err_detach:
2861	tun_detach_all(dev);
2862	/* register_netdevice() already called tun_free_netdev() */
2863	goto err_free_dev;
2864
2865err_free_flow:
2866	tun_flow_uninit(tun);
2867	security_tun_dev_free_security(tun->security);
2868err_free_stat:
2869	free_percpu(tun->pcpu_stats);
2870err_free_dev:
2871	free_netdev(dev);
2872	return err;
2873}
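
/* Userspace sketch (editorial; error handling trimmed): tun_set_iff() above
 * is normally reached through TUNSETIFF on a freshly opened fd:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *name)	// name: in/out buffer, IFNAMSIZ bytes
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI;	// or IFF_TUN
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		if (fd < 0 || ioctl(fd, TUNSETIFF, &ifr) < 0)
 *			return -1;
 *		strcpy(name, ifr.ifr_name);	// kernel may pick e.g. "tap0"
 *		return fd;
 *	}
 */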
2874
2875static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2876{
2877	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
2878
2879	strcpy(ifr->ifr_name, tun->dev->name);
2880
2881	ifr->ifr_flags = tun_flags(tun);
2882
2883}
2884
2885/* This is like a cut-down set of ethtool ops, except it is driven via the
2886 * tun fd, so no privileges are required. */
2887static int set_offload(struct tun_struct *tun, unsigned long arg)
2888{
2889	netdev_features_t features = 0;
2890
2891	if (arg & TUN_F_CSUM) {
2892		features |= NETIF_F_HW_CSUM;
2893		arg &= ~TUN_F_CSUM;
2894
2895		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2896			if (arg & TUN_F_TSO_ECN) {
2897				features |= NETIF_F_TSO_ECN;
2898				arg &= ~TUN_F_TSO_ECN;
2899			}
2900			if (arg & TUN_F_TSO4)
2901				features |= NETIF_F_TSO;
2902			if (arg & TUN_F_TSO6)
2903				features |= NETIF_F_TSO6;
2904			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2905		}
2906
2907		arg &= ~TUN_F_UFO;
2908	}
2909
2910	/* This gives the user a way to test for new features in the future
2911	 * by trying to set them. */
2912	if (arg)
2913		return -EINVAL;
2914
2915	tun->set_features = features;
2916	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2917	tun->dev->wanted_features |= features;
2918	netdev_update_features(tun->dev);
2919
2920	return 0;
2921}
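
/* Editorial example: set_offload() is reached via TUNSETOFFLOAD, and the
 * argument travels by value rather than behind a pointer:
 *
 *	unsigned long off = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *	if (ioctl(fd, TUNSETOFFLOAD, off) < 0)	// note: value, not &off
 *		perror("TUNSETOFFLOAD");
 *
 * Any bit the kernel does not recognise yields -EINVAL, which is exactly the
 * feature-probing behaviour the comment inside set_offload() describes.
 */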
2922
2923static void tun_detach_filter(struct tun_struct *tun, int n)
2924{
2925	int i;
2926	struct tun_file *tfile;
2927
2928	for (i = 0; i < n; i++) {
2929		tfile = rtnl_dereference(tun->tfiles[i]);
2930		lock_sock(tfile->socket.sk);
2931		sk_detach_filter(tfile->socket.sk);
2932		release_sock(tfile->socket.sk);
2933	}
2934
2935	tun->filter_attached = false;
2936}
2937
2938static int tun_attach_filter(struct tun_struct *tun)
2939{
2940	int i, ret = 0;
2941	struct tun_file *tfile;
2942
2943	for (i = 0; i < tun->numqueues; i++) {
2944		tfile = rtnl_dereference(tun->tfiles[i]);
2945		lock_sock(tfile->socket.sk);
2946		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2947		release_sock(tfile->socket.sk);
2948		if (ret) {
2949			tun_detach_filter(tun, i);
2950			return ret;
2951		}
2952	}
2953
2954	tun->filter_attached = true;
2955	return ret;
2956}
2957
2958static void tun_set_sndbuf(struct tun_struct *tun)
2959{
2960	struct tun_file *tfile;
2961	int i;
2962
2963	for (i = 0; i < tun->numqueues; i++) {
2964		tfile = rtnl_dereference(tun->tfiles[i]);
2965		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2966	}
2967}
2968
2969static int tun_set_queue(struct file *file, struct ifreq *ifr)
2970{
2971	struct tun_file *tfile = file->private_data;
2972	struct tun_struct *tun;
2973	int ret = 0;
2974
2975	rtnl_lock();
2976
2977	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2978		tun = tfile->detached;
2979		if (!tun) {
2980			ret = -EINVAL;
2981			goto unlock;
2982		}
2983		ret = security_tun_dev_attach_queue(tun->security);
2984		if (ret < 0)
2985			goto unlock;
2986		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2987				 tun->flags & IFF_NAPI_FRAGS, true);
2988	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
2989		tun = rtnl_dereference(tfile->tun);
2990		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
2991			ret = -EINVAL;
2992		else
2993			__tun_detach(tfile, false);
2994	} else
2995		ret = -EINVAL;
2996
2997	if (ret >= 0)
2998		netdev_state_change(tun->dev);
2999
3000unlock:
3001	rtnl_unlock();
3002	return ret;
3003}
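
/* Editorial sketch: a queue fd of a multi-queue device can be parked and
 * re-armed through TUNSETQUEUE without being closed:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// stop steering to this queue
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// resume
 */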
3004
3005static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog **prog_p,
3006			void __user *data)
3007{
3008	struct bpf_prog *prog;
3009	int fd;
3010
3011	if (copy_from_user(&fd, data, sizeof(fd)))
3012		return -EFAULT;
3013
3014	if (fd == -1) {
3015		prog = NULL;
3016	} else {
3017		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3018		if (IS_ERR(prog))
3019			return PTR_ERR(prog);
3020	}
3021
3022	return __tun_set_ebpf(tun, prog_p, prog);
3023}
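
/* Editorial sketch: TUNSETSTEERINGEBPF and TUNSETFILTEREBPF take a pointer
 * to the fd of a BPF_PROG_TYPE_SOCKET_FILTER program; -1 detaches it.
 * load_steering_prog() below is a hypothetical helper wrapping
 * bpf(BPF_PROG_LOAD, ...):
 *
 *	int prog_fd = load_steering_prog();
 *	ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);
 *	prog_fd = -1;
 *	ioctl(fd, TUNSETSTEERINGEBPF, &prog_fd);	// detach
 */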
3024
3025static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3026			    unsigned long arg, int ifreq_len)
3027{
3028	struct tun_file *tfile = file->private_data;
3029	struct net *net = sock_net(&tfile->sk);
3030	struct tun_struct *tun;
3031	void __user* argp = (void __user*)arg;
3032	unsigned int ifindex, carrier;
3033	struct ifreq ifr;
3034	kuid_t owner;
3035	kgid_t group;
3036	int sndbuf;
3037	int vnet_hdr_sz;
3038	int le;
3039	int ret;
3040	bool do_notify = false;
3041
3042	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3043	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3044		if (copy_from_user(&ifr, argp, ifreq_len))
3045			return -EFAULT;
3046	} else {
3047		memset(&ifr, 0, sizeof(ifr));
3048	}
3049	if (cmd == TUNGETFEATURES) {
3050		/* Currently this just means: "what IFF flags are valid?".
3051		 * This is needed because we never checked for invalid flags on
3052		 * TUNSETIFF.
3053		 */
3054		return put_user(IFF_TUN | IFF_TAP | TUN_FEATURES,
3055				(unsigned int __user*)argp);
3056	} else if (cmd == TUNSETQUEUE) {
3057		return tun_set_queue(file, &ifr);
3058	} else if (cmd == SIOCGSKNS) {
3059		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3060			return -EPERM;
3061		return open_related_ns(&net->ns, get_net_ns);
3062	}
3063
3064	ret = 0;
3065	rtnl_lock();
3066
3067	tun = tun_get(tfile);
3068	if (cmd == TUNSETIFF) {
3069		ret = -EEXIST;
3070		if (tun)
3071			goto unlock;
3072
3073		ifr.ifr_name[IFNAMSIZ-1] = '\0';
3074
3075		ret = tun_set_iff(net, file, &ifr);
3076
3077		if (ret)
3078			goto unlock;
3079
3080		if (copy_to_user(argp, &ifr, ifreq_len))
3081			ret = -EFAULT;
3082		goto unlock;
3083	}
3084	if (cmd == TUNSETIFINDEX) {
3085		ret = -EPERM;
3086		if (tun)
3087			goto unlock;
3088
3089		ret = -EFAULT;
3090		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3091			goto unlock;
3092
3093		ret = 0;
3094		tfile->ifindex = ifindex;
3095		goto unlock;
3096	}
3097
3098	ret = -EBADFD;
3099	if (!tun)
3100		goto unlock;
3101
3102	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
3103
3104	net = dev_net(tun->dev);
3105	ret = 0;
3106	switch (cmd) {
3107	case TUNGETIFF:
3108		tun_get_iff(tun, &ifr);
3109
3110		if (tfile->detached)
3111			ifr.ifr_flags |= IFF_DETACH_QUEUE;
3112		if (!tfile->socket.sk->sk_filter)
3113			ifr.ifr_flags |= IFF_NOFILTER;
3114
3115		if (copy_to_user(argp, &ifr, ifreq_len))
3116			ret = -EFAULT;
3117		break;
3118
3119	case TUNSETNOCSUM:
3120		/* Disable/Enable checksum */
3121
3122		/* [unimplemented] */
3123		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
3124			  arg ? "disabled" : "enabled");
3125		break;
3126
3127	case TUNSETPERSIST:
3128		/* Disable/Enable persist mode. Keep an extra reference to the
3129		 * module to prevent it from being unloaded.
3130		 */
3131		if (arg && !(tun->flags & IFF_PERSIST)) {
3132			tun->flags |= IFF_PERSIST;
3133			__module_get(THIS_MODULE);
3134			do_notify = true;
3135		}
3136		if (!arg && (tun->flags & IFF_PERSIST)) {
3137			tun->flags &= ~IFF_PERSIST;
3138			module_put(THIS_MODULE);
3139			do_notify = true;
3140		}
3141
3142		tun_debug(KERN_INFO, tun, "persist %s\n",
3143			  arg ? "enabled" : "disabled");
3144		break;
3145
3146	case TUNSETOWNER:
3147		/* Set owner of the device */
3148		owner = make_kuid(current_user_ns(), arg);
3149		if (!uid_valid(owner)) {
3150			ret = -EINVAL;
3151			break;
3152		}
3153		tun->owner = owner;
3154		do_notify = true;
3155		tun_debug(KERN_INFO, tun, "owner set to %u\n",
3156			  from_kuid(&init_user_ns, tun->owner));
3157		break;
3158
3159	case TUNSETGROUP:
3160		/* Set group of the device */
3161		group = make_kgid(current_user_ns(), arg);
3162		if (!gid_valid(group)) {
3163			ret = -EINVAL;
3164			break;
3165		}
3166		tun->group = group;
3167		do_notify = true;
3168		tun_debug(KERN_INFO, tun, "group set to %u\n",
3169			  from_kgid(&init_user_ns, tun->group));
3170		break;
3171
3172	case TUNSETLINK:
3173		/* Only allow setting the type when the interface is down */
3174		if (tun->dev->flags & IFF_UP) {
3175			tun_debug(KERN_INFO, tun,
3176				  "Linktype set failed because interface is up\n");
3177			ret = -EBUSY;
3178		} else {
3179			tun->dev->type = (int) arg;
3180			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
3181				  tun->dev->type);
3182			ret = 0;
3183		}
3184		break;
3185
3186#ifdef TUN_DEBUG
3187	case TUNSETDEBUG:
3188		tun->debug = arg;
3189		break;
3190#endif
3191	case TUNSETOFFLOAD:
3192		ret = set_offload(tun, arg);
3193		break;
3194
3195	case TUNSETTXFILTER:
3196		/* Can be set only for TAPs */
3197		ret = -EINVAL;
3198		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3199			break;
3200		ret = update_filter(&tun->txflt, (void __user *)arg);
3201		break;
3202
3203	case SIOCGIFHWADDR:
3204		/* Get hw address */
3205		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
3206		ifr.ifr_hwaddr.sa_family = tun->dev->type;
3207		if (copy_to_user(argp, &ifr, ifreq_len))
3208			ret = -EFAULT;
3209		break;
3210
3211	case SIOCSIFHWADDR:
3212		/* Set hw address */
3213		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
3214			  ifr.ifr_hwaddr.sa_data);
3215
3216		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL);
3217		break;
3218
3219	case TUNGETSNDBUF:
3220		sndbuf = tfile->socket.sk->sk_sndbuf;
3221		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3222			ret = -EFAULT;
3223		break;
3224
3225	case TUNSETSNDBUF:
3226		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3227			ret = -EFAULT;
3228			break;
3229		}
3230		if (sndbuf <= 0) {
3231			ret = -EINVAL;
3232			break;
3233		}
3234
3235		tun->sndbuf = sndbuf;
3236		tun_set_sndbuf(tun);
3237		break;
3238
3239	case TUNGETVNETHDRSZ:
3240		vnet_hdr_sz = tun->vnet_hdr_sz;
3241		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3242			ret = -EFAULT;
3243		break;
3244
3245	case TUNSETVNETHDRSZ:
3246		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3247			ret = -EFAULT;
3248			break;
3249		}
3250		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3251			ret = -EINVAL;
3252			break;
3253		}
3254
3255		tun->vnet_hdr_sz = vnet_hdr_sz;
3256		break;
3257
3258	case TUNGETVNETLE:
3259		le = !!(tun->flags & TUN_VNET_LE);
3260		if (put_user(le, (int __user *)argp))
3261			ret = -EFAULT;
3262		break;
3263
3264	case TUNSETVNETLE:
3265		if (get_user(le, (int __user *)argp)) {
3266			ret = -EFAULT;
3267			break;
3268		}
3269		if (le)
3270			tun->flags |= TUN_VNET_LE;
3271		else
3272			tun->flags &= ~TUN_VNET_LE;
3273		break;
3274
3275	case TUNGETVNETBE:
3276		ret = tun_get_vnet_be(tun, argp);
3277		break;
3278
3279	case TUNSETVNETBE:
3280		ret = tun_set_vnet_be(tun, argp);
3281		break;
3282
3283	case TUNATTACHFILTER:
3284		/* Can be set only for TAPs */
3285		ret = -EINVAL;
3286		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3287			break;
3288		ret = -EFAULT;
3289		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3290			break;
3291
3292		ret = tun_attach_filter(tun);
3293		break;
3294
3295	case TUNDETACHFILTER:
3296		/* Can be set only for TAPs */
3297		ret = -EINVAL;
3298		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3299			break;
3300		ret = 0;
3301		tun_detach_filter(tun, tun->numqueues);
3302		break;
3303
3304	case TUNGETFILTER:
3305		ret = -EINVAL;
3306		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3307			break;
3308		ret = -EFAULT;
3309		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3310			break;
3311		ret = 0;
3312		break;
3313
3314	case TUNSETSTEERINGEBPF:
3315		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3316		break;
3317
3318	case TUNSETFILTEREBPF:
3319		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3320		break;
3321
3322	case TUNSETCARRIER:
3323		ret = -EFAULT;
3324		if (copy_from_user(&carrier, argp, sizeof(carrier)))
3325			goto unlock;
3326
3327		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3328		break;
3329
3330	case TUNGETDEVNETNS:
3331		ret = -EPERM;
3332		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3333			goto unlock;
3334		ret = open_related_ns(&net->ns, get_net_ns);
3335		break;
3336
3337	default:
3338		ret = -EINVAL;
3339		break;
3340	}
3341
3342	if (do_notify)
3343		netdev_state_change(tun->dev);
3344
3345unlock:
3346	rtnl_unlock();
3347	if (tun)
3348		tun_put(tun);
3349	return ret;
3350}
3351
3352static long tun_chr_ioctl(struct file *file,
3353			  unsigned int cmd, unsigned long arg)
3354{
3355	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
3356}
3357
3358#ifdef CONFIG_COMPAT
3359static long tun_chr_compat_ioctl(struct file *file,
3360			 unsigned int cmd, unsigned long arg)
3361{
3362	switch (cmd) {
3363	case TUNSETIFF:
3364	case TUNGETIFF:
3365	case TUNSETTXFILTER:
3366	case TUNGETSNDBUF:
3367	case TUNSETSNDBUF:
3368	case SIOCGIFHWADDR:
3369	case SIOCSIFHWADDR:
3370		arg = (unsigned long)compat_ptr(arg);
3371		break;
3372	default:
3373		arg = (compat_ulong_t)arg;
3374		break;
3375	}
3376
3377	/*
3378	 * compat_ifreq is shorter than ifreq, so we must not access beyond
3379	 * the end of that structure. All fields that are used in this
3380	 * driver are compatible though, we don't need to convert the
3381	 * contents.
3382	 */
3383	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3384}
3385#endif /* CONFIG_COMPAT */
3386
3387static int tun_chr_fasync(int fd, struct file *file, int on)
3388{
3389	struct tun_file *tfile = file->private_data;
3390	int ret;
3391
3392	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
3393		goto out;
3394
3395	if (on) {
3396		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3397		tfile->flags |= TUN_FASYNC;
3398	} else
3399		tfile->flags &= ~TUN_FASYNC;
3400	ret = 0;
3401out:
3402	return ret;
3403}
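
/* Editorial sketch: SIGIO delivery on a tun fd is enabled through this
 * handler with the usual fcntl() dance:
 *
 *	fcntl(fd, F_SETOWN, getpid());
 *	fcntl(fd, F_SETFL, fcntl(fd, F_GETFL) | O_ASYNC | O_NONBLOCK);
 *
 * kill_fasync() -- see tun_sock_write_space() above -- then raises SIGIO
 * whenever the fd becomes writable again.
 */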
3404
3405static int tun_chr_open(struct inode *inode, struct file * file)
3406{
3407	struct net *net = current->nsproxy->net_ns;
3408	struct tun_file *tfile;
3409
3410	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
3411
3412	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3413					    &tun_proto, 0);
3414	if (!tfile)
3415		return -ENOMEM;
3416	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3417		sk_free(&tfile->sk);
3418		return -ENOMEM;
3419	}
3420
3421	mutex_init(&tfile->napi_mutex);
3422	RCU_INIT_POINTER(tfile->tun, NULL);
3423	tfile->flags = 0;
3424	tfile->ifindex = 0;
3425
3426	init_waitqueue_head(&tfile->socket.wq.wait);
3427
3428	tfile->socket.file = file;
3429	tfile->socket.ops = &tun_socket_ops;
3430
3431	sock_init_data(&tfile->socket, &tfile->sk);
3432
3433	tfile->sk.sk_write_space = tun_sock_write_space;
3434	tfile->sk.sk_sndbuf = INT_MAX;
3435
3436	file->private_data = tfile;
3437	INIT_LIST_HEAD(&tfile->next);
3438
3439	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3440
3441	return 0;
3442}
3443
3444static int tun_chr_close(struct inode *inode, struct file *file)
3445{
3446	struct tun_file *tfile = file->private_data;
3447
3448	tun_detach(tfile, true);
3449
3450	return 0;
3451}
3452
3453#ifdef CONFIG_PROC_FS
3454static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3455{
3456	struct tun_file *tfile = file->private_data;
3457	struct tun_struct *tun;
3458	struct ifreq ifr;
3459
3460	memset(&ifr, 0, sizeof(ifr));
3461
3462	rtnl_lock();
3463	tun = tun_get(tfile);
3464	if (tun)
3465		tun_get_iff(tun, &ifr);
3466	rtnl_unlock();
3467
3468	if (tun)
3469		tun_put(tun);
3470
3471	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3472}
3473#endif
3474
3475static const struct file_operations tun_fops = {
3476	.owner	= THIS_MODULE,
3477	.llseek = no_llseek,
3478	.read_iter  = tun_chr_read_iter,
3479	.write_iter = tun_chr_write_iter,
3480	.poll	= tun_chr_poll,
3481	.unlocked_ioctl	= tun_chr_ioctl,
3482#ifdef CONFIG_COMPAT
3483	.compat_ioctl = tun_chr_compat_ioctl,
3484#endif
3485	.open	= tun_chr_open,
3486	.release = tun_chr_close,
3487	.fasync = tun_chr_fasync,
3488#ifdef CONFIG_PROC_FS
3489	.show_fdinfo = tun_chr_show_fdinfo,
3490#endif
3491};
3492
3493static struct miscdevice tun_miscdev = {
3494	.minor = TUN_MINOR,
3495	.name = "tun",
3496	.nodename = "net/tun",
3497	.fops = &tun_fops,
3498};
3499
3500/* ethtool interface */
3501
3502static void tun_default_link_ksettings(struct net_device *dev,
3503				       struct ethtool_link_ksettings *cmd)
3504{
3505	ethtool_link_ksettings_zero_link_mode(cmd, supported);
3506	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3507	cmd->base.speed		= SPEED_10;
3508	cmd->base.duplex	= DUPLEX_FULL;
3509	cmd->base.port		= PORT_TP;
3510	cmd->base.phy_address	= 0;
3511	cmd->base.autoneg	= AUTONEG_DISABLE;
3512}
3513
3514static int tun_get_link_ksettings(struct net_device *dev,
3515				  struct ethtool_link_ksettings *cmd)
3516{
3517	struct tun_struct *tun = netdev_priv(dev);
3518
3519	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3520	return 0;
3521}
3522
3523static int tun_set_link_ksettings(struct net_device *dev,
3524				  const struct ethtool_link_ksettings *cmd)
3525{
3526	struct tun_struct *tun = netdev_priv(dev);
3527
3528	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3529	return 0;
3530}
3531
3532static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3533{
3534	struct tun_struct *tun = netdev_priv(dev);
3535
3536	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
3537	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
3538
3539	switch (tun->flags & TUN_TYPE_MASK) {
3540	case IFF_TUN:
3541		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
3542		break;
3543	case IFF_TAP:
3544		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
3545		break;
3546	}
3547}
3548
3549static u32 tun_get_msglevel(struct net_device *dev)
3550{
3551#ifdef TUN_DEBUG
3552	struct tun_struct *tun = netdev_priv(dev);
3553	return tun->debug;
3554#else
3555	return -EOPNOTSUPP;
3556#endif
3557}
3558
3559static void tun_set_msglevel(struct net_device *dev, u32 value)
3560{
3561#ifdef TUN_DEBUG
3562	struct tun_struct *tun = netdev_priv(dev);
3563	tun->debug = value;
3564#endif
3565}
3566
3567static int tun_get_coalesce(struct net_device *dev,
3568			    struct ethtool_coalesce *ec)
3569{
3570	struct tun_struct *tun = netdev_priv(dev);
3571
3572	ec->rx_max_coalesced_frames = tun->rx_batched;
3573
3574	return 0;
3575}
3576
3577static int tun_set_coalesce(struct net_device *dev,
3578			    struct ethtool_coalesce *ec)
3579{
3580	struct tun_struct *tun = netdev_priv(dev);
3581
3582	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3583		tun->rx_batched = NAPI_POLL_WEIGHT;
3584	else
3585		tun->rx_batched = ec->rx_max_coalesced_frames;
3586
3587	return 0;
3588}
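
/* Editorial example: rx_batched is exposed through the standard coalescing
 * knob, so
 *
 *	# ethtool -C tun0 rx-frames 32
 *
 * lets the receive path accumulate up to 32 skbs before flushing them to the
 * stack; values above NAPI_POLL_WEIGHT are clamped as shown above.
 */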
3589
3590static const struct ethtool_ops tun_ethtool_ops = {
3591	.get_drvinfo	= tun_get_drvinfo,
3592	.get_msglevel	= tun_get_msglevel,
3593	.set_msglevel	= tun_set_msglevel,
3594	.get_link	= ethtool_op_get_link,
3595	.get_ts_info	= ethtool_op_get_ts_info,
3596	.get_coalesce   = tun_get_coalesce,
3597	.set_coalesce   = tun_set_coalesce,
3598	.get_link_ksettings = tun_get_link_ksettings,
3599	.set_link_ksettings = tun_set_link_ksettings,
3600};
3601
3602static int tun_queue_resize(struct tun_struct *tun)
3603{
3604	struct net_device *dev = tun->dev;
3605	struct tun_file *tfile;
3606	struct ptr_ring **rings;
3607	int n = tun->numqueues + tun->numdisabled;
3608	int ret, i;
3609
3610	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3611	if (!rings)
3612		return -ENOMEM;
3613
3614	for (i = 0; i < tun->numqueues; i++) {
3615		tfile = rtnl_dereference(tun->tfiles[i]);
3616		rings[i] = &tfile->tx_ring;
3617	}
3618	list_for_each_entry(tfile, &tun->disabled, next)
3619		rings[i++] = &tfile->tx_ring;
3620
3621	ret = ptr_ring_resize_multiple(rings, n,
3622				       dev->tx_queue_len, GFP_KERNEL,
3623				       tun_ptr_free);
3624
3625	kfree(rings);
3626	return ret;
3627}
3628
3629static int tun_device_event(struct notifier_block *unused,
3630			    unsigned long event, void *ptr)
3631{
3632	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3633	struct tun_struct *tun = netdev_priv(dev);
3634	int i;
3635
3636	if (dev->rtnl_link_ops != &tun_link_ops)
3637		return NOTIFY_DONE;
3638
3639	switch (event) {
3640	case NETDEV_CHANGE_TX_QUEUE_LEN:
3641		if (tun_queue_resize(tun))
3642			return NOTIFY_BAD;
3643		break;
3644	case NETDEV_UP:
3645		for (i = 0; i < tun->numqueues; i++) {
3646			struct tun_file *tfile;
3647
3648			tfile = rtnl_dereference(tun->tfiles[i]);
3649			tfile->socket.sk->sk_write_space(tfile->socket.sk);
3650		}
3651		break;
3652	default:
3653		break;
3654	}
3655
3656	return NOTIFY_DONE;
3657}
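
/* Editorial note: NETDEV_CHANGE_TX_QUEUE_LEN above is what makes
 *
 *	# ip link set dev tun0 txqueuelen 1500
 *
 * take effect on the character-device side as well: tun_queue_resize()
 * resizes every per-queue ptr_ring to the new length.
 */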
3658
3659static struct notifier_block tun_notifier_block __read_mostly = {
3660	.notifier_call	= tun_device_event,
3661};
3662
3663static int __init tun_init(void)
3664{
3665	int ret = 0;
3666
3667	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3668
3669	ret = rtnl_link_register(&tun_link_ops);
3670	if (ret) {
3671		pr_err("Can't register link_ops\n");
3672		goto err_linkops;
3673	}
3674
3675	ret = misc_register(&tun_miscdev);
3676	if (ret) {
3677		pr_err("Can't register misc device %d\n", TUN_MINOR);
3678		goto err_misc;
3679	}
3680
3681	ret = register_netdevice_notifier(&tun_notifier_block);
3682	if (ret) {
3683		pr_err("Can't register netdevice notifier\n");
3684		goto err_notifier;
3685	}
3686
3687	return 0;
3688
3689err_notifier:
3690	misc_deregister(&tun_miscdev);
3691err_misc:
3692	rtnl_link_unregister(&tun_link_ops);
3693err_linkops:
3694	return ret;
3695}
3696
3697static void tun_cleanup(void)
3698{
3699	misc_deregister(&tun_miscdev);
3700	rtnl_link_unregister(&tun_link_ops);
3701	unregister_netdevice_notifier(&tun_notifier_block);
3702}
3703
3704/* Get an underlying socket object from tun file.  Returns error unless file is
3705 * attached to a device.  The returned object works like a packet socket, it
3706 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
3707 * holding a reference to the file for as long as the socket is in use. */
3708struct socket *tun_get_socket(struct file *file)
3709{
3710	struct tun_file *tfile;
3711	if (file->f_op != &tun_fops)
3712		return ERR_PTR(-EINVAL);
3713	tfile = file->private_data;
3714	if (!tfile)
3715		return ERR_PTR(-EBADFD);
3716	return &tfile->socket;
3717}
3718EXPORT_SYMBOL_GPL(tun_get_socket);
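
/* Editorial sketch (hedged): this is how vhost-net style code borrows a tun
 * queue as a socket. Assuming a struct file *file obtained with fget():
 *
 *	struct socket *sock = tun_get_socket(file);
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	// sock->ops is tun_socket_ops: sock_sendmsg()/sock_recvmsg() on it
 *	// end up in tun_sendmsg()/tun_recvmsg() above.
 */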
3719
3720struct ptr_ring *tun_get_tx_ring(struct file *file)
3721{
3722	struct tun_file *tfile;
3723
3724	if (file->f_op != &tun_fops)
3725		return ERR_PTR(-EINVAL);
3726	tfile = file->private_data;
3727	if (!tfile)
3728		return ERR_PTR(-EBADFD);
3729	return &tfile->tx_ring;
3730}
3731EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3732
3733module_init(tun_init);
3734module_exit(tun_cleanup);
3735MODULE_DESCRIPTION(DRV_DESCRIPTION);
3736MODULE_AUTHOR(DRV_COPYRIGHT);
3737MODULE_LICENSE("GPL");
3738MODULE_ALIAS_MISCDEV(TUN_MINOR);
3739MODULE_ALIAS("devname:net/tun");