v3.1
   1/*
   2 *  TUN - Universal TUN/TAP device driver.
   3 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
   4 *
   5 *  This program is free software; you can redistribute it and/or modify
   6 *  it under the terms of the GNU General Public License as published by
   7 *  the Free Software Foundation; either version 2 of the License, or
   8 *  (at your option) any later version.
   9 *
  10 *  This program is distributed in the hope that it will be useful,
  11 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13 *  GNU General Public License for more details.
  14 *
  15 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
  16 */
  17
  18/*
  19 *  Changes:
  20 *
  21 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  22 *    Add TUNSETLINK ioctl to set the link encapsulation
  23 *
  24 *  Mark Smith <markzzzsmith@yahoo.com.au>
  25 *    Use random_ether_addr() for tap MAC address.
  26 *
  27 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
  28 *    Fixes in packet dropping, queue length setting and queue wakeup.
  29 *    Increased default tx queue length.
  30 *    Added ethtool API.
  31 *    Minor cleanups
  32 *
  33 *  Daniel Podlejski <underley@underley.eu.org>
  34 *    Modifications for 2.3.99-pre5 kernel.
  35 */
  36
  37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  38
  39#define DRV_NAME	"tun"
  40#define DRV_VERSION	"1.6"
  41#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
  42#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
  43
  44#include <linux/module.h>
  45#include <linux/errno.h>
  46#include <linux/kernel.h>
  47#include <linux/major.h>
  48#include <linux/slab.h>
  49#include <linux/poll.h>
  50#include <linux/fcntl.h>
  51#include <linux/init.h>
  52#include <linux/skbuff.h>
  53#include <linux/netdevice.h>
  54#include <linux/etherdevice.h>
  55#include <linux/miscdevice.h>
  56#include <linux/ethtool.h>
  57#include <linux/rtnetlink.h>
  58#include <linux/compat.h>
  59#include <linux/if.h>
  60#include <linux/if_arp.h>
  61#include <linux/if_ether.h>
  62#include <linux/if_tun.h>
  63#include <linux/crc32.h>
  64#include <linux/nsproxy.h>
  65#include <linux/virtio_net.h>
  66#include <linux/rcupdate.h>
  67#include <net/net_namespace.h>
  68#include <net/netns/generic.h>
  69#include <net/rtnetlink.h>
  70#include <net/sock.h>
  71
  72#include <asm/system.h>
  73#include <asm/uaccess.h>
  74
  75/* Uncomment to enable debugging */
  76/* #define TUN_DEBUG 1 */
  77
  78#ifdef TUN_DEBUG
  79static int debug;
  80
  81#define tun_debug(level, tun, fmt, args...)			\
  82do {								\
  83	if (tun->debug)						\
  84		netdev_printk(level, tun->dev, fmt, ##args);	\
  85} while (0)
  86#define DBG1(level, fmt, args...)				\
  87do {								\
  88	if (debug == 2)						\
  89		printk(level fmt, ##args);			\
  90} while (0)
  91#else
  92#define tun_debug(level, tun, fmt, args...)			\
  93do {								\
  94	if (0)							\
  95		netdev_printk(level, tun->dev, fmt, ##args);	\
  96} while (0)
  97#define DBG1(level, fmt, args...)				\
  98do {								\
  99	if (0)							\
 100		printk(level fmt, ##args);			\
 101} while (0)
 102#endif
 103
 104#define FLT_EXACT_COUNT 8
 105struct tap_filter {
 106	unsigned int    count;    /* Number of addrs. Zero means disabled */
 107	u32             mask[2];  /* Mask of the hashed addrs */
 108	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 109};
 110
 111struct tun_file {
 112	atomic_t count;
 113	struct tun_struct *tun;
 114	struct net *net;
 115};
 116
 117struct tun_sock;
 118
 119struct tun_struct {
 120	struct tun_file		*tfile;
 121	unsigned int 		flags;
 122	uid_t			owner;
 123	gid_t			group;
 124
 125	struct net_device	*dev;
 126	u32			set_features;
 127#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 128			  NETIF_F_TSO6|NETIF_F_UFO)
 129	struct fasync_struct	*fasync;
 130
 131	struct tap_filter       txflt;
 132	struct socket		socket;
 133	struct socket_wq	wq;
 134
 135	int			vnet_hdr_sz;
 136
 137#ifdef TUN_DEBUG
 138	int debug;
 139#endif
 140};
 141
 142struct tun_sock {
 143	struct sock		sk;
 144	struct tun_struct	*tun;
 145};
 146
 147static inline struct tun_sock *tun_sk(struct sock *sk)
 148{
 149	return container_of(sk, struct tun_sock, sk);
 150}
 151
 152static int tun_attach(struct tun_struct *tun, struct file *file)
 153{
 154	struct tun_file *tfile = file->private_data;
 155	int err;
 156
 157	ASSERT_RTNL();
 158
 159	netif_tx_lock_bh(tun->dev);
 160
 161	err = -EINVAL;
 162	if (tfile->tun)
 163		goto out;
 164
 165	err = -EBUSY;
 166	if (tun->tfile)
 167		goto out;
 168
 169	err = 0;
 170	tfile->tun = tun;
 171	tun->tfile = tfile;
 172	tun->socket.file = file;
 173	netif_carrier_on(tun->dev);
 174	dev_hold(tun->dev);
 175	sock_hold(tun->socket.sk);
 176	atomic_inc(&tfile->count);
 177
 178out:
 179	netif_tx_unlock_bh(tun->dev);
 180	return err;
 181}
 182
 183static void __tun_detach(struct tun_struct *tun)
 184{
 185	/* Detach from net device */
 186	netif_tx_lock_bh(tun->dev);
 187	netif_carrier_off(tun->dev);
 188	tun->tfile = NULL;
 189	tun->socket.file = NULL;
 190	netif_tx_unlock_bh(tun->dev);
 191
 192	/* Drop read queue */
 193	skb_queue_purge(&tun->socket.sk->sk_receive_queue);
 194
 195	/* Drop the extra count on the net device */
 196	dev_put(tun->dev);
 197}
 198
 199static void tun_detach(struct tun_struct *tun)
 200{
 201	rtnl_lock();
 202	__tun_detach(tun);
 203	rtnl_unlock();
 204}
 205
 206static struct tun_struct *__tun_get(struct tun_file *tfile)
 207{
 208	struct tun_struct *tun = NULL;
 209
 210	if (atomic_inc_not_zero(&tfile->count))
 211		tun = tfile->tun;
 212
 213	return tun;
 214}
 215
 216static struct tun_struct *tun_get(struct file *file)
 217{
 218	return __tun_get(file->private_data);
 219}
 220
 221static void tun_put(struct tun_struct *tun)
 222{
 223	struct tun_file *tfile = tun->tfile;
 224
 225	if (atomic_dec_and_test(&tfile->count))
 226		tun_detach(tfile->tun);
 227}
 228
 229/* TAP filtering */
 230static void addr_hash_set(u32 *mask, const u8 *addr)
 231{
 232	int n = ether_crc(ETH_ALEN, addr) >> 26;
 233	mask[n >> 5] |= (1 << (n & 31));
 234}
 235
 236static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
 237{
 238	int n = ether_crc(ETH_ALEN, addr) >> 26;
 239	return mask[n >> 5] & (1 << (n & 31));
 240}
 241
 242static int update_filter(struct tap_filter *filter, void __user *arg)
 243{
 244	struct { u8 u[ETH_ALEN]; } *addr;
 245	struct tun_filter uf;
 246	int err, alen, n, nexact;
 247
 248	if (copy_from_user(&uf, arg, sizeof(uf)))
 249		return -EFAULT;
 250
 251	if (!uf.count) {
 252		/* Disabled */
 253		filter->count = 0;
 254		return 0;
 255	}
 256
 257	alen = ETH_ALEN * uf.count;
 258	addr = kmalloc(alen, GFP_KERNEL);
 259	if (!addr)
 260		return -ENOMEM;
 261
 262	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
 263		err = -EFAULT;
 264		goto done;
 265	}
 266
  267	/* The filter is updated without holding any locks, which is
  268	 * perfectly safe: we disable it first, and in the worst
 269	 * case we'll accept a few undesired packets. */
 270	filter->count = 0;
 271	wmb();
 272
 273	/* Use first set of addresses as an exact filter */
 274	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
 275		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
 276
 277	nexact = n;
 278
 279	/* Remaining multicast addresses are hashed,
 280	 * unicast will leave the filter disabled. */
 281	memset(filter->mask, 0, sizeof(filter->mask));
 282	for (; n < uf.count; n++) {
 283		if (!is_multicast_ether_addr(addr[n].u)) {
 284			err = 0; /* no filter */
 285			goto done;
 286		}
 287		addr_hash_set(filter->mask, addr[n].u);
 288	}
 289
 290	/* For ALLMULTI just set the mask to all ones.
 291	 * This overrides the mask populated above. */
 292	if ((uf.flags & TUN_FLT_ALLMULTI))
 293		memset(filter->mask, ~0, sizeof(filter->mask));
 294
 295	/* Now enable the filter */
 296	wmb();
 297	filter->count = nexact;
 298
 299	/* Return the number of exact filters */
 300	err = nexact;
 301
 302done:
 303	kfree(addr);
 304	return err;
 305}
 306
 307/* Returns: 0 - drop, !=0 - accept */
 308static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
 309{
  310	/* Cannot use eth_hdr(skb) here because skb_mac_header() is
  311	 * incorrect at this point. */
 312	struct ethhdr *eh = (struct ethhdr *) skb->data;
 313	int i;
 314
 315	/* Exact match */
 316	for (i = 0; i < filter->count; i++)
 317		if (!compare_ether_addr(eh->h_dest, filter->addr[i]))
 318			return 1;
 319
 320	/* Inexact match (multicast only) */
 321	if (is_multicast_ether_addr(eh->h_dest))
 322		return addr_hash_test(filter->mask, eh->h_dest);
 323
 324	return 0;
 325}
 326
 327/*
 328 * Checks whether the packet is accepted or not.
 329 * Returns: 0 - drop, !=0 - accept
 330 */
 331static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 332{
 333	if (!filter->count)
 334		return 1;
 335
 336	return run_filter(filter, skb);
 337}
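/*
 * Editor's illustrative sketch (not part of tun.c): how userspace programs
 * the TX filter implemented above.  update_filter() is reached through the
 * TUNSETTXFILTER ioctl (see __tun_chr_ioctl() below); "fd" is assumed to be
 * an already-attached tap device descriptor.  The first FLT_EXACT_COUNT
 * addresses become exact matches and any further multicast addresses are
 * hashed; this wrapper caps itself at 8 addresses only because of its
 * fixed buffer.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

static int set_tap_filter(int fd, const unsigned char (*macs)[ETH_ALEN],
			  unsigned int nmacs)
{
	/* struct tun_filter ends in a flexible address array */
	struct {
		struct tun_filter filt;
		unsigned char addrs[8][ETH_ALEN];
	} req;

	if (nmacs > 8)
		return -1;

	memset(&req, 0, sizeof(req));
	req.filt.flags = 0;			/* or TUN_FLT_ALLMULTI */
	req.filt.count = nmacs;
	memcpy(req.addrs, macs, nmacs * ETH_ALEN);

	return ioctl(fd, TUNSETTXFILTER, &req);
}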
 338
 339/* Network device part of the driver */
 340
 341static const struct ethtool_ops tun_ethtool_ops;
 342
 343/* Net device detach from fd. */
 344static void tun_net_uninit(struct net_device *dev)
 345{
 346	struct tun_struct *tun = netdev_priv(dev);
 347	struct tun_file *tfile = tun->tfile;
 348
 349	/* Inform the methods they need to stop using the dev.
 350	 */
 351	if (tfile) {
 352		wake_up_all(&tun->wq.wait);
 353		if (atomic_dec_and_test(&tfile->count))
 354			__tun_detach(tun);
 355	}
 356}
 357
 358static void tun_free_netdev(struct net_device *dev)
 359{
 360	struct tun_struct *tun = netdev_priv(dev);
 361
 362	sock_put(tun->socket.sk);
 363}
 364
 365/* Net device open. */
 366static int tun_net_open(struct net_device *dev)
 367{
 368	netif_start_queue(dev);
 369	return 0;
 370}
 371
 372/* Net device close. */
 373static int tun_net_close(struct net_device *dev)
 374{
 375	netif_stop_queue(dev);
 376	return 0;
 377}
 378
 379/* Net device start xmit */
 380static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 381{
 382	struct tun_struct *tun = netdev_priv(dev);
 383
 384	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 385
 386	/* Drop packet if interface is not attached */
 387	if (!tun->tfile)
 388		goto drop;
 389
 390	/* Drop if the filter does not like it.
 391	 * This is a noop if the filter is disabled.
 392	 * Filter can be enabled only for the TAP devices. */
 393	if (!check_filter(&tun->txflt, skb))
 394		goto drop;
 395
 396	if (tun->socket.sk->sk_filter &&
 397	    sk_filter(tun->socket.sk, skb))
 398		goto drop;
 399
 400	if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
 401		if (!(tun->flags & TUN_ONE_QUEUE)) {
 402			/* Normal queueing mode. */
 403			/* Packet scheduler handles dropping of further packets. */
 404			netif_stop_queue(dev);
 405
 406			/* We won't see all dropped packets individually, so overrun
 407			 * error is more appropriate. */
 408			dev->stats.tx_fifo_errors++;
 409		} else {
 410			/* Single queue mode.
 411			 * Driver handles dropping of all packets itself. */
 412			goto drop;
 413		}
 414	}
 415
 416	/* Orphan the skb - required as we might hang on to it
  417	 * for an indefinite time. */
 418	skb_orphan(skb);
 419
 420	/* Enqueue packet */
 421	skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);
 422
 423	/* Notify and wake up reader process */
 424	if (tun->flags & TUN_FASYNC)
 425		kill_fasync(&tun->fasync, SIGIO, POLL_IN);
 426	wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
 427				   POLLRDNORM | POLLRDBAND);
 428	return NETDEV_TX_OK;
 429
 430drop:
 431	dev->stats.tx_dropped++;
 432	kfree_skb(skb);
 433	return NETDEV_TX_OK;
 434}
 435
 436static void tun_net_mclist(struct net_device *dev)
 437{
 438	/*
 439	 * This callback is supposed to deal with mc filter in
 440	 * _rx_ path and has nothing to do with the _tx_ path.
 441	 * In rx path we always accept everything userspace gives us.
 442	 */
 443}
 444
 445#define MIN_MTU 68
 446#define MAX_MTU 65535
 447
 448static int
 449tun_net_change_mtu(struct net_device *dev, int new_mtu)
 450{
 451	if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
 452		return -EINVAL;
 453	dev->mtu = new_mtu;
 454	return 0;
 455}
 456
 457static u32 tun_net_fix_features(struct net_device *dev, u32 features)
 458{
 459	struct tun_struct *tun = netdev_priv(dev);
 460
 461	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 462}
 463#ifdef CONFIG_NET_POLL_CONTROLLER
 464static void tun_poll_controller(struct net_device *dev)
 465{
 466	/*
 467	 * Tun only receives frames when:
 468	 * 1) the char device endpoint gets data from user space
 469	 * 2) the tun socket gets a sendmsg call from user space
  470	 * Since both of those are synchronous operations, we are guaranteed
  471	 * never to have pending data when we poll for it,
  472	 * so there's nothing to do here but return.
  473	 * We need this though so netpoll recognizes us as an interface that
  474	 * supports polling, which enables bridge devices in virt setups to
  475	 * still use netconsole.
 476	 */
 477	return;
 478}
 479#endif
 480static const struct net_device_ops tun_netdev_ops = {
 481	.ndo_uninit		= tun_net_uninit,
 482	.ndo_open		= tun_net_open,
 483	.ndo_stop		= tun_net_close,
 484	.ndo_start_xmit		= tun_net_xmit,
 485	.ndo_change_mtu		= tun_net_change_mtu,
 486	.ndo_fix_features	= tun_net_fix_features,
 487#ifdef CONFIG_NET_POLL_CONTROLLER
 488	.ndo_poll_controller	= tun_poll_controller,
 489#endif
 490};
 491
 492static const struct net_device_ops tap_netdev_ops = {
 493	.ndo_uninit		= tun_net_uninit,
 494	.ndo_open		= tun_net_open,
 495	.ndo_stop		= tun_net_close,
 496	.ndo_start_xmit		= tun_net_xmit,
 497	.ndo_change_mtu		= tun_net_change_mtu,
 498	.ndo_fix_features	= tun_net_fix_features,
 499	.ndo_set_multicast_list	= tun_net_mclist,
 500	.ndo_set_mac_address	= eth_mac_addr,
 501	.ndo_validate_addr	= eth_validate_addr,
 502#ifdef CONFIG_NET_POLL_CONTROLLER
 503	.ndo_poll_controller	= tun_poll_controller,
 504#endif
 505};
 506
 507/* Initialize net device. */
 508static void tun_net_init(struct net_device *dev)
 509{
 510	struct tun_struct *tun = netdev_priv(dev);
 511
 512	switch (tun->flags & TUN_TYPE_MASK) {
 513	case TUN_TUN_DEV:
 514		dev->netdev_ops = &tun_netdev_ops;
 515
 516		/* Point-to-Point TUN Device */
 517		dev->hard_header_len = 0;
 518		dev->addr_len = 0;
 519		dev->mtu = 1500;
 520
 521		/* Zero header length */
 522		dev->type = ARPHRD_NONE;
 523		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
 524		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
 525		break;
 526
 527	case TUN_TAP_DEV:
 528		dev->netdev_ops = &tap_netdev_ops;
 529		/* Ethernet TAP Device */
 530		ether_setup(dev);
 531		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 532
 533		random_ether_addr(dev->dev_addr);
 534
 535		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
 536		break;
 537	}
 538}
 539
 540/* Character device part */
 541
 542/* Poll */
 543static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
 544{
 545	struct tun_file *tfile = file->private_data;
 546	struct tun_struct *tun = __tun_get(tfile);
 547	struct sock *sk;
 548	unsigned int mask = 0;
 549
 550	if (!tun)
 551		return POLLERR;
 552
 553	sk = tun->socket.sk;
 554
 555	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 556
 557	poll_wait(file, &tun->wq.wait, wait);
 558
 559	if (!skb_queue_empty(&sk->sk_receive_queue))
 560		mask |= POLLIN | POLLRDNORM;
 561
 562	if (sock_writeable(sk) ||
 563	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
 564	     sock_writeable(sk)))
 565		mask |= POLLOUT | POLLWRNORM;
 566
 567	if (tun->dev->reg_state != NETREG_REGISTERED)
 568		mask = POLLERR;
 569
 570	tun_put(tun);
 571	return mask;
 572}
 573
 574/* prepad is the amount to reserve at front.  len is length after that.
 575 * linear is a hint as to how much to copy (usually headers). */
 576static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
 577				     size_t prepad, size_t len,
 578				     size_t linear, int noblock)
 579{
 580	struct sock *sk = tun->socket.sk;
 581	struct sk_buff *skb;
 582	int err;
 583
 584	sock_update_classid(sk);
 585
 586	/* Under a page?  Don't bother with paged skb. */
 587	if (prepad + len < PAGE_SIZE || !linear)
 588		linear = len;
 589
 590	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
 591				   &err);
 592	if (!skb)
 593		return ERR_PTR(err);
 594
 595	skb_reserve(skb, prepad);
 596	skb_put(skb, linear);
 597	skb->data_len = len - linear;
 598	skb->len += len - linear;
 599
 600	return skb;
 601}
 602
 603/* Get packet from user space buffer */
 604static ssize_t tun_get_user(struct tun_struct *tun,
 605			    const struct iovec *iv, size_t count,
 606			    int noblock)
 607{
 608	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
 609	struct sk_buff *skb;
 610	size_t len = count, align = NET_SKB_PAD;
 611	struct virtio_net_hdr gso = { 0 };
 612	int offset = 0;
 613
 614	if (!(tun->flags & TUN_NO_PI)) {
 615		if ((len -= sizeof(pi)) > count)
 616			return -EINVAL;
 617
 618		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
 619			return -EFAULT;
 620		offset += sizeof(pi);
 621	}
 622
 623	if (tun->flags & TUN_VNET_HDR) {
 624		if ((len -= tun->vnet_hdr_sz) > count)
 625			return -EINVAL;
 626
 627		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
 628			return -EFAULT;
 629
 630		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
 631		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
 632			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
 633
 634		if (gso.hdr_len > len)
 635			return -EINVAL;
 636		offset += tun->vnet_hdr_sz;
 637	}
 638
 639	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
 640		align += NET_IP_ALIGN;
 641		if (unlikely(len < ETH_HLEN ||
 642			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
 643			return -EINVAL;
 644	}
 645
 646	skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
 647	if (IS_ERR(skb)) {
 648		if (PTR_ERR(skb) != -EAGAIN)
 649			tun->dev->stats.rx_dropped++;
 650		return PTR_ERR(skb);
 651	}
 652
 653	if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) {
 654		tun->dev->stats.rx_dropped++;
 655		kfree_skb(skb);
 656		return -EFAULT;
 657	}
 658
 659	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
 660		if (!skb_partial_csum_set(skb, gso.csum_start,
 661					  gso.csum_offset)) {
 662			tun->dev->stats.rx_frame_errors++;
 663			kfree_skb(skb);
 664			return -EINVAL;
 665		}
 666	}
 667
 668	switch (tun->flags & TUN_TYPE_MASK) {
 669	case TUN_TUN_DEV:
 670		if (tun->flags & TUN_NO_PI) {
 671			switch (skb->data[0] & 0xf0) {
 672			case 0x40:
 673				pi.proto = htons(ETH_P_IP);
 674				break;
 675			case 0x60:
 676				pi.proto = htons(ETH_P_IPV6);
 677				break;
 678			default:
 679				tun->dev->stats.rx_dropped++;
 680				kfree_skb(skb);
 681				return -EINVAL;
 682			}
 683		}
 684
 685		skb_reset_mac_header(skb);
 686		skb->protocol = pi.proto;
 687		skb->dev = tun->dev;
 688		break;
 689	case TUN_TAP_DEV:
 690		skb->protocol = eth_type_trans(skb, tun->dev);
 691		break;
 692	}
 693
 694	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
 695		pr_debug("GSO!\n");
 696		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
 697		case VIRTIO_NET_HDR_GSO_TCPV4:
 698			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
 699			break;
 700		case VIRTIO_NET_HDR_GSO_TCPV6:
 701			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
 702			break;
 703		case VIRTIO_NET_HDR_GSO_UDP:
 704			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
 705			break;
 706		default:
 707			tun->dev->stats.rx_frame_errors++;
 708			kfree_skb(skb);
 709			return -EINVAL;
 710		}
 711
 712		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
 713			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 714
 715		skb_shinfo(skb)->gso_size = gso.gso_size;
 716		if (skb_shinfo(skb)->gso_size == 0) {
 717			tun->dev->stats.rx_frame_errors++;
 718			kfree_skb(skb);
 719			return -EINVAL;
 720		}
 721
 722		/* Header must be checked, and gso_segs computed. */
 723		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
 724		skb_shinfo(skb)->gso_segs = 0;
 725	}
 726
 727	netif_rx_ni(skb);
 728
 729	tun->dev->stats.rx_packets++;
 730	tun->dev->stats.rx_bytes += len;
 731
 732	return count;
 733}
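/*
 * Editor's illustrative sketch (not part of tun.c): the framing that
 * tun_get_user() above expects from userspace.  Unless the device was
 * created with IFF_NO_PI, every write() starts with a struct tun_pi
 * carrying the protocol, and every read() returns one (tun_put_user()
 * below sets TUN_PKT_STRIP in pi.flags when the buffer was too small).
 */
#include <arpa/inet.h>
#include <sys/uio.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>

static ssize_t tun_write_ipv4(int fd, const void *pkt, size_t len)
{
	struct tun_pi pi = { .flags = 0, .proto = htons(ETH_P_IP) };
	struct iovec iov[2] = {
		{ .iov_base = &pi,         .iov_len = sizeof(pi) },
		{ .iov_base = (void *)pkt, .iov_len = len },
	};

	/* One writev() per packet: packet info header, then payload */
	return writev(fd, iov, 2);
}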
 734
 735static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
 736			      unsigned long count, loff_t pos)
 737{
 738	struct file *file = iocb->ki_filp;
 739	struct tun_struct *tun = tun_get(file);
 740	ssize_t result;
 741
 742	if (!tun)
 743		return -EBADFD;
 744
 745	tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
 746
 747	result = tun_get_user(tun, iv, iov_length(iv, count),
 748			      file->f_flags & O_NONBLOCK);
 749
 750	tun_put(tun);
 751	return result;
 752}
 753
 754/* Put packet to the user space buffer */
 755static ssize_t tun_put_user(struct tun_struct *tun,
 756			    struct sk_buff *skb,
 757			    const struct iovec *iv, int len)
 758{
 759	struct tun_pi pi = { 0, skb->protocol };
 760	ssize_t total = 0;
 761
 762	if (!(tun->flags & TUN_NO_PI)) {
 763		if ((len -= sizeof(pi)) < 0)
 764			return -EINVAL;
 765
 766		if (len < skb->len) {
  767			/* Packet will be stripped */
 768			pi.flags |= TUN_PKT_STRIP;
 769		}
 770
 771		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
 772			return -EFAULT;
 773		total += sizeof(pi);
 774	}
 775
 776	if (tun->flags & TUN_VNET_HDR) {
 777		struct virtio_net_hdr gso = { 0 }; /* no info leak */
 778		if ((len -= tun->vnet_hdr_sz) < 0)
 779			return -EINVAL;
 780
 781		if (skb_is_gso(skb)) {
 782			struct skb_shared_info *sinfo = skb_shinfo(skb);
 783
 784			/* This is a hint as to how much should be linear. */
 785			gso.hdr_len = skb_headlen(skb);
 786			gso.gso_size = sinfo->gso_size;
 787			if (sinfo->gso_type & SKB_GSO_TCPV4)
 788				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
 789			else if (sinfo->gso_type & SKB_GSO_TCPV6)
 790				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
 791			else if (sinfo->gso_type & SKB_GSO_UDP)
 792				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
 793			else {
 794				pr_err("unexpected GSO type: "
 795				       "0x%x, gso_size %d, hdr_len %d\n",
 796				       sinfo->gso_type, gso.gso_size,
 797				       gso.hdr_len);
 798				print_hex_dump(KERN_ERR, "tun: ",
 799					       DUMP_PREFIX_NONE,
 800					       16, 1, skb->head,
 801					       min((int)gso.hdr_len, 64), true);
 802				WARN_ON_ONCE(1);
 803				return -EINVAL;
 804			}
 805			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
 806				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
 807		} else
 808			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
 809
 810		if (skb->ip_summed == CHECKSUM_PARTIAL) {
 811			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
 812			gso.csum_start = skb_checksum_start_offset(skb);
 813			gso.csum_offset = skb->csum_offset;
 814		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
 815			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
 816		} /* else everything is zero */
 817
 818		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
 819					       sizeof(gso))))
 820			return -EFAULT;
 821		total += tun->vnet_hdr_sz;
 822	}
 823
 824	len = min_t(int, skb->len, len);
 825
 826	skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
 827	total += skb->len;
 828
 829	tun->dev->stats.tx_packets++;
 830	tun->dev->stats.tx_bytes += len;
 831
 832	return total;
 833}
 834
 835static ssize_t tun_do_read(struct tun_struct *tun,
 836			   struct kiocb *iocb, const struct iovec *iv,
 837			   ssize_t len, int noblock)
 838{
 839	DECLARE_WAITQUEUE(wait, current);
 840	struct sk_buff *skb;
 841	ssize_t ret = 0;
 842
 843	tun_debug(KERN_INFO, tun, "tun_chr_read\n");
 844
 845	if (unlikely(!noblock))
 846		add_wait_queue(&tun->wq.wait, &wait);
 847	while (len) {
 848		current->state = TASK_INTERRUPTIBLE;
 849
 850		/* Read frames from the queue */
 851		if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
 852			if (noblock) {
 853				ret = -EAGAIN;
 854				break;
 855			}
 856			if (signal_pending(current)) {
 857				ret = -ERESTARTSYS;
 858				break;
 859			}
 860			if (tun->dev->reg_state != NETREG_REGISTERED) {
 861				ret = -EIO;
 862				break;
 863			}
 864
 865			/* Nothing to read, let's sleep */
 866			schedule();
 867			continue;
 868		}
 869		netif_wake_queue(tun->dev);
 870
 871		ret = tun_put_user(tun, skb, iv, len);
 872		kfree_skb(skb);
 873		break;
 874	}
 875
 876	current->state = TASK_RUNNING;
 877	if (unlikely(!noblock))
 878		remove_wait_queue(&tun->wq.wait, &wait);
 879
 880	return ret;
 881}
 882
 883static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
 884			    unsigned long count, loff_t pos)
 885{
 886	struct file *file = iocb->ki_filp;
 887	struct tun_file *tfile = file->private_data;
 888	struct tun_struct *tun = __tun_get(tfile);
 889	ssize_t len, ret;
 890
 891	if (!tun)
 892		return -EBADFD;
 893	len = iov_length(iv, count);
 894	if (len < 0) {
 895		ret = -EINVAL;
 896		goto out;
 897	}
 898
 899	ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
 900	ret = min_t(ssize_t, ret, len);
 901out:
 902	tun_put(tun);
 903	return ret;
 904}
 905
 906static void tun_setup(struct net_device *dev)
 907{
 908	struct tun_struct *tun = netdev_priv(dev);
 909
 910	tun->owner = -1;
 911	tun->group = -1;
 912
 913	dev->ethtool_ops = &tun_ethtool_ops;
 914	dev->destructor = tun_free_netdev;
 915}
 916
  917/* Trivial set of netlink ops to allow deleting a tun or tap
  918 * device with netlink.
 919 */
 920static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
 921{
 922	return -EINVAL;
 923}
 924
 925static struct rtnl_link_ops tun_link_ops __read_mostly = {
 926	.kind		= DRV_NAME,
 927	.priv_size	= sizeof(struct tun_struct),
 928	.setup		= tun_setup,
 929	.validate	= tun_validate,
 930};
 931
 932static void tun_sock_write_space(struct sock *sk)
 933{
 934	struct tun_struct *tun;
 935	wait_queue_head_t *wqueue;
 936
 937	if (!sock_writeable(sk))
 938		return;
 939
 940	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
 941		return;
 942
 943	wqueue = sk_sleep(sk);
 944	if (wqueue && waitqueue_active(wqueue))
 945		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
 946						POLLWRNORM | POLLWRBAND);
 947
 948	tun = tun_sk(sk)->tun;
 949	kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
 950}
 951
 952static void tun_sock_destruct(struct sock *sk)
 953{
 954	free_netdev(tun_sk(sk)->tun->dev);
 955}
 956
 957static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
 958		       struct msghdr *m, size_t total_len)
 959{
 960	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
 961	return tun_get_user(tun, m->msg_iov, total_len,
 962			    m->msg_flags & MSG_DONTWAIT);
 963}
 964
 965static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
 966		       struct msghdr *m, size_t total_len,
 967		       int flags)
 968{
 969	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
 970	int ret;
 971	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
 972		return -EINVAL;
 973	ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
 974			  flags & MSG_DONTWAIT);
 975	if (ret > total_len) {
 976		m->msg_flags |= MSG_TRUNC;
 977		ret = flags & MSG_TRUNC ? ret : total_len;
 978	}
 979	return ret;
 980}
 981
 982/* Ops structure to mimic raw sockets with tun */
 983static const struct proto_ops tun_socket_ops = {
 984	.sendmsg = tun_sendmsg,
 985	.recvmsg = tun_recvmsg,
 986};
 987
 988static struct proto tun_proto = {
 989	.name		= "tun",
 990	.owner		= THIS_MODULE,
 991	.obj_size	= sizeof(struct tun_sock),
 992};
 993
 994static int tun_flags(struct tun_struct *tun)
 995{
 996	int flags = 0;
 997
 998	if (tun->flags & TUN_TUN_DEV)
 999		flags |= IFF_TUN;
1000	else
1001		flags |= IFF_TAP;
1002
1003	if (tun->flags & TUN_NO_PI)
1004		flags |= IFF_NO_PI;
1005
1006	if (tun->flags & TUN_ONE_QUEUE)
1007		flags |= IFF_ONE_QUEUE;
1008
1009	if (tun->flags & TUN_VNET_HDR)
1010		flags |= IFF_VNET_HDR;
1011
1012	return flags;
1013}
1014
1015static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1016			      char *buf)
1017{
1018	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1019	return sprintf(buf, "0x%x\n", tun_flags(tun));
1020}
1021
1022static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1023			      char *buf)
1024{
1025	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1026	return sprintf(buf, "%d\n", tun->owner);
1027}
1028
1029static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1030			      char *buf)
1031{
1032	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1033	return sprintf(buf, "%d\n", tun->group);
1034}
1035
1036static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1037static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1038static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1039
1040static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1041{
1042	struct sock *sk;
1043	struct tun_struct *tun;
1044	struct net_device *dev;
1045	int err;
1046
1047	dev = __dev_get_by_name(net, ifr->ifr_name);
1048	if (dev) {
1049		const struct cred *cred = current_cred();
1050
1051		if (ifr->ifr_flags & IFF_TUN_EXCL)
1052			return -EBUSY;
1053		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1054			tun = netdev_priv(dev);
1055		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1056			tun = netdev_priv(dev);
1057		else
1058			return -EINVAL;
1059
1060		if (((tun->owner != -1 && cred->euid != tun->owner) ||
1061		     (tun->group != -1 && !in_egroup_p(tun->group))) &&
1062		    !capable(CAP_NET_ADMIN))
1063			return -EPERM;
1064		err = security_tun_dev_attach(tun->socket.sk);
1065		if (err < 0)
1066			return err;
1067
1068		err = tun_attach(tun, file);
1069		if (err < 0)
1070			return err;
1071	}
1072	else {
1073		char *name;
1074		unsigned long flags = 0;
1075
1076		if (!capable(CAP_NET_ADMIN))
1077			return -EPERM;
1078		err = security_tun_dev_create();
1079		if (err < 0)
1080			return err;
1081
1082		/* Set dev type */
1083		if (ifr->ifr_flags & IFF_TUN) {
1084			/* TUN device */
1085			flags |= TUN_TUN_DEV;
1086			name = "tun%d";
1087		} else if (ifr->ifr_flags & IFF_TAP) {
1088			/* TAP device */
1089			flags |= TUN_TAP_DEV;
1090			name = "tap%d";
1091		} else
1092			return -EINVAL;
1093
1094		if (*ifr->ifr_name)
1095			name = ifr->ifr_name;
1096
1097		dev = alloc_netdev(sizeof(struct tun_struct), name,
1098				   tun_setup);
1099		if (!dev)
1100			return -ENOMEM;
1101
1102		dev_net_set(dev, net);
1103		dev->rtnl_link_ops = &tun_link_ops;
1104
1105		tun = netdev_priv(dev);
1106		tun->dev = dev;
1107		tun->flags = flags;
1108		tun->txflt.count = 0;
1109		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1110
1111		err = -ENOMEM;
1112		sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
1113		if (!sk)
1114			goto err_free_dev;
1115
1116		tun->socket.wq = &tun->wq;
1117		init_waitqueue_head(&tun->wq.wait);
1118		tun->socket.ops = &tun_socket_ops;
1119		sock_init_data(&tun->socket, sk);
1120		sk->sk_write_space = tun_sock_write_space;
1121		sk->sk_sndbuf = INT_MAX;
1122
1123		tun_sk(sk)->tun = tun;
1124
1125		security_tun_dev_post_create(sk);
1126
1127		tun_net_init(dev);
1128
1129		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1130			TUN_USER_FEATURES;
1131		dev->features = dev->hw_features;
1132
1133		err = register_netdevice(tun->dev);
1134		if (err < 0)
1135			goto err_free_sk;
1136
1137		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1138		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1139		    device_create_file(&tun->dev->dev, &dev_attr_group))
1140			pr_err("Failed to create tun sysfs files\n");
1141
1142		sk->sk_destruct = tun_sock_destruct;
1143
1144		err = tun_attach(tun, file);
1145		if (err < 0)
1146			goto failed;
1147	}
1148
1149	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1150
1151	if (ifr->ifr_flags & IFF_NO_PI)
1152		tun->flags |= TUN_NO_PI;
1153	else
1154		tun->flags &= ~TUN_NO_PI;
1155
1156	if (ifr->ifr_flags & IFF_ONE_QUEUE)
1157		tun->flags |= TUN_ONE_QUEUE;
1158	else
1159		tun->flags &= ~TUN_ONE_QUEUE;
1160
1161	if (ifr->ifr_flags & IFF_VNET_HDR)
1162		tun->flags |= TUN_VNET_HDR;
1163	else
1164		tun->flags &= ~TUN_VNET_HDR;
1165
1166	/* Make sure persistent devices do not get stuck in
1167	 * xoff state.
1168	 */
1169	if (netif_running(tun->dev))
1170		netif_wake_queue(tun->dev);
1171
1172	strcpy(ifr->ifr_name, tun->dev->name);
1173	return 0;
1174
1175 err_free_sk:
1176	sock_put(sk);
1177 err_free_dev:
1178	free_netdev(dev);
1179 failed:
1180	return err;
1181}
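/*
 * Editor's illustrative sketch (not part of tun.c): the userspace side of
 * tun_set_iff() above.  This is the canonical open sequence from
 * Documentation/networking/tuntap.txt; error handling is minimal.  "dev"
 * may name an existing persistent device, a pattern such as "tun%d", or
 * be empty to let the kernel pick a name.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tun_alloc(char *dev)
{
	struct ifreq ifr;
	int fd;

	fd = open("/dev/net/tun", O_RDWR);
	if (fd < 0)
		return fd;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	/* or IFF_TAP */
	if (*dev)
		strncpy(ifr.ifr_name, dev, IFNAMSIZ);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	strcpy(dev, ifr.ifr_name);	/* the kernel fills in the final name */
	return fd;
}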
1182
1183static int tun_get_iff(struct net *net, struct tun_struct *tun,
1184		       struct ifreq *ifr)
1185{
1186	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1187
1188	strcpy(ifr->ifr_name, tun->dev->name);
1189
1190	ifr->ifr_flags = tun_flags(tun);
1191
1192	return 0;
1193}
1194
1195/* This is like a cut-down ethtool ops, except done via tun fd so no
1196 * privs required. */
1197static int set_offload(struct tun_struct *tun, unsigned long arg)
1198{
1199	u32 features = 0;
1200
1201	if (arg & TUN_F_CSUM) {
1202		features |= NETIF_F_HW_CSUM;
1203		arg &= ~TUN_F_CSUM;
1204
1205		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1206			if (arg & TUN_F_TSO_ECN) {
1207				features |= NETIF_F_TSO_ECN;
1208				arg &= ~TUN_F_TSO_ECN;
1209			}
1210			if (arg & TUN_F_TSO4)
1211				features |= NETIF_F_TSO;
1212			if (arg & TUN_F_TSO6)
1213				features |= NETIF_F_TSO6;
1214			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1215		}
1216
1217		if (arg & TUN_F_UFO) {
1218			features |= NETIF_F_UFO;
1219			arg &= ~TUN_F_UFO;
1220		}
1221	}
1222
1223	/* This gives the user a way to test for new features in future by
1224	 * trying to set them. */
1225	if (arg)
1226		return -EINVAL;
1227
1228	tun->set_features = features;
1229	netdev_update_features(tun->dev);
1230
1231	return 0;
1232}
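/*
 * Editor's illustrative sketch (not part of tun.c): probing offloads the
 * way the comment above suggests.  Unsupported TUN_F_* bits make
 * TUNSETOFFLOAD fail with EINVAL, so a caller can retry with smaller
 * sets until one is accepted.
 */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static unsigned long negotiate_offloads(int fd)
{
	unsigned long want = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			     TUN_F_TSO_ECN;

	if (ioctl(fd, TUNSETOFFLOAD, want) == 0)
		return want;
	if (ioctl(fd, TUNSETOFFLOAD, (unsigned long)TUN_F_CSUM) == 0)
		return TUN_F_CSUM;		/* checksum offload only */
	return 0;				/* no offloads accepted */
}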
1233
1234static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1235			    unsigned long arg, int ifreq_len)
1236{
1237	struct tun_file *tfile = file->private_data;
1238	struct tun_struct *tun;
1239	void __user* argp = (void __user*)arg;
1240	struct sock_fprog fprog;
1241	struct ifreq ifr;
1242	int sndbuf;
1243	int vnet_hdr_sz;
1244	int ret;
1245
1246	if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
1247		if (copy_from_user(&ifr, argp, ifreq_len))
1248			return -EFAULT;
1249
1250	if (cmd == TUNGETFEATURES) {
1251		/* Currently this just means: "what IFF flags are valid?".
1252		 * This is needed because we never checked for invalid flags on
1253		 * TUNSETIFF. */
1254		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1255				IFF_VNET_HDR,
1256				(unsigned int __user*)argp);
1257	}
1258
1259	rtnl_lock();
1260
1261	tun = __tun_get(tfile);
1262	if (cmd == TUNSETIFF && !tun) {
1263		ifr.ifr_name[IFNAMSIZ-1] = '\0';
1264
1265		ret = tun_set_iff(tfile->net, file, &ifr);
1266
1267		if (ret)
1268			goto unlock;
1269
1270		if (copy_to_user(argp, &ifr, ifreq_len))
1271			ret = -EFAULT;
1272		goto unlock;
1273	}
1274
1275	ret = -EBADFD;
1276	if (!tun)
1277		goto unlock;
1278
1279	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);
1280
1281	ret = 0;
1282	switch (cmd) {
1283	case TUNGETIFF:
1284		ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1285		if (ret)
1286			break;
1287
1288		if (copy_to_user(argp, &ifr, ifreq_len))
1289			ret = -EFAULT;
1290		break;
1291
1292	case TUNSETNOCSUM:
1293		/* Disable/Enable checksum */
1294
1295		/* [unimplemented] */
1296		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
1297			  arg ? "disabled" : "enabled");
1298		break;
1299
1300	case TUNSETPERSIST:
1301		/* Disable/Enable persist mode */
1302		if (arg)
1303			tun->flags |= TUN_PERSIST;
1304		else
1305			tun->flags &= ~TUN_PERSIST;
1306
1307		tun_debug(KERN_INFO, tun, "persist %s\n",
1308			  arg ? "enabled" : "disabled");
1309		break;
1310
1311	case TUNSETOWNER:
1312		/* Set owner of the device */
1313		tun->owner = (uid_t) arg;
1314
1315		tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
1316		break;
1317
1318	case TUNSETGROUP:
1319		/* Set group of the device */
1320		tun->group = (gid_t) arg;
1321
1322		tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
1323		break;
1324
1325	case TUNSETLINK:
1326		/* Only allow setting the type when the interface is down */
1327		if (tun->dev->flags & IFF_UP) {
1328			tun_debug(KERN_INFO, tun,
1329				  "Linktype set failed because interface is up\n");
1330			ret = -EBUSY;
1331		} else {
1332			tun->dev->type = (int) arg;
1333			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1334				  tun->dev->type);
1335			ret = 0;
1336		}
1337		break;
1338
1339#ifdef TUN_DEBUG
1340	case TUNSETDEBUG:
1341		tun->debug = arg;
1342		break;
1343#endif
1344	case TUNSETOFFLOAD:
1345		ret = set_offload(tun, arg);
1346		break;
1347
1348	case TUNSETTXFILTER:
1349		/* Can be set only for TAPs */
1350		ret = -EINVAL;
1351		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1352			break;
1353		ret = update_filter(&tun->txflt, (void __user *)arg);
1354		break;
1355
1356	case SIOCGIFHWADDR:
1357		/* Get hw address */
1358		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1359		ifr.ifr_hwaddr.sa_family = tun->dev->type;
1360		if (copy_to_user(argp, &ifr, ifreq_len))
1361			ret = -EFAULT;
1362		break;
1363
1364	case SIOCSIFHWADDR:
1365		/* Set hw address */
1366		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1367			  ifr.ifr_hwaddr.sa_data);
1368
1369		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
1370		break;
1371
1372	case TUNGETSNDBUF:
1373		sndbuf = tun->socket.sk->sk_sndbuf;
1374		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1375			ret = -EFAULT;
1376		break;
1377
1378	case TUNSETSNDBUF:
1379		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
1380			ret = -EFAULT;
1381			break;
1382		}
1383
1384		tun->socket.sk->sk_sndbuf = sndbuf;
1385		break;
1386
1387	case TUNGETVNETHDRSZ:
1388		vnet_hdr_sz = tun->vnet_hdr_sz;
1389		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
1390			ret = -EFAULT;
1391		break;
1392
1393	case TUNSETVNETHDRSZ:
1394		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
1395			ret = -EFAULT;
1396			break;
1397		}
1398		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
1399			ret = -EINVAL;
1400			break;
1401		}
1402
1403		tun->vnet_hdr_sz = vnet_hdr_sz;
1404		break;
1405
1406	case TUNATTACHFILTER:
1407		/* Can be set only for TAPs */
1408		ret = -EINVAL;
1409		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1410			break;
1411		ret = -EFAULT;
1412		if (copy_from_user(&fprog, argp, sizeof(fprog)))
1413			break;
1414
1415		ret = sk_attach_filter(&fprog, tun->socket.sk);
1416		break;
1417
1418	case TUNDETACHFILTER:
1419		/* Can be set only for TAPs */
1420		ret = -EINVAL;
1421		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1422			break;
1423		ret = sk_detach_filter(tun->socket.sk);
1424		break;
1425
1426	default:
1427		ret = -EINVAL;
1428		break;
1429	}
1430
1431unlock:
1432	rtnl_unlock();
1433	if (tun)
1434		tun_put(tun);
1435	return ret;
1436}
1437
1438static long tun_chr_ioctl(struct file *file,
1439			  unsigned int cmd, unsigned long arg)
1440{
1441	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
1442}
1443
1444#ifdef CONFIG_COMPAT
1445static long tun_chr_compat_ioctl(struct file *file,
1446			 unsigned int cmd, unsigned long arg)
1447{
1448	switch (cmd) {
1449	case TUNSETIFF:
1450	case TUNGETIFF:
1451	case TUNSETTXFILTER:
1452	case TUNGETSNDBUF:
1453	case TUNSETSNDBUF:
1454	case SIOCGIFHWADDR:
1455	case SIOCSIFHWADDR:
1456		arg = (unsigned long)compat_ptr(arg);
1457		break;
1458	default:
1459		arg = (compat_ulong_t)arg;
1460		break;
1461	}
1462
1463	/*
1464	 * compat_ifreq is shorter than ifreq, so we must not access beyond
1465	 * the end of that structure. All fields that are used in this
1466	 * driver are compatible though, we don't need to convert the
1467	 * contents.
1468	 */
1469	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
1470}
1471#endif /* CONFIG_COMPAT */
1472
1473static int tun_chr_fasync(int fd, struct file *file, int on)
1474{
1475	struct tun_struct *tun = tun_get(file);
1476	int ret;
1477
1478	if (!tun)
1479		return -EBADFD;
1480
1481	tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);
1482
1483	if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1484		goto out;
1485
1486	if (on) {
1487		ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
1488		if (ret)
1489			goto out;
1490		tun->flags |= TUN_FASYNC;
1491	} else
1492		tun->flags &= ~TUN_FASYNC;
1493	ret = 0;
1494out:
1495	tun_put(tun);
1496	return ret;
1497}
1498
1499static int tun_chr_open(struct inode *inode, struct file * file)
1500{
1501	struct tun_file *tfile;
1502
1503	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
1504
1505	tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
1506	if (!tfile)
1507		return -ENOMEM;
1508	atomic_set(&tfile->count, 0);
1509	tfile->tun = NULL;
1510	tfile->net = get_net(current->nsproxy->net_ns);
1511	file->private_data = tfile;
1512	return 0;
1513}
1514
1515static int tun_chr_close(struct inode *inode, struct file *file)
1516{
1517	struct tun_file *tfile = file->private_data;
1518	struct tun_struct *tun;
1519
1520	tun = __tun_get(tfile);
1521	if (tun) {
1522		struct net_device *dev = tun->dev;
1523
1524		tun_debug(KERN_INFO, tun, "tun_chr_close\n");
1525
1526		__tun_detach(tun);
1527
1528		/* If desirable, unregister the netdevice. */
1529		if (!(tun->flags & TUN_PERSIST)) {
1530			rtnl_lock();
1531			if (dev->reg_state == NETREG_REGISTERED)
1532				unregister_netdevice(dev);
1533			rtnl_unlock();
1534		}
1535	}
1536
1537	tun = tfile->tun;
1538	if (tun)
1539		sock_put(tun->socket.sk);
1540
1541	put_net(tfile->net);
1542	kfree(tfile);
1543
1544	return 0;
1545}
1546
1547static const struct file_operations tun_fops = {
1548	.owner	= THIS_MODULE,
1549	.llseek = no_llseek,
1550	.read  = do_sync_read,
1551	.aio_read  = tun_chr_aio_read,
1552	.write = do_sync_write,
1553	.aio_write = tun_chr_aio_write,
1554	.poll	= tun_chr_poll,
1555	.unlocked_ioctl	= tun_chr_ioctl,
1556#ifdef CONFIG_COMPAT
1557	.compat_ioctl = tun_chr_compat_ioctl,
1558#endif
1559	.open	= tun_chr_open,
1560	.release = tun_chr_close,
1561	.fasync = tun_chr_fasync
1562};
1563
1564static struct miscdevice tun_miscdev = {
1565	.minor = TUN_MINOR,
1566	.name = "tun",
1567	.nodename = "net/tun",
1568	.fops = &tun_fops,
1569};
1570
1571/* ethtool interface */
1572
1573static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1574{
1575	cmd->supported		= 0;
1576	cmd->advertising	= 0;
1577	ethtool_cmd_speed_set(cmd, SPEED_10);
1578	cmd->duplex		= DUPLEX_FULL;
1579	cmd->port		= PORT_TP;
1580	cmd->phy_address	= 0;
1581	cmd->transceiver	= XCVR_INTERNAL;
1582	cmd->autoneg		= AUTONEG_DISABLE;
1583	cmd->maxtxpkt		= 0;
1584	cmd->maxrxpkt		= 0;
1585	return 0;
1586}
1587
1588static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1589{
1590	struct tun_struct *tun = netdev_priv(dev);
1591
1592	strcpy(info->driver, DRV_NAME);
1593	strcpy(info->version, DRV_VERSION);
1594	strcpy(info->fw_version, "N/A");
1595
1596	switch (tun->flags & TUN_TYPE_MASK) {
1597	case TUN_TUN_DEV:
1598		strcpy(info->bus_info, "tun");
1599		break;
1600	case TUN_TAP_DEV:
1601		strcpy(info->bus_info, "tap");
1602		break;
1603	}
1604}
1605
1606static u32 tun_get_msglevel(struct net_device *dev)
1607{
1608#ifdef TUN_DEBUG
1609	struct tun_struct *tun = netdev_priv(dev);
1610	return tun->debug;
1611#else
1612	return -EOPNOTSUPP;
1613#endif
1614}
1615
1616static void tun_set_msglevel(struct net_device *dev, u32 value)
1617{
1618#ifdef TUN_DEBUG
1619	struct tun_struct *tun = netdev_priv(dev);
1620	tun->debug = value;
1621#endif
1622}
1623
1624static const struct ethtool_ops tun_ethtool_ops = {
1625	.get_settings	= tun_get_settings,
1626	.get_drvinfo	= tun_get_drvinfo,
1627	.get_msglevel	= tun_get_msglevel,
1628	.set_msglevel	= tun_set_msglevel,
1629	.get_link	= ethtool_op_get_link,
1630};
1631
1632
1633static int __init tun_init(void)
1634{
1635	int ret = 0;
1636
1637	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
1638	pr_info("%s\n", DRV_COPYRIGHT);
1639
1640	ret = rtnl_link_register(&tun_link_ops);
1641	if (ret) {
1642		pr_err("Can't register link_ops\n");
1643		goto err_linkops;
1644	}
1645
1646	ret = misc_register(&tun_miscdev);
1647	if (ret) {
1648		pr_err("Can't register misc device %d\n", TUN_MINOR);
1649		goto err_misc;
1650	}
1651	return  0;
1652err_misc:
1653	rtnl_link_unregister(&tun_link_ops);
1654err_linkops:
1655	return ret;
1656}
1657
1658static void tun_cleanup(void)
1659{
1660	misc_deregister(&tun_miscdev);
1661	rtnl_link_unregister(&tun_link_ops);
1662}
1663
1664/* Get an underlying socket object from tun file.  Returns error unless file is
1665 * attached to a device.  The returned object works like a packet socket, it
1666 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
1667 * holding a reference to the file for as long as the socket is in use. */
1668struct socket *tun_get_socket(struct file *file)
1669{
1670	struct tun_struct *tun;
1671	if (file->f_op != &tun_fops)
1672		return ERR_PTR(-EINVAL);
1673	tun = tun_get(file);
1674	if (!tun)
1675		return ERR_PTR(-EBADFD);
1676	tun_put(tun);
1677	return &tun->socket;
1678}
1679EXPORT_SYMBOL_GPL(tun_get_socket);
1680
1681module_init(tun_init);
1682module_exit(tun_cleanup);
1683MODULE_DESCRIPTION(DRV_DESCRIPTION);
1684MODULE_AUTHOR(DRV_COPYRIGHT);
1685MODULE_LICENSE("GPL");
1686MODULE_ALIAS_MISCDEV(TUN_MINOR);
1687MODULE_ALIAS("devname:net/tun");
v3.15
   1/*
   2 *  TUN - Universal TUN/TAP device driver.
   3 *  Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
   4 *
   5 *  This program is free software; you can redistribute it and/or modify
   6 *  it under the terms of the GNU General Public License as published by
   7 *  the Free Software Foundation; either version 2 of the License, or
   8 *  (at your option) any later version.
   9 *
  10 *  This program is distributed in the hope that it will be useful,
  11 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13 *  GNU General Public License for more details.
  14 *
  15 *  $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
  16 */
  17
  18/*
  19 *  Changes:
  20 *
  21 *  Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
  22 *    Add TUNSETLINK ioctl to set the link encapsulation
  23 *
  24 *  Mark Smith <markzzzsmith@yahoo.com.au>
  25 *    Use eth_random_addr() for tap MAC address.
  26 *
  27 *  Harald Roelle <harald.roelle@ifi.lmu.de>  2004/04/20
  28 *    Fixes in packet dropping, queue length setting and queue wakeup.
  29 *    Increased default tx queue length.
  30 *    Added ethtool API.
  31 *    Minor cleanups
  32 *
  33 *  Daniel Podlejski <underley@underley.eu.org>
  34 *    Modifications for 2.3.99-pre5 kernel.
  35 */
  36
  37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  38
  39#define DRV_NAME	"tun"
  40#define DRV_VERSION	"1.6"
  41#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
  42#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
  43
  44#include <linux/module.h>
  45#include <linux/errno.h>
  46#include <linux/kernel.h>
  47#include <linux/major.h>
  48#include <linux/slab.h>
  49#include <linux/poll.h>
  50#include <linux/fcntl.h>
  51#include <linux/init.h>
  52#include <linux/skbuff.h>
  53#include <linux/netdevice.h>
  54#include <linux/etherdevice.h>
  55#include <linux/miscdevice.h>
  56#include <linux/ethtool.h>
  57#include <linux/rtnetlink.h>
  58#include <linux/compat.h>
  59#include <linux/if.h>
  60#include <linux/if_arp.h>
  61#include <linux/if_ether.h>
  62#include <linux/if_tun.h>
  63#include <linux/if_vlan.h>
  64#include <linux/crc32.h>
  65#include <linux/nsproxy.h>
  66#include <linux/virtio_net.h>
  67#include <linux/rcupdate.h>
  68#include <net/net_namespace.h>
  69#include <net/netns/generic.h>
  70#include <net/rtnetlink.h>
  71#include <net/sock.h>
  72#include <linux/seq_file.h>
  73
  74#include <asm/uaccess.h>
  75
  76/* Uncomment to enable debugging */
  77/* #define TUN_DEBUG 1 */
  78
  79#ifdef TUN_DEBUG
  80static int debug;
  81
  82#define tun_debug(level, tun, fmt, args...)			\
  83do {								\
  84	if (tun->debug)						\
  85		netdev_printk(level, tun->dev, fmt, ##args);	\
  86} while (0)
  87#define DBG1(level, fmt, args...)				\
  88do {								\
  89	if (debug == 2)						\
  90		printk(level fmt, ##args);			\
  91} while (0)
  92#else
  93#define tun_debug(level, tun, fmt, args...)			\
  94do {								\
  95	if (0)							\
  96		netdev_printk(level, tun->dev, fmt, ##args);	\
  97} while (0)
  98#define DBG1(level, fmt, args...)				\
  99do {								\
 100	if (0)							\
 101		printk(level fmt, ##args);			\
 102} while (0)
 103#endif
 104
 105#define GOODCOPY_LEN 128
 106
 107#define FLT_EXACT_COUNT 8
 108struct tap_filter {
 109	unsigned int    count;    /* Number of addrs. Zero means disabled */
 110	u32             mask[2];  /* Mask of the hashed addrs */
 111	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
 112};
 113
  114/* DEFAULT_MAX_NUM_RSS_QUEUES was chosen so that the rx/tx queues allocated
  115 * for the netdevice fit in one page, which guarantees that the memory
  116 * allocation succeeds. TODO: increase the limit. */
 117#define MAX_TAP_QUEUES DEFAULT_MAX_NUM_RSS_QUEUES
 118#define MAX_TAP_FLOWS  4096
 119
 120#define TUN_FLOW_EXPIRE (3 * HZ)
 121
  122/* A tun_file connects an open character device to a tuntap netdevice. It
  123 * also contains all socket-related structures (except sock_fprog and
  124 * tap_filter) so it can serve as one transmit queue for the tuntap device.
  125 * The sock_fprog and tap_filter are kept in tun_struct since they are used
  126 * for filtering on the netdevice, not on a specific queue (at least I didn't
  127 * see a requirement for this).
 128 *
 129 * RCU usage:
 130 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 131 * other can only be read while rcu_read_lock or rtnl_lock is held.
 132 */
 133struct tun_file {
 134	struct sock sk;
 135	struct socket socket;
 136	struct socket_wq wq;
 137	struct tun_struct __rcu *tun;
 138	struct net *net;
 139	struct fasync_struct *fasync;
  140	/* only used for fasync */
 141	unsigned int flags;
 142	union {
 143		u16 queue_index;
 144		unsigned int ifindex;
 145	};
 146	struct list_head next;
 147	struct tun_struct *detached;
 148};
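/*
 * Editor's illustrative sketch (not part of this file): the RCU rule
 * stated in the comment above.  tfile->tun may only be dereferenced
 * inside an RCU read-side critical section (or under the rtnl lock),
 * and the pointer must not be used after rcu_read_unlock() without
 * first taking a reference.
 */
static void example_peek_tun(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		netdev_info(tun->dev, "queue is attached\n");
	rcu_read_unlock();
}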
 149
 150struct tun_flow_entry {
 151	struct hlist_node hash_link;
 152	struct rcu_head rcu;
 153	struct tun_struct *tun;
 154
 155	u32 rxhash;
 156	u32 rps_rxhash;
 157	int queue_index;
 158	unsigned long updated;
 159};
 160
 161#define TUN_NUM_FLOW_ENTRIES 1024
 162
  163/* Since the socket was moved to tun_file, to preserve the behavior of a
  164 * persistent device, the socket filter, sndbuf and vnet header size are
  165 * restored when the file is attached to a persistent device.
  166 */
 167struct tun_struct {
 168	struct tun_file __rcu	*tfiles[MAX_TAP_QUEUES];
 169	unsigned int            numqueues;
 170	unsigned int 		flags;
 171	kuid_t			owner;
 172	kgid_t			group;
 173
 174	struct net_device	*dev;
 175	netdev_features_t	set_features;
 176#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
 177			  NETIF_F_TSO6|NETIF_F_UFO)
 178
 179	int			vnet_hdr_sz;
 180	int			sndbuf;
 181	struct tap_filter	txflt;
 182	struct sock_fprog	fprog;
 183	/* protected by rtnl lock */
 184	bool			filter_attached;
 185#ifdef TUN_DEBUG
 186	int debug;
 187#endif
 188	spinlock_t lock;
 189	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
 190	struct timer_list flow_gc_timer;
 191	unsigned long ageing_time;
 192	unsigned int numdisabled;
 193	struct list_head disabled;
 194	void *security;
 195	u32 flow_count;
 196};
 197
 198static inline u32 tun_hashfn(u32 rxhash)
 199{
 200	return rxhash & 0x3ff;
 201}
 202
 203static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
 204{
 205	struct tun_flow_entry *e;
 206
 207	hlist_for_each_entry_rcu(e, head, hash_link) {
 208		if (e->rxhash == rxhash)
 209			return e;
 210	}
 211	return NULL;
 212}
 213
 214static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
 215					      struct hlist_head *head,
 216					      u32 rxhash, u16 queue_index)
 217{
 218	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
 219
 220	if (e) {
 221		tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
 222			  rxhash, queue_index);
 223		e->updated = jiffies;
 224		e->rxhash = rxhash;
 225		e->rps_rxhash = 0;
 226		e->queue_index = queue_index;
 227		e->tun = tun;
 228		hlist_add_head_rcu(&e->hash_link, head);
 229		++tun->flow_count;
 230	}
 231	return e;
 232}
 233
 234static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
 235{
 236	tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
 237		  e->rxhash, e->queue_index);
 238	sock_rps_reset_flow_hash(e->rps_rxhash);
 239	hlist_del_rcu(&e->hash_link);
 240	kfree_rcu(e, rcu);
 241	--tun->flow_count;
 242}
 243
 244static void tun_flow_flush(struct tun_struct *tun)
 245{
 246	int i;
 247
 248	spin_lock_bh(&tun->lock);
 249	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 250		struct tun_flow_entry *e;
 251		struct hlist_node *n;
 252
 253		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
 254			tun_flow_delete(tun, e);
 255	}
 256	spin_unlock_bh(&tun->lock);
 257}
 258
 259static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
 260{
 261	int i;
 262
 263	spin_lock_bh(&tun->lock);
 264	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 265		struct tun_flow_entry *e;
 266		struct hlist_node *n;
 267
 268		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 269			if (e->queue_index == queue_index)
 270				tun_flow_delete(tun, e);
 271		}
 272	}
 273	spin_unlock_bh(&tun->lock);
 274}
 275
 276static void tun_flow_cleanup(unsigned long data)
 277{
 278	struct tun_struct *tun = (struct tun_struct *)data;
 279	unsigned long delay = tun->ageing_time;
 280	unsigned long next_timer = jiffies + delay;
 281	unsigned long count = 0;
 282	int i;
 283
 284	tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
 285
 286	spin_lock_bh(&tun->lock);
 287	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
 288		struct tun_flow_entry *e;
 289		struct hlist_node *n;
 290
 291		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
 292			unsigned long this_timer;
 293			count++;
 294			this_timer = e->updated + delay;
 295			if (time_before_eq(this_timer, jiffies))
 296				tun_flow_delete(tun, e);
 297			else if (time_before(this_timer, next_timer))
 298				next_timer = this_timer;
 299		}
 300	}
 301
 302	if (count)
 303		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
 304	spin_unlock_bh(&tun->lock);
 305}
 306
 307static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 308			    struct tun_file *tfile)
 309{
 310	struct hlist_head *head;
 311	struct tun_flow_entry *e;
 312	unsigned long delay = tun->ageing_time;
 313	u16 queue_index = tfile->queue_index;
 314
 315	if (!rxhash)
 316		return;
 317	else
 318		head = &tun->flows[tun_hashfn(rxhash)];
 319
 320	rcu_read_lock();
 321
  322	/* There is a small chance of out-of-order delivery while switching
  323	 * queues; it is not worth optimizing for. */
 324	if (tun->numqueues == 1 || tfile->detached)
 325		goto unlock;
 326
 327	e = tun_flow_find(head, rxhash);
 328	if (likely(e)) {
 329		/* TODO: keep queueing to old queue until it's empty? */
 330		e->queue_index = queue_index;
 331		e->updated = jiffies;
 332		sock_rps_record_flow_hash(e->rps_rxhash);
 333	} else {
 334		spin_lock_bh(&tun->lock);
 335		if (!tun_flow_find(head, rxhash) &&
 336		    tun->flow_count < MAX_TAP_FLOWS)
 337			tun_flow_create(tun, head, rxhash, queue_index);
 338
 339		if (!timer_pending(&tun->flow_gc_timer))
 340			mod_timer(&tun->flow_gc_timer,
 341				  round_jiffies_up(jiffies + delay));
 342		spin_unlock_bh(&tun->lock);
 343	}
 344
 345unlock:
 346	rcu_read_unlock();
 347}
 348
 349/**
 350 * Save the hash received in the stack receive path and update the
 351 * flow_hash table accordingly.
 352 */
 353static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
 354{
 355	if (unlikely(e->rps_rxhash != hash)) {
 356		sock_rps_reset_flow_hash(e->rps_rxhash);
 357		e->rps_rxhash = hash;
 358	}
 359}
 360
  361/* We try to identify a flow through its rxhash first. We do not check
  362 * the rx queue number because some NICs (e.g. the 82599) choose the
  363 * rx queue based on the tx queue on which the last packet of the flow
  364 * was sent. As the userspace application moves between processors, we
  365 * may see a different rx queue number here. If we cannot get an rxhash,
  366 * we fall back to the recorded rx queue number and hope it helps.
  367 */
 368static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
 369			    void *accel_priv, select_queue_fallback_t fallback)
 370{
 371	struct tun_struct *tun = netdev_priv(dev);
 372	struct tun_flow_entry *e;
 373	u32 txq = 0;
 374	u32 numqueues = 0;
 375
 376	rcu_read_lock();
 377	numqueues = ACCESS_ONCE(tun->numqueues);
 378
 379	txq = skb_get_hash(skb);
 380	if (txq) {
 381		e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
 382		if (e) {
 383			tun_flow_save_rps_rxhash(e, txq);
 384			txq = e->queue_index;
 385		} else
 386			/* use multiply and shift instead of expensive divide */
 387			txq = ((u64)txq * numqueues) >> 32;
 388	} else if (likely(skb_rx_queue_recorded(skb))) {
 389		txq = skb_get_rx_queue(skb);
 390		while (unlikely(txq >= numqueues))
 391			txq -= numqueues;
 392	}
 393
 394	rcu_read_unlock();
 395	return txq;
 396}
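/*
 * Worked example of the multiply-and-shift trick above (editorial
 * sketch, not driver code): for a 32-bit hash h and n queues,
 * ((u64)h * n) >> 32 always lands in [0, n).  E.g. h = 0x80000000 with
 * n = 4 gives (0x200000000ULL >> 32) = 2, so the middle of the hash
 * space maps to the middle queue without a division.
 */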
 397
 398static inline bool tun_not_capable(struct tun_struct *tun)
 399{
 400	const struct cred *cred = current_cred();
 401	struct net *net = dev_net(tun->dev);
 402
 403	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
 404		  (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
 405		!ns_capable(net->user_ns, CAP_NET_ADMIN);
 406}
 407
 408static void tun_set_real_num_queues(struct tun_struct *tun)
 409{
 410	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
 411	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
 412}
 413
 414static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
 415{
 416	tfile->detached = tun;
 417	list_add_tail(&tfile->next, &tun->disabled);
 418	++tun->numdisabled;
 419}
 420
 421static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
 422{
 423	struct tun_struct *tun = tfile->detached;
 424
 425	tfile->detached = NULL;
 426	list_del_init(&tfile->next);
 427	--tun->numdisabled;
 428	return tun;
 429}
 430
 431static void tun_queue_purge(struct tun_file *tfile)
 432{
 433	skb_queue_purge(&tfile->sk.sk_receive_queue);
 434	skb_queue_purge(&tfile->sk.sk_error_queue);
 435}
 436
 437static void __tun_detach(struct tun_file *tfile, bool clean)
 438{
 439	struct tun_file *ntfile;
 440	struct tun_struct *tun;
 441
 442	tun = rtnl_dereference(tfile->tun);
 443
 444	if (tun && !tfile->detached) {
 445		u16 index = tfile->queue_index;
 446		BUG_ON(index >= tun->numqueues);
 447
 448		rcu_assign_pointer(tun->tfiles[index],
 449				   tun->tfiles[tun->numqueues - 1]);
 450		ntfile = rtnl_dereference(tun->tfiles[index]);
 451		ntfile->queue_index = index;
 452
 453		--tun->numqueues;
 454		if (clean) {
 455			RCU_INIT_POINTER(tfile->tun, NULL);
 456			sock_put(&tfile->sk);
 457		} else
 458			tun_disable_queue(tun, tfile);
 459
 460		synchronize_net();
 461		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
 462		/* Drop read queue */
 463		tun_queue_purge(tfile);
 464		tun_set_real_num_queues(tun);
 465	} else if (tfile->detached && clean) {
 466		tun = tun_enable_queue(tfile);
 467		sock_put(&tfile->sk);
 468	}
 469
 470	if (clean) {
 471		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
 472			netif_carrier_off(tun->dev);
 473
 474			if (!(tun->flags & TUN_PERSIST) &&
 475			    tun->dev->reg_state == NETREG_REGISTERED)
 476				unregister_netdevice(tun->dev);
 477		}
 478
 479		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
 480				 &tfile->socket.flags));
 481		sk_release_kernel(&tfile->sk);
 482	}
 483}
 484
 485static void tun_detach(struct tun_file *tfile, bool clean)
 486{
 487	rtnl_lock();
 488	__tun_detach(tfile, clean);
 489	rtnl_unlock();
 490}
 491
 492static void tun_detach_all(struct net_device *dev)
 493{
 494	struct tun_struct *tun = netdev_priv(dev);
 495	struct tun_file *tfile, *tmp;
 496	int i, n = tun->numqueues;
 497
 498	for (i = 0; i < n; i++) {
 499		tfile = rtnl_dereference(tun->tfiles[i]);
 500		BUG_ON(!tfile);
 501		wake_up_all(&tfile->wq.wait);
 502		RCU_INIT_POINTER(tfile->tun, NULL);
 503		--tun->numqueues;
 504	}
 505	list_for_each_entry(tfile, &tun->disabled, next) {
 506		wake_up_all(&tfile->wq.wait);
 507		RCU_INIT_POINTER(tfile->tun, NULL);
 508	}
 509	BUG_ON(tun->numqueues != 0);
 510
 511	synchronize_net();
 512	for (i = 0; i < n; i++) {
 513		tfile = rtnl_dereference(tun->tfiles[i]);
 514		/* Drop read queue */
 515		tun_queue_purge(tfile);
 516		sock_put(&tfile->sk);
 517	}
 518	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
 519		tun_enable_queue(tfile);
 520		tun_queue_purge(tfile);
 521		sock_put(&tfile->sk);
 522	}
 523	BUG_ON(tun->numdisabled != 0);
 524
 525	if (tun->flags & TUN_PERSIST)
 526		module_put(THIS_MODULE);
 527}
 528
 529static int tun_attach(struct tun_struct *tun, struct file *file, bool skip_filter)
 530{
 531	struct tun_file *tfile = file->private_data;
 532	int err;
 533
 534	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
 535	if (err < 0)
 536		goto out;
 537
 538	err = -EINVAL;
 539	if (rtnl_dereference(tfile->tun) && !tfile->detached)
 540		goto out;
 541
 542	err = -EBUSY;
 543	if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
 544		goto out;
 545
 546	err = -E2BIG;
 547	if (!tfile->detached &&
 548	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
 549		goto out;
 550
 551	err = 0;
 552
 553	/* Re-attach the filter to persist device */
 554	if (!skip_filter && (tun->filter_attached == true)) {
 555		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
  556		if (err)
 557			goto out;
 558	}
 559	tfile->queue_index = tun->numqueues;
 560	rcu_assign_pointer(tfile->tun, tun);
 561	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
 562	tun->numqueues++;
 563
 564	if (tfile->detached)
 565		tun_enable_queue(tfile);
 566	else
 567		sock_hold(&tfile->sk);
 568
 569	tun_set_real_num_queues(tun);
 570
 571	/* device is allowed to go away first, so no need to hold extra
 572	 * refcnt.
 573	 */
 574
 575out:
 576	return err;
 577}
 578
 579static struct tun_struct *__tun_get(struct tun_file *tfile)
 580{
 581	struct tun_struct *tun;
 582
 583	rcu_read_lock();
 584	tun = rcu_dereference(tfile->tun);
 585	if (tun)
 586		dev_hold(tun->dev);
 587	rcu_read_unlock();
 588
 589	return tun;
 590}
 591
 592static struct tun_struct *tun_get(struct file *file)
 593{
 594	return __tun_get(file->private_data);
 595}
 596
 597static void tun_put(struct tun_struct *tun)
 598{
 599	dev_put(tun->dev);
 600}
 601
 602/* TAP filtering */
 603static void addr_hash_set(u32 *mask, const u8 *addr)
 604{
 605	int n = ether_crc(ETH_ALEN, addr) >> 26;
 606	mask[n >> 5] |= (1 << (n & 31));
 607}
 608
 609static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
 610{
 611	int n = ether_crc(ETH_ALEN, addr) >> 26;
 612	return mask[n >> 5] & (1 << (n & 31));
 613}
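/*
 * The two helpers above implement the classic 64-bit multicast hash
 * filter: the top six bits of the Ethernet CRC (crc >> 26) give a bit
 * position n in 0..63; mask[n >> 5] picks one of the two 32-bit words
 * and (1 << (n & 31)) the bit within it.  For example, n = 37 sets
 * bit 5 of mask[1].
 */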
 614
 615static int update_filter(struct tap_filter *filter, void __user *arg)
 616{
 617	struct { u8 u[ETH_ALEN]; } *addr;
 618	struct tun_filter uf;
 619	int err, alen, n, nexact;
 620
 621	if (copy_from_user(&uf, arg, sizeof(uf)))
 622		return -EFAULT;
 623
 624	if (!uf.count) {
 625		/* Disabled */
 626		filter->count = 0;
 627		return 0;
 628	}
 629
 630	alen = ETH_ALEN * uf.count;
 631	addr = kmalloc(alen, GFP_KERNEL);
 632	if (!addr)
 633		return -ENOMEM;
 634
 635	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
 636		err = -EFAULT;
 637		goto done;
 638	}
 639
  640	/* The filter is updated without holding any locks, which is
  641	 * perfectly safe: we disable it first, and in the worst
  642	 * case we'll accept a few undesired packets. */
 643	filter->count = 0;
 644	wmb();
 645
 646	/* Use first set of addresses as an exact filter */
 647	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
 648		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
 649
 650	nexact = n;
 651
 652	/* Remaining multicast addresses are hashed,
 653	 * unicast will leave the filter disabled. */
 654	memset(filter->mask, 0, sizeof(filter->mask));
 655	for (; n < uf.count; n++) {
 656		if (!is_multicast_ether_addr(addr[n].u)) {
 657			err = 0; /* no filter */
 658			goto done;
 659		}
 660		addr_hash_set(filter->mask, addr[n].u);
 661	}
 662
 663	/* For ALLMULTI just set the mask to all ones.
 664	 * This overrides the mask populated above. */
 665	if ((uf.flags & TUN_FLT_ALLMULTI))
 666		memset(filter->mask, ~0, sizeof(filter->mask));
 667
 668	/* Now enable the filter */
 669	wmb();
 670	filter->count = nexact;
 671
 672	/* Return the number of exact filters */
 673	err = nexact;
 674
 675done:
 676	kfree(addr);
 677	return err;
 678}
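/*
 * Userspace sketch of driving update_filter() via TUNSETTXFILTER
 * (editorial illustration, error handling omitted; tap_fd is assumed
 * to be an already-configured tap fd).  The buffer is a struct
 * tun_filter immediately followed by uf.count addresses, matching the
 * layout parsed above:
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if_ether.h>
 *	#include <linux/if_tun.h>
 *
 *	char buf[sizeof(struct tun_filter) + ETH_ALEN];
 *	struct tun_filter *uf = (struct tun_filter *)buf;
 *	static const __u8 mac[ETH_ALEN] =
 *		{ 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
 *
 *	uf->flags = 0;			-- or TUN_FLT_ALLMULTI
 *	uf->count = 1;			-- one exact match
 *	memcpy(uf->addr, mac, ETH_ALEN);
 *	ioctl(tap_fd, TUNSETTXFILTER, uf);
 */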
 679
 680/* Returns: 0 - drop, !=0 - accept */
 681static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
 682{
  683	/* Cannot use eth_hdr(skb) here because skb_mac_header() is not set
  684	 * correctly at this point. */
 685	struct ethhdr *eh = (struct ethhdr *) skb->data;
 686	int i;
 687
 688	/* Exact match */
 689	for (i = 0; i < filter->count; i++)
 690		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
 691			return 1;
 692
 693	/* Inexact match (multicast only) */
 694	if (is_multicast_ether_addr(eh->h_dest))
 695		return addr_hash_test(filter->mask, eh->h_dest);
 696
 697	return 0;
 698}
 699
 700/*
 701 * Checks whether the packet is accepted or not.
 702 * Returns: 0 - drop, !=0 - accept
 703 */
 704static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
 705{
 706	if (!filter->count)
 707		return 1;
 708
 709	return run_filter(filter, skb);
 710}
 711
 712/* Network device part of the driver */
 713
 714static const struct ethtool_ops tun_ethtool_ops;
 715
 716/* Net device detach from fd. */
 717static void tun_net_uninit(struct net_device *dev)
 718{
 719	tun_detach_all(dev);
 720}
 721
 722/* Net device open. */
 723static int tun_net_open(struct net_device *dev)
 724{
 725	netif_tx_start_all_queues(dev);
 726	return 0;
 727}
 728
 729/* Net device close. */
 730static int tun_net_close(struct net_device *dev)
 731{
 732	netif_tx_stop_all_queues(dev);
 733	return 0;
 734}
 735
 736/* Net device start xmit */
 737static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
 738{
 739	struct tun_struct *tun = netdev_priv(dev);
 740	int txq = skb->queue_mapping;
 741	struct tun_file *tfile;
 742	u32 numqueues = 0;
 743
 744	rcu_read_lock();
 745	tfile = rcu_dereference(tun->tfiles[txq]);
 746	numqueues = ACCESS_ONCE(tun->numqueues);
 747
 748	/* Drop packet if interface is not attached */
 749	if (txq >= numqueues)
 750		goto drop;
 751
 752	if (numqueues == 1) {
 753		/* Select queue was not called for the skbuff, so we extract the
 754		 * RPS hash and save it into the flow_table here.
 755		 */
 756		__u32 rxhash;
 757
 758		rxhash = skb_get_hash(skb);
 759		if (rxhash) {
 760			struct tun_flow_entry *e;
 761			e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)],
 762					rxhash);
 763			if (e)
 764				tun_flow_save_rps_rxhash(e, rxhash);
 765		}
 766	}
 767
 768	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
 769
 770	BUG_ON(!tfile);
 771
 772	/* Drop if the filter does not like it.
 773	 * This is a noop if the filter is disabled.
  774	 * Filter can be enabled only for TAP devices. */
 775	if (!check_filter(&tun->txflt, skb))
 776		goto drop;
 777
 778	if (tfile->socket.sk->sk_filter &&
 779	    sk_filter(tfile->socket.sk, skb))
 780		goto drop;
 781
  782	/* Limit the number of packets queued by dividing the tx queue
  783	 * length by the number of queues.
  784	 */
 785	if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) * numqueues
 786			  >= dev->tx_queue_len)
 787		goto drop;
 788
 789	if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
 790		goto drop;
 791
 792	if (skb->sk) {
 793		sock_tx_timestamp(skb->sk, &skb_shinfo(skb)->tx_flags);
 794		sw_tx_timestamp(skb);
 795	}
 796
 797	/* Orphan the skb - required as we might hang on to it
 798	 * for indefinite time.
 799	 */
 800	skb_orphan(skb);
 801
 802	nf_reset(skb);
 803
 804	/* Enqueue packet */
 805	skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
 806
 807	/* Notify and wake up reader process */
 808	if (tfile->flags & TUN_FASYNC)
 809		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
 810	wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
 811				   POLLRDNORM | POLLRDBAND);
 812
 813	rcu_read_unlock();
 814	return NETDEV_TX_OK;
 815
 816drop:
 817	dev->stats.tx_dropped++;
 818	skb_tx_error(skb);
 819	kfree_skb(skb);
 820	rcu_read_unlock();
 821	return NETDEV_TX_OK;
 822}
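/*
 * Worked example of the per-queue cap in tun_net_xmit() above: with
 * dev->tx_queue_len = 500 and numqueues = 4, a packet is dropped once
 * its queue already holds 125 skbs (125 * 4 >= 500), keeping the
 * aggregate backlog bounded by the configured queue length.
 */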
 823
 824static void tun_net_mclist(struct net_device *dev)
 825{
 826	/*
 827	 * This callback is supposed to deal with mc filter in
 828	 * _rx_ path and has nothing to do with the _tx_ path.
 829	 * In rx path we always accept everything userspace gives us.
 830	 */
 831}
 832
 833#define MIN_MTU 68
 834#define MAX_MTU 65535
 835
 836static int
 837tun_net_change_mtu(struct net_device *dev, int new_mtu)
 838{
 839	if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
 840		return -EINVAL;
 841	dev->mtu = new_mtu;
 842	return 0;
 843}
 844
 845static netdev_features_t tun_net_fix_features(struct net_device *dev,
 846	netdev_features_t features)
 847{
 848	struct tun_struct *tun = netdev_priv(dev);
 849
 850	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
 851}
 852#ifdef CONFIG_NET_POLL_CONTROLLER
 853static void tun_poll_controller(struct net_device *dev)
 854{
 855	/*
 856	 * Tun only receives frames when:
 857	 * 1) the char device endpoint gets data from user space
 858	 * 2) the tun socket gets a sendmsg call from user space
 859	 * Since both of those are synchronous operations, we are guaranteed
  860	 * never to have pending data when we poll for it, so there is
  861	 * nothing to do here but return.
 862	 * We need this though so netpoll recognizes us as an interface that
 863	 * supports polling, which enables bridge devices in virt setups to
 864	 * still use netconsole
 865	 */
 866	return;
 867}
 868#endif
 869static const struct net_device_ops tun_netdev_ops = {
 870	.ndo_uninit		= tun_net_uninit,
 871	.ndo_open		= tun_net_open,
 872	.ndo_stop		= tun_net_close,
 873	.ndo_start_xmit		= tun_net_xmit,
 874	.ndo_change_mtu		= tun_net_change_mtu,
 875	.ndo_fix_features	= tun_net_fix_features,
 876	.ndo_select_queue	= tun_select_queue,
 877#ifdef CONFIG_NET_POLL_CONTROLLER
 878	.ndo_poll_controller	= tun_poll_controller,
 879#endif
 880};
 881
 882static const struct net_device_ops tap_netdev_ops = {
 883	.ndo_uninit		= tun_net_uninit,
 884	.ndo_open		= tun_net_open,
 885	.ndo_stop		= tun_net_close,
 886	.ndo_start_xmit		= tun_net_xmit,
 887	.ndo_change_mtu		= tun_net_change_mtu,
 888	.ndo_fix_features	= tun_net_fix_features,
 889	.ndo_set_rx_mode	= tun_net_mclist,
 890	.ndo_set_mac_address	= eth_mac_addr,
 891	.ndo_validate_addr	= eth_validate_addr,
 892	.ndo_select_queue	= tun_select_queue,
 893#ifdef CONFIG_NET_POLL_CONTROLLER
 894	.ndo_poll_controller	= tun_poll_controller,
 895#endif
 896};
 897
 898static void tun_flow_init(struct tun_struct *tun)
 899{
 900	int i;
 901
 902	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
 903		INIT_HLIST_HEAD(&tun->flows[i]);
 904
 905	tun->ageing_time = TUN_FLOW_EXPIRE;
 906	setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
 907	mod_timer(&tun->flow_gc_timer,
 908		  round_jiffies_up(jiffies + tun->ageing_time));
 909}
 910
 911static void tun_flow_uninit(struct tun_struct *tun)
 912{
 913	del_timer_sync(&tun->flow_gc_timer);
 914	tun_flow_flush(tun);
 915}
 916
 917/* Initialize net device. */
 918static void tun_net_init(struct net_device *dev)
 919{
 920	struct tun_struct *tun = netdev_priv(dev);
 921
 922	switch (tun->flags & TUN_TYPE_MASK) {
 923	case TUN_TUN_DEV:
 924		dev->netdev_ops = &tun_netdev_ops;
 925
 926		/* Point-to-Point TUN Device */
 927		dev->hard_header_len = 0;
 928		dev->addr_len = 0;
 929		dev->mtu = 1500;
 930
 931		/* Zero header length */
 932		dev->type = ARPHRD_NONE;
 933		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
 934		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
 935		break;
 936
 937	case TUN_TAP_DEV:
 938		dev->netdev_ops = &tap_netdev_ops;
 939		/* Ethernet TAP Device */
 940		ether_setup(dev);
 941		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
 942		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
 943
 944		eth_hw_addr_random(dev);
 945
 946		dev->tx_queue_len = TUN_READQ_SIZE;  /* We prefer our own queue length */
 947		break;
 948	}
 949}
 950
 951/* Character device part */
 952
 953/* Poll */
 954static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
 955{
 956	struct tun_file *tfile = file->private_data;
 957	struct tun_struct *tun = __tun_get(tfile);
 958	struct sock *sk;
 959	unsigned int mask = 0;
 960
 961	if (!tun)
 962		return POLLERR;
 963
 964	sk = tfile->socket.sk;
 965
 966	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
 967
 968	poll_wait(file, &tfile->wq.wait, wait);
 969
 970	if (!skb_queue_empty(&sk->sk_receive_queue))
 971		mask |= POLLIN | POLLRDNORM;
 972
 973	if (sock_writeable(sk) ||
 974	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
 975	     sock_writeable(sk)))
 976		mask |= POLLOUT | POLLWRNORM;
 977
 978	if (tun->dev->reg_state != NETREG_REGISTERED)
 979		mask = POLLERR;
 980
 981	tun_put(tun);
 982	return mask;
 983}
 984
 985/* prepad is the amount to reserve at front.  len is length after that.
 986 * linear is a hint as to how much to copy (usually headers). */
 987static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
 988				     size_t prepad, size_t len,
 989				     size_t linear, int noblock)
 990{
 991	struct sock *sk = tfile->socket.sk;
 992	struct sk_buff *skb;
 993	int err;
 994
 995	/* Under a page?  Don't bother with paged skb. */
 996	if (prepad + len < PAGE_SIZE || !linear)
 997		linear = len;
 998
 999	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1000				   &err, 0);
1001	if (!skb)
1002		return ERR_PTR(err);
1003
1004	skb_reserve(skb, prepad);
1005	skb_put(skb, linear);
1006	skb->data_len = len - linear;
1007	skb->len += len - linear;
1008
1009	return skb;
1010}
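/*
 * Example of the linear/paged split above (illustrative numbers): a
 * 512-byte frame is below PAGE_SIZE on common configurations and is
 * allocated fully linear, while a 64KB GSO frame with a small "linear"
 * header hint gets only its headers in the linear area and the rest in
 * paged fragments.
 */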
1011
1012/* Get packet from user space buffer */
1013static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1014			    void *msg_control, const struct iovec *iv,
1015			    size_t total_len, size_t count, int noblock)
1016{
1017	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1018	struct sk_buff *skb;
1019	size_t len = total_len, align = NET_SKB_PAD, linear;
1020	struct virtio_net_hdr gso = { 0 };
1021	int good_linear;
1022	int offset = 0;
1023	int copylen;
1024	bool zerocopy = false;
1025	int err;
1026	u32 rxhash;
1027
1028	if (!(tun->flags & TUN_NO_PI)) {
1029		if (len < sizeof(pi))
1030			return -EINVAL;
1031		len -= sizeof(pi);
1032
1033		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
1034			return -EFAULT;
1035		offset += sizeof(pi);
1036	}
1037
1038	if (tun->flags & TUN_VNET_HDR) {
1039		if (len < tun->vnet_hdr_sz)
1040			return -EINVAL;
1041		len -= tun->vnet_hdr_sz;
1042
1043		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
1044			return -EFAULT;
1045
1046		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1047		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
1048			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
1049
1050		if (gso.hdr_len > len)
1051			return -EINVAL;
1052		offset += tun->vnet_hdr_sz;
1053	}
1054
1055	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
1056		align += NET_IP_ALIGN;
1057		if (unlikely(len < ETH_HLEN ||
1058			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
1059			return -EINVAL;
1060	}
1061
1062	good_linear = SKB_MAX_HEAD(align);
1063
1064	if (msg_control) {
 1065		/* There are 256 bytes to be copied into the skb, so there is
 1066		 * enough room to expand the skb head if that is ever needed.
 1067		 * The rest of the buffer is mapped from userspace.
 1068		 */
1069		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
1070		if (copylen > good_linear)
1071			copylen = good_linear;
1072		linear = copylen;
1073		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
1074			zerocopy = true;
1075	}
1076
1077	if (!zerocopy) {
1078		copylen = len;
1079		if (gso.hdr_len > good_linear)
1080			linear = good_linear;
1081		else
1082			linear = gso.hdr_len;
1083	}
1084
1085	skb = tun_alloc_skb(tfile, align, copylen, linear, noblock);
1086	if (IS_ERR(skb)) {
1087		if (PTR_ERR(skb) != -EAGAIN)
1088			tun->dev->stats.rx_dropped++;
1089		return PTR_ERR(skb);
1090	}
1091
1092	if (zerocopy)
1093		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
1094	else {
1095		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
1096		if (!err && msg_control) {
1097			struct ubuf_info *uarg = msg_control;
1098			uarg->callback(uarg, false);
1099		}
1100	}
1101
1102	if (err) {
1103		tun->dev->stats.rx_dropped++;
1104		kfree_skb(skb);
1105		return -EFAULT;
1106	}
1107
1108	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1109		if (!skb_partial_csum_set(skb, gso.csum_start,
1110					  gso.csum_offset)) {
1111			tun->dev->stats.rx_frame_errors++;
1112			kfree_skb(skb);
1113			return -EINVAL;
1114		}
1115	}
1116
1117	switch (tun->flags & TUN_TYPE_MASK) {
1118	case TUN_TUN_DEV:
1119		if (tun->flags & TUN_NO_PI) {
1120			switch (skb->data[0] & 0xf0) {
1121			case 0x40:
1122				pi.proto = htons(ETH_P_IP);
1123				break;
1124			case 0x60:
1125				pi.proto = htons(ETH_P_IPV6);
1126				break;
1127			default:
1128				tun->dev->stats.rx_dropped++;
1129				kfree_skb(skb);
1130				return -EINVAL;
1131			}
1132		}
1133
1134		skb_reset_mac_header(skb);
1135		skb->protocol = pi.proto;
1136		skb->dev = tun->dev;
1137		break;
1138	case TUN_TAP_DEV:
1139		skb->protocol = eth_type_trans(skb, tun->dev);
1140		break;
1141	}
1142
1143	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1144		pr_debug("GSO!\n");
1145		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1146		case VIRTIO_NET_HDR_GSO_TCPV4:
1147			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1148			break;
1149		case VIRTIO_NET_HDR_GSO_TCPV6:
1150			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1151			break;
1152		case VIRTIO_NET_HDR_GSO_UDP:
1153			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1154			break;
1155		default:
1156			tun->dev->stats.rx_frame_errors++;
1157			kfree_skb(skb);
1158			return -EINVAL;
1159		}
1160
1161		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1162			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1163
1164		skb_shinfo(skb)->gso_size = gso.gso_size;
1165		if (skb_shinfo(skb)->gso_size == 0) {
1166			tun->dev->stats.rx_frame_errors++;
1167			kfree_skb(skb);
1168			return -EINVAL;
1169		}
1170
1171		/* Header must be checked, and gso_segs computed. */
1172		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1173		skb_shinfo(skb)->gso_segs = 0;
1174	}
1175
1176	/* copy skb_ubuf_info for callback when skb has no error */
1177	if (zerocopy) {
1178		skb_shinfo(skb)->destructor_arg = msg_control;
1179		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1180		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1181	}
1182
1183	skb_reset_network_header(skb);
1184	skb_probe_transport_header(skb, 0);
1185
1186	rxhash = skb_get_hash(skb);
1187	netif_rx_ni(skb);
1188
1189	tun->dev->stats.rx_packets++;
1190	tun->dev->stats.rx_bytes += len;
1191
1192	tun_flow_update(tun, rxhash, tfile);
1193	return total_len;
1194}
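/*
 * Sketch of the buffer layout tun_get_user() expects from userspace
 * (editorial illustration):
 *
 *	+--------------------+--------------------------+----------------+
 *	| struct tun_pi      | struct virtio_net_hdr    | packet payload |
 *	| (unless IFF_NO_PI) | (only with IFF_VNET_HDR) | (IP or frame)  |
 *	+--------------------+--------------------------+----------------+
 *
 * With IFF_TUN | IFF_NO_PI a sender is simply
 * write(tun_fd, ip_packet, ip_packet_len).
 */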
1195
1196static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
1197			      unsigned long count, loff_t pos)
1198{
1199	struct file *file = iocb->ki_filp;
1200	struct tun_struct *tun = tun_get(file);
1201	struct tun_file *tfile = file->private_data;
1202	ssize_t result;
1203
1204	if (!tun)
1205		return -EBADFD;
1206
1207	tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
1208
1209	result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
1210			      count, file->f_flags & O_NONBLOCK);
1211
1212	tun_put(tun);
1213	return result;
1214}
1215
1216/* Put packet to the user space buffer */
1217static ssize_t tun_put_user(struct tun_struct *tun,
1218			    struct tun_file *tfile,
1219			    struct sk_buff *skb,
1220			    const struct iovec *iv, int len)
1221{
1222	struct tun_pi pi = { 0, skb->protocol };
1223	ssize_t total = 0;
1224	int vlan_offset = 0, copied;
1225
1226	if (!(tun->flags & TUN_NO_PI)) {
1227		if ((len -= sizeof(pi)) < 0)
1228			return -EINVAL;
1229
1230		if (len < skb->len) {
 1231			/* Packet will be stripped */
1232			pi.flags |= TUN_PKT_STRIP;
1233		}
1234
1235		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
1236			return -EFAULT;
1237		total += sizeof(pi);
1238	}
1239
1240	if (tun->flags & TUN_VNET_HDR) {
1241		struct virtio_net_hdr gso = { 0 }; /* no info leak */
1242		if ((len -= tun->vnet_hdr_sz) < 0)
1243			return -EINVAL;
1244
1245		if (skb_is_gso(skb)) {
1246			struct skb_shared_info *sinfo = skb_shinfo(skb);
1247
1248			/* This is a hint as to how much should be linear. */
1249			gso.hdr_len = skb_headlen(skb);
1250			gso.gso_size = sinfo->gso_size;
1251			if (sinfo->gso_type & SKB_GSO_TCPV4)
1252				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1253			else if (sinfo->gso_type & SKB_GSO_TCPV6)
1254				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1255			else if (sinfo->gso_type & SKB_GSO_UDP)
1256				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1257			else {
1258				pr_err("unexpected GSO type: "
1259				       "0x%x, gso_size %d, hdr_len %d\n",
1260				       sinfo->gso_type, gso.gso_size,
1261				       gso.hdr_len);
1262				print_hex_dump(KERN_ERR, "tun: ",
1263					       DUMP_PREFIX_NONE,
1264					       16, 1, skb->head,
1265					       min((int)gso.hdr_len, 64), true);
1266				WARN_ON_ONCE(1);
1267				return -EINVAL;
1268			}
1269			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1270				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1271		} else
1272			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1273
1274		if (skb->ip_summed == CHECKSUM_PARTIAL) {
1275			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1276			gso.csum_start = skb_checksum_start_offset(skb);
1277			gso.csum_offset = skb->csum_offset;
1278		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1279			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
1280		} /* else everything is zero */
1281
1282		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
1283					       sizeof(gso))))
1284			return -EFAULT;
1285		total += tun->vnet_hdr_sz;
1286	}
1287
1288	copied = total;
1289	total += skb->len;
1290	if (!vlan_tx_tag_present(skb)) {
1291		len = min_t(int, skb->len, len);
1292	} else {
1293		int copy, ret;
1294		struct {
1295			__be16 h_vlan_proto;
1296			__be16 h_vlan_TCI;
1297		} veth;
1298
1299		veth.h_vlan_proto = skb->vlan_proto;
1300		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
1301
1302		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1303		len = min_t(int, skb->len + VLAN_HLEN, len);
1304		total += VLAN_HLEN;
1305
1306		copy = min_t(int, vlan_offset, len);
1307		ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
1308		len -= copy;
1309		copied += copy;
1310		if (ret || !len)
1311			goto done;
1312
1313		copy = min_t(int, sizeof(veth), len);
1314		ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
1315		len -= copy;
1316		copied += copy;
1317		if (ret || !len)
1318			goto done;
1319	}
1320
1321	skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
1322
1323done:
1324	tun->dev->stats.tx_packets++;
1325	tun->dev->stats.tx_bytes += len;
1326
1327	return total;
1328}
1329
1330static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
1331			   const struct iovec *iv, ssize_t len, int noblock)
1332{
1333	DECLARE_WAITQUEUE(wait, current);
1334	struct sk_buff *skb;
1335	ssize_t ret = 0;
1336
1337	tun_debug(KERN_INFO, tun, "tun_do_read\n");
1338
1339	if (unlikely(!noblock))
1340		add_wait_queue(&tfile->wq.wait, &wait);
1341	while (len) {
1342		if (unlikely(!noblock))
1343			current->state = TASK_INTERRUPTIBLE;
1344
1345		/* Read frames from the queue */
1346		if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
1347			if (noblock) {
1348				ret = -EAGAIN;
1349				break;
1350			}
1351			if (signal_pending(current)) {
1352				ret = -ERESTARTSYS;
1353				break;
1354			}
1355			if (tun->dev->reg_state != NETREG_REGISTERED) {
1356				ret = -EIO;
1357				break;
1358			}
1359
1360			/* Nothing to read, let's sleep */
1361			schedule();
1362			continue;
1363		}
1364
1365		ret = tun_put_user(tun, tfile, skb, iv, len);
1366		kfree_skb(skb);
1367		break;
1368	}
1369
1370	if (unlikely(!noblock)) {
1371		current->state = TASK_RUNNING;
1372		remove_wait_queue(&tfile->wq.wait, &wait);
1373	}
1374
1375	return ret;
1376}
1377
1378static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1379			    unsigned long count, loff_t pos)
1380{
1381	struct file *file = iocb->ki_filp;
1382	struct tun_file *tfile = file->private_data;
1383	struct tun_struct *tun = __tun_get(tfile);
1384	ssize_t len, ret;
1385
1386	if (!tun)
1387		return -EBADFD;
1388	len = iov_length(iv, count);
1389	if (len < 0) {
1390		ret = -EINVAL;
1391		goto out;
1392	}
1393
1394	ret = tun_do_read(tun, tfile, iv, len,
1395			  file->f_flags & O_NONBLOCK);
1396	ret = min_t(ssize_t, ret, len);
1397	if (ret > 0)
1398		iocb->ki_pos = ret;
1399out:
1400	tun_put(tun);
1401	return ret;
1402}
1403
1404static void tun_free_netdev(struct net_device *dev)
1405{
1406	struct tun_struct *tun = netdev_priv(dev);
1407
1408	BUG_ON(!(list_empty(&tun->disabled)));
1409	tun_flow_uninit(tun);
1410	security_tun_dev_free_security(tun->security);
1411	free_netdev(dev);
1412}
1413
1414static void tun_setup(struct net_device *dev)
1415{
1416	struct tun_struct *tun = netdev_priv(dev);
1417
1418	tun->owner = INVALID_UID;
1419	tun->group = INVALID_GID;
1420
1421	dev->ethtool_ops = &tun_ethtool_ops;
1422	dev->destructor = tun_free_netdev;
1423}
1424
1425/* Trivial set of netlink ops to allow deleting tun or tap
1426 * device with netlink.
1427 */
1428static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1429{
1430	return -EINVAL;
1431}
1432
1433static struct rtnl_link_ops tun_link_ops __read_mostly = {
1434	.kind		= DRV_NAME,
1435	.priv_size	= sizeof(struct tun_struct),
1436	.setup		= tun_setup,
1437	.validate	= tun_validate,
1438};
1439
1440static void tun_sock_write_space(struct sock *sk)
1441{
1442	struct tun_file *tfile;
1443	wait_queue_head_t *wqueue;
1444
1445	if (!sock_writeable(sk))
1446		return;
1447
1448	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
1449		return;
1450
1451	wqueue = sk_sleep(sk);
1452	if (wqueue && waitqueue_active(wqueue))
1453		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
1454						POLLWRNORM | POLLWRBAND);
1455
1456	tfile = container_of(sk, struct tun_file, sk);
1457	kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
1458}
1459
1460static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1461		       struct msghdr *m, size_t total_len)
1462{
1463	int ret;
1464	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1465	struct tun_struct *tun = __tun_get(tfile);
1466
1467	if (!tun)
1468		return -EBADFD;
1469	ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
1470			   m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
1471	tun_put(tun);
1472	return ret;
1473}
1474
1475static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1476		       struct msghdr *m, size_t total_len,
1477		       int flags)
1478{
1479	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1480	struct tun_struct *tun = __tun_get(tfile);
1481	int ret;
1482
1483	if (!tun)
1484		return -EBADFD;
1485
1486	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
1487		ret = -EINVAL;
1488		goto out;
1489	}
1490	if (flags & MSG_ERRQUEUE) {
1491		ret = sock_recv_errqueue(sock->sk, m, total_len,
1492					 SOL_PACKET, TUN_TX_TIMESTAMP);
1493		goto out;
1494	}
1495	ret = tun_do_read(tun, tfile, m->msg_iov, total_len,
1496			  flags & MSG_DONTWAIT);
1497	if (ret > total_len) {
1498		m->msg_flags |= MSG_TRUNC;
1499		ret = flags & MSG_TRUNC ? ret : total_len;
1500	}
1501out:
1502	tun_put(tun);
1503	return ret;
1504}
1505
1506static int tun_release(struct socket *sock)
1507{
1508	if (sock->sk)
1509		sock_put(sock->sk);
1510	return 0;
1511}
1512
1513/* Ops structure to mimic raw sockets with tun */
1514static const struct proto_ops tun_socket_ops = {
1515	.sendmsg = tun_sendmsg,
1516	.recvmsg = tun_recvmsg,
1517	.release = tun_release,
1518};
1519
1520static struct proto tun_proto = {
1521	.name		= "tun",
1522	.owner		= THIS_MODULE,
1523	.obj_size	= sizeof(struct tun_file),
1524};
1525
1526static int tun_flags(struct tun_struct *tun)
1527{
1528	int flags = 0;
1529
1530	if (tun->flags & TUN_TUN_DEV)
1531		flags |= IFF_TUN;
1532	else
1533		flags |= IFF_TAP;
1534
1535	if (tun->flags & TUN_NO_PI)
1536		flags |= IFF_NO_PI;
1537
1538	/* This flag has no real effect.  We track the value for backwards
1539	 * compatibility.
1540	 */
1541	if (tun->flags & TUN_ONE_QUEUE)
1542		flags |= IFF_ONE_QUEUE;
1543
1544	if (tun->flags & TUN_VNET_HDR)
1545		flags |= IFF_VNET_HDR;
1546
1547	if (tun->flags & TUN_TAP_MQ)
1548		flags |= IFF_MULTI_QUEUE;
1549
1550	if (tun->flags & TUN_PERSIST)
1551		flags |= IFF_PERSIST;
1552
1553	return flags;
1554}
1555
1556static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1557			      char *buf)
1558{
1559	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1560	return sprintf(buf, "0x%x\n", tun_flags(tun));
1561}
1562
1563static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1564			      char *buf)
1565{
1566	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1567	return uid_valid(tun->owner)?
1568		sprintf(buf, "%u\n",
1569			from_kuid_munged(current_user_ns(), tun->owner)):
1570		sprintf(buf, "-1\n");
1571}
1572
1573static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1574			      char *buf)
1575{
1576	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1577	return gid_valid(tun->group) ?
1578		sprintf(buf, "%u\n",
1579			from_kgid_munged(current_user_ns(), tun->group)):
1580		sprintf(buf, "-1\n");
1581}
1582
1583static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1584static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1585static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
1586
1587static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
1588{
1589	struct tun_struct *tun;
1590	struct tun_file *tfile = file->private_data;
1591	struct net_device *dev;
1592	int err;
1593
1594	if (tfile->detached)
1595		return -EINVAL;
1596
1597	dev = __dev_get_by_name(net, ifr->ifr_name);
1598	if (dev) {
1599		if (ifr->ifr_flags & IFF_TUN_EXCL)
1600			return -EBUSY;
1601		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1602			tun = netdev_priv(dev);
1603		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1604			tun = netdev_priv(dev);
1605		else
1606			return -EINVAL;
1607
1608		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
1609		    !!(tun->flags & TUN_TAP_MQ))
1610			return -EINVAL;
1611
1612		if (tun_not_capable(tun))
1613			return -EPERM;
1614		err = security_tun_dev_open(tun->security);
1615		if (err < 0)
1616			return err;
1617
1618		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER);
1619		if (err < 0)
1620			return err;
1621
1622		if (tun->flags & TUN_TAP_MQ &&
1623		    (tun->numqueues + tun->numdisabled > 1)) {
 1624			/* One or more queues have already been attached; no need
 1625			 * to initialize the device again.
1626			 */
1627			return 0;
1628		}
1629	}
1630	else {
1631		char *name;
1632		unsigned long flags = 0;
1633		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
1634			     MAX_TAP_QUEUES : 1;
1635
1636		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1637			return -EPERM;
1638		err = security_tun_dev_create();
1639		if (err < 0)
1640			return err;
1641
1642		/* Set dev type */
1643		if (ifr->ifr_flags & IFF_TUN) {
1644			/* TUN device */
1645			flags |= TUN_TUN_DEV;
1646			name = "tun%d";
1647		} else if (ifr->ifr_flags & IFF_TAP) {
1648			/* TAP device */
1649			flags |= TUN_TAP_DEV;
1650			name = "tap%d";
1651		} else
1652			return -EINVAL;
1653
1654		if (*ifr->ifr_name)
1655			name = ifr->ifr_name;
1656
1657		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1658				       tun_setup, queues, queues);
1659
1660		if (!dev)
1661			return -ENOMEM;
1662
1663		dev_net_set(dev, net);
1664		dev->rtnl_link_ops = &tun_link_ops;
1665		dev->ifindex = tfile->ifindex;
1666
1667		tun = netdev_priv(dev);
1668		tun->dev = dev;
1669		tun->flags = flags;
1670		tun->txflt.count = 0;
1671		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
1672
1673		tun->filter_attached = false;
1674		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
1675
1676		spin_lock_init(&tun->lock);
1677
1678		err = security_tun_dev_alloc_security(&tun->security);
1679		if (err < 0)
1680			goto err_free_dev;
1681
1682		tun_net_init(dev);
1683		tun_flow_init(tun);
1684
1685		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1686				   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
1687				   NETIF_F_HW_VLAN_STAG_TX;
1688		dev->features = dev->hw_features;
1689		dev->vlan_features = dev->features &
1690				     ~(NETIF_F_HW_VLAN_CTAG_TX |
1691				       NETIF_F_HW_VLAN_STAG_TX);
1692
1693		INIT_LIST_HEAD(&tun->disabled);
1694		err = tun_attach(tun, file, false);
1695		if (err < 0)
1696			goto err_free_flow;
1697
1698		err = register_netdevice(tun->dev);
1699		if (err < 0)
1700			goto err_detach;
1701
1702		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1703		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1704		    device_create_file(&tun->dev->dev, &dev_attr_group))
1705			pr_err("Failed to create tun sysfs files\n");
1706	}
1707
1708	netif_carrier_on(tun->dev);
1709
1710	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
1711
1712	if (ifr->ifr_flags & IFF_NO_PI)
1713		tun->flags |= TUN_NO_PI;
1714	else
1715		tun->flags &= ~TUN_NO_PI;
1716
1717	/* This flag has no real effect.  We track the value for backwards
1718	 * compatibility.
1719	 */
1720	if (ifr->ifr_flags & IFF_ONE_QUEUE)
1721		tun->flags |= TUN_ONE_QUEUE;
1722	else
1723		tun->flags &= ~TUN_ONE_QUEUE;
1724
1725	if (ifr->ifr_flags & IFF_VNET_HDR)
1726		tun->flags |= TUN_VNET_HDR;
1727	else
1728		tun->flags &= ~TUN_VNET_HDR;
1729
1730	if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1731		tun->flags |= TUN_TAP_MQ;
1732	else
1733		tun->flags &= ~TUN_TAP_MQ;
1734
1735	/* Make sure persistent devices do not get stuck in
1736	 * xoff state.
1737	 */
1738	if (netif_running(tun->dev))
1739		netif_tx_wake_all_queues(tun->dev);
1740
1741	strcpy(ifr->ifr_name, tun->dev->name);
1742	return 0;
1743
1744err_detach:
1745	tun_detach_all(dev);
1746err_free_flow:
1747	tun_flow_uninit(tun);
1748	security_tun_dev_free_security(tun->security);
1749err_free_dev:
1750	free_netdev(dev);
1751	return err;
1752}
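/*
 * Userspace sketch of driving tun_set_iff() (editorial illustration,
 * error handling omitted; "tap0" is an example name):
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	struct ifreq ifr;
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 */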
1753
1754static void tun_get_iff(struct net *net, struct tun_struct *tun,
1755		       struct ifreq *ifr)
1756{
1757	tun_debug(KERN_INFO, tun, "tun_get_iff\n");
1758
1759	strcpy(ifr->ifr_name, tun->dev->name);
1760
1761	ifr->ifr_flags = tun_flags(tun);
1762
1763}
1764
1765/* This is like a cut-down ethtool ops, except done via tun fd so no
1766 * privs required. */
1767static int set_offload(struct tun_struct *tun, unsigned long arg)
1768{
1769	netdev_features_t features = 0;
1770
1771	if (arg & TUN_F_CSUM) {
1772		features |= NETIF_F_HW_CSUM;
1773		arg &= ~TUN_F_CSUM;
1774
1775		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1776			if (arg & TUN_F_TSO_ECN) {
1777				features |= NETIF_F_TSO_ECN;
1778				arg &= ~TUN_F_TSO_ECN;
1779			}
1780			if (arg & TUN_F_TSO4)
1781				features |= NETIF_F_TSO;
1782			if (arg & TUN_F_TSO6)
1783				features |= NETIF_F_TSO6;
1784			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1785		}
1786
1787		if (arg & TUN_F_UFO) {
1788			features |= NETIF_F_UFO;
1789			arg &= ~TUN_F_UFO;
1790		}
1791	}
1792
 1793	/* This gives the user a way to test for new features in the future
 1794	 * by trying to set them. */
1795	if (arg)
1796		return -EINVAL;
1797
1798	tun->set_features = features;
1799	netdev_update_features(tun->dev);
1800
1801	return 0;
1802}
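/*
 * Userspace sketch for set_offload() (editorial illustration): enable
 * checksum and TCP segmentation offloads; an unknown bit makes the
 * ioctl fail with -EINVAL, which is how userspace probes for flags:
 *
 *	ioctl(fd, TUNSETOFFLOAD,
 *	      TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | TUN_F_TSO_ECN);
 */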
1803
1804static void tun_detach_filter(struct tun_struct *tun, int n)
1805{
1806	int i;
1807	struct tun_file *tfile;
1808
1809	for (i = 0; i < n; i++) {
1810		tfile = rtnl_dereference(tun->tfiles[i]);
1811		sk_detach_filter(tfile->socket.sk);
1812	}
1813
1814	tun->filter_attached = false;
1815}
1816
1817static int tun_attach_filter(struct tun_struct *tun)
1818{
1819	int i, ret = 0;
1820	struct tun_file *tfile;
1821
1822	for (i = 0; i < tun->numqueues; i++) {
1823		tfile = rtnl_dereference(tun->tfiles[i]);
1824		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1825		if (ret) {
1826			tun_detach_filter(tun, i);
1827			return ret;
1828		}
1829	}
1830
1831	tun->filter_attached = true;
1832	return ret;
1833}
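/*
 * Userspace sketch of attaching a classic BPF filter with
 * TUNATTACHFILTER (editorial illustration; a trivial accept-all
 * program is shown, tap_fd is assumed to be an attached tap fd):
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/filter.h>
 *	#include <linux/if_tun.h>
 *
 *	struct sock_filter code[] = {
 *		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	-- accept all
 *	};
 *	struct sock_fprog fprog = {
 *		.len = 1,
 *		.filter = code,
 *	};
 *
 *	ioctl(tap_fd, TUNATTACHFILTER, &fprog);
 */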
1834
1835static void tun_set_sndbuf(struct tun_struct *tun)
1836{
1837	struct tun_file *tfile;
1838	int i;
1839
1840	for (i = 0; i < tun->numqueues; i++) {
1841		tfile = rtnl_dereference(tun->tfiles[i]);
1842		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1843	}
1844}
1845
1846static int tun_set_queue(struct file *file, struct ifreq *ifr)
1847{
1848	struct tun_file *tfile = file->private_data;
1849	struct tun_struct *tun;
1850	int ret = 0;
1851
1852	rtnl_lock();
1853
1854	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1855		tun = tfile->detached;
1856		if (!tun) {
1857			ret = -EINVAL;
1858			goto unlock;
1859		}
1860		ret = security_tun_dev_attach_queue(tun->security);
1861		if (ret < 0)
1862			goto unlock;
1863		ret = tun_attach(tun, file, false);
1864	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
1865		tun = rtnl_dereference(tfile->tun);
1866		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
1867			ret = -EINVAL;
1868		else
1869			__tun_detach(tfile, false);
1870	} else
1871		ret = -EINVAL;
1872
1873unlock:
1874	rtnl_unlock();
1875	return ret;
1876}
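/*
 * Userspace sketch for tun_set_queue() (editorial illustration):
 * temporarily park one queue of a multiqueue device and bring it back
 * (queue_fd is a fd previously attached with IFF_MULTI_QUEUE):
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	-- disable this queue
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	-- and re-enable it
 */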
1877
1878static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1879			    unsigned long arg, int ifreq_len)
1880{
1881	struct tun_file *tfile = file->private_data;
1882	struct tun_struct *tun;
1883	void __user* argp = (void __user*)arg;
1884	struct ifreq ifr;
1885	kuid_t owner;
1886	kgid_t group;
1887	int sndbuf;
1888	int vnet_hdr_sz;
1889	unsigned int ifindex;
1890	int ret;
1891
1892	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
1893		if (copy_from_user(&ifr, argp, ifreq_len))
1894			return -EFAULT;
1895	} else {
1896		memset(&ifr, 0, sizeof(ifr));
1897	}
1898	if (cmd == TUNGETFEATURES) {
1899		/* Currently this just means: "what IFF flags are valid?".
1900		 * This is needed because we never checked for invalid flags on
1901		 * TUNSETIFF. */
1902		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
1903				IFF_VNET_HDR | IFF_MULTI_QUEUE,
1904				(unsigned int __user*)argp);
1905	} else if (cmd == TUNSETQUEUE)
1906		return tun_set_queue(file, &ifr);
1907
1908	ret = 0;
1909	rtnl_lock();
1910
1911	tun = __tun_get(tfile);
1912	if (cmd == TUNSETIFF && !tun) {
1913		ifr.ifr_name[IFNAMSIZ-1] = '\0';
1914
1915		ret = tun_set_iff(tfile->net, file, &ifr);
1916
1917		if (ret)
1918			goto unlock;
1919
1920		if (copy_to_user(argp, &ifr, ifreq_len))
1921			ret = -EFAULT;
1922		goto unlock;
1923	}
1924	if (cmd == TUNSETIFINDEX) {
1925		ret = -EPERM;
1926		if (tun)
1927			goto unlock;
1928
1929		ret = -EFAULT;
1930		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
1931			goto unlock;
1932
1933		ret = 0;
1934		tfile->ifindex = ifindex;
1935		goto unlock;
1936	}
1937
1938	ret = -EBADFD;
1939	if (!tun)
1940		goto unlock;
1941
1942	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
1943
1944	ret = 0;
1945	switch (cmd) {
1946	case TUNGETIFF:
1947		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
1948
1949		if (tfile->detached)
1950			ifr.ifr_flags |= IFF_DETACH_QUEUE;
1951		if (!tfile->socket.sk->sk_filter)
1952			ifr.ifr_flags |= IFF_NOFILTER;
1953
1954		if (copy_to_user(argp, &ifr, ifreq_len))
1955			ret = -EFAULT;
1956		break;
1957
1958	case TUNSETNOCSUM:
1959		/* Disable/Enable checksum */
1960
1961		/* [unimplemented] */
1962		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
1963			  arg ? "disabled" : "enabled");
1964		break;
1965
1966	case TUNSETPERSIST:
 1967		/* Disable/Enable persist mode. Keep an extra reference to the
 1968		 * module to prevent it from being unloaded while the device persists.
 1969		 */
1970		if (arg && !(tun->flags & TUN_PERSIST)) {
1971			tun->flags |= TUN_PERSIST;
1972			__module_get(THIS_MODULE);
1973		}
1974		if (!arg && (tun->flags & TUN_PERSIST)) {
1975			tun->flags &= ~TUN_PERSIST;
1976			module_put(THIS_MODULE);
1977		}
1978
1979		tun_debug(KERN_INFO, tun, "persist %s\n",
1980			  arg ? "enabled" : "disabled");
1981		break;
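	/*
	 * Example (editorial illustration): make the device outlive this
	 * fd, and undo it later:
	 *
	 *	ioctl(fd, TUNSETPERSIST, 1);	-- survives close(fd)
	 *	ioctl(fd, TUNSETPERSIST, 0);	-- back to transient
	 */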
1982
1983	case TUNSETOWNER:
1984		/* Set owner of the device */
1985		owner = make_kuid(current_user_ns(), arg);
1986		if (!uid_valid(owner)) {
1987			ret = -EINVAL;
1988			break;
1989		}
1990		tun->owner = owner;
1991		tun_debug(KERN_INFO, tun, "owner set to %u\n",
1992			  from_kuid(&init_user_ns, tun->owner));
1993		break;
1994
1995	case TUNSETGROUP:
1996		/* Set group of the device */
1997		group = make_kgid(current_user_ns(), arg);
1998		if (!gid_valid(group)) {
1999			ret = -EINVAL;
2000			break;
2001		}
2002		tun->group = group;
2003		tun_debug(KERN_INFO, tun, "group set to %u\n",
2004			  from_kgid(&init_user_ns, tun->group));
2005		break;
2006
2007	case TUNSETLINK:
2008		/* Only allow setting the type when the interface is down */
2009		if (tun->dev->flags & IFF_UP) {
2010			tun_debug(KERN_INFO, tun,
2011				  "Linktype set failed because interface is up\n");
2012			ret = -EBUSY;
2013		} else {
2014			tun->dev->type = (int) arg;
2015			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
2016				  tun->dev->type);
2017			ret = 0;
2018		}
2019		break;
2020
2021#ifdef TUN_DEBUG
2022	case TUNSETDEBUG:
2023		tun->debug = arg;
2024		break;
2025#endif
2026	case TUNSETOFFLOAD:
2027		ret = set_offload(tun, arg);
2028		break;
2029
2030	case TUNSETTXFILTER:
2031		/* Can be set only for TAPs */
2032		ret = -EINVAL;
2033		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2034			break;
2035		ret = update_filter(&tun->txflt, (void __user *)arg);
2036		break;
2037
2038	case SIOCGIFHWADDR:
2039		/* Get hw address */
2040		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
2041		ifr.ifr_hwaddr.sa_family = tun->dev->type;
2042		if (copy_to_user(argp, &ifr, ifreq_len))
2043			ret = -EFAULT;
2044		break;
2045
2046	case SIOCSIFHWADDR:
2047		/* Set hw address */
2048		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
2049			  ifr.ifr_hwaddr.sa_data);
2050
2051		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
2052		break;
2053
2054	case TUNGETSNDBUF:
2055		sndbuf = tfile->socket.sk->sk_sndbuf;
2056		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
2057			ret = -EFAULT;
2058		break;
2059
2060	case TUNSETSNDBUF:
2061		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
2062			ret = -EFAULT;
2063			break;
2064		}
2065
2066		tun->sndbuf = sndbuf;
2067		tun_set_sndbuf(tun);
2068		break;
2069
2070	case TUNGETVNETHDRSZ:
2071		vnet_hdr_sz = tun->vnet_hdr_sz;
2072		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
2073			ret = -EFAULT;
2074		break;
2075
2076	case TUNSETVNETHDRSZ:
2077		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
2078			ret = -EFAULT;
2079			break;
2080		}
2081		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
2082			ret = -EINVAL;
2083			break;
2084		}
2085
2086		tun->vnet_hdr_sz = vnet_hdr_sz;
2087		break;
2088
2089	case TUNATTACHFILTER:
2090		/* Can be set only for TAPs */
2091		ret = -EINVAL;
2092		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2093			break;
2094		ret = -EFAULT;
2095		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
2096			break;
2097
2098		ret = tun_attach_filter(tun);
2099		break;
2100
2101	case TUNDETACHFILTER:
2102		/* Can be set only for TAPs */
2103		ret = -EINVAL;
2104		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2105			break;
2106		ret = 0;
2107		tun_detach_filter(tun, tun->numqueues);
2108		break;
2109
2110	case TUNGETFILTER:
2111		ret = -EINVAL;
2112		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2113			break;
2114		ret = -EFAULT;
2115		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
2116			break;
2117		ret = 0;
2118		break;
2119
2120	default:
2121		ret = -EINVAL;
2122		break;
2123	}
2124
2125unlock:
2126	rtnl_unlock();
2127	if (tun)
2128		tun_put(tun);
2129	return ret;
2130}
2131
2132static long tun_chr_ioctl(struct file *file,
2133			  unsigned int cmd, unsigned long arg)
2134{
2135	return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
2136}
2137
2138#ifdef CONFIG_COMPAT
2139static long tun_chr_compat_ioctl(struct file *file,
2140			 unsigned int cmd, unsigned long arg)
2141{
2142	switch (cmd) {
2143	case TUNSETIFF:
2144	case TUNGETIFF:
2145	case TUNSETTXFILTER:
2146	case TUNGETSNDBUF:
2147	case TUNSETSNDBUF:
2148	case SIOCGIFHWADDR:
2149	case SIOCSIFHWADDR:
2150		arg = (unsigned long)compat_ptr(arg);
2151		break;
2152	default:
2153		arg = (compat_ulong_t)arg;
2154		break;
2155	}
2156
2157	/*
2158	 * compat_ifreq is shorter than ifreq, so we must not access beyond
2159	 * the end of that structure. All fields that are used in this
 2160	 * driver are compatible, though, so we don't need to convert the
 2161	 * contents.
2162	 */
2163	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2164}
2165#endif /* CONFIG_COMPAT */
2166
2167static int tun_chr_fasync(int fd, struct file *file, int on)
2168{
2169	struct tun_file *tfile = file->private_data;
2170	int ret;
2171
2172	if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
2173		goto out;
2174
2175	if (on) {
2176		ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
2177		if (ret)
2178			goto out;
2179		tfile->flags |= TUN_FASYNC;
2180	} else
2181		tfile->flags &= ~TUN_FASYNC;
2182	ret = 0;
2183out:
2184	return ret;
2185}
2186
2187static int tun_chr_open(struct inode *inode, struct file * file)
2188{
2189	struct tun_file *tfile;
2190
2191	DBG1(KERN_INFO, "tunX: tun_chr_open\n");
2192
2193	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
2194					    &tun_proto);
2195	if (!tfile)
2196		return -ENOMEM;
2197	RCU_INIT_POINTER(tfile->tun, NULL);
2198	tfile->net = get_net(current->nsproxy->net_ns);
2199	tfile->flags = 0;
2200	tfile->ifindex = 0;
2201
2202	rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2203	init_waitqueue_head(&tfile->wq.wait);
2204
2205	tfile->socket.file = file;
2206	tfile->socket.ops = &tun_socket_ops;
2207
2208	sock_init_data(&tfile->socket, &tfile->sk);
2209	sk_change_net(&tfile->sk, tfile->net);
2210
2211	tfile->sk.sk_write_space = tun_sock_write_space;
2212	tfile->sk.sk_sndbuf = INT_MAX;
2213
2214	file->private_data = tfile;
2215	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2216	INIT_LIST_HEAD(&tfile->next);
2217
2218	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
2219
2220	return 0;
2221}
2222
2223static int tun_chr_close(struct inode *inode, struct file *file)
2224{
2225	struct tun_file *tfile = file->private_data;
2226	struct net *net = tfile->net;
2227
2228	tun_detach(tfile, true);
2229	put_net(net);
2230
2231	return 0;
2232}
2233
2234#ifdef CONFIG_PROC_FS
2235static int tun_chr_show_fdinfo(struct seq_file *m, struct file *f)
2236{
2237	struct tun_struct *tun;
2238	struct ifreq ifr;
2239
2240	memset(&ifr, 0, sizeof(ifr));
2241
2242	rtnl_lock();
2243	tun = tun_get(f);
2244	if (tun)
2245		tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
2246	rtnl_unlock();
2247
2248	if (tun)
2249		tun_put(tun);
2250
2251	return seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
2252}
2253#endif
2254
2255static const struct file_operations tun_fops = {
2256	.owner	= THIS_MODULE,
2257	.llseek = no_llseek,
2258	.read  = do_sync_read,
2259	.aio_read  = tun_chr_aio_read,
2260	.write = do_sync_write,
2261	.aio_write = tun_chr_aio_write,
2262	.poll	= tun_chr_poll,
2263	.unlocked_ioctl	= tun_chr_ioctl,
2264#ifdef CONFIG_COMPAT
2265	.compat_ioctl = tun_chr_compat_ioctl,
2266#endif
2267	.open	= tun_chr_open,
2268	.release = tun_chr_close,
2269	.fasync = tun_chr_fasync,
2270#ifdef CONFIG_PROC_FS
2271	.show_fdinfo = tun_chr_show_fdinfo,
2272#endif
2273};
2274
2275static struct miscdevice tun_miscdev = {
2276	.minor = TUN_MINOR,
2277	.name = "tun",
2278	.nodename = "net/tun",
2279	.fops = &tun_fops,
2280};
2281
2282/* ethtool interface */
2283
2284static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2285{
2286	cmd->supported		= 0;
2287	cmd->advertising	= 0;
2288	ethtool_cmd_speed_set(cmd, SPEED_10);
2289	cmd->duplex		= DUPLEX_FULL;
2290	cmd->port		= PORT_TP;
2291	cmd->phy_address	= 0;
2292	cmd->transceiver	= XCVR_INTERNAL;
2293	cmd->autoneg		= AUTONEG_DISABLE;
2294	cmd->maxtxpkt		= 0;
2295	cmd->maxrxpkt		= 0;
2296	return 0;
2297}
2298
2299static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2300{
2301	struct tun_struct *tun = netdev_priv(dev);
2302
2303	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2304	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2305
2306	switch (tun->flags & TUN_TYPE_MASK) {
2307	case TUN_TUN_DEV:
2308		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
2309		break;
2310	case TUN_TAP_DEV:
2311		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
2312		break;
2313	}
2314}
2315
2316static u32 tun_get_msglevel(struct net_device *dev)
2317{
2318#ifdef TUN_DEBUG
2319	struct tun_struct *tun = netdev_priv(dev);
2320	return tun->debug;
2321#else
2322	return -EOPNOTSUPP;
2323#endif
2324}
2325
2326static void tun_set_msglevel(struct net_device *dev, u32 value)
2327{
2328#ifdef TUN_DEBUG
2329	struct tun_struct *tun = netdev_priv(dev);
2330	tun->debug = value;
2331#endif
2332}
2333
2334static const struct ethtool_ops tun_ethtool_ops = {
2335	.get_settings	= tun_get_settings,
2336	.get_drvinfo	= tun_get_drvinfo,
2337	.get_msglevel	= tun_get_msglevel,
2338	.set_msglevel	= tun_set_msglevel,
2339	.get_link	= ethtool_op_get_link,
2340	.get_ts_info	= ethtool_op_get_ts_info,
2341};
2342
2343
2344static int __init tun_init(void)
2345{
2346	int ret = 0;
2347
2348	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2349	pr_info("%s\n", DRV_COPYRIGHT);
2350
2351	ret = rtnl_link_register(&tun_link_ops);
2352	if (ret) {
2353		pr_err("Can't register link_ops\n");
2354		goto err_linkops;
2355	}
2356
2357	ret = misc_register(&tun_miscdev);
2358	if (ret) {
2359		pr_err("Can't register misc device %d\n", TUN_MINOR);
2360		goto err_misc;
2361	}
2362	return  0;
2363err_misc:
2364	rtnl_link_unregister(&tun_link_ops);
2365err_linkops:
2366	return ret;
2367}
2368
2369static void tun_cleanup(void)
2370{
2371	misc_deregister(&tun_miscdev);
2372	rtnl_link_unregister(&tun_link_ops);
2373}
2374
2375/* Get an underlying socket object from tun file.  Returns error unless file is
2376 * attached to a device.  The returned object works like a packet socket, it
2377 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
2378 * holding a reference to the file for as long as the socket is in use. */
2379struct socket *tun_get_socket(struct file *file)
2380{
2381	struct tun_file *tfile;
2382	if (file->f_op != &tun_fops)
2383		return ERR_PTR(-EINVAL);
2384	tfile = file->private_data;
2385	if (!tfile)
2386		return ERR_PTR(-EBADFD);
2387	return &tfile->socket;
2388}
2389EXPORT_SYMBOL_GPL(tun_get_socket);
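/*
 * In-kernel usage sketch (editorial illustration of how a backend such
 * as vhost-net can consume this export; not code from this driver):
 *
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	-- sock can now be used with sock_sendmsg()/sock_recvmsg();
 *	-- the caller must hold a reference on 'file' throughout.
 */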
2390
2391module_init(tun_init);
2392module_exit(tun_cleanup);
2393MODULE_DESCRIPTION(DRV_DESCRIPTION);
2394MODULE_AUTHOR(DRV_COPYRIGHT);
2395MODULE_LICENSE("GPL");
2396MODULE_ALIAS_MISCDEV(TUN_MINOR);
2397MODULE_ALIAS("devname:net/tun");