// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * TUN - Universal TUN/TAP device driver.
 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
 *
 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
 */

/*
 * Changes:
 *
 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
 * Add TUNSETLINK ioctl to set the link encapsulation
 *
 * Mark Smith <markzzzsmith@yahoo.com.au>
 * Use eth_random_addr() for tap MAC address.
 *
 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
 * Fixes in packet dropping, queue length setting and queue wakeup.
 * Increased default tx queue length.
 * Added ethtool API.
 * Minor cleanups
 *
 * Daniel Podlejski <underley@underley.eu.org>
 * Modifications for 2.3.99-pre5 kernel.
 */
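
/*
 * Illustrative userspace usage (a minimal sketch, not part of the driver;
 * error handling omitted). Creating a TUN interface boils down to opening
 * the clone device and issuing TUNSETIFF:
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/if.h>
 *	#include <linux/if_tun.h>
 *
 *	int tun_alloc(char *name)
 *	{
 *		struct ifreq ifr;
 *		int fd = open("/dev/net/tun", O_RDWR);
 *
 *		memset(&ifr, 0, sizeof(ifr));
 *		ifr.ifr_flags = IFF_TUN | IFF_NO_PI;	// or IFF_TAP
 *		strncpy(ifr.ifr_name, name, IFNAMSIZ - 1);
 *		ioctl(fd, TUNSETIFF, (void *)&ifr);	// attach fd to the netdev
 *		return fd;	// read()/write() on fd now move packets
 *	}
 */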

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME	"tun"
#define DRV_VERSION	"1.6"
#define DRV_DESCRIPTION	"Universal TUN/TAP device driver"
#define DRV_COPYRIGHT	"(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/if_vlan.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <net/xdp.h>
#include <net/ip_tunnels.h>
#include <linux/seq_file.h>
#include <linux/uio.h>
#include <linux/skb_array.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mutex.h>
#include <linux/ieee802154.h>
#include <linux/if_ltalk.h>
#include <uapi/linux/if_fddi.h>
#include <uapi/linux/if_hippi.h>
#include <uapi/linux/if_fc.h>
#include <net/ax25.h>
#include <net/rose.h>
#include <net/6lowpan.h>

#include <linux/uaccess.h>
#include <linux/proc_fs.h>

static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd);

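/* Headroom reserved at the front of receive buffers: NET_SKB_PAD for the
 * stack's own use plus NET_IP_ALIGN so the IP header ends up naturally
 * aligned once the Ethernet header is pulled.
 */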
#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)

/* TUN device flags */

/* IFF_ATTACH_QUEUE is never stored in device flags,
 * overload it to mean fasync when stored there.
 */
#define TUN_FASYNC	IFF_ATTACH_QUEUE
/* High bits in flags field are unused. */
#define TUN_VNET_LE	0x80000000
#define TUN_VNET_BE	0x40000000
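/* These two bits select the byte order of the virtio_net_hdr exchanged with
 * userspace; they are toggled via TUNSETVNETLE/TUNGETVNETLE (and, with
 * CONFIG_TUN_VNET_CROSS_LE, TUNSETVNETBE/TUNGETVNETBE) and never leave the
 * kernel through TUNGETIFF.
 */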

#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
		      IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)

#define GOODCOPY_LEN 128

#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int	count;    /* Number of addrs. Zero means disabled */
	u32		mask[2];  /* Mask of the hashed addrs */
	unsigned char	addr[FLT_EXACT_COUNT][ETH_ALEN];
};

/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
 * to the max number of VCPUs in a guest. */
#define MAX_TAP_QUEUES 256
#define MAX_TAP_FLOWS  4096
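/* With IFF_MULTI_QUEUE, userspace can open /dev/net/tun several times and
 * attach each fd as an extra queue of the same interface, e.g. (sketch):
 *
 *	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_MULTI_QUEUE;
 *	ioctl(fd, TUNSETIFF, &ifr);		// first and subsequent fds
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;	// or IFF_ATTACH_QUEUE
 *	ioctl(fd, TUNSETQUEUE, &ifr);		// park/resume this queue
 */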

#define TUN_FLOW_EXPIRE (3 * HZ)

/* A tun_file connects an open character device to a tuntap netdevice. It
 * also contains all socket related structures (except sock_fprog and
 * tap_filter) so that it can serve as one transmit queue for the tuntap
 * device. The sock_fprog and tap_filter are kept in tun_struct since they
 * are used for filtering on the netdevice, not on a specific queue.
 *
 * RCU usage:
 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 * other can only be read while rcu_read_lock or rtnl_lock is held.
 */
struct tun_file {
	struct sock sk;
	struct socket socket;
	struct tun_struct __rcu *tun;
	struct fasync_struct *fasync;
	/* only used for fasync */
	unsigned int flags;
	union {
		u16 queue_index;
		unsigned int ifindex;
	};
	struct napi_struct napi;
	bool napi_enabled;
	bool napi_frags_enabled;
	struct mutex napi_mutex;	/* Protects access to the above napi */
	struct list_head next;
	struct tun_struct *detached;
	struct ptr_ring tx_ring;
	struct xdp_rxq_info xdp_rxq;
};

struct tun_page {
	struct page *page;
	int count;
};

struct tun_flow_entry {
	struct hlist_node hash_link;
	struct rcu_head rcu;
	struct tun_struct *tun;

	u32 rxhash;
	u32 rps_rxhash;
	int queue_index;
	unsigned long updated ____cacheline_aligned_in_smp;
};

#define TUN_NUM_FLOW_ENTRIES 1024
#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
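/* The flow table is indexed by rxhash & TUN_MASK_FLOW_ENTRIES, so
 * TUN_NUM_FLOW_ENTRIES must remain a power of two (1024 - 1 = 0x3ff keeps
 * exactly the low 10 bits of the hash).
 */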

struct tun_prog {
	struct rcu_head rcu;
	struct bpf_prog *prog;
};

/* Since the socket was moved into tun_file, to preserve the behavior of a
 * persistent device, the socket filter, sndbuf and vnet header size are
 * restored when a file is attached to the persistent device.
 */
struct tun_struct {
	struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
	unsigned int numqueues;
	unsigned int flags;
	kuid_t owner;
	kgid_t group;

	struct net_device *dev;
	netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
			   NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)

	int align;
	int vnet_hdr_sz;
	int sndbuf;
	struct tap_filter txflt;
	struct sock_fprog fprog;
	/* protected by rtnl lock */
	bool filter_attached;
	u32 msg_enable;
	spinlock_t lock;
	struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
	struct timer_list flow_gc_timer;
	unsigned long ageing_time;
	unsigned int numdisabled;
	struct list_head disabled;
	void *security;
	u32 flow_count;
	u32 rx_batched;
	atomic_long_t rx_frame_errors;
	struct bpf_prog __rcu *xdp_prog;
	struct tun_prog __rcu *steering_prog;
	struct tun_prog __rcu *filter_prog;
	struct ethtool_link_ksettings link_ksettings;
	/* init args */
	struct file *file;
	struct ifreq *ifr;
};

struct veth {
	__be16 h_vlan_proto;
	__be16 h_vlan_TCI;
};

static void tun_flow_init(struct tun_struct *tun);
static void tun_flow_uninit(struct tun_struct *tun);

static int tun_napi_receive(struct napi_struct *napi, int budget)
{
	struct tun_file *tfile = container_of(napi, struct tun_file, napi);
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	struct sk_buff *skb;
	int received = 0;

	__skb_queue_head_init(&process_queue);

	spin_lock(&queue->lock);
	skb_queue_splice_tail_init(queue, &process_queue);
	spin_unlock(&queue->lock);

	while (received < budget && (skb = __skb_dequeue(&process_queue))) {
		napi_gro_receive(napi, skb);
		++received;
	}

	if (!skb_queue_empty(&process_queue)) {
		spin_lock(&queue->lock);
		skb_queue_splice(&process_queue, queue);
		spin_unlock(&queue->lock);
	}

	return received;
}

static int tun_napi_poll(struct napi_struct *napi, int budget)
{
	unsigned int received;

	received = tun_napi_receive(napi, budget);

	if (received < budget)
		napi_complete_done(napi, received);

	return received;
}

static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
			  bool napi_en, bool napi_frags)
{
	tfile->napi_enabled = napi_en;
	tfile->napi_frags_enabled = napi_en && napi_frags;
	if (napi_en) {
		netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
		napi_enable(&tfile->napi);
	}
}

static void tun_napi_enable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_enable(&tfile->napi);
}

static void tun_napi_disable(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		napi_disable(&tfile->napi);
}

static void tun_napi_del(struct tun_file *tfile)
{
	if (tfile->napi_enabled)
		netif_napi_del(&tfile->napi);
}

static bool tun_napi_frags_enabled(const struct tun_file *tfile)
{
	return tfile->napi_frags_enabled;
}

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be = !!(tun->flags & TUN_VNET_BE);

	if (put_user(be, argp))
		return -EFAULT;

	return 0;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	int be;

	if (get_user(be, argp))
		return -EFAULT;

	if (be)
		tun->flags |= TUN_VNET_BE;
	else
		tun->flags &= ~TUN_VNET_BE;

	return 0;
}
#else
static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
{
	return virtio_legacy_is_little_endian();
}

static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}

static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool tun_is_little_endian(struct tun_struct *tun)
{
	return tun->flags & TUN_VNET_LE ||
		tun_legacy_is_little_endian(tun);
}

static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
{
	return __virtio16_to_cpu(tun_is_little_endian(tun), val);
}

static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
{
	return __cpu_to_virtio16(tun_is_little_endian(tun), val);
}

static inline u32 tun_hashfn(u32 rxhash)
{
	return rxhash & TUN_MASK_FLOW_ENTRIES;
}

static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
{
	struct tun_flow_entry *e;

	hlist_for_each_entry_rcu(e, head, hash_link) {
		if (e->rxhash == rxhash)
			return e;
	}
	return NULL;
}

static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
					      struct hlist_head *head,
					      u32 rxhash, u16 queue_index)
{
	struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);

	if (e) {
		netif_info(tun, tx_queued, tun->dev,
			   "create flow: hash %u index %u\n",
			   rxhash, queue_index);
		e->updated = jiffies;
		e->rxhash = rxhash;
		e->rps_rxhash = 0;
		e->queue_index = queue_index;
		e->tun = tun;
		hlist_add_head_rcu(&e->hash_link, head);
		++tun->flow_count;
	}
	return e;
}

static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
{
	netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
		   e->rxhash, e->queue_index);
	hlist_del_rcu(&e->hash_link);
	kfree_rcu(e, rcu);
	--tun->flow_count;
}

static void tun_flow_flush(struct tun_struct *tun)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
			tun_flow_delete(tun, e);
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
{
	int i;

	spin_lock_bh(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			if (e->queue_index == queue_index)
				tun_flow_delete(tun, e);
		}
	}
	spin_unlock_bh(&tun->lock);
}

static void tun_flow_cleanup(struct timer_list *t)
{
	struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
	unsigned long delay = tun->ageing_time;
	unsigned long next_timer = jiffies + delay;
	unsigned long count = 0;
	int i;

	spin_lock(&tun->lock);
	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
		struct tun_flow_entry *e;
		struct hlist_node *n;

		hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
			unsigned long this_timer;

			this_timer = e->updated + delay;
			if (time_before_eq(this_timer, jiffies)) {
				tun_flow_delete(tun, e);
				continue;
			}
			count++;
			if (time_before(this_timer, next_timer))
				next_timer = this_timer;
		}
	}

	if (count)
		mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
	spin_unlock(&tun->lock);
}

static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
			    struct tun_file *tfile)
{
	struct hlist_head *head;
	struct tun_flow_entry *e;
	unsigned long delay = tun->ageing_time;
	u16 queue_index = tfile->queue_index;

	head = &tun->flows[tun_hashfn(rxhash)];

	rcu_read_lock();

	e = tun_flow_find(head, rxhash);
	if (likely(e)) {
		/* TODO: keep queueing to old queue until it's empty? */
		if (READ_ONCE(e->queue_index) != queue_index)
			WRITE_ONCE(e->queue_index, queue_index);
		if (e->updated != jiffies)
			e->updated = jiffies;
		sock_rps_record_flow_hash(e->rps_rxhash);
	} else {
		spin_lock_bh(&tun->lock);
		if (!tun_flow_find(head, rxhash) &&
		    tun->flow_count < MAX_TAP_FLOWS)
			tun_flow_create(tun, head, rxhash, queue_index);

		if (!timer_pending(&tun->flow_gc_timer))
			mod_timer(&tun->flow_gc_timer,
				  round_jiffies_up(jiffies + delay));
		spin_unlock_bh(&tun->lock);
	}

	rcu_read_unlock();
}

/* Save the hash received in the stack receive path and update the
 * flow_hash table accordingly.
 */
static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
{
	if (unlikely(e->rps_rxhash != hash))
		e->rps_rxhash = hash;
}

/* We try to identify a flow through its rxhash. The reason that
 * we do not check rxq no. is that some cards (e.g. 82599) choose
 * the rxq based on the txq where the last packet of the flow went out. As
 * the userspace application moves between processors, we may get a
 * different rxq no. here.
 */
static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_flow_entry *e;
	u32 txq = 0;
	u32 numqueues = 0;

	numqueues = READ_ONCE(tun->numqueues);

	txq = __skb_get_hash_symmetric(skb);
	e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
	if (e) {
		tun_flow_save_rps_rxhash(e, txq);
		txq = e->queue_index;
	} else {
		/* use multiply and shift instead of an expensive divide */
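		/* e.g. hash 0xc0000000 with 4 queues: (0xc0000000ULL * 4) >> 32 = 3 */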
		txq = ((u64)txq * numqueues) >> 32;
	}

	return txq;
}

static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
{
	struct tun_prog *prog;
	u32 numqueues;
	u16 ret = 0;

	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues)
		return 0;

	prog = rcu_dereference(tun->steering_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog->prog, skb);

	return ret % numqueues;
}

static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	u16 ret;

	rcu_read_lock();
	if (rcu_dereference(tun->steering_prog))
		ret = tun_ebpf_select_queue(tun, skb);
	else
		ret = tun_automq_select_queue(tun, skb);
	rcu_read_unlock();

	return ret;
}

static inline bool tun_not_capable(struct tun_struct *tun)
{
	const struct cred *cred = current_cred();
	struct net *net = dev_net(tun->dev);

	return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
		(gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
		!ns_capable(net->user_ns, CAP_NET_ADMIN);
}

static void tun_set_real_num_queues(struct tun_struct *tun)
{
	netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
	netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
}

static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
{
	tfile->detached = tun;
	list_add_tail(&tfile->next, &tun->disabled);
	++tun->numdisabled;
}

static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
{
	struct tun_struct *tun = tfile->detached;

	tfile->detached = NULL;
	list_del_init(&tfile->next);
	--tun->numdisabled;
	return tun;
}

void tun_ptr_free(void *ptr)
{
	if (!ptr)
		return;
	if (tun_is_xdp_frame(ptr)) {
		struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

		xdp_return_frame(xdpf);
	} else {
		__skb_array_destroy_skb(ptr);
	}
}
EXPORT_SYMBOL_GPL(tun_ptr_free);

static void tun_queue_purge(struct tun_file *tfile)
{
	void *ptr;

	while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
		tun_ptr_free(ptr);

	skb_queue_purge(&tfile->sk.sk_write_queue);
	skb_queue_purge(&tfile->sk.sk_error_queue);
}

static void __tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_file *ntfile;
	struct tun_struct *tun;

	tun = rtnl_dereference(tfile->tun);

	if (tun && clean) {
		if (!tfile->detached)
			tun_napi_disable(tfile);
		tun_napi_del(tfile);
	}

	if (tun && !tfile->detached) {
		u16 index = tfile->queue_index;

		BUG_ON(index >= tun->numqueues);

		rcu_assign_pointer(tun->tfiles[index],
				   tun->tfiles[tun->numqueues - 1]);
		ntfile = rtnl_dereference(tun->tfiles[index]);
		ntfile->queue_index = index;
		ntfile->xdp_rxq.queue_index = index;
		rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
				   NULL);

		--tun->numqueues;
		if (clean) {
			RCU_INIT_POINTER(tfile->tun, NULL);
			sock_put(&tfile->sk);
		} else {
			tun_disable_queue(tun, tfile);
			tun_napi_disable(tfile);
		}

		synchronize_net();
		tun_flow_delete_by_queue(tun, tun->numqueues + 1);
		/* Drop read queue */
		tun_queue_purge(tfile);
		tun_set_real_num_queues(tun);
	} else if (tfile->detached && clean) {
		tun = tun_enable_queue(tfile);
		sock_put(&tfile->sk);
	}

	if (clean) {
		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
			netif_carrier_off(tun->dev);

			if (!(tun->flags & IFF_PERSIST) &&
			    tun->dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(tun->dev);
		}
		if (tun)
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
		ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
	}
}

static void tun_detach(struct tun_file *tfile, bool clean)
{
	struct tun_struct *tun;
	struct net_device *dev;

	rtnl_lock();
	tun = rtnl_dereference(tfile->tun);
	dev = tun ? tun->dev : NULL;
	__tun_detach(tfile, clean);
	if (dev)
		netdev_state_change(dev);
	rtnl_unlock();

	if (clean)
		sock_put(&tfile->sk);
}

static void tun_detach_all(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile, *tmp;
	int i, n = tun->numqueues;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		BUG_ON(!tfile);
		tun_napi_disable(tfile);
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
		--tun->numqueues;
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
		tfile->socket.sk->sk_data_ready(tfile->socket.sk);
		RCU_INIT_POINTER(tfile->tun, NULL);
	}
	BUG_ON(tun->numqueues != 0);

	synchronize_net();
	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tun_napi_del(tfile);
		/* Drop read queue */
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
		tun_napi_del(tfile);
		tun_enable_queue(tfile);
		tun_queue_purge(tfile);
		xdp_rxq_info_unreg(&tfile->xdp_rxq);
		sock_put(&tfile->sk);
	}
	BUG_ON(tun->numdisabled != 0);

	if (tun->flags & IFF_PERSIST)
		module_put(THIS_MODULE);
}

static int tun_attach(struct tun_struct *tun, struct file *file,
		      bool skip_filter, bool napi, bool napi_frags,
		      bool publish_tun)
{
	struct tun_file *tfile = file->private_data;
	struct net_device *dev = tun->dev;
	int err;

	err = security_tun_dev_attach(tfile->socket.sk, tun->security);
	if (err < 0)
		goto out;

	err = -EINVAL;
	if (rtnl_dereference(tfile->tun) && !tfile->detached)
		goto out;

	err = -EBUSY;
	if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
		goto out;

	err = -E2BIG;
	if (!tfile->detached &&
	    tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
		goto out;

	err = 0;

	/* Re-attach the filter to the persistent device */
	if (!skip_filter && tun->filter_attached) {
		lock_sock(tfile->socket.sk);
		err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (err)
			goto out;
	}

	if (!tfile->detached &&
	    ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
			    GFP_KERNEL, tun_ptr_free)) {
		err = -ENOMEM;
		goto out;
	}

	tfile->queue_index = tun->numqueues;
	tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;

	if (tfile->detached) {
		/* Re-attach detached tfile, updating XDP queue_index */
		WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));

		if (tfile->xdp_rxq.queue_index != tfile->queue_index)
			tfile->xdp_rxq.queue_index = tfile->queue_index;
	} else {
		/* Setup XDP RX-queue info, for new tfile getting attached */
		err = xdp_rxq_info_reg(&tfile->xdp_rxq,
				       tun->dev, tfile->queue_index, 0);
		if (err < 0)
			goto out;
		err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
						 MEM_TYPE_PAGE_SHARED, NULL);
		if (err < 0) {
			xdp_rxq_info_unreg(&tfile->xdp_rxq);
			goto out;
		}
		err = 0;
	}

	if (tfile->detached) {
		tun_enable_queue(tfile);
		tun_napi_enable(tfile);
	} else {
		sock_hold(&tfile->sk);
		tun_napi_init(tun, tfile, napi, napi_frags);
	}

	if (rtnl_dereference(tun->xdp_prog))
		sock_set_flag(&tfile->sk, SOCK_XDP);

	/* device is allowed to go away first, so no need to hold extra
	 * refcnt.
	 */

	/* Publish tfile->tun and tun->tfiles only after we've fully
	 * initialized tfile; otherwise we risk using a half-initialized
	 * object.
	 */
	if (publish_tun)
		rcu_assign_pointer(tfile->tun, tun);
	rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
	tun->numqueues++;
	tun_set_real_num_queues(tun);
out:
	return err;
}

static struct tun_struct *tun_get(struct tun_file *tfile)
{
	struct tun_struct *tun;

	rcu_read_lock();
	tun = rcu_dereference(tfile->tun);
	if (tun)
		dev_hold(tun->dev);
	rcu_read_unlock();

	return tun;
}

static void tun_put(struct tun_struct *tun)
{
	dev_put(tun->dev);
}

/* TAP filtering */
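/* Exact addresses live in tap_filter.addr[]; everything past the first
 * FLT_EXACT_COUNT entries is folded into a 64-bit hash bitmap: the top six
 * bits of the Ethernet CRC pick one of 64 bits (n >> 5 selects the u32
 * word, n & 31 the bit inside it).
 */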
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	mask[n >> 5] |= (1 << (n & 31));
}

static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int n = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[n >> 5] & (1 << (n & 31));
}

static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	alen = ETH_ALEN * uf.count;
	addr = memdup_user(arg + sizeof(uf), alen);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* The filter is updated without holding any locks, which is
	 * perfectly safe: we disable it first, so in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed;
	 * a unicast address will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto free_addr;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;
free_addr:
	kfree(addr);
	return err;
}

/* Returns: 0 - drop, !=0 - accept */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (ether_addr_equal(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}

/*
 * Checks whether the packet is accepted or not.
 * Returns: 0 - drop, !=0 - accept
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (!filter->count)
		return 1;

	return run_filter(filter, skb);
}

/* Network device part of the driver */

static const struct ethtool_ops tun_ethtool_ops;

static int tun_net_init(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct ifreq *ifr = tun->ifr;
	int err;

	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
	if (!dev->tstats)
		return -ENOMEM;

	spin_lock_init(&tun->lock);

	err = security_tun_dev_alloc_security(&tun->security);
	if (err < 0) {
		free_percpu(dev->tstats);
		return err;
	}

	tun_flow_init(tun);

	dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			   TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->features = dev->hw_features | NETIF_F_LLTX;
	dev->vlan_features = dev->features &
			     ~(NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_HW_VLAN_STAG_TX);

	tun->flags = (tun->flags & ~TUN_FEATURES) |
		     (ifr->ifr_flags & TUN_FEATURES);

	INIT_LIST_HEAD(&tun->disabled);
	err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
			 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
	if (err < 0) {
		tun_flow_uninit(tun);
		security_tun_dev_free_security(tun->security);
		free_percpu(dev->tstats);
		return err;
	}
	return 0;
}

/* Net device detach from fd. */
static void tun_net_uninit(struct net_device *dev)
{
	tun_detach_all(dev);
}

/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);

	return 0;
}

/* Net device close. */
static int tun_net_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}

/* Net device start xmit */
static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
{
#ifdef CONFIG_RPS
	if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
		/* Select queue was not called for the skbuff, so we extract the
		 * RPS hash and save it into the flow_table here.
		 */
		struct tun_flow_entry *e;
		__u32 rxhash;

		rxhash = __skb_get_hash_symmetric(skb);
		e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
		if (e)
			tun_flow_save_rps_rxhash(e, rxhash);
	}
#endif
}

static unsigned int run_ebpf_filter(struct tun_struct *tun,
				    struct sk_buff *skb,
				    int len)
{
	struct tun_prog *prog = rcu_dereference(tun->filter_prog);

	if (prog)
		len = bpf_prog_run_clear_cb(prog->prog, skb);

	return len;
}

/* Net device start xmit */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	enum skb_drop_reason drop_reason;
	int txq = skb->queue_mapping;
	struct netdev_queue *queue;
	struct tun_file *tfile;
	int len = skb->len;

	rcu_read_lock();
	tfile = rcu_dereference(tun->tfiles[txq]);

	/* Drop packet if interface is not attached */
	if (!tfile) {
		drop_reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}

	if (!rcu_dereference(tun->steering_prog))
		tun_automq_xmit(tun, skb);

	netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for TAP devices. */
	if (!check_filter(&tun->txflt, skb)) {
		drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
		goto drop;
	}

	if (tfile->socket.sk->sk_filter &&
	    sk_filter(tfile->socket.sk, skb)) {
		drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
		goto drop;
	}

	len = run_ebpf_filter(tun, skb, len);
	if (len == 0) {
		drop_reason = SKB_DROP_REASON_TAP_FILTER;
		goto drop;
	}

	if (pskb_trim(skb, len)) {
		drop_reason = SKB_DROP_REASON_NOMEM;
		goto drop;
	}

	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
		drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
		goto drop;
	}

	skb_tx_timestamp(skb);

	/* Orphan the skb - required as we might hang on to it
	 * for an indefinite time.
	 */
	skb_orphan(skb);

	nf_reset_ct(skb);

	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
		drop_reason = SKB_DROP_REASON_FULL_RING;
		goto drop;
	}

	/* NETIF_F_LLTX requires that we do our own update of trans_start */
	queue = netdev_get_tx_queue(dev, txq);
	txq_trans_cond_update(queue);

	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);

	rcu_read_unlock();
	return NETDEV_TX_OK;

drop:
	dev_core_stats_tx_dropped_inc(dev);
	skb_tx_error(skb);
	kfree_skb_reason(skb, drop_reason);
	rcu_read_unlock();
	return NET_XMIT_DROP;
}

static void tun_net_mclist(struct net_device *dev)
{
	/*
	 * This callback is supposed to deal with mc filter in
	 * _rx_ path and has nothing to do with the _tx_ path.
	 * In rx path we always accept everything userspace gives us.
	 */
}

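/* Only the bits in TUN_USER_FEATURES are negotiable from userspace (via
 * TUNSETOFFLOAD); tun->set_features caches that selection, and every other
 * feature bit passes through untouched.
 */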
static netdev_features_t tun_net_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);

	return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
}

static void tun_set_headroom(struct net_device *dev, int new_hr)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (new_hr < NET_SKB_PAD)
		new_hr = NET_SKB_PAD;

	tun->align = new_hr;
}

static void
tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct tun_struct *tun = netdev_priv(dev);

	dev_get_tstats64(dev, stats);

	stats->rx_frame_errors +=
		(unsigned long)atomic_long_read(&tun->rx_frame_errors);
}

static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		       struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	struct bpf_prog *old_prog;
	int i;

	old_prog = rtnl_dereference(tun->xdp_prog);
	rcu_assign_pointer(tun->xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}
	list_for_each_entry(tfile, &tun->disabled, next) {
		if (prog)
			sock_set_flag(&tfile->sk, SOCK_XDP);
		else
			sock_reset_flag(&tfile->sk, SOCK_XDP);
	}

	return 0;
}

static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return tun_xdp_set(dev, xdp->prog, xdp->extack);
	default:
		return -EINVAL;
	}
}

static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
{
	if (new_carrier) {
		struct tun_struct *tun = netdev_priv(dev);

		if (!tun->numqueues)
			return -EPERM;

		netif_carrier_on(dev);
	} else {
		netif_carrier_off(dev);
	}
	return 0;
}

static const struct net_device_ops tun_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_select_queue	= tun_select_queue,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= tun_net_get_stats64,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void __tun_xdp_flush_tfile(struct tun_file *tfile)
{
	/* Notify and wake up reader process */
	if (tfile->flags & TUN_FASYNC)
		kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
	tfile->socket.sk->sk_data_ready(tfile->socket.sk);
}

static int tun_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile;
	u32 numqueues;
	int nxmit = 0;
	int i;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	rcu_read_lock();

resample:
	numqueues = READ_ONCE(tun->numqueues);
	if (!numqueues) {
		rcu_read_unlock();
		return -ENXIO; /* Caller will free/return all frames */
	}

	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
					    numqueues]);
	if (unlikely(!tfile))
		goto resample;

	spin_lock(&tfile->tx_ring.producer_lock);
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdp = frames[i];
		/* Encode the XDP flag into the lowest bit so the consumer
		 * can tell an XDP frame apart from an sk_buff.
		 */
		void *frame = tun_xdp_to_ptr(xdp);

		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
			dev_core_stats_tx_dropped_inc(dev);
			break;
		}
		nxmit++;
	}
	spin_unlock(&tfile->tx_ring.producer_lock);

	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);

	rcu_read_unlock();
	return nxmit;
}

static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
{
	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
	int nxmit;

	if (unlikely(!frame))
		return -EOVERFLOW;

	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
	if (!nxmit)
		xdp_return_frame_rx_napi(frame);
	return nxmit;
}

static const struct net_device_ops tap_netdev_ops = {
	.ndo_init		= tun_net_init,
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_select_queue	= tun_select_queue,
	.ndo_features_check	= passthru_features_check,
	.ndo_set_rx_headroom	= tun_set_headroom,
	.ndo_get_stats64	= dev_get_tstats64,
	.ndo_bpf		= tun_xdp,
	.ndo_xdp_xmit		= tun_xdp_xmit,
	.ndo_change_carrier	= tun_net_change_carrier,
};

static void tun_flow_init(struct tun_struct *tun)
{
	int i;

	for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
		INIT_HLIST_HEAD(&tun->flows[i]);

	tun->ageing_time = TUN_FLOW_EXPIRE;
	timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
	mod_timer(&tun->flow_gc_timer,
		  round_jiffies_up(jiffies + tun->ageing_time));
}

static void tun_flow_uninit(struct tun_struct *tun)
{
	del_timer_sync(&tun->flow_gc_timer);
	tun_flow_flush(tun);
}

#define MIN_MTU 68
#define MAX_MTU 65535

/* Initialize net device. */
static void tun_net_initialize(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		dev->netdev_ops = &tun_netdev_ops;
		dev->header_ops = &ip_tunnel_header_ops;

		/* Point-to-Point TUN Device */
		dev->hard_header_len = 0;
		dev->addr_len = 0;
		dev->mtu = 1500;

		/* Zero header length */
		dev->type = ARPHRD_NONE;
		dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
		break;

	case IFF_TAP:
		dev->netdev_ops = &tap_netdev_ops;
		/* Ethernet TAP Device */
		ether_setup(dev);
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

		eth_hw_addr_random(dev);

		/* Currently tun does not support XDP, only tap does. */
		dev->xdp_features = NETDEV_XDP_ACT_BASIC |
				    NETDEV_XDP_ACT_REDIRECT |
				    NETDEV_XDP_ACT_NDO_XMIT;

		break;
	}

	dev->min_mtu = MIN_MTU;
	dev->max_mtu = MAX_MTU - dev->hard_header_len;
}

static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
{
	struct sock *sk = tfile->socket.sk;

	return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
}

/* Character device part */

/* Poll */
static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	struct sock *sk;
	__poll_t mask = 0;

	if (!tun)
		return EPOLLERR;

	sk = tfile->socket.sk;

	poll_wait(file, sk_sleep(sk), wait);

	if (!ptr_ring_empty(&tfile->tx_ring))
		mask |= EPOLLIN | EPOLLRDNORM;

	/* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
	 * guarantee EPOLLOUT will be raised either here or in
	 * tun_sock_write_space(). This way a process that wrote to a down
	 * device and met -EIO can still get a notification afterwards.
	 */
	if (tun_sock_writeable(tun, tfile) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     tun_sock_writeable(tun, tfile)))
		mask |= EPOLLOUT | EPOLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = EPOLLERR;

	tun_put(tun);
	return mask;
}

static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
					    size_t len,
					    const struct iov_iter *it)
{
	struct sk_buff *skb;
	size_t linear;
	int err;
	int i;

	if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
	    len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
		return ERR_PTR(-EMSGSIZE);

	local_bh_disable();
	skb = napi_get_frags(&tfile->napi);
	local_bh_enable();
	if (!skb)
		return ERR_PTR(-ENOMEM);

	linear = iov_iter_single_seg_count(it);
	err = __skb_grow(skb, linear);
	if (err)
		goto free;

	skb->len = len;
	skb->data_len = len - linear;
	skb->truesize += skb->data_len;

	for (i = 1; i < it->nr_segs; i++) {
		const struct iovec *iov = iter_iov(it) + i;
		size_t fragsz = iov->iov_len;
		struct page *page;
		void *frag;

		if (fragsz == 0 || fragsz > PAGE_SIZE) {
			err = -EINVAL;
			goto free;
		}
		frag = netdev_alloc_frag(fragsz);
		if (!frag) {
			err = -ENOMEM;
			goto free;
		}
		page = virt_to_head_page(frag);
		skb_fill_page_desc(skb, i - 1, page,
				   frag - page_address(page), fragsz);
	}

	return skb;
free:
	/* frees skb and all frags allocated with napi_alloc_frag() */
	napi_free_frags(&tfile->napi);
	return ERR_PTR(err);
}

/* prepad is the amount to reserve at front. len is length after that.
 * linear is a hint as to how much to copy (usually headers). */
static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tfile->socket.sk;
	struct sk_buff *skb;
	int err;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE)
		linear = len;

	if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err, PAGE_ALLOC_COSTLY_ORDER);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
			   struct sk_buff *skb, int more)
{
	struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
	struct sk_buff_head process_queue;
	u32 rx_batched = tun->rx_batched;
	bool rcv = false;

	if (!rx_batched || (!more && skb_queue_empty(queue))) {
		local_bh_disable();
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
		return;
	}

	spin_lock(&queue->lock);
	if (!more || skb_queue_len(queue) == rx_batched) {
		__skb_queue_head_init(&process_queue);
		skb_queue_splice_tail_init(queue, &process_queue);
		rcv = true;
	} else {
		__skb_queue_tail(queue, skb);
	}
	spin_unlock(&queue->lock);

	if (rcv) {
		struct sk_buff *nskb;

		local_bh_disable();
		while ((nskb = __skb_dequeue(&process_queue))) {
			skb_record_rx_queue(nskb, tfile->queue_index);
			netif_receive_skb(nskb);
		}
		skb_record_rx_queue(skb, tfile->queue_index);
		netif_receive_skb(skb);
		local_bh_enable();
	}
}

static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
			      int len, int noblock, bool zerocopy)
{
	if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
		return false;

	if (tfile->socket.sk->sk_sndbuf != INT_MAX)
		return false;

	if (!noblock)
		return false;

	if (zerocopy)
		return false;

	if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
	    SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
		return false;

	return true;
}

static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
				       struct page_frag *alloc_frag, char *buf,
				       int buflen, int len, int pad)
{
	struct sk_buff *skb = build_skb(buf, buflen);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	skb_reserve(skb, pad);
	skb_put(skb, len);
	skb_set_owner_w(skb, tfile->socket.sk);

	get_page(alloc_frag->page);
	alloc_frag->offset += buflen;

	return skb;
}

static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
		       struct xdp_buff *xdp, u32 act)
{
	int err;

	switch (act) {
	case XDP_REDIRECT:
		err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
		if (err) {
			dev_core_stats_rx_dropped_inc(tun->dev);
			return err;
		}
		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
		break;
	case XDP_TX:
		err = tun_xdp_tx(tun->dev, xdp);
		if (err < 0) {
			dev_core_stats_rx_dropped_inc(tun->dev);
			return err;
		}
		dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
		break;
	case XDP_PASS:
		break;
	default:
		bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(tun->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		dev_core_stats_rx_dropped_inc(tun->dev);
		break;
	}

	return act;
}

static struct sk_buff *tun_build_skb(struct tun_struct *tun,
				     struct tun_file *tfile,
				     struct iov_iter *from,
				     struct virtio_net_hdr *hdr,
				     int len, int *skb_xdp)
{
	struct page_frag *alloc_frag = &current->task_frag;
	struct bpf_prog *xdp_prog;
	int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	char *buf;
	size_t copied;
	int pad = TUN_RX_PAD;
	int err = 0;

	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog)
		pad += XDP_PACKET_HEADROOM;
	buflen += SKB_DATA_ALIGN(len + pad);
	rcu_read_unlock();

	alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
	if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
		return ERR_PTR(-ENOMEM);

	buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
	copied = copy_page_from_iter(alloc_frag->page,
				     alloc_frag->offset + pad,
				     len, from);
	if (copied != len)
		return ERR_PTR(-EFAULT);

	/* There's a small window in which XDP may be set after the check
	 * of xdp_prog above; this should be rare, and for simplicity we do
	 * XDP on the skb in case the headroom is not enough.
	 */
	if (hdr->gso_type || !xdp_prog) {
		*skb_xdp = 1;
		return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
				       pad);
	}

	*skb_xdp = 0;

	local_bh_disable();
	rcu_read_lock();
	xdp_prog = rcu_dereference(tun->xdp_prog);
	if (xdp_prog) {
		struct xdp_buff xdp;
		u32 act;

		xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
		xdp_prepare_buff(&xdp, buf, pad, len, false);

		act = bpf_prog_run_xdp(xdp_prog, &xdp);
		if (act == XDP_REDIRECT || act == XDP_TX) {
			get_page(alloc_frag->page);
			alloc_frag->offset += buflen;
		}
		err = tun_xdp_act(tun, xdp_prog, &xdp, act);
		if (err < 0) {
			if (act == XDP_REDIRECT || act == XDP_TX)
				put_page(alloc_frag->page);
			goto out;
		}

		if (err == XDP_REDIRECT)
			xdp_do_flush();
		if (err != XDP_PASS)
			goto out;

		pad = xdp.data - xdp.data_hard_start;
		len = xdp.data_end - xdp.data;
	}
	rcu_read_unlock();
	local_bh_enable();

	return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);

out:
	rcu_read_unlock();
	local_bh_enable();
	return NULL;
}

/* Get packet from user space buffer */
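/* Layout expected from userspace: an optional struct tun_pi (unless
 * IFF_NO_PI), then an optional virtio_net_hdr padded out to vnet_hdr_sz
 * (if IFF_VNET_HDR), then the raw frame.
 */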
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
			    void *msg_control, struct iov_iter *from,
			    int noblock, bool more)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t total_len = iov_iter_count(from);
	size_t len = total_len, align = tun->align, linear;
	struct virtio_net_hdr gso = { 0 };
	int good_linear;
	int copylen;
	bool zerocopy = false;
	int err;
	u32 rxhash = 0;
	int skb_xdp = 1;
	bool frags = tun_napi_frags_enabled(tfile);
	enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	if (!(tun->flags & IFF_NO_PI)) {
		if (len < sizeof(pi))
			return -EINVAL;
		len -= sizeof(pi);

		if (!copy_from_iter_full(&pi, sizeof(pi), from))
			return -EFAULT;
	}

	if (tun->flags & IFF_VNET_HDR) {
		int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

		if (len < vnet_hdr_sz)
			return -EINVAL;
		len -= vnet_hdr_sz;

		if (!copy_from_iter_full(&gso, sizeof(gso), from))
			return -EFAULT;

		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
			gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);

		if (tun16_to_cpu(tun, gso.hdr_len) > len)
			return -EINVAL;
		iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
	}

	if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
			return -EINVAL;
	}

	good_linear = SKB_MAX_HEAD(align);

	if (msg_control) {
		struct iov_iter i = *from;

		/* There are 256 bytes to be copied into the skb, so there is
		 * enough room to expand the skb head if that is needed later.
		 * The rest of the buffer is mapped from userspace.
		 */
		copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		linear = copylen;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
		/* For packets that are not easy to process
		 * (e.g. GSO or jumbo packets), we will do it after
		 * the skb was created with the generic XDP routine.
		 */
		skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
		err = PTR_ERR_OR_ZERO(skb);
		if (err)
			goto drop;
		if (!skb)
			return total_len;
	} else {
		if (!zerocopy) {
			copylen = len;
			if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
				linear = good_linear;
			else
				linear = tun16_to_cpu(tun, gso.hdr_len);
		}

		if (frags) {
			mutex_lock(&tfile->napi_mutex);
			skb = tun_napi_alloc_frags(tfile, copylen, from);
			/* tun_napi_alloc_frags() enforces a layout for the skb.
			 * If zerocopy is enabled, then this layout will be
			 * overwritten by zerocopy_sg_from_iter().
			 */
			zerocopy = false;
		} else {
			if (!linear)
				linear = min_t(size_t, good_linear, copylen);

			skb = tun_alloc_skb(tfile, align, copylen, linear,
					    noblock);
		}

		err = PTR_ERR_OR_ZERO(skb);
		if (err)
			goto drop;

		if (zerocopy)
			err = zerocopy_sg_from_iter(skb, from);
		else
			err = skb_copy_datagram_from_iter(skb, 0, from, len);

		if (err) {
			err = -EFAULT;
			drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
			goto drop;
		}
	}

	if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
		atomic_long_inc(&tun->rx_frame_errors);
		err = -EINVAL;
		goto free_skb;
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		if (tun->flags & IFF_NO_PI) {
			u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;

			switch (ip_version) {
			case 4:
				pi.proto = htons(ETH_P_IP);
				break;
			case 6:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				err = -EINVAL;
				goto drop;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case IFF_TAP:
		if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
			err = -ENOMEM;
			drop_reason = SKB_DROP_REASON_HDR_TRUNC;
			goto drop;
		}
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_zcopy_init(skb, msg_control);
	} else if (msg_control) {
		struct ubuf_info *uarg = msg_control;

		uarg->callback(NULL, uarg, false);
	}

	skb_reset_network_header(skb);
	skb_probe_transport_header(skb);
	skb_record_rx_queue(skb, tfile->queue_index);

	if (skb_xdp) {
		struct bpf_prog *xdp_prog;
		int ret;

		local_bh_disable();
		rcu_read_lock();
		xdp_prog = rcu_dereference(tun->xdp_prog);
		if (xdp_prog) {
			ret = do_xdp_generic(xdp_prog, skb);
			if (ret != XDP_PASS) {
				rcu_read_unlock();
				local_bh_enable();
				goto unlock_frags;
			}
		}
		rcu_read_unlock();
		local_bh_enable();
	}

	/* Compute the costly rx hash only if needed for flow updates.
	 * There is a small possibility of out-of-order delivery while
	 * switching queues; it is not worth optimizing.
	 */
	if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
	    !tfile->detached)
		rxhash = __skb_get_hash_symmetric(skb);

	rcu_read_lock();
	if (unlikely(!(tun->dev->flags & IFF_UP))) {
		err = -EIO;
		rcu_read_unlock();
		drop_reason = SKB_DROP_REASON_DEV_READY;
		goto drop;
	}

	if (frags) {
		u32 headlen;

		/* Exercise flow dissector code path. */
		skb_push(skb, ETH_HLEN);
		headlen = eth_get_headlen(tun->dev, skb->data,
					  skb_headlen(skb));

		if (unlikely(headlen > skb_headlen(skb))) {
			WARN_ON_ONCE(1);
			err = -ENOMEM;
			dev_core_stats_rx_dropped_inc(tun->dev);
napi_busy:
			napi_free_frags(&tfile->napi);
			rcu_read_unlock();
			mutex_unlock(&tfile->napi_mutex);
			return err;
		}

		if (likely(napi_schedule_prep(&tfile->napi))) {
			local_bh_disable();
			napi_gro_frags(&tfile->napi);
			napi_complete(&tfile->napi);
			local_bh_enable();
		} else {
			err = -EBUSY;
			goto napi_busy;
		}
		mutex_unlock(&tfile->napi_mutex);
	} else if (tfile->napi_enabled) {
		struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
		int queue_len;

		spin_lock_bh(&queue->lock);

		if (unlikely(tfile->detached)) {
			spin_unlock_bh(&queue->lock);
			rcu_read_unlock();
			err = -EBUSY;
			goto free_skb;
		}

		__skb_queue_tail(queue, skb);
		queue_len = skb_queue_len(queue);
		spin_unlock(&queue->lock);

		if (!more || queue_len > NAPI_POLL_WEIGHT)
			napi_schedule(&tfile->napi);

		local_bh_enable();
	} else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
		tun_rx_batched(tun, tfile, skb, more);
	} else {
		netif_rx(skb);
	}
	rcu_read_unlock();

	preempt_disable();
	dev_sw_netstats_rx_add(tun->dev, len);
	preempt_enable();

	if (rxhash)
		tun_flow_update(tun, rxhash, tfile);

	return total_len;

drop:
	if (err != -EAGAIN)
		dev_core_stats_rx_dropped_inc(tun->dev);

free_skb:
	if (!IS_ERR_OR_NULL(skb))
		kfree_skb_reason(skb, drop_reason);

unlock_frags:
	if (frags) {
		tfile->napi.skb = NULL;
		mutex_unlock(&tfile->napi_mutex);
	}

	return err ?: total_len;
}

static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = tun_get(tfile);
	ssize_t result;
	int noblock = 0;

	if (!tun)
		return -EBADFD;

	if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
		noblock = 1;

	result = tun_get_user(tun, tfile, NULL, from, noblock, false);

	tun_put(tun);
	return result;
}

static ssize_t tun_put_user_xdp(struct tun_struct *tun,
				struct tun_file *tfile,
				struct xdp_frame *xdp_frame,
				struct iov_iter *iter)
{
	int vnet_hdr_sz = 0;
	size_t size = xdp_frame->len;
	size_t ret;

	if (tun->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 };

		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
		if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
			return -EINVAL;
		if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
			     sizeof(gso)))
			return -EFAULT;
		iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
	}

	ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;

	preempt_disable();
	dev_sw_netstats_tx_add(tun->dev, 1, ret);
	preempt_enable();

	return ret;
}

/* Put packet to the user space buffer */
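/* Mirror layout of tun_get_user(): optional struct tun_pi, optional
 * virtio_net_hdr padded to vnet_hdr_sz, then the frame itself with any
 * VLAN tag re-inserted in front of the payload.
 */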
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct tun_file *tfile,
			    struct sk_buff *skb,
			    struct iov_iter *iter)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total;
	int vlan_offset = 0;
	int vlan_hlen = 0;
	int vnet_hdr_sz = 0;

	if (skb_vlan_tag_present(skb))
		vlan_hlen = VLAN_HLEN;

	if (tun->flags & IFF_VNET_HDR)
		vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);

	total = skb->len + vlan_hlen + vnet_hdr_sz;

	if (!(tun->flags & IFF_NO_PI)) {
		if (iov_iter_count(iter) < sizeof(pi))
			return -EINVAL;

		total += sizeof(pi);
		if (iov_iter_count(iter) < total) {
2117			/* Packet will be stripped */
2118 pi.flags |= TUN_PKT_STRIP;
2119 }
2120
2121 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2122 return -EFAULT;
2123 }
2124
2125 if (vnet_hdr_sz) {
2126 struct virtio_net_hdr gso;
2127
2128 if (iov_iter_count(iter) < vnet_hdr_sz)
2129 return -EINVAL;
2130
2131 if (virtio_net_hdr_from_skb(skb, &gso,
2132 tun_is_little_endian(tun), true,
2133 vlan_hlen)) {
2134 struct skb_shared_info *sinfo = skb_shinfo(skb);
2135 pr_err("unexpected GSO type: "
2136 "0x%x, gso_size %d, hdr_len %d\n",
2137 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2138 tun16_to_cpu(tun, gso.hdr_len));
2139 print_hex_dump(KERN_ERR, "tun: ",
2140 DUMP_PREFIX_NONE,
2141 16, 1, skb->head,
2142 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2143 WARN_ON_ONCE(1);
2144 return -EINVAL;
2145 }
2146
2147 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2148 return -EFAULT;
2149
2150 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2151 }
2152
2153 if (vlan_hlen) {
2154 int ret;
2155 struct veth veth;
2156
2157 veth.h_vlan_proto = skb->vlan_proto;
2158 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2159
2160 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2161
2162 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2163 if (ret || !iov_iter_count(iter))
2164 goto done;
2165
2166 ret = copy_to_iter(&veth, sizeof(veth), iter);
2167 if (ret != sizeof(veth) || !iov_iter_count(iter))
2168 goto done;
2169 }
2170
2171 skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2172
2173done:
2174	/* caller is in process context; the per-CPU stats need preemption disabled */
2175 preempt_disable();
2176 dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2177 preempt_enable();
2178
2179 return total;
2180}
2181
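/*
 * A hedged sketch of parsing the layout tun_put_user() emits, assuming
 * the fd was set up with IFF_TAP | IFF_NO_PI | IFF_VNET_HDR and that
 * vnet_hdr_sz was queried via TUNGETVNETHDRSZ (names illustrative):
 *
 *	char buf[65536];
 *	ssize_t n = read(fd, buf, sizeof(buf));
 *	struct virtio_net_hdr *vh = (struct virtio_net_hdr *)buf;
 *	char *frame = buf + vnet_hdr_sz;	// Ethernet frame follows
 *	// vh->gso_type and vh->flags describe offloads for this frame.
 *
 * The segments appear in the order written above: a struct tun_pi
 * (unless IFF_NO_PI), then vnet_hdr_sz bytes of virtio header, then
 * the frame with any VLAN tag reinserted inline. When PI is present
 * and the buffer is too small, pi.flags carries TUN_PKT_STRIP to
 * signal truncation.
 */
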
2182static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2183{
2184 DECLARE_WAITQUEUE(wait, current);
2185 void *ptr = NULL;
2186 int error = 0;
2187
2188 ptr = ptr_ring_consume(&tfile->tx_ring);
2189 if (ptr)
2190 goto out;
2191 if (noblock) {
2192 error = -EAGAIN;
2193 goto out;
2194 }
2195
2196 add_wait_queue(&tfile->socket.wq.wait, &wait);
2197
2198 while (1) {
2199 set_current_state(TASK_INTERRUPTIBLE);
2200 ptr = ptr_ring_consume(&tfile->tx_ring);
2201 if (ptr)
2202 break;
2203 if (signal_pending(current)) {
2204 error = -ERESTARTSYS;
2205 break;
2206 }
2207 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2208 error = -EFAULT;
2209 break;
2210 }
2211
2212 schedule();
2213 }
2214
2215 __set_current_state(TASK_RUNNING);
2216 remove_wait_queue(&tfile->socket.wq.wait, &wait);
2217
2218out:
2219 *err = error;
2220 return ptr;
2221}
2222
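/*
 * The loop in tun_ring_recv() is the standard prepare-to-wait idiom:
 * the ring is re-checked only after set_current_state(TASK_INTERRUPTIBLE),
 * so a producer's wakeup cannot be lost between the emptiness check and
 * schedule().
 */
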
2223static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2224 struct iov_iter *to,
2225 int noblock, void *ptr)
2226{
2227 ssize_t ret;
2228 int err;
2229
2230 if (!iov_iter_count(to)) {
2231 tun_ptr_free(ptr);
2232 return 0;
2233 }
2234
2235 if (!ptr) {
2236 /* Read frames from ring */
2237 ptr = tun_ring_recv(tfile, noblock, &err);
2238 if (!ptr)
2239 return err;
2240 }
2241
2242 if (tun_is_xdp_frame(ptr)) {
2243 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2244
2245 ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2246 xdp_return_frame(xdpf);
2247 } else {
2248 struct sk_buff *skb = ptr;
2249
2250 ret = tun_put_user(tun, tfile, skb, to);
2251 if (unlikely(ret < 0))
2252 kfree_skb(skb);
2253 else
2254 consume_skb(skb);
2255 }
2256
2257 return ret;
2258}
2259
2260static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2261{
2262 struct file *file = iocb->ki_filp;
2263 struct tun_file *tfile = file->private_data;
2264 struct tun_struct *tun = tun_get(tfile);
2265 ssize_t len = iov_iter_count(to), ret;
2266 int noblock = 0;
2267
2268 if (!tun)
2269 return -EBADFD;
2270
2271 if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2272 noblock = 1;
2273
2274 ret = tun_do_read(tun, tfile, to, noblock, NULL);
2275 ret = min_t(ssize_t, ret, len);
2276 if (ret > 0)
2277 iocb->ki_pos = ret;
2278 tun_put(tun);
2279 return ret;
2280}
2281
2282static void tun_prog_free(struct rcu_head *rcu)
2283{
2284 struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2285
2286 bpf_prog_destroy(prog->prog);
2287 kfree(prog);
2288}
2289
2290static int __tun_set_ebpf(struct tun_struct *tun,
2291 struct tun_prog __rcu **prog_p,
2292 struct bpf_prog *prog)
2293{
2294 struct tun_prog *old, *new = NULL;
2295
2296 if (prog) {
2297 new = kmalloc(sizeof(*new), GFP_KERNEL);
2298 if (!new)
2299 return -ENOMEM;
2300 new->prog = prog;
2301 }
2302
2303 spin_lock_bh(&tun->lock);
2304 old = rcu_dereference_protected(*prog_p,
2305 lockdep_is_held(&tun->lock));
2306 rcu_assign_pointer(*prog_p, new);
2307 spin_unlock_bh(&tun->lock);
2308
2309 if (old)
2310 call_rcu(&old->rcu, tun_prog_free);
2311
2312 return 0;
2313}
2314
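/*
 * __tun_set_ebpf() is the usual RCU publish/retire pattern: readers
 * dereference *prog_p under rcu_read_lock(), so the previous program is
 * retired with call_rcu() rather than freed immediately, and
 * tun_prog_free() runs only after a grace period.
 */
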
2315static void tun_free_netdev(struct net_device *dev)
2316{
2317 struct tun_struct *tun = netdev_priv(dev);
2318
2319 BUG_ON(!(list_empty(&tun->disabled)));
2320
2321 free_percpu(dev->tstats);
2322 tun_flow_uninit(tun);
2323 security_tun_dev_free_security(tun->security);
2324 __tun_set_ebpf(tun, &tun->steering_prog, NULL);
2325 __tun_set_ebpf(tun, &tun->filter_prog, NULL);
2326}
2327
2328static void tun_setup(struct net_device *dev)
2329{
2330 struct tun_struct *tun = netdev_priv(dev);
2331
2332 tun->owner = INVALID_UID;
2333 tun->group = INVALID_GID;
2334 tun_default_link_ksettings(dev, &tun->link_ksettings);
2335
2336 dev->ethtool_ops = &tun_ethtool_ops;
2337 dev->needs_free_netdev = true;
2338 dev->priv_destructor = tun_free_netdev;
2339 /* We prefer our own queue length */
2340 dev->tx_queue_len = TUN_READQ_SIZE;
2341}
2342
2343/* Trivial set of netlink ops to allow deleting tun or tap
2344 * device with netlink.
2345 */
2346static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2347 struct netlink_ext_ack *extack)
2348{
2349 NL_SET_ERR_MSG(extack,
2350 "tun/tap creation via rtnetlink is not supported.");
2351 return -EOPNOTSUPP;
2352}
2353
2354static size_t tun_get_size(const struct net_device *dev)
2355{
2356 BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2357 BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2358
2359 return nla_total_size(sizeof(uid_t)) + /* OWNER */
2360 nla_total_size(sizeof(gid_t)) + /* GROUP */
2361 nla_total_size(sizeof(u8)) + /* TYPE */
2362 nla_total_size(sizeof(u8)) + /* PI */
2363 nla_total_size(sizeof(u8)) + /* VNET_HDR */
2364 nla_total_size(sizeof(u8)) + /* PERSIST */
2365 nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2366 nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2367 nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2368 0;
2369}
2370
2371static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2372{
2373 struct tun_struct *tun = netdev_priv(dev);
2374
2375 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2376 goto nla_put_failure;
2377 if (uid_valid(tun->owner) &&
2378 nla_put_u32(skb, IFLA_TUN_OWNER,
2379 from_kuid_munged(current_user_ns(), tun->owner)))
2380 goto nla_put_failure;
2381 if (gid_valid(tun->group) &&
2382 nla_put_u32(skb, IFLA_TUN_GROUP,
2383 from_kgid_munged(current_user_ns(), tun->group)))
2384 goto nla_put_failure;
2385 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2386 goto nla_put_failure;
2387 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2388 goto nla_put_failure;
2389 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2390 goto nla_put_failure;
2391 if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2392 !!(tun->flags & IFF_MULTI_QUEUE)))
2393 goto nla_put_failure;
2394 if (tun->flags & IFF_MULTI_QUEUE) {
2395 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2396 goto nla_put_failure;
2397 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2398 tun->numdisabled))
2399 goto nla_put_failure;
2400 }
2401
2402 return 0;
2403
2404nla_put_failure:
2405 return -EMSGSIZE;
2406}
2407
2408static struct rtnl_link_ops tun_link_ops __read_mostly = {
2409 .kind = DRV_NAME,
2410 .priv_size = sizeof(struct tun_struct),
2411 .setup = tun_setup,
2412 .validate = tun_validate,
2413 .get_size = tun_get_size,
2414 .fill_info = tun_fill_info,
2415};
2416
2417static void tun_sock_write_space(struct sock *sk)
2418{
2419 struct tun_file *tfile;
2420 wait_queue_head_t *wqueue;
2421
2422 if (!sock_writeable(sk))
2423 return;
2424
2425 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2426 return;
2427
2428 wqueue = sk_sleep(sk);
2429 if (wqueue && waitqueue_active(wqueue))
2430 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2431 EPOLLWRNORM | EPOLLWRBAND);
2432
2433 tfile = container_of(sk, struct tun_file, sk);
2434 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2435}
2436
2437static void tun_put_page(struct tun_page *tpage)
2438{
2439 if (tpage->page)
2440 __page_frag_cache_drain(tpage->page, tpage->count);
2441}
2442
2443static int tun_xdp_one(struct tun_struct *tun,
2444 struct tun_file *tfile,
2445 struct xdp_buff *xdp, int *flush,
2446 struct tun_page *tpage)
2447{
2448 unsigned int datasize = xdp->data_end - xdp->data;
2449 struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2450 struct virtio_net_hdr *gso = &hdr->gso;
2451 struct bpf_prog *xdp_prog;
2452 struct sk_buff *skb = NULL;
2453 struct sk_buff_head *queue;
2454 u32 rxhash = 0, act;
2455 int buflen = hdr->buflen;
2456 int ret = 0;
2457 bool skb_xdp = false;
2458 struct page *page;
2459
2460 xdp_prog = rcu_dereference(tun->xdp_prog);
2461 if (xdp_prog) {
2462 if (gso->gso_type) {
2463 skb_xdp = true;
2464 goto build;
2465 }
2466
2467 xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2468 xdp_set_data_meta_invalid(xdp);
2469
2470 act = bpf_prog_run_xdp(xdp_prog, xdp);
2471 ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2472 if (ret < 0) {
2473 put_page(virt_to_head_page(xdp->data));
2474 return ret;
2475 }
2476
2477 switch (ret) {
2478 case XDP_REDIRECT:
2479 *flush = true;
2480 fallthrough;
2481 case XDP_TX:
2482 return 0;
2483 case XDP_PASS:
2484 break;
2485 default:
2486 page = virt_to_head_page(xdp->data);
2487 if (tpage->page == page) {
2488 ++tpage->count;
2489 } else {
2490 tun_put_page(tpage);
2491 tpage->page = page;
2492 tpage->count = 1;
2493 }
2494 return 0;
2495 }
2496 }
2497
2498build:
2499 skb = build_skb(xdp->data_hard_start, buflen);
2500 if (!skb) {
2501 ret = -ENOMEM;
2502 goto out;
2503 }
2504
2505 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2506 skb_put(skb, xdp->data_end - xdp->data);
2507
2508 if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2509 atomic_long_inc(&tun->rx_frame_errors);
2510 kfree_skb(skb);
2511 ret = -EINVAL;
2512 goto out;
2513 }
2514
2515 skb->protocol = eth_type_trans(skb, tun->dev);
2516 skb_reset_network_header(skb);
2517 skb_probe_transport_header(skb);
2518 skb_record_rx_queue(skb, tfile->queue_index);
2519
2520 if (skb_xdp) {
2521 ret = do_xdp_generic(xdp_prog, skb);
2522 if (ret != XDP_PASS) {
2523 ret = 0;
2524 goto out;
2525 }
2526 }
2527
2528 if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2529 !tfile->detached)
2530 rxhash = __skb_get_hash_symmetric(skb);
2531
2532 if (tfile->napi_enabled) {
2533 queue = &tfile->sk.sk_write_queue;
2534 spin_lock(&queue->lock);
2535
2536 if (unlikely(tfile->detached)) {
2537 spin_unlock(&queue->lock);
2538 kfree_skb(skb);
2539 return -EBUSY;
2540 }
2541
2542 __skb_queue_tail(queue, skb);
2543 spin_unlock(&queue->lock);
2544 ret = 1;
2545 } else {
2546 netif_receive_skb(skb);
2547 ret = 0;
2548 }
2549
2550 /* No need to disable preemption here since this function is
2551 * always called with bh disabled
2552 */
2553 dev_sw_netstats_rx_add(tun->dev, datasize);
2554
2555 if (rxhash)
2556 tun_flow_update(tun, rxhash, tfile);
2557
2558out:
2559 return ret;
2560}
2561
2562static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
2563{
2564 int ret, i;
2565 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2566 struct tun_struct *tun = tun_get(tfile);
2567 struct tun_msg_ctl *ctl = m->msg_control;
2568 struct xdp_buff *xdp;
2569
2570 if (!tun)
2571 return -EBADFD;
2572
2573 if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
2574 ctl && ctl->type == TUN_MSG_PTR) {
2575 struct tun_page tpage;
2576 int n = ctl->num;
2577 int flush = 0, queued = 0;
2578
2579 memset(&tpage, 0, sizeof(tpage));
2580
2581 local_bh_disable();
2582 rcu_read_lock();
2583
2584 for (i = 0; i < n; i++) {
2585 xdp = &((struct xdp_buff *)ctl->ptr)[i];
2586 ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
2587 if (ret > 0)
2588 queued += ret;
2589 }
2590
2591 if (flush)
2592 xdp_do_flush();
2593
2594 if (tfile->napi_enabled && queued > 0)
2595 napi_schedule(&tfile->napi);
2596
2597 rcu_read_unlock();
2598 local_bh_enable();
2599
2600 tun_put_page(&tpage);
2601
2602 ret = total_len;
2603 goto out;
2604 }
2605
2606 ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
2607 m->msg_flags & MSG_DONTWAIT,
2608 m->msg_flags & MSG_MORE);
2609out:
2610 tun_put(tun);
2611 return ret;
2612}
2613
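/*
 * The TUN_MSG_PTR branch above is the batched vhost-net path:
 * msg_control carries an array of ctl->num xdp_buff structures, all
 * processed under a single rcu/bh section, with one xdp_do_flush() and
 * at most one napi_schedule() for the whole batch. Other senders fall
 * through to the per-packet tun_get_user() path.
 */
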
2614static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
2615 int flags)
2616{
2617 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2618 struct tun_struct *tun = tun_get(tfile);
2619 void *ptr = m->msg_control;
2620 int ret;
2621
2622 if (!tun) {
2623 ret = -EBADFD;
2624 goto out_free;
2625 }
2626
2627 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
2628 ret = -EINVAL;
2629 goto out_put_tun;
2630 }
2631 if (flags & MSG_ERRQUEUE) {
2632 ret = sock_recv_errqueue(sock->sk, m, total_len,
2633 SOL_PACKET, TUN_TX_TIMESTAMP);
2634 goto out;
2635 }
2636 ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
2637 if (ret > (ssize_t)total_len) {
2638 m->msg_flags |= MSG_TRUNC;
2639 ret = flags & MSG_TRUNC ? ret : total_len;
2640 }
2641out:
2642 tun_put(tun);
2643 return ret;
2644
2645out_put_tun:
2646 tun_put(tun);
2647out_free:
2648 tun_ptr_free(ptr);
2649 return ret;
2650}
2651
2652static int tun_ptr_peek_len(void *ptr)
2653{
2654 if (likely(ptr)) {
2655 if (tun_is_xdp_frame(ptr)) {
2656 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2657
2658 return xdpf->len;
2659 }
2660 return __skb_array_len_with_tag(ptr);
2661 } else {
2662 return 0;
2663 }
2664}
2665
2666static int tun_peek_len(struct socket *sock)
2667{
2668 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
2669 struct tun_struct *tun;
2670 int ret = 0;
2671
2672 tun = tun_get(tfile);
2673 if (!tun)
2674 return 0;
2675
2676 ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
2677 tun_put(tun);
2678
2679 return ret;
2680}
2681
2682/* Ops structure to mimic raw sockets with tun */
2683static const struct proto_ops tun_socket_ops = {
2684 .peek_len = tun_peek_len,
2685 .sendmsg = tun_sendmsg,
2686 .recvmsg = tun_recvmsg,
2687};
2688
2689static struct proto tun_proto = {
2690 .name = "tun",
2691 .owner = THIS_MODULE,
2692 .obj_size = sizeof(struct tun_file),
2693};
2694
2695static int tun_flags(struct tun_struct *tun)
2696{
2697 return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
2698}
2699
2700static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
2701 char *buf)
2702{
2703 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2704 return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
2705}
2706
2707static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
2708 char *buf)
2709{
2710 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2711	return uid_valid(tun->owner) ?
2712 sysfs_emit(buf, "%u\n",
2713 from_kuid_munged(current_user_ns(), tun->owner)) :
2714 sysfs_emit(buf, "-1\n");
2715}
2716
2717static ssize_t group_show(struct device *dev, struct device_attribute *attr,
2718 char *buf)
2719{
2720 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
2721 return gid_valid(tun->group) ?
2722 sysfs_emit(buf, "%u\n",
2723 from_kgid_munged(current_user_ns(), tun->group)) :
2724 sysfs_emit(buf, "-1\n");
2725}
2726
2727static DEVICE_ATTR_RO(tun_flags);
2728static DEVICE_ATTR_RO(owner);
2729static DEVICE_ATTR_RO(group);
2730
2731static struct attribute *tun_dev_attrs[] = {
2732 &dev_attr_tun_flags.attr,
2733 &dev_attr_owner.attr,
2734 &dev_attr_group.attr,
2735 NULL
2736};
2737
2738static const struct attribute_group tun_attr_group = {
2739 .attrs = tun_dev_attrs
2740};
2741
2742static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
2743{
2744 struct tun_struct *tun;
2745 struct tun_file *tfile = file->private_data;
2746 struct net_device *dev;
2747 int err;
2748
2749 if (tfile->detached)
2750 return -EINVAL;
2751
2752	if (ifr->ifr_flags & IFF_NAPI_FRAGS) {
2753 if (!capable(CAP_NET_ADMIN))
2754 return -EPERM;
2755
2756 if (!(ifr->ifr_flags & IFF_NAPI) ||
2757 (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
2758 return -EINVAL;
2759 }
2760
2761 dev = __dev_get_by_name(net, ifr->ifr_name);
2762 if (dev) {
2763 if (ifr->ifr_flags & IFF_TUN_EXCL)
2764 return -EBUSY;
2765 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
2766 tun = netdev_priv(dev);
2767 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
2768 tun = netdev_priv(dev);
2769 else
2770 return -EINVAL;
2771
2772 if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
2773 !!(tun->flags & IFF_MULTI_QUEUE))
2774 return -EINVAL;
2775
2776 if (tun_not_capable(tun))
2777 return -EPERM;
2778 err = security_tun_dev_open(tun->security);
2779 if (err < 0)
2780 return err;
2781
2782 err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
2783 ifr->ifr_flags & IFF_NAPI,
2784 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
2785 if (err < 0)
2786 return err;
2787
2788 if (tun->flags & IFF_MULTI_QUEUE &&
2789 (tun->numqueues + tun->numdisabled > 1)) {
2790			/* One or more queues have already been attached; no
2791			 * need to initialize the device again.
2792			 */
2793 netdev_state_change(dev);
2794 return 0;
2795 }
2796
2797 tun->flags = (tun->flags & ~TUN_FEATURES) |
2798 (ifr->ifr_flags & TUN_FEATURES);
2799
2800 netdev_state_change(dev);
2801 } else {
2802 char *name;
2803 unsigned long flags = 0;
2804 int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
2805 MAX_TAP_QUEUES : 1;
2806
2807 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2808 return -EPERM;
2809 err = security_tun_dev_create();
2810 if (err < 0)
2811 return err;
2812
2813 /* Set dev type */
2814 if (ifr->ifr_flags & IFF_TUN) {
2815 /* TUN device */
2816 flags |= IFF_TUN;
2817 name = "tun%d";
2818 } else if (ifr->ifr_flags & IFF_TAP) {
2819 /* TAP device */
2820 flags |= IFF_TAP;
2821 name = "tap%d";
2822 } else
2823 return -EINVAL;
2824
2825 if (*ifr->ifr_name)
2826 name = ifr->ifr_name;
2827
2828 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
2829 NET_NAME_UNKNOWN, tun_setup, queues,
2830 queues);
2831
2832 if (!dev)
2833 return -ENOMEM;
2834
2835 dev_net_set(dev, net);
2836 dev->rtnl_link_ops = &tun_link_ops;
2837 dev->ifindex = tfile->ifindex;
2838 dev->sysfs_groups[0] = &tun_attr_group;
2839
2840 tun = netdev_priv(dev);
2841 tun->dev = dev;
2842 tun->flags = flags;
2843 tun->txflt.count = 0;
2844 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
2845
2846 tun->align = NET_SKB_PAD;
2847 tun->filter_attached = false;
2848 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
2849 tun->rx_batched = 0;
2850 RCU_INIT_POINTER(tun->steering_prog, NULL);
2851
2852 tun->ifr = ifr;
2853 tun->file = file;
2854
2855 tun_net_initialize(dev);
2856
2857 err = register_netdevice(tun->dev);
2858 if (err < 0) {
2859 free_netdev(dev);
2860 return err;
2861 }
2862		/* free_netdev() won't check the refcnt; to avoid racing with
2863		 * dev_put() we must publish tun only after registration.
2864		 */
2865 rcu_assign_pointer(tfile->tun, tun);
2866 }
2867
2868 if (ifr->ifr_flags & IFF_NO_CARRIER)
2869 netif_carrier_off(tun->dev);
2870 else
2871 netif_carrier_on(tun->dev);
2872
2873 /* Make sure persistent devices do not get stuck in
2874 * xoff state.
2875 */
2876 if (netif_running(tun->dev))
2877 netif_tx_wake_all_queues(tun->dev);
2878
2879 strcpy(ifr->ifr_name, tun->dev->name);
2880 return 0;
2881}
2882
2883static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
2884{
2885 strcpy(ifr->ifr_name, tun->dev->name);
2886
2887 ifr->ifr_flags = tun_flags(tun);
2889}
2890
2891/* This is like a cut-down ethtool ops, except done via tun fd so no
2892 * privs required. */
2893static int set_offload(struct tun_struct *tun, unsigned long arg)
2894{
2895 netdev_features_t features = 0;
2896
2897 if (arg & TUN_F_CSUM) {
2898 features |= NETIF_F_HW_CSUM;
2899 arg &= ~TUN_F_CSUM;
2900
2901 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
2902 if (arg & TUN_F_TSO_ECN) {
2903 features |= NETIF_F_TSO_ECN;
2904 arg &= ~TUN_F_TSO_ECN;
2905 }
2906 if (arg & TUN_F_TSO4)
2907 features |= NETIF_F_TSO;
2908 if (arg & TUN_F_TSO6)
2909 features |= NETIF_F_TSO6;
2910 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
2911 }
2912
2913 arg &= ~TUN_F_UFO;
2914
2915		/* TODO: for now, USO4 and USO6 are only accepted together */
2916 if (arg & TUN_F_USO4 && arg & TUN_F_USO6) {
2917 features |= NETIF_F_GSO_UDP_L4;
2918 arg &= ~(TUN_F_USO4 | TUN_F_USO6);
2919 }
2920 }
2921
2922	/* This gives the user a way to test for new features in the future by
2923	 * trying to set them. */
2924 if (arg)
2925 return -EINVAL;
2926
2927 tun->set_features = features;
2928 tun->dev->wanted_features &= ~TUN_USER_FEATURES;
2929 tun->dev->wanted_features |= features;
2930 netdev_update_features(tun->dev);
2931
2932 return 0;
2933}
2934
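/*
 * A hedged usage sketch for set_offload(); note the mask is passed by
 * value, not by pointer (values illustrative):
 *
 *	unsigned long off = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;
 *
 *	if (ioctl(tun_fd, TUNSETOFFLOAD, off) < 0)
 *		perror("TUNSETOFFLOAD");
 *
 * TSO and USO bits are only honoured together with TUN_F_CSUM, matching
 * the nesting above, and any unknown bit fails with -EINVAL, which
 * doubles as a probe for features this kernel does not support.
 */
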
2935static void tun_detach_filter(struct tun_struct *tun, int n)
2936{
2937 int i;
2938 struct tun_file *tfile;
2939
2940 for (i = 0; i < n; i++) {
2941 tfile = rtnl_dereference(tun->tfiles[i]);
2942 lock_sock(tfile->socket.sk);
2943 sk_detach_filter(tfile->socket.sk);
2944 release_sock(tfile->socket.sk);
2945 }
2946
2947 tun->filter_attached = false;
2948}
2949
2950static int tun_attach_filter(struct tun_struct *tun)
2951{
2952 int i, ret = 0;
2953 struct tun_file *tfile;
2954
2955 for (i = 0; i < tun->numqueues; i++) {
2956 tfile = rtnl_dereference(tun->tfiles[i]);
2957 lock_sock(tfile->socket.sk);
2958 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
2959 release_sock(tfile->socket.sk);
2960 if (ret) {
2961 tun_detach_filter(tun, i);
2962 return ret;
2963 }
2964 }
2965
2966 tun->filter_attached = true;
2967 return ret;
2968}
2969
2970static void tun_set_sndbuf(struct tun_struct *tun)
2971{
2972 struct tun_file *tfile;
2973 int i;
2974
2975 for (i = 0; i < tun->numqueues; i++) {
2976 tfile = rtnl_dereference(tun->tfiles[i]);
2977 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
2978 }
2979}
2980
2981static int tun_set_queue(struct file *file, struct ifreq *ifr)
2982{
2983 struct tun_file *tfile = file->private_data;
2984 struct tun_struct *tun;
2985 int ret = 0;
2986
2987 rtnl_lock();
2988
2989 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
2990 tun = tfile->detached;
2991 if (!tun) {
2992 ret = -EINVAL;
2993 goto unlock;
2994 }
2995 ret = security_tun_dev_attach_queue(tun->security);
2996 if (ret < 0)
2997 goto unlock;
2998 ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
2999 tun->flags & IFF_NAPI_FRAGS, true);
3000 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
3001 tun = rtnl_dereference(tfile->tun);
3002 if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
3003 ret = -EINVAL;
3004 else
3005 __tun_detach(tfile, false);
3006 } else
3007 ret = -EINVAL;
3008
3009 if (ret >= 0)
3010 netdev_state_change(tun->dev);
3011
3012unlock:
3013 rtnl_unlock();
3014 return ret;
3015}
3016
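/*
 * An illustrative sketch of the queue lifecycle handled above, where
 * queue_fd is an extra fd attached to an IFF_MULTI_QUEUE device:
 *
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// park this queue
 *	ifr.ifr_flags = IFF_ATTACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// and bring it back
 */
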
3017static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
3018 void __user *data)
3019{
3020 struct bpf_prog *prog;
3021 int fd;
3022
3023 if (copy_from_user(&fd, data, sizeof(fd)))
3024 return -EFAULT;
3025
3026 if (fd == -1) {
3027 prog = NULL;
3028 } else {
3029 prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
3030 if (IS_ERR(prog))
3031 return PTR_ERR(prog);
3032 }
3033
3034 return __tun_set_ebpf(tun, prog_p, prog);
3035}
3036
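/*
 * User space drives tun_set_ebpf() by passing a BPF program fd, or -1
 * to detach. A minimal sketch, assuming prog_fd was loaded elsewhere as
 * BPF_PROG_TYPE_SOCKET_FILTER:
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);	// attach
 *	int none = -1;
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &none);	// detach
 */
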
3037/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
3038static unsigned char tun_get_addr_len(unsigned short type)
3039{
3040 switch (type) {
3041 case ARPHRD_IP6GRE:
3042 case ARPHRD_TUNNEL6:
3043 return sizeof(struct in6_addr);
3044 case ARPHRD_IPGRE:
3045 case ARPHRD_TUNNEL:
3046 case ARPHRD_SIT:
3047 return 4;
3048 case ARPHRD_ETHER:
3049 return ETH_ALEN;
3050 case ARPHRD_IEEE802154:
3051 case ARPHRD_IEEE802154_MONITOR:
3052 return IEEE802154_EXTENDED_ADDR_LEN;
3053 case ARPHRD_PHONET_PIPE:
3054 case ARPHRD_PPP:
3055 case ARPHRD_NONE:
3056 return 0;
3057 case ARPHRD_6LOWPAN:
3058 return EUI64_ADDR_LEN;
3059 case ARPHRD_FDDI:
3060 return FDDI_K_ALEN;
3061 case ARPHRD_HIPPI:
3062 return HIPPI_ALEN;
3063 case ARPHRD_IEEE802:
3064 return FC_ALEN;
3065 case ARPHRD_ROSE:
3066 return ROSE_ADDR_LEN;
3067 case ARPHRD_NETROM:
3068 return AX25_ADDR_LEN;
3069 case ARPHRD_LOCALTLK:
3070 return LTALK_ALEN;
3071 default:
3072 return 0;
3073 }
3074}
3075
3076static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
3077 unsigned long arg, int ifreq_len)
3078{
3079 struct tun_file *tfile = file->private_data;
3080 struct net *net = sock_net(&tfile->sk);
3081 struct tun_struct *tun;
3082	void __user *argp = (void __user *)arg;
3083 unsigned int carrier;
3084 struct ifreq ifr;
3085 kuid_t owner;
3086 kgid_t group;
3087 int ifindex;
3088 int sndbuf;
3089 int vnet_hdr_sz;
3090 int le;
3091 int ret;
3092 bool do_notify = false;
3093
3094 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
3095 (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
3096 if (copy_from_user(&ifr, argp, ifreq_len))
3097 return -EFAULT;
3098 } else {
3099 memset(&ifr, 0, sizeof(ifr));
3100 }
3101 if (cmd == TUNGETFEATURES) {
3102 /* Currently this just means: "what IFF flags are valid?".
3103 * This is needed because we never checked for invalid flags on
3104 * TUNSETIFF.
3105 */
3106 return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
3107 TUN_FEATURES, (unsigned int __user*)argp);
3108 } else if (cmd == TUNSETQUEUE) {
3109 return tun_set_queue(file, &ifr);
3110 } else if (cmd == SIOCGSKNS) {
3111 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3112 return -EPERM;
3113 return open_related_ns(&net->ns, get_net_ns);
3114 }
3115
3116 rtnl_lock();
3117
3118 tun = tun_get(tfile);
3119 if (cmd == TUNSETIFF) {
3120 ret = -EEXIST;
3121 if (tun)
3122 goto unlock;
3123
3124 ifr.ifr_name[IFNAMSIZ-1] = '\0';
3125
3126 ret = tun_set_iff(net, file, &ifr);
3127
3128 if (ret)
3129 goto unlock;
3130
3131 if (copy_to_user(argp, &ifr, ifreq_len))
3132 ret = -EFAULT;
3133 goto unlock;
3134 }
3135 if (cmd == TUNSETIFINDEX) {
3136 ret = -EPERM;
3137 if (tun)
3138 goto unlock;
3139
3140 ret = -EFAULT;
3141 if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
3142 goto unlock;
3143 ret = -EINVAL;
3144 if (ifindex < 0)
3145 goto unlock;
3146 ret = 0;
3147 tfile->ifindex = ifindex;
3148 goto unlock;
3149 }
3150
3151 ret = -EBADFD;
3152 if (!tun)
3153 goto unlock;
3154
3155 netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);
3156
3157 net = dev_net(tun->dev);
3158 ret = 0;
3159 switch (cmd) {
3160 case TUNGETIFF:
3161 tun_get_iff(tun, &ifr);
3162
3163 if (tfile->detached)
3164 ifr.ifr_flags |= IFF_DETACH_QUEUE;
3165 if (!tfile->socket.sk->sk_filter)
3166 ifr.ifr_flags |= IFF_NOFILTER;
3167
3168 if (copy_to_user(argp, &ifr, ifreq_len))
3169 ret = -EFAULT;
3170 break;
3171
3172 case TUNSETNOCSUM:
3173 /* Disable/Enable checksum */
3174
3175 /* [unimplemented] */
3176 netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
3177 arg ? "disabled" : "enabled");
3178 break;
3179
3180 case TUNSETPERSIST:
3181		/* Disable/Enable persist mode. Keep an extra reference to the
3182		 * module to prevent the module from being unloaded.
3183		 */
3184 if (arg && !(tun->flags & IFF_PERSIST)) {
3185 tun->flags |= IFF_PERSIST;
3186 __module_get(THIS_MODULE);
3187 do_notify = true;
3188 }
3189 if (!arg && (tun->flags & IFF_PERSIST)) {
3190 tun->flags &= ~IFF_PERSIST;
3191 module_put(THIS_MODULE);
3192 do_notify = true;
3193 }
3194
3195 netif_info(tun, drv, tun->dev, "persist %s\n",
3196 arg ? "enabled" : "disabled");
3197 break;
3198
3199 case TUNSETOWNER:
3200 /* Set owner of the device */
3201 owner = make_kuid(current_user_ns(), arg);
3202 if (!uid_valid(owner)) {
3203 ret = -EINVAL;
3204 break;
3205 }
3206 tun->owner = owner;
3207 do_notify = true;
3208 netif_info(tun, drv, tun->dev, "owner set to %u\n",
3209 from_kuid(&init_user_ns, tun->owner));
3210 break;
3211
3212 case TUNSETGROUP:
3213 /* Set group of the device */
3214 group = make_kgid(current_user_ns(), arg);
3215 if (!gid_valid(group)) {
3216 ret = -EINVAL;
3217 break;
3218 }
3219 tun->group = group;
3220 do_notify = true;
3221 netif_info(tun, drv, tun->dev, "group set to %u\n",
3222 from_kgid(&init_user_ns, tun->group));
3223 break;
3224
3225 case TUNSETLINK:
3226 /* Only allow setting the type when the interface is down */
3227 if (tun->dev->flags & IFF_UP) {
3228 netif_info(tun, drv, tun->dev,
3229 "Linktype set failed because interface is up\n");
3230 ret = -EBUSY;
3231 } else {
3232 ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
3233 tun->dev);
3234 ret = notifier_to_errno(ret);
3235 if (ret) {
3236 netif_info(tun, drv, tun->dev,
3237 "Refused to change device type\n");
3238 break;
3239 }
3240 tun->dev->type = (int) arg;
3241 tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
3242 netif_info(tun, drv, tun->dev, "linktype set to %d\n",
3243 tun->dev->type);
3244 call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
3245 tun->dev);
3246 }
3247 break;
3248
3249 case TUNSETDEBUG:
3250 tun->msg_enable = (u32)arg;
3251 break;
3252
3253 case TUNSETOFFLOAD:
3254 ret = set_offload(tun, arg);
3255 break;
3256
3257 case TUNSETTXFILTER:
3258 /* Can be set only for TAPs */
3259 ret = -EINVAL;
3260 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3261 break;
3262 ret = update_filter(&tun->txflt, (void __user *)arg);
3263 break;
3264
3265 case SIOCGIFHWADDR:
3266 /* Get hw address */
3267 dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
3268 if (copy_to_user(argp, &ifr, ifreq_len))
3269 ret = -EFAULT;
3270 break;
3271
3272 case SIOCSIFHWADDR:
3273 /* Set hw address */
3274 ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
3275 break;
3276
3277 case TUNGETSNDBUF:
3278 sndbuf = tfile->socket.sk->sk_sndbuf;
3279 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
3280 ret = -EFAULT;
3281 break;
3282
3283 case TUNSETSNDBUF:
3284 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
3285 ret = -EFAULT;
3286 break;
3287 }
3288 if (sndbuf <= 0) {
3289 ret = -EINVAL;
3290 break;
3291 }
3292
3293 tun->sndbuf = sndbuf;
3294 tun_set_sndbuf(tun);
3295 break;
3296
3297 case TUNGETVNETHDRSZ:
3298 vnet_hdr_sz = tun->vnet_hdr_sz;
3299 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
3300 ret = -EFAULT;
3301 break;
3302
3303 case TUNSETVNETHDRSZ:
3304 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
3305 ret = -EFAULT;
3306 break;
3307 }
3308 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
3309 ret = -EINVAL;
3310 break;
3311 }
3312
3313 tun->vnet_hdr_sz = vnet_hdr_sz;
3314 break;
3315
3316 case TUNGETVNETLE:
3317 le = !!(tun->flags & TUN_VNET_LE);
3318 if (put_user(le, (int __user *)argp))
3319 ret = -EFAULT;
3320 break;
3321
3322 case TUNSETVNETLE:
3323 if (get_user(le, (int __user *)argp)) {
3324 ret = -EFAULT;
3325 break;
3326 }
3327 if (le)
3328 tun->flags |= TUN_VNET_LE;
3329 else
3330 tun->flags &= ~TUN_VNET_LE;
3331 break;
3332
3333 case TUNGETVNETBE:
3334 ret = tun_get_vnet_be(tun, argp);
3335 break;
3336
3337 case TUNSETVNETBE:
3338 ret = tun_set_vnet_be(tun, argp);
3339 break;
3340
3341 case TUNATTACHFILTER:
3342 /* Can be set only for TAPs */
3343 ret = -EINVAL;
3344 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3345 break;
3346 ret = -EFAULT;
3347 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
3348 break;
3349
3350 ret = tun_attach_filter(tun);
3351 break;
3352
3353 case TUNDETACHFILTER:
3354 /* Can be set only for TAPs */
3355 ret = -EINVAL;
3356 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3357 break;
3358 ret = 0;
3359 tun_detach_filter(tun, tun->numqueues);
3360 break;
3361
3362 case TUNGETFILTER:
3363 ret = -EINVAL;
3364 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
3365 break;
3366 ret = -EFAULT;
3367 if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
3368 break;
3369 ret = 0;
3370 break;
3371
3372 case TUNSETSTEERINGEBPF:
3373 ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
3374 break;
3375
3376 case TUNSETFILTEREBPF:
3377 ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
3378 break;
3379
3380 case TUNSETCARRIER:
3381 ret = -EFAULT;
3382 if (copy_from_user(&carrier, argp, sizeof(carrier)))
3383 goto unlock;
3384
3385 ret = tun_net_change_carrier(tun->dev, (bool)carrier);
3386 break;
3387
3388 case TUNGETDEVNETNS:
3389 ret = -EPERM;
3390 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3391 goto unlock;
3392 ret = open_related_ns(&net->ns, get_net_ns);
3393 break;
3394
3395 default:
3396 ret = -EINVAL;
3397 break;
3398 }
3399
3400 if (do_notify)
3401 netdev_state_change(tun->dev);
3402
3403unlock:
3404 rtnl_unlock();
3405 if (tun)
3406 tun_put(tun);
3407 return ret;
3408}
3409
3410static long tun_chr_ioctl(struct file *file,
3411 unsigned int cmd, unsigned long arg)
3412{
3413 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
3414}
3415
3416#ifdef CONFIG_COMPAT
3417static long tun_chr_compat_ioctl(struct file *file,
3418 unsigned int cmd, unsigned long arg)
3419{
3420 switch (cmd) {
3421 case TUNSETIFF:
3422 case TUNGETIFF:
3423 case TUNSETTXFILTER:
3424 case TUNGETSNDBUF:
3425 case TUNSETSNDBUF:
3426 case SIOCGIFHWADDR:
3427 case SIOCSIFHWADDR:
3428 arg = (unsigned long)compat_ptr(arg);
3429 break;
3430 default:
3431 arg = (compat_ulong_t)arg;
3432 break;
3433 }
3434
3435 /*
3436 * compat_ifreq is shorter than ifreq, so we must not access beyond
3437 * the end of that structure. All fields that are used in this
3438 * driver are compatible though, we don't need to convert the
3439 * contents.
3440 */
3441 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
3442}
3443#endif /* CONFIG_COMPAT */
3444
3445static int tun_chr_fasync(int fd, struct file *file, int on)
3446{
3447 struct tun_file *tfile = file->private_data;
3448 int ret;
3449
3450 if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0)
3451 goto out;
3452
3453 if (on) {
3454 __f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
3455 tfile->flags |= TUN_FASYNC;
3456 } else
3457 tfile->flags &= ~TUN_FASYNC;
3458 ret = 0;
3459out:
3460 return ret;
3461}
3462
3463static int tun_chr_open(struct inode *inode, struct file * file)
3464{
3465 struct net *net = current->nsproxy->net_ns;
3466 struct tun_file *tfile;
3467
3468 tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
3469 &tun_proto, 0);
3470 if (!tfile)
3471 return -ENOMEM;
3472 if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
3473 sk_free(&tfile->sk);
3474 return -ENOMEM;
3475 }
3476
3477 mutex_init(&tfile->napi_mutex);
3478 RCU_INIT_POINTER(tfile->tun, NULL);
3479 tfile->flags = 0;
3480 tfile->ifindex = 0;
3481
3482 init_waitqueue_head(&tfile->socket.wq.wait);
3483
3484 tfile->socket.file = file;
3485 tfile->socket.ops = &tun_socket_ops;
3486
3487 sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());
3488
3489 tfile->sk.sk_write_space = tun_sock_write_space;
3490 tfile->sk.sk_sndbuf = INT_MAX;
3491
3492 file->private_data = tfile;
3493 INIT_LIST_HEAD(&tfile->next);
3494
3495 sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);
3496
3497 /* tun groks IOCB_NOWAIT just fine, mark it as such */
3498 file->f_mode |= FMODE_NOWAIT;
3499 return 0;
3500}
3501
3502static int tun_chr_close(struct inode *inode, struct file *file)
3503{
3504 struct tun_file *tfile = file->private_data;
3505
3506 tun_detach(tfile, true);
3507
3508 return 0;
3509}
3510
3511#ifdef CONFIG_PROC_FS
3512static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
3513{
3514 struct tun_file *tfile = file->private_data;
3515 struct tun_struct *tun;
3516 struct ifreq ifr;
3517
3518 memset(&ifr, 0, sizeof(ifr));
3519
3520 rtnl_lock();
3521 tun = tun_get(tfile);
3522 if (tun)
3523 tun_get_iff(tun, &ifr);
3524 rtnl_unlock();
3525
3526 if (tun)
3527 tun_put(tun);
3528
3529 seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
3530}
3531#endif
3532
3533static const struct file_operations tun_fops = {
3534 .owner = THIS_MODULE,
3535 .llseek = no_llseek,
3536 .read_iter = tun_chr_read_iter,
3537 .write_iter = tun_chr_write_iter,
3538 .poll = tun_chr_poll,
3539 .unlocked_ioctl = tun_chr_ioctl,
3540#ifdef CONFIG_COMPAT
3541 .compat_ioctl = tun_chr_compat_ioctl,
3542#endif
3543 .open = tun_chr_open,
3544 .release = tun_chr_close,
3545 .fasync = tun_chr_fasync,
3546#ifdef CONFIG_PROC_FS
3547 .show_fdinfo = tun_chr_show_fdinfo,
3548#endif
3549};
3550
3551static struct miscdevice tun_miscdev = {
3552 .minor = TUN_MINOR,
3553 .name = "tun",
3554 .nodename = "net/tun",
3555 .fops = &tun_fops,
3556};
3557
3558/* ethtool interface */
3559
3560static void tun_default_link_ksettings(struct net_device *dev,
3561 struct ethtool_link_ksettings *cmd)
3562{
3563 ethtool_link_ksettings_zero_link_mode(cmd, supported);
3564 ethtool_link_ksettings_zero_link_mode(cmd, advertising);
3565	cmd->base.speed = SPEED_10;
3566 cmd->base.duplex = DUPLEX_FULL;
3567 cmd->base.port = PORT_TP;
3568 cmd->base.phy_address = 0;
3569 cmd->base.autoneg = AUTONEG_DISABLE;
3570}
3571
3572static int tun_get_link_ksettings(struct net_device *dev,
3573 struct ethtool_link_ksettings *cmd)
3574{
3575 struct tun_struct *tun = netdev_priv(dev);
3576
3577 memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
3578 return 0;
3579}
3580
3581static int tun_set_link_ksettings(struct net_device *dev,
3582 const struct ethtool_link_ksettings *cmd)
3583{
3584 struct tun_struct *tun = netdev_priv(dev);
3585
3586 memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
3587 return 0;
3588}
3589
3590static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
3591{
3592 struct tun_struct *tun = netdev_priv(dev);
3593
3594 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
3595 strscpy(info->version, DRV_VERSION, sizeof(info->version));
3596
3597 switch (tun->flags & TUN_TYPE_MASK) {
3598 case IFF_TUN:
3599 strscpy(info->bus_info, "tun", sizeof(info->bus_info));
3600 break;
3601 case IFF_TAP:
3602 strscpy(info->bus_info, "tap", sizeof(info->bus_info));
3603 break;
3604 }
3605}
3606
3607static u32 tun_get_msglevel(struct net_device *dev)
3608{
3609 struct tun_struct *tun = netdev_priv(dev);
3610
3611 return tun->msg_enable;
3612}
3613
3614static void tun_set_msglevel(struct net_device *dev, u32 value)
3615{
3616 struct tun_struct *tun = netdev_priv(dev);
3617
3618 tun->msg_enable = value;
3619}
3620
3621static int tun_get_coalesce(struct net_device *dev,
3622 struct ethtool_coalesce *ec,
3623 struct kernel_ethtool_coalesce *kernel_coal,
3624 struct netlink_ext_ack *extack)
3625{
3626 struct tun_struct *tun = netdev_priv(dev);
3627
3628 ec->rx_max_coalesced_frames = tun->rx_batched;
3629
3630 return 0;
3631}
3632
3633static int tun_set_coalesce(struct net_device *dev,
3634 struct ethtool_coalesce *ec,
3635 struct kernel_ethtool_coalesce *kernel_coal,
3636 struct netlink_ext_ack *extack)
3637{
3638 struct tun_struct *tun = netdev_priv(dev);
3639
3640 if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
3641 tun->rx_batched = NAPI_POLL_WEIGHT;
3642 else
3643 tun->rx_batched = ec->rx_max_coalesced_frames;
3644
3645 return 0;
3646}
3647
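/*
 * rx_batched is the knob behind "ethtool -C <dev> rx-frames N". Values
 * are clamped to NAPI_POLL_WEIGHT above; 0 keeps the default behaviour
 * of delivering each packet to the stack immediately rather than
 * batching in tun_rx_batched().
 */
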
3648static const struct ethtool_ops tun_ethtool_ops = {
3649 .supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
3650 .get_drvinfo = tun_get_drvinfo,
3651 .get_msglevel = tun_get_msglevel,
3652 .set_msglevel = tun_set_msglevel,
3653 .get_link = ethtool_op_get_link,
3654 .get_ts_info = ethtool_op_get_ts_info,
3655 .get_coalesce = tun_get_coalesce,
3656 .set_coalesce = tun_set_coalesce,
3657 .get_link_ksettings = tun_get_link_ksettings,
3658 .set_link_ksettings = tun_set_link_ksettings,
3659};
3660
3661static int tun_queue_resize(struct tun_struct *tun)
3662{
3663 struct net_device *dev = tun->dev;
3664 struct tun_file *tfile;
3665 struct ptr_ring **rings;
3666 int n = tun->numqueues + tun->numdisabled;
3667 int ret, i;
3668
3669 rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
3670 if (!rings)
3671 return -ENOMEM;
3672
3673 for (i = 0; i < tun->numqueues; i++) {
3674 tfile = rtnl_dereference(tun->tfiles[i]);
3675 rings[i] = &tfile->tx_ring;
3676 }
3677 list_for_each_entry(tfile, &tun->disabled, next)
3678 rings[i++] = &tfile->tx_ring;
3679
3680 ret = ptr_ring_resize_multiple(rings, n,
3681 dev->tx_queue_len, GFP_KERNEL,
3682 tun_ptr_free);
3683
3684 kfree(rings);
3685 return ret;
3686}
3687
3688static int tun_device_event(struct notifier_block *unused,
3689 unsigned long event, void *ptr)
3690{
3691 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3692 struct tun_struct *tun = netdev_priv(dev);
3693 int i;
3694
3695 if (dev->rtnl_link_ops != &tun_link_ops)
3696 return NOTIFY_DONE;
3697
3698 switch (event) {
3699 case NETDEV_CHANGE_TX_QUEUE_LEN:
3700 if (tun_queue_resize(tun))
3701 return NOTIFY_BAD;
3702 break;
3703 case NETDEV_UP:
3704 for (i = 0; i < tun->numqueues; i++) {
3705 struct tun_file *tfile;
3706
3707 tfile = rtnl_dereference(tun->tfiles[i]);
3708 tfile->socket.sk->sk_write_space(tfile->socket.sk);
3709 }
3710 break;
3711 default:
3712 break;
3713 }
3714
3715 return NOTIFY_DONE;
3716}
3717
3718static struct notifier_block tun_notifier_block __read_mostly = {
3719 .notifier_call = tun_device_event,
3720};
3721
3722static int __init tun_init(void)
3723{
3724 int ret = 0;
3725
3726 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3727
3728 ret = rtnl_link_register(&tun_link_ops);
3729 if (ret) {
3730 pr_err("Can't register link_ops\n");
3731 goto err_linkops;
3732 }
3733
3734 ret = misc_register(&tun_miscdev);
3735 if (ret) {
3736 pr_err("Can't register misc device %d\n", TUN_MINOR);
3737 goto err_misc;
3738 }
3739
3740 ret = register_netdevice_notifier(&tun_notifier_block);
3741 if (ret) {
3742 pr_err("Can't register netdevice notifier\n");
3743 goto err_notifier;
3744 }
3745
3746 return 0;
3747
3748err_notifier:
3749 misc_deregister(&tun_miscdev);
3750err_misc:
3751 rtnl_link_unregister(&tun_link_ops);
3752err_linkops:
3753 return ret;
3754}
3755
3756static void __exit tun_cleanup(void)
3757{
3758 misc_deregister(&tun_miscdev);
3759 rtnl_link_unregister(&tun_link_ops);
3760 unregister_netdevice_notifier(&tun_notifier_block);
3761}
3762
3763/* Get an underlying socket object from tun file.  Returns error unless file is
3764 * attached to a device.  The returned object works like a packet socket: it
3765 * can be used for sock_sendmsg()/sock_recvmsg().  The caller is responsible
3766 * for holding a reference to the file for as long as the socket is in use. */
3767struct socket *tun_get_socket(struct file *file)
3768{
3769 struct tun_file *tfile;
3770 if (file->f_op != &tun_fops)
3771 return ERR_PTR(-EINVAL);
3772 tfile = file->private_data;
3773 if (!tfile)
3774 return ERR_PTR(-EBADFD);
3775 return &tfile->socket;
3776}
3777EXPORT_SYMBOL_GPL(tun_get_socket);
3778
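/*
 * vhost-net is the expected consumer here. A minimal in-kernel sketch,
 * assuming the caller already holds a reference on the file and built
 * the msghdr itself:
 *
 *	struct socket *sock = tun_get_socket(file);
 *
 *	if (!IS_ERR(sock))
 *		sock_sendmsg(sock, &msg);
 *
 * As the comment above spells out, it is the file reference, not the
 * socket, that keeps the queue alive.
 */
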
3779struct ptr_ring *tun_get_tx_ring(struct file *file)
3780{
3781 struct tun_file *tfile;
3782
3783 if (file->f_op != &tun_fops)
3784 return ERR_PTR(-EINVAL);
3785 tfile = file->private_data;
3786 if (!tfile)
3787 return ERR_PTR(-EBADFD);
3788 return &tfile->tx_ring;
3789}
3790EXPORT_SYMBOL_GPL(tun_get_tx_ring);
3791
3792module_init(tun_init);
3793module_exit(tun_cleanup);
3794MODULE_DESCRIPTION(DRV_DESCRIPTION);
3795MODULE_AUTHOR(DRV_COPYRIGHT);
3796MODULE_LICENSE("GPL");
3797MODULE_ALIAS_MISCDEV(TUN_MINOR);
3798MODULE_ALIAS("devname:net/tun");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * TUN - Universal TUN/TAP device driver.
4 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
5 *
6 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
7 */
8
9/*
10 * Changes:
11 *
12 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
13 * Add TUNSETLINK ioctl to set the link encapsulation
14 *
15 * Mark Smith <markzzzsmith@yahoo.com.au>
16 * Use eth_random_addr() for tap MAC address.
17 *
18 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
19 * Fixes in packet dropping, queue length setting and queue wakeup.
20 * Increased default tx queue length.
21 * Added ethtool API.
22 * Minor cleanups
23 *
24 * Daniel Podlejski <underley@underley.eu.org>
25 * Modifications for 2.3.99-pre5 kernel.
26 */
27
28#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
29
30#define DRV_NAME "tun"
31#define DRV_VERSION "1.6"
32#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
33#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
34
35#include <linux/module.h>
36#include <linux/errno.h>
37#include <linux/kernel.h>
38#include <linux/sched/signal.h>
39#include <linux/major.h>
40#include <linux/slab.h>
41#include <linux/poll.h>
42#include <linux/fcntl.h>
43#include <linux/init.h>
44#include <linux/skbuff.h>
45#include <linux/netdevice.h>
46#include <linux/etherdevice.h>
47#include <linux/miscdevice.h>
48#include <linux/ethtool.h>
49#include <linux/rtnetlink.h>
50#include <linux/compat.h>
51#include <linux/if.h>
52#include <linux/if_arp.h>
53#include <linux/if_ether.h>
54#include <linux/if_tun.h>
55#include <linux/if_vlan.h>
56#include <linux/crc32.h>
57#include <linux/math.h>
58#include <linux/nsproxy.h>
59#include <linux/virtio_net.h>
60#include <linux/rcupdate.h>
61#include <net/net_namespace.h>
62#include <net/netns/generic.h>
63#include <net/rtnetlink.h>
64#include <net/sock.h>
65#include <net/xdp.h>
66#include <net/ip_tunnels.h>
67#include <linux/seq_file.h>
68#include <linux/uio.h>
69#include <linux/skb_array.h>
70#include <linux/bpf.h>
71#include <linux/bpf_trace.h>
72#include <linux/mutex.h>
73#include <linux/ieee802154.h>
74#include <linux/if_ltalk.h>
75#include <uapi/linux/if_fddi.h>
76#include <uapi/linux/if_hippi.h>
77#include <uapi/linux/if_fc.h>
78#include <net/ax25.h>
79#include <net/rose.h>
80#include <net/6lowpan.h>
81#include <net/rps.h>
82
83#include <linux/uaccess.h>
84#include <linux/proc_fs.h>
85
86static void tun_default_link_ksettings(struct net_device *dev,
87 struct ethtool_link_ksettings *cmd);
88
89#define TUN_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD)
90
91/* TUN device flags */
92
93/* IFF_ATTACH_QUEUE is never stored in device flags,
94 * overload it to mean fasync when stored there.
95 */
96#define TUN_FASYNC IFF_ATTACH_QUEUE
97/* High bits in flags field are unused. */
98#define TUN_VNET_LE 0x80000000
99#define TUN_VNET_BE 0x40000000
100
101#define TUN_FEATURES (IFF_NO_PI | IFF_ONE_QUEUE | IFF_VNET_HDR | \
102 IFF_MULTI_QUEUE | IFF_NAPI | IFF_NAPI_FRAGS)
103
104#define GOODCOPY_LEN 128
105
106#define FLT_EXACT_COUNT 8
107struct tap_filter {
108 unsigned int count; /* Number of addrs. Zero means disabled */
109 u32 mask[2]; /* Mask of the hashed addrs */
110 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
111};
112
113/* MAX_TAP_QUEUES 256 is chosen to allow rx/tx queues to be equal
114 * to max number of VCPUs in guest. */
115#define MAX_TAP_QUEUES 256
116#define MAX_TAP_FLOWS 4096
117
118#define TUN_FLOW_EXPIRE (3 * HZ)
119
120/* A tun_file connects an open character device to a tuntap netdevice. It
121 * also contains all socket related structures (except sock_fprog and tap_filter)
122 * to serve as one transmit queue for tuntap device. The sock_fprog and
123 * tap_filter were kept in tun_struct since they were used for filtering for the
124 * netdevice not for a specific queue (at least I didn't see the requirement for
125 * this).
126 *
127 * RCU usage:
128 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
129 * other can only be read while rcu_read_lock or rtnl_lock is held.
130 */
131struct tun_file {
132 struct sock sk;
133 struct socket socket;
134 struct tun_struct __rcu *tun;
135 struct fasync_struct *fasync;
136 /* only used for fasnyc */
137 unsigned int flags;
138 union {
139 u16 queue_index;
140 unsigned int ifindex;
141 };
142 struct napi_struct napi;
143 bool napi_enabled;
144 bool napi_frags_enabled;
145 struct mutex napi_mutex; /* Protects access to the above napi */
146 struct list_head next;
147 struct tun_struct *detached;
148 struct ptr_ring tx_ring;
149 struct xdp_rxq_info xdp_rxq;
150};
151
152struct tun_page {
153 struct page *page;
154 int count;
155};
156
157struct tun_flow_entry {
158 struct hlist_node hash_link;
159 struct rcu_head rcu;
160 struct tun_struct *tun;
161
162 u32 rxhash;
163 u32 rps_rxhash;
164 int queue_index;
165 unsigned long updated ____cacheline_aligned_in_smp;
166};
167
168#define TUN_NUM_FLOW_ENTRIES 1024
169#define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
170
171struct tun_prog {
172 struct rcu_head rcu;
173 struct bpf_prog *prog;
174};
175
176/* Since the socket were moved to tun_file, to preserve the behavior of persist
177 * device, socket filter, sndbuf and vnet header size were restore when the
178 * file were attached to a persist device.
179 */
180struct tun_struct {
181 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
182 unsigned int numqueues;
183 unsigned int flags;
184 kuid_t owner;
185 kgid_t group;
186
187 struct net_device *dev;
188 netdev_features_t set_features;
189#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
190 NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4)
191
192 int align;
193 int vnet_hdr_sz;
194 int sndbuf;
195 struct tap_filter txflt;
196 struct sock_fprog fprog;
197 /* protected by rtnl lock */
198 bool filter_attached;
199 u32 msg_enable;
200 spinlock_t lock;
201 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
202 struct timer_list flow_gc_timer;
203 unsigned long ageing_time;
204 unsigned int numdisabled;
205 struct list_head disabled;
206 void *security;
207 u32 flow_count;
208 u32 rx_batched;
209 atomic_long_t rx_frame_errors;
210 struct bpf_prog __rcu *xdp_prog;
211 struct tun_prog __rcu *steering_prog;
212 struct tun_prog __rcu *filter_prog;
213 struct ethtool_link_ksettings link_ksettings;
214 /* init args */
215 struct file *file;
216 struct ifreq *ifr;
217};
218
219struct veth {
220 __be16 h_vlan_proto;
221 __be16 h_vlan_TCI;
222};
223
224static void tun_flow_init(struct tun_struct *tun);
225static void tun_flow_uninit(struct tun_struct *tun);
226
227static int tun_napi_receive(struct napi_struct *napi, int budget)
228{
229 struct tun_file *tfile = container_of(napi, struct tun_file, napi);
230 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
231 struct sk_buff_head process_queue;
232 struct sk_buff *skb;
233 int received = 0;
234
235 __skb_queue_head_init(&process_queue);
236
237 spin_lock(&queue->lock);
238 skb_queue_splice_tail_init(queue, &process_queue);
239 spin_unlock(&queue->lock);
240
241 while (received < budget && (skb = __skb_dequeue(&process_queue))) {
242 napi_gro_receive(napi, skb);
243 ++received;
244 }
245
246 if (!skb_queue_empty(&process_queue)) {
247 spin_lock(&queue->lock);
248 skb_queue_splice(&process_queue, queue);
249 spin_unlock(&queue->lock);
250 }
251
252 return received;
253}
254
255static int tun_napi_poll(struct napi_struct *napi, int budget)
256{
257 unsigned int received;
258
259 received = tun_napi_receive(napi, budget);
260
261 if (received < budget)
262 napi_complete_done(napi, received);
263
264 return received;
265}
266
267static void tun_napi_init(struct tun_struct *tun, struct tun_file *tfile,
268 bool napi_en, bool napi_frags)
269{
270 tfile->napi_enabled = napi_en;
271 tfile->napi_frags_enabled = napi_en && napi_frags;
272 if (napi_en) {
273 netif_napi_add_tx(tun->dev, &tfile->napi, tun_napi_poll);
274 napi_enable(&tfile->napi);
275 }
276}
277
278static void tun_napi_enable(struct tun_file *tfile)
279{
280 if (tfile->napi_enabled)
281 napi_enable(&tfile->napi);
282}
283
284static void tun_napi_disable(struct tun_file *tfile)
285{
286 if (tfile->napi_enabled)
287 napi_disable(&tfile->napi);
288}
289
290static void tun_napi_del(struct tun_file *tfile)
291{
292 if (tfile->napi_enabled)
293 netif_napi_del(&tfile->napi);
294}
295
296static bool tun_napi_frags_enabled(const struct tun_file *tfile)
297{
298 return tfile->napi_frags_enabled;
299}
300
301#ifdef CONFIG_TUN_VNET_CROSS_LE
302static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
303{
304 return tun->flags & TUN_VNET_BE ? false :
305 virtio_legacy_is_little_endian();
306}
307
308static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
309{
310 int be = !!(tun->flags & TUN_VNET_BE);
311
312 if (put_user(be, argp))
313 return -EFAULT;
314
315 return 0;
316}
317
318static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
319{
320 int be;
321
322 if (get_user(be, argp))
323 return -EFAULT;
324
325 if (be)
326 tun->flags |= TUN_VNET_BE;
327 else
328 tun->flags &= ~TUN_VNET_BE;
329
330 return 0;
331}
332#else
333static inline bool tun_legacy_is_little_endian(struct tun_struct *tun)
334{
335 return virtio_legacy_is_little_endian();
336}
337
338static long tun_get_vnet_be(struct tun_struct *tun, int __user *argp)
339{
340 return -EINVAL;
341}
342
343static long tun_set_vnet_be(struct tun_struct *tun, int __user *argp)
344{
345 return -EINVAL;
346}
347#endif /* CONFIG_TUN_VNET_CROSS_LE */
348
349static inline bool tun_is_little_endian(struct tun_struct *tun)
350{
351 return tun->flags & TUN_VNET_LE ||
352 tun_legacy_is_little_endian(tun);
353}
354
355static inline u16 tun16_to_cpu(struct tun_struct *tun, __virtio16 val)
356{
357 return __virtio16_to_cpu(tun_is_little_endian(tun), val);
358}
359
360static inline __virtio16 cpu_to_tun16(struct tun_struct *tun, u16 val)
361{
362 return __cpu_to_virtio16(tun_is_little_endian(tun), val);
363}
364
365static inline u32 tun_hashfn(u32 rxhash)
366{
367 return rxhash & TUN_MASK_FLOW_ENTRIES;
368}
369
370static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
371{
372 struct tun_flow_entry *e;
373
374 hlist_for_each_entry_rcu(e, head, hash_link) {
375 if (e->rxhash == rxhash)
376 return e;
377 }
378 return NULL;
379}
380
381static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
382 struct hlist_head *head,
383 u32 rxhash, u16 queue_index)
384{
385 struct tun_flow_entry *e = kmalloc(sizeof(*e), GFP_ATOMIC);
386
387 if (e) {
388 netif_info(tun, tx_queued, tun->dev,
389 "create flow: hash %u index %u\n",
390 rxhash, queue_index);
391 e->updated = jiffies;
392 e->rxhash = rxhash;
393 e->rps_rxhash = 0;
394 e->queue_index = queue_index;
395 e->tun = tun;
396 hlist_add_head_rcu(&e->hash_link, head);
397 ++tun->flow_count;
398 }
399 return e;
400}
401
402static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
403{
404 netif_info(tun, tx_queued, tun->dev, "delete flow: hash %u index %u\n",
405 e->rxhash, e->queue_index);
406 hlist_del_rcu(&e->hash_link);
407 kfree_rcu(e, rcu);
408 --tun->flow_count;
409}
410
411static void tun_flow_flush(struct tun_struct *tun)
412{
413 int i;
414
415 spin_lock_bh(&tun->lock);
416 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
417 struct tun_flow_entry *e;
418 struct hlist_node *n;
419
420 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link)
421 tun_flow_delete(tun, e);
422 }
423 spin_unlock_bh(&tun->lock);
424}
425
426static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
427{
428 int i;
429
430 spin_lock_bh(&tun->lock);
431 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
432 struct tun_flow_entry *e;
433 struct hlist_node *n;
434
435 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
436 if (e->queue_index == queue_index)
437 tun_flow_delete(tun, e);
438 }
439 }
440 spin_unlock_bh(&tun->lock);
441}
442
443static void tun_flow_cleanup(struct timer_list *t)
444{
445 struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
446 unsigned long delay = tun->ageing_time;
447 unsigned long next_timer = jiffies + delay;
448 unsigned long count = 0;
449 int i;
450
451 spin_lock(&tun->lock);
452 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
453 struct tun_flow_entry *e;
454 struct hlist_node *n;
455
456 hlist_for_each_entry_safe(e, n, &tun->flows[i], hash_link) {
457 unsigned long this_timer;
458
459 this_timer = e->updated + delay;
460 if (time_before_eq(this_timer, jiffies)) {
461 tun_flow_delete(tun, e);
462 continue;
463 }
464 count++;
465 if (time_before(this_timer, next_timer))
466 next_timer = this_timer;
467 }
468 }
469
470 if (count)
471 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
472 spin_unlock(&tun->lock);
473}
474
475static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
476 struct tun_file *tfile)
477{
478 struct hlist_head *head;
479 struct tun_flow_entry *e;
480 unsigned long delay = tun->ageing_time;
481 u16 queue_index = tfile->queue_index;
482
483 head = &tun->flows[tun_hashfn(rxhash)];
484
485 rcu_read_lock();
486
487 e = tun_flow_find(head, rxhash);
488 if (likely(e)) {
489 /* TODO: keep queueing to old queue until it's empty? */
490 if (READ_ONCE(e->queue_index) != queue_index)
491 WRITE_ONCE(e->queue_index, queue_index);
492 if (e->updated != jiffies)
493 e->updated = jiffies;
494 sock_rps_record_flow_hash(e->rps_rxhash);
495 } else {
496 spin_lock_bh(&tun->lock);
497 if (!tun_flow_find(head, rxhash) &&
498 tun->flow_count < MAX_TAP_FLOWS)
499 tun_flow_create(tun, head, rxhash, queue_index);
500
501 if (!timer_pending(&tun->flow_gc_timer))
502 mod_timer(&tun->flow_gc_timer,
503 round_jiffies_up(jiffies + delay));
504 spin_unlock_bh(&tun->lock);
505 }
506
507 rcu_read_unlock();
508}
509
510/* Save the hash received in the stack receive path and update the
511 * flow_hash table accordingly.
512 */
513static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
514{
515 if (unlikely(e->rps_rxhash != hash))
516 e->rps_rxhash = hash;
517}
518
519/* We try to identify a flow through its rxhash. We do not check the
520 * rxq number because some NICs (e.g. the 82599) choose the rxq based
521 * on the txq on which the last packet of the flow was sent. As the
522 * userspace application moves between processors, we may therefore
523 * see a different rxq number here.
524 */
525static u16 tun_automq_select_queue(struct tun_struct *tun, struct sk_buff *skb)
526{
527 struct tun_flow_entry *e;
528 u32 txq, numqueues;
529
530 numqueues = READ_ONCE(tun->numqueues);
531
532 txq = __skb_get_hash_symmetric(skb);
533 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
534 if (e) {
535 tun_flow_save_rps_rxhash(e, txq);
536 txq = e->queue_index;
537 } else {
538 txq = reciprocal_scale(txq, numqueues);
539 }
540
541 return txq;
542}
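
/*
 * Sketch of the automatic steering lifecycle (illustrative; "h" and
 * "q" are hypothetical values):
 *
 *	// rx path: userspace wrote a packet with hash h on queue q
 *	tun_flow_update(tun, h, tfile);            // records h -> q
 *	// later, on the tx path for the same flow:
 *	txq = tun_automq_select_queue(tun, skb);   // looks up h, yields q
 *
 * Replies are thus steered to the fd that last carried the flow; for
 * an unknown hash, reciprocal_scale() spreads flows across the queues.
 */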
543
544static u16 tun_ebpf_select_queue(struct tun_struct *tun, struct sk_buff *skb)
545{
546 struct tun_prog *prog;
547 u32 numqueues;
548 u16 ret = 0;
549
550 numqueues = READ_ONCE(tun->numqueues);
551 if (!numqueues)
552 return 0;
553
554 prog = rcu_dereference(tun->steering_prog);
555 if (prog)
556 ret = bpf_prog_run_clear_cb(prog->prog, skb);
557
558 return ret % numqueues;
559}
560
561static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
562 struct net_device *sb_dev)
563{
564 struct tun_struct *tun = netdev_priv(dev);
565 u16 ret;
566
567 rcu_read_lock();
568 if (rcu_dereference(tun->steering_prog))
569 ret = tun_ebpf_select_queue(tun, skb);
570 else
571 ret = tun_automq_select_queue(tun, skb);
572 rcu_read_unlock();
573
574 return ret;
575}
576
577static inline bool tun_not_capable(struct tun_struct *tun)
578{
579 const struct cred *cred = current_cred();
580 struct net *net = dev_net(tun->dev);
581
582 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
583 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
584 !ns_capable(net->user_ns, CAP_NET_ADMIN);
585}
586
587static void tun_set_real_num_queues(struct tun_struct *tun)
588{
589 netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
590 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
591}
592
593static void tun_disable_queue(struct tun_struct *tun, struct tun_file *tfile)
594{
595 tfile->detached = tun;
596 list_add_tail(&tfile->next, &tun->disabled);
597 ++tun->numdisabled;
598}
599
600static struct tun_struct *tun_enable_queue(struct tun_file *tfile)
601{
602 struct tun_struct *tun = tfile->detached;
603
604 tfile->detached = NULL;
605 list_del_init(&tfile->next);
606 --tun->numdisabled;
607 return tun;
608}
609
610void tun_ptr_free(void *ptr)
611{
612 if (!ptr)
613 return;
614 if (tun_is_xdp_frame(ptr)) {
615 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
616
617 xdp_return_frame(xdpf);
618 } else {
619 __skb_array_destroy_skb(ptr);
620 }
621}
622EXPORT_SYMBOL_GPL(tun_ptr_free);
623
624static void tun_queue_purge(struct tun_file *tfile)
625{
626 void *ptr;
627
628 while ((ptr = ptr_ring_consume(&tfile->tx_ring)) != NULL)
629 tun_ptr_free(ptr);
630
631 skb_queue_purge(&tfile->sk.sk_write_queue);
632 skb_queue_purge(&tfile->sk.sk_error_queue);
633}
634
635static void __tun_detach(struct tun_file *tfile, bool clean)
636{
637 struct tun_file *ntfile;
638 struct tun_struct *tun;
639
640 tun = rtnl_dereference(tfile->tun);
641
642 if (tun && clean) {
643 if (!tfile->detached)
644 tun_napi_disable(tfile);
645 tun_napi_del(tfile);
646 }
647
648 if (tun && !tfile->detached) {
649 u16 index = tfile->queue_index;
650 BUG_ON(index >= tun->numqueues);
651
652 rcu_assign_pointer(tun->tfiles[index],
653 tun->tfiles[tun->numqueues - 1]);
654 ntfile = rtnl_dereference(tun->tfiles[index]);
655 ntfile->queue_index = index;
656 ntfile->xdp_rxq.queue_index = index;
657 rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
658 NULL);
659
660 --tun->numqueues;
661 if (clean) {
662 RCU_INIT_POINTER(tfile->tun, NULL);
663 sock_put(&tfile->sk);
664 } else {
665 tun_disable_queue(tun, tfile);
666 tun_napi_disable(tfile);
667 }
668
669 synchronize_net();
670 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
671 /* Drop read queue */
672 tun_queue_purge(tfile);
673 tun_set_real_num_queues(tun);
674 } else if (tfile->detached && clean) {
675 tun = tun_enable_queue(tfile);
676 sock_put(&tfile->sk);
677 }
678
679 if (clean) {
680 if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
681 netif_carrier_off(tun->dev);
682
683 if (!(tun->flags & IFF_PERSIST) &&
684 tun->dev->reg_state == NETREG_REGISTERED)
685 unregister_netdevice(tun->dev);
686 }
687 if (tun)
688 xdp_rxq_info_unreg(&tfile->xdp_rxq);
689 ptr_ring_cleanup(&tfile->tx_ring, tun_ptr_free);
690 }
691}
692
693static void tun_detach(struct tun_file *tfile, bool clean)
694{
695 struct tun_struct *tun;
696 struct net_device *dev;
697
698 rtnl_lock();
699 tun = rtnl_dereference(tfile->tun);
700 dev = tun ? tun->dev : NULL;
701 __tun_detach(tfile, clean);
702 if (dev)
703 netdev_state_change(dev);
704 rtnl_unlock();
705
706 if (clean)
707 sock_put(&tfile->sk);
708}
709
710static void tun_detach_all(struct net_device *dev)
711{
712 struct tun_struct *tun = netdev_priv(dev);
713 struct tun_file *tfile, *tmp;
714 int i, n = tun->numqueues;
715
716 for (i = 0; i < n; i++) {
717 tfile = rtnl_dereference(tun->tfiles[i]);
718 BUG_ON(!tfile);
719 tun_napi_disable(tfile);
720 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
721 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
722 RCU_INIT_POINTER(tfile->tun, NULL);
723 --tun->numqueues;
724 }
725 list_for_each_entry(tfile, &tun->disabled, next) {
726 tfile->socket.sk->sk_shutdown = RCV_SHUTDOWN;
727 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
728 RCU_INIT_POINTER(tfile->tun, NULL);
729 }
730 BUG_ON(tun->numqueues != 0);
731
732 synchronize_net();
733 for (i = 0; i < n; i++) {
734 tfile = rtnl_dereference(tun->tfiles[i]);
735 tun_napi_del(tfile);
736 /* Drop read queue */
737 tun_queue_purge(tfile);
738 xdp_rxq_info_unreg(&tfile->xdp_rxq);
739 sock_put(&tfile->sk);
740 }
741 list_for_each_entry_safe(tfile, tmp, &tun->disabled, next) {
742 tun_napi_del(tfile);
743 tun_enable_queue(tfile);
744 tun_queue_purge(tfile);
745 xdp_rxq_info_unreg(&tfile->xdp_rxq);
746 sock_put(&tfile->sk);
747 }
748 BUG_ON(tun->numdisabled != 0);
749
750 if (tun->flags & IFF_PERSIST)
751 module_put(THIS_MODULE);
752}
753
754static int tun_attach(struct tun_struct *tun, struct file *file,
755 bool skip_filter, bool napi, bool napi_frags,
756 bool publish_tun)
757{
758 struct tun_file *tfile = file->private_data;
759 struct net_device *dev = tun->dev;
760 int err;
761
762 err = security_tun_dev_attach(tfile->socket.sk, tun->security);
763 if (err < 0)
764 goto out;
765
766 err = -EINVAL;
767 if (rtnl_dereference(tfile->tun) && !tfile->detached)
768 goto out;
769
770 err = -EBUSY;
771 if (!(tun->flags & IFF_MULTI_QUEUE) && tun->numqueues == 1)
772 goto out;
773
774 err = -E2BIG;
775 if (!tfile->detached &&
776 tun->numqueues + tun->numdisabled == MAX_TAP_QUEUES)
777 goto out;
778
779 err = 0;
780
781 /* Re-attach the filter to persist device */
782 if (!skip_filter && (tun->filter_attached == true)) {
783 lock_sock(tfile->socket.sk);
784 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
785 release_sock(tfile->socket.sk);
786		if (err)
787			goto out;
788 }
789
790 if (!tfile->detached &&
791 ptr_ring_resize(&tfile->tx_ring, dev->tx_queue_len,
792 GFP_KERNEL, tun_ptr_free)) {
793 err = -ENOMEM;
794 goto out;
795 }
796
797 tfile->queue_index = tun->numqueues;
798 tfile->socket.sk->sk_shutdown &= ~RCV_SHUTDOWN;
799
800 if (tfile->detached) {
801 /* Re-attach detached tfile, updating XDP queue_index */
802 WARN_ON(!xdp_rxq_info_is_reg(&tfile->xdp_rxq));
803
804 if (tfile->xdp_rxq.queue_index != tfile->queue_index)
805 tfile->xdp_rxq.queue_index = tfile->queue_index;
806 } else {
807 /* Setup XDP RX-queue info, for new tfile getting attached */
808 err = xdp_rxq_info_reg(&tfile->xdp_rxq,
809 tun->dev, tfile->queue_index, 0);
810 if (err < 0)
811 goto out;
812 err = xdp_rxq_info_reg_mem_model(&tfile->xdp_rxq,
813 MEM_TYPE_PAGE_SHARED, NULL);
814 if (err < 0) {
815 xdp_rxq_info_unreg(&tfile->xdp_rxq);
816 goto out;
817 }
818 err = 0;
819 }
820
821 if (tfile->detached) {
822 tun_enable_queue(tfile);
823 tun_napi_enable(tfile);
824 } else {
825 sock_hold(&tfile->sk);
826 tun_napi_init(tun, tfile, napi, napi_frags);
827 }
828
829 if (rtnl_dereference(tun->xdp_prog))
830 sock_set_flag(&tfile->sk, SOCK_XDP);
831
832 /* device is allowed to go away first, so no need to hold extra
833 * refcnt.
834 */
835
836 /* Publish tfile->tun and tun->tfiles only after we've fully
837 * initialized tfile; otherwise we risk using half-initialized
838 * object.
839 */
840 if (publish_tun)
841 rcu_assign_pointer(tfile->tun, tun);
842 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
843 tun->numqueues++;
844 tun_set_real_num_queues(tun);
845out:
846 return err;
847}
848
849static struct tun_struct *tun_get(struct tun_file *tfile)
850{
851 struct tun_struct *tun;
852
853 rcu_read_lock();
854 tun = rcu_dereference(tfile->tun);
855 if (tun)
856 dev_hold(tun->dev);
857 rcu_read_unlock();
858
859 return tun;
860}
861
862static void tun_put(struct tun_struct *tun)
863{
864 dev_put(tun->dev);
865}
866
867/* TAP filtering */
868static void addr_hash_set(u32 *mask, const u8 *addr)
869{
870 int n = ether_crc(ETH_ALEN, addr) >> 26;
871 mask[n >> 5] |= (1 << (n & 31));
872}
873
874static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
875{
876 int n = ether_crc(ETH_ALEN, addr) >> 26;
877 return mask[n >> 5] & (1 << (n & 31));
878}
879
880static int update_filter(struct tap_filter *filter, void __user *arg)
881{
882 struct { u8 u[ETH_ALEN]; } *addr;
883 struct tun_filter uf;
884 int err, alen, n, nexact;
885
886 if (copy_from_user(&uf, arg, sizeof(uf)))
887 return -EFAULT;
888
889 if (!uf.count) {
890 /* Disabled */
891 filter->count = 0;
892 return 0;
893 }
894
895 alen = ETH_ALEN * uf.count;
896 addr = memdup_user(arg + sizeof(uf), alen);
897 if (IS_ERR(addr))
898 return PTR_ERR(addr);
899
900	/* The filter is updated without holding any locks, which is
901	 * perfectly safe: we disable it first, and in the worst
902	 * case we'll accept a few undesired packets. */
903 filter->count = 0;
904 wmb();
905
906 /* Use first set of addresses as an exact filter */
907 for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
908 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
909
910 nexact = n;
911
912 /* Remaining multicast addresses are hashed,
913 * unicast will leave the filter disabled. */
914 memset(filter->mask, 0, sizeof(filter->mask));
915 for (; n < uf.count; n++) {
916 if (!is_multicast_ether_addr(addr[n].u)) {
917 err = 0; /* no filter */
918 goto free_addr;
919 }
920 addr_hash_set(filter->mask, addr[n].u);
921 }
922
923 /* For ALLMULTI just set the mask to all ones.
924 * This overrides the mask populated above. */
925 if ((uf.flags & TUN_FLT_ALLMULTI))
926 memset(filter->mask, ~0, sizeof(filter->mask));
927
928 /* Now enable the filter */
929 wmb();
930 filter->count = nexact;
931
932 /* Return the number of exact filters */
933 err = nexact;
934free_addr:
935 kfree(addr);
936 return err;
937}
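
/*
 * Userspace sketch (assumption: this is reached via the TUNSETTXFILTER
 * ioctl on a tap fd; illustrative only): accept one exact unicast
 * address, plus all multicast via TUN_FLT_ALLMULTI.
 *
 *	struct {
 *		struct tun_filter uf;
 *		unsigned char addrs[1][ETH_ALEN];
 *	} req = {
 *		.uf    = { .flags = TUN_FLT_ALLMULTI, .count = 1 },
 *		.addrs = { { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } },
 *	};
 *	if (ioctl(tap_fd, TUNSETTXFILTER, &req) < 0)
 *		perror("TUNSETTXFILTER");
 */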
938
939/* Returns: 0 - drop, !=0 - accept */
940static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
941{
942	/* Cannot use eth_hdr(skb) here because the skb's MAC header
943	 * offset is not set up yet at this point. */
944 struct ethhdr *eh = (struct ethhdr *) skb->data;
945 int i;
946
947 /* Exact match */
948 for (i = 0; i < filter->count; i++)
949 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
950 return 1;
951
952 /* Inexact match (multicast only) */
953 if (is_multicast_ether_addr(eh->h_dest))
954 return addr_hash_test(filter->mask, eh->h_dest);
955
956 return 0;
957}
958
959/*
960 * Checks whether the packet is accepted or not.
961 * Returns: 0 - drop, !=0 - accept
962 */
963static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
964{
965 if (!filter->count)
966 return 1;
967
968 return run_filter(filter, skb);
969}
970
971/* Network device part of the driver */
972
973static const struct ethtool_ops tun_ethtool_ops;
974
975static int tun_net_init(struct net_device *dev)
976{
977 struct tun_struct *tun = netdev_priv(dev);
978 struct ifreq *ifr = tun->ifr;
979 int err;
980
981 spin_lock_init(&tun->lock);
982
983 err = security_tun_dev_alloc_security(&tun->security);
984 if (err < 0)
985 return err;
986
987 tun_flow_init(tun);
988
989 dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS;
990 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
991 TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
992 NETIF_F_HW_VLAN_STAG_TX;
993 dev->features = dev->hw_features | NETIF_F_LLTX;
994 dev->vlan_features = dev->features &
995 ~(NETIF_F_HW_VLAN_CTAG_TX |
996 NETIF_F_HW_VLAN_STAG_TX);
997
998 tun->flags = (tun->flags & ~TUN_FEATURES) |
999 (ifr->ifr_flags & TUN_FEATURES);
1000
1001 INIT_LIST_HEAD(&tun->disabled);
1002 err = tun_attach(tun, tun->file, false, ifr->ifr_flags & IFF_NAPI,
1003 ifr->ifr_flags & IFF_NAPI_FRAGS, false);
1004 if (err < 0) {
1005 tun_flow_uninit(tun);
1006 security_tun_dev_free_security(tun->security);
1007 return err;
1008 }
1009 return 0;
1010}
1011
1012/* Net device detach from fd. */
1013static void tun_net_uninit(struct net_device *dev)
1014{
1015 tun_detach_all(dev);
1016}
1017
1018/* Net device open. */
1019static int tun_net_open(struct net_device *dev)
1020{
1021 netif_tx_start_all_queues(dev);
1022
1023 return 0;
1024}
1025
1026/* Net device close. */
1027static int tun_net_close(struct net_device *dev)
1028{
1029 netif_tx_stop_all_queues(dev);
1030 return 0;
1031}
1032
1033/* Flow-hash bookkeeping for the xmit path */
1034static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
1035{
1036#ifdef CONFIG_RPS
1037 if (tun->numqueues == 1 && static_branch_unlikely(&rps_needed)) {
1038 /* Select queue was not called for the skbuff, so we extract the
1039 * RPS hash and save it into the flow_table here.
1040 */
1041 struct tun_flow_entry *e;
1042 __u32 rxhash;
1043
1044 rxhash = __skb_get_hash_symmetric(skb);
1045 e = tun_flow_find(&tun->flows[tun_hashfn(rxhash)], rxhash);
1046 if (e)
1047 tun_flow_save_rps_rxhash(e, rxhash);
1048 }
1049#endif
1050}
1051
1052static unsigned int run_ebpf_filter(struct tun_struct *tun,
1053 struct sk_buff *skb,
1054 int len)
1055{
1056 struct tun_prog *prog = rcu_dereference(tun->filter_prog);
1057
1058 if (prog)
1059 len = bpf_prog_run_clear_cb(prog->prog, skb);
1060
1061 return len;
1062}
1063
1064/* Net device start xmit */
1065static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
1066{
1067 struct tun_struct *tun = netdev_priv(dev);
1068 enum skb_drop_reason drop_reason;
1069 int txq = skb->queue_mapping;
1070 struct netdev_queue *queue;
1071 struct tun_file *tfile;
1072 int len = skb->len;
1073
1074 rcu_read_lock();
1075 tfile = rcu_dereference(tun->tfiles[txq]);
1076
1077 /* Drop packet if interface is not attached */
1078 if (!tfile) {
1079 drop_reason = SKB_DROP_REASON_DEV_READY;
1080 goto drop;
1081 }
1082
1083 if (!rcu_dereference(tun->steering_prog))
1084 tun_automq_xmit(tun, skb);
1085
1086 netif_info(tun, tx_queued, tun->dev, "%s %d\n", __func__, skb->len);
1087
1088 /* Drop if the filter does not like it.
1089 * This is a noop if the filter is disabled.
1090 * Filter can be enabled only for the TAP devices. */
1091 if (!check_filter(&tun->txflt, skb)) {
1092 drop_reason = SKB_DROP_REASON_TAP_TXFILTER;
1093 goto drop;
1094 }
1095
1096 if (tfile->socket.sk->sk_filter &&
1097 sk_filter(tfile->socket.sk, skb)) {
1098 drop_reason = SKB_DROP_REASON_SOCKET_FILTER;
1099 goto drop;
1100 }
1101
1102 len = run_ebpf_filter(tun, skb, len);
1103 if (len == 0) {
1104 drop_reason = SKB_DROP_REASON_TAP_FILTER;
1105 goto drop;
1106 }
1107
1108 if (pskb_trim(skb, len)) {
1109 drop_reason = SKB_DROP_REASON_NOMEM;
1110 goto drop;
1111 }
1112
1113 if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
1114 drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1115 goto drop;
1116 }
1117
1118 skb_tx_timestamp(skb);
1119
1120 /* Orphan the skb - required as we might hang on to it
1121 * for indefinite time.
1122 */
1123 skb_orphan(skb);
1124
1125 nf_reset_ct(skb);
1126
1127 if (ptr_ring_produce(&tfile->tx_ring, skb)) {
1128 drop_reason = SKB_DROP_REASON_FULL_RING;
1129 goto drop;
1130 }
1131
1132 /* NETIF_F_LLTX requires to do our own update of trans_start */
1133 queue = netdev_get_tx_queue(dev, txq);
1134 txq_trans_cond_update(queue);
1135
1136 /* Notify and wake up reader process */
1137 if (tfile->flags & TUN_FASYNC)
1138 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1139 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1140
1141 rcu_read_unlock();
1142 return NETDEV_TX_OK;
1143
1144drop:
1145 dev_core_stats_tx_dropped_inc(dev);
1146 skb_tx_error(skb);
1147 kfree_skb_reason(skb, drop_reason);
1148 rcu_read_unlock();
1149 return NET_XMIT_DROP;
1150}
1151
1152static void tun_net_mclist(struct net_device *dev)
1153{
1154 /*
1155 * This callback is supposed to deal with mc filter in
1156 * _rx_ path and has nothing to do with the _tx_ path.
1157 * In rx path we always accept everything userspace gives us.
1158 */
1159}
1160
1161static netdev_features_t tun_net_fix_features(struct net_device *dev,
1162 netdev_features_t features)
1163{
1164 struct tun_struct *tun = netdev_priv(dev);
1165
1166 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
1167}
1168
1169static void tun_set_headroom(struct net_device *dev, int new_hr)
1170{
1171 struct tun_struct *tun = netdev_priv(dev);
1172
1173 if (new_hr < NET_SKB_PAD)
1174 new_hr = NET_SKB_PAD;
1175
1176 tun->align = new_hr;
1177}
1178
1179static void
1180tun_net_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1181{
1182 struct tun_struct *tun = netdev_priv(dev);
1183
1184 dev_get_tstats64(dev, stats);
1185
1186 stats->rx_frame_errors +=
1187 (unsigned long)atomic_long_read(&tun->rx_frame_errors);
1188}
1189
1190static int tun_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1191 struct netlink_ext_ack *extack)
1192{
1193 struct tun_struct *tun = netdev_priv(dev);
1194 struct tun_file *tfile;
1195 struct bpf_prog *old_prog;
1196 int i;
1197
1198 old_prog = rtnl_dereference(tun->xdp_prog);
1199 rcu_assign_pointer(tun->xdp_prog, prog);
1200 if (old_prog)
1201 bpf_prog_put(old_prog);
1202
1203 for (i = 0; i < tun->numqueues; i++) {
1204 tfile = rtnl_dereference(tun->tfiles[i]);
1205 if (prog)
1206 sock_set_flag(&tfile->sk, SOCK_XDP);
1207 else
1208 sock_reset_flag(&tfile->sk, SOCK_XDP);
1209 }
1210 list_for_each_entry(tfile, &tun->disabled, next) {
1211 if (prog)
1212 sock_set_flag(&tfile->sk, SOCK_XDP);
1213 else
1214 sock_reset_flag(&tfile->sk, SOCK_XDP);
1215 }
1216
1217 return 0;
1218}
1219
1220static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp)
1221{
1222 switch (xdp->command) {
1223 case XDP_SETUP_PROG:
1224 return tun_xdp_set(dev, xdp->prog, xdp->extack);
1225 default:
1226 return -EINVAL;
1227 }
1228}
1229
1230static int tun_net_change_carrier(struct net_device *dev, bool new_carrier)
1231{
1232 if (new_carrier) {
1233 struct tun_struct *tun = netdev_priv(dev);
1234
1235 if (!tun->numqueues)
1236 return -EPERM;
1237
1238 netif_carrier_on(dev);
1239 } else {
1240 netif_carrier_off(dev);
1241 }
1242 return 0;
1243}
1244
1245static const struct net_device_ops tun_netdev_ops = {
1246 .ndo_init = tun_net_init,
1247 .ndo_uninit = tun_net_uninit,
1248 .ndo_open = tun_net_open,
1249 .ndo_stop = tun_net_close,
1250 .ndo_start_xmit = tun_net_xmit,
1251 .ndo_fix_features = tun_net_fix_features,
1252 .ndo_select_queue = tun_select_queue,
1253 .ndo_set_rx_headroom = tun_set_headroom,
1254 .ndo_get_stats64 = tun_net_get_stats64,
1255 .ndo_change_carrier = tun_net_change_carrier,
1256};
1257
1258static void __tun_xdp_flush_tfile(struct tun_file *tfile)
1259{
1260 /* Notify and wake up reader process */
1261 if (tfile->flags & TUN_FASYNC)
1262 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
1263 tfile->socket.sk->sk_data_ready(tfile->socket.sk);
1264}
1265
1266static int tun_xdp_xmit(struct net_device *dev, int n,
1267 struct xdp_frame **frames, u32 flags)
1268{
1269 struct tun_struct *tun = netdev_priv(dev);
1270 struct tun_file *tfile;
1271 u32 numqueues;
1272 int nxmit = 0;
1273 int i;
1274
1275 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
1276 return -EINVAL;
1277
1278 rcu_read_lock();
1279
1280resample:
1281 numqueues = READ_ONCE(tun->numqueues);
1282 if (!numqueues) {
1283 rcu_read_unlock();
1284 return -ENXIO; /* Caller will free/return all frames */
1285 }
1286
1287 tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
1288 numqueues]);
1289 if (unlikely(!tfile))
1290 goto resample;
1291
1292 spin_lock(&tfile->tx_ring.producer_lock);
1293 for (i = 0; i < n; i++) {
1294 struct xdp_frame *xdp = frames[i];
1295		/* Encode the XDP flag into the lowest bit so the consumer
1296		 * can distinguish an XDP frame from an sk_buff.
1297		 */
1298 void *frame = tun_xdp_to_ptr(xdp);
1299
1300 if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
1301 dev_core_stats_tx_dropped_inc(dev);
1302 break;
1303 }
1304 nxmit++;
1305 }
1306 spin_unlock(&tfile->tx_ring.producer_lock);
1307
1308 if (flags & XDP_XMIT_FLUSH)
1309 __tun_xdp_flush_tfile(tfile);
1310
1311 rcu_read_unlock();
1312 return nxmit;
1313}
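
/*
 * tun_xdp_to_ptr() and tun_ptr_to_xdp() (linux/if_tun.h) implement the
 * tagging used above. Roughly (a sketch of the idea, not the exact
 * helpers):
 *
 *	void *tagged = (void *)((unsigned long)xdpf | 0x1UL); // XDP frame
 *	bool is_xdp  = (unsigned long)ptr & 0x1UL;
 *	struct xdp_frame *xdpf2 =
 *		(struct xdp_frame *)((unsigned long)ptr & ~0x1UL);
 *
 * Frame pointers are at least word-aligned, so bit 0 is always free to
 * carry the tag.
 */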
1314
1315static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
1316{
1317 struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
1318 int nxmit;
1319
1320 if (unlikely(!frame))
1321 return -EOVERFLOW;
1322
1323 nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
1324 if (!nxmit)
1325 xdp_return_frame_rx_napi(frame);
1326 return nxmit;
1327}
1328
1329static const struct net_device_ops tap_netdev_ops = {
1330 .ndo_init = tun_net_init,
1331 .ndo_uninit = tun_net_uninit,
1332 .ndo_open = tun_net_open,
1333 .ndo_stop = tun_net_close,
1334 .ndo_start_xmit = tun_net_xmit,
1335 .ndo_fix_features = tun_net_fix_features,
1336 .ndo_set_rx_mode = tun_net_mclist,
1337 .ndo_set_mac_address = eth_mac_addr,
1338 .ndo_validate_addr = eth_validate_addr,
1339 .ndo_select_queue = tun_select_queue,
1340 .ndo_features_check = passthru_features_check,
1341 .ndo_set_rx_headroom = tun_set_headroom,
1342 .ndo_bpf = tun_xdp,
1343 .ndo_xdp_xmit = tun_xdp_xmit,
1344 .ndo_change_carrier = tun_net_change_carrier,
1345};
1346
1347static void tun_flow_init(struct tun_struct *tun)
1348{
1349 int i;
1350
1351 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
1352 INIT_HLIST_HEAD(&tun->flows[i]);
1353
1354 tun->ageing_time = TUN_FLOW_EXPIRE;
1355 timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
1356 mod_timer(&tun->flow_gc_timer,
1357 round_jiffies_up(jiffies + tun->ageing_time));
1358}
1359
1360static void tun_flow_uninit(struct tun_struct *tun)
1361{
1362 del_timer_sync(&tun->flow_gc_timer);
1363 tun_flow_flush(tun);
1364}
1365
1366#define MIN_MTU 68
1367#define MAX_MTU 65535
1368
1369/* Initialize net device. */
1370static void tun_net_initialize(struct net_device *dev)
1371{
1372 struct tun_struct *tun = netdev_priv(dev);
1373
1374 switch (tun->flags & TUN_TYPE_MASK) {
1375 case IFF_TUN:
1376 dev->netdev_ops = &tun_netdev_ops;
1377 dev->header_ops = &ip_tunnel_header_ops;
1378
1379 /* Point-to-Point TUN Device */
1380 dev->hard_header_len = 0;
1381 dev->addr_len = 0;
1382 dev->mtu = 1500;
1383
1384 /* Zero header length */
1385 dev->type = ARPHRD_NONE;
1386 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
1387 break;
1388
1389 case IFF_TAP:
1390 dev->netdev_ops = &tap_netdev_ops;
1391 /* Ethernet TAP Device */
1392 ether_setup(dev);
1393 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1394 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
1395
1396 eth_hw_addr_random(dev);
1397
1398 /* Currently tun does not support XDP, only tap does. */
1399 dev->xdp_features = NETDEV_XDP_ACT_BASIC |
1400 NETDEV_XDP_ACT_REDIRECT |
1401 NETDEV_XDP_ACT_NDO_XMIT;
1402
1403 break;
1404 }
1405
1406 dev->min_mtu = MIN_MTU;
1407 dev->max_mtu = MAX_MTU - dev->hard_header_len;
1408}
1409
1410static bool tun_sock_writeable(struct tun_struct *tun, struct tun_file *tfile)
1411{
1412 struct sock *sk = tfile->socket.sk;
1413
1414 return (tun->dev->flags & IFF_UP) && sock_writeable(sk);
1415}
1416
1417/* Character device part */
1418
1419/* Poll */
1420static __poll_t tun_chr_poll(struct file *file, poll_table *wait)
1421{
1422 struct tun_file *tfile = file->private_data;
1423 struct tun_struct *tun = tun_get(tfile);
1424 struct sock *sk;
1425 __poll_t mask = 0;
1426
1427 if (!tun)
1428 return EPOLLERR;
1429
1430 sk = tfile->socket.sk;
1431
1432 poll_wait(file, sk_sleep(sk), wait);
1433
1434 if (!ptr_ring_empty(&tfile->tx_ring))
1435 mask |= EPOLLIN | EPOLLRDNORM;
1436
1437 /* Make sure SOCKWQ_ASYNC_NOSPACE is set if not writable to
1438 * guarantee EPOLLOUT to be raised by either here or
1439 * tun_sock_write_space(). Then process could get notification
1440 * after it writes to a down device and meets -EIO.
1441 */
1442 if (tun_sock_writeable(tun, tfile) ||
1443 (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
1444 tun_sock_writeable(tun, tfile)))
1445 mask |= EPOLLOUT | EPOLLWRNORM;
1446
1447 if (tun->dev->reg_state != NETREG_REGISTERED)
1448 mask = EPOLLERR;
1449
1450 tun_put(tun);
1451 return mask;
1452}
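
/*
 * Userspace sketch of the poll contract above (illustrative only):
 * EPOLLIN signals a queued packet, EPOLLOUT that a write should not
 * block on socket memory.
 *
 *	struct pollfd pfd = { .fd = tun_fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		n = read(tun_fd, buf, sizeof(buf));
 */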
1453
1454static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile,
1455 size_t len,
1456 const struct iov_iter *it)
1457{
1458 struct sk_buff *skb;
1459 size_t linear;
1460 int err;
1461 int i;
1462
1463 if (it->nr_segs > MAX_SKB_FRAGS + 1 ||
1464 len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN))
1465 return ERR_PTR(-EMSGSIZE);
1466
1467 local_bh_disable();
1468 skb = napi_get_frags(&tfile->napi);
1469 local_bh_enable();
1470 if (!skb)
1471 return ERR_PTR(-ENOMEM);
1472
1473 linear = iov_iter_single_seg_count(it);
1474 err = __skb_grow(skb, linear);
1475 if (err)
1476 goto free;
1477
1478 skb->len = len;
1479 skb->data_len = len - linear;
1480 skb->truesize += skb->data_len;
1481
1482 for (i = 1; i < it->nr_segs; i++) {
1483 const struct iovec *iov = iter_iov(it);
1484 size_t fragsz = iov->iov_len;
1485 struct page *page;
1486 void *frag;
1487
1488 if (fragsz == 0 || fragsz > PAGE_SIZE) {
1489 err = -EINVAL;
1490 goto free;
1491 }
1492 frag = netdev_alloc_frag(fragsz);
1493 if (!frag) {
1494 err = -ENOMEM;
1495 goto free;
1496 }
1497 page = virt_to_head_page(frag);
1498 skb_fill_page_desc(skb, i - 1, page,
1499 frag - page_address(page), fragsz);
1500 }
1501
1502 return skb;
1503free:
1504 /* frees skb and all frags allocated with napi_alloc_frag() */
1505 napi_free_frags(&tfile->napi);
1506 return ERR_PTR(err);
1507}
1508
1509/* prepad is the amount to reserve at front. len is length after that.
1510 * linear is a hint as to how much to copy (usually headers). */
1511static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
1512 size_t prepad, size_t len,
1513 size_t linear, int noblock)
1514{
1515 struct sock *sk = tfile->socket.sk;
1516 struct sk_buff *skb;
1517 int err;
1518
1519 /* Under a page? Don't bother with paged skb. */
1520 if (prepad + len < PAGE_SIZE)
1521 linear = len;
1522
1523 if (len - linear > MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
1524 linear = len - MAX_SKB_FRAGS * (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER);
1525 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
1526 &err, PAGE_ALLOC_COSTLY_ORDER);
1527 if (!skb)
1528 return ERR_PTR(err);
1529
1530 skb_reserve(skb, prepad);
1531 skb_put(skb, linear);
1532 skb->data_len = len - linear;
1533 skb->len += len - linear;
1534
1535 return skb;
1536}
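
/*
 * Worked example (hypothetical numbers): for a 64KiB GSO write with
 * gso.hdr_len == 66,
 *
 *	skb = tun_alloc_skb(tfile, NET_SKB_PAD, 65536, 66, 0);
 *	// skb_headlen(skb) == 66, skb->data_len == 65470
 *
 * only the protocol headers land in the linear head; the bulk of the
 * payload hangs off as paged data, avoiding a large contiguous
 * allocation.
 */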
1537
1538static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
1539 struct sk_buff *skb, int more)
1540{
1541 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1542 struct sk_buff_head process_queue;
1543 u32 rx_batched = tun->rx_batched;
1544 bool rcv = false;
1545
1546 if (!rx_batched || (!more && skb_queue_empty(queue))) {
1547 local_bh_disable();
1548 skb_record_rx_queue(skb, tfile->queue_index);
1549 netif_receive_skb(skb);
1550 local_bh_enable();
1551 return;
1552 }
1553
1554 spin_lock(&queue->lock);
1555 if (!more || skb_queue_len(queue) == rx_batched) {
1556 __skb_queue_head_init(&process_queue);
1557 skb_queue_splice_tail_init(queue, &process_queue);
1558 rcv = true;
1559 } else {
1560 __skb_queue_tail(queue, skb);
1561 }
1562 spin_unlock(&queue->lock);
1563
1564 if (rcv) {
1565 struct sk_buff *nskb;
1566
1567 local_bh_disable();
1568 while ((nskb = __skb_dequeue(&process_queue))) {
1569 skb_record_rx_queue(nskb, tfile->queue_index);
1570 netif_receive_skb(nskb);
1571 }
1572 skb_record_rx_queue(skb, tfile->queue_index);
1573 netif_receive_skb(skb);
1574 local_bh_enable();
1575 }
1576}
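
/*
 * Behavioural sketch of the batching above (illustrative, with
 * tun->rx_batched == 2): writes A and B arrive flagged "more" and are
 * parked on sk_write_queue; when C arrives and the queue has reached
 * the batch size, A, B and C are all drained into the stack within a
 * single local_bh_disable() section, amortizing softirq overhead.
 */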
1577
1578static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
1579 int len, int noblock, bool zerocopy)
1580{
1581 if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
1582 return false;
1583
1584 if (tfile->socket.sk->sk_sndbuf != INT_MAX)
1585 return false;
1586
1587 if (!noblock)
1588 return false;
1589
1590 if (zerocopy)
1591 return false;
1592
1593 if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
1594 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
1595 return false;
1596
1597 return true;
1598}
1599
1600static struct sk_buff *__tun_build_skb(struct tun_file *tfile,
1601 struct page_frag *alloc_frag, char *buf,
1602 int buflen, int len, int pad)
1603{
1604 struct sk_buff *skb = build_skb(buf, buflen);
1605
1606 if (!skb)
1607 return ERR_PTR(-ENOMEM);
1608
1609 skb_reserve(skb, pad);
1610 skb_put(skb, len);
1611 skb_set_owner_w(skb, tfile->socket.sk);
1612
1613 get_page(alloc_frag->page);
1614 alloc_frag->offset += buflen;
1615
1616 return skb;
1617}
1618
1619static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
1620 struct xdp_buff *xdp, u32 act)
1621{
1622 int err;
1623
1624 switch (act) {
1625 case XDP_REDIRECT:
1626 err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
1627 if (err) {
1628 dev_core_stats_rx_dropped_inc(tun->dev);
1629 return err;
1630 }
1631 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1632 break;
1633 case XDP_TX:
1634 err = tun_xdp_tx(tun->dev, xdp);
1635 if (err < 0) {
1636 dev_core_stats_rx_dropped_inc(tun->dev);
1637 return err;
1638 }
1639 dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
1640 break;
1641 case XDP_PASS:
1642 break;
1643 default:
1644 bpf_warn_invalid_xdp_action(tun->dev, xdp_prog, act);
1645 fallthrough;
1646 case XDP_ABORTED:
1647 trace_xdp_exception(tun->dev, xdp_prog, act);
1648 fallthrough;
1649 case XDP_DROP:
1650 dev_core_stats_rx_dropped_inc(tun->dev);
1651 break;
1652 }
1653
1654 return act;
1655}
1656
1657static struct sk_buff *tun_build_skb(struct tun_struct *tun,
1658 struct tun_file *tfile,
1659 struct iov_iter *from,
1660 struct virtio_net_hdr *hdr,
1661 int len, int *skb_xdp)
1662{
1663	struct page_frag *alloc_frag = &current->task_frag;
1664 struct bpf_prog *xdp_prog;
1665 int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
1666 char *buf;
1667 size_t copied;
1668 int pad = TUN_RX_PAD;
1669 int err = 0;
1670
1671 rcu_read_lock();
1672 xdp_prog = rcu_dereference(tun->xdp_prog);
1673 if (xdp_prog)
1674 pad += XDP_PACKET_HEADROOM;
1675 buflen += SKB_DATA_ALIGN(len + pad);
1676 rcu_read_unlock();
1677
1678 alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES);
1679 if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL)))
1680 return ERR_PTR(-ENOMEM);
1681
1682 buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
1683 copied = copy_page_from_iter(alloc_frag->page,
1684 alloc_frag->offset + pad,
1685 len, from);
1686 if (copied != len)
1687 return ERR_PTR(-EFAULT);
1688
1689	/* There's a small window in which XDP may be attached after the
1690	 * xdp_prog check above; this should be rare, and for simplicity
1691	 * we fall back to XDP on the skb when the headroom is not enough.
1692	 */
1693 if (hdr->gso_type || !xdp_prog) {
1694 *skb_xdp = 1;
1695 return __tun_build_skb(tfile, alloc_frag, buf, buflen, len,
1696 pad);
1697 }
1698
1699 *skb_xdp = 0;
1700
1701 local_bh_disable();
1702 rcu_read_lock();
1703 xdp_prog = rcu_dereference(tun->xdp_prog);
1704 if (xdp_prog) {
1705 struct xdp_buff xdp;
1706 u32 act;
1707
1708 xdp_init_buff(&xdp, buflen, &tfile->xdp_rxq);
1709 xdp_prepare_buff(&xdp, buf, pad, len, false);
1710
1711 act = bpf_prog_run_xdp(xdp_prog, &xdp);
1712 if (act == XDP_REDIRECT || act == XDP_TX) {
1713 get_page(alloc_frag->page);
1714 alloc_frag->offset += buflen;
1715 }
1716 err = tun_xdp_act(tun, xdp_prog, &xdp, act);
1717 if (err < 0) {
1718 if (act == XDP_REDIRECT || act == XDP_TX)
1719 put_page(alloc_frag->page);
1720 goto out;
1721 }
1722
1723 if (err == XDP_REDIRECT)
1724 xdp_do_flush();
1725 if (err != XDP_PASS)
1726 goto out;
1727
1728 pad = xdp.data - xdp.data_hard_start;
1729 len = xdp.data_end - xdp.data;
1730 }
1731 rcu_read_unlock();
1732 local_bh_enable();
1733
1734 return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad);
1735
1736out:
1737 rcu_read_unlock();
1738 local_bh_enable();
1739 return NULL;
1740}
1741
1742/* Get packet from user space buffer */
1743static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1744 void *msg_control, struct iov_iter *from,
1745 int noblock, bool more)
1746{
1747 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
1748 struct sk_buff *skb;
1749 size_t total_len = iov_iter_count(from);
1750 size_t len = total_len, align = tun->align, linear;
1751 struct virtio_net_hdr gso = { 0 };
1752 int good_linear;
1753 int copylen;
1754 bool zerocopy = false;
1755 int err;
1756 u32 rxhash = 0;
1757 int skb_xdp = 1;
1758 bool frags = tun_napi_frags_enabled(tfile);
1759 enum skb_drop_reason drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
1760
1761 if (!(tun->flags & IFF_NO_PI)) {
1762 if (len < sizeof(pi))
1763 return -EINVAL;
1764 len -= sizeof(pi);
1765
1766 if (!copy_from_iter_full(&pi, sizeof(pi), from))
1767 return -EFAULT;
1768 }
1769
1770 if (tun->flags & IFF_VNET_HDR) {
1771 int vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
1772
1773 if (len < vnet_hdr_sz)
1774 return -EINVAL;
1775 len -= vnet_hdr_sz;
1776
1777 if (!copy_from_iter_full(&gso, sizeof(gso), from))
1778 return -EFAULT;
1779
1780 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1781 tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2 > tun16_to_cpu(tun, gso.hdr_len))
1782 gso.hdr_len = cpu_to_tun16(tun, tun16_to_cpu(tun, gso.csum_start) + tun16_to_cpu(tun, gso.csum_offset) + 2);
1783
1784 if (tun16_to_cpu(tun, gso.hdr_len) > len)
1785 return -EINVAL;
1786 iov_iter_advance(from, vnet_hdr_sz - sizeof(gso));
1787 }
1788
1789 if ((tun->flags & TUN_TYPE_MASK) == IFF_TAP) {
1790 align += NET_IP_ALIGN;
1791 if (unlikely(len < ETH_HLEN ||
1792 (gso.hdr_len && tun16_to_cpu(tun, gso.hdr_len) < ETH_HLEN)))
1793 return -EINVAL;
1794 }
1795
1796 good_linear = SKB_MAX_HEAD(align);
1797
1798 if (msg_control) {
1799 struct iov_iter i = *from;
1800
1801		/* There are 256 bytes to be copied into the skb, so there
1802		 * is enough room to expand the head in case it is needed.
1803		 * The rest of the buffer is mapped from userspace.
1804		 */
1805 copylen = gso.hdr_len ? tun16_to_cpu(tun, gso.hdr_len) : GOODCOPY_LEN;
1806 if (copylen > good_linear)
1807 copylen = good_linear;
1808 linear = copylen;
1809 iov_iter_advance(&i, copylen);
1810 if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
1811 zerocopy = true;
1812 }
1813
1814 if (!frags && tun_can_build_skb(tun, tfile, len, noblock, zerocopy)) {
1815		/* For packets that are not easy to process here
1816		 * (e.g. GSO or jumbo packets), we run XDP after the
1817		 * skb has been created, via the generic XDP routine.
1818		 */
1819 skb = tun_build_skb(tun, tfile, from, &gso, len, &skb_xdp);
1820 err = PTR_ERR_OR_ZERO(skb);
1821 if (err)
1822 goto drop;
1823 if (!skb)
1824 return total_len;
1825 } else {
1826 if (!zerocopy) {
1827 copylen = len;
1828 if (tun16_to_cpu(tun, gso.hdr_len) > good_linear)
1829 linear = good_linear;
1830 else
1831 linear = tun16_to_cpu(tun, gso.hdr_len);
1832 }
1833
1834 if (frags) {
1835 mutex_lock(&tfile->napi_mutex);
1836 skb = tun_napi_alloc_frags(tfile, copylen, from);
1837 /* tun_napi_alloc_frags() enforces a layout for the skb.
1838 * If zerocopy is enabled, then this layout will be
1839 * overwritten by zerocopy_sg_from_iter().
1840 */
1841 zerocopy = false;
1842 } else {
1843 if (!linear)
1844 linear = min_t(size_t, good_linear, copylen);
1845
1846 skb = tun_alloc_skb(tfile, align, copylen, linear,
1847 noblock);
1848 }
1849
1850 err = PTR_ERR_OR_ZERO(skb);
1851 if (err)
1852 goto drop;
1853
1854 if (zerocopy)
1855 err = zerocopy_sg_from_iter(skb, from);
1856 else
1857 err = skb_copy_datagram_from_iter(skb, 0, from, len);
1858
1859 if (err) {
1860 err = -EFAULT;
1861 drop_reason = SKB_DROP_REASON_SKB_UCOPY_FAULT;
1862 goto drop;
1863 }
1864 }
1865
1866 if (virtio_net_hdr_to_skb(skb, &gso, tun_is_little_endian(tun))) {
1867 atomic_long_inc(&tun->rx_frame_errors);
1868 err = -EINVAL;
1869 goto free_skb;
1870 }
1871
1872 switch (tun->flags & TUN_TYPE_MASK) {
1873 case IFF_TUN:
1874 if (tun->flags & IFF_NO_PI) {
1875 u8 ip_version = skb->len ? (skb->data[0] >> 4) : 0;
1876
1877 switch (ip_version) {
1878 case 4:
1879 pi.proto = htons(ETH_P_IP);
1880 break;
1881 case 6:
1882 pi.proto = htons(ETH_P_IPV6);
1883 break;
1884 default:
1885 err = -EINVAL;
1886 goto drop;
1887 }
1888 }
1889
1890 skb_reset_mac_header(skb);
1891 skb->protocol = pi.proto;
1892 skb->dev = tun->dev;
1893 break;
1894 case IFF_TAP:
1895 if (frags && !pskb_may_pull(skb, ETH_HLEN)) {
1896 err = -ENOMEM;
1897 drop_reason = SKB_DROP_REASON_HDR_TRUNC;
1898 goto drop;
1899 }
1900 skb->protocol = eth_type_trans(skb, tun->dev);
1901 break;
1902 }
1903
1904 /* copy skb_ubuf_info for callback when skb has no error */
1905 if (zerocopy) {
1906 skb_zcopy_init(skb, msg_control);
1907 } else if (msg_control) {
1908 struct ubuf_info *uarg = msg_control;
1909 uarg->callback(NULL, uarg, false);
1910 }
1911
1912 skb_reset_network_header(skb);
1913 skb_probe_transport_header(skb);
1914 skb_record_rx_queue(skb, tfile->queue_index);
1915
1916 if (skb_xdp) {
1917 struct bpf_prog *xdp_prog;
1918 int ret;
1919
1920 local_bh_disable();
1921 rcu_read_lock();
1922 xdp_prog = rcu_dereference(tun->xdp_prog);
1923 if (xdp_prog) {
1924 ret = do_xdp_generic(xdp_prog, &skb);
1925 if (ret != XDP_PASS) {
1926 rcu_read_unlock();
1927 local_bh_enable();
1928 goto unlock_frags;
1929 }
1930 }
1931 rcu_read_unlock();
1932 local_bh_enable();
1933 }
1934
1935	/* Compute the costly rx hash only if needed for flow updates.
1936	 * There is a small possibility of out-of-order delivery while a
1937	 * flow switches queues; it is not worth optimizing for.
1938	 */
1939 if (!rcu_access_pointer(tun->steering_prog) && tun->numqueues > 1 &&
1940 !tfile->detached)
1941 rxhash = __skb_get_hash_symmetric(skb);
1942
1943 rcu_read_lock();
1944 if (unlikely(!(tun->dev->flags & IFF_UP))) {
1945 err = -EIO;
1946 rcu_read_unlock();
1947 drop_reason = SKB_DROP_REASON_DEV_READY;
1948 goto drop;
1949 }
1950
1951 if (frags) {
1952 u32 headlen;
1953
1954 /* Exercise flow dissector code path. */
1955 skb_push(skb, ETH_HLEN);
1956 headlen = eth_get_headlen(tun->dev, skb->data,
1957 skb_headlen(skb));
1958
1959 if (unlikely(headlen > skb_headlen(skb))) {
1960 WARN_ON_ONCE(1);
1961 err = -ENOMEM;
1962 dev_core_stats_rx_dropped_inc(tun->dev);
1963napi_busy:
1964 napi_free_frags(&tfile->napi);
1965 rcu_read_unlock();
1966 mutex_unlock(&tfile->napi_mutex);
1967 return err;
1968 }
1969
1970 if (likely(napi_schedule_prep(&tfile->napi))) {
1971 local_bh_disable();
1972 napi_gro_frags(&tfile->napi);
1973 napi_complete(&tfile->napi);
1974 local_bh_enable();
1975 } else {
1976 err = -EBUSY;
1977 goto napi_busy;
1978 }
1979 mutex_unlock(&tfile->napi_mutex);
1980 } else if (tfile->napi_enabled) {
1981 struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
1982 int queue_len;
1983
1984 spin_lock_bh(&queue->lock);
1985
1986 if (unlikely(tfile->detached)) {
1987 spin_unlock_bh(&queue->lock);
1988 rcu_read_unlock();
1989 err = -EBUSY;
1990 goto free_skb;
1991 }
1992
1993 __skb_queue_tail(queue, skb);
1994 queue_len = skb_queue_len(queue);
1995 spin_unlock(&queue->lock);
1996
1997 if (!more || queue_len > NAPI_POLL_WEIGHT)
1998 napi_schedule(&tfile->napi);
1999
2000 local_bh_enable();
2001 } else if (!IS_ENABLED(CONFIG_4KSTACKS)) {
2002 tun_rx_batched(tun, tfile, skb, more);
2003 } else {
2004 netif_rx(skb);
2005 }
2006 rcu_read_unlock();
2007
2008 preempt_disable();
2009 dev_sw_netstats_rx_add(tun->dev, len);
2010 preempt_enable();
2011
2012 if (rxhash)
2013 tun_flow_update(tun, rxhash, tfile);
2014
2015 return total_len;
2016
2017drop:
2018 if (err != -EAGAIN)
2019 dev_core_stats_rx_dropped_inc(tun->dev);
2020
2021free_skb:
2022 if (!IS_ERR_OR_NULL(skb))
2023 kfree_skb_reason(skb, drop_reason);
2024
2025unlock_frags:
2026 if (frags) {
2027 tfile->napi.skb = NULL;
2028 mutex_unlock(&tfile->napi_mutex);
2029 }
2030
2031 return err ?: total_len;
2032}
2033
2034static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
2035{
2036 struct file *file = iocb->ki_filp;
2037 struct tun_file *tfile = file->private_data;
2038 struct tun_struct *tun = tun_get(tfile);
2039 ssize_t result;
2040 int noblock = 0;
2041
2042 if (!tun)
2043 return -EBADFD;
2044
2045 if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2046 noblock = 1;
2047
2048 result = tun_get_user(tun, tfile, NULL, from, noblock, false);
2049
2050 tun_put(tun);
2051 return result;
2052}
2053
2054static ssize_t tun_put_user_xdp(struct tun_struct *tun,
2055 struct tun_file *tfile,
2056 struct xdp_frame *xdp_frame,
2057 struct iov_iter *iter)
2058{
2059 int vnet_hdr_sz = 0;
2060 size_t size = xdp_frame->len;
2061 size_t ret;
2062
2063 if (tun->flags & IFF_VNET_HDR) {
2064 struct virtio_net_hdr gso = { 0 };
2065
2066 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2067 if (unlikely(iov_iter_count(iter) < vnet_hdr_sz))
2068 return -EINVAL;
2069 if (unlikely(copy_to_iter(&gso, sizeof(gso), iter) !=
2070 sizeof(gso)))
2071 return -EFAULT;
2072 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2073 }
2074
2075 ret = copy_to_iter(xdp_frame->data, size, iter) + vnet_hdr_sz;
2076
2077 preempt_disable();
2078 dev_sw_netstats_tx_add(tun->dev, 1, ret);
2079 preempt_enable();
2080
2081 return ret;
2082}
2083
2084/* Put packet to the user space buffer */
2085static ssize_t tun_put_user(struct tun_struct *tun,
2086 struct tun_file *tfile,
2087 struct sk_buff *skb,
2088 struct iov_iter *iter)
2089{
2090 struct tun_pi pi = { 0, skb->protocol };
2091 ssize_t total;
2092 int vlan_offset = 0;
2093 int vlan_hlen = 0;
2094 int vnet_hdr_sz = 0;
2095
2096 if (skb_vlan_tag_present(skb))
2097 vlan_hlen = VLAN_HLEN;
2098
2099 if (tun->flags & IFF_VNET_HDR)
2100 vnet_hdr_sz = READ_ONCE(tun->vnet_hdr_sz);
2101
2102 total = skb->len + vlan_hlen + vnet_hdr_sz;
2103
2104 if (!(tun->flags & IFF_NO_PI)) {
2105 if (iov_iter_count(iter) < sizeof(pi))
2106 return -EINVAL;
2107
2108 total += sizeof(pi);
2109 if (iov_iter_count(iter) < total) {
2110 /* Packet will be striped */
2111			/* Packet will be stripped (truncated) */
2112 }
2113
2114 if (copy_to_iter(&pi, sizeof(pi), iter) != sizeof(pi))
2115 return -EFAULT;
2116 }
2117
2118 if (vnet_hdr_sz) {
2119 struct virtio_net_hdr gso;
2120
2121 if (iov_iter_count(iter) < vnet_hdr_sz)
2122 return -EINVAL;
2123
2124 if (virtio_net_hdr_from_skb(skb, &gso,
2125 tun_is_little_endian(tun), true,
2126 vlan_hlen)) {
2127 struct skb_shared_info *sinfo = skb_shinfo(skb);
2128
2129 if (net_ratelimit()) {
2130 netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
2131 sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
2132 tun16_to_cpu(tun, gso.hdr_len));
2133 print_hex_dump(KERN_ERR, "tun: ",
2134 DUMP_PREFIX_NONE,
2135 16, 1, skb->head,
2136 min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
2137 }
2138 WARN_ON_ONCE(1);
2139 return -EINVAL;
2140 }
2141
2142 if (copy_to_iter(&gso, sizeof(gso), iter) != sizeof(gso))
2143 return -EFAULT;
2144
2145 iov_iter_advance(iter, vnet_hdr_sz - sizeof(gso));
2146 }
2147
2148 if (vlan_hlen) {
2149 int ret;
2150 struct veth veth;
2151
2152 veth.h_vlan_proto = skb->vlan_proto;
2153 veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));
2154
2155 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
2156
2157 ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
2158 if (ret || !iov_iter_count(iter))
2159 goto done;
2160
2161 ret = copy_to_iter(&veth, sizeof(veth), iter);
2162 if (ret != sizeof(veth) || !iov_iter_count(iter))
2163 goto done;
2164 }
2165
2166 skb_copy_datagram_iter(skb, vlan_offset, iter, skb->len - vlan_offset);
2167
2168done:
2169	/* caller is in process context */
2170 preempt_disable();
2171 dev_sw_netstats_tx_add(tun->dev, 1, skb->len + vlan_hlen);
2172 preempt_enable();
2173
2174 return total;
2175}
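
/*
 * Resulting read() layout produced above (sketch; sizes assume the
 * defaults):
 *
 *	[ struct tun_pi, 4 bytes             - unless IFF_NO_PI ]
 *	[ struct virtio_net_hdr, vnet_hdr_sz - if IFF_VNET_HDR  ]
 *	[ frame data, with any VLAN tag re-inserted after the   ]
 *	[ MAC addresses                                         ]
 *
 * A reader must supply a buffer large enough for the headers plus the
 * packet; otherwise pi.flags carries TUN_PKT_STRIP and the payload is
 * truncated to fit.
 */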
2176
2177static void *tun_ring_recv(struct tun_file *tfile, int noblock, int *err)
2178{
2179 DECLARE_WAITQUEUE(wait, current);
2180 void *ptr = NULL;
2181 int error = 0;
2182
2183 ptr = ptr_ring_consume(&tfile->tx_ring);
2184 if (ptr)
2185 goto out;
2186 if (noblock) {
2187 error = -EAGAIN;
2188 goto out;
2189 }
2190
2191 add_wait_queue(&tfile->socket.wq.wait, &wait);
2192
2193 while (1) {
2194 set_current_state(TASK_INTERRUPTIBLE);
2195 ptr = ptr_ring_consume(&tfile->tx_ring);
2196 if (ptr)
2197 break;
2198 if (signal_pending(current)) {
2199 error = -ERESTARTSYS;
2200 break;
2201 }
2202 if (tfile->socket.sk->sk_shutdown & RCV_SHUTDOWN) {
2203 error = -EFAULT;
2204 break;
2205 }
2206
2207 schedule();
2208 }
2209
2210 __set_current_state(TASK_RUNNING);
2211 remove_wait_queue(&tfile->socket.wq.wait, &wait);
2212
2213out:
2214 *err = error;
2215 return ptr;
2216}
2217
2218static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
2219 struct iov_iter *to,
2220 int noblock, void *ptr)
2221{
2222 ssize_t ret;
2223 int err;
2224
2225 if (!iov_iter_count(to)) {
2226 tun_ptr_free(ptr);
2227 return 0;
2228 }
2229
2230 if (!ptr) {
2231 /* Read frames from ring */
2232 ptr = tun_ring_recv(tfile, noblock, &err);
2233 if (!ptr)
2234 return err;
2235 }
2236
2237 if (tun_is_xdp_frame(ptr)) {
2238 struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);
2239
2240 ret = tun_put_user_xdp(tun, tfile, xdpf, to);
2241 xdp_return_frame(xdpf);
2242 } else {
2243 struct sk_buff *skb = ptr;
2244
2245 ret = tun_put_user(tun, tfile, skb, to);
2246 if (unlikely(ret < 0))
2247 kfree_skb(skb);
2248 else
2249 consume_skb(skb);
2250 }
2251
2252 return ret;
2253}
2254
2255static ssize_t tun_chr_read_iter(struct kiocb *iocb, struct iov_iter *to)
2256{
2257 struct file *file = iocb->ki_filp;
2258 struct tun_file *tfile = file->private_data;
2259 struct tun_struct *tun = tun_get(tfile);
2260 ssize_t len = iov_iter_count(to), ret;
2261 int noblock = 0;
2262
2263 if (!tun)
2264 return -EBADFD;
2265
2266 if ((file->f_flags & O_NONBLOCK) || (iocb->ki_flags & IOCB_NOWAIT))
2267 noblock = 1;
2268
2269 ret = tun_do_read(tun, tfile, to, noblock, NULL);
2270 ret = min_t(ssize_t, ret, len);
2271 if (ret > 0)
2272 iocb->ki_pos = ret;
2273 tun_put(tun);
2274 return ret;
2275}
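
/*
 * Minimal userspace sketch of the character-device read/write paths
 * (illustrative; error handling omitted):
 *
 *	struct ifreq ifr;
 *	char buf[2048];
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
 *	strncpy(ifr.ifr_name, "tun%d", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);                  // creates e.g. tun0
 *
 *	ssize_t n = read(fd, buf, sizeof(buf));      // one IP packet
 *	write(fd, buf, n);                           // inject one packet
 */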
2276
2277static void tun_prog_free(struct rcu_head *rcu)
2278{
2279 struct tun_prog *prog = container_of(rcu, struct tun_prog, rcu);
2280
2281 bpf_prog_destroy(prog->prog);
2282 kfree(prog);
2283}
2284
2285static int __tun_set_ebpf(struct tun_struct *tun,
2286 struct tun_prog __rcu **prog_p,
2287 struct bpf_prog *prog)
2288{
2289 struct tun_prog *old, *new = NULL;
2290
2291 if (prog) {
2292 new = kmalloc(sizeof(*new), GFP_KERNEL);
2293 if (!new)
2294 return -ENOMEM;
2295 new->prog = prog;
2296 }
2297
2298 spin_lock_bh(&tun->lock);
2299 old = rcu_dereference_protected(*prog_p,
2300 lockdep_is_held(&tun->lock));
2301 rcu_assign_pointer(*prog_p, new);
2302 spin_unlock_bh(&tun->lock);
2303
2304 if (old)
2305 call_rcu(&old->rcu, tun_prog_free);
2306
2307 return 0;
2308}
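
/*
 * The swap above is the classic RCU publish pattern: data-path readers
 * see either the old or the new program, never a half-updated state.
 * Userspace installs such a program via TUNSETSTEERINGEBPF (sketch;
 * prog_fd is assumed to be a loaded BPF_PROG_TYPE_SOCKET_FILTER
 * program whose return value selects the tx queue):
 *
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
 */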
2309
2310static void tun_free_netdev(struct net_device *dev)
2311{
2312 struct tun_struct *tun = netdev_priv(dev);
2313
2314 BUG_ON(!(list_empty(&tun->disabled)));
2315
2316 tun_flow_uninit(tun);
2317 security_tun_dev_free_security(tun->security);
2318 __tun_set_ebpf(tun, &tun->steering_prog, NULL);
2319 __tun_set_ebpf(tun, &tun->filter_prog, NULL);
2320}
2321
2322static void tun_setup(struct net_device *dev)
2323{
2324 struct tun_struct *tun = netdev_priv(dev);
2325
2326 tun->owner = INVALID_UID;
2327 tun->group = INVALID_GID;
2328 tun_default_link_ksettings(dev, &tun->link_ksettings);
2329
2330 dev->ethtool_ops = &tun_ethtool_ops;
2331 dev->needs_free_netdev = true;
2332 dev->priv_destructor = tun_free_netdev;
2333 /* We prefer our own queue length */
2334 dev->tx_queue_len = TUN_READQ_SIZE;
2335}
2336
2337/* Trivial set of netlink ops that allows deleting a tun or tap
2338 * device with netlink.
2339 */
2340static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
2341 struct netlink_ext_ack *extack)
2342{
2343 NL_SET_ERR_MSG(extack,
2344 "tun/tap creation via rtnetlink is not supported.");
2345 return -EOPNOTSUPP;
2346}
2347
2348static size_t tun_get_size(const struct net_device *dev)
2349{
2350 BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
2351 BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
2352
2353 return nla_total_size(sizeof(uid_t)) + /* OWNER */
2354 nla_total_size(sizeof(gid_t)) + /* GROUP */
2355 nla_total_size(sizeof(u8)) + /* TYPE */
2356 nla_total_size(sizeof(u8)) + /* PI */
2357 nla_total_size(sizeof(u8)) + /* VNET_HDR */
2358 nla_total_size(sizeof(u8)) + /* PERSIST */
2359 nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
2360 nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
2361 nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
2362 0;
2363}
2364
2365static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
2366{
2367 struct tun_struct *tun = netdev_priv(dev);
2368
2369 if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
2370 goto nla_put_failure;
2371 if (uid_valid(tun->owner) &&
2372 nla_put_u32(skb, IFLA_TUN_OWNER,
2373 from_kuid_munged(current_user_ns(), tun->owner)))
2374 goto nla_put_failure;
2375 if (gid_valid(tun->group) &&
2376 nla_put_u32(skb, IFLA_TUN_GROUP,
2377 from_kgid_munged(current_user_ns(), tun->group)))
2378 goto nla_put_failure;
2379 if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
2380 goto nla_put_failure;
2381 if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
2382 goto nla_put_failure;
2383 if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
2384 goto nla_put_failure;
2385 if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
2386 !!(tun->flags & IFF_MULTI_QUEUE)))
2387 goto nla_put_failure;
2388 if (tun->flags & IFF_MULTI_QUEUE) {
2389 if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
2390 goto nla_put_failure;
2391 if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
2392 tun->numdisabled))
2393 goto nla_put_failure;
2394 }
2395
2396 return 0;
2397
2398nla_put_failure:
2399 return -EMSGSIZE;
2400}
2401
2402static struct rtnl_link_ops tun_link_ops __read_mostly = {
2403 .kind = DRV_NAME,
2404 .priv_size = sizeof(struct tun_struct),
2405 .setup = tun_setup,
2406 .validate = tun_validate,
2407 .get_size = tun_get_size,
2408 .fill_info = tun_fill_info,
2409};
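
/*
 * Because of tun_link_ops above, an existing device can be torn down
 * over rtnetlink even though creation that way is rejected. Userspace
 * sketch (equivalent to "ip link delete tap0"; illustrative only):
 *
 *	struct {
 *		struct nlmsghdr  nh;
 *		struct ifinfomsg ifm;
 *	} req = {
 *		.nh.nlmsg_len   = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
 *		.nh.nlmsg_type  = RTM_DELLINK,
 *		.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
 *		.ifm.ifi_index  = if_nametoindex("tap0"),
 *	};
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *	send(fd, &req, req.nh.nlmsg_len, 0);
 */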
2410
2411static void tun_sock_write_space(struct sock *sk)
2412{
2413 struct tun_file *tfile;
2414 wait_queue_head_t *wqueue;
2415
2416 if (!sock_writeable(sk))
2417 return;
2418
2419 if (!test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
2420 return;
2421
2422 wqueue = sk_sleep(sk);
2423 if (wqueue && waitqueue_active(wqueue))
2424 wake_up_interruptible_sync_poll(wqueue, EPOLLOUT |
2425 EPOLLWRNORM | EPOLLWRBAND);
2426
2427 tfile = container_of(sk, struct tun_file, sk);
2428 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
2429}
2430
2431static void tun_put_page(struct tun_page *tpage)
2432{
2433 if (tpage->page)
2434 __page_frag_cache_drain(tpage->page, tpage->count);
2435}
2436
2437static int tun_xdp_one(struct tun_struct *tun,
2438 struct tun_file *tfile,
2439 struct xdp_buff *xdp, int *flush,
2440 struct tun_page *tpage)
2441{
2442 unsigned int datasize = xdp->data_end - xdp->data;
2443 struct tun_xdp_hdr *hdr = xdp->data_hard_start;
2444 struct virtio_net_hdr *gso = &hdr->gso;
2445 struct bpf_prog *xdp_prog;
2446 struct sk_buff *skb = NULL;
2447 struct sk_buff_head *queue;
2448 u32 rxhash = 0, act;
2449 int buflen = hdr->buflen;
2450 int ret = 0;
2451 bool skb_xdp = false;
2452 struct page *page;
2453
2454 xdp_prog = rcu_dereference(tun->xdp_prog);
2455 if (xdp_prog) {
2456 if (gso->gso_type) {
2457 skb_xdp = true;
2458 goto build;
2459 }
2460
2461 xdp_init_buff(xdp, buflen, &tfile->xdp_rxq);
2462 xdp_set_data_meta_invalid(xdp);
2463
2464 act = bpf_prog_run_xdp(xdp_prog, xdp);
2465 ret = tun_xdp_act(tun, xdp_prog, xdp, act);
2466 if (ret < 0) {
2467 put_page(virt_to_head_page(xdp->data));
2468 return ret;
2469 }
2470
2471 switch (ret) {
2472 case XDP_REDIRECT:
2473 *flush = true;
2474 fallthrough;
2475 case XDP_TX:
2476 return 0;
2477 case XDP_PASS:
2478 break;
2479 default:
2480 page = virt_to_head_page(xdp->data);
2481 if (tpage->page == page) {
2482 ++tpage->count;
2483 } else {
2484 tun_put_page(tpage);
2485 tpage->page = page;
2486 tpage->count = 1;
2487 }
2488 return 0;
2489 }
2490 }
2491
2492build:
2493 skb = build_skb(xdp->data_hard_start, buflen);
2494 if (!skb) {
2495 ret = -ENOMEM;
2496 goto out;
2497 }
2498
2499 skb_reserve(skb, xdp->data - xdp->data_hard_start);
2500 skb_put(skb, xdp->data_end - xdp->data);
2501
2502 if (virtio_net_hdr_to_skb(skb, gso, tun_is_little_endian(tun))) {
2503 atomic_long_inc(&tun->rx_frame_errors);
2504 kfree_skb(skb);
2505 ret = -EINVAL;
2506 goto out;
2507 }
2508
2509 skb->protocol = eth_type_trans(skb, tun->dev);
2510 skb_reset_network_header(skb);
2511 skb_probe_transport_header(skb);
2512 skb_record_rx_queue(skb, tfile->queue_index);
2513
2514 if (skb_xdp) {
2515 ret = do_xdp_generic(xdp_prog, &skb);
2516 if (ret != XDP_PASS) {
2517 ret = 0;
2518 goto out;
2519 }
2520 }
2521
2522 if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 &&
2523 !tfile->detached)
2524 rxhash = __skb_get_hash_symmetric(skb);
2525
2526 if (tfile->napi_enabled) {
2527 queue = &tfile->sk.sk_write_queue;
2528 spin_lock(&queue->lock);
2529
2530 if (unlikely(tfile->detached)) {
2531 spin_unlock(&queue->lock);
2532 kfree_skb(skb);
2533 return -EBUSY;
2534 }
2535
2536 __skb_queue_tail(queue, skb);
2537 spin_unlock(&queue->lock);
2538 ret = 1;
2539 } else {
2540 netif_receive_skb(skb);
2541 ret = 0;
2542 }
2543
2544 /* No need to disable preemption here since this function is
2545 * always called with bh disabled
2546 */
2547 dev_sw_netstats_rx_add(tun->dev, datasize);
2548
2549 if (rxhash)
2550 tun_flow_update(tun, rxhash, tfile);
2551
2552out:
2553 return ret;
2554}
2555
static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
{
	int ret, i;
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	struct tun_msg_ctl *ctl = m->msg_control;
	struct xdp_buff *xdp;

	if (!tun)
		return -EBADFD;

	if (m->msg_controllen == sizeof(struct tun_msg_ctl) &&
	    ctl && ctl->type == TUN_MSG_PTR) {
		struct tun_page tpage;
		int n = ctl->num;
		int flush = 0, queued = 0;

		memset(&tpage, 0, sizeof(tpage));

		local_bh_disable();
		rcu_read_lock();

		for (i = 0; i < n; i++) {
			xdp = &((struct xdp_buff *)ctl->ptr)[i];
			ret = tun_xdp_one(tun, tfile, xdp, &flush, &tpage);
			if (ret > 0)
				queued += ret;
		}

		if (flush)
			xdp_do_flush();

		if (tfile->napi_enabled && queued > 0)
			napi_schedule(&tfile->napi);

		rcu_read_unlock();
		local_bh_enable();

		tun_put_page(&tpage);

		ret = total_len;
		goto out;
	}

	ret = tun_get_user(tun, tfile, ctl ? ctl->ptr : NULL, &m->msg_iter,
			   m->msg_flags & MSG_DONTWAIT,
			   m->msg_flags & MSG_MORE);
out:
	tun_put(tun);
	return ret;
}

static int tun_recvmsg(struct socket *sock, struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun = tun_get(tfile);
	void *ptr = m->msg_control;
	int ret;

	if (!tun) {
		ret = -EBADFD;
		goto out_free;
	}

	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC|MSG_ERRQUEUE)) {
		ret = -EINVAL;
		goto out_put_tun;
	}
	if (flags & MSG_ERRQUEUE) {
		ret = sock_recv_errqueue(sock->sk, m, total_len,
					 SOL_PACKET, TUN_TX_TIMESTAMP);
		goto out;
	}
	ret = tun_do_read(tun, tfile, &m->msg_iter, flags & MSG_DONTWAIT, ptr);
	if (ret > (ssize_t)total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
out:
	tun_put(tun);
	return ret;

out_put_tun:
	tun_put(tun);
out_free:
	tun_ptr_free(ptr);
	return ret;
}

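/* Entries in the tx_ring are either sk_buffs or tagged xdp_frame
 * pointers; report the payload length of @ptr without consuming it.
 */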
static int tun_ptr_peek_len(void *ptr)
{
	if (likely(ptr)) {
		if (tun_is_xdp_frame(ptr)) {
			struct xdp_frame *xdpf = tun_ptr_to_xdp(ptr);

			return xdpf->len;
		}
		return __skb_array_len_with_tag(ptr);
	} else {
		return 0;
	}
}

static int tun_peek_len(struct socket *sock)
{
	struct tun_file *tfile = container_of(sock, struct tun_file, socket);
	struct tun_struct *tun;
	int ret = 0;

	tun = tun_get(tfile);
	if (!tun)
		return 0;

	ret = PTR_RING_PEEK_CALL(&tfile->tx_ring, tun_ptr_peek_len);
	tun_put(tun);

	return ret;
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
	.peek_len = tun_peek_len,
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
};

static struct proto tun_proto = {
	.name = "tun",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tun_file),
};

static int tun_flags(struct tun_struct *tun)
{
	return tun->flags & (TUN_FEATURES | IFF_PERSIST | IFF_TUN | IFF_TAP);
}

static ssize_t tun_flags_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return sysfs_emit(buf, "0x%x\n", tun_flags(tun));
}

static ssize_t owner_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return uid_valid(tun->owner) ?
		sysfs_emit(buf, "%u\n",
			   from_kuid_munged(current_user_ns(), tun->owner)) :
		sysfs_emit(buf, "-1\n");
}

static ssize_t group_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));

	return gid_valid(tun->group) ?
		sysfs_emit(buf, "%u\n",
			   from_kgid_munged(current_user_ns(), tun->group)) :
		sysfs_emit(buf, "-1\n");
}

static DEVICE_ATTR_RO(tun_flags);
static DEVICE_ATTR_RO(owner);
static DEVICE_ATTR_RO(group);

static struct attribute *tun_dev_attrs[] = {
	&dev_attr_tun_flags.attr,
	&dev_attr_owner.attr,
	&dev_attr_group.attr,
	NULL
};

static const struct attribute_group tun_attr_group = {
	.attrs = tun_dev_attrs
};

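/* Create a new device or attach to an existing one. For reference, the
 * canonical userspace sequence is roughly (illustrative only, error
 * handling omitted):
 *
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI };
 *	int fd = open("/dev/net/tun", O_RDWR);
 *
 *	strncpy(ifr.ifr_name, "tap%d", IFNAMSIZ);
 *	ioctl(fd, TUNSETIFF, &ifr);
 *	// ifr.ifr_name now holds the allocated name, e.g. "tap0"
 */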
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct tun_struct *tun;
	struct tun_file *tfile = file->private_data;
	struct net_device *dev;
	int err;

	if (tfile->detached)
		return -EINVAL;

	if (ifr->ifr_flags & IFF_NAPI_FRAGS) {
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!(ifr->ifr_flags & IFF_NAPI) ||
		    (ifr->ifr_flags & TUN_TYPE_MASK) != IFF_TAP)
			return -EINVAL;
	}

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		if (!!(ifr->ifr_flags & IFF_MULTI_QUEUE) !=
		    !!(tun->flags & IFF_MULTI_QUEUE))
			return -EINVAL;

		if (tun_not_capable(tun))
			return -EPERM;
		err = security_tun_dev_open(tun->security);
		if (err < 0)
			return err;

		err = tun_attach(tun, file, ifr->ifr_flags & IFF_NOFILTER,
				 ifr->ifr_flags & IFF_NAPI,
				 ifr->ifr_flags & IFF_NAPI_FRAGS, true);
		if (err < 0)
			return err;

		if (tun->flags & IFF_MULTI_QUEUE &&
		    (tun->numqueues + tun->numdisabled > 1)) {
			/* One or more queues have already been attached, so no
			 * need to initialize the device again.
			 */
			netdev_state_change(dev);
			return 0;
		}

		tun->flags = (tun->flags & ~TUN_FEATURES) |
			     (ifr->ifr_flags & TUN_FEATURES);

		netdev_state_change(dev);
	} else {
		char *name;
		unsigned long flags = 0;
		int queues = ifr->ifr_flags & IFF_MULTI_QUEUE ?
			     MAX_TAP_QUEUES : 1;

		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= IFF_TUN;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= IFF_TAP;
			name = "tap%d";
		} else {
			return -EINVAL;
		}

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
				       NET_NAME_UNKNOWN, tun_setup, queues,
				       queues);

		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;
		dev->ifindex = tfile->ifindex;
		dev->sysfs_groups[0] = &tun_attr_group;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		tun->align = NET_SKB_PAD;
		tun->filter_attached = false;
		tun->sndbuf = tfile->socket.sk->sk_sndbuf;
		tun->rx_batched = 0;
		RCU_INIT_POINTER(tun->steering_prog, NULL);

		tun->ifr = ifr;
		tun->file = file;

		tun_net_initialize(dev);

		err = register_netdevice(tun->dev);
		if (err < 0) {
			free_netdev(dev);
			return err;
		}
		/* free_netdev() won't check the refcnt; to avoid a race
		 * with dev_put() we must publish tun only after registration.
		 */
		rcu_assign_pointer(tfile->tun, tun);
	}

	if (ifr->ifr_flags & IFF_NO_CARRIER)
		netif_carrier_off(tun->dev);
	else
		netif_carrier_on(tun->dev);

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_tx_wake_all_queues(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;
}

static void tun_get_iff(struct tun_struct *tun, struct ifreq *ifr)
{
	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);
}

2885/* This is like a cut-down ethtool ops, except done via tun fd so no
2886 * privs required. */
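/* For example, a caller that negotiated checksum and TCP segmentation
 * offload would issue (illustrative only):
 *
 *	ioctl(fd, TUNSETOFFLOAD, TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6);
 *
 * Unknown bits make this fail with -EINVAL, which doubles as a feature
 * probe.
 */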
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		arg &= ~TUN_F_UFO;

		/* TODO: for now USO4 and USO6 are only accepted together */
		if (arg & TUN_F_USO4 && arg & TUN_F_USO6) {
			features |= NETIF_F_GSO_UDP_L4;
			arg &= ~(TUN_F_USO4 | TUN_F_USO6);
		}
	}

	/* This gives the user a way to test for new features in the future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	tun->dev->wanted_features &= ~TUN_USER_FEATURES;
	tun->dev->wanted_features |= features;
	netdev_update_features(tun->dev);

	return 0;
}

static void tun_detach_filter(struct tun_struct *tun, int n)
{
	int i;
	struct tun_file *tfile;

	for (i = 0; i < n; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		sk_detach_filter(tfile->socket.sk);
		release_sock(tfile->socket.sk);
	}

	tun->filter_attached = false;
}

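/* Attach the classic BPF filter in tun->fprog to the socket of every
 * currently attached queue, unwinding on the first failure.
 */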
static int tun_attach_filter(struct tun_struct *tun)
{
	int i, ret = 0;
	struct tun_file *tfile;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		lock_sock(tfile->socket.sk);
		ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
		release_sock(tfile->socket.sk);
		if (ret) {
			tun_detach_filter(tun, i);
			return ret;
		}
	}

	tun->filter_attached = true;
	return ret;
}

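/* Propagate tun->sndbuf to the socket of every attached queue. */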
static void tun_set_sndbuf(struct tun_struct *tun)
{
	struct tun_file *tfile;
	int i;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		tfile->socket.sk->sk_sndbuf = tun->sndbuf;
	}
}

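/* TUNSETQUEUE: temporarily disable or re-enable one multiqueue fd.
 * Illustrative only:
 *
 *	ifr.ifr_flags = IFF_DETACH_QUEUE;
 *	ioctl(queue_fd, TUNSETQUEUE, &ifr);	// and IFF_ATTACH_QUEUE to resume
 */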
static int tun_set_queue(struct file *file, struct ifreq *ifr)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	int ret = 0;

	rtnl_lock();

	if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
		tun = tfile->detached;
		if (!tun) {
			ret = -EINVAL;
			goto unlock;
		}
		ret = security_tun_dev_attach_queue(tun->security);
		if (ret < 0)
			goto unlock;
		ret = tun_attach(tun, file, false, tun->flags & IFF_NAPI,
				 tun->flags & IFF_NAPI_FRAGS, true);
	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
		tun = rtnl_dereference(tfile->tun);
		if (!tun || !(tun->flags & IFF_MULTI_QUEUE) || tfile->detached)
			ret = -EINVAL;
		else
			__tun_detach(tfile, false);
	} else {
		ret = -EINVAL;
	}

	if (ret >= 0)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	return ret;
}

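/* Install or clear a BPF program supplied as an fd from userspace, e.g.
 * for TUNSETSTEERINGEBPF (illustrative only):
 *
 *	int prog_fd = ...;	// a BPF_PROG_TYPE_SOCKET_FILTER program
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &prog_fd);
 *
 *	int none = -1;
 *	ioctl(tun_fd, TUNSETSTEERINGEBPF, &none);	// detach
 */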
static int tun_set_ebpf(struct tun_struct *tun, struct tun_prog __rcu **prog_p,
			void __user *data)
{
	struct bpf_prog *prog;
	int fd;

	if (copy_from_user(&fd, data, sizeof(fd)))
		return -EFAULT;

	if (fd == -1) {
		prog = NULL;
	} else {
		prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
		if (IS_ERR(prog))
			return PTR_ERR(prog);
	}

	return __tun_set_ebpf(tun, prog_p, prog);
}

/* Return correct value for tun->dev->addr_len based on tun->dev->type. */
static unsigned char tun_get_addr_len(unsigned short type)
{
	switch (type) {
	case ARPHRD_IP6GRE:
	case ARPHRD_TUNNEL6:
		return sizeof(struct in6_addr);
	case ARPHRD_IPGRE:
	case ARPHRD_TUNNEL:
	case ARPHRD_SIT:
		return 4;
	case ARPHRD_ETHER:
		return ETH_ALEN;
	case ARPHRD_IEEE802154:
	case ARPHRD_IEEE802154_MONITOR:
		return IEEE802154_EXTENDED_ADDR_LEN;
	case ARPHRD_PHONET_PIPE:
	case ARPHRD_PPP:
	case ARPHRD_NONE:
		return 0;
	case ARPHRD_6LOWPAN:
		return EUI64_ADDR_LEN;
	case ARPHRD_FDDI:
		return FDDI_K_ALEN;
	case ARPHRD_HIPPI:
		return HIPPI_ALEN;
	case ARPHRD_IEEE802:
		return FC_ALEN;
	case ARPHRD_ROSE:
		return ROSE_ADDR_LEN;
	case ARPHRD_NETROM:
		return AX25_ADDR_LEN;
	case ARPHRD_LOCALTLK:
		return LTALK_ALEN;
	default:
		return 0;
	}
}

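/* Common ioctl worker; ifreq_len is sizeof(struct ifreq) for native
 * callers and sizeof(struct compat_ifreq) for 32-bit compat callers, so
 * we never copy past what userspace actually provided.
 */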
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = sock_net(&tfile->sk);
	struct tun_struct *tun;
	void __user *argp = (void __user *)arg;
	unsigned int carrier;
	struct ifreq ifr;
	kuid_t owner;
	kgid_t group;
	int ifindex;
	int sndbuf;
	int vnet_hdr_sz;
	int le;
	int ret;
	bool do_notify = false;

	if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
	    (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;
	} else {
		memset(&ifr, 0, sizeof(ifr));
	}
	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF.
		 */
		return put_user(IFF_TUN | IFF_TAP | IFF_NO_CARRIER |
				TUN_FEATURES, (unsigned int __user *)argp);
	} else if (cmd == TUNSETQUEUE) {
		return tun_set_queue(file, &ifr);
	} else if (cmd == SIOCGSKNS) {
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		return open_related_ns(&net->ns, get_net_ns);
	}

	rtnl_lock();

	tun = tun_get(tfile);
	if (cmd == TUNSETIFF) {
		ret = -EEXIST;
		if (tun)
			goto unlock;

		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}
	if (cmd == TUNSETIFINDEX) {
		ret = -EPERM;
		if (tun)
			goto unlock;

		ret = -EFAULT;
		if (copy_from_user(&ifindex, argp, sizeof(ifindex)))
			goto unlock;
		ret = -EINVAL;
		if (ifindex < 0)
			goto unlock;
		ret = 0;
		tfile->ifindex = ifindex;
		goto unlock;
	}

	ret = -EBADFD;
	if (!tun)
		goto unlock;

	netif_info(tun, drv, tun->dev, "tun_chr_ioctl cmd %u\n", cmd);

	net = dev_net(tun->dev);
	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		tun_get_iff(tun, &ifr);

		if (tfile->detached)
			ifr.ifr_flags |= IFF_DETACH_QUEUE;
		if (!tfile->socket.sk->sk_filter)
			ifr.ifr_flags |= IFF_NOFILTER;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		netif_info(tun, drv, tun->dev, "ignored: set checksum %s\n",
			   arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode. Keep an extra reference to the
		 * module to prevent the module being unloaded.
		 */
		if (arg && !(tun->flags & IFF_PERSIST)) {
			tun->flags |= IFF_PERSIST;
			__module_get(THIS_MODULE);
			do_notify = true;
		}
		if (!arg && (tun->flags & IFF_PERSIST)) {
			tun->flags &= ~IFF_PERSIST;
			module_put(THIS_MODULE);
			do_notify = true;
		}

		netif_info(tun, drv, tun->dev, "persist %s\n",
			   arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		owner = make_kuid(current_user_ns(), arg);
		if (!uid_valid(owner)) {
			ret = -EINVAL;
			break;
		}
		tun->owner = owner;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "owner set to %u\n",
			   from_kuid(&init_user_ns, tun->owner));
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		group = make_kgid(current_user_ns(), arg);
		if (!gid_valid(group)) {
			ret = -EINVAL;
			break;
		}
		tun->group = group;
		do_notify = true;
		netif_info(tun, drv, tun->dev, "group set to %u\n",
			   from_kgid(&init_user_ns, tun->group));
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			netif_info(tun, drv, tun->dev,
				   "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			ret = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
						       tun->dev);
			ret = notifier_to_errno(ret);
			if (ret) {
				netif_info(tun, drv, tun->dev,
					   "Refused to change device type\n");
				break;
			}
			tun->dev->type = (int)arg;
			tun->dev->addr_len = tun_get_addr_len(tun->dev->type);
			netif_info(tun, drv, tun->dev, "linktype set to %d\n",
				   tun->dev->type);
			call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE,
						 tun->dev);
		}
		break;

	case TUNSETDEBUG:
		tun->msg_enable = (u32)arg;
		break;

	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		dev_get_mac_address(&ifr.ifr_hwaddr, net, tun->dev->name);
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		ret = dev_set_mac_address_user(tun->dev, &ifr.ifr_hwaddr, NULL);
		break;

	case TUNGETSNDBUF:
		sndbuf = tfile->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}
		if (sndbuf <= 0) {
			ret = -EINVAL;
			break;
		}

		tun->sndbuf = sndbuf;
		tun_set_sndbuf(tun);
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNGETVNETLE:
		le = !!(tun->flags & TUN_VNET_LE);
		if (put_user(le, (int __user *)argp))
			ret = -EFAULT;
		break;

	case TUNSETVNETLE:
		if (get_user(le, (int __user *)argp)) {
			ret = -EFAULT;
			break;
		}
		if (le)
			tun->flags |= TUN_VNET_LE;
		else
			tun->flags &= ~TUN_VNET_LE;
		break;

	case TUNGETVNETBE:
		ret = tun_get_vnet_be(tun, argp);
		break;

	case TUNSETVNETBE:
		ret = tun_set_vnet_be(tun, argp);
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
			break;

		ret = tun_attach_filter(tun);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = 0;
		tun_detach_filter(tun, tun->numqueues);
		break;

	case TUNGETFILTER:
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != IFF_TAP)
			break;
		ret = -EFAULT;
		if (copy_to_user(argp, &tun->fprog, sizeof(tun->fprog)))
			break;
		ret = 0;
		break;

	case TUNSETSTEERINGEBPF:
		ret = tun_set_ebpf(tun, &tun->steering_prog, argp);
		break;

	case TUNSETFILTEREBPF:
		ret = tun_set_ebpf(tun, &tun->filter_prog, argp);
		break;

	case TUNSETCARRIER:
		ret = -EFAULT;
		if (copy_from_user(&carrier, argp, sizeof(carrier)))
			goto unlock;

		ret = tun_net_change_carrier(tun->dev, (bool)carrier);
		break;

	case TUNGETDEVNETNS:
		ret = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			goto unlock;
		ret = open_related_ns(&net->ns, get_net_ns);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (do_notify)
		netdev_state_change(tun->dev);

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}

static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}

#ifdef CONFIG_COMPAT
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case TUNSETIFF:
	case TUNGETIFF:
	case TUNSETTXFILTER:
	case TUNGETSNDBUF:
	case TUNSETSNDBUF:
	case SIOCGIFHWADDR:
	case SIOCSIFHWADDR:
		arg = (unsigned long)compat_ptr(arg);
		break;
	default:
		arg = (compat_ulong_t)arg;
		break;
	}

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */

static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_file *tfile = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
		goto out;

	if (on) {
		__f_setown(file, task_pid(current), PIDTYPE_TGID, 0);
		tfile->flags |= TUN_FASYNC;
	} else {
		tfile->flags &= ~TUN_FASYNC;
	}
	ret = 0;
out:
	return ret;
}

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct tun_file *tfile;

	tfile = (struct tun_file *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto, 0);
	if (!tfile)
		return -ENOMEM;
	if (ptr_ring_init(&tfile->tx_ring, 0, GFP_KERNEL)) {
		sk_free(&tfile->sk);
		return -ENOMEM;
	}

	mutex_init(&tfile->napi_mutex);
	RCU_INIT_POINTER(tfile->tun, NULL);
	tfile->flags = 0;
	tfile->ifindex = 0;

	init_waitqueue_head(&tfile->socket.wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid());

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	INIT_LIST_HEAD(&tfile->next);

	sock_set_flag(&tfile->sk, SOCK_ZEROCOPY);

	/* tun groks IOCB_NOWAIT just fine, mark it as such */
	file->f_mode |= FMODE_NOWAIT;
	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;

	tun_detach(tfile, true);

	return 0;
}

#ifdef CONFIG_PROC_FS
static void tun_chr_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));

	rtnl_lock();
	tun = tun_get(tfile);
	if (tun)
		tun_get_iff(tun, &ifr);
	rtnl_unlock();

	if (tun)
		tun_put(tun);

	seq_printf(m, "iff:\t%s\n", ifr.ifr_name);
}
#endif

static const struct file_operations tun_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read_iter = tun_chr_read_iter,
	.write_iter = tun_chr_write_iter,
	.poll = tun_chr_poll,
	.unlocked_ioctl = tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open = tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = tun_chr_show_fdinfo,
#endif
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};

/* ethtool interface */

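/* There is no real PHY behind a tun device; advertise fixed defaults
 * (10Gb/s full duplex) until userspace overrides them through
 * set_link_ksettings.
 */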
static void tun_default_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);
	cmd->base.speed = SPEED_10000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_TP;
	cmd->base.phy_address = 0;
	cmd->base.autoneg = AUTONEG_DISABLE;
}

static int tun_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(cmd, &tun->link_ksettings, sizeof(*cmd));
	return 0;
}

static int tun_set_link_ksettings(struct net_device *dev,
				  const struct ethtool_link_ksettings *cmd)
{
	struct tun_struct *tun = netdev_priv(dev);

	memcpy(&tun->link_ksettings, cmd, sizeof(*cmd));
	return 0;
}

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strscpy(info->driver, DRV_NAME, sizeof(info->driver));
	strscpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case IFF_TUN:
		strscpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case IFF_TAP:
		strscpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	return tun->msg_enable;
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->msg_enable = value;
}

static int tun_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);

	ec->rx_max_coalesced_frames = tun->rx_batched;

	return 0;
}

static int tun_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	struct tun_struct *tun = netdev_priv(dev);

	if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
		tun->rx_batched = NAPI_POLL_WEIGHT;
	else
		tun->rx_batched = ec->rx_max_coalesced_frames;

	return 0;
}

static void tun_get_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct tun_struct *tun = netdev_priv(dev);

	channels->combined_count = tun->numqueues;
	channels->max_combined = tun->flags & IFF_MULTI_QUEUE ? MAX_TAP_QUEUES : 1;
}

static const struct ethtool_ops tun_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_MAX_FRAMES,
	.get_drvinfo = tun_get_drvinfo,
	.get_msglevel = tun_get_msglevel,
	.set_msglevel = tun_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_channels = tun_get_channels,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_coalesce = tun_get_coalesce,
	.set_coalesce = tun_set_coalesce,
	.get_link_ksettings = tun_get_link_ksettings,
	.set_link_ksettings = tun_set_link_ksettings,
};

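/* The device tx_queue_len changed; resize the ptr_ring of every queue,
 * including currently disabled ones, to match.
 */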
static int tun_queue_resize(struct tun_struct *tun)
{
	struct net_device *dev = tun->dev;
	struct tun_file *tfile;
	struct ptr_ring **rings;
	int n = tun->numqueues + tun->numdisabled;
	int ret, i;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < tun->numqueues; i++) {
		tfile = rtnl_dereference(tun->tfiles[i]);
		rings[i] = &tfile->tx_ring;
	}
	list_for_each_entry(tfile, &tun->disabled, next)
		rings[i++] = &tfile->tx_ring;

	ret = ptr_ring_resize_multiple(rings, n,
				       dev->tx_queue_len, GFP_KERNEL,
				       tun_ptr_free);

	kfree(rings);
	return ret;
}

static int tun_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tun_struct *tun = netdev_priv(dev);
	int i;

	if (dev->rtnl_link_ops != &tun_link_ops)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (tun_queue_resize(tun))
			return NOTIFY_BAD;
		break;
	case NETDEV_UP:
		for (i = 0; i < tun->numqueues; i++) {
			struct tun_file *tfile;

			tfile = rtnl_dereference(tun->tfiles[i]);
			tfile->socket.sk->sk_write_space(tfile->socket.sk);
		}
		break;
	default:
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block tun_notifier_block __read_mostly = {
	.notifier_call = tun_device_event,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}

	ret = register_netdevice_notifier(&tun_notifier_block);
	if (ret) {
		pr_err("Can't register netdevice notifier\n");
		goto err_notifier;
	}

	return 0;

err_notifier:
	misc_deregister(&tun_miscdev);
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void __exit tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
	unregister_netdevice_notifier(&tun_notifier_block);
}

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
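/* Illustrative in-kernel usage only (not verbatim from any caller):
 *
 *	struct socket *sock = tun_get_socket(tun_file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 */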
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);

struct ptr_ring *tun_get_tx_ring(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->tx_ring;
}
EXPORT_SYMBOL_GPL(tun_get_tx_ring);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");