Linux Audio

Check our new training course

Loading...
v4.17
 
  1/*
  2 * INET		802.1Q VLAN
  3 *		Ethernet-type device handling.
  4 *
  5 * Authors:	Ben Greear <greearb@candelatech.com>
  6 *              Please send support related email to: netdev@vger.kernel.org
  7 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
  8 *
  9 * Fixes:
 10 *              Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 11 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 12 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 13 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 14 *
 15 *		This program is free software; you can redistribute it and/or
 16 *		modify it under the terms of the GNU General Public License
 17 *		as published by the Free Software Foundation; either version
 18 *		2 of the License, or (at your option) any later version.
 19 */
 20
 21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 22
 23#include <linux/capability.h>
 24#include <linux/module.h>
 25#include <linux/netdevice.h>
 26#include <linux/skbuff.h>
 27#include <linux/slab.h>
 28#include <linux/init.h>
 29#include <linux/rculist.h>
 30#include <net/p8022.h>
 31#include <net/arp.h>
 32#include <linux/rtnetlink.h>
 33#include <linux/notifier.h>
 34#include <net/rtnetlink.h>
 35#include <net/net_namespace.h>
 36#include <net/netns/generic.h>
 37#include <linux/uaccess.h>
 38
 39#include <linux/if_vlan.h>
 40#include "vlan.h"
 41#include "vlanproc.h"
 42
 43#define DRV_VERSION "1.8"
 44
 45/* Global VLAN variables */
 46
 47unsigned int vlan_net_id __read_mostly;
 48
 49const char vlan_fullname[] = "802.1Q VLAN Support";
 50const char vlan_version[] = DRV_VERSION;
 51
 52/* End of global variables definitions. */
 53
 54static int vlan_group_prealloc_vid(struct vlan_group *vg,
 55				   __be16 vlan_proto, u16 vlan_id)
 56{
 57	struct net_device **array;
 58	unsigned int pidx, vidx;
 59	unsigned int size;
 60
 61	ASSERT_RTNL();
 62
 63	pidx  = vlan_proto_idx(vlan_proto);
 64	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
 65	array = vg->vlan_devices_arrays[pidx][vidx];
 66	if (array != NULL)
 67		return 0;
 68
 69	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
 70	array = kzalloc(size, GFP_KERNEL);
 71	if (array == NULL)
 72		return -ENOBUFS;
 73
 74	vg->vlan_devices_arrays[pidx][vidx] = array;
 75	return 0;
 76}
 77
 
 
 
 
 
 
 
 
/* Tear down a VLAN device and detach it from its real device.
 * Must be called with RTNL held.  If @head is non-NULL the device is
 * queued there for a later batched unregister_netdevice_many();
 * otherwise it is unregistered immediately.
 */
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* A registered VLAN device always implies vlan_info on real_dev. */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	/* Withdraw MVRP/GVRP registrations before the device disappears. */
	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we dont need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	/* Last VLAN on this real device: stop the protocol applicants. */
	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}
119
120int vlan_check_real_dev(struct net_device *real_dev,
121			__be16 protocol, u16 vlan_id)
 
122{
123	const char *name = real_dev->name;
124
125	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
126		pr_info("VLANs not supported on %s\n", name);
 
127		return -EOPNOTSUPP;
128	}
129
130	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL)
 
131		return -EEXIST;
 
132
133	return 0;
134}
135
/* Finish bringing up an initialized VLAN net_device: register the VID
 * with the real device, start GVRP/MVRP applicants for the first VLAN,
 * register the netdevice, link it above real_dev and publish it in the
 * VLAN group.  On failure everything done so far is unwound; a caller
 * whose device is still NETREG_UNINITIALIZED must free it itself.
 * Runs under RTNL.
 */
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		/* First VLAN on this device: start protocol applicants. */
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	vlan->nest_level = dev_get_nest_level(real_dev) + 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now lets place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	/* Applicants were only started if this would be the first VLAN. */
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}
202
203/*  Attach a VLAN device to a mac address (ie Ethernet Card).
204 *  Returns 0 if the device was created or a negative error code otherwise.
205 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	/* The legacy ioctl path only creates 802.1Q (C-VLAN) devices. */
	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like:	 eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);

	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->dent = NULL;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev, NULL);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	/* register_vlan_dev() may have registered the netdevice before
	 * failing; only free it here if registration never happened.
	 */
	if (new_dev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(new_dev);
	return err;
}
279
/* Called when the real device @dev changes its MAC address: adjust the
 * unicast filter of @dev so @vlandev keeps receiving frames for its own
 * address, then cache the real device's new address.
 */
static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
308
/* Copy offload-related attributes from the real device @dev to
 * @vlandev and recompute the VLAN device's feature set.  Called on
 * NETDEV_FEAT_CHANGE of the real device.
 */
static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;
	vlandev->gso_max_segs = dev->gso_max_segs;

	/* With HW VLAN tag insertion the tag is not part of the header
	 * the stack builds, so no extra header room is needed.
	 */
	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	/* Inherit only the real device's IFF_XMIT_DST_RELEASE setting. */
	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);

	netdev_update_features(vlandev);
}
331
332static int __vlan_device_event(struct net_device *dev, unsigned long event)
333{
334	int err = 0;
335
336	switch (event) {
337	case NETDEV_CHANGENAME:
338		vlan_proc_rem_dev(dev);
339		err = vlan_proc_add_dev(dev);
340		break;
341	case NETDEV_REGISTER:
342		err = vlan_proc_add_dev(dev);
343		break;
344	case NETDEV_UNREGISTER:
345		vlan_proc_rem_dev(dev);
346		break;
347	}
348
349	return err;
350}
351
/* netdevice notifier: propagate state of a real device to the VLAN
 * devices stacked on it (link state, MAC address, MTU, features,
 * up/down, unregistration, VID filter push/drop), and maintain the
 * proc entry when the notified device is itself a VLAN device.
 * Runs under RTNL.
 */
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);
	int err;

	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	/* Keep VID 0 in the HW filter while a filtering device is up so
	 * priority-tagged frames are received.
	 */
	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;	/* no VLANs stacked on this device */
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			netif_stacked_transfer_operstate(dev, vlandev);
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		/* Clamp only VLAN MTUs that now exceed the lower device's. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan = vlan_dev_priv(vlandev);
			/* Loose-binding VLANs stay administratively up. */
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			netif_stacked_transfer_operstate(dev, vlandev);
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP);
			netif_stacked_transfer_operstate(dev, vlandev);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlaying device to change its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;

	case NETDEV_CVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_CVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
		break;

	case NETDEV_SVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_SVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
		break;
	}

out:
	return NOTIFY_DONE;
}
518
/* Hooked into the netdev notifier chain by vlan_proto_init(). */
static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};
522
523/*
524 *	VLAN IOCTL handler.
525 *	o execute requested action or pass command to the device driver
526 *   arg is really a struct vlan_ioctl_args __user *.
527 */
/* Legacy vconfig ioctl entry point: copies in a struct vlan_ioctl_args
 * and dispatches on args.cmd.  Privileged commands require
 * CAP_NET_ADMIN in the owning user namespace.  Runs under RTNL.
 */
static int vlan_ioctl_handler(struct net *net, void __user *arg)
{
	int err;
	struct vlan_ioctl_args args;
	struct net_device *dev = NULL;

	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
		return -EFAULT;

	/* Null terminate this sucker, just in case. */
	args.device1[sizeof(args.device1) - 1] = 0;
	args.u.device2[sizeof(args.u.device2) - 1] = 0;

	rtnl_lock();

	/* First pass: resolve device1 for the commands that need it, and
	 * require it to be a VLAN device except for ADD_VLAN_CMD (which
	 * names the real device to stack onto).
	 */
	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
	case SET_VLAN_EGRESS_PRIORITY_CMD:
	case SET_VLAN_FLAG_CMD:
	case ADD_VLAN_CMD:
	case DEL_VLAN_CMD:
	case GET_VLAN_REALDEV_NAME_CMD:
	case GET_VLAN_VID_CMD:
		err = -ENODEV;
		dev = __dev_get_by_name(net, args.device1);
		if (!dev)
			goto out;

		err = -EINVAL;
		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
			goto out;
	}

	/* Second pass: perform the command. */
	switch (args.cmd) {
	case SET_VLAN_INGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		vlan_dev_set_ingress_priority(dev,
					      args.u.skb_priority,
					      args.vlan_qos);
		err = 0;
		break;

	case SET_VLAN_EGRESS_PRIORITY_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_set_egress_priority(dev,
						   args.u.skb_priority,
						   args.vlan_qos);
		break;

	case SET_VLAN_FLAG_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = vlan_dev_change_flags(dev,
					    args.vlan_qos ? args.u.flag : 0,
					    args.u.flag);
		break;

	case SET_VLAN_NAME_TYPE_CMD:
		/* Per-netns default naming scheme for new VLAN devices. */
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
			struct vlan_net *vn;

			vn = net_generic(net, vlan_net_id);
			vn->name_type = args.u.name_type;
			err = 0;
		} else {
			err = -EINVAL;
		}
		break;

	case ADD_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		err = register_vlan_device(dev, args.u.VID);
		break;

	case DEL_VLAN_CMD:
		err = -EPERM;
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			break;
		unregister_vlan_dev(dev, NULL);
		err = 0;
		break;

	case GET_VLAN_REALDEV_NAME_CMD:
		err = 0;
		vlan_dev_get_realdev_name(dev, args.u.device2);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
			err = -EFAULT;
		break;

	case GET_VLAN_VID_CMD:
		err = 0;
		args.u.VID = vlan_dev_vlan_id(dev);
		if (copy_to_user(arg, &args,
				 sizeof(struct vlan_ioctl_args)))
		      err = -EFAULT;
		break;

	default:
		err = -EOPNOTSUPP;
		break;
	}
out:
	rtnl_unlock();
	return err;
}
644
/* GRO receive callback for VLAN-tagged packets: read the VLAN header,
 * break same_flow for held packets whose VLAN header differs, then pull
 * our header and hand off to the encapsulated protocol's gro_receive.
 * flush stays 1 (no aggregation) if the header cannot be read or the
 * inner protocol has no GRO handler.
 */
static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
					 struct sk_buff *skb)
{
	struct sk_buff *p, **pp = NULL;
	struct vlan_hdr *vhdr;
	unsigned int hlen, off_vlan;
	const struct packet_offload *ptype;
	__be16 type;
	int flush = 1;

	off_vlan = skb_gro_offset(skb);
	hlen = off_vlan + sizeof(*vhdr);
	vhdr = skb_gro_header_fast(skb, off_vlan);
	if (skb_gro_header_hard(skb, hlen)) {
		/* Header not in the fast-path area; take the slow path. */
		vhdr = skb_gro_header_slow(skb, hlen, off_vlan);
		if (unlikely(!vhdr))
			goto out;
	}

	type = vhdr->h_vlan_encapsulated_proto;

	rcu_read_lock();
	ptype = gro_find_receive_by_type(type);
	if (!ptype)
		goto out_unlock;

	flush = 0;

	/* Packets with a different VLAN header must not merge with us. */
	for (p = *head; p; p = p->next) {
		struct vlan_hdr *vhdr2;

		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		vhdr2 = (struct vlan_hdr *)(p->data + off_vlan);
		if (compare_vlan_header(vhdr, vhdr2))
			NAPI_GRO_CB(p)->same_flow = 0;
	}

	skb_gro_pull(skb, sizeof(*vhdr));
	skb_gro_postpull_rcsum(skb, vhdr, sizeof(*vhdr));
	pp = call_gro_receive(ptype->callbacks.gro_receive, head, skb);

out_unlock:
	rcu_read_unlock();
out:
	NAPI_GRO_CB(skb)->flush |= flush;

	return pp;
}
695
696static int vlan_gro_complete(struct sk_buff *skb, int nhoff)
697{
698	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + nhoff);
699	__be16 type = vhdr->h_vlan_encapsulated_proto;
700	struct packet_offload *ptype;
701	int err = -ENOENT;
702
703	rcu_read_lock();
704	ptype = gro_find_complete_by_type(type);
705	if (ptype)
706		err = ptype->callbacks.gro_complete(skb, nhoff + sizeof(*vhdr));
707
708	rcu_read_unlock();
709	return err;
710}
711
/* GRO offload handlers for both C-VLAN (802.1Q) and S-VLAN (802.1AD)
 * ethertypes, registered in vlan_proto_init(); priority 10.
 */
static struct packet_offload vlan_packet_offloads[] __read_mostly = {
	{
		.type = cpu_to_be16(ETH_P_8021Q),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
	{
		.type = cpu_to_be16(ETH_P_8021AD),
		.priority = 10,
		.callbacks = {
			.gro_receive = vlan_gro_receive,
			.gro_complete = vlan_gro_complete,
		},
	},
};
730
731static int __net_init vlan_init_net(struct net *net)
732{
733	struct vlan_net *vn = net_generic(net, vlan_net_id);
734	int err;
735
736	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
737
738	err = vlan_proc_init(net);
739
740	return err;
741}
742
/* Per-netns teardown: remove this namespace's /proc/net/vlan entries. */
static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}
747
/* Pernet operations: a struct vlan_net is allocated per namespace
 * (generic-netns id stored in vlan_net_id) and proc state is set up and
 * torn down on namespace create/destroy.
 */
static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};
754
/* Module init: register pernet ops, the netdevice notifier, the
 * GVRP/MVRP applicants, netlink ops and the GRO offloads, then hook the
 * legacy vconfig ioctl.  Each failure unwinds everything registered so
 * far via the error-label ladder below.
 */
static int __init vlan_proto_init(void)
{
	int err;
	unsigned int i;

	pr_info("%s v%s\n", vlan_fullname, vlan_version);

	err = register_pernet_subsys(&vlan_net_ops);
	if (err < 0)
		goto err0;

	err = register_netdevice_notifier(&vlan_notifier_block);
	if (err < 0)
		goto err2;

	err = vlan_gvrp_init();
	if (err < 0)
		goto err3;

	err = vlan_mvrp_init();
	if (err < 0)
		goto err4;

	err = vlan_netlink_init();
	if (err < 0)
		goto err5;

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_add_offload(&vlan_packet_offloads[i]);

	vlan_ioctl_set(vlan_ioctl_handler);
	return 0;

	/* Unwind in reverse registration order. */
err5:
	vlan_mvrp_uninit();
err4:
	vlan_gvrp_uninit();
err3:
	unregister_netdevice_notifier(&vlan_notifier_block);
err2:
	unregister_pernet_subsys(&vlan_net_ops);
err0:
	return err;
}
799
/* Module exit: unwind vlan_proto_init() in reverse order. */
static void __exit vlan_cleanup_module(void)
{
	unsigned int i;

	vlan_ioctl_set(NULL);

	for (i = 0; i < ARRAY_SIZE(vlan_packet_offloads); i++)
		dev_remove_offload(&vlan_packet_offloads[i]);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}
819
820module_init(vlan_proto_init);
821module_exit(vlan_cleanup_module);
822
823MODULE_LICENSE("GPL");
824MODULE_VERSION(DRV_VERSION);
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * INET		802.1Q VLAN
  4 *		Ethernet-type device handling.
  5 *
  6 * Authors:	Ben Greear <greearb@candelatech.com>
  7 *              Please send support related email to: netdev@vger.kernel.org
  8 *              VLAN Home Page: http://www.candelatech.com/~greear/vlan.html
  9 *
 10 * Fixes:
 11 *              Fix for packet capture - Nick Eggleston <nick@dccinc.com>;
 12 *		Add HW acceleration hooks - David S. Miller <davem@redhat.com>;
 13 *		Correct all the locking - David S. Miller <davem@redhat.com>;
 14 *		Use hash table for VLAN groups - David S. Miller <davem@redhat.com>
 
 
 
 
 
 15 */
 16
 17#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 18
 19#include <linux/capability.h>
 20#include <linux/module.h>
 21#include <linux/netdevice.h>
 22#include <linux/skbuff.h>
 23#include <linux/slab.h>
 24#include <linux/init.h>
 25#include <linux/rculist.h>
 26#include <net/p8022.h>
 27#include <net/arp.h>
 28#include <linux/rtnetlink.h>
 29#include <linux/notifier.h>
 30#include <net/rtnetlink.h>
 31#include <net/net_namespace.h>
 32#include <net/netns/generic.h>
 33#include <linux/uaccess.h>
 34
 35#include <linux/if_vlan.h>
 36#include "vlan.h"
 37#include "vlanproc.h"
 38
 39#define DRV_VERSION "1.8"
 40
 41/* Global VLAN variables */
 42
 43unsigned int vlan_net_id __read_mostly;
 44
 45const char vlan_fullname[] = "802.1Q VLAN Support";
 46const char vlan_version[] = DRV_VERSION;
 47
 48/* End of global variables definitions. */
 49
 50static int vlan_group_prealloc_vid(struct vlan_group *vg,
 51				   __be16 vlan_proto, u16 vlan_id)
 52{
 53	struct net_device **array;
 54	unsigned int pidx, vidx;
 55	unsigned int size;
 56
 57	ASSERT_RTNL();
 58
 59	pidx  = vlan_proto_idx(vlan_proto);
 60	vidx  = vlan_id / VLAN_GROUP_ARRAY_PART_LEN;
 61	array = vg->vlan_devices_arrays[pidx][vidx];
 62	if (array != NULL)
 63		return 0;
 64
 65	size = sizeof(struct net_device *) * VLAN_GROUP_ARRAY_PART_LEN;
 66	array = kzalloc(size, GFP_KERNEL);
 67	if (array == NULL)
 68		return -ENOBUFS;
 69
 70	vg->vlan_devices_arrays[pidx][vidx] = array;
 71	return 0;
 72}
 73
 74static void vlan_stacked_transfer_operstate(const struct net_device *rootdev,
 75					    struct net_device *dev,
 76					    struct vlan_dev_priv *vlan)
 77{
 78	if (!(vlan->flags & VLAN_FLAG_BRIDGE_BINDING))
 79		netif_stacked_transfer_operstate(rootdev, dev);
 80}
 81
/* Tear down a VLAN device and detach it from its real device.
 * Must be called with RTNL held.  If @head is non-NULL the device is
 * queued there for a later batched unregister_netdevice_many();
 * otherwise it is unregistered immediately.
 */
void unregister_vlan_dev(struct net_device *dev, struct list_head *head)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	u16 vlan_id = vlan->vlan_id;

	ASSERT_RTNL();

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* A registered VLAN device always implies vlan_info on real_dev. */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;

	grp->nr_vlan_devs--;

	/* Withdraw MVRP/GVRP registrations before the device disappears. */
	if (vlan->flags & VLAN_FLAG_MVRP)
		vlan_mvrp_request_leave(dev);
	if (vlan->flags & VLAN_FLAG_GVRP)
		vlan_gvrp_request_leave(dev);

	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, NULL);

	netdev_upper_dev_unlink(real_dev, dev);
	/* Because unregister_netdevice_queue() makes sure at least one rcu
	 * grace period is respected before device freeing,
	 * we dont need to call synchronize_net() here.
	 */
	unregister_netdevice_queue(dev, head);

	/* Last VLAN on this real device: stop the protocol applicants. */
	if (grp->nr_vlan_devs == 0) {
		vlan_mvrp_uninit_applicant(real_dev);
		vlan_gvrp_uninit_applicant(real_dev);
	}

	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);

	/* Get rid of the vlan's reference to real_dev */
	dev_put(real_dev);
}
123
/* Validate that a VLAN with @vlan_id/@protocol may be created on
 * @real_dev.  Returns 0 if OK, -EOPNOTSUPP if the device cannot host
 * VLANs, or -EEXIST if such a VLAN already exists; failures also set a
 * netlink extack message for the caller.
 */
int vlan_check_real_dev(struct net_device *real_dev,
			__be16 protocol, u16 vlan_id,
			struct netlink_ext_ack *extack)
{
	const char *name = real_dev->name;

	if (real_dev->features & NETIF_F_VLAN_CHALLENGED) {
		pr_info("VLANs not supported on %s\n", name);
		NL_SET_ERR_MSG_MOD(extack, "VLANs not supported on device");
		return -EOPNOTSUPP;
	}

	if (vlan_find_dev(real_dev, protocol, vlan_id) != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN device already exists");
		return -EEXIST;
	}

	return 0;
}
143
/* Finish bringing up an initialized VLAN net_device: register the VID
 * with the real device, start GVRP/MVRP applicants for the first VLAN,
 * register the netdevice, link it above real_dev and publish it in the
 * VLAN group.  On failure everything done so far is unwound; a caller
 * whose device is still NETREG_UNINITIALIZED must free it itself.
 * Runs under RTNL.
 */
int register_vlan_dev(struct net_device *dev, struct netlink_ext_ack *extack)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
	struct net_device *real_dev = vlan->real_dev;
	u16 vlan_id = vlan->vlan_id;
	struct vlan_info *vlan_info;
	struct vlan_group *grp;
	int err;

	err = vlan_vid_add(real_dev, vlan->vlan_proto, vlan_id);
	if (err)
		return err;

	vlan_info = rtnl_dereference(real_dev->vlan_info);
	/* vlan_info should be there now. vlan_vid_add took care of it */
	BUG_ON(!vlan_info);

	grp = &vlan_info->grp;
	if (grp->nr_vlan_devs == 0) {
		/* First VLAN on this device: start protocol applicants. */
		err = vlan_gvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_vid_del;
		err = vlan_mvrp_init_applicant(real_dev);
		if (err < 0)
			goto out_uninit_gvrp;
	}

	err = vlan_group_prealloc_vid(grp, vlan->vlan_proto, vlan_id);
	if (err < 0)
		goto out_uninit_mvrp;

	err = register_netdevice(dev);
	if (err < 0)
		goto out_uninit_mvrp;

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err)
		goto out_unregister_netdev;

	/* Account for reference in struct vlan_dev_priv */
	dev_hold(real_dev);

	vlan_stacked_transfer_operstate(real_dev, dev, vlan);
	linkwatch_fire_event(dev); /* _MUST_ call rfc2863_policy() */

	/* So, got the sucker initialized, now lets place
	 * it into our local structure.
	 */
	vlan_group_set_device(grp, vlan->vlan_proto, vlan_id, dev);
	grp->nr_vlan_devs++;

	return 0;

out_unregister_netdev:
	unregister_netdevice(dev);
out_uninit_mvrp:
	/* Applicants were only started if this would be the first VLAN. */
	if (grp->nr_vlan_devs == 0)
		vlan_mvrp_uninit_applicant(real_dev);
out_uninit_gvrp:
	if (grp->nr_vlan_devs == 0)
		vlan_gvrp_uninit_applicant(real_dev);
out_vid_del:
	vlan_vid_del(real_dev, vlan->vlan_proto, vlan_id);
	return err;
}
209
210/*  Attach a VLAN device to a mac address (ie Ethernet Card).
211 *  Returns 0 if the device was created or a negative error code otherwise.
212 */
static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
{
	struct net_device *new_dev;
	struct vlan_dev_priv *vlan;
	struct net *net = dev_net(real_dev);
	struct vlan_net *vn = net_generic(net, vlan_net_id);
	char name[IFNAMSIZ];
	int err;

	if (vlan_id >= VLAN_VID_MASK)
		return -ERANGE;

	/* The legacy ioctl path only creates 802.1Q (C-VLAN) devices;
	 * no netlink extack is available here.
	 */
	err = vlan_check_real_dev(real_dev, htons(ETH_P_8021Q), vlan_id,
				  NULL);
	if (err < 0)
		return err;

	/* Gotta set up the fields for the device. */
	switch (vn->name_type) {
	case VLAN_NAME_TYPE_RAW_PLUS_VID:
		/* name will look like:	 eth1.0005 */
		snprintf(name, IFNAMSIZ, "%s.%.4i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan5
		 */
		snprintf(name, IFNAMSIZ, "vlan%i", vlan_id);
		break;
	case VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 eth0.5
		 */
		snprintf(name, IFNAMSIZ, "%s.%i", real_dev->name, vlan_id);
		break;
	case VLAN_NAME_TYPE_PLUS_VID:
		/* Put our vlan.VID in the name.
		 * Name will look like:	 vlan0005
		 */
	default:
		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
	}

	new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
			       NET_NAME_UNKNOWN, vlan_setup);

	if (new_dev == NULL)
		return -ENOBUFS;

	dev_net_set(new_dev, net);
	/* need 4 bytes for extra VLAN header info,
	 * hope the underlying device can handle it.
	 */
	new_dev->mtu = real_dev->mtu;

	vlan = vlan_dev_priv(new_dev);
	vlan->vlan_proto = htons(ETH_P_8021Q);
	vlan->vlan_id = vlan_id;
	vlan->real_dev = real_dev;
	vlan->dent = NULL;
	vlan->flags = VLAN_FLAG_REORDER_HDR;

	new_dev->rtnl_link_ops = &vlan_link_ops;
	err = register_vlan_dev(new_dev, NULL);
	if (err < 0)
		goto out_free_newdev;

	return 0;

out_free_newdev:
	/* register_vlan_dev() may have registered the netdevice before
	 * failing; only free it here if registration never happened.
	 */
	if (new_dev->reg_state == NETREG_UNINITIALIZED)
		free_netdev(new_dev);
	return err;
}
287
/* Called when the real device @dev changes its MAC address: update the
 * unicast filter of @dev so @vlandev keeps receiving frames for its own
 * address, then cache the real device's new address.
 */
static void vlan_sync_address(struct net_device *dev,
			      struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	/* May be called without an actual change */
	if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
		return;

	/* vlan continues to inherit address of lower device */
	if (vlan_dev_inherit_address(vlandev, dev))
		goto out;

	/* vlan address was different from the old address and is equal to
	 * the new address */
	if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_del(dev, vlandev->dev_addr);

	/* vlan address was equal to the old address and is different from
	 * the new address */
	if (ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
	    !ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
		dev_uc_add(dev, vlandev->dev_addr);

out:
	ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
316
/* Copy offload-related attributes from the real device @dev to
 * @vlandev and recompute the VLAN device's feature set.  Called on
 * NETDEV_FEAT_CHANGE of the real device.
 */
static void vlan_transfer_features(struct net_device *dev,
				   struct net_device *vlandev)
{
	struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);

	vlandev->gso_max_size = dev->gso_max_size;
	vlandev->gso_max_segs = dev->gso_max_segs;

	/* With HW VLAN tag insertion the tag is not part of the header
	 * the stack builds, so no extra header room is needed.
	 */
	if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
		vlandev->hard_header_len = dev->hard_header_len;
	else
		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;

#if IS_ENABLED(CONFIG_FCOE)
	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif

	/* Inherit only the real device's IFF_XMIT_DST_RELEASE setting. */
	vlandev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	vlandev->priv_flags |= (vlan->real_dev->priv_flags & IFF_XMIT_DST_RELEASE);
	vlandev->hw_enc_features = vlan_tnl_features(vlan->real_dev);

	netdev_update_features(vlandev);
}
340
341static int __vlan_device_event(struct net_device *dev, unsigned long event)
342{
343	int err = 0;
344
345	switch (event) {
346	case NETDEV_CHANGENAME:
347		vlan_proc_rem_dev(dev);
348		err = vlan_proc_add_dev(dev);
349		break;
350	case NETDEV_REGISTER:
351		err = vlan_proc_add_dev(dev);
352		break;
353	case NETDEV_UNREGISTER:
354		vlan_proc_rem_dev(dev);
355		break;
356	}
357
358	return err;
359}
360
/* Netdevice notifier: propagate state changes of a lower device to all
 * vlan devices stacked on it, and maintain vlan /proc entries and
 * hardware VID filters.  Runs under RTNL.
 */
static int vlan_device_event(struct notifier_block *unused, unsigned long event,
			     void *ptr)
{
	struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct vlan_group *grp;
	struct vlan_info *vlan_info;
	int i, flgs;
	struct net_device *vlandev;
	struct vlan_dev_priv *vlan;
	bool last = false;
	LIST_HEAD(list);
	int err;

	/* Events for a vlan device itself only need the /proc entry
	 * updated (note: the inner 'err' shadows the outer declaration).
	 */
	if (is_vlan_dev(dev)) {
		int err = __vlan_device_event(dev, event);

		if (err)
			return notifier_from_errno(err);
	}

	/* Keep VID 0 in the HW filter of filtering-capable devices so
	 * priority-tagged frames are received.
	 */
	if ((event == NETDEV_UP) &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
		pr_info("adding VLAN 0 to HW filter on device %s\n",
			dev->name);
		vlan_vid_add(dev, htons(ETH_P_8021Q), 0);
	}
	if (event == NETDEV_DOWN &&
	    (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		vlan_vid_del(dev, htons(ETH_P_8021Q), 0);

	/* No vlan devices on this lower device: nothing more to do. */
	vlan_info = rtnl_dereference(dev->vlan_info);
	if (!vlan_info)
		goto out;
	grp = &vlan_info->grp;

	/* It is OK that we do not hold the group lock right now,
	 * as we run under the RTNL lock.
	 */

	switch (event) {
	case NETDEV_CHANGE:
		/* Propagate real device state to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_stacked_transfer_operstate(dev, vlandev,
							vlan_dev_priv(vlandev));
		break;

	case NETDEV_CHANGEADDR:
		/* Adjust unicast filters on underlying device */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			vlan_sync_address(dev, vlandev);
		}
		break;

	case NETDEV_CHANGEMTU:
		/* Clamp vlan MTUs that no longer fit the lower device. */
		vlan_group_for_each_dev(grp, i, vlandev) {
			if (vlandev->mtu <= dev->mtu)
				continue;

			dev_set_mtu(vlandev, dev->mtu);
		}
		break;

	case NETDEV_FEAT_CHANGE:
		/* Propagate device features to underlying device */
		vlan_group_for_each_dev(grp, i, vlandev)
			vlan_transfer_features(dev, vlandev);
		break;

	case NETDEV_DOWN: {
		struct net_device *tmp;
		LIST_HEAD(close_list);

		/* Put all VLANs for this dev in the down state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = vlandev->flags;
			if (!(flgs & IFF_UP))
				continue;

			/* LOOSE_BINDING vlans stay up independently of
			 * the lower device.
			 */
			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				list_add(&vlandev->close_list, &close_list);
		}

		dev_close_many(&close_list, false);

		list_for_each_entry_safe(vlandev, tmp, &close_list, close_list) {
			vlan_stacked_transfer_operstate(dev, vlandev,
							vlan_dev_priv(vlandev));
			list_del_init(&vlandev->close_list);
		}
		list_del(&close_list);
		break;
	}
	case NETDEV_UP:
		/* Put all VLANs for this dev in the up state too.  */
		vlan_group_for_each_dev(grp, i, vlandev) {
			flgs = dev_get_flags(vlandev);
			if (flgs & IFF_UP)
				continue;

			vlan = vlan_dev_priv(vlandev);
			if (!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
				dev_change_flags(vlandev, flgs | IFF_UP,
						 extack);
			vlan_stacked_transfer_operstate(dev, vlandev, vlan);
		}
		break;

	case NETDEV_UNREGISTER:
		/* twiddle thumbs on netns device moves */
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		vlan_group_for_each_dev(grp, i, vlandev) {
			/* removal of last vid destroys vlan_info, abort
			 * afterwards */
			if (vlan_info->nr_vids == 1)
				last = true;

			unregister_vlan_dev(vlandev, &list);
			if (last)
				break;
		}
		/* Batch the actual unregistration of all collected vlans. */
		unregister_netdevice_many(&list);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlaying device to change its type. */
		if (vlan_uses_dev(dev))
			return NOTIFY_BAD;
		break;

	case NETDEV_NOTIFY_PEERS:
	case NETDEV_BONDING_FAILOVER:
	case NETDEV_RESEND_IGMP:
		/* Propagate to vlan devices */
		vlan_group_for_each_dev(grp, i, vlandev)
			call_netdevice_notifiers(event, vlandev);
		break;

	case NETDEV_CVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021Q));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_CVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021Q));
		break;

	case NETDEV_SVLAN_FILTER_PUSH_INFO:
		err = vlan_filter_push_vids(vlan_info, htons(ETH_P_8021AD));
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_SVLAN_FILTER_DROP_INFO:
		vlan_filter_drop_vids(vlan_info, htons(ETH_P_8021AD));
		break;
	}

out:
	return NOTIFY_DONE;
}
531
/* Registered with the netdevice notifier chain in vlan_proto_init(). */
static struct notifier_block vlan_notifier_block __read_mostly = {
	.notifier_call = vlan_device_event,
};
535
536/*
537 *	VLAN IOCTL handler.
538 *	o execute requested action or pass command to the device driver
539 *   arg is really a struct vlan_ioctl_args __user *.
540 */
541static int vlan_ioctl_handler(struct net *net, void __user *arg)
542{
543	int err;
544	struct vlan_ioctl_args args;
545	struct net_device *dev = NULL;
546
547	if (copy_from_user(&args, arg, sizeof(struct vlan_ioctl_args)))
548		return -EFAULT;
549
550	/* Null terminate this sucker, just in case. */
551	args.device1[sizeof(args.device1) - 1] = 0;
552	args.u.device2[sizeof(args.u.device2) - 1] = 0;
553
554	rtnl_lock();
555
556	switch (args.cmd) {
557	case SET_VLAN_INGRESS_PRIORITY_CMD:
558	case SET_VLAN_EGRESS_PRIORITY_CMD:
559	case SET_VLAN_FLAG_CMD:
560	case ADD_VLAN_CMD:
561	case DEL_VLAN_CMD:
562	case GET_VLAN_REALDEV_NAME_CMD:
563	case GET_VLAN_VID_CMD:
564		err = -ENODEV;
565		dev = __dev_get_by_name(net, args.device1);
566		if (!dev)
567			goto out;
568
569		err = -EINVAL;
570		if (args.cmd != ADD_VLAN_CMD && !is_vlan_dev(dev))
571			goto out;
572	}
573
574	switch (args.cmd) {
575	case SET_VLAN_INGRESS_PRIORITY_CMD:
576		err = -EPERM;
577		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
578			break;
579		vlan_dev_set_ingress_priority(dev,
580					      args.u.skb_priority,
581					      args.vlan_qos);
582		err = 0;
583		break;
584
585	case SET_VLAN_EGRESS_PRIORITY_CMD:
586		err = -EPERM;
587		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
588			break;
589		err = vlan_dev_set_egress_priority(dev,
590						   args.u.skb_priority,
591						   args.vlan_qos);
592		break;
593
594	case SET_VLAN_FLAG_CMD:
595		err = -EPERM;
596		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
597			break;
598		err = vlan_dev_change_flags(dev,
599					    args.vlan_qos ? args.u.flag : 0,
600					    args.u.flag);
601		break;
602
603	case SET_VLAN_NAME_TYPE_CMD:
604		err = -EPERM;
605		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
606			break;
607		if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
608			struct vlan_net *vn;
609
610			vn = net_generic(net, vlan_net_id);
611			vn->name_type = args.u.name_type;
612			err = 0;
613		} else {
614			err = -EINVAL;
615		}
616		break;
617
618	case ADD_VLAN_CMD:
619		err = -EPERM;
620		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
621			break;
622		err = register_vlan_device(dev, args.u.VID);
623		break;
624
625	case DEL_VLAN_CMD:
626		err = -EPERM;
627		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
628			break;
629		unregister_vlan_dev(dev, NULL);
630		err = 0;
631		break;
632
633	case GET_VLAN_REALDEV_NAME_CMD:
634		err = 0;
635		vlan_dev_get_realdev_name(dev, args.u.device2);
636		if (copy_to_user(arg, &args,
637				 sizeof(struct vlan_ioctl_args)))
638			err = -EFAULT;
639		break;
640
641	case GET_VLAN_VID_CMD:
642		err = 0;
643		args.u.VID = vlan_dev_vlan_id(dev);
644		if (copy_to_user(arg, &args,
645				 sizeof(struct vlan_ioctl_args)))
646		      err = -EFAULT;
647		break;
648
649	default:
650		err = -EOPNOTSUPP;
651		break;
652	}
653out:
654	rtnl_unlock();
655	return err;
656}
657
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
658static int __net_init vlan_init_net(struct net *net)
659{
660	struct vlan_net *vn = net_generic(net, vlan_net_id);
661	int err;
662
663	vn->name_type = VLAN_NAME_TYPE_RAW_PLUS_VID_NO_PAD;
664
665	err = vlan_proc_init(net);
666
667	return err;
668}
669
/* Per-netns teardown: remove this namespace's /proc/net/vlan entries. */
static void __net_exit vlan_exit_net(struct net *net)
{
	vlan_proc_cleanup(net);
}
674
/* Per-network-namespace state; .size allocates a struct vlan_net for
 * each netns, reachable via net_generic(net, vlan_net_id).
 */
static struct pernet_operations vlan_net_ops = {
	.init = vlan_init_net,
	.exit = vlan_exit_net,
	.id   = &vlan_net_id,
	.size = sizeof(struct vlan_net),
};
681
682static int __init vlan_proto_init(void)
683{
684	int err;
 
685
686	pr_info("%s v%s\n", vlan_fullname, vlan_version);
687
688	err = register_pernet_subsys(&vlan_net_ops);
689	if (err < 0)
690		goto err0;
691
692	err = register_netdevice_notifier(&vlan_notifier_block);
693	if (err < 0)
694		goto err2;
695
696	err = vlan_gvrp_init();
697	if (err < 0)
698		goto err3;
699
700	err = vlan_mvrp_init();
701	if (err < 0)
702		goto err4;
703
704	err = vlan_netlink_init();
705	if (err < 0)
706		goto err5;
707
 
 
 
708	vlan_ioctl_set(vlan_ioctl_handler);
709	return 0;
710
711err5:
712	vlan_mvrp_uninit();
713err4:
714	vlan_gvrp_uninit();
715err3:
716	unregister_netdevice_notifier(&vlan_notifier_block);
717err2:
718	unregister_pernet_subsys(&vlan_net_ops);
719err0:
720	return err;
721}
722
/* Module exit: tear down in reverse order of vlan_proto_init().  The
 * ioctl hook is cleared first so no new requests can arrive mid-teardown.
 */
static void __exit vlan_cleanup_module(void)
{
	vlan_ioctl_set(NULL);

	vlan_netlink_fini();

	unregister_netdevice_notifier(&vlan_notifier_block);

	unregister_pernet_subsys(&vlan_net_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	vlan_mvrp_uninit();
	vlan_gvrp_uninit();
}
737
/* Module entry/exit points and metadata. */
module_init(vlan_proto_init);
module_exit(vlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);