net/bridge/br_vlan.c (Linux v5.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/kernel.h>
   3#include <linux/netdevice.h>
   4#include <linux/rtnetlink.h>
   5#include <linux/slab.h>
   6#include <net/switchdev.h>
   7
   8#include "br_private.h"
   9#include "br_private_tunnel.h"
  10
  11static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
  12
  13static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  14			      const void *ptr)
  15{
  16	const struct net_bridge_vlan *vle = ptr;
  17	u16 vid = *(u16 *)arg->key;
  18
  19	return vle->vid != vid;
  20}
  21
  22static const struct rhashtable_params br_vlan_rht_params = {
  23	.head_offset = offsetof(struct net_bridge_vlan, vnode),
  24	.key_offset = offsetof(struct net_bridge_vlan, vid),
  25	.key_len = sizeof(u16),
  26	.nelem_hint = 3,
  27	.max_size = VLAN_N_VID,
  28	.obj_cmpfn = br_vlan_cmp,
  29	.automatic_shrinking = true,
  30};
  31
  32static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  33{
  34	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  35}
  36
  37static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  38{
  39	if (vg->pvid == vid)
  40		return false;
  41
  42	smp_wmb();
  43	vg->pvid = vid;
  44
  45	return true;
  46}
  47
  48static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  49{
  50	if (vg->pvid != vid)
  51		return false;
  52
  53	smp_wmb();
  54	vg->pvid = 0;
  55
  56	return true;
  57}
  58
  59/* return true if anything changed, false otherwise */
  60static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  61{
  62	struct net_bridge_vlan_group *vg;
  63	u16 old_flags = v->flags;
  64	bool ret;
  65
  66	if (br_vlan_is_master(v))
  67		vg = br_vlan_group(v->br);
  68	else
  69		vg = nbp_vlan_group(v->port);
  70
  71	if (flags & BRIDGE_VLAN_INFO_PVID)
  72		ret = __vlan_add_pvid(vg, v->vid);
  73	else
  74		ret = __vlan_delete_pvid(vg, v->vid);
  75
  76	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  77		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  78	else
  79		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  80
  81	return ret || !!(old_flags ^ v->flags);
  82}
  83
  84static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  85			  struct net_bridge_vlan *v, u16 flags,
  86			  struct netlink_ext_ack *extack)
  87{
  88	int err;
  89
  90	/* Try switchdev op first. In case it is not supported, fallback to
  91	 * 8021q add.
  92	 */
  93	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
  94	if (err == -EOPNOTSUPP)
  95		return vlan_vid_add(dev, br->vlan_proto, v->vid);
  96	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
  97	return err;
  98}
  99
 100static void __vlan_add_list(struct net_bridge_vlan *v)
 101{
 102	struct net_bridge_vlan_group *vg;
 103	struct list_head *headp, *hpos;
 104	struct net_bridge_vlan *vent;
 105
 106	if (br_vlan_is_master(v))
 107		vg = br_vlan_group(v->br);
 108	else
 109		vg = nbp_vlan_group(v->port);
 110
 111	headp = &vg->vlan_list;
 112	list_for_each_prev(hpos, headp) {
 113		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
 114		if (v->vid < vent->vid)
 115			continue;
 116		else
 117			break;
 118	}
 119	list_add_rcu(&v->vlist, hpos);
 120}
 121
 122static void __vlan_del_list(struct net_bridge_vlan *v)
 123{
 124	list_del_rcu(&v->vlist);
 125}
 126
 127static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 128			  const struct net_bridge_vlan *v)
 129{
 130	int err;
 131
 132	/* Try switchdev op first. In case it is not supported, fallback to
 133	 * 8021q del.
 134	 */
 135	err = br_switchdev_port_vlan_del(dev, v->vid);
 136	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
 137		vlan_vid_del(dev, br->vlan_proto, v->vid);
 138	return err == -EOPNOTSUPP ? 0 : err;
 139}
 140
 141/* Returns a master vlan, if it didn't exist it gets created. In all cases
 142 * a reference is taken to the master vlan before returning.
 143 */
 144static struct net_bridge_vlan *
 145br_vlan_get_master(struct net_bridge *br, u16 vid,
 146		   struct netlink_ext_ack *extack)
 147{
 148	struct net_bridge_vlan_group *vg;
 149	struct net_bridge_vlan *masterv;
 150
 151	vg = br_vlan_group(br);
 152	masterv = br_vlan_find(vg, vid);
 153	if (!masterv) {
 154		bool changed;
 155
 156		/* missing global ctx, create it now */
 157		if (br_vlan_add(br, vid, 0, &changed, extack))
 158			return NULL;
 159		masterv = br_vlan_find(vg, vid);
 160		if (WARN_ON(!masterv))
 161			return NULL;
 162		refcount_set(&masterv->refcnt, 1);
 163		return masterv;
 164	}
 165	refcount_inc(&masterv->refcnt);
 166
 167	return masterv;
 168}
 169
 170static void br_master_vlan_rcu_free(struct rcu_head *rcu)
 171{
 172	struct net_bridge_vlan *v;
 173
 174	v = container_of(rcu, struct net_bridge_vlan, rcu);
 175	WARN_ON(!br_vlan_is_master(v));
 176	free_percpu(v->stats);
 177	v->stats = NULL;
 178	kfree(v);
 179}
 180
 181static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 182{
 183	struct net_bridge_vlan_group *vg;
 184
 185	if (!br_vlan_is_master(masterv))
 186		return;
 187
 188	vg = br_vlan_group(masterv->br);
 189	if (refcount_dec_and_test(&masterv->refcnt)) {
 190		rhashtable_remove_fast(&vg->vlan_hash,
 191				       &masterv->vnode, br_vlan_rht_params);
 192		__vlan_del_list(masterv);
 193		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
 194	}
 195}
 196
 197static void nbp_vlan_rcu_free(struct rcu_head *rcu)
 198{
 199	struct net_bridge_vlan *v;
 200
 201	v = container_of(rcu, struct net_bridge_vlan, rcu);
 202	WARN_ON(br_vlan_is_master(v));
 203	/* if we had per-port stats configured then free them here */
 204	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
 205		free_percpu(v->stats);
 206	v->stats = NULL;
 207	kfree(v);
 208}
 209
 210/* This is the shared VLAN add function which works for both ports and bridge
 211 * devices. There are four possible calls to this function in terms of the
 212 * vlan entry type:
 213 * 1. vlan is being added on a port (no master flags, global entry exists)
 214 * 2. vlan is being added on a bridge (both master and brentry flags)
 215 * 3. vlan is being added on a port, but a global entry didn't exist which
 216 *    is being created right now (master flag set, brentry flag unset), the
 217 *    global entry is used for global per-vlan features, but not for filtering
 218 * 4. same as 3 but with both master and brentry flags set so the entry
 219 *    will be used for filtering in both the port and the bridge
 220 */
 221static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 222		      struct netlink_ext_ack *extack)
 223{
 224	struct net_bridge_vlan *masterv = NULL;
 225	struct net_bridge_port *p = NULL;
 226	struct net_bridge_vlan_group *vg;
 227	struct net_device *dev;
 228	struct net_bridge *br;
 229	int err;
 230
 231	if (br_vlan_is_master(v)) {
 232		br = v->br;
 233		dev = br->dev;
 234		vg = br_vlan_group(br);
 235	} else {
 236		p = v->port;
 237		br = p->br;
 238		dev = p->dev;
 239		vg = nbp_vlan_group(p);
 240	}
 241
 242	if (p) {
 243		/* Add VLAN to the device filter if it is supported.
 244		 * This ensures tagged traffic enters the bridge when
 245		 * promiscuous mode is disabled by br_manage_promisc().
 246		 */
 247		err = __vlan_vid_add(dev, br, v, flags, extack);
 248		if (err)
 249			goto out;
 250
 251		/* need to work on the master vlan too */
 252		if (flags & BRIDGE_VLAN_INFO_MASTER) {
 253			bool changed;
 254
 255			err = br_vlan_add(br, v->vid,
 256					  flags | BRIDGE_VLAN_INFO_BRENTRY,
 257					  &changed, extack);
 258			if (err)
 259				goto out_filt;
 260		}
 261
 262		masterv = br_vlan_get_master(br, v->vid, extack);
 263		if (!masterv)
 264			goto out_filt;
 265		v->brvlan = masterv;
 266		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
 267			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 268			if (!v->stats) {
 269				err = -ENOMEM;
 270				goto out_filt;
 271			}
 272			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
 273		} else {
 274			v->stats = masterv->stats;
 275		}
 276	} else {
 277		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
 278		if (err && err != -EOPNOTSUPP)
 279			goto out;
 280	}
 281
 282	/* Add the dev mac and count the vlan only if it's usable */
 283	if (br_vlan_should_use(v)) {
 284		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 285		if (err) {
 286			br_err(br, "failed insert local address into bridge forwarding table\n");
 287			goto out_filt;
 288		}
 289		vg->num_vlans++;
 290	}
 291
 292	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 293					    br_vlan_rht_params);
 294	if (err)
 295		goto out_fdb_insert;
 296
 297	__vlan_add_list(v);
 298	__vlan_add_flags(v, flags);
 299
 300	if (p)
 301		nbp_vlan_set_vlan_dev_state(p, v->vid);
 302out:
 303	return err;
 304
 305out_fdb_insert:
 306	if (br_vlan_should_use(v)) {
 307		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
 308		vg->num_vlans--;
 309	}
 310
 311out_filt:
 312	if (p) {
 313		__vlan_vid_del(dev, br, v);
 314		if (masterv) {
 315			if (v->stats && masterv->stats != v->stats)
 316				free_percpu(v->stats);
 317			v->stats = NULL;
 318
 319			br_vlan_put_master(masterv);
 320			v->brvlan = NULL;
 321		}
 322	} else {
 323		br_switchdev_port_vlan_del(dev, v->vid);
 324	}
 325
 326	goto out;
 327}
 328
 329static int __vlan_del(struct net_bridge_vlan *v)
 330{
 331	struct net_bridge_vlan *masterv = v;
 332	struct net_bridge_vlan_group *vg;
 333	struct net_bridge_port *p = NULL;
 334	int err = 0;
 335
 336	if (br_vlan_is_master(v)) {
 337		vg = br_vlan_group(v->br);
 338	} else {
 339		p = v->port;
 340		vg = nbp_vlan_group(v->port);
 341		masterv = v->brvlan;
 342	}
 343
 344	__vlan_delete_pvid(vg, v->vid);
 345	if (p) {
 346		err = __vlan_vid_del(p->dev, p->br, v);
 347		if (err)
 348			goto out;
 349	} else {
 350		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
 351		if (err && err != -EOPNOTSUPP)
 352			goto out;
 353		err = 0;
 354	}
 355
 356	if (br_vlan_should_use(v)) {
 357		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
 358		vg->num_vlans--;
 359	}
 360
 361	if (masterv != v) {
 362		vlan_tunnel_info_del(vg, v);
 363		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 364				       br_vlan_rht_params);
 365		__vlan_del_list(v);
 366		nbp_vlan_set_vlan_dev_state(p, v->vid);
 367		call_rcu(&v->rcu, nbp_vlan_rcu_free);
 368	}
 369
 370	br_vlan_put_master(masterv);
 371out:
 372	return err;
 373}
 374
 375static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 376{
 377	WARN_ON(!list_empty(&vg->vlan_list));
 378	rhashtable_destroy(&vg->vlan_hash);
 379	vlan_tunnel_deinit(vg);
 380	kfree(vg);
 381}
 382
 383static void __vlan_flush(struct net_bridge_vlan_group *vg)
 384{
 385	struct net_bridge_vlan *vlan, *tmp;
 386
 387	__vlan_delete_pvid(vg, vg->pvid);
 388	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
 389		__vlan_del(vlan);
 390}
 391
 392struct sk_buff *br_handle_vlan(struct net_bridge *br,
 393			       const struct net_bridge_port *p,
 394			       struct net_bridge_vlan_group *vg,
 395			       struct sk_buff *skb)
 396{
 397	struct br_vlan_stats *stats;
 398	struct net_bridge_vlan *v;
 399	u16 vid;
 400
 401	/* If this packet was not filtered at input, let it pass */
 402	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 403		goto out;
 404
 405	/* At this point, we know that the frame was filtered and contains
 406	 * a valid vlan id.  If the vlan id has untagged flag set,
 407	 * send untagged; otherwise, send tagged.
 408	 */
 409	br_vlan_get_tag(skb, &vid);
 410	v = br_vlan_find(vg, vid);
 411	/* Vlan entry must be configured at this point.  The
 412	 * only exception is the bridge is set in promisc mode and the
 413	 * packet is destined for the bridge device.  In this case
 414	 * pass the packet as is.
 415	 */
 416	if (!v || !br_vlan_should_use(v)) {
 417		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 418			goto out;
 419		} else {
 420			kfree_skb(skb);
 421			return NULL;
 422		}
 423	}
 424	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 425		stats = this_cpu_ptr(v->stats);
 426		u64_stats_update_begin(&stats->syncp);
 427		stats->tx_bytes += skb->len;
 428		stats->tx_packets++;
 429		u64_stats_update_end(&stats->syncp);
 430	}
 431
 432	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 433		__vlan_hwaccel_clear_tag(skb);
 434
 435	if (p && (p->flags & BR_VLAN_TUNNEL) &&
 436	    br_handle_egress_vlan_tunnel(skb, v)) {
 437		kfree_skb(skb);
 438		return NULL;
 439	}
 440out:
 441	return skb;
 442}
 443
 444/* Called under RCU */
 445static bool __allowed_ingress(const struct net_bridge *br,
 446			      struct net_bridge_vlan_group *vg,
 447			      struct sk_buff *skb, u16 *vid)
 448{
 449	struct br_vlan_stats *stats;
 450	struct net_bridge_vlan *v;
 451	bool tagged;
 452
 453	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
 454	/* If vlan tx offload is disabled on bridge device and frame was
 455	 * sent from vlan device on the bridge device, it does not have
 456	 * HW accelerated vlan tag.
 457	 */
 458	if (unlikely(!skb_vlan_tag_present(skb) &&
 459		     skb->protocol == br->vlan_proto)) {
 460		skb = skb_vlan_untag(skb);
 461		if (unlikely(!skb))
 462			return false;
 463	}
 464
 465	if (!br_vlan_get_tag(skb, vid)) {
 466		/* Tagged frame */
 467		if (skb->vlan_proto != br->vlan_proto) {
 468			/* Protocol-mismatch, empty out vlan_tci for new tag */
 469			skb_push(skb, ETH_HLEN);
 470			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 471							skb_vlan_tag_get(skb));
 472			if (unlikely(!skb))
 473				return false;
 474
 475			skb_pull(skb, ETH_HLEN);
 476			skb_reset_mac_len(skb);
 477			*vid = 0;
 478			tagged = false;
 479		} else {
 480			tagged = true;
 481		}
 482	} else {
 483		/* Untagged frame */
 484		tagged = false;
 485	}
 486
 487	if (!*vid) {
 488		u16 pvid = br_get_pvid(vg);
 489
 490		/* Frame had a tag with VID 0 or did not have a tag.
 491		 * See if pvid is set on this port.  That tells us which
 492		 * vlan untagged or priority-tagged traffic belongs to.
 493		 */
 494		if (!pvid)
 495			goto drop;
 496
 497		/* PVID is set on this port.  Any untagged or priority-tagged
 498		 * ingress frame is considered to belong to this vlan.
 499		 */
 500		*vid = pvid;
 501		if (likely(!tagged))
 502			/* Untagged Frame. */
 503			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 504		else
 505			/* Priority-tagged Frame.
 506			 * At this point, we know that skb->vlan_tci VID
 507			 * field was 0.
 508			 * We update only VID field and preserve PCP field.
 509			 */
 510			skb->vlan_tci |= pvid;
 511
 512		/* if stats are disabled we can avoid the lookup */
 513		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
 514			return true;
 515	}
 516	v = br_vlan_find(vg, *vid);
 517	if (!v || !br_vlan_should_use(v))
 518		goto drop;
 519
 520	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 521		stats = this_cpu_ptr(v->stats);
 522		u64_stats_update_begin(&stats->syncp);
 523		stats->rx_bytes += skb->len;
 524		stats->rx_packets++;
 525		u64_stats_update_end(&stats->syncp);
 526	}
 527
 528	return true;
 529
 530drop:
 531	kfree_skb(skb);
 532	return false;
 533}
 534
 535bool br_allowed_ingress(const struct net_bridge *br,
 536			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
 537			u16 *vid)
 538{
 539	/* If VLAN filtering is disabled on the bridge, all packets are
 540	 * permitted.
 541	 */
 542	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
 543		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 544		return true;
 545	}
 546
 547	return __allowed_ingress(br, vg, skb, vid);
 548}
 549
 550/* Called under RCU. */
 551bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 552		       const struct sk_buff *skb)
 553{
 554	const struct net_bridge_vlan *v;
 555	u16 vid;
 556
 557	/* If this packet was not filtered at input, let it pass */
 558	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 559		return true;
 560
 561	br_vlan_get_tag(skb, &vid);
 562	v = br_vlan_find(vg, vid);
 563	if (v && br_vlan_should_use(v))
 564		return true;
 565
 566	return false;
 567}
 568
 569/* Called under RCU */
 570bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 571{
 572	struct net_bridge_vlan_group *vg;
 573	struct net_bridge *br = p->br;
 574
 575	/* If filtering was disabled at input, let it pass. */
 576	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
 577		return true;
 578
 579	vg = nbp_vlan_group_rcu(p);
 580	if (!vg || !vg->num_vlans)
 581		return false;
 582
 583	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 584		*vid = 0;
 585
 586	if (!*vid) {
 587		*vid = br_get_pvid(vg);
 588		if (!*vid)
 589			return false;
 590
 591		return true;
 592	}
 593
 594	if (br_vlan_find(vg, *vid))
 595		return true;
 596
 597	return false;
 598}
 599
 600static int br_vlan_add_existing(struct net_bridge *br,
 601				struct net_bridge_vlan_group *vg,
 602				struct net_bridge_vlan *vlan,
 603				u16 flags, bool *changed,
 604				struct netlink_ext_ack *extack)
 605{
 606	int err;
 607
 608	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
 609	if (err && err != -EOPNOTSUPP)
 610		return err;
 611
 612	if (!br_vlan_is_brentry(vlan)) {
 613		/* Trying to change flags of non-existent bridge vlan */
 614		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
 615			err = -EINVAL;
 616			goto err_flags;
 617		}
 618		/* It was only kept for port vlans, now make it real */
 619		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
 620				    vlan->vid);
 621		if (err) {
 622			br_err(br, "failed to insert local address into bridge forwarding table\n");
 623			goto err_fdb_insert;
 624		}
 625
 626		refcount_inc(&vlan->refcnt);
 627		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 628		vg->num_vlans++;
 629		*changed = true;
 630	}
 631
 632	if (__vlan_add_flags(vlan, flags))
 633		*changed = true;
 634
 635	return 0;
 636
 637err_fdb_insert:
 638err_flags:
 639	br_switchdev_port_vlan_del(br->dev, vlan->vid);
 640	return err;
 641}
 642
 643/* Must be protected by RTNL.
 644 * Must be called with vid in range from 1 to 4094 inclusive.
 645 * changed must be true only if the vlan was created or updated
 646 */
 647int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
 648		struct netlink_ext_ack *extack)
 649{
 650	struct net_bridge_vlan_group *vg;
 651	struct net_bridge_vlan *vlan;
 652	int ret;
 653
 654	ASSERT_RTNL();
 655
 656	*changed = false;
 657	vg = br_vlan_group(br);
 658	vlan = br_vlan_find(vg, vid);
 659	if (vlan)
 660		return br_vlan_add_existing(br, vg, vlan, flags, changed,
 661					    extack);
 662
 663	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 664	if (!vlan)
 665		return -ENOMEM;
 666
 667	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 668	if (!vlan->stats) {
 669		kfree(vlan);
 670		return -ENOMEM;
 671	}
 672	vlan->vid = vid;
 673	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 674	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
 675	vlan->br = br;
 676	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 677		refcount_set(&vlan->refcnt, 1);
 678	ret = __vlan_add(vlan, flags, extack);
 679	if (ret) {
 680		free_percpu(vlan->stats);
 681		kfree(vlan);
 682	} else {
 683		*changed = true;
 684	}
 685
 686	return ret;
 687}
 688
 689/* Must be protected by RTNL.
 690 * Must be called with vid in range from 1 to 4094 inclusive.
 691 */
 692int br_vlan_delete(struct net_bridge *br, u16 vid)
 693{
 694	struct net_bridge_vlan_group *vg;
 695	struct net_bridge_vlan *v;
 696
 697	ASSERT_RTNL();
 698
 699	vg = br_vlan_group(br);
 700	v = br_vlan_find(vg, vid);
 701	if (!v || !br_vlan_is_brentry(v))
 702		return -ENOENT;
 703
 704	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 705	br_fdb_delete_by_port(br, NULL, vid, 0);
 706
 707	vlan_tunnel_info_del(vg, v);
 708
 709	return __vlan_del(v);
 710}
 711
 712void br_vlan_flush(struct net_bridge *br)
 713{
 714	struct net_bridge_vlan_group *vg;
 715
 716	ASSERT_RTNL();
 717
 718	vg = br_vlan_group(br);
 719	__vlan_flush(vg);
 720	RCU_INIT_POINTER(br->vlgrp, NULL);
 721	synchronize_rcu();
 722	__vlan_group_free(vg);
 723}
 724
 725struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 726{
 727	if (!vg)
 728		return NULL;
 729
 730	return br_vlan_lookup(&vg->vlan_hash, vid);
 731}
 732
 733/* Must be protected by RTNL. */
 734static void recalculate_group_addr(struct net_bridge *br)
 735{
 736	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
 737		return;
 738
 739	spin_lock_bh(&br->lock);
 740	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 741	    br->vlan_proto == htons(ETH_P_8021Q)) {
 742		/* Bridge Group Address */
 743		br->group_addr[5] = 0x00;
 744	} else { /* vlan_enabled && ETH_P_8021AD */
 745		/* Provider Bridge Group Address */
 746		br->group_addr[5] = 0x08;
 747	}
 748	spin_unlock_bh(&br->lock);
 749}
 750
 751/* Must be protected by RTNL. */
 752void br_recalculate_fwd_mask(struct net_bridge *br)
 753{
 754	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 755	    br->vlan_proto == htons(ETH_P_8021Q))
 756		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 757	else /* vlan_enabled && ETH_P_8021AD */
 758		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
 759					      ~(1u << br->group_addr[5]);
 760}
 761
 762int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 763{
 764	struct switchdev_attr attr = {
 765		.orig_dev = br->dev,
 766		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 767		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 768		.u.vlan_filtering = val,
 769	};
 770	int err;
 771
 772	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
 773		return 0;
 774
 775	err = switchdev_port_attr_set(br->dev, &attr);
 776	if (err && err != -EOPNOTSUPP)
 777		return err;
 778
 779	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
 780	br_manage_promisc(br);
 781	recalculate_group_addr(br);
 782	br_recalculate_fwd_mask(br);
 783
 784	return 0;
 785}
 786
 787int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 788{
 789	return __br_vlan_filter_toggle(br, val);
 790}
 791
 792bool br_vlan_enabled(const struct net_device *dev)
 793{
 794	struct net_bridge *br = netdev_priv(dev);
 795
 796	return br_opt_get(br, BROPT_VLAN_ENABLED);
 797}
 798EXPORT_SYMBOL_GPL(br_vlan_enabled);
 799
 800int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
 801{
 802	struct net_bridge *br = netdev_priv(dev);
 803
 804	*p_proto = ntohs(br->vlan_proto);
 805
 806	return 0;
 807}
 808EXPORT_SYMBOL_GPL(br_vlan_get_proto);
 809
 810int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 811{
 812	int err = 0;
 813	struct net_bridge_port *p;
 814	struct net_bridge_vlan *vlan;
 815	struct net_bridge_vlan_group *vg;
 816	__be16 oldproto;
 817
 818	if (br->vlan_proto == proto)
 819		return 0;
 820
 821	/* Add VLANs for the new proto to the device filter. */
 822	list_for_each_entry(p, &br->port_list, list) {
 823		vg = nbp_vlan_group(p);
 824		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
 825			err = vlan_vid_add(p->dev, proto, vlan->vid);
 826			if (err)
 827				goto err_filt;
 828		}
 829	}
 830
 831	oldproto = br->vlan_proto;
 832	br->vlan_proto = proto;
 833
 834	recalculate_group_addr(br);
 835	br_recalculate_fwd_mask(br);
 836
 837	/* Delete VLANs for the old proto from the device filter. */
 838	list_for_each_entry(p, &br->port_list, list) {
 839		vg = nbp_vlan_group(p);
 840		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 841			vlan_vid_del(p->dev, oldproto, vlan->vid);
 842	}
 843
 844	return 0;
 845
 846err_filt:
 847	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
 848		vlan_vid_del(p->dev, proto, vlan->vid);
 849
 850	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 851		vg = nbp_vlan_group(p);
 852		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 853			vlan_vid_del(p->dev, proto, vlan->vid);
 854	}
 855
 856	return err;
 857}
 858
 859int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 860{
 861	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
 862		return -EPROTONOSUPPORT;
 863
 864	return __br_vlan_set_proto(br, htons(val));
 865}
 866
 867int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
 868{
 869	switch (val) {
 870	case 0:
 871	case 1:
 872		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
 873		break;
 874	default:
 875		return -EINVAL;
 876	}
 877
 878	return 0;
 879}
 880
 881int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
 882{
 883	struct net_bridge_port *p;
 884
 885	/* allow to change the option if there are no port vlans configured */
 886	list_for_each_entry(p, &br->port_list, list) {
 887		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
 888
 889		if (vg->num_vlans)
 890			return -EBUSY;
 891	}
 892
 893	switch (val) {
 894	case 0:
 895	case 1:
 896		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
 897		break;
 898	default:
 899		return -EINVAL;
 900	}
 901
 902	return 0;
 903}
 904
 905static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 906{
 907	struct net_bridge_vlan *v;
 908
 909	if (vid != vg->pvid)
 910		return false;
 911
 912	v = br_vlan_lookup(&vg->vlan_hash, vid);
 913	if (v && br_vlan_should_use(v) &&
 914	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
 915		return true;
 916
 917	return false;
 918}
 919
 920static void br_vlan_disable_default_pvid(struct net_bridge *br)
 921{
 922	struct net_bridge_port *p;
 923	u16 pvid = br->default_pvid;
 924
 925	/* Disable default_pvid on all ports where it is still
 926	 * configured.
 927	 */
 928	if (vlan_default_pvid(br_vlan_group(br), pvid))
 929		br_vlan_delete(br, pvid);
 930
 931	list_for_each_entry(p, &br->port_list, list) {
 932		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
 933			nbp_vlan_delete(p, pvid);
 934	}
 935
 936	br->default_pvid = 0;
 937}
 938
 939int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
 940			       struct netlink_ext_ack *extack)
 941{
 942	const struct net_bridge_vlan *pvent;
 943	struct net_bridge_vlan_group *vg;
 944	struct net_bridge_port *p;
 945	unsigned long *changed;
 946	bool vlchange;
 947	u16 old_pvid;
 948	int err = 0;
 949
 950	if (!pvid) {
 951		br_vlan_disable_default_pvid(br);
 952		return 0;
 953	}
 954
 955	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
 956	if (!changed)
 957		return -ENOMEM;
 958
 959	old_pvid = br->default_pvid;
 960
 961	/* Update default_pvid config only if we do not conflict with
 962	 * user configuration.
 963	 */
 964	vg = br_vlan_group(br);
 965	pvent = br_vlan_find(vg, pvid);
 966	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
 967	    (!pvent || !br_vlan_should_use(pvent))) {
 968		err = br_vlan_add(br, pvid,
 969				  BRIDGE_VLAN_INFO_PVID |
 970				  BRIDGE_VLAN_INFO_UNTAGGED |
 971				  BRIDGE_VLAN_INFO_BRENTRY,
 972				  &vlchange, extack);
 973		if (err)
 974			goto out;
 975		br_vlan_delete(br, old_pvid);
 976		set_bit(0, changed);
 977	}
 978
 979	list_for_each_entry(p, &br->port_list, list) {
 980		/* Update default_pvid config only if we do not conflict with
 981		 * user configuration.
 982		 */
 983		vg = nbp_vlan_group(p);
 984		if ((old_pvid &&
 985		     !vlan_default_pvid(vg, old_pvid)) ||
 986		    br_vlan_find(vg, pvid))
 987			continue;
 988
 989		err = nbp_vlan_add(p, pvid,
 990				   BRIDGE_VLAN_INFO_PVID |
 991				   BRIDGE_VLAN_INFO_UNTAGGED,
 992				   &vlchange, extack);
 993		if (err)
 994			goto err_port;
 995		nbp_vlan_delete(p, old_pvid);
 996		set_bit(p->port_no, changed);
 997	}
 998
 999	br->default_pvid = pvid;
1000
1001out:
1002	bitmap_free(changed);
1003	return err;
1004
1005err_port:
1006	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1007		if (!test_bit(p->port_no, changed))
1008			continue;
1009
1010		if (old_pvid)
1011			nbp_vlan_add(p, old_pvid,
1012				     BRIDGE_VLAN_INFO_PVID |
1013				     BRIDGE_VLAN_INFO_UNTAGGED,
1014				     &vlchange, NULL);
1015		nbp_vlan_delete(p, pvid);
1016	}
1017
1018	if (test_bit(0, changed)) {
1019		if (old_pvid)
1020			br_vlan_add(br, old_pvid,
1021				    BRIDGE_VLAN_INFO_PVID |
1022				    BRIDGE_VLAN_INFO_UNTAGGED |
1023				    BRIDGE_VLAN_INFO_BRENTRY,
1024				    &vlchange, NULL);
1025		br_vlan_delete(br, pvid);
1026	}
1027	goto out;
1028}
1029
1030int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1031{
1032	u16 pvid = val;
1033	int err = 0;
1034
1035	if (val >= VLAN_VID_MASK)
1036		return -EINVAL;
1037
1038	if (pvid == br->default_pvid)
1039		goto out;
1040
1041	/* Only allow default pvid change when filtering is disabled */
1042	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1043		pr_info_once("Please disable vlan filtering to change default_pvid\n");
1044		err = -EPERM;
1045		goto out;
1046	}
1047	err = __br_vlan_set_default_pvid(br, pvid, NULL);
1048out:
1049	return err;
1050}
1051
1052int br_vlan_init(struct net_bridge *br)
1053{
1054	struct net_bridge_vlan_group *vg;
1055	int ret = -ENOMEM;
1056
1057	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1058	if (!vg)
1059		goto out;
1060	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1061	if (ret)
1062		goto err_rhtbl;
1063	ret = vlan_tunnel_init(vg);
1064	if (ret)
1065		goto err_tunnel_init;
1066	INIT_LIST_HEAD(&vg->vlan_list);
1067	br->vlan_proto = htons(ETH_P_8021Q);
1068	br->default_pvid = 1;
1069	rcu_assign_pointer(br->vlgrp, vg);
1070
1071out:
1072	return ret;
1073
1074err_tunnel_init:
1075	rhashtable_destroy(&vg->vlan_hash);
1076err_rhtbl:
1077	kfree(vg);
1078
1079	goto out;
1080}
1081
1082int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1083{
1084	struct switchdev_attr attr = {
1085		.orig_dev = p->br->dev,
1086		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1087		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1088		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1089	};
1090	struct net_bridge_vlan_group *vg;
1091	int ret = -ENOMEM;
1092
1093	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1094	if (!vg)
1095		goto out;
1096
1097	ret = switchdev_port_attr_set(p->dev, &attr);
1098	if (ret && ret != -EOPNOTSUPP)
1099		goto err_vlan_enabled;
1100
1101	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1102	if (ret)
1103		goto err_rhtbl;
1104	ret = vlan_tunnel_init(vg);
1105	if (ret)
1106		goto err_tunnel_init;
1107	INIT_LIST_HEAD(&vg->vlan_list);
1108	rcu_assign_pointer(p->vlgrp, vg);
1109	if (p->br->default_pvid) {
1110		bool changed;
1111
1112		ret = nbp_vlan_add(p, p->br->default_pvid,
1113				   BRIDGE_VLAN_INFO_PVID |
1114				   BRIDGE_VLAN_INFO_UNTAGGED,
1115				   &changed, extack);
1116		if (ret)
1117			goto err_vlan_add;
1118	}
1119out:
1120	return ret;
1121
1122err_vlan_add:
1123	RCU_INIT_POINTER(p->vlgrp, NULL);
1124	synchronize_rcu();
1125	vlan_tunnel_deinit(vg);
1126err_tunnel_init:
1127	rhashtable_destroy(&vg->vlan_hash);
1128err_rhtbl:
1129err_vlan_enabled:
1130	kfree(vg);
1131
1132	goto out;
1133}
1134
1135/* Must be protected by RTNL.
1136 * Must be called with vid in range from 1 to 4094 inclusive.
1137 * changed must be true only if the vlan was created or updated
1138 */
1139int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1140		 bool *changed, struct netlink_ext_ack *extack)
1141{
1142	struct net_bridge_vlan *vlan;
1143	int ret;
1144
1145	ASSERT_RTNL();
1146
1147	*changed = false;
1148	vlan = br_vlan_find(nbp_vlan_group(port), vid);
1149	if (vlan) {
1150		/* Pass the flags to the hardware bridge */
1151		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1152		if (ret && ret != -EOPNOTSUPP)
1153			return ret;
1154		*changed = __vlan_add_flags(vlan, flags);
1155
1156		return 0;
1157	}
1158
1159	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1160	if (!vlan)
1161		return -ENOMEM;
1162
1163	vlan->vid = vid;
1164	vlan->port = port;
1165	ret = __vlan_add(vlan, flags, extack);
1166	if (ret)
1167		kfree(vlan);
1168	else
1169		*changed = true;
1170
1171	return ret;
1172}
1173
1174/* Must be protected by RTNL.
1175 * Must be called with vid in range from 1 to 4094 inclusive.
1176 */
1177int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1178{
1179	struct net_bridge_vlan *v;
1180
1181	ASSERT_RTNL();
1182
1183	v = br_vlan_find(nbp_vlan_group(port), vid);
1184	if (!v)
1185		return -ENOENT;
1186	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1187	br_fdb_delete_by_port(port->br, port, vid, 0);
1188
1189	return __vlan_del(v);
1190}
1191
1192void nbp_vlan_flush(struct net_bridge_port *port)
1193{
1194	struct net_bridge_vlan_group *vg;
1195
1196	ASSERT_RTNL();
1197
1198	vg = nbp_vlan_group(port);
1199	__vlan_flush(vg);
1200	RCU_INIT_POINTER(port->vlgrp, NULL);
1201	synchronize_rcu();
1202	__vlan_group_free(vg);
1203}
1204
1205void br_vlan_get_stats(const struct net_bridge_vlan *v,
1206		       struct br_vlan_stats *stats)
1207{
1208	int i;
1209
1210	memset(stats, 0, sizeof(*stats));
1211	for_each_possible_cpu(i) {
1212		u64 rxpackets, rxbytes, txpackets, txbytes;
1213		struct br_vlan_stats *cpu_stats;
1214		unsigned int start;
1215
1216		cpu_stats = per_cpu_ptr(v->stats, i);
1217		do {
1218			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1219			rxpackets = cpu_stats->rx_packets;
1220			rxbytes = cpu_stats->rx_bytes;
1221			txbytes = cpu_stats->tx_bytes;
1222			txpackets = cpu_stats->tx_packets;
1223		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1224
1225		stats->rx_packets += rxpackets;
1226		stats->rx_bytes += rxbytes;
1227		stats->tx_bytes += txbytes;
1228		stats->tx_packets += txpackets;
1229	}
1230}
1231
1232static int __br_vlan_get_pvid(const struct net_device *dev,
1233			      struct net_bridge_port *p, u16 *p_pvid)
1234{
1235	struct net_bridge_vlan_group *vg;
1236
1237	if (p)
1238		vg = nbp_vlan_group(p);
1239	else if (netif_is_bridge_master(dev))
1240		vg = br_vlan_group(netdev_priv(dev));
1241	else
1242		return -EINVAL;
1243
1244	*p_pvid = br_get_pvid(vg);
1245	return 0;
1246}
1247
1248int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1249{
1250	ASSERT_RTNL();
1251
1252	return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
1253}
1254EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1255
1256int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1257{
1258	return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
1259}
1260EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
1261
1262int br_vlan_get_info(const struct net_device *dev, u16 vid,
1263		     struct bridge_vlan_info *p_vinfo)
1264{
1265	struct net_bridge_vlan_group *vg;
1266	struct net_bridge_vlan *v;
1267	struct net_bridge_port *p;
1268
1269	ASSERT_RTNL();
1270	p = br_port_get_check_rtnl(dev);
1271	if (p)
1272		vg = nbp_vlan_group(p);
1273	else if (netif_is_bridge_master(dev))
1274		vg = br_vlan_group(netdev_priv(dev));
1275	else
1276		return -EINVAL;
1277
1278	v = br_vlan_find(vg, vid);
1279	if (!v)
1280		return -ENOENT;
1281
1282	p_vinfo->vid = vid;
1283	p_vinfo->flags = v->flags;
1284	if (vid == br_get_pvid(vg))
1285		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1286	return 0;
1287}
1288EXPORT_SYMBOL_GPL(br_vlan_get_info);
1289
1290static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1291{
1292	return is_vlan_dev(dev) &&
1293		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1294}
1295
1296static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1297				       __always_unused void *data)
1298{
1299	return br_vlan_is_bind_vlan_dev(dev);
1300}
1301
1302static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1303{
1304	int found;
1305
1306	rcu_read_lock();
1307	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1308					      NULL);
1309	rcu_read_unlock();
1310
1311	return !!found;
1312}
1313
1314struct br_vlan_bind_walk_data {
1315	u16 vid;
1316	struct net_device *result;
1317};
1318
1319static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1320					  void *data_in)
1321{
1322	struct br_vlan_bind_walk_data *data = data_in;
1323	int found = 0;
1324
1325	if (br_vlan_is_bind_vlan_dev(dev) &&
1326	    vlan_dev_priv(dev)->vlan_id == data->vid) {
1327		data->result = dev;
1328		found = 1;
1329	}
1330
1331	return found;
1332}
1333
1334static struct net_device *
1335br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1336{
1337	struct br_vlan_bind_walk_data data = {
1338		.vid = vid,
1339	};
1340
1341	rcu_read_lock();
1342	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1343				      &data);
1344	rcu_read_unlock();
1345
1346	return data.result;
1347}
1348
1349static bool br_vlan_is_dev_up(const struct net_device *dev)
1350{
1351	return  !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1352}
1353
1354static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1355				       struct net_device *vlan_dev)
1356{
1357	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1358	struct net_bridge_vlan_group *vg;
1359	struct net_bridge_port *p;
1360	bool has_carrier = false;
1361
1362	if (!netif_carrier_ok(br->dev)) {
1363		netif_carrier_off(vlan_dev);
1364		return;
1365	}
1366
1367	list_for_each_entry(p, &br->port_list, list) {
1368		vg = nbp_vlan_group(p);
1369		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1370			has_carrier = true;
1371			break;
1372		}
1373	}
1374
1375	if (has_carrier)
1376		netif_carrier_on(vlan_dev);
1377	else
1378		netif_carrier_off(vlan_dev);
1379}
1380
1381static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1382{
1383	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1384	struct net_bridge_vlan *vlan;
1385	struct net_device *vlan_dev;
1386
1387	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1388		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1389							   vlan->vid);
1390		if (vlan_dev) {
1391			if (br_vlan_is_dev_up(p->dev)) {
1392				if (netif_carrier_ok(p->br->dev))
1393					netif_carrier_on(vlan_dev);
1394			} else {
1395				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1396			}
1397		}
1398	}
1399}
1400
1401static void br_vlan_upper_change(struct net_device *dev,
1402				 struct net_device *upper_dev,
1403				 bool linking)
1404{
1405	struct net_bridge *br = netdev_priv(dev);
1406
1407	if (!br_vlan_is_bind_vlan_dev(upper_dev))
1408		return;
1409
1410	if (linking) {
1411		br_vlan_set_vlan_dev_state(br, upper_dev);
1412		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1413	} else {
1414		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1415			      br_vlan_has_upper_bind_vlan_dev(dev));
1416	}
1417}
1418
1419struct br_vlan_link_state_walk_data {
1420	struct net_bridge *br;
1421};
1422
1423static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1424					void *data_in)
1425{
1426	struct br_vlan_link_state_walk_data *data = data_in;
1427
1428	if (br_vlan_is_bind_vlan_dev(vlan_dev))
1429		br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1430
1431	return 0;
1432}
1433
1434static void br_vlan_link_state_change(struct net_device *dev,
1435				      struct net_bridge *br)
1436{
1437	struct br_vlan_link_state_walk_data data = {
1438		.br = br
1439	};
1440
1441	rcu_read_lock();
1442	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1443				      &data);
1444	rcu_read_unlock();
1445}
1446
1447/* Must be protected by RTNL. */
1448static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1449{
1450	struct net_device *vlan_dev;
1451
1452	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1453		return;
1454
1455	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1456	if (vlan_dev)
1457		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1458}
1459
1460/* Must be protected by RTNL. */
1461int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1462{
1463	struct netdev_notifier_changeupper_info *info;
1464	struct net_bridge *br = netdev_priv(dev);
1465	bool changed;
1466	int ret = 0;
1467
1468	switch (event) {
1469	case NETDEV_REGISTER:
1470		ret = br_vlan_add(br, br->default_pvid,
1471				  BRIDGE_VLAN_INFO_PVID |
1472				  BRIDGE_VLAN_INFO_UNTAGGED |
1473				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1474		break;
1475	case NETDEV_UNREGISTER:
1476		br_vlan_delete(br, br->default_pvid);
1477		break;
1478	case NETDEV_CHANGEUPPER:
1479		info = ptr;
1480		br_vlan_upper_change(dev, info->upper_dev, info->linking);
1481		break;
1482
1483	case NETDEV_CHANGE:
1484	case NETDEV_UP:
1485		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1486			break;
1487		br_vlan_link_state_change(dev, br);
1488		break;
1489	}
1490
1491	return ret;
1492}
1493
1494/* Must be protected by RTNL. */
1495void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1496{
1497	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1498		return;
1499
1500	switch (event) {
1501	case NETDEV_CHANGE:
1502	case NETDEV_DOWN:
1503	case NETDEV_UP:
1504		br_vlan_set_all_vlan_dev_state(p);
1505		break;
1506	}
1507}
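
The br_vlan_* helpers exported above with EXPORT_SYMBOL_GPL (br_vlan_enabled, br_vlan_get_proto, br_vlan_get_pvid, br_vlan_get_pvid_rcu and br_vlan_get_info) are the interface other kernel code, for example switchdev drivers, uses to query bridge VLAN state. Below is a minimal caller sketch, not part of br_vlan.c: the function name example_show_port_vlan() and its calling context are assumptions for illustration; only the br_vlan_* calls, struct bridge_vlan_info and the BRIDGE_VLAN_INFO_* flags come from the listing above, and the caller is assumed to hold rtnl_lock() as br_vlan_get_pvid() and br_vlan_get_info() require.

#include <linux/if_bridge.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rtnetlink.h>

/* Hypothetical helper: report how @vid is configured on bridge port @dev. */
static void example_show_port_vlan(struct net_device *dev, u16 vid)
{
	struct bridge_vlan_info vinfo;
	u16 pvid = 0;

	ASSERT_RTNL();	/* br_vlan_get_pvid()/br_vlan_get_info() expect RTNL */

	if (br_vlan_get_info(dev, vid, &vinfo)) {
		pr_info("%s: vid %u not configured\n", dev->name, vid);
		return;
	}
	br_vlan_get_pvid(dev, &pvid);

	pr_info("%s: vid %u%s%s, port pvid %u\n", dev->name, vinfo.vid,
		vinfo.flags & BRIDGE_VLAN_INFO_PVID ? " pvid" : "",
		vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED ? " untagged" : "",
		pvid);
}
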
net/bridge/br_vlan.c (Linux v4.6)
 
   1#include <linux/kernel.h>
   2#include <linux/netdevice.h>
   3#include <linux/rtnetlink.h>
   4#include <linux/slab.h>
   5#include <net/switchdev.h>
   6
   7#include "br_private.h"
   8
   9static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  10			      const void *ptr)
  11{
  12	const struct net_bridge_vlan *vle = ptr;
  13	u16 vid = *(u16 *)arg->key;
  14
  15	return vle->vid != vid;
  16}
  17
  18static const struct rhashtable_params br_vlan_rht_params = {
  19	.head_offset = offsetof(struct net_bridge_vlan, vnode),
  20	.key_offset = offsetof(struct net_bridge_vlan, vid),
  21	.key_len = sizeof(u16),
  22	.nelem_hint = 3,
  23	.locks_mul = 1,
  24	.max_size = VLAN_N_VID,
  25	.obj_cmpfn = br_vlan_cmp,
  26	.automatic_shrinking = true,
  27};
  28
  29static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  30{
  31	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  32}
  33
  34static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  35{
  36	if (vg->pvid == vid)
  37		return;
  38
  39	smp_wmb();
  40	vg->pvid = vid;
  41}
  42
  43static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  44{
  45	if (vg->pvid != vid)
  46		return;
  47
  48	smp_wmb();
  49	vg->pvid = 0;
  50}
  51
  52static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  53{
  54	struct net_bridge_vlan_group *vg;
  55
  56	if (br_vlan_is_master(v))
  57		vg = br_vlan_group(v->br);
  58	else
  59		vg = nbp_vlan_group(v->port);
  60
  61	if (flags & BRIDGE_VLAN_INFO_PVID)
  62		__vlan_add_pvid(vg, v->vid);
  63	else
  64		__vlan_delete_pvid(vg, v->vid);
  65
  66	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  67		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  68	else
  69		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  70}
  71
  72static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  73			  u16 vid, u16 flags)
  74{
  75	struct switchdev_obj_port_vlan v = {
  76		.obj.orig_dev = dev,
  77		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  78		.flags = flags,
  79		.vid_begin = vid,
  80		.vid_end = vid,
  81	};
  82	int err;
  83
  84	/* Try switchdev op first. In case it is not supported, fallback to
  85	 * 8021q add.
  86	 */
  87	err = switchdev_port_obj_add(dev, &v.obj);
  88	if (err == -EOPNOTSUPP)
  89		return vlan_vid_add(dev, br->vlan_proto, vid);
  90	return err;
  91}
  92
  93static void __vlan_add_list(struct net_bridge_vlan *v)
  94{
  95	struct net_bridge_vlan_group *vg;
  96	struct list_head *headp, *hpos;
  97	struct net_bridge_vlan *vent;
  98
  99	if (br_vlan_is_master(v))
 100		vg = br_vlan_group(v->br);
 101	else
 102		vg = nbp_vlan_group(v->port);
 103
 104	headp = &vg->vlan_list;
 105	list_for_each_prev(hpos, headp) {
 106		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
 107		if (v->vid < vent->vid)
 108			continue;
 109		else
 110			break;
 111	}
 112	list_add_rcu(&v->vlist, hpos);
 113}
 114
 115static void __vlan_del_list(struct net_bridge_vlan *v)
 116{
 117	list_del_rcu(&v->vlist);
 118}
 119
 120static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 121			  u16 vid)
 122{
 123	struct switchdev_obj_port_vlan v = {
 124		.obj.orig_dev = dev,
 125		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 126		.vid_begin = vid,
 127		.vid_end = vid,
 128	};
 129	int err;
 130
 131	/* Try switchdev op first. In case it is not supported, fallback to
 132	 * 8021q del.
 133	 */
 134	err = switchdev_port_obj_del(dev, &v.obj);
 135	if (err == -EOPNOTSUPP) {
 136		vlan_vid_del(dev, br->vlan_proto, vid);
 137		return 0;
 138	}
 139	return err;
 140}
 141
 142/* Returns a master vlan, if it didn't exist it gets created. In all cases
 143 * a reference is taken to the master vlan before returning.
 144 */
 145static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
 146{
 147	struct net_bridge_vlan_group *vg;
 148	struct net_bridge_vlan *masterv;
 149
 150	vg = br_vlan_group(br);
 151	masterv = br_vlan_find(vg, vid);
 152	if (!masterv) {
 153		/* missing global ctx, create it now */
 154		if (br_vlan_add(br, vid, 0))
 155			return NULL;
 156		masterv = br_vlan_find(vg, vid);
 157		if (WARN_ON(!masterv))
 158			return NULL;
 159	}
 160	atomic_inc(&masterv->refcnt);
 161
 162	return masterv;
 163}
 164
 165static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 166{
 167	struct net_bridge_vlan_group *vg;
 168
 169	if (!br_vlan_is_master(masterv))
 170		return;
 171
 172	vg = br_vlan_group(masterv->br);
 173	if (atomic_dec_and_test(&masterv->refcnt)) {
 174		rhashtable_remove_fast(&vg->vlan_hash,
 175				       &masterv->vnode, br_vlan_rht_params);
 176		__vlan_del_list(masterv);
 177		kfree_rcu(masterv, rcu);
 178	}
 179}
 180
 181/* This is the shared VLAN add function which works for both ports and bridge
 182 * devices. There are four possible calls to this function in terms of the
 183 * vlan entry type:
 184 * 1. vlan is being added on a port (no master flags, global entry exists)
 185 * 2. vlan is being added on a bridge (both master and brentry flags)
 186 * 3. vlan is being added on a port, but a global entry didn't exist which
 187 *    is being created right now (master flag set, brentry flag unset), the
 188 *    global entry is used for global per-vlan features, but not for filtering
 189 * 4. same as 3 but with both master and brentry flags set so the entry
 190 *    will be used for filtering in both the port and the bridge
 191 */
 192static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
 193{
 194	struct net_bridge_vlan *masterv = NULL;
 195	struct net_bridge_port *p = NULL;
 196	struct net_bridge_vlan_group *vg;
 197	struct net_device *dev;
 198	struct net_bridge *br;
 199	int err;
 200
 201	if (br_vlan_is_master(v)) {
 202		br = v->br;
 203		dev = br->dev;
 204		vg = br_vlan_group(br);
 205	} else {
 206		p = v->port;
 207		br = p->br;
 208		dev = p->dev;
 209		vg = nbp_vlan_group(p);
 210	}
 211
 212	if (p) {
 213		/* Add VLAN to the device filter if it is supported.
 214		 * This ensures tagged traffic enters the bridge when
 215		 * promiscuous mode is disabled by br_manage_promisc().
 216		 */
 217		err = __vlan_vid_add(dev, br, v->vid, flags);
 218		if (err)
 219			goto out;
 220
 221		/* need to work on the master vlan too */
 222		if (flags & BRIDGE_VLAN_INFO_MASTER) {
 223			err = br_vlan_add(br, v->vid, flags |
 224						      BRIDGE_VLAN_INFO_BRENTRY);
 225			if (err)
 226				goto out_filt;
 227		}
 228
 229		masterv = br_vlan_get_master(br, v->vid);
 230		if (!masterv)
 231			goto out_filt;
 232		v->brvlan = masterv;
 233	}
 234
 235	/* Add the dev mac and count the vlan only if it's usable */
 236	if (br_vlan_should_use(v)) {
 237		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 238		if (err) {
 239			br_err(br, "failed insert local address into bridge forwarding table\n");
 240			goto out_filt;
 241		}
 242		vg->num_vlans++;
 243	}
 244
 245	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 246					    br_vlan_rht_params);
 247	if (err)
 248		goto out_fdb_insert;
 249
 250	__vlan_add_list(v);
 251	__vlan_add_flags(v, flags);
 252out:
 253	return err;
 254
 255out_fdb_insert:
 256	if (br_vlan_should_use(v)) {
 257		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
 258		vg->num_vlans--;
 259	}
 260
 261out_filt:
 262	if (p) {
 263		__vlan_vid_del(dev, br, v->vid);
 264		if (masterv) {
 265			br_vlan_put_master(masterv);
 266			v->brvlan = NULL;
 267		}
 268	}
 269
 270	goto out;
 271}
 272
 273static int __vlan_del(struct net_bridge_vlan *v)
 274{
 275	struct net_bridge_vlan *masterv = v;
 276	struct net_bridge_vlan_group *vg;
 277	struct net_bridge_port *p = NULL;
 278	int err = 0;
 279
 280	if (br_vlan_is_master(v)) {
 281		vg = br_vlan_group(v->br);
 282	} else {
 283		p = v->port;
 284		vg = nbp_vlan_group(v->port);
 285		masterv = v->brvlan;
 286	}
 287
 288	__vlan_delete_pvid(vg, v->vid);
 289	if (p) {
 290		err = __vlan_vid_del(p->dev, p->br, v->vid);
 291		if (err)
 292			goto out;
 293	}
 294
 295	if (br_vlan_should_use(v)) {
 296		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
 297		vg->num_vlans--;
 298	}
 299
 300	if (masterv != v) {
 301		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 302				       br_vlan_rht_params);
 303		__vlan_del_list(v);
 304		kfree_rcu(v, rcu);
 305	}
 306
 307	br_vlan_put_master(masterv);
 308out:
 309	return err;
 310}
 311
 312static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 313{
 314	WARN_ON(!list_empty(&vg->vlan_list));
 315	rhashtable_destroy(&vg->vlan_hash);
 316	kfree(vg);
 317}
 318
 319static void __vlan_flush(struct net_bridge_vlan_group *vg)
 320{
 321	struct net_bridge_vlan *vlan, *tmp;
 322
 323	__vlan_delete_pvid(vg, vg->pvid);
 324	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
 325		__vlan_del(vlan);
 326}
 327
 328struct sk_buff *br_handle_vlan(struct net_bridge *br,
 329			       struct net_bridge_vlan_group *vg,
 330			       struct sk_buff *skb)
 331{
 332	struct net_bridge_vlan *v;
 333	u16 vid;
 334
 335	/* If this packet was not filtered at input, let it pass */
 336	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 337		goto out;
 338
 339	/* At this point, we know that the frame was filtered and contains
 340	 * a valid vlan id.  If the vlan id has untagged flag set,
 341	 * send untagged; otherwise, send tagged.
 342	 */
 343	br_vlan_get_tag(skb, &vid);
 344	v = br_vlan_find(vg, vid);
 345	/* Vlan entry must be configured at this point.  The
 346	 * only exception is the bridge is set in promisc mode and the
 347	 * packet is destined for the bridge device.  In this case
 348	 * pass the packet as is.
 349	 */
 350	if (!v || !br_vlan_should_use(v)) {
 351		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 352			goto out;
 353		} else {
 354			kfree_skb(skb);
 355			return NULL;
 356		}
 357	}
 358	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 359		skb->vlan_tci = 0;
 360
 361out:
 362	return skb;
 363}
 364
 365/* Called under RCU */
 366static bool __allowed_ingress(struct net_bridge_vlan_group *vg, __be16 proto,
 367			      struct sk_buff *skb, u16 *vid)
 368{
 369	const struct net_bridge_vlan *v;
 370	bool tagged;
 371
 372	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
 373	/* If vlan tx offload is disabled on bridge device and frame was
 374	 * sent from vlan device on the bridge device, it does not have
 375	 * HW accelerated vlan tag.
 376	 */
 377	if (unlikely(!skb_vlan_tag_present(skb) &&
 378		     skb->protocol == proto)) {
 379		skb = skb_vlan_untag(skb);
 380		if (unlikely(!skb))
 381			return false;
 382	}
 383
 384	if (!br_vlan_get_tag(skb, vid)) {
 385		/* Tagged frame */
 386		if (skb->vlan_proto != proto) {
 387			/* Protocol-mismatch, empty out vlan_tci for new tag */
 388			skb_push(skb, ETH_HLEN);
 389			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 390							skb_vlan_tag_get(skb));
 391			if (unlikely(!skb))
 392				return false;
 393
 394			skb_pull(skb, ETH_HLEN);
 395			skb_reset_mac_len(skb);
 396			*vid = 0;
 397			tagged = false;
 398		} else {
 399			tagged = true;
 400		}
 401	} else {
 402		/* Untagged frame */
 403		tagged = false;
 404	}
 405
 406	if (!*vid) {
 407		u16 pvid = br_get_pvid(vg);
 408
 409		/* Frame had a tag with VID 0 or did not have a tag.
 410		 * See if pvid is set on this port.  That tells us which
 411		 * vlan untagged or priority-tagged traffic belongs to.
 412		 */
 413		if (!pvid)
 414			goto drop;
 415
 416		/* PVID is set on this port.  Any untagged or priority-tagged
 417		 * ingress frame is considered to belong to this vlan.
 418		 */
 419		*vid = pvid;
 420		if (likely(!tagged))
 421			/* Untagged Frame. */
 422			__vlan_hwaccel_put_tag(skb, proto, pvid);
 423		else
 424			/* Priority-tagged Frame.
 425			 * At this point, We know that skb->vlan_tci had
 426			 * VLAN_TAG_PRESENT bit and its VID field was 0x000.
 427			 * We update only VID field and preserve PCP field.
 428			 */
 429			skb->vlan_tci |= pvid;
 430
 431		return true;
 432	}
 433
 434	/* Frame had a valid vlan tag.  See if vlan is allowed */
 435	v = br_vlan_find(vg, *vid);
 436	if (v && br_vlan_should_use(v))
 437		return true;
 438drop:
 439	kfree_skb(skb);
 440	return false;
 441}
 442
 443bool br_allowed_ingress(const struct net_bridge *br,
 444			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
 445			u16 *vid)
 446{
 447	/* If VLAN filtering is disabled on the bridge, all packets are
 448	 * permitted.
 449	 */
 450	if (!br->vlan_enabled) {
 451		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 452		return true;
 453	}
 454
 455	return __allowed_ingress(vg, br->vlan_proto, skb, vid);
 456}
 457
 458/* Called under RCU. */
 459bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 460		       const struct sk_buff *skb)
 461{
 462	const struct net_bridge_vlan *v;
 463	u16 vid;
 464
 465	/* If this packet was not filtered at input, let it pass */
 466	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 467		return true;
 468
 469	br_vlan_get_tag(skb, &vid);
 470	v = br_vlan_find(vg, vid);
 471	if (v && br_vlan_should_use(v))
 472		return true;
 473
 474	return false;
 475}
 476
 477/* Called under RCU */
 478bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 479{
 480	struct net_bridge_vlan_group *vg;
 481	struct net_bridge *br = p->br;
 482
 483	/* If filtering was disabled at input, let it pass. */
 484	if (!br->vlan_enabled)
 485		return true;
 486
 487	vg = nbp_vlan_group_rcu(p);
 488	if (!vg || !vg->num_vlans)
 489		return false;
 490
 491	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 492		*vid = 0;
 493
 494	if (!*vid) {
 495		*vid = br_get_pvid(vg);
 496		if (!*vid)
 497			return false;
 498
 499		return true;
 500	}
 501
 502	if (br_vlan_find(vg, *vid))
 503		return true;
 504
 505	return false;
 506}
 507
 508/* Must be protected by RTNL.
 509 * Must be called with vid in range from 1 to 4094 inclusive.
 510 */
 511int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 512{
 513	struct net_bridge_vlan_group *vg;
 514	struct net_bridge_vlan *vlan;
 515	int ret;
 516
 517	ASSERT_RTNL();
 518
 519	vg = br_vlan_group(br);
 520	vlan = br_vlan_find(vg, vid);
 521	if (vlan) {
 522		if (!br_vlan_is_brentry(vlan)) {
 523			/* Trying to change flags of non-existent bridge vlan */
 524			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
 525				return -EINVAL;
 526			/* It was only kept for port vlans, now make it real */
 527			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
 528					    vlan->vid);
 529			if (ret) {
 530				br_err(br, "failed insert local address into bridge forwarding table\n");
 531				return ret;
 532			}
 533			atomic_inc(&vlan->refcnt);
 534			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 535			vg->num_vlans++;
 536		}
 537		__vlan_add_flags(vlan, flags);
 538		return 0;
 539	}
 540
 541	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 542	if (!vlan)
 543		return -ENOMEM;
 544
 545	vlan->vid = vid;
 546	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 547	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
 548	vlan->br = br;
 549	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 550		atomic_set(&vlan->refcnt, 1);
 551	ret = __vlan_add(vlan, flags);
 552	if (ret)
 553		kfree(vlan);
 554
 555	return ret;
 556}
 557
 558/* Must be protected by RTNL.
 559 * Must be called with vid in range from 1 to 4094 inclusive.
 560 */
 561int br_vlan_delete(struct net_bridge *br, u16 vid)
 562{
 563	struct net_bridge_vlan_group *vg;
 564	struct net_bridge_vlan *v;
 565
 566	ASSERT_RTNL();
 567
 568	vg = br_vlan_group(br);
 569	v = br_vlan_find(vg, vid);
 570	if (!v || !br_vlan_is_brentry(v))
 571		return -ENOENT;
 572
 573	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 574	br_fdb_delete_by_port(br, NULL, vid, 0);
 575
 576	return __vlan_del(v);
 577}
 578
 579void br_vlan_flush(struct net_bridge *br)
 580{
 581	struct net_bridge_vlan_group *vg;
 582
 583	ASSERT_RTNL();
 584
 585	vg = br_vlan_group(br);
 586	__vlan_flush(vg);
 587	RCU_INIT_POINTER(br->vlgrp, NULL);
 588	synchronize_rcu();
 589	__vlan_group_free(vg);
 590}
 591
 592struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 593{
 594	if (!vg)
 595		return NULL;
 596
 597	return br_vlan_lookup(&vg->vlan_hash, vid);
 598}
 599
 600/* Must be protected by RTNL. */
 601static void recalculate_group_addr(struct net_bridge *br)
 602{
 603	if (br->group_addr_set)
 604		return;
 605
 606	spin_lock_bh(&br->lock);
 607	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
 608		/* Bridge Group Address */
 609		br->group_addr[5] = 0x00;
 610	} else { /* vlan_enabled && ETH_P_8021AD */
 611		/* Provider Bridge Group Address */
 612		br->group_addr[5] = 0x08;
 613	}
 614	spin_unlock_bh(&br->lock);
 615}
 616
 617/* Must be protected by RTNL. */
 618void br_recalculate_fwd_mask(struct net_bridge *br)
 619{
 620	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
 621		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 622	else /* vlan_enabled && ETH_P_8021AD */
 623		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
 624					      ~(1u << br->group_addr[5]);
 625}
 626
 627int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 628{
 629	struct switchdev_attr attr = {
 630		.orig_dev = br->dev,
 631		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 632		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 633		.u.vlan_filtering = val,
 634	};
 635	int err;
 636
 637	if (br->vlan_enabled == val)
 638		return 0;
 639
 640	err = switchdev_port_attr_set(br->dev, &attr);
 641	if (err && err != -EOPNOTSUPP)
 642		return err;
 643
 644	br->vlan_enabled = val;
 645	br_manage_promisc(br);
 646	recalculate_group_addr(br);
 647	br_recalculate_fwd_mask(br);
 648
 649	return 0;
 650}
 651
 652int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 653{
 654	int err;
 655
 656	if (!rtnl_trylock())
 657		return restart_syscall();
 658
 659	err = __br_vlan_filter_toggle(br, val);
 660	rtnl_unlock();
 661
 662	return err;
 663}
 664
 665int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 666{
 667	int err = 0;
 668	struct net_bridge_port *p;
 669	struct net_bridge_vlan *vlan;
 670	struct net_bridge_vlan_group *vg;
 671	__be16 oldproto;
 672
 673	if (br->vlan_proto == proto)
 674		return 0;
 675
 676	/* Add VLANs for the new proto to the device filter. */
 677	list_for_each_entry(p, &br->port_list, list) {
 678		vg = nbp_vlan_group(p);
 679		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
 680			err = vlan_vid_add(p->dev, proto, vlan->vid);
 681			if (err)
 682				goto err_filt;
 683		}
 684	}
 685
 686	oldproto = br->vlan_proto;
 687	br->vlan_proto = proto;
 688
 689	recalculate_group_addr(br);
 690	br_recalculate_fwd_mask(br);
 691
 692	/* Delete VLANs for the old proto from the device filter. */
 693	list_for_each_entry(p, &br->port_list, list) {
 694		vg = nbp_vlan_group(p);
 695		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 696			vlan_vid_del(p->dev, oldproto, vlan->vid);
 697	}
 698
 699	return 0;
 700
 701err_filt:
 702	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
 703		vlan_vid_del(p->dev, proto, vlan->vid);
 704
 705	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 706		vg = nbp_vlan_group(p);
 707		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 708			vlan_vid_del(p->dev, proto, vlan->vid);
 709	}
 710
 711	return err;
 712}
 713
 714int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 715{
 716	int err;
 717
 718	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
 719		return -EPROTONOSUPPORT;
 720
 721	if (!rtnl_trylock())
 722		return restart_syscall();
 723
 724	err = __br_vlan_set_proto(br, htons(val));
 725	rtnl_unlock();
 726
 727	return err;
 728}
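/*
 * Editorial usage sketch (not part of the original file): switching the
 * bridge to 802.1ad (provider bridging / QinQ).  The protocol value is
 * passed in host byte order and converted with htons() internally; only
 * ETH_P_8021Q and ETH_P_8021AD are accepted.  Helper name is hypothetical.
 */
static int example_enable_8021ad(struct net_bridge *br)
{
	return br_vlan_set_proto(br, ETH_P_8021AD);
}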
 729
 730static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 731{
 732	struct net_bridge_vlan *v;
 733
 734	if (vid != vg->pvid)
 735		return false;
 736
 737	v = br_vlan_lookup(&vg->vlan_hash, vid);
 738	if (v && br_vlan_should_use(v) &&
 739	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
 740		return true;
 741
 742	return false;
 743}
 744
 745static void br_vlan_disable_default_pvid(struct net_bridge *br)
 746{
 747	struct net_bridge_port *p;
 748	u16 pvid = br->default_pvid;
 749
 750	/* Disable default_pvid on all ports where it is still
 751	 * configured.
 752	 */
 753	if (vlan_default_pvid(br_vlan_group(br), pvid))
 754		br_vlan_delete(br, pvid);
 755
 756	list_for_each_entry(p, &br->port_list, list) {
 757		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
 758			nbp_vlan_delete(p, pvid);
 759	}
 760
 761	br->default_pvid = 0;
 762}
 763
 764int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 765{
 766	const struct net_bridge_vlan *pvent;
 767	struct net_bridge_vlan_group *vg;
 768	struct net_bridge_port *p;
 769	u16 old_pvid;
 770	int err = 0;
 771	unsigned long *changed;
 772
 773	if (!pvid) {
 774		br_vlan_disable_default_pvid(br);
 775		return 0;
 776	}
 777
 778	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
 779			  GFP_KERNEL);
 780	if (!changed)
 781		return -ENOMEM;
 782
 783	old_pvid = br->default_pvid;
 784
 785	/* Update default_pvid config only if we do not conflict with
 786	 * user configuration.
 787	 */
 788	vg = br_vlan_group(br);
 789	pvent = br_vlan_find(vg, pvid);
 790	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
 791	    (!pvent || !br_vlan_should_use(pvent))) {
 792		err = br_vlan_add(br, pvid,
 793				  BRIDGE_VLAN_INFO_PVID |
 794				  BRIDGE_VLAN_INFO_UNTAGGED |
 795				  BRIDGE_VLAN_INFO_BRENTRY);
 796		if (err)
 797			goto out;
 798		br_vlan_delete(br, old_pvid);
 799		set_bit(0, changed);
 800	}
 801
 802	list_for_each_entry(p, &br->port_list, list) {
 803		/* Update default_pvid config only if we do not conflict with
 804		 * user configuration.
 805		 */
 806		vg = nbp_vlan_group(p);
 807		if ((old_pvid &&
 808		     !vlan_default_pvid(vg, old_pvid)) ||
 809		    br_vlan_find(vg, pvid))
 810			continue;
 811
 812		err = nbp_vlan_add(p, pvid,
 813				   BRIDGE_VLAN_INFO_PVID |
 814				   BRIDGE_VLAN_INFO_UNTAGGED);
 815		if (err)
 816			goto err_port;
 817		nbp_vlan_delete(p, old_pvid);
 818		set_bit(p->port_no, changed);
 819	}
 820
 821	br->default_pvid = pvid;
 822
 823out:
 824	kfree(changed);
 825	return err;
 826
 827err_port:
 828	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 829		if (!test_bit(p->port_no, changed))
 830			continue;
 831
 832		if (old_pvid)
 833			nbp_vlan_add(p, old_pvid,
 834				     BRIDGE_VLAN_INFO_PVID |
 835				     BRIDGE_VLAN_INFO_UNTAGGED);
 836		nbp_vlan_delete(p, pvid);
 837	}
 838
 839	if (test_bit(0, changed)) {
 840		if (old_pvid)
 841			br_vlan_add(br, old_pvid,
 842				    BRIDGE_VLAN_INFO_PVID |
 843				    BRIDGE_VLAN_INFO_UNTAGGED |
 844				    BRIDGE_VLAN_INFO_BRENTRY);
 845		br_vlan_delete(br, pvid);
 846	}
 847	goto out;
 848}
 849
 850int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
 851{
 852	u16 pvid = val;
 853	int err = 0;
 854
 855	if (val >= VLAN_VID_MASK)
 856		return -EINVAL;
 857
 858	if (!rtnl_trylock())
 859		return restart_syscall();
 860
 861	if (pvid == br->default_pvid)
 862		goto unlock;
 863
 864	/* Only allow default pvid change when filtering is disabled */
 865	if (br->vlan_enabled) {
 866		pr_info_once("Please disable vlan filtering to change default_pvid\n");
 867		err = -EPERM;
 868		goto unlock;
 869	}
 870	err = __br_vlan_set_default_pvid(br, pvid);
 871unlock:
 872	rtnl_unlock();
 873	return err;
 874}
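/*
 * Editorial usage sketch (not part of the original file): default_pvid
 * can only be changed while VLAN filtering is disabled, so a caller would
 * turn filtering off first.  Helper name and parameters are hypothetical;
 * both functions acquire RTNL themselves via rtnl_trylock().
 */
static int example_change_default_pvid(struct net_bridge *br, u16 pvid)
{
	int err;

	err = br_vlan_filter_toggle(br, 0);
	if (err)
		return err;

	return br_vlan_set_default_pvid(br, pvid);
}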
 875
 876int br_vlan_init(struct net_bridge *br)
 877{
 878	struct net_bridge_vlan_group *vg;
 879	int ret = -ENOMEM;
 880
 881	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
 882	if (!vg)
 883		goto out;
 884	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
 885	if (ret)
 886		goto err_rhtbl;
 887	INIT_LIST_HEAD(&vg->vlan_list);
 888	br->vlan_proto = htons(ETH_P_8021Q);
 889	br->default_pvid = 1;
 890	rcu_assign_pointer(br->vlgrp, vg);
 891	ret = br_vlan_add(br, 1,
 892			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
 893			  BRIDGE_VLAN_INFO_BRENTRY);
 894	if (ret)
 895		goto err_vlan_add;
 896
 897out:
 898	return ret;
 899
 900err_vlan_add:
 901	rhashtable_destroy(&vg->vlan_hash);
 902err_rhtbl:
 903	kfree(vg);
 904
 905	goto out;
 906}
 907
 908int nbp_vlan_init(struct net_bridge_port *p)
 909{
 910	struct switchdev_attr attr = {
 911		.orig_dev = p->br->dev,
 912		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 913		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 914		.u.vlan_filtering = p->br->vlan_enabled,
 915	};
 916	struct net_bridge_vlan_group *vg;
 917	int ret = -ENOMEM;
 918
 919	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
 920	if (!vg)
 921		goto out;
 922
 923	ret = switchdev_port_attr_set(p->dev, &attr);
 924	if (ret && ret != -EOPNOTSUPP)
 925		goto err_vlan_enabled;
 926
 927	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
 928	if (ret)
 929		goto err_rhtbl;
 930	INIT_LIST_HEAD(&vg->vlan_list);
 931	rcu_assign_pointer(p->vlgrp, vg);
 932	if (p->br->default_pvid) {
 933		ret = nbp_vlan_add(p, p->br->default_pvid,
 934				   BRIDGE_VLAN_INFO_PVID |
 935				   BRIDGE_VLAN_INFO_UNTAGGED);
 936		if (ret)
 937			goto err_vlan_add;
 938	}
 939out:
 940	return ret;
 941
 942err_vlan_add:
 943	RCU_INIT_POINTER(p->vlgrp, NULL);
 944	synchronize_rcu();
 945	rhashtable_destroy(&vg->vlan_hash);
 946err_vlan_enabled:
 947err_rhtbl:
 948	kfree(vg);
 949
 950	goto out;
 951}
 952
 953/* Must be protected by RTNL.
 954 * Must be called with vid in range from 1 to 4094 inclusive.
 955 */
 956int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 957{
 958	struct switchdev_obj_port_vlan v = {
 959		.obj.orig_dev = port->dev,
 960		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 961		.flags = flags,
 962		.vid_begin = vid,
 963		.vid_end = vid,
 964	};
 965	struct net_bridge_vlan *vlan;
 966	int ret;
 967
 968	ASSERT_RTNL();
 969
 970	vlan = br_vlan_find(nbp_vlan_group(port), vid);
 971	if (vlan) {
 972		/* Pass the flags to the hardware bridge */
 973		ret = switchdev_port_obj_add(port->dev, &v.obj);
 974		if (ret && ret != -EOPNOTSUPP)
 975			return ret;
 976		__vlan_add_flags(vlan, flags);
 977		return 0;
 978	}
 979
 980	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 981	if (!vlan)
 982		return -ENOMEM;
 983
 984	vlan->vid = vid;
 985	vlan->port = port;
 986	ret = __vlan_add(vlan, flags);
 987	if (ret)
 988		kfree(vlan);
 989
 990	return ret;
 991}
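/*
 * Editorial usage sketch (not part of the original file): making a VLAN
 * the untagged PVID of a single port.  Unlike br_vlan_add(), port VLANs
 * never carry BRIDGE_VLAN_INFO_BRENTRY.  RTNL must be held; VID 100 and
 * the helper name are arbitrary examples.
 */
static int example_add_port_pvid(struct net_bridge_port *port)
{
	ASSERT_RTNL();

	return nbp_vlan_add(port, 100,
			    BRIDGE_VLAN_INFO_PVID |
			    BRIDGE_VLAN_INFO_UNTAGGED);
}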
 992
 993/* Must be protected by RTNL.
 994 * Must be called with vid in range from 1 to 4094 inclusive.
 995 */
 996int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
 997{
 998	struct net_bridge_vlan *v;
 999
1000	ASSERT_RTNL();
1001
1002	v = br_vlan_find(nbp_vlan_group(port), vid);
1003	if (!v)
1004		return -ENOENT;
1005	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1006	br_fdb_delete_by_port(port->br, port, vid, 0);
1007
1008	return __vlan_del(v);
1009}
1010
1011void nbp_vlan_flush(struct net_bridge_port *port)
1012{
1013	struct net_bridge_vlan_group *vg;
1014
1015	ASSERT_RTNL();
1016
1017	vg = nbp_vlan_group(port);
1018	__vlan_flush(vg);
1019	RCU_INIT_POINTER(port->vlgrp, NULL);
1020	synchronize_rcu();
1021	__vlan_group_free(vg);
1022}