v5.4
   1// SPDX-License-Identifier: GPL-2.0-only
   2#include <linux/kernel.h>
   3#include <linux/netdevice.h>
   4#include <linux/rtnetlink.h>
   5#include <linux/slab.h>
   6#include <net/switchdev.h>
   7
   8#include "br_private.h"
   9#include "br_private_tunnel.h"
  10
  11static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid);
  12
  13static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  14			      const void *ptr)
  15{
  16	const struct net_bridge_vlan *vle = ptr;
  17	u16 vid = *(u16 *)arg->key;
  18
  19	return vle->vid != vid;
  20}
  21
  22static const struct rhashtable_params br_vlan_rht_params = {
  23	.head_offset = offsetof(struct net_bridge_vlan, vnode),
  24	.key_offset = offsetof(struct net_bridge_vlan, vid),
  25	.key_len = sizeof(u16),
  26	.nelem_hint = 3,
  27	.max_size = VLAN_N_VID,
  28	.obj_cmpfn = br_vlan_cmp,
  29	.automatic_shrinking = true,
  30};
  31
  32static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  33{
  34	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  35}
  36
  37static bool __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  38{
  39	if (vg->pvid == vid)
  40		return false;
  41
  42	smp_wmb();
  43	vg->pvid = vid;
  44
  45	return true;
  46}
  47
  48static bool __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  49{
  50	if (vg->pvid != vid)
  51		return false;
  52
  53	smp_wmb();
  54	vg->pvid = 0;
  55
  56	return true;
  57}
  58
  59/* return true if anything changed, false otherwise */
  60static bool __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  61{
  62	struct net_bridge_vlan_group *vg;
  63	u16 old_flags = v->flags;
  64	bool ret;
  65
  66	if (br_vlan_is_master(v))
  67		vg = br_vlan_group(v->br);
  68	else
  69		vg = nbp_vlan_group(v->port);
  70
  71	if (flags & BRIDGE_VLAN_INFO_PVID)
  72		ret = __vlan_add_pvid(vg, v->vid);
  73	else
  74		ret = __vlan_delete_pvid(vg, v->vid);
  75
  76	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  77		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  78	else
  79		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  80
  81	return ret || !!(old_flags ^ v->flags);
  82}
  83
  84static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  85			  struct net_bridge_vlan *v, u16 flags,
  86			  struct netlink_ext_ack *extack)
  87{
  88	int err;
  89
  90	/* Try switchdev op first. In case it is not supported, fallback to
  91	 * 8021q add.
  92	 */
  93	err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
  94	if (err == -EOPNOTSUPP)
  95		return vlan_vid_add(dev, br->vlan_proto, v->vid);
  96	v->priv_flags |= BR_VLFLAG_ADDED_BY_SWITCHDEV;
  97	return err;
  98}
  99
 100static void __vlan_add_list(struct net_bridge_vlan *v)
 101{
 102	struct net_bridge_vlan_group *vg;
 103	struct list_head *headp, *hpos;
 104	struct net_bridge_vlan *vent;
 105
 106	if (br_vlan_is_master(v))
 107		vg = br_vlan_group(v->br);
 108	else
 109		vg = nbp_vlan_group(v->port);
 110
 111	headp = &vg->vlan_list;
 112	list_for_each_prev(hpos, headp) {
 113		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
 114		if (v->vid < vent->vid)
 115			continue;
 116		else
 117			break;
 118	}
 119	list_add_rcu(&v->vlist, hpos);
 120}
 121
 122static void __vlan_del_list(struct net_bridge_vlan *v)
 123{
 124	list_del_rcu(&v->vlist);
 125}
 126
 127static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 128			  const struct net_bridge_vlan *v)
 129{
 130	int err;
 131
 132	/* Try switchdev op first. In case it is not supported, fallback to
 133	 * 8021q del.
 134	 */
 135	err = br_switchdev_port_vlan_del(dev, v->vid);
 136	if (!(v->priv_flags & BR_VLFLAG_ADDED_BY_SWITCHDEV))
 137		vlan_vid_del(dev, br->vlan_proto, v->vid);
 138	return err == -EOPNOTSUPP ? 0 : err;
 139}
 140
  141/* Returns a master vlan, if it didn't exist it gets created. In all cases
  142 * a reference is taken to the master vlan before returning.
 143 */
 144static struct net_bridge_vlan *
 145br_vlan_get_master(struct net_bridge *br, u16 vid,
 146		   struct netlink_ext_ack *extack)
 147{
 148	struct net_bridge_vlan_group *vg;
 149	struct net_bridge_vlan *masterv;
 150
 151	vg = br_vlan_group(br);
 152	masterv = br_vlan_find(vg, vid);
 153	if (!masterv) {
 154		bool changed;
 155
 156		/* missing global ctx, create it now */
 157		if (br_vlan_add(br, vid, 0, &changed, extack))
 158			return NULL;
 159		masterv = br_vlan_find(vg, vid);
 160		if (WARN_ON(!masterv))
 161			return NULL;
 162		refcount_set(&masterv->refcnt, 1);
 163		return masterv;
 164	}
 165	refcount_inc(&masterv->refcnt);
 166
 167	return masterv;
 168}
 169
 170static void br_master_vlan_rcu_free(struct rcu_head *rcu)
 171{
 172	struct net_bridge_vlan *v;
 173
 174	v = container_of(rcu, struct net_bridge_vlan, rcu);
 175	WARN_ON(!br_vlan_is_master(v));
 176	free_percpu(v->stats);
 177	v->stats = NULL;
 178	kfree(v);
 179}
 180
 181static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 182{
 183	struct net_bridge_vlan_group *vg;
 184
 185	if (!br_vlan_is_master(masterv))
 186		return;
 187
 188	vg = br_vlan_group(masterv->br);
 189	if (refcount_dec_and_test(&masterv->refcnt)) {
 190		rhashtable_remove_fast(&vg->vlan_hash,
 191				       &masterv->vnode, br_vlan_rht_params);
 192		__vlan_del_list(masterv);
 193		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
 194	}
 195}
 196
 197static void nbp_vlan_rcu_free(struct rcu_head *rcu)
 198{
 199	struct net_bridge_vlan *v;
 200
 201	v = container_of(rcu, struct net_bridge_vlan, rcu);
 202	WARN_ON(br_vlan_is_master(v));
 203	/* if we had per-port stats configured then free them here */
 204	if (v->priv_flags & BR_VLFLAG_PER_PORT_STATS)
 205		free_percpu(v->stats);
 206	v->stats = NULL;
 207	kfree(v);
 208}
 209
 210/* This is the shared VLAN add function which works for both ports and bridge
 211 * devices. There are four possible calls to this function in terms of the
 212 * vlan entry type:
 213 * 1. vlan is being added on a port (no master flags, global entry exists)
 214 * 2. vlan is being added on a bridge (both master and brentry flags)
 215 * 3. vlan is being added on a port, but a global entry didn't exist which
 216 *    is being created right now (master flag set, brentry flag unset), the
 217 *    global entry is used for global per-vlan features, but not for filtering
 218 * 4. same as 3 but with both master and brentry flags set so the entry
 219 *    will be used for filtering in both the port and the bridge
 220 */
 221static int __vlan_add(struct net_bridge_vlan *v, u16 flags,
 222		      struct netlink_ext_ack *extack)
 223{
 224	struct net_bridge_vlan *masterv = NULL;
 225	struct net_bridge_port *p = NULL;
 226	struct net_bridge_vlan_group *vg;
 227	struct net_device *dev;
 228	struct net_bridge *br;
 229	int err;
 230
 231	if (br_vlan_is_master(v)) {
 232		br = v->br;
 233		dev = br->dev;
 234		vg = br_vlan_group(br);
 235	} else {
 236		p = v->port;
 237		br = p->br;
 238		dev = p->dev;
 239		vg = nbp_vlan_group(p);
 240	}
 241
 242	if (p) {
 243		/* Add VLAN to the device filter if it is supported.
 244		 * This ensures tagged traffic enters the bridge when
 245		 * promiscuous mode is disabled by br_manage_promisc().
 246		 */
 247		err = __vlan_vid_add(dev, br, v, flags, extack);
 248		if (err)
 249			goto out;
 250
 251		/* need to work on the master vlan too */
 252		if (flags & BRIDGE_VLAN_INFO_MASTER) {
 253			bool changed;
 254
 255			err = br_vlan_add(br, v->vid,
 256					  flags | BRIDGE_VLAN_INFO_BRENTRY,
 257					  &changed, extack);
 258			if (err)
 259				goto out_filt;
 260		}
 261
 262		masterv = br_vlan_get_master(br, v->vid, extack);
 263		if (!masterv)
 264			goto out_filt;
 265		v->brvlan = masterv;
 266		if (br_opt_get(br, BROPT_VLAN_STATS_PER_PORT)) {
 267			v->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 268			if (!v->stats) {
 269				err = -ENOMEM;
 270				goto out_filt;
 271			}
 272			v->priv_flags |= BR_VLFLAG_PER_PORT_STATS;
 273		} else {
 274			v->stats = masterv->stats;
 275		}
 276	} else {
 277		err = br_switchdev_port_vlan_add(dev, v->vid, flags, extack);
 278		if (err && err != -EOPNOTSUPP)
 279			goto out;
 280	}
 281
 282	/* Add the dev mac and count the vlan only if it's usable */
 283	if (br_vlan_should_use(v)) {
 284		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 285		if (err) {
 286			br_err(br, "failed insert local address into bridge forwarding table\n");
 287			goto out_filt;
 288		}
 289		vg->num_vlans++;
 290	}
 291
 292	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 293					    br_vlan_rht_params);
 294	if (err)
 295		goto out_fdb_insert;
 296
 297	__vlan_add_list(v);
 298	__vlan_add_flags(v, flags);
 299
 300	if (p)
 301		nbp_vlan_set_vlan_dev_state(p, v->vid);
 302out:
 303	return err;
 304
 305out_fdb_insert:
 306	if (br_vlan_should_use(v)) {
 307		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
 308		vg->num_vlans--;
 309	}
 310
 311out_filt:
 312	if (p) {
 313		__vlan_vid_del(dev, br, v);
 314		if (masterv) {
 315			if (v->stats && masterv->stats != v->stats)
 316				free_percpu(v->stats);
 317			v->stats = NULL;
 318
 319			br_vlan_put_master(masterv);
 320			v->brvlan = NULL;
 321		}
 322	} else {
 323		br_switchdev_port_vlan_del(dev, v->vid);
 324	}
 325
 326	goto out;
 327}
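
For context, a minimal sketch of the call sites that produce the four cases documented above __vlan_add(). It is not part of the kernel file: the example_ function and the vid value 10 are hypothetical, error handling is omitted, and RTNL is assumed to be held.

static void example_vlan_add_cases(struct net_bridge *br,
				   struct net_bridge_port *p)
{
	bool changed;

	/* Case 2: vlan configured directly on the bridge device;
	 * br_vlan_add() sets BRIDGE_VLAN_INFO_MASTER itself. */
	br_vlan_add(br, 10, BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);

	/* Case 1 (or 3/4 when no global entry existed yet): vlan configured
	 * on a port; __vlan_add() finds or creates the master entry. */
	nbp_vlan_add(p, 10, BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
		     &changed, NULL);
}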
 328
 329static int __vlan_del(struct net_bridge_vlan *v)
 330{
 331	struct net_bridge_vlan *masterv = v;
 332	struct net_bridge_vlan_group *vg;
 333	struct net_bridge_port *p = NULL;
 334	int err = 0;
 335
 336	if (br_vlan_is_master(v)) {
 337		vg = br_vlan_group(v->br);
 338	} else {
 339		p = v->port;
 340		vg = nbp_vlan_group(v->port);
 341		masterv = v->brvlan;
 342	}
 343
 344	__vlan_delete_pvid(vg, v->vid);
 345	if (p) {
 346		err = __vlan_vid_del(p->dev, p->br, v);
 347		if (err)
 348			goto out;
 349	} else {
 350		err = br_switchdev_port_vlan_del(v->br->dev, v->vid);
 351		if (err && err != -EOPNOTSUPP)
 352			goto out;
 353		err = 0;
 354	}
 355
 356	if (br_vlan_should_use(v)) {
 357		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
 358		vg->num_vlans--;
 359	}
 360
 361	if (masterv != v) {
 362		vlan_tunnel_info_del(vg, v);
 363		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 364				       br_vlan_rht_params);
 365		__vlan_del_list(v);
 366		nbp_vlan_set_vlan_dev_state(p, v->vid);
 367		call_rcu(&v->rcu, nbp_vlan_rcu_free);
 368	}
 369
 370	br_vlan_put_master(masterv);
 371out:
 372	return err;
 373}
 374
 375static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 376{
 377	WARN_ON(!list_empty(&vg->vlan_list));
 378	rhashtable_destroy(&vg->vlan_hash);
 379	vlan_tunnel_deinit(vg);
 380	kfree(vg);
 381}
 382
 383static void __vlan_flush(struct net_bridge_vlan_group *vg)
 384{
 385	struct net_bridge_vlan *vlan, *tmp;
 386
 387	__vlan_delete_pvid(vg, vg->pvid);
 388	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
 389		__vlan_del(vlan);
 390}
 391
 392struct sk_buff *br_handle_vlan(struct net_bridge *br,
 393			       const struct net_bridge_port *p,
 394			       struct net_bridge_vlan_group *vg,
 395			       struct sk_buff *skb)
 396{
 397	struct br_vlan_stats *stats;
 398	struct net_bridge_vlan *v;
 399	u16 vid;
 400
 401	/* If this packet was not filtered at input, let it pass */
 402	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 403		goto out;
 404
 405	/* At this point, we know that the frame was filtered and contains
 406	 * a valid vlan id.  If the vlan id has untagged flag set,
 407	 * send untagged; otherwise, send tagged.
 408	 */
 409	br_vlan_get_tag(skb, &vid);
 410	v = br_vlan_find(vg, vid);
 411	/* Vlan entry must be configured at this point.  The
 412	 * only exception is the bridge is set in promisc mode and the
 413	 * packet is destined for the bridge device.  In this case
 414	 * pass the packet as is.
 415	 */
 416	if (!v || !br_vlan_should_use(v)) {
 417		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 418			goto out;
 419		} else {
 420			kfree_skb(skb);
 421			return NULL;
 422		}
 423	}
 424	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 425		stats = this_cpu_ptr(v->stats);
 426		u64_stats_update_begin(&stats->syncp);
 427		stats->tx_bytes += skb->len;
 428		stats->tx_packets++;
 429		u64_stats_update_end(&stats->syncp);
 430	}
 431
 432	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 433		__vlan_hwaccel_clear_tag(skb);
 434
 435	if (p && (p->flags & BR_VLAN_TUNNEL) &&
 436	    br_handle_egress_vlan_tunnel(skb, v)) {
 437		kfree_skb(skb);
 438		return NULL;
 439	}
 440out:
 441	return skb;
 442}
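
For context, a hedged sketch of how the egress path (br_forward.c) is expected to consume br_handle_vlan(); the example_ function is hypothetical and runs in the RCU-protected transmit path. A NULL return means the skb was dropped and must not be touched again.

static void example_egress(const struct net_bridge_port *to,
			   struct sk_buff *skb)
{
	struct net_bridge_vlan_group *vg = nbp_vlan_group_rcu(to);

	/* May untag, update TX stats, run the tunnel hook, or free the skb. */
	skb = br_handle_vlan(to->br, to, vg, skb);
	if (!skb)
		return;		/* consumed by the egress VLAN policy */

	/* the actual transmit would follow here */
}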
 443
 444/* Called under RCU */
 445static bool __allowed_ingress(const struct net_bridge *br,
 446			      struct net_bridge_vlan_group *vg,
 447			      struct sk_buff *skb, u16 *vid)
 448{
 449	struct br_vlan_stats *stats;
 450	struct net_bridge_vlan *v;
 451	bool tagged;
 452
 453	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
 454	/* If vlan tx offload is disabled on bridge device and frame was
 455	 * sent from vlan device on the bridge device, it does not have
 456	 * HW accelerated vlan tag.
 457	 */
 458	if (unlikely(!skb_vlan_tag_present(skb) &&
 459		     skb->protocol == br->vlan_proto)) {
 460		skb = skb_vlan_untag(skb);
 461		if (unlikely(!skb))
 462			return false;
 463	}
 464
 465	if (!br_vlan_get_tag(skb, vid)) {
 466		/* Tagged frame */
 467		if (skb->vlan_proto != br->vlan_proto) {
 468			/* Protocol-mismatch, empty out vlan_tci for new tag */
 469			skb_push(skb, ETH_HLEN);
 470			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 471							skb_vlan_tag_get(skb));
 472			if (unlikely(!skb))
 473				return false;
 474
 475			skb_pull(skb, ETH_HLEN);
 476			skb_reset_mac_len(skb);
 477			*vid = 0;
 478			tagged = false;
 479		} else {
 480			tagged = true;
 481		}
 482	} else {
 483		/* Untagged frame */
 484		tagged = false;
 485	}
 486
 487	if (!*vid) {
 488		u16 pvid = br_get_pvid(vg);
 489
 490		/* Frame had a tag with VID 0 or did not have a tag.
 491		 * See if pvid is set on this port.  That tells us which
 492		 * vlan untagged or priority-tagged traffic belongs to.
 493		 */
 494		if (!pvid)
 495			goto drop;
 496
 497		/* PVID is set on this port.  Any untagged or priority-tagged
 498		 * ingress frame is considered to belong to this vlan.
 499		 */
 500		*vid = pvid;
 501		if (likely(!tagged))
 502			/* Untagged Frame. */
 503			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 504		else
 505			/* Priority-tagged Frame.
 506			 * At this point, we know that skb->vlan_tci VID
 507			 * field was 0.
 508			 * We update only VID field and preserve PCP field.
 509			 */
 510			skb->vlan_tci |= pvid;
 511
 512		/* if stats are disabled we can avoid the lookup */
 513		if (!br_opt_get(br, BROPT_VLAN_STATS_ENABLED))
 514			return true;
 515	}
 516	v = br_vlan_find(vg, *vid);
 517	if (!v || !br_vlan_should_use(v))
 518		goto drop;
 519
 520	if (br_opt_get(br, BROPT_VLAN_STATS_ENABLED)) {
 521		stats = this_cpu_ptr(v->stats);
 522		u64_stats_update_begin(&stats->syncp);
 523		stats->rx_bytes += skb->len;
 524		stats->rx_packets++;
 525		u64_stats_update_end(&stats->syncp);
 526	}
 527
 528	return true;
 529
 530drop:
 531	kfree_skb(skb);
 532	return false;
 533}
 534
 535bool br_allowed_ingress(const struct net_bridge *br,
 536			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
 537			u16 *vid)
 538{
 539	/* If VLAN filtering is disabled on the bridge, all packets are
 540	 * permitted.
 541	 */
 542	if (!br_opt_get(br, BROPT_VLAN_ENABLED)) {
 543		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 544		return true;
 545	}
 546
 547	return __allowed_ingress(br, vg, skb, vid);
 548}
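
Similarly, a hedged sketch of the ingress side as seen from the RX path (paraphrased, not copied from br_input.c); the example_ function is hypothetical. On failure the skb has already been freed by __allowed_ingress(), so the caller only stops processing.

static int example_ingress(struct net_bridge_port *p, struct sk_buff *skb)
{
	u16 vid = 0;

	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid))
		return NET_RX_DROP;	/* skb already consumed */

	/* learning and forwarding continue with the resolved vid */
	return NET_RX_SUCCESS;
}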
 549
 550/* Called under RCU. */
 551bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 552		       const struct sk_buff *skb)
 553{
 554	const struct net_bridge_vlan *v;
 555	u16 vid;
 556
 557	/* If this packet was not filtered at input, let it pass */
 558	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 559		return true;
 560
 561	br_vlan_get_tag(skb, &vid);
 562	v = br_vlan_find(vg, vid);
 563	if (v && br_vlan_should_use(v))
 564		return true;
 565
 566	return false;
 567}
 568
 569/* Called under RCU */
 570bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 571{
 572	struct net_bridge_vlan_group *vg;
 573	struct net_bridge *br = p->br;
 574
 575	/* If filtering was disabled at input, let it pass. */
 576	if (!br_opt_get(br, BROPT_VLAN_ENABLED))
 577		return true;
 578
 579	vg = nbp_vlan_group_rcu(p);
 580	if (!vg || !vg->num_vlans)
 581		return false;
 582
 583	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 584		*vid = 0;
 585
 586	if (!*vid) {
 587		*vid = br_get_pvid(vg);
 588		if (!*vid)
 589			return false;
 590
 591		return true;
 592	}
 593
 594	if (br_vlan_find(vg, *vid))
 595		return true;
 596
 597	return false;
 598}
 599
 600static int br_vlan_add_existing(struct net_bridge *br,
 601				struct net_bridge_vlan_group *vg,
 602				struct net_bridge_vlan *vlan,
 603				u16 flags, bool *changed,
 604				struct netlink_ext_ack *extack)
 605{
 606	int err;
 607
 608	err = br_switchdev_port_vlan_add(br->dev, vlan->vid, flags, extack);
 609	if (err && err != -EOPNOTSUPP)
 610		return err;
 611
 612	if (!br_vlan_is_brentry(vlan)) {
 613		/* Trying to change flags of non-existent bridge vlan */
 614		if (!(flags & BRIDGE_VLAN_INFO_BRENTRY)) {
 615			err = -EINVAL;
 616			goto err_flags;
 617		}
 618		/* It was only kept for port vlans, now make it real */
 619		err = br_fdb_insert(br, NULL, br->dev->dev_addr,
 620				    vlan->vid);
 621		if (err) {
 622			br_err(br, "failed to insert local address into bridge forwarding table\n");
 623			goto err_fdb_insert;
 624		}
 625
 626		refcount_inc(&vlan->refcnt);
 627		vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 628		vg->num_vlans++;
 629		*changed = true;
 630	}
 631
 632	if (__vlan_add_flags(vlan, flags))
 633		*changed = true;
 634
 635	return 0;
 636
 637err_fdb_insert:
 638err_flags:
 639	br_switchdev_port_vlan_del(br->dev, vlan->vid);
 640	return err;
 641}
 642
 643/* Must be protected by RTNL.
 644 * Must be called with vid in range from 1 to 4094 inclusive.
 645 * changed must be true only if the vlan was created or updated
 646 */
 647int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags, bool *changed,
 648		struct netlink_ext_ack *extack)
 649{
 650	struct net_bridge_vlan_group *vg;
 651	struct net_bridge_vlan *vlan;
 652	int ret;
 653
 654	ASSERT_RTNL();
 655
 656	*changed = false;
 657	vg = br_vlan_group(br);
 658	vlan = br_vlan_find(vg, vid);
 659	if (vlan)
 660		return br_vlan_add_existing(br, vg, vlan, flags, changed,
 661					    extack);
 662
 663	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 664	if (!vlan)
 665		return -ENOMEM;
 666
 667	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 668	if (!vlan->stats) {
 669		kfree(vlan);
 670		return -ENOMEM;
 671	}
 672	vlan->vid = vid;
 673	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 674	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
 675	vlan->br = br;
 676	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 677		refcount_set(&vlan->refcnt, 1);
 678	ret = __vlan_add(vlan, flags, extack);
 679	if (ret) {
 680		free_percpu(vlan->stats);
 681		kfree(vlan);
 682	} else {
 683		*changed = true;
 684	}
 685
 686	return ret;
 687}
 688
 689/* Must be protected by RTNL.
 690 * Must be called with vid in range from 1 to 4094 inclusive.
 691 */
 692int br_vlan_delete(struct net_bridge *br, u16 vid)
 693{
 694	struct net_bridge_vlan_group *vg;
 695	struct net_bridge_vlan *v;
 696
 697	ASSERT_RTNL();
 698
 699	vg = br_vlan_group(br);
 700	v = br_vlan_find(vg, vid);
 701	if (!v || !br_vlan_is_brentry(v))
 702		return -ENOENT;
 703
 704	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 705	br_fdb_delete_by_port(br, NULL, vid, 0);
 706
 707	vlan_tunnel_info_del(vg, v);
 708
 709	return __vlan_del(v);
 710}
 711
 712void br_vlan_flush(struct net_bridge *br)
 713{
 714	struct net_bridge_vlan_group *vg;
 715
 716	ASSERT_RTNL();
 717
 718	vg = br_vlan_group(br);
 719	__vlan_flush(vg);
 720	RCU_INIT_POINTER(br->vlgrp, NULL);
 721	synchronize_rcu();
 722	__vlan_group_free(vg);
 723}
 724
 725struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 726{
 727	if (!vg)
 728		return NULL;
 729
 730	return br_vlan_lookup(&vg->vlan_hash, vid);
 731}
 732
 733/* Must be protected by RTNL. */
 734static void recalculate_group_addr(struct net_bridge *br)
 735{
 736	if (br_opt_get(br, BROPT_GROUP_ADDR_SET))
 737		return;
 738
 739	spin_lock_bh(&br->lock);
 740	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 741	    br->vlan_proto == htons(ETH_P_8021Q)) {
 742		/* Bridge Group Address */
 743		br->group_addr[5] = 0x00;
 744	} else { /* vlan_enabled && ETH_P_8021AD */
 745		/* Provider Bridge Group Address */
 746		br->group_addr[5] = 0x08;
 747	}
 748	spin_unlock_bh(&br->lock);
 749}
 750
 751/* Must be protected by RTNL. */
 752void br_recalculate_fwd_mask(struct net_bridge *br)
 753{
 754	if (!br_opt_get(br, BROPT_VLAN_ENABLED) ||
 755	    br->vlan_proto == htons(ETH_P_8021Q))
 756		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 757	else /* vlan_enabled && ETH_P_8021AD */
 758		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
 759					      ~(1u << br->group_addr[5]);
 760}
 761
 762int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 763{
 764	struct switchdev_attr attr = {
 765		.orig_dev = br->dev,
 766		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 767		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 768		.u.vlan_filtering = val,
 769	};
 770	int err;
 771
 772	if (br_opt_get(br, BROPT_VLAN_ENABLED) == !!val)
 773		return 0;
 774
 775	err = switchdev_port_attr_set(br->dev, &attr);
 776	if (err && err != -EOPNOTSUPP)
 777		return err;
 778
 779	br_opt_toggle(br, BROPT_VLAN_ENABLED, !!val);
 780	br_manage_promisc(br);
 781	recalculate_group_addr(br);
 782	br_recalculate_fwd_mask(br);
 783
 784	return 0;
 785}
 786
 787int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 788{
 789	return __br_vlan_filter_toggle(br, val);
 790}
 791
 792bool br_vlan_enabled(const struct net_device *dev)
 793{
 794	struct net_bridge *br = netdev_priv(dev);
 795
 796	return br_opt_get(br, BROPT_VLAN_ENABLED);
 797}
 798EXPORT_SYMBOL_GPL(br_vlan_enabled);
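
br_vlan_enabled() is exported for callers outside the bridge module and is declared in <linux/if_bridge.h>. A minimal hypothetical user, e.g. a switchdev driver deciding whether it must offload VLAN filtering for an upper bridge:

#include <linux/if_bridge.h>
#include <linux/netdevice.h>

static bool example_offload_vlan_filtering(const struct net_device *brdev)
{
	/* Only meaningful when brdev is the bridge master device. */
	return netif_is_bridge_master(brdev) && br_vlan_enabled(brdev);
}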
 799
 800int br_vlan_get_proto(const struct net_device *dev, u16 *p_proto)
 801{
 802	struct net_bridge *br = netdev_priv(dev);
 803
 804	*p_proto = ntohs(br->vlan_proto);
 805
 806	return 0;
 807}
 808EXPORT_SYMBOL_GPL(br_vlan_get_proto);
 809
 810int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 811{
 812	int err = 0;
 813	struct net_bridge_port *p;
 814	struct net_bridge_vlan *vlan;
 815	struct net_bridge_vlan_group *vg;
 816	__be16 oldproto;
 817
 818	if (br->vlan_proto == proto)
 819		return 0;
 820
 821	/* Add VLANs for the new proto to the device filter. */
 822	list_for_each_entry(p, &br->port_list, list) {
 823		vg = nbp_vlan_group(p);
 824		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
 825			err = vlan_vid_add(p->dev, proto, vlan->vid);
 826			if (err)
 827				goto err_filt;
 828		}
 829	}
 830
 831	oldproto = br->vlan_proto;
 832	br->vlan_proto = proto;
 833
 834	recalculate_group_addr(br);
 835	br_recalculate_fwd_mask(br);
 836
 837	/* Delete VLANs for the old proto from the device filter. */
 838	list_for_each_entry(p, &br->port_list, list) {
 839		vg = nbp_vlan_group(p);
 840		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 841			vlan_vid_del(p->dev, oldproto, vlan->vid);
 842	}
 843
 844	return 0;
 845
 846err_filt:
 847	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
 848		vlan_vid_del(p->dev, proto, vlan->vid);
 849
 850	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 851		vg = nbp_vlan_group(p);
 852		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 853			vlan_vid_del(p->dev, proto, vlan->vid);
 854	}
 855
 856	return err;
 857}
 858
 859int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 860{
 861	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
 862		return -EPROTONOSUPPORT;
 863
 864	return __br_vlan_set_proto(br, htons(val));
 865}
 866
 867int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
 868{
 869	switch (val) {
 870	case 0:
 871	case 1:
 872		br_opt_toggle(br, BROPT_VLAN_STATS_ENABLED, !!val);
 873		break;
 874	default:
 875		return -EINVAL;
 876	}
 877
 878	return 0;
 879}
 880
 881int br_vlan_set_stats_per_port(struct net_bridge *br, unsigned long val)
 882{
 883	struct net_bridge_port *p;
 884
 885	/* allow to change the option if there are no port vlans configured */
 886	list_for_each_entry(p, &br->port_list, list) {
 887		struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
 888
 889		if (vg->num_vlans)
 890			return -EBUSY;
 891	}
 892
 893	switch (val) {
 894	case 0:
 895	case 1:
 896		br_opt_toggle(br, BROPT_VLAN_STATS_PER_PORT, !!val);
 897		break;
 898	default:
 899		return -EINVAL;
 900	}
 901
 902	return 0;
 903}
 904
 905static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 906{
 907	struct net_bridge_vlan *v;
 908
 909	if (vid != vg->pvid)
 910		return false;
 911
 912	v = br_vlan_lookup(&vg->vlan_hash, vid);
 913	if (v && br_vlan_should_use(v) &&
 914	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
 915		return true;
 916
 917	return false;
 918}
 919
 920static void br_vlan_disable_default_pvid(struct net_bridge *br)
 921{
 922	struct net_bridge_port *p;
 923	u16 pvid = br->default_pvid;
 924
 925	/* Disable default_pvid on all ports where it is still
 926	 * configured.
 927	 */
 928	if (vlan_default_pvid(br_vlan_group(br), pvid))
 929		br_vlan_delete(br, pvid);
 930
 931	list_for_each_entry(p, &br->port_list, list) {
 932		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
 933			nbp_vlan_delete(p, pvid);
 934	}
 935
 936	br->default_pvid = 0;
 937}
 938
 939int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid,
 940			       struct netlink_ext_ack *extack)
 941{
 942	const struct net_bridge_vlan *pvent;
 943	struct net_bridge_vlan_group *vg;
 944	struct net_bridge_port *p;
 945	unsigned long *changed;
 946	bool vlchange;
 947	u16 old_pvid;
 948	int err = 0;
 949
 950	if (!pvid) {
 951		br_vlan_disable_default_pvid(br);
 952		return 0;
 953	}
 954
 955	changed = bitmap_zalloc(BR_MAX_PORTS, GFP_KERNEL);
 956	if (!changed)
 957		return -ENOMEM;
 958
 959	old_pvid = br->default_pvid;
 960
 961	/* Update default_pvid config only if we do not conflict with
 962	 * user configuration.
 963	 */
 964	vg = br_vlan_group(br);
 965	pvent = br_vlan_find(vg, pvid);
 966	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
 967	    (!pvent || !br_vlan_should_use(pvent))) {
 968		err = br_vlan_add(br, pvid,
 969				  BRIDGE_VLAN_INFO_PVID |
 970				  BRIDGE_VLAN_INFO_UNTAGGED |
 971				  BRIDGE_VLAN_INFO_BRENTRY,
 972				  &vlchange, extack);
 973		if (err)
 974			goto out;
 975		br_vlan_delete(br, old_pvid);
 976		set_bit(0, changed);
 977	}
 978
 979	list_for_each_entry(p, &br->port_list, list) {
 980		/* Update default_pvid config only if we do not conflict with
 981		 * user configuration.
 982		 */
 983		vg = nbp_vlan_group(p);
 984		if ((old_pvid &&
 985		     !vlan_default_pvid(vg, old_pvid)) ||
 986		    br_vlan_find(vg, pvid))
 987			continue;
 988
 989		err = nbp_vlan_add(p, pvid,
 990				   BRIDGE_VLAN_INFO_PVID |
 991				   BRIDGE_VLAN_INFO_UNTAGGED,
 992				   &vlchange, extack);
 993		if (err)
 994			goto err_port;
 995		nbp_vlan_delete(p, old_pvid);
 996		set_bit(p->port_no, changed);
 997	}
 998
 999	br->default_pvid = pvid;
1000
1001out:
1002	bitmap_free(changed);
1003	return err;
1004
1005err_port:
1006	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
1007		if (!test_bit(p->port_no, changed))
1008			continue;
1009
1010		if (old_pvid)
1011			nbp_vlan_add(p, old_pvid,
1012				     BRIDGE_VLAN_INFO_PVID |
1013				     BRIDGE_VLAN_INFO_UNTAGGED,
1014				     &vlchange, NULL);
1015		nbp_vlan_delete(p, pvid);
1016	}
1017
1018	if (test_bit(0, changed)) {
1019		if (old_pvid)
1020			br_vlan_add(br, old_pvid,
1021				    BRIDGE_VLAN_INFO_PVID |
1022				    BRIDGE_VLAN_INFO_UNTAGGED |
1023				    BRIDGE_VLAN_INFO_BRENTRY,
1024				    &vlchange, NULL);
1025		br_vlan_delete(br, pvid);
1026	}
1027	goto out;
1028}
1029
1030int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
1031{
1032	u16 pvid = val;
1033	int err = 0;
1034
1035	if (val >= VLAN_VID_MASK)
1036		return -EINVAL;
1037
1038	if (pvid == br->default_pvid)
1039		goto out;
1040
1041	/* Only allow default pvid change when filtering is disabled */
1042	if (br_opt_get(br, BROPT_VLAN_ENABLED)) {
1043		pr_info_once("Please disable vlan filtering to change default_pvid\n");
1044		err = -EPERM;
1045		goto out;
1046	}
1047	err = __br_vlan_set_default_pvid(br, pvid, NULL);
1048out:
1049	return err;
1050}
1051
1052int br_vlan_init(struct net_bridge *br)
1053{
1054	struct net_bridge_vlan_group *vg;
1055	int ret = -ENOMEM;
1056
1057	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
1058	if (!vg)
1059		goto out;
1060	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1061	if (ret)
1062		goto err_rhtbl;
1063	ret = vlan_tunnel_init(vg);
1064	if (ret)
1065		goto err_tunnel_init;
1066	INIT_LIST_HEAD(&vg->vlan_list);
1067	br->vlan_proto = htons(ETH_P_8021Q);
1068	br->default_pvid = 1;
1069	rcu_assign_pointer(br->vlgrp, vg);
1070
1071out:
1072	return ret;
1073
1074err_tunnel_init:
1075	rhashtable_destroy(&vg->vlan_hash);
1076err_rhtbl:
1077	kfree(vg);
1078
1079	goto out;
1080}
1081
1082int nbp_vlan_init(struct net_bridge_port *p, struct netlink_ext_ack *extack)
1083{
1084	struct switchdev_attr attr = {
1085		.orig_dev = p->br->dev,
1086		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
1087		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
1088		.u.vlan_filtering = br_opt_get(p->br, BROPT_VLAN_ENABLED),
1089	};
1090	struct net_bridge_vlan_group *vg;
1091	int ret = -ENOMEM;
1092
1093	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
1094	if (!vg)
1095		goto out;
1096
1097	ret = switchdev_port_attr_set(p->dev, &attr);
1098	if (ret && ret != -EOPNOTSUPP)
1099		goto err_vlan_enabled;
1100
1101	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
1102	if (ret)
1103		goto err_rhtbl;
1104	ret = vlan_tunnel_init(vg);
1105	if (ret)
1106		goto err_tunnel_init;
1107	INIT_LIST_HEAD(&vg->vlan_list);
1108	rcu_assign_pointer(p->vlgrp, vg);
1109	if (p->br->default_pvid) {
1110		bool changed;
1111
1112		ret = nbp_vlan_add(p, p->br->default_pvid,
1113				   BRIDGE_VLAN_INFO_PVID |
1114				   BRIDGE_VLAN_INFO_UNTAGGED,
1115				   &changed, extack);
1116		if (ret)
1117			goto err_vlan_add;
1118	}
1119out:
1120	return ret;
1121
1122err_vlan_add:
1123	RCU_INIT_POINTER(p->vlgrp, NULL);
1124	synchronize_rcu();
1125	vlan_tunnel_deinit(vg);
1126err_tunnel_init:
1127	rhashtable_destroy(&vg->vlan_hash);
1128err_rhtbl:
1129err_vlan_enabled:
1130	kfree(vg);
1131
1132	goto out;
1133}
1134
1135/* Must be protected by RTNL.
1136 * Must be called with vid in range from 1 to 4094 inclusive.
1137 * changed must be true only if the vlan was created or updated
1138 */
1139int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags,
1140		 bool *changed, struct netlink_ext_ack *extack)
1141{
1142	struct net_bridge_vlan *vlan;
1143	int ret;
1144
1145	ASSERT_RTNL();
1146
1147	*changed = false;
1148	vlan = br_vlan_find(nbp_vlan_group(port), vid);
1149	if (vlan) {
1150		/* Pass the flags to the hardware bridge */
1151		ret = br_switchdev_port_vlan_add(port->dev, vid, flags, extack);
1152		if (ret && ret != -EOPNOTSUPP)
1153			return ret;
1154		*changed = __vlan_add_flags(vlan, flags);
1155
1156		return 0;
1157	}
1158
1159	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1160	if (!vlan)
1161		return -ENOMEM;
1162
1163	vlan->vid = vid;
1164	vlan->port = port;
1165	ret = __vlan_add(vlan, flags, extack);
1166	if (ret)
1167		kfree(vlan);
1168	else
1169		*changed = true;
1170
1171	return ret;
1172}
1173
1174/* Must be protected by RTNL.
1175 * Must be called with vid in range from 1 to 4094 inclusive.
1176 */
1177int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1178{
1179	struct net_bridge_vlan *v;
1180
1181	ASSERT_RTNL();
1182
1183	v = br_vlan_find(nbp_vlan_group(port), vid);
1184	if (!v)
1185		return -ENOENT;
1186	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1187	br_fdb_delete_by_port(port->br, port, vid, 0);
1188
1189	return __vlan_del(v);
1190}
1191
1192void nbp_vlan_flush(struct net_bridge_port *port)
1193{
1194	struct net_bridge_vlan_group *vg;
1195
1196	ASSERT_RTNL();
1197
1198	vg = nbp_vlan_group(port);
1199	__vlan_flush(vg);
1200	RCU_INIT_POINTER(port->vlgrp, NULL);
1201	synchronize_rcu();
1202	__vlan_group_free(vg);
1203}
1204
1205void br_vlan_get_stats(const struct net_bridge_vlan *v,
1206		       struct br_vlan_stats *stats)
1207{
1208	int i;
1209
1210	memset(stats, 0, sizeof(*stats));
1211	for_each_possible_cpu(i) {
1212		u64 rxpackets, rxbytes, txpackets, txbytes;
1213		struct br_vlan_stats *cpu_stats;
1214		unsigned int start;
1215
1216		cpu_stats = per_cpu_ptr(v->stats, i);
1217		do {
1218			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1219			rxpackets = cpu_stats->rx_packets;
1220			rxbytes = cpu_stats->rx_bytes;
1221			txbytes = cpu_stats->tx_bytes;
1222			txpackets = cpu_stats->tx_packets;
1223		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1224
1225		stats->rx_packets += rxpackets;
1226		stats->rx_bytes += rxbytes;
1227		stats->tx_bytes += txbytes;
1228		stats->tx_packets += txpackets;
1229	}
1230}
1231
1232static int __br_vlan_get_pvid(const struct net_device *dev,
1233			      struct net_bridge_port *p, u16 *p_pvid)
1234{
1235	struct net_bridge_vlan_group *vg;
1236
1237	if (p)
1238		vg = nbp_vlan_group(p);
1239	else if (netif_is_bridge_master(dev))
1240		vg = br_vlan_group(netdev_priv(dev));
1241	else
1242		return -EINVAL;
1243
1244	*p_pvid = br_get_pvid(vg);
1245	return 0;
1246}
1247
1248int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
1249{
1250	ASSERT_RTNL();
1251
1252	return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid);
1253}
1254EXPORT_SYMBOL_GPL(br_vlan_get_pvid);
1255
1256int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid)
1257{
1258	return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid);
1259}
1260EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu);
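
A hypothetical caller of br_vlan_get_pvid_rcu(): unlike br_vlan_get_pvid() it does not require RTNL, only an RCU read-side section, so it is usable from the data path.

static u16 example_get_pvid_rcu(const struct net_device *dev)
{
	u16 pvid = 0;

	rcu_read_lock();
	if (br_vlan_get_pvid_rcu(dev, &pvid))
		pvid = 0;	/* dev is not a bridge or bridge port */
	rcu_read_unlock();

	return pvid;	/* 0 also means "no PVID configured" */
}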
1261
1262int br_vlan_get_info(const struct net_device *dev, u16 vid,
1263		     struct bridge_vlan_info *p_vinfo)
1264{
1265	struct net_bridge_vlan_group *vg;
1266	struct net_bridge_vlan *v;
1267	struct net_bridge_port *p;
1268
1269	ASSERT_RTNL();
1270	p = br_port_get_check_rtnl(dev);
1271	if (p)
1272		vg = nbp_vlan_group(p);
1273	else if (netif_is_bridge_master(dev))
1274		vg = br_vlan_group(netdev_priv(dev));
1275	else
1276		return -EINVAL;
1277
1278	v = br_vlan_find(vg, vid);
1279	if (!v)
1280		return -ENOENT;
1281
1282	p_vinfo->vid = vid;
1283	p_vinfo->flags = v->flags;
1284	if (vid == br_get_pvid(vg))
1285		p_vinfo->flags |= BRIDGE_VLAN_INFO_PVID;
1286	return 0;
1287}
1288EXPORT_SYMBOL_GPL(br_vlan_get_info);
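
Finally, a hypothetical RTNL-held helper combining the exported getters above to check for the common "PVID + untagged" access-port configuration; the example_ name is not part of the kernel API.

static bool example_pvid_is_untagged(const struct net_device *dev)
{
	struct bridge_vlan_info vinfo;
	u16 pvid;

	ASSERT_RTNL();

	if (br_vlan_get_pvid(dev, &pvid) || !pvid)
		return false;
	if (br_vlan_get_info(dev, pvid, &vinfo))
		return false;

	return !!(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED);
}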
1289
1290static int br_vlan_is_bind_vlan_dev(const struct net_device *dev)
1291{
1292	return is_vlan_dev(dev) &&
1293		!!(vlan_dev_priv(dev)->flags & VLAN_FLAG_BRIDGE_BINDING);
1294}
1295
1296static int br_vlan_is_bind_vlan_dev_fn(struct net_device *dev,
1297				       __always_unused void *data)
1298{
1299	return br_vlan_is_bind_vlan_dev(dev);
1300}
1301
1302static bool br_vlan_has_upper_bind_vlan_dev(struct net_device *dev)
1303{
1304	int found;
1305
1306	rcu_read_lock();
1307	found = netdev_walk_all_upper_dev_rcu(dev, br_vlan_is_bind_vlan_dev_fn,
1308					      NULL);
1309	rcu_read_unlock();
1310
1311	return !!found;
1312}
1313
1314struct br_vlan_bind_walk_data {
1315	u16 vid;
1316	struct net_device *result;
1317};
1318
1319static int br_vlan_match_bind_vlan_dev_fn(struct net_device *dev,
1320					  void *data_in)
1321{
1322	struct br_vlan_bind_walk_data *data = data_in;
1323	int found = 0;
1324
1325	if (br_vlan_is_bind_vlan_dev(dev) &&
1326	    vlan_dev_priv(dev)->vlan_id == data->vid) {
1327		data->result = dev;
1328		found = 1;
1329	}
1330
1331	return found;
1332}
1333
1334static struct net_device *
1335br_vlan_get_upper_bind_vlan_dev(struct net_device *dev, u16 vid)
1336{
1337	struct br_vlan_bind_walk_data data = {
1338		.vid = vid,
1339	};
1340
1341	rcu_read_lock();
1342	netdev_walk_all_upper_dev_rcu(dev, br_vlan_match_bind_vlan_dev_fn,
1343				      &data);
1344	rcu_read_unlock();
1345
1346	return data.result;
1347}
1348
1349static bool br_vlan_is_dev_up(const struct net_device *dev)
1350{
1351	return  !!(dev->flags & IFF_UP) && netif_oper_up(dev);
1352}
1353
1354static void br_vlan_set_vlan_dev_state(const struct net_bridge *br,
1355				       struct net_device *vlan_dev)
1356{
1357	u16 vid = vlan_dev_priv(vlan_dev)->vlan_id;
1358	struct net_bridge_vlan_group *vg;
1359	struct net_bridge_port *p;
1360	bool has_carrier = false;
1361
1362	if (!netif_carrier_ok(br->dev)) {
1363		netif_carrier_off(vlan_dev);
1364		return;
1365	}
1366
1367	list_for_each_entry(p, &br->port_list, list) {
1368		vg = nbp_vlan_group(p);
1369		if (br_vlan_find(vg, vid) && br_vlan_is_dev_up(p->dev)) {
1370			has_carrier = true;
1371			break;
1372		}
1373	}
1374
1375	if (has_carrier)
1376		netif_carrier_on(vlan_dev);
1377	else
1378		netif_carrier_off(vlan_dev);
1379}
1380
1381static void br_vlan_set_all_vlan_dev_state(struct net_bridge_port *p)
1382{
1383	struct net_bridge_vlan_group *vg = nbp_vlan_group(p);
1384	struct net_bridge_vlan *vlan;
1385	struct net_device *vlan_dev;
1386
1387	list_for_each_entry(vlan, &vg->vlan_list, vlist) {
1388		vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev,
1389							   vlan->vid);
1390		if (vlan_dev) {
1391			if (br_vlan_is_dev_up(p->dev)) {
1392				if (netif_carrier_ok(p->br->dev))
1393					netif_carrier_on(vlan_dev);
1394			} else {
1395				br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1396			}
1397		}
1398	}
1399}
1400
1401static void br_vlan_upper_change(struct net_device *dev,
1402				 struct net_device *upper_dev,
1403				 bool linking)
1404{
1405	struct net_bridge *br = netdev_priv(dev);
1406
1407	if (!br_vlan_is_bind_vlan_dev(upper_dev))
1408		return;
1409
1410	if (linking) {
1411		br_vlan_set_vlan_dev_state(br, upper_dev);
1412		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING, true);
1413	} else {
1414		br_opt_toggle(br, BROPT_VLAN_BRIDGE_BINDING,
1415			      br_vlan_has_upper_bind_vlan_dev(dev));
1416	}
1417}
1418
1419struct br_vlan_link_state_walk_data {
1420	struct net_bridge *br;
1421};
1422
1423static int br_vlan_link_state_change_fn(struct net_device *vlan_dev,
1424					void *data_in)
1425{
1426	struct br_vlan_link_state_walk_data *data = data_in;
1427
1428	if (br_vlan_is_bind_vlan_dev(vlan_dev))
1429		br_vlan_set_vlan_dev_state(data->br, vlan_dev);
1430
1431	return 0;
1432}
1433
1434static void br_vlan_link_state_change(struct net_device *dev,
1435				      struct net_bridge *br)
1436{
1437	struct br_vlan_link_state_walk_data data = {
1438		.br = br
1439	};
1440
1441	rcu_read_lock();
1442	netdev_walk_all_upper_dev_rcu(dev, br_vlan_link_state_change_fn,
1443				      &data);
1444	rcu_read_unlock();
1445}
1446
1447/* Must be protected by RTNL. */
1448static void nbp_vlan_set_vlan_dev_state(struct net_bridge_port *p, u16 vid)
1449{
1450	struct net_device *vlan_dev;
1451
1452	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1453		return;
1454
1455	vlan_dev = br_vlan_get_upper_bind_vlan_dev(p->br->dev, vid);
1456	if (vlan_dev)
1457		br_vlan_set_vlan_dev_state(p->br, vlan_dev);
1458}
1459
1460/* Must be protected by RTNL. */
1461int br_vlan_bridge_event(struct net_device *dev, unsigned long event, void *ptr)
1462{
1463	struct netdev_notifier_changeupper_info *info;
1464	struct net_bridge *br = netdev_priv(dev);
1465	bool changed;
1466	int ret = 0;
1467
1468	switch (event) {
1469	case NETDEV_REGISTER:
1470		ret = br_vlan_add(br, br->default_pvid,
1471				  BRIDGE_VLAN_INFO_PVID |
1472				  BRIDGE_VLAN_INFO_UNTAGGED |
1473				  BRIDGE_VLAN_INFO_BRENTRY, &changed, NULL);
1474		break;
1475	case NETDEV_UNREGISTER:
1476		br_vlan_delete(br, br->default_pvid);
1477		break;
1478	case NETDEV_CHANGEUPPER:
1479		info = ptr;
1480		br_vlan_upper_change(dev, info->upper_dev, info->linking);
1481		break;
1482
1483	case NETDEV_CHANGE:
1484	case NETDEV_UP:
1485		if (!br_opt_get(br, BROPT_VLAN_BRIDGE_BINDING))
1486			break;
1487		br_vlan_link_state_change(dev, br);
1488		break;
1489	}
1490
1491	return ret;
1492}
1493
1494/* Must be protected by RTNL. */
1495void br_vlan_port_event(struct net_bridge_port *p, unsigned long event)
1496{
1497	if (!br_opt_get(p->br, BROPT_VLAN_BRIDGE_BINDING))
1498		return;
1499
1500	switch (event) {
1501	case NETDEV_CHANGE:
1502	case NETDEV_DOWN:
1503	case NETDEV_UP:
1504		br_vlan_set_all_vlan_dev_state(p);
1505		break;
1506	}
1507}
v4.10.11
 
   1#include <linux/kernel.h>
   2#include <linux/netdevice.h>
   3#include <linux/rtnetlink.h>
   4#include <linux/slab.h>
   5#include <net/switchdev.h>
   6
   7#include "br_private.h"
   8
   9static inline int br_vlan_cmp(struct rhashtable_compare_arg *arg,
  10			      const void *ptr)
  11{
  12	const struct net_bridge_vlan *vle = ptr;
  13	u16 vid = *(u16 *)arg->key;
  14
  15	return vle->vid != vid;
  16}
  17
  18static const struct rhashtable_params br_vlan_rht_params = {
  19	.head_offset = offsetof(struct net_bridge_vlan, vnode),
  20	.key_offset = offsetof(struct net_bridge_vlan, vid),
  21	.key_len = sizeof(u16),
  22	.nelem_hint = 3,
  23	.locks_mul = 1,
  24	.max_size = VLAN_N_VID,
  25	.obj_cmpfn = br_vlan_cmp,
  26	.automatic_shrinking = true,
  27};
  28
  29static struct net_bridge_vlan *br_vlan_lookup(struct rhashtable *tbl, u16 vid)
  30{
  31	return rhashtable_lookup_fast(tbl, &vid, br_vlan_rht_params);
  32}
  33
  34static void __vlan_add_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  35{
  36	if (vg->pvid == vid)
  37		return;
  38
  39	smp_wmb();
  40	vg->pvid = vid;
  41}
  42
  43static void __vlan_delete_pvid(struct net_bridge_vlan_group *vg, u16 vid)
  44{
  45	if (vg->pvid != vid)
  46		return;
  47
  48	smp_wmb();
  49	vg->pvid = 0;
  50}
  51
  52static void __vlan_add_flags(struct net_bridge_vlan *v, u16 flags)
  53{
  54	struct net_bridge_vlan_group *vg;
  55
  56	if (br_vlan_is_master(v))
  57		vg = br_vlan_group(v->br);
  58	else
  59		vg = nbp_vlan_group(v->port);
  60
  61	if (flags & BRIDGE_VLAN_INFO_PVID)
  62		__vlan_add_pvid(vg, v->vid);
  63	else
  64		__vlan_delete_pvid(vg, v->vid);
  65
  66	if (flags & BRIDGE_VLAN_INFO_UNTAGGED)
  67		v->flags |= BRIDGE_VLAN_INFO_UNTAGGED;
  68	else
  69		v->flags &= ~BRIDGE_VLAN_INFO_UNTAGGED;
  70}
  71
  72static int __vlan_vid_add(struct net_device *dev, struct net_bridge *br,
  73			  u16 vid, u16 flags)
  74{
  75	struct switchdev_obj_port_vlan v = {
  76		.obj.orig_dev = dev,
  77		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
  78		.flags = flags,
  79		.vid_begin = vid,
  80		.vid_end = vid,
  81	};
  82	int err;
  83
  84	/* Try switchdev op first. In case it is not supported, fallback to
  85	 * 8021q add.
  86	 */
  87	err = switchdev_port_obj_add(dev, &v.obj);
  88	if (err == -EOPNOTSUPP)
  89		return vlan_vid_add(dev, br->vlan_proto, vid);
  90	return err;
  91}
  92
  93static void __vlan_add_list(struct net_bridge_vlan *v)
  94{
  95	struct net_bridge_vlan_group *vg;
  96	struct list_head *headp, *hpos;
  97	struct net_bridge_vlan *vent;
  98
  99	if (br_vlan_is_master(v))
 100		vg = br_vlan_group(v->br);
 101	else
 102		vg = nbp_vlan_group(v->port);
 103
 104	headp = &vg->vlan_list;
 105	list_for_each_prev(hpos, headp) {
 106		vent = list_entry(hpos, struct net_bridge_vlan, vlist);
 107		if (v->vid < vent->vid)
 108			continue;
 109		else
 110			break;
 111	}
 112	list_add_rcu(&v->vlist, hpos);
 113}
 114
 115static void __vlan_del_list(struct net_bridge_vlan *v)
 116{
 117	list_del_rcu(&v->vlist);
 118}
 119
 120static int __vlan_vid_del(struct net_device *dev, struct net_bridge *br,
 121			  u16 vid)
 122{
 123	struct switchdev_obj_port_vlan v = {
 124		.obj.orig_dev = dev,
 125		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 126		.vid_begin = vid,
 127		.vid_end = vid,
 128	};
 129	int err;
 130
 131	/* Try switchdev op first. In case it is not supported, fallback to
 132	 * 8021q del.
 133	 */
 134	err = switchdev_port_obj_del(dev, &v.obj);
 135	if (err == -EOPNOTSUPP) {
 136		vlan_vid_del(dev, br->vlan_proto, vid);
 137		return 0;
 138	}
 139	return err;
 140}
 141
  142/* Returns a master vlan, if it didn't exist it gets created. In all cases
  143 * a reference is taken to the master vlan before returning.
 144 */
 145static struct net_bridge_vlan *br_vlan_get_master(struct net_bridge *br, u16 vid)
 146{
 147	struct net_bridge_vlan_group *vg;
 148	struct net_bridge_vlan *masterv;
 149
 150	vg = br_vlan_group(br);
 151	masterv = br_vlan_find(vg, vid);
 152	if (!masterv) {
 153		/* missing global ctx, create it now */
 154		if (br_vlan_add(br, vid, 0))
 155			return NULL;
 156		masterv = br_vlan_find(vg, vid);
 157		if (WARN_ON(!masterv))
 158			return NULL;
 159	}
 160	atomic_inc(&masterv->refcnt);
 161
 162	return masterv;
 163}
 164
 165static void br_master_vlan_rcu_free(struct rcu_head *rcu)
 166{
 167	struct net_bridge_vlan *v;
 168
 169	v = container_of(rcu, struct net_bridge_vlan, rcu);
 170	WARN_ON(!br_vlan_is_master(v));
 171	free_percpu(v->stats);
 172	v->stats = NULL;
 173	kfree(v);
 174}
 175
 176static void br_vlan_put_master(struct net_bridge_vlan *masterv)
 177{
 178	struct net_bridge_vlan_group *vg;
 179
 180	if (!br_vlan_is_master(masterv))
 181		return;
 182
 183	vg = br_vlan_group(masterv->br);
 184	if (atomic_dec_and_test(&masterv->refcnt)) {
 185		rhashtable_remove_fast(&vg->vlan_hash,
 186				       &masterv->vnode, br_vlan_rht_params);
 187		__vlan_del_list(masterv);
 188		call_rcu(&masterv->rcu, br_master_vlan_rcu_free);
 189	}
 190}
 191
 192/* This is the shared VLAN add function which works for both ports and bridge
 193 * devices. There are four possible calls to this function in terms of the
 194 * vlan entry type:
 195 * 1. vlan is being added on a port (no master flags, global entry exists)
 196 * 2. vlan is being added on a bridge (both master and brentry flags)
 197 * 3. vlan is being added on a port, but a global entry didn't exist which
 198 *    is being created right now (master flag set, brentry flag unset), the
 199 *    global entry is used for global per-vlan features, but not for filtering
 200 * 4. same as 3 but with both master and brentry flags set so the entry
 201 *    will be used for filtering in both the port and the bridge
 202 */
 203static int __vlan_add(struct net_bridge_vlan *v, u16 flags)
 204{
 205	struct net_bridge_vlan *masterv = NULL;
 206	struct net_bridge_port *p = NULL;
 207	struct net_bridge_vlan_group *vg;
 208	struct net_device *dev;
 209	struct net_bridge *br;
 210	int err;
 211
 212	if (br_vlan_is_master(v)) {
 213		br = v->br;
 214		dev = br->dev;
 215		vg = br_vlan_group(br);
 216	} else {
 217		p = v->port;
 218		br = p->br;
 219		dev = p->dev;
 220		vg = nbp_vlan_group(p);
 221	}
 222
 223	if (p) {
 224		/* Add VLAN to the device filter if it is supported.
 225		 * This ensures tagged traffic enters the bridge when
 226		 * promiscuous mode is disabled by br_manage_promisc().
 227		 */
 228		err = __vlan_vid_add(dev, br, v->vid, flags);
 229		if (err)
 230			goto out;
 231
 232		/* need to work on the master vlan too */
 233		if (flags & BRIDGE_VLAN_INFO_MASTER) {
 234			err = br_vlan_add(br, v->vid, flags |
 235						      BRIDGE_VLAN_INFO_BRENTRY);
 236			if (err)
 237				goto out_filt;
 238		}
 239
 240		masterv = br_vlan_get_master(br, v->vid);
 241		if (!masterv)
 242			goto out_filt;
 243		v->brvlan = masterv;
 244		v->stats = masterv->stats;
 245	}
 246
 247	/* Add the dev mac and count the vlan only if it's usable */
 248	if (br_vlan_should_use(v)) {
 249		err = br_fdb_insert(br, p, dev->dev_addr, v->vid);
 250		if (err) {
 251			br_err(br, "failed insert local address into bridge forwarding table\n");
 252			goto out_filt;
 253		}
 254		vg->num_vlans++;
 255	}
 256
 257	err = rhashtable_lookup_insert_fast(&vg->vlan_hash, &v->vnode,
 258					    br_vlan_rht_params);
 259	if (err)
 260		goto out_fdb_insert;
 261
 262	__vlan_add_list(v);
 263	__vlan_add_flags(v, flags);
 264out:
 265	return err;
 266
 267out_fdb_insert:
 268	if (br_vlan_should_use(v)) {
 269		br_fdb_find_delete_local(br, p, dev->dev_addr, v->vid);
 270		vg->num_vlans--;
 271	}
 272
 273out_filt:
 274	if (p) {
 275		__vlan_vid_del(dev, br, v->vid);
 276		if (masterv) {
 277			br_vlan_put_master(masterv);
 278			v->brvlan = NULL;
 279		}
 280	}
 281
 282	goto out;
 283}
 284
 285static int __vlan_del(struct net_bridge_vlan *v)
 286{
 287	struct net_bridge_vlan *masterv = v;
 288	struct net_bridge_vlan_group *vg;
 289	struct net_bridge_port *p = NULL;
 290	int err = 0;
 291
 292	if (br_vlan_is_master(v)) {
 293		vg = br_vlan_group(v->br);
 294	} else {
 295		p = v->port;
 296		vg = nbp_vlan_group(v->port);
 297		masterv = v->brvlan;
 298	}
 299
 300	__vlan_delete_pvid(vg, v->vid);
 301	if (p) {
 302		err = __vlan_vid_del(p->dev, p->br, v->vid);
 303		if (err)
 304			goto out;
 305	}
 306
 307	if (br_vlan_should_use(v)) {
 308		v->flags &= ~BRIDGE_VLAN_INFO_BRENTRY;
 309		vg->num_vlans--;
 310	}
 311
 312	if (masterv != v) {
 313		rhashtable_remove_fast(&vg->vlan_hash, &v->vnode,
 314				       br_vlan_rht_params);
 315		__vlan_del_list(v);
 316		kfree_rcu(v, rcu);
 317	}
 318
 319	br_vlan_put_master(masterv);
 320out:
 321	return err;
 322}
 323
 324static void __vlan_group_free(struct net_bridge_vlan_group *vg)
 325{
 326	WARN_ON(!list_empty(&vg->vlan_list));
 327	rhashtable_destroy(&vg->vlan_hash);
 328	kfree(vg);
 329}
 330
 331static void __vlan_flush(struct net_bridge_vlan_group *vg)
 332{
 333	struct net_bridge_vlan *vlan, *tmp;
 334
 335	__vlan_delete_pvid(vg, vg->pvid);
 336	list_for_each_entry_safe(vlan, tmp, &vg->vlan_list, vlist)
 337		__vlan_del(vlan);
 338}
 339
 340struct sk_buff *br_handle_vlan(struct net_bridge *br,
 341			       struct net_bridge_vlan_group *vg,
 342			       struct sk_buff *skb)
 343{
 344	struct br_vlan_stats *stats;
 345	struct net_bridge_vlan *v;
 346	u16 vid;
 347
 348	/* If this packet was not filtered at input, let it pass */
 349	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 350		goto out;
 351
 352	/* At this point, we know that the frame was filtered and contains
 353	 * a valid vlan id.  If the vlan id has untagged flag set,
 354	 * send untagged; otherwise, send tagged.
 355	 */
 356	br_vlan_get_tag(skb, &vid);
 357	v = br_vlan_find(vg, vid);
 358	/* Vlan entry must be configured at this point.  The
 359	 * only exception is the bridge is set in promisc mode and the
 360	 * packet is destined for the bridge device.  In this case
 361	 * pass the packet as is.
 362	 */
 363	if (!v || !br_vlan_should_use(v)) {
 364		if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
 365			goto out;
 366		} else {
 367			kfree_skb(skb);
 368			return NULL;
 369		}
 370	}
 371	if (br->vlan_stats_enabled) {
 372		stats = this_cpu_ptr(v->stats);
 373		u64_stats_update_begin(&stats->syncp);
 374		stats->tx_bytes += skb->len;
 375		stats->tx_packets++;
 376		u64_stats_update_end(&stats->syncp);
 377	}
 378
 379	if (v->flags & BRIDGE_VLAN_INFO_UNTAGGED)
 380		skb->vlan_tci = 0;
 381out:
 382	return skb;
 383}
 384
 385/* Called under RCU */
 386static bool __allowed_ingress(const struct net_bridge *br,
 387			      struct net_bridge_vlan_group *vg,
 388			      struct sk_buff *skb, u16 *vid)
 389{
 390	struct br_vlan_stats *stats;
 391	struct net_bridge_vlan *v;
 392	bool tagged;
 393
 394	BR_INPUT_SKB_CB(skb)->vlan_filtered = true;
 395	/* If vlan tx offload is disabled on bridge device and frame was
 396	 * sent from vlan device on the bridge device, it does not have
 397	 * HW accelerated vlan tag.
 398	 */
 399	if (unlikely(!skb_vlan_tag_present(skb) &&
 400		     skb->protocol == br->vlan_proto)) {
 401		skb = skb_vlan_untag(skb);
 402		if (unlikely(!skb))
 403			return false;
 404	}
 405
 406	if (!br_vlan_get_tag(skb, vid)) {
 407		/* Tagged frame */
 408		if (skb->vlan_proto != br->vlan_proto) {
 409			/* Protocol-mismatch, empty out vlan_tci for new tag */
 410			skb_push(skb, ETH_HLEN);
 411			skb = vlan_insert_tag_set_proto(skb, skb->vlan_proto,
 412							skb_vlan_tag_get(skb));
 413			if (unlikely(!skb))
 414				return false;
 415
 416			skb_pull(skb, ETH_HLEN);
 417			skb_reset_mac_len(skb);
 418			*vid = 0;
 419			tagged = false;
 420		} else {
 421			tagged = true;
 422		}
 423	} else {
 424		/* Untagged frame */
 425		tagged = false;
 426	}
 427
 428	if (!*vid) {
 429		u16 pvid = br_get_pvid(vg);
 430
 431		/* Frame had a tag with VID 0 or did not have a tag.
 432		 * See if pvid is set on this port.  That tells us which
 433		 * vlan untagged or priority-tagged traffic belongs to.
 434		 */
 435		if (!pvid)
 436			goto drop;
 437
 438		/* PVID is set on this port.  Any untagged or priority-tagged
 439		 * ingress frame is considered to belong to this vlan.
 440		 */
 441		*vid = pvid;
 442		if (likely(!tagged))
 443			/* Untagged Frame. */
 444			__vlan_hwaccel_put_tag(skb, br->vlan_proto, pvid);
 445		else
 446			/* Priority-tagged Frame.
  447			 * At this point, we know that skb->vlan_tci had the
  448			 * VLAN_TAG_PRESENT bit set and its VID field was 0x000.
 449			 * We update only VID field and preserve PCP field.
 450			 */
 451			skb->vlan_tci |= pvid;
 452
 453		/* if stats are disabled we can avoid the lookup */
 454		if (!br->vlan_stats_enabled)
 455			return true;
 456	}
 457	v = br_vlan_find(vg, *vid);
 458	if (!v || !br_vlan_should_use(v))
 459		goto drop;
 460
 461	if (br->vlan_stats_enabled) {
 462		stats = this_cpu_ptr(v->stats);
 463		u64_stats_update_begin(&stats->syncp);
 464		stats->rx_bytes += skb->len;
 465		stats->rx_packets++;
 466		u64_stats_update_end(&stats->syncp);
 467	}
 468
 469	return true;
 470
 471drop:
 472	kfree_skb(skb);
 473	return false;
 474}
 475
 476bool br_allowed_ingress(const struct net_bridge *br,
 477			struct net_bridge_vlan_group *vg, struct sk_buff *skb,
 478			u16 *vid)
 479{
 480	/* If VLAN filtering is disabled on the bridge, all packets are
 481	 * permitted.
 482	 */
 483	if (!br->vlan_enabled) {
 484		BR_INPUT_SKB_CB(skb)->vlan_filtered = false;
 485		return true;
 486	}
 487
 488	return __allowed_ingress(br, vg, skb, vid);
 489}
 490
 491/* Called under RCU. */
 492bool br_allowed_egress(struct net_bridge_vlan_group *vg,
 493		       const struct sk_buff *skb)
 494{
 495	const struct net_bridge_vlan *v;
 496	u16 vid;
 497
 498	/* If this packet was not filtered at input, let it pass */
 499	if (!BR_INPUT_SKB_CB(skb)->vlan_filtered)
 500		return true;
 501
 502	br_vlan_get_tag(skb, &vid);
 503	v = br_vlan_find(vg, vid);
 504	if (v && br_vlan_should_use(v))
 505		return true;
 506
 507	return false;
 508}
 509
 510/* Called under RCU */
 511bool br_should_learn(struct net_bridge_port *p, struct sk_buff *skb, u16 *vid)
 512{
 513	struct net_bridge_vlan_group *vg;
 514	struct net_bridge *br = p->br;
 515
 516	/* If filtering was disabled at input, let it pass. */
 517	if (!br->vlan_enabled)
 518		return true;
 519
 520	vg = nbp_vlan_group_rcu(p);
 521	if (!vg || !vg->num_vlans)
 522		return false;
 523
 524	if (!br_vlan_get_tag(skb, vid) && skb->vlan_proto != br->vlan_proto)
 525		*vid = 0;
 526
 527	if (!*vid) {
 528		*vid = br_get_pvid(vg);
 529		if (!*vid)
 530			return false;
 531
 532		return true;
 533	}
 534
 535	if (br_vlan_find(vg, *vid))
 536		return true;
 537
 538	return false;
 539}
 540
 541/* Must be protected by RTNL.
 542 * Must be called with vid in range from 1 to 4094 inclusive.
 543 */
 544int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags)
 545{
 546	struct net_bridge_vlan_group *vg;
 547	struct net_bridge_vlan *vlan;
 548	int ret;
 549
 550	ASSERT_RTNL();
 551
 552	vg = br_vlan_group(br);
 553	vlan = br_vlan_find(vg, vid);
 554	if (vlan) {
 555		if (!br_vlan_is_brentry(vlan)) {
 556			/* Trying to change flags of non-existent bridge vlan */
 557			if (!(flags & BRIDGE_VLAN_INFO_BRENTRY))
 558				return -EINVAL;
 559			/* It was only kept for port vlans, now make it real */
 560			ret = br_fdb_insert(br, NULL, br->dev->dev_addr,
 561					    vlan->vid);
 562			if (ret) {
 563				br_err(br, "failed insert local address into bridge forwarding table\n");
 564				return ret;
 565			}
 566			atomic_inc(&vlan->refcnt);
 567			vlan->flags |= BRIDGE_VLAN_INFO_BRENTRY;
 568			vg->num_vlans++;
 569		}
 570		__vlan_add_flags(vlan, flags);
 571		return 0;
 572	}
 573
 574	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
 575	if (!vlan)
 576		return -ENOMEM;
 577
 578	vlan->stats = netdev_alloc_pcpu_stats(struct br_vlan_stats);
 579	if (!vlan->stats) {
 580		kfree(vlan);
 581		return -ENOMEM;
 582	}
 583	vlan->vid = vid;
 584	vlan->flags = flags | BRIDGE_VLAN_INFO_MASTER;
 585	vlan->flags &= ~BRIDGE_VLAN_INFO_PVID;
 586	vlan->br = br;
 587	if (flags & BRIDGE_VLAN_INFO_BRENTRY)
 588		atomic_set(&vlan->refcnt, 1);
 589	ret = __vlan_add(vlan, flags);
 590	if (ret) {
 591		free_percpu(vlan->stats);
 592		kfree(vlan);
 593	}
 594
 595	return ret;
 596}
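/* Usage sketch (assumed caller, not part of this file): the netlink and
 * sysfs front-ends hold the RTNL lock around the call, e.g.
 *
 *	rtnl_lock();
 *	err = br_vlan_add(br, 100,
 *			  BRIDGE_VLAN_INFO_PVID |
 *			  BRIDGE_VLAN_INFO_UNTAGGED |
 *			  BRIDGE_VLAN_INFO_BRENTRY);
 *	rtnl_unlock();
 *
 * BRIDGE_VLAN_INFO_BRENTRY marks the VLAN as configured on the bridge
 * device itself rather than kept only as context for port VLANs.
 */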
 597
 598/* Must be protected by RTNL.
 599 * Must be called with vid in range from 1 to 4094 inclusive.
 600 */
 601int br_vlan_delete(struct net_bridge *br, u16 vid)
 602{
 603	struct net_bridge_vlan_group *vg;
 604	struct net_bridge_vlan *v;
 605
 606	ASSERT_RTNL();
 607
 608	vg = br_vlan_group(br);
 609	v = br_vlan_find(vg, vid);
 610	if (!v || !br_vlan_is_brentry(v))
 611		return -ENOENT;
 612
 613	br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
 614	br_fdb_delete_by_port(br, NULL, vid, 0);
 615
 616	return __vlan_del(v);
 617}
 618
 619void br_vlan_flush(struct net_bridge *br)
 620{
 621	struct net_bridge_vlan_group *vg;
 622
 623	ASSERT_RTNL();
 624
 625	vg = br_vlan_group(br);
 626	__vlan_flush(vg);
 627	RCU_INIT_POINTER(br->vlgrp, NULL);
 628	synchronize_rcu();
 629	__vlan_group_free(vg);
 630}
 631
 632struct net_bridge_vlan *br_vlan_find(struct net_bridge_vlan_group *vg, u16 vid)
 633{
 634	if (!vg)
 635		return NULL;
 636
 637	return br_vlan_lookup(&vg->vlan_hash, vid);
 638}
 639
 640/* Must be protected by RTNL. */
 641static void recalculate_group_addr(struct net_bridge *br)
 642{
 643	if (br->group_addr_set)
 644		return;
 645
 646	spin_lock_bh(&br->lock);
 647	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q)) {
 648		/* Bridge Group Address */
 649		br->group_addr[5] = 0x00;
 650	} else { /* vlan_enabled && ETH_P_8021AD */
 651		/* Provider Bridge Group Address */
 652		br->group_addr[5] = 0x08;
 653	}
 654	spin_unlock_bh(&br->lock);
 655}
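/* Background note (IEEE 802.1Q/802.1ad, not from this file): with the
 * default group address, the last octet written above selects between
 * 01-80-C2-00-00-00 (Bridge Group Address, plain 802.1Q bridging) and
 * 01-80-C2-00-00-08 (Provider Bridge Group Address, used when the bridge
 * runs 802.1ad / Q-in-Q).
 */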
 656
 657/* Must be protected by RTNL. */
 658void br_recalculate_fwd_mask(struct net_bridge *br)
 659{
 660	if (!br->vlan_enabled || br->vlan_proto == htons(ETH_P_8021Q))
 661		br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;
 662	else /* vlan_enabled && ETH_P_8021AD */
 663		br->group_fwd_mask_required = BR_GROUPFWD_8021AD &
 664					      ~(1u << br->group_addr[5]);
 665}
 666
 667int __br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 668{
 669	struct switchdev_attr attr = {
 670		.orig_dev = br->dev,
 671		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 672		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 673		.u.vlan_filtering = val,
 674	};
 675	int err;
 676
 677	if (br->vlan_enabled == val)
 678		return 0;
 679
 680	err = switchdev_port_attr_set(br->dev, &attr);
 681	if (err && err != -EOPNOTSUPP)
 682		return err;
 683
 684	br->vlan_enabled = val;
 685	br_manage_promisc(br);
 686	recalculate_group_addr(br);
 687	br_recalculate_fwd_mask(br);
 688
 689	return 0;
 690}
 691
 692int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
 693{
 694	return __br_vlan_filter_toggle(br, val);
 695}
 696
 697int __br_vlan_set_proto(struct net_bridge *br, __be16 proto)
 698{
 699	int err = 0;
 700	struct net_bridge_port *p;
 701	struct net_bridge_vlan *vlan;
 702	struct net_bridge_vlan_group *vg;
 703	__be16 oldproto;
 704
 705	if (br->vlan_proto == proto)
 706		return 0;
 707
 708	/* Add VLANs for the new proto to the device filter. */
 709	list_for_each_entry(p, &br->port_list, list) {
 710		vg = nbp_vlan_group(p);
 711		list_for_each_entry(vlan, &vg->vlan_list, vlist) {
 712			err = vlan_vid_add(p->dev, proto, vlan->vid);
 713			if (err)
 714				goto err_filt;
 715		}
 716	}
 717
 718	oldproto = br->vlan_proto;
 719	br->vlan_proto = proto;
 720
 721	recalculate_group_addr(br);
 722	br_recalculate_fwd_mask(br);
 723
 724	/* Delete VLANs for the old proto from the device filter. */
 725	list_for_each_entry(p, &br->port_list, list) {
 726		vg = nbp_vlan_group(p);
 727		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 728			vlan_vid_del(p->dev, oldproto, vlan->vid);
 729	}
 730
 731	return 0;
 732
 733err_filt:
 734	list_for_each_entry_continue_reverse(vlan, &vg->vlan_list, vlist)
 735		vlan_vid_del(p->dev, proto, vlan->vid);
 736
 737	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 738		vg = nbp_vlan_group(p);
 739		list_for_each_entry(vlan, &vg->vlan_list, vlist)
 740			vlan_vid_del(p->dev, proto, vlan->vid);
 741	}
 742
 743	return err;
 744}
 745
 746int br_vlan_set_proto(struct net_bridge *br, unsigned long val)
 747{
 748	if (val != ETH_P_8021Q && val != ETH_P_8021AD)
 749		return -EPROTONOSUPPORT;
 750
 751	return __br_vlan_set_proto(br, htons(val));
 752}
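/* Usage sketch (assumed front-end, not shown here): ETH_P_8021Q is 0x8100
 * and ETH_P_8021AD is 0x88A8, passed in host byte order at this point, so
 * a configuration path such as the bridge sysfs store is expected to do
 * roughly:
 *
 *	err = br_vlan_set_proto(br, 0x88A8);	(switch to 802.1ad)
 */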
 753
 754int br_vlan_set_stats(struct net_bridge *br, unsigned long val)
 755{
 756	switch (val) {
 757	case 0:
 758	case 1:
 759		br->vlan_stats_enabled = val;
 760		break;
 761	default:
 762		return -EINVAL;
 763	}
 764
 765	return 0;
 766}
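/* Note: vlan_stats_enabled is a plain 0/1 toggle. When it is off, the
 * PVID fast path in __allowed_ingress() returns before the per-VLAN
 * lookup and the per-CPU rx counters in this file are never updated.
 */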
 767
 768static bool vlan_default_pvid(struct net_bridge_vlan_group *vg, u16 vid)
 769{
 770	struct net_bridge_vlan *v;
 771
 772	if (vid != vg->pvid)
 773		return false;
 774
 775	v = br_vlan_lookup(&vg->vlan_hash, vid);
 776	if (v && br_vlan_should_use(v) &&
 777	    (v->flags & BRIDGE_VLAN_INFO_UNTAGGED))
 778		return true;
 779
 780	return false;
 781}
 782
 783static void br_vlan_disable_default_pvid(struct net_bridge *br)
 784{
 785	struct net_bridge_port *p;
 786	u16 pvid = br->default_pvid;
 787
 788	/* Disable default_pvid on all ports where it is still
 789	 * configured.
 790	 */
 791	if (vlan_default_pvid(br_vlan_group(br), pvid))
 792		br_vlan_delete(br, pvid);
 793
 794	list_for_each_entry(p, &br->port_list, list) {
 795		if (vlan_default_pvid(nbp_vlan_group(p), pvid))
 796			nbp_vlan_delete(p, pvid);
 797	}
 798
 799	br->default_pvid = 0;
 800}
 801
 802int __br_vlan_set_default_pvid(struct net_bridge *br, u16 pvid)
 803{
 804	const struct net_bridge_vlan *pvent;
 805	struct net_bridge_vlan_group *vg;
 806	struct net_bridge_port *p;
 807	u16 old_pvid;
 808	int err = 0;
 809	unsigned long *changed;
 810
 811	if (!pvid) {
 812		br_vlan_disable_default_pvid(br);
 813		return 0;
 814	}
 815
 816	changed = kcalloc(BITS_TO_LONGS(BR_MAX_PORTS), sizeof(unsigned long),
 817			  GFP_KERNEL);
 818	if (!changed)
 819		return -ENOMEM;
 820
 821	old_pvid = br->default_pvid;
 822
 823	/* Update default_pvid config only if we do not conflict with
 824	 * user configuration.
 825	 */
 826	vg = br_vlan_group(br);
 827	pvent = br_vlan_find(vg, pvid);
 828	if ((!old_pvid || vlan_default_pvid(vg, old_pvid)) &&
 829	    (!pvent || !br_vlan_should_use(pvent))) {
 830		err = br_vlan_add(br, pvid,
 831				  BRIDGE_VLAN_INFO_PVID |
 832				  BRIDGE_VLAN_INFO_UNTAGGED |
 833				  BRIDGE_VLAN_INFO_BRENTRY);
 834		if (err)
 835			goto out;
 836		br_vlan_delete(br, old_pvid);
 837		set_bit(0, changed);
 838	}
 839
 840	list_for_each_entry(p, &br->port_list, list) {
 841		/* Update default_pvid config only if we do not conflict with
 842		 * user configuration.
 843		 */
 844		vg = nbp_vlan_group(p);
 845		if ((old_pvid &&
 846		     !vlan_default_pvid(vg, old_pvid)) ||
 847		    br_vlan_find(vg, pvid))
 848			continue;
 849
 850		err = nbp_vlan_add(p, pvid,
 851				   BRIDGE_VLAN_INFO_PVID |
 852				   BRIDGE_VLAN_INFO_UNTAGGED);
 853		if (err)
 854			goto err_port;
 855		nbp_vlan_delete(p, old_pvid);
 856		set_bit(p->port_no, changed);
 857	}
 858
 859	br->default_pvid = pvid;
 860
 861out:
 862	kfree(changed);
 863	return err;
 864
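	/* Unwind: for every port that was already moved to the new pvid,
	 * restore the old pvid (if there was one) and remove the new one,
	 * then undo the change on the bridge device itself.
	 */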
 865err_port:
 866	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
 867		if (!test_bit(p->port_no, changed))
 868			continue;
 869
 870		if (old_pvid)
 871			nbp_vlan_add(p, old_pvid,
 872				     BRIDGE_VLAN_INFO_PVID |
 873				     BRIDGE_VLAN_INFO_UNTAGGED);
 874		nbp_vlan_delete(p, pvid);
 875	}
 876
 877	if (test_bit(0, changed)) {
 878		if (old_pvid)
 879			br_vlan_add(br, old_pvid,
 880				    BRIDGE_VLAN_INFO_PVID |
 881				    BRIDGE_VLAN_INFO_UNTAGGED |
 882				    BRIDGE_VLAN_INFO_BRENTRY);
 883		br_vlan_delete(br, pvid);
 884	}
 885	goto out;
 886}
 887
 888int br_vlan_set_default_pvid(struct net_bridge *br, unsigned long val)
 889{
 890	u16 pvid = val;
 891	int err = 0;
 892
 893	if (val >= VLAN_VID_MASK)
 894		return -EINVAL;
 895
 896	if (pvid == br->default_pvid)
 897		goto out;
 898
 899	/* Only allow default pvid change when filtering is disabled */
 900	if (br->vlan_enabled) {
 901		pr_info_once("Please disable vlan filtering to change default_pvid\n");
 902		err = -EPERM;
 903		goto out;
 904	}
 905	err = __br_vlan_set_default_pvid(br, pvid);
 906out:
 907	return err;
 908}
 909
 910int br_vlan_init(struct net_bridge *br)
 911{
 912	struct net_bridge_vlan_group *vg;
 913	int ret = -ENOMEM;
 914
 915	vg = kzalloc(sizeof(*vg), GFP_KERNEL);
 916	if (!vg)
 917		goto out;
 918	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
 919	if (ret)
 920		goto err_rhtbl;
 921	INIT_LIST_HEAD(&vg->vlan_list);
 922	br->vlan_proto = htons(ETH_P_8021Q);
 923	br->default_pvid = 1;
 924	rcu_assign_pointer(br->vlgrp, vg);
 925	ret = br_vlan_add(br, 1,
 926			  BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED |
 927			  BRIDGE_VLAN_INFO_BRENTRY);
 928	if (ret)
 929		goto err_vlan_add;
 930
 931out:
 932	return ret;
 933
 934err_vlan_add:
 935	rhashtable_destroy(&vg->vlan_hash);
 936err_rhtbl:
 937	kfree(vg);
 938
 939	goto out;
 940}
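/* Note: br_vlan_init() pre-creates VLAN 1 as the bridge's own untagged
 * PVID (default_pvid = 1); nbp_vlan_init() below mirrors that default on
 * each port as it is added, unless default_pvid has been cleared.
 */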
 941
 942int nbp_vlan_init(struct net_bridge_port *p)
 943{
 944	struct switchdev_attr attr = {
 945		.orig_dev = p->br->dev,
 946		.id = SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING,
 947		.flags = SWITCHDEV_F_SKIP_EOPNOTSUPP,
 948		.u.vlan_filtering = p->br->vlan_enabled,
 949	};
 950	struct net_bridge_vlan_group *vg;
 951	int ret = -ENOMEM;
 952
 953	vg = kzalloc(sizeof(struct net_bridge_vlan_group), GFP_KERNEL);
 954	if (!vg)
 955		goto out;
 956
 957	ret = switchdev_port_attr_set(p->dev, &attr);
 958	if (ret && ret != -EOPNOTSUPP)
 959		goto err_vlan_enabled;
 960
 961	ret = rhashtable_init(&vg->vlan_hash, &br_vlan_rht_params);
 962	if (ret)
 963		goto err_rhtbl;
 964	INIT_LIST_HEAD(&vg->vlan_list);
 965	rcu_assign_pointer(p->vlgrp, vg);
 966	if (p->br->default_pvid) {
 967		ret = nbp_vlan_add(p, p->br->default_pvid,
 968				   BRIDGE_VLAN_INFO_PVID |
 969				   BRIDGE_VLAN_INFO_UNTAGGED);
 970		if (ret)
 971			goto err_vlan_add;
 972	}
 973out:
 974	return ret;
 975
 976err_vlan_add:
 977	RCU_INIT_POINTER(p->vlgrp, NULL);
 978	synchronize_rcu();
 979	rhashtable_destroy(&vg->vlan_hash);
 980err_vlan_enabled:
 981err_rhtbl:
 982	kfree(vg);
 983
 984	goto out;
 985}
 986
 987/* Must be protected by RTNL.
 988 * Must be called with vid in range from 1 to 4094 inclusive.
 989 */
 990int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
 991{
 992	struct switchdev_obj_port_vlan v = {
 993		.obj.orig_dev = port->dev,
 994		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 995		.flags = flags,
 996		.vid_begin = vid,
 997		.vid_end = vid,
 998	};
 999	struct net_bridge_vlan *vlan;
1000	int ret;
1001
1002	ASSERT_RTNL();
1003
1004	vlan = br_vlan_find(nbp_vlan_group(port), vid);
1005	if (vlan) {
1006		/* Pass the flags to the hardware bridge */
1007		ret = switchdev_port_obj_add(port->dev, &v.obj);
1008		if (ret && ret != -EOPNOTSUPP)
1009			return ret;
1010		__vlan_add_flags(vlan, flags);
1011		return 0;
1012	}
1013
1014	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
1015	if (!vlan)
1016		return -ENOMEM;
1017
1018	vlan->vid = vid;
1019	vlan->port = port;
1020	ret = __vlan_add(vlan, flags);
1021	if (ret)
1022		kfree(vlan);
1023
1024	return ret;
1025}
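/* Usage sketch (assumed front-end, not part of this file): a per-port
 * VLAN added from user space, e.g.
 *
 *	bridge vlan add dev eth0 vid 100 pvid untagged
 *
 * is expected to end up here as nbp_vlan_add(p, 100,
 * BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED) under RTNL, with the
 * switchdev object above offloading the VLAN to capable hardware.
 */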
1026
1027/* Must be protected by RTNL.
1028 * Must be called with vid in range from 1 to 4094 inclusive.
1029 */
1030int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
1031{
1032	struct net_bridge_vlan *v;
1033
1034	ASSERT_RTNL();
1035
1036	v = br_vlan_find(nbp_vlan_group(port), vid);
1037	if (!v)
1038		return -ENOENT;
1039	br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
1040	br_fdb_delete_by_port(port->br, port, vid, 0);
1041
1042	return __vlan_del(v);
1043}
1044
1045void nbp_vlan_flush(struct net_bridge_port *port)
1046{
1047	struct net_bridge_vlan_group *vg;
1048
1049	ASSERT_RTNL();
1050
1051	vg = nbp_vlan_group(port);
1052	__vlan_flush(vg);
1053	RCU_INIT_POINTER(port->vlgrp, NULL);
1054	synchronize_rcu();
1055	__vlan_group_free(vg);
1056}
1057
1058void br_vlan_get_stats(const struct net_bridge_vlan *v,
1059		       struct br_vlan_stats *stats)
1060{
1061	int i;
1062
1063	memset(stats, 0, sizeof(*stats));
1064	for_each_possible_cpu(i) {
1065		u64 rxpackets, rxbytes, txpackets, txbytes;
1066		struct br_vlan_stats *cpu_stats;
1067		unsigned int start;
1068
1069		cpu_stats = per_cpu_ptr(v->stats, i);
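		/* Snapshot the 64-bit counters consistently: retry if a
		 * writer bumped the u64_stats seqcount while we were
		 * reading (this matters on 32-bit SMP; on 64-bit the
		 * fetch/retry pair is essentially free).
		 */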
1070		do {
1071			start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
1072			rxpackets = cpu_stats->rx_packets;
1073			rxbytes = cpu_stats->rx_bytes;
1074			txbytes = cpu_stats->tx_bytes;
1075			txpackets = cpu_stats->tx_packets;
1076		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
1077
1078		stats->rx_packets += rxpackets;
1079		stats->rx_bytes += rxbytes;
1080		stats->tx_bytes += txbytes;
1081		stats->tx_packets += txpackets;
1082	}
1083}