Linux Audio

Check our new training course

Loading...
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Handling of a single switch chip, part of a switch fabric
   4 *
   5 * Copyright (c) 2017 Savoir-faire Linux Inc.
   6 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
   7 */
   8
   9#include <linux/if_bridge.h>
  10#include <linux/netdevice.h>
  11#include <linux/notifier.h>
  12#include <linux/if_vlan.h>
  13#include <net/switchdev.h>
  14
  15#include "dsa.h"
  16#include "netlink.h"
  17#include "port.h"
  18#include "slave.h"
  19#include "switch.h"
  20#include "tag_8021q.h"
 
 
  21
  22static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
  23						   unsigned int ageing_time)
  24{
  25	struct dsa_port *dp;
  26
  27	dsa_switch_for_each_port(dp, ds)
  28		if (dp->ageing_time && dp->ageing_time < ageing_time)
  29			ageing_time = dp->ageing_time;
  30
  31	return ageing_time;
  32}
  33
  34static int dsa_switch_ageing_time(struct dsa_switch *ds,
  35				  struct dsa_notifier_ageing_time_info *info)
  36{
  37	unsigned int ageing_time = info->ageing_time;
  38
  39	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
  40		return -ERANGE;
  41
  42	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
  43		return -ERANGE;
  44
  45	/* Program the fastest ageing time in case of multiple bridges */
  46	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
  47
  48	if (ds->ops->set_ageing_time)
  49		return ds->ops->set_ageing_time(ds, ageing_time);
  50
  51	return 0;
  52}
  53
  54static bool dsa_port_mtu_match(struct dsa_port *dp,
  55			       struct dsa_notifier_mtu_info *info)
  56{
  57	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
  58}
  59
  60static int dsa_switch_mtu(struct dsa_switch *ds,
  61			  struct dsa_notifier_mtu_info *info)
  62{
  63	struct dsa_port *dp;
  64	int ret;
  65
  66	if (!ds->ops->port_change_mtu)
  67		return -EOPNOTSUPP;
  68
  69	dsa_switch_for_each_port(dp, ds) {
  70		if (dsa_port_mtu_match(dp, info)) {
  71			ret = ds->ops->port_change_mtu(ds, dp->index,
  72						       info->mtu);
  73			if (ret)
  74				return ret;
  75		}
  76	}
  77
  78	return 0;
  79}
  80
/* Handle a DSA_NOTIFIER_BRIDGE_JOIN event. The targeted port may be local
 * to this switch (use port_bridge_join) or on another switch in the fabric
 * (use crosschip_bridge_join, if implemented, so this switch can update its
 * cross-chip forwarding state).
 */
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		/* Joining is mandatory for the local switch: not
		 * implementing it is a hard error.
		 */
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}
 111
 112static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 113				   struct dsa_notifier_bridge_info *info)
 114{
 115	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
 116		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
 117
 118	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
 119		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
 120						info->dp->ds->index,
 121						info->dp->index,
 122						info->bridge);
 123
 124	return 0;
 125}
 126
/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 * DSA links) that sit between the targeted port on which the notifier was
 * emitted and its dedicated CPU port.
 */
static bool dsa_port_host_address_match(struct dsa_port *dp,
					const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	/* Only switches on the path towards the host can match; on those,
	 * the matching port is the one routing towards the CPU port.
	 */
	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
						     cpu_dp->index);

	return false;
}
 142
 143static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
 144					      const unsigned char *addr, u16 vid,
 145					      struct dsa_db db)
 146{
 147	struct dsa_mac_addr *a;
 148
 149	list_for_each_entry(a, addr_list, list)
 150		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
 151		    dsa_db_equal(&a->db, &db))
 152			return a;
 153
 154	return NULL;
 155}
 156
/* Install an MDB entry on @dp. On shared (CPU/DSA) ports the entry is
 * refcounted, since multiple bridges/databases may request the same
 * address while hardware must be programmed only once.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_add(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		/* Already in hardware; just take another reference. */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	if (err) {
		/* Hardware rejected the entry: don't track it. */
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 201
/* Remove an MDB entry from @dp, honoring the refcount kept on shared
 * (CPU/DSA) ports: hardware is only touched when the last user leaves.
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_mdb_del(ds, port, mdb, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		/* Deleting an entry that was never added: unbalanced call. */
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry; keep it in hardware. */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	if (err) {
		/* Hardware deletion failed; restore the reference so a
		 * later retry stays balanced.
		 */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 240
/* Install an FDB entry on @dp. On shared (CPU/DSA) ports the entry is
 * refcounted so that hardware is programmed only on first use.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_add(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		/* Already in hardware; just take another reference. */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	if (err) {
		/* Hardware rejected the entry: don't track it. */
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 284
/* Remove an FDB entry from @dp, honoring the refcount kept on shared
 * (CPU/DSA) ports: hardware is only touched when the last user leaves.
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_fdb_del(ds, port, addr, vid, db);

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		/* Deleting an entry that was never added: unbalanced call. */
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry; keep it in hardware. */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	if (err) {
		/* Hardware deletion failed; restore the reference so a
		 * later retry stays balanced.
		 */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 322
/* Install a refcounted FDB entry on a LAG. Mirrors dsa_port_do_fdb_add(),
 * except the entry is keyed on the LAG rather than on a single port.
 */
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		/* Already in hardware; just take another reference. */
		refcount_inc(&a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	if (err) {
		/* Hardware rejected the entry: don't track it. */
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
 361
/* Remove a refcounted FDB entry from a LAG; hardware is only touched when
 * the last user leaves. Mirrors dsa_port_do_fdb_del().
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		/* Deleting an entry that was never added: unbalanced call. */
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this entry; keep it in hardware. */
	if (!refcount_dec_and_test(&a->refcount))
		goto out;

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	if (err) {
		/* Hardware deletion failed; restore the reference so a
		 * later retry stays balanced.
		 */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
 394
/* Install a host FDB entry on every upstream-facing port of @ds that sits
 * between the targeted port and its CPU port. When the CPU port is under a
 * LAG, the entry is keyed on the LAG instead of the individual port.
 */
static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_add(dp, info->addr,
							  info->vid, info->db);
			}
			/* Stop at the first failure; already-installed
			 * entries are not rolled back here.
			 */
			if (err)
				break;
		}
	}

	return err;
}
 422
/* Remove a host FDB entry from every upstream-facing port of @ds that sits
 * between the targeted port and its CPU port. Counterpart of
 * dsa_switch_host_fdb_add().
 */
static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_fdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
								info->addr,
								info->vid,
								info->db);
			} else {
				err = dsa_port_do_fdb_del(dp, info->addr,
							  info->vid, info->db);
			}
			/* Stop at the first failure. */
			if (err)
				break;
		}
	}

	return err;
}
 450
 451static int dsa_switch_fdb_add(struct dsa_switch *ds,
 452			      struct dsa_notifier_fdb_info *info)
 453{
 454	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 455	struct dsa_port *dp = dsa_to_port(ds, port);
 456
 457	if (!ds->ops->port_fdb_add)
 458		return -EOPNOTSUPP;
 459
 460	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
 461}
 462
 463static int dsa_switch_fdb_del(struct dsa_switch *ds,
 464			      struct dsa_notifier_fdb_info *info)
 465{
 466	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 467	struct dsa_port *dp = dsa_to_port(ds, port);
 468
 469	if (!ds->ops->port_fdb_del)
 470		return -EOPNOTSUPP;
 471
 472	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
 473}
 474
 475static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
 476				  struct dsa_notifier_lag_fdb_info *info)
 477{
 478	struct dsa_port *dp;
 479
 480	if (!ds->ops->lag_fdb_add)
 481		return -EOPNOTSUPP;
 482
 483	/* Notify switch only if it has a port in this LAG */
 484	dsa_switch_for_each_port(dp, ds)
 485		if (dsa_port_offloads_lag(dp, info->lag))
 486			return dsa_switch_do_lag_fdb_add(ds, info->lag,
 487							 info->addr, info->vid,
 488							 info->db);
 489
 490	return 0;
 491}
 492
 493static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
 494				  struct dsa_notifier_lag_fdb_info *info)
 495{
 496	struct dsa_port *dp;
 497
 498	if (!ds->ops->lag_fdb_del)
 499		return -EOPNOTSUPP;
 500
 501	/* Notify switch only if it has a port in this LAG */
 502	dsa_switch_for_each_port(dp, ds)
 503		if (dsa_port_offloads_lag(dp, info->lag))
 504			return dsa_switch_do_lag_fdb_del(ds, info->lag,
 505							 info->addr, info->vid,
 506							 info->db);
 507
 508	return 0;
 509}
 510
 511static int dsa_switch_lag_change(struct dsa_switch *ds,
 512				 struct dsa_notifier_lag_info *info)
 513{
 514	if (info->dp->ds == ds && ds->ops->port_lag_change)
 515		return ds->ops->port_lag_change(ds, info->dp->index);
 516
 517	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
 518		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
 519						     info->dp->index);
 520
 521	return 0;
 522}
 523
/* Handle a DSA_NOTIFIER_LAG_JOIN event, locally or cross-chip. Unlike
 * bridge join, lack of driver support here is reported as -EOPNOTSUPP so
 * the LAG falls back to software behavior.
 */
static int dsa_switch_lag_join(struct dsa_switch *ds,
			       struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_join)
		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
					      info->info, info->extack);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
						   info->dp->index, info->lag,
						   info->info, info->extack);

	return -EOPNOTSUPP;
}
 538
/* Handle a DSA_NOTIFIER_LAG_LEAVE event, locally or cross-chip. */
static int dsa_switch_lag_leave(struct dsa_switch *ds,
				struct dsa_notifier_lag_info *info)
{
	if (info->dp->ds == ds && ds->ops->port_lag_leave)
		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);

	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
						    info->dp->index, info->lag);

	return -EOPNOTSUPP;
}
 551
 552static int dsa_switch_mdb_add(struct dsa_switch *ds,
 553			      struct dsa_notifier_mdb_info *info)
 554{
 555	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 556	struct dsa_port *dp = dsa_to_port(ds, port);
 557
 558	if (!ds->ops->port_mdb_add)
 559		return -EOPNOTSUPP;
 560
 561	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
 562}
 563
 564static int dsa_switch_mdb_del(struct dsa_switch *ds,
 565			      struct dsa_notifier_mdb_info *info)
 566{
 567	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 568	struct dsa_port *dp = dsa_to_port(ds, port);
 569
 570	if (!ds->ops->port_mdb_del)
 571		return -EOPNOTSUPP;
 572
 573	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
 574}
 575
/* Install a host MDB entry on every upstream-facing port of @ds that sits
 * between the targeted port and its CPU port.
 */
static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
			/* Stop at the first failure. */
			if (err)
				break;
		}
	}

	return err;
}
 595
/* Remove a host MDB entry from every upstream-facing port of @ds that sits
 * between the targeted port and its CPU port.
 */
static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
				   struct dsa_notifier_mdb_info *info)
{
	struct dsa_port *dp;
	int err = 0;

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_address_match(dp, info->dp)) {
			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
			/* Stop at the first failure. */
			if (err)
				break;
		}
	}

	return err;
}
 615
 616/* Port VLANs match on the targeted port and on all DSA ports */
 617static bool dsa_port_vlan_match(struct dsa_port *dp,
 618				struct dsa_notifier_vlan_info *info)
 619{
 620	return dsa_port_is_dsa(dp) || dp == info->dp;
 621}
 622
/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 * (upstream and downstream) of that switch and its upstream switches.
 */
static bool dsa_port_host_vlan_match(struct dsa_port *dp,
				     const struct dsa_port *targeted_dp)
{
	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;

	/* Only switches on the path towards the host can match. */
	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
		return dsa_port_is_dsa(dp) || dp == cpu_dp;

	return false;
}
 636
 637static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
 638				      const struct switchdev_obj_port_vlan *vlan)
 639{
 640	struct dsa_vlan *v;
 641
 642	list_for_each_entry(v, vlan_list, list)
 643		if (v->vid == vlan->vid)
 644			return v;
 645
 646	return NULL;
 647}
 648
/* Install a VLAN on @dp. On shared (CPU/DSA) ports the VLAN is refcounted
 * so that hardware is programmed only on first use.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_add(ds, port, vlan, extack);

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		/* Already in hardware; just take another reference. */
		refcount_inc(&v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	if (err) {
		/* Hardware rejected the VLAN: don't track it. */
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
 699
/* Remove a VLAN from @dp, honoring the refcount kept on shared (CPU/DSA)
 * ports: hardware is only touched when the last user leaves.
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
		return ds->ops->port_vlan_del(ds, port, vlan);

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		/* Deleting a VLAN that was never added: unbalanced call. */
		err = -ENOENT;
		goto out;
	}

	/* Other users still reference this VLAN; keep it in hardware. */
	if (!refcount_dec_and_test(&v->refcount))
		goto out;

	err = ds->ops->port_vlan_del(ds, port, vlan);
	if (err) {
		/* Hardware deletion failed; restore the reference so a
		 * later retry stays balanced.
		 */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
 737
 738static int dsa_switch_vlan_add(struct dsa_switch *ds,
 739			       struct dsa_notifier_vlan_info *info)
 740{
 741	struct dsa_port *dp;
 742	int err;
 743
 744	if (!ds->ops->port_vlan_add)
 745		return -EOPNOTSUPP;
 746
 747	dsa_switch_for_each_port(dp, ds) {
 748		if (dsa_port_vlan_match(dp, info)) {
 749			err = dsa_port_do_vlan_add(dp, info->vlan,
 750						   info->extack);
 751			if (err)
 752				return err;
 753		}
 754	}
 755
 756	return 0;
 757}
 758
 759static int dsa_switch_vlan_del(struct dsa_switch *ds,
 760			       struct dsa_notifier_vlan_info *info)
 761{
 762	struct dsa_port *dp;
 763	int err;
 764
 765	if (!ds->ops->port_vlan_del)
 766		return -EOPNOTSUPP;
 767
 768	dsa_switch_for_each_port(dp, ds) {
 769		if (dsa_port_vlan_match(dp, info)) {
 770			err = dsa_port_do_vlan_del(dp, info->vlan);
 771			if (err)
 772				return err;
 773		}
 774	}
 775
 776	return 0;
 777}
 778
/* Install a host VLAN on every port of @ds matched by
 * dsa_port_host_vlan_match() (the CPU port and DSA links towards the host).
 */
static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_add)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_add(dp, info->vlan,
						   info->extack);
			if (err)
				return err;
		}
	}

	return 0;
}
 799
/* Remove a host VLAN from every port of @ds matched by
 * dsa_port_host_vlan_match(); counterpart of dsa_switch_host_vlan_add().
 */
static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
				    struct dsa_notifier_vlan_info *info)
{
	struct dsa_port *dp;
	int err;

	if (!ds->ops->port_vlan_del)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_host_vlan_match(dp, info->dp)) {
			err = dsa_port_do_vlan_del(dp, info->vlan);
			if (err)
				return err;
		}
	}

	return 0;
}
 819
/* Handle a DSA_NOTIFIER_TAG_PROTO event: switch @ds over to a new tagging
 * protocol, then resynchronize the per-port state that depends on the
 * tagger (cached tag_ops on CPU ports, slave tagger setup, MTU overhead).
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *slave = dp->slave;

		dsa_slave_setup_tagger(slave);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_slave_change_mtu(slave, slave->mtu);
	}

	return 0;
}
 854
/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
 892
/* Handle a DSA_NOTIFIER_TAG_PROTO_DISCONNECT event; see the comment above
 * dsa_switch_connect_tag_proto() for the connect/disconnect contract.
 */
static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}
 908
/* Handle a DSA_NOTIFIER_MASTER_STATE_CHANGE event: inform the driver that
 * a DSA master interface changed operational state. Optional and
 * infallible from the notifier's point of view.
 */
static int
dsa_switch_master_state_change(struct dsa_switch *ds,
			       struct dsa_notifier_master_state_info *info)
{
	if (!ds->ops->master_state_change)
		return 0;

	ds->ops->master_state_change(ds, info->master, info->operational);

	return 0;
}
 920
/* Notifier callback: single entry point for all cross-chip events on this
 * switch. Decodes the event code, dispatches to the matching handler, and
 * converts the result to a notifier return value; a nonzero handler result
 * stops the notifier chain.
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
		err = dsa_switch_master_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
1020
/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 *
 * Return: 0 on success, or the negative errno of the first failing handler.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	/* Unpack the errno that dsa_switch_event() packed, if any. */
	return notifier_to_errno(err);
}
1040
/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 *
 * WARNING: this function is not reliable during probe time, because probing
 * between trees is asynchronous and not all DSA trees might have probed.
 *
 * Return: 0 on success, or the error of the first failing tree.
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		/* Stop at the first tree that reports an error. */
		if (err)
			break;
	}

	return err;
}
1066
/* Subscribe @ds to its tree's cross-chip notifier chain, with
 * dsa_switch_event() as the per-switch dispatcher.
 */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
1073
/* Unsubscribe @ds from its tree's cross-chip notifier chain. Failure is
 * only logged: there is no sensible recovery at teardown time.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Handling of a single switch chip, part of a switch fabric
   4 *
   5 * Copyright (c) 2017 Savoir-faire Linux Inc.
   6 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
   7 */
   8
   9#include <linux/if_bridge.h>
  10#include <linux/netdevice.h>
  11#include <linux/notifier.h>
  12#include <linux/if_vlan.h>
  13#include <net/switchdev.h>
  14
  15#include "dsa.h"
  16#include "netlink.h"
  17#include "port.h"
 
  18#include "switch.h"
  19#include "tag_8021q.h"
  20#include "trace.h"
  21#include "user.h"
  22
/* Return the fastest (smallest nonzero) per-port ageing time of @ds,
 * starting from the candidate @ageing_time.
 */
static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
						   unsigned int ageing_time)
{
	struct dsa_port *dp;

	dsa_switch_for_each_port(dp, ds)
		if (dp->ageing_time && dp->ageing_time < ageing_time)
			ageing_time = dp->ageing_time;

	return ageing_time;
}
  34
/* Handle a DSA_NOTIFIER_AGEING_TIME event: validate the requested FDB
 * ageing time against the driver's limits (0 means unbounded) and program it.
 */
static int dsa_switch_ageing_time(struct dsa_switch *ds,
				  struct dsa_notifier_ageing_time_info *info)
{
	unsigned int ageing_time = info->ageing_time;

	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
		return -ERANGE;

	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
		return -ERANGE;

	/* Program the fastest ageing time in case of multiple bridges */
	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);

	if (ds->ops->set_ageing_time)
		return ds->ops->set_ageing_time(ds, ageing_time);

	return 0;
}
  54
/* MTU changes apply to the targeted port and to all shared (DSA/CPU) ports. */
static bool dsa_port_mtu_match(struct dsa_port *dp,
			       struct dsa_notifier_mtu_info *info)
{
	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
}
  60
/* Handle a DSA_NOTIFIER_MTU event by programming the new MTU on every
 * matching port of @ds; stops at the first driver error.
 */
static int dsa_switch_mtu(struct dsa_switch *ds,
			  struct dsa_notifier_mtu_info *info)
{
	struct dsa_port *dp;
	int ret;

	if (!ds->ops->port_change_mtu)
		return -EOPNOTSUPP;

	dsa_switch_for_each_port(dp, ds) {
		if (dsa_port_mtu_match(dp, info)) {
			ret = ds->ops->port_change_mtu(ds, dp->index,
						       info->mtu);
			if (ret)
				return ret;
		}
	}

	return 0;
}
  81
/* Handle a DSA_NOTIFIER_BRIDGE_JOIN event, on the local switch via
 * port_bridge_join, or cross-chip via crosschip_bridge_join when implemented.
 */
static int dsa_switch_bridge_join(struct dsa_switch *ds,
				  struct dsa_notifier_bridge_info *info)
{
	int err;

	if (info->dp->ds == ds) {
		if (!ds->ops->port_bridge_join)
			return -EOPNOTSUPP;

		err = ds->ops->port_bridge_join(ds, info->dp->index,
						info->bridge,
						&info->tx_fwd_offload,
						info->extack);
		if (err)
			return err;
	}

	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
		err = ds->ops->crosschip_bridge_join(ds,
						     info->dp->ds->dst->index,
						     info->dp->ds->index,
						     info->dp->index,
						     info->bridge,
						     info->extack);
		if (err)
			return err;
	}

	return 0;
}
 112
 113static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 114				   struct dsa_notifier_bridge_info *info)
 115{
 116	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
 117		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
 118
 119	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
 120		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
 121						info->dp->ds->index,
 122						info->dp->index,
 123						info->bridge);
 124
 125	return 0;
 126}
 127
 128/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 129 * DSA links) that sit between the targeted port on which the notifier was
 130 * emitted and its dedicated CPU port.
 131 */
 132static bool dsa_port_host_address_match(struct dsa_port *dp,
 133					const struct dsa_port *targeted_dp)
 134{
 135	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
 136
 137	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
 138		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
 139						     cpu_dp->index);
 140
 141	return false;
 142}
 143
/* Look up a (MAC address, VID, database) tuple in @addr_list.
 * Returns the tracked entry, or NULL if no such entry exists.
 * Callers must hold the lock protecting @addr_list.
 */
static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
					      const unsigned char *addr, u16 vid,
					      struct dsa_db db)
{
	struct dsa_mac_addr *a;

	list_for_each_entry(a, addr_list, list)
		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
		    dsa_db_equal(&a->db, &db))
			return a;

	return NULL;
}
 157
/* Install an MDB entry on @dp. Shared (CPU/DSA) ports refcount duplicate
 * requests from multiple user ports: hardware is only programmed on the
 * first addition of a given (addr, vid, db) tuple.
 */
static int dsa_port_do_mdb_add(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_mdb_add(ds, port, mdb, db);
		trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	/* Already tracked: just take another reference, don't touch hw */
	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		trace_dsa_mdb_add_bump(dp, mdb->addr, mdb->vid, &db,
				       &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first, so a failure leaves no tracking entry */
	err = ds->ops->port_mdb_add(ds, port, mdb, db);
	trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, mdb->addr);
	a->vid = mdb->vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->mdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 209
/* Drop one reference to an MDB entry on @dp. On shared (CPU/DSA) ports,
 * the hardware entry is only removed once the last reference goes away.
 */
static int dsa_port_do_mdb_del(struct dsa_port *dp,
			       const struct switchdev_obj_port_mdb *mdb,
			       struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_mdb_del(ds, port, mdb, db);
		trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
	if (!a) {
		trace_dsa_mdb_del_not_found(dp, mdb->addr, mdb->vid, &db);
		err = -ENOENT;
		goto out;
	}

	/* Other users still hold references; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount)) {
		trace_dsa_mdb_del_drop(dp, mdb->addr, mdb->vid, &db,
				       &a->refcount);
		goto out;
	}

	err = ds->ops->port_mdb_del(ds, port, mdb, db);
	trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
	if (err) {
		/* Deletion failed, the entry is still in hardware: restore
		 * the reference so software state stays in sync.
		 */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 257
/* Install an FDB entry on @dp. Shared (CPU/DSA) ports refcount duplicate
 * requests from multiple user ports: hardware is only programmed on the
 * first addition of a given (addr, vid, db) tuple.
 */
static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
		trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	/* Already tracked: just take another reference, don't touch hw */
	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		trace_dsa_fdb_add_bump(dp, addr, vid, &db, &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first, so a failure leaves no tracking entry */
	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
	trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &dp->fdbs);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 307
/* Drop one reference to an FDB entry on @dp. On shared (CPU/DSA) ports,
 * the hardware entry is only removed once the last reference goes away.
 */
static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			       u16 vid, struct dsa_db db)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a;
	int port = dp->index;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
		trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);

		return err;
	}

	mutex_lock(&dp->addr_lists_lock);

	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
	if (!a) {
		trace_dsa_fdb_del_not_found(dp, addr, vid, &db);
		err = -ENOENT;
		goto out;
	}

	/* Other users still hold references; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount)) {
		trace_dsa_fdb_del_drop(dp, addr, vid, &db, &a->refcount);
		goto out;
	}

	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
	trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
	if (err) {
		/* Deletion failed, the entry is still in hardware: restore
		 * the reference so software state stays in sync.
		 */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&dp->addr_lists_lock);

	return err;
}
 353
/* Install an FDB entry on a LAG, refcounted per (addr, vid, db) tuple in
 * lag->fdbs so duplicate requests program hardware only once.
 * Assumes ds->ops->lag_fdb_add is non-NULL (checked by callers).
 */
static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	/* Already tracked: just take another reference, don't touch hw */
	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (a) {
		refcount_inc(&a->refcount);
		trace_dsa_lag_fdb_add_bump(lag->dev, addr, vid, &db,
					   &a->refcount);
		goto out;
	}

	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (!a) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first, so a failure leaves no tracking entry */
	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
	trace_dsa_lag_fdb_add_hw(lag->dev, addr, vid, &db, err);
	if (err) {
		kfree(a);
		goto out;
	}

	ether_addr_copy(a->addr, addr);
	a->vid = vid;
	a->db = db;
	refcount_set(&a->refcount, 1);
	list_add_tail(&a->list, &lag->fdbs);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
 395
/* Drop one reference to a LAG FDB entry; hardware is only touched when
 * the last reference goes away.
 * Assumes ds->ops->lag_fdb_del is non-NULL (checked by callers).
 */
static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
				     const unsigned char *addr, u16 vid,
				     struct dsa_db db)
{
	struct dsa_mac_addr *a;
	int err = 0;

	mutex_lock(&lag->fdb_lock);

	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
	if (!a) {
		trace_dsa_lag_fdb_del_not_found(lag->dev, addr, vid, &db);
		err = -ENOENT;
		goto out;
	}

	/* Other users still hold references; keep the hardware entry */
	if (!refcount_dec_and_test(&a->refcount)) {
		trace_dsa_lag_fdb_del_drop(lag->dev, addr, vid, &db,
					   &a->refcount);
		goto out;
	}

	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
	trace_dsa_lag_fdb_del_hw(lag->dev, addr, vid, &db, err);
	if (err) {
		/* Deletion failed, the entry is still in hardware: restore
		 * the reference so software state stays in sync.
		 */
		refcount_set(&a->refcount, 1);
		goto out;
	}

	list_del(&a->list);
	kfree(a);

out:
	mutex_unlock(&lag->fdb_lock);

	return err;
}
 433
 434static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
 435				   struct dsa_notifier_fdb_info *info)
 436{
 437	struct dsa_port *dp;
 438	int err = 0;
 439
 440	if (!ds->ops->port_fdb_add)
 441		return -EOPNOTSUPP;
 442
 443	dsa_switch_for_each_port(dp, ds) {
 444		if (dsa_port_host_address_match(dp, info->dp)) {
 445			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
 446				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
 447								info->addr,
 448								info->vid,
 449								info->db);
 450			} else {
 451				err = dsa_port_do_fdb_add(dp, info->addr,
 452							  info->vid, info->db);
 453			}
 454			if (err)
 455				break;
 456		}
 457	}
 458
 459	return err;
 460}
 461
 462static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
 463				   struct dsa_notifier_fdb_info *info)
 464{
 465	struct dsa_port *dp;
 466	int err = 0;
 467
 468	if (!ds->ops->port_fdb_del)
 469		return -EOPNOTSUPP;
 470
 471	dsa_switch_for_each_port(dp, ds) {
 472		if (dsa_port_host_address_match(dp, info->dp)) {
 473			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
 474				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
 475								info->addr,
 476								info->vid,
 477								info->db);
 478			} else {
 479				err = dsa_port_do_fdb_del(dp, info->addr,
 480							  info->vid, info->db);
 481			}
 482			if (err)
 483				break;
 484		}
 485	}
 486
 487	return err;
 488}
 489
/* Install an FDB entry on the local port of @ds that routes towards the
 * targeted port, which may reside on a different switch in the fabric.
 */
static int dsa_switch_fdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
}
 501
/* Remove an FDB entry from the local port of @ds that routes towards the
 * targeted port, which may reside on a different switch in the fabric.
 */
static int dsa_switch_fdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_fdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_fdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
}
 513
 514static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
 515				  struct dsa_notifier_lag_fdb_info *info)
 516{
 517	struct dsa_port *dp;
 518
 519	if (!ds->ops->lag_fdb_add)
 520		return -EOPNOTSUPP;
 521
 522	/* Notify switch only if it has a port in this LAG */
 523	dsa_switch_for_each_port(dp, ds)
 524		if (dsa_port_offloads_lag(dp, info->lag))
 525			return dsa_switch_do_lag_fdb_add(ds, info->lag,
 526							 info->addr, info->vid,
 527							 info->db);
 528
 529	return 0;
 530}
 531
 532static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
 533				  struct dsa_notifier_lag_fdb_info *info)
 534{
 535	struct dsa_port *dp;
 536
 537	if (!ds->ops->lag_fdb_del)
 538		return -EOPNOTSUPP;
 539
 540	/* Notify switch only if it has a port in this LAG */
 541	dsa_switch_for_each_port(dp, ds)
 542		if (dsa_port_offloads_lag(dp, info->lag))
 543			return dsa_switch_do_lag_fdb_del(ds, info->lag,
 544							 info->addr, info->vid,
 545							 info->db);
 546
 547	return 0;
 548}
 549
 550static int dsa_switch_lag_change(struct dsa_switch *ds,
 551				 struct dsa_notifier_lag_info *info)
 552{
 553	if (info->dp->ds == ds && ds->ops->port_lag_change)
 554		return ds->ops->port_lag_change(ds, info->dp->index);
 555
 556	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
 557		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
 558						     info->dp->index);
 559
 560	return 0;
 561}
 562
 563static int dsa_switch_lag_join(struct dsa_switch *ds,
 564			       struct dsa_notifier_lag_info *info)
 565{
 566	if (info->dp->ds == ds && ds->ops->port_lag_join)
 567		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
 568					      info->info, info->extack);
 569
 570	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
 571		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
 572						   info->dp->index, info->lag,
 573						   info->info, info->extack);
 574
 575	return -EOPNOTSUPP;
 576}
 577
 578static int dsa_switch_lag_leave(struct dsa_switch *ds,
 579				struct dsa_notifier_lag_info *info)
 580{
 581	if (info->dp->ds == ds && ds->ops->port_lag_leave)
 582		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
 583
 584	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
 585		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
 586						    info->dp->index, info->lag);
 587
 588	return -EOPNOTSUPP;
 589}
 590
/* Install an MDB entry on the local port of @ds that routes towards the
 * targeted port, which may reside on a different switch in the fabric.
 */
static int dsa_switch_mdb_add(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_add)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
}
 602
/* Remove an MDB entry from the local port of @ds that routes towards the
 * targeted port, which may reside on a different switch in the fabric.
 */
static int dsa_switch_mdb_del(struct dsa_switch *ds,
			      struct dsa_notifier_mdb_info *info)
{
	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
	struct dsa_port *dp = dsa_to_port(ds, port);

	if (!ds->ops->port_mdb_del)
		return -EOPNOTSUPP;

	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
}
 614
 615static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
 616				   struct dsa_notifier_mdb_info *info)
 617{
 618	struct dsa_port *dp;
 619	int err = 0;
 620
 621	if (!ds->ops->port_mdb_add)
 622		return -EOPNOTSUPP;
 623
 624	dsa_switch_for_each_port(dp, ds) {
 625		if (dsa_port_host_address_match(dp, info->dp)) {
 626			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
 627			if (err)
 628				break;
 629		}
 630	}
 631
 632	return err;
 633}
 634
 635static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
 636				   struct dsa_notifier_mdb_info *info)
 637{
 638	struct dsa_port *dp;
 639	int err = 0;
 640
 641	if (!ds->ops->port_mdb_del)
 642		return -EOPNOTSUPP;
 643
 644	dsa_switch_for_each_port(dp, ds) {
 645		if (dsa_port_host_address_match(dp, info->dp)) {
 646			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
 647			if (err)
 648				break;
 649		}
 650	}
 651
 652	return err;
 653}
 654
 655/* Port VLANs match on the targeted port and on all DSA ports */
 656static bool dsa_port_vlan_match(struct dsa_port *dp,
 657				struct dsa_notifier_vlan_info *info)
 658{
 659	return dsa_port_is_dsa(dp) || dp == info->dp;
 660}
 661
 662/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 663 * (upstream and downstream) of that switch and its upstream switches.
 664 */
 665static bool dsa_port_host_vlan_match(struct dsa_port *dp,
 666				     const struct dsa_port *targeted_dp)
 667{
 668	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
 669
 670	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
 671		return dsa_port_is_dsa(dp) || dp == cpu_dp;
 672
 673	return false;
 674}
 675
/* Look up @vlan (by VID) in @vlan_list. Returns the tracked entry, or NULL
 * if none exists. Callers must hold the lock protecting @vlan_list.
 */
struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
			       const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_vlan *v;

	list_for_each_entry(v, vlan_list, list)
		if (v->vid == vlan->vid)
			return v;

	return NULL;
}
 687
/* Install a VLAN on @dp. Shared (CPU/DSA) ports refcount duplicate
 * requests from multiple user ports: hardware is only programmed on the
 * first addition of a given VID.
 */
static int dsa_port_do_vlan_add(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports. */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_vlan_add(ds, port, vlan, extack);
		trace_dsa_vlan_add_hw(dp, vlan, err);

		return err;
	}

	/* No need to propagate on shared ports the existing VLANs that were
	 * re-notified after just the flags have changed. This would cause a
	 * refcount bump which we need to avoid, since it unbalances the
	 * additions with the deletions.
	 */
	if (vlan->changed)
		return 0;

	mutex_lock(&dp->vlans_lock);

	/* Already tracked: just take another reference, don't touch hw */
	v = dsa_vlan_find(&dp->vlans, vlan);
	if (v) {
		refcount_inc(&v->refcount);
		trace_dsa_vlan_add_bump(dp, vlan, &v->refcount);
		goto out;
	}

	v = kzalloc(sizeof(*v), GFP_KERNEL);
	if (!v) {
		err = -ENOMEM;
		goto out;
	}

	/* Program hardware first, so a failure leaves no tracking entry */
	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
	trace_dsa_vlan_add_hw(dp, vlan, err);
	if (err) {
		kfree(v);
		goto out;
	}

	v->vid = vlan->vid;
	refcount_set(&v->refcount, 1);
	list_add_tail(&v->list, &dp->vlans);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
 744
/* Drop one reference to a VLAN on @dp. On shared (CPU/DSA) ports, the
 * hardware entry is only removed once the last reference goes away.
 */
static int dsa_port_do_vlan_del(struct dsa_port *dp,
				const struct switchdev_obj_port_vlan *vlan)
{
	struct dsa_switch *ds = dp->ds;
	int port = dp->index;
	struct dsa_vlan *v;
	int err = 0;

	/* No need to bother with refcounting for user ports */
	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
		err = ds->ops->port_vlan_del(ds, port, vlan);
		trace_dsa_vlan_del_hw(dp, vlan, err);

		return err;
	}

	mutex_lock(&dp->vlans_lock);

	v = dsa_vlan_find(&dp->vlans, vlan);
	if (!v) {
		trace_dsa_vlan_del_not_found(dp, vlan);
		err = -ENOENT;
		goto out;
	}

	/* Other users still hold references; keep the hardware entry */
	if (!refcount_dec_and_test(&v->refcount)) {
		trace_dsa_vlan_del_drop(dp, vlan, &v->refcount);
		goto out;
	}

	err = ds->ops->port_vlan_del(ds, port, vlan);
	trace_dsa_vlan_del_hw(dp, vlan, err);
	if (err) {
		/* Deletion failed, the entry is still in hardware: restore
		 * the reference so software state stays in sync.
		 */
		refcount_set(&v->refcount, 1);
		goto out;
	}

	list_del(&v->list);
	kfree(v);

out:
	mutex_unlock(&dp->vlans_lock);

	return err;
}
 790
 791static int dsa_switch_vlan_add(struct dsa_switch *ds,
 792			       struct dsa_notifier_vlan_info *info)
 793{
 794	struct dsa_port *dp;
 795	int err;
 796
 797	if (!ds->ops->port_vlan_add)
 798		return -EOPNOTSUPP;
 799
 800	dsa_switch_for_each_port(dp, ds) {
 801		if (dsa_port_vlan_match(dp, info)) {
 802			err = dsa_port_do_vlan_add(dp, info->vlan,
 803						   info->extack);
 804			if (err)
 805				return err;
 806		}
 807	}
 808
 809	return 0;
 810}
 811
 812static int dsa_switch_vlan_del(struct dsa_switch *ds,
 813			       struct dsa_notifier_vlan_info *info)
 814{
 815	struct dsa_port *dp;
 816	int err;
 817
 818	if (!ds->ops->port_vlan_del)
 819		return -EOPNOTSUPP;
 820
 821	dsa_switch_for_each_port(dp, ds) {
 822		if (dsa_port_vlan_match(dp, info)) {
 823			err = dsa_port_do_vlan_del(dp, info->vlan);
 824			if (err)
 825				return err;
 826		}
 827	}
 828
 829	return 0;
 830}
 831
 832static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
 833				    struct dsa_notifier_vlan_info *info)
 834{
 835	struct dsa_port *dp;
 836	int err;
 837
 838	if (!ds->ops->port_vlan_add)
 839		return -EOPNOTSUPP;
 840
 841	dsa_switch_for_each_port(dp, ds) {
 842		if (dsa_port_host_vlan_match(dp, info->dp)) {
 843			err = dsa_port_do_vlan_add(dp, info->vlan,
 844						   info->extack);
 845			if (err)
 846				return err;
 847		}
 848	}
 849
 850	return 0;
 851}
 852
 853static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
 854				    struct dsa_notifier_vlan_info *info)
 855{
 856	struct dsa_port *dp;
 857	int err;
 858
 859	if (!ds->ops->port_vlan_del)
 860		return -EOPNOTSUPP;
 861
 862	dsa_switch_for_each_port(dp, ds) {
 863		if (dsa_port_host_vlan_match(dp, info->dp)) {
 864			err = dsa_port_do_vlan_del(dp, info->vlan);
 865			if (err)
 866				return err;
 867		}
 868	}
 869
 870	return 0;
 871}
 872
/* Switch @ds over to a new tagging protocol. The only step that can fail
 * (the driver call) is done first; everything after it must succeed, so
 * the fabric never ends up half-converted.
 */
static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
				       struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	struct dsa_port *dp, *cpu_dp;
	int err;

	if (!ds->ops->change_tag_protocol)
		return -EOPNOTSUPP;

	ASSERT_RTNL();

	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
	if (err)
		return err;

	/* Record the new tagger on every CPU port of this switch */
	dsa_switch_for_each_cpu_port(cpu_dp, ds)
		dsa_port_set_tag_protocol(cpu_dp, tag_ops);

	/* Now that changing the tag protocol can no longer fail, let's update
	 * the remaining bits which are "duplicated for faster access", and the
	 * bits that depend on the tagger, such as the MTU.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		struct net_device *user = dp->user;

		dsa_user_setup_tagger(user);

		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
		dsa_user_change_mtu(user, user->mtu);
	}

	return 0;
}
 907
/* We use the same cross-chip notifiers to inform both the tagger side, as well
 * as the switch side, of connection and disconnection events.
 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 * switch side doesn't support connecting to this tagger, and therefore, the
 * fact that we don't disconnect the tagger side doesn't constitute a memory
 * leak: the tagger will still operate with persistent per-switch memory, just
 * with the switch side unconnected to it. What does constitute a hard error is
 * when the switch side supports connecting but fails.
 */
static int
dsa_switch_connect_tag_proto(struct dsa_switch *ds,
			     struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;
	int err;

	/* Notify the new tagger about the connection to this switch */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Per the comment above: tagger stays connected, not a leak */
	if (!ds->ops->connect_tag_protocol)
		return -EOPNOTSUPP;

	/* Notify the switch about the connection to the new tagger */
	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
	if (err) {
		/* Revert the new tagger's connection to this tree */
		if (tag_ops->disconnect)
			tag_ops->disconnect(ds);
		return err;
	}

	return 0;
}
 945
/* Counterpart of dsa_switch_connect_tag_proto: only the tagger side needs
 * to be told, and only if it actually allocated per-switch state.
 */
static int
dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
				struct dsa_notifier_tag_proto_info *info)
{
	const struct dsa_device_ops *tag_ops = info->tag_ops;

	/* Notify the tagger about the disconnection from this switch */
	if (tag_ops->disconnect && ds->tagger_data)
		tag_ops->disconnect(ds);

	/* No need to notify the switch, since it shouldn't have any
	 * resources to tear down
	 */
	return 0;
}
 961
 962static int
 963dsa_switch_conduit_state_change(struct dsa_switch *ds,
 964				struct dsa_notifier_conduit_state_info *info)
 965{
 966	if (!ds->ops->conduit_state_change)
 967		return 0;
 968
 969	ds->ops->conduit_state_change(ds, info->conduit, info->operational);
 970
 971	return 0;
 972}
 973
/* Per-switch notifier callback: dispatch a DSA_NOTIFIER_* event to its
 * handler. A non-zero handler result stops the notifier chain, so the
 * remaining switches of the tree are not notified.
 */
static int dsa_switch_event(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
	int err;

	switch (event) {
	case DSA_NOTIFIER_AGEING_TIME:
		err = dsa_switch_ageing_time(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_JOIN:
		err = dsa_switch_bridge_join(ds, info);
		break;
	case DSA_NOTIFIER_BRIDGE_LEAVE:
		err = dsa_switch_bridge_leave(ds, info);
		break;
	case DSA_NOTIFIER_FDB_ADD:
		err = dsa_switch_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_FDB_DEL:
		err = dsa_switch_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_ADD:
		err = dsa_switch_host_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_FDB_DEL:
		err = dsa_switch_host_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_ADD:
		err = dsa_switch_lag_fdb_add(ds, info);
		break;
	case DSA_NOTIFIER_LAG_FDB_DEL:
		err = dsa_switch_lag_fdb_del(ds, info);
		break;
	case DSA_NOTIFIER_LAG_CHANGE:
		err = dsa_switch_lag_change(ds, info);
		break;
	case DSA_NOTIFIER_LAG_JOIN:
		err = dsa_switch_lag_join(ds, info);
		break;
	case DSA_NOTIFIER_LAG_LEAVE:
		err = dsa_switch_lag_leave(ds, info);
		break;
	case DSA_NOTIFIER_MDB_ADD:
		err = dsa_switch_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_MDB_DEL:
		err = dsa_switch_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_ADD:
		err = dsa_switch_host_mdb_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_MDB_DEL:
		err = dsa_switch_host_mdb_del(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_ADD:
		err = dsa_switch_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_VLAN_DEL:
		err = dsa_switch_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_ADD:
		err = dsa_switch_host_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_HOST_VLAN_DEL:
		err = dsa_switch_host_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_MTU:
		err = dsa_switch_mtu(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO:
		err = dsa_switch_change_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
		err = dsa_switch_connect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
		err = dsa_switch_disconnect_tag_proto(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
		err = dsa_switch_tag_8021q_vlan_add(ds, info);
		break;
	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
		err = dsa_switch_tag_8021q_vlan_del(ds, info);
		break;
	case DSA_NOTIFIER_CONDUIT_STATE_CHANGE:
		err = dsa_switch_conduit_state_change(ds, info);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (err)
		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
			event, err);

	return notifier_from_errno(err);
}
1073
1074/**
1075 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
1076 * @dst: collection of struct dsa_switch devices to notify.
1077 * @e: event, must be of type DSA_NOTIFIER_*
1078 * @v: event-specific value.
1079 *
1080 * Given a struct dsa_switch_tree, this can be used to run a function once for
1081 * each member DSA switch. The other alternative of traversing the tree is only
1082 * through its ports list, which does not uniquely list the switches.
1083 */
1084int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
1085{
1086	struct raw_notifier_head *nh = &dst->nh;
1087	int err;
1088
1089	err = raw_notifier_call_chain(nh, e, v);
1090
1091	return notifier_to_errno(err);
1092}
1093
1094/**
1095 * dsa_broadcast - Notify all DSA trees in the system.
1096 * @e: event, must be of type DSA_NOTIFIER_*
1097 * @v: event-specific value.
1098 *
1099 * Can be used to notify the switching fabric of events such as cross-chip
1100 * bridging between disjoint trees (such as islands of tagger-compatible
1101 * switches bridged by an incompatible middle switch).
1102 *
1103 * WARNING: this function is not reliable during probe time, because probing
1104 * between trees is asynchronous and not all DSA trees might have probed.
1105 */
1106int dsa_broadcast(unsigned long e, void *v)
1107{
1108	struct dsa_switch_tree *dst;
1109	int err = 0;
1110
1111	list_for_each_entry(dst, &dsa_tree_list, list) {
1112		err = dsa_tree_notify(dst, e, v);
1113		if (err)
1114			break;
1115	}
1116
1117	return err;
1118}
1119
/* Subscribe @ds to the cross-chip notifier chain of its tree, so it
 * receives DSA_NOTIFIER_* events via dsa_switch_event().
 */
int dsa_switch_register_notifier(struct dsa_switch *ds)
{
	ds->nb.notifier_call = dsa_switch_event;

	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
}
1126
/* Remove @ds from its tree's notifier chain. Failure is only logged: there
 * is nothing meaningful a caller on the teardown path could do about it.
 */
void dsa_switch_unregister_notifier(struct dsa_switch *ds)
{
	int err;

	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
	if (err)
		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
}