Note: File does not exist in v6.13.7.
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
 * Copyright (c) 2008-2009 Marvell Semiconductor
 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <net/devlink.h>

#include "dsa_priv.h"

static DEFINE_MUTEX(dsa2_mutex);
LIST_HEAD(dsa_tree_list);

/**
 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
 * @dst: collection of struct dsa_switch devices to notify.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Given a struct dsa_switch_tree, this can be used to run a function once for
 * each member DSA switch. The other alternative of traversing the tree is only
 * through its ports list, which does not uniquely list the switches.
 */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
{
	struct raw_notifier_head *nh = &dst->nh;
	int err;

	err = raw_notifier_call_chain(nh, e, v);

	return notifier_to_errno(err);
}
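
/* Illustrative sketch (not part of the original file): a caller typically
 * wraps an event-specific struct and passes it down the notifier chain.
 * Assuming the DSA_NOTIFIER_MTU event and its info structure from
 * dsa_priv.h, a cross-chip MTU change could be propagated roughly as:
 *
 *	struct dsa_notifier_mtu_info info = {
 *		.sw_index = dp->ds->index,
 *		.port = dp->index,
 *		.mtu = new_mtu,
 *	};
 *
 *	err = dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 */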

/**
 * dsa_broadcast - Notify all DSA trees in the system.
 * @e: event, must be of type DSA_NOTIFIER_*
 * @v: event-specific value.
 *
 * Can be used to notify the switching fabric of events such as cross-chip
 * bridging between disjoint trees (such as islands of tagger-compatible
 * switches bridged by an incompatible middle switch).
 */
int dsa_broadcast(unsigned long e, void *v)
{
	struct dsa_switch_tree *dst;
	int err = 0;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		err = dsa_tree_notify(dst, e, v);
		if (err)
			break;
	}

	return err;
}

/**
 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
 * @dst: Tree in which to record the mapping.
 * @lag: Netdev that is to be mapped to an ID.
 *
 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
 * two spaces. The size of the mapping space is determined by the
 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
 * it unset if it is not needed, in which case these functions become
 * no-ops.
 */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
{
	unsigned int id;

	if (dsa_lag_id(dst, lag) >= 0)
		/* Already mapped */
		return;

	for (id = 0; id < dst->lags_len; id++) {
		if (!dsa_lag_dev(dst, id)) {
			dst->lags[id] = lag;
			return;
		}
	}

	/* No IDs left, which is OK. Some drivers do not need it. The
	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
	 * returns an error for this device when joining the LAG. The
	 * driver can then return -EOPNOTSUPP back to DSA, which will
	 * fall back to a software LAG.
	 */
}
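
/* Illustrative sketch (not part of the original file): a driver that
 * offloads LAGs advertises its hardware limit before this code runs,
 * typically from its .setup callback (hypothetical driver name):
 *
 *	static int foo_setup(struct dsa_switch *ds)
 *	{
 *		ds->num_lag_ids = 16;	// hardware supports 16 LAGs
 *		...
 *	}
 *
 * Drivers that leave ds->num_lag_ids at zero never receive a hardware LAG
 * ID, and DSA falls back to a software LAG as described above.
 */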

/**
 * dsa_lag_unmap() - Remove a LAG ID mapping
 * @dst: Tree in which the mapping is recorded.
 * @lag: Netdev that was mapped.
 *
 * As there may be multiple users of the mapping, it is only removed
 * if there are no other references to it.
 */
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
{
	struct dsa_port *dp;
	unsigned int id;

	dsa_lag_foreach_port(dp, dst, lag)
		/* There are remaining users of this mapping */
		return;

	dsa_lags_foreach_id(id, dst) {
		if (dsa_lag_dev(dst, id) == lag) {
			dst->lags[id] = NULL;
			break;
		}
	}
}

struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
{
	struct dsa_switch_tree *dst;
	struct dsa_port *dp;

	list_for_each_entry(dst, &dsa_tree_list, list) {
		if (dst->index != tree_index)
			continue;

		list_for_each_entry(dp, &dst->ports, list) {
			if (dp->ds->index != sw_index)
				continue;

			return dp->ds;
		}
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(dsa_switch_find);

static struct dsa_switch_tree *dsa_tree_find(int index)
{
	struct dsa_switch_tree *dst;

	list_for_each_entry(dst, &dsa_tree_list, list)
		if (dst->index == index)
			return dst;

	return NULL;
}

static struct dsa_switch_tree *dsa_tree_alloc(int index)
{
	struct dsa_switch_tree *dst;

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return NULL;

	dst->index = index;

	INIT_LIST_HEAD(&dst->rtable);

	INIT_LIST_HEAD(&dst->ports);

	INIT_LIST_HEAD(&dst->list);
	list_add_tail(&dst->list, &dsa_tree_list);

	kref_init(&dst->refcount);

	return dst;
}

static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}

static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}

static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (dst)
		return dsa_tree_get(dst);
	else
		return dsa_tree_alloc(index);
}

static void dsa_tree_release(struct kref *ref)
{
	struct dsa_switch_tree *dst;

	dst = container_of(ref, struct dsa_switch_tree, refcount);

	dsa_tree_free(dst);
}

static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}

static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
						   struct device_node *dn)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->dn == dn)
			return dp;

	return NULL;
}

static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
				       struct dsa_port *link_dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst;
	struct dsa_link *dl;

	dst = ds->dst;

	list_for_each_entry(dl, &dst->rtable, list)
		if (dl->dp == dp && dl->link_dp == link_dp)
			return dl;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	dl->dp = dp;
	dl->link_dp = link_dp;

	INIT_LIST_HEAD(&dl->list);
	list_add_tail(&dl->list, &dst->rtable);

	return dl;
}

static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
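
/* Illustrative sketch (not part of the original file): in the device tree,
 * a DSA port names the ports it is wired to via "link" phandles, which is
 * what the iterator above walks. A hypothetical two-switch fragment:
 *
 *	switch0_port5: port@5 {
 *		reg = <5>;
 *		link = <&switch1_port4>;
 *	};
 *
 * Each resolved phandle is recorded as a struct dsa_link in the tree's
 * routing table (dst->rtable).
 */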

static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
{
	bool complete = true;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp)) {
			complete = dsa_port_setup_routing_table(dp);
			if (!complete)
				break;
		}
	}

	return complete;
}

static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			return dp;

	return NULL;
}

static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *cpu_dp, *dp;

	cpu_dp = dsa_tree_find_first_cpu(dst);
	if (!cpu_dp) {
		pr_err("DSA: tree %d has no CPU port\n", dst->index);
		return -EINVAL;
	}

	/* Assign the default CPU port to all ports of the fabric */
	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = cpu_dp;

	return 0;
}

static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
			dp->cpu_dp = NULL;
}

static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}

static int dsa_port_devlink_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch_tree *dst = dp->ds->dst;
	struct devlink_port_attrs attrs = {};
	struct devlink *dl = dp->ds->devlink;
	const unsigned char *id;
	unsigned char len;
	int err;

	id = (const unsigned char *)&dst->index;
	len = sizeof(dst->index);

	attrs.phys.port_number = dp->index;
	memcpy(attrs.switch_id.id, id, len);
	attrs.switch_id.id_len = len;
	memset(dlp, 0, sizeof(*dlp));

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
		break;
	case DSA_PORT_TYPE_CPU:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
		break;
	case DSA_PORT_TYPE_DSA:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
		break;
	case DSA_PORT_TYPE_USER:
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		break;
	}

	devlink_port_attrs_set(dlp, &attrs);
	err = devlink_port_register(dl, dlp, dp->index);

	if (!err)
		dp->devlink_port_setup = true;

	return err;
}

static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		if (dp->slave) {
			dsa_slave_destroy(dp->slave);
			dp->slave = NULL;
		}
		break;
	}

	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}

static void dsa_port_devlink_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;

	if (dp->devlink_port_setup)
		devlink_port_unregister(dlp);
	dp->devlink_port_setup = false;
}

/* Destroy the current devlink port, and create a new one which has the UNUSED
 * flavour. At this point, any call to ds->ops->port_setup has already been
 * balanced out by a call to ds->ops->port_teardown, so we know that any
 * devlink port regions the driver had are now unregistered. We then call its
 * ds->ops->port_setup again, in order for the driver to re-create them on the
 * new devlink port.
 */
static int dsa_port_reinit_as_unused(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	int err;

	dsa_port_devlink_teardown(dp);
	dp->type = DSA_PORT_TYPE_UNUSED;
	err = dsa_port_devlink_setup(dp);
	if (err)
		return err;

	if (ds->ops->port_setup) {
		/* On error, leave the devlink port registered,
		 * dsa_switch_teardown will clean it up later.
		 */
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	return 0;
}

static int dsa_devlink_info_get(struct devlink *dl,
				struct devlink_info_req *req,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (ds->ops->devlink_info_get)
		return ds->ops->devlink_info_get(ds, req, extack);

	return -EOPNOTSUPP;
}

static int dsa_devlink_sb_pool_get(struct devlink *dl,
				   unsigned int sb_index, u16 pool_index,
				   struct devlink_sb_pool_info *pool_info)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
					    pool_info);
}

static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
				   u16 pool_index, u32 size,
				   enum devlink_sb_threshold_type threshold_type,
				   struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
					    threshold_type, extack);
}

static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
						 pool_index, p_threshold);
}

static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
					unsigned int sb_index, u16 pool_index,
					u32 threshold,
					struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_port_pool_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
						 pool_index, threshold, extack);
}

static int
dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 *p_pool_index, u32 *p_threshold)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
						    tc_index, pool_type,
						    p_pool_index, p_threshold);
}

static int
dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
				unsigned int sb_index, u16 tc_index,
				enum devlink_sb_pool_type pool_type,
				u16 pool_index, u32 threshold,
				struct netlink_ext_ack *extack)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_tc_pool_bind_set)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
						    tc_index, pool_type,
						    pool_index, threshold,
						    extack);
}

static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
				       unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_snapshot)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
}

static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
					unsigned int sb_index)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	if (!ds->ops->devlink_sb_occ_max_clear)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
}

static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
					    unsigned int sb_index,
					    u16 pool_index, u32 *p_cur,
					    u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_port_pool_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
						     pool_index, p_cur, p_max);
}

static int
dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
				    unsigned int sb_index, u16 tc_index,
				    enum devlink_sb_pool_type pool_type,
				    u32 *p_cur, u32 *p_max)
{
	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
	int port = dsa_devlink_port_to_port(dlp);

	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;

	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
							sb_index, tc_index,
							pool_type, p_cur,
							p_max);
}

static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
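
/* Illustrative sketch (not part of the original file): these wrappers only
 * dispatch into the driver. A driver that wants "devlink dev info" to work
 * implements the corresponding dsa_switch_ops hook, e.g. (hypothetical
 * driver name):
 *
 *	static int foo_devlink_info_get(struct dsa_switch *ds,
 *					struct devlink_info_req *req,
 *					struct netlink_ext_ack *extack)
 *	{
 *		return devlink_info_driver_name_put(req, "foo");
 *	}
 *
 * Any hook the driver does not provide is reported as -EOPNOTSUPP.
 */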

static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int port, err;

	if (tag_ops->proto == dst->default_proto)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

	return 0;
}

static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;
	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values to decide whether or
	 * not to probe PHY devices.
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = devlink_register(ds->devlink, ds->dev);
	if (err)
		goto free_devlink;

	/* Set up devlink port instances now, so that the switch
	 * setup() can register regions etc. against the ports
	 */
	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds == ds) {
			err = dsa_port_devlink_setup(dp);
			if (err)
				goto unregister_devlink_ports;
		}
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	devlink_params_publish(ds->devlink);

	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;

	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	list_for_each_entry(dp, &ds->dst->ports, list)
		if (dp->ds == ds)
			dsa_port_devlink_teardown(dp);
	devlink_unregister(ds->devlink);
free_devlink:
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}

static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_unregister_notifier(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	if (ds->devlink) {
		list_for_each_entry(dp, &ds->dst->ports, list)
			if (dp->ds == ds)
				dsa_port_devlink_teardown(dp);
		devlink_unregister(ds->devlink);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}

/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	dsa_flush_workqueue();

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}

static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}

static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err)
			goto teardown;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err) {
			err = dsa_port_reinit_as_unused(dp);
			if (err)
				goto teardown;
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	return err;
}

static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_cpu(dp)) {
			err = dsa_master_setup(dp->master, dp);
			if (err)
				return err;
		}
	}

	return 0;
}

static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_cpu(dp))
			dsa_master_teardown(dp->master);
}

static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
{
	unsigned int len = 0;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list) {
		if (dp->ds->num_lag_ids > len)
			len = dp->ds->num_lag_ids;
	}

	if (!len)
		return 0;

	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
	if (!dst->lags)
		return -ENOMEM;

	dst->lags_len = len;
	return 0;
}

static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}

static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_default_cpu(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_default_cpu;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_ports(dst);
	dsa_tree_teardown_switches(dst);
teardown_default_cpu:
	dsa_tree_teardown_default_cpu(dst);

	return err;
}

static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_default_cpu(dst);

	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}

/* Since the dsa/tagging sysfs device attribute is per master, the assumption
 * is that all DSA switches within a tree share the same tagger, otherwise
 * they would have formed disjoint trees (different "dsa,member" values).
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	if (master->flags & IFF_UP)
		goto out_unlock;

	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_is_user_port(dp->ds, dp->index))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	dst->tag_ops = tag_ops;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
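
/* Illustrative sketch (not part of the original file): this path is reached
 * from the master's "dsa/tagging" sysfs attribute. Assuming a master named
 * eth0 and a tagger named "edsa", with the master and all user ports down:
 *
 *	# echo edsa > /sys/class/net/eth0/dsa/tagging
 *
 * On failure, the DSA_NOTIFIER_TAG_PROTO notifier is replayed with the old
 * tag_ops so that all switches in the tree stay in agreement.
 */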

static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds == ds && dp->index == index)
			return dp;

	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
	if (!dp)
		return NULL;

	dp->ds = ds;
	dp->index = index;

	INIT_LIST_HEAD(&dp->list);
	list_add_tail(&dp->list, &dst->ports);

	return dp;
}

static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
{
	if (!name)
		name = "eth%d";

	dp->type = DSA_PORT_TYPE_USER;
	dp->name = name;

	return 0;
}

static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}

static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another. When that
	 * happens, the switch driver may want to know whether its tagging
	 * protocol is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}

static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
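
/* Illustrative sketch (not part of the original file): the user_protocol
 * argument above comes from the optional "dsa-tag-protocol" device tree
 * property of the CPU port node, e.g.:
 *
 *	dsa-tag-protocol = "edsa";
 *
 * which forces that tagger even when the driver would prefer a different
 * protocol by default, provided the driver implements .change_tag_protocol.
 */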

static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
{
	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
	const char *name = of_get_property(dn, "label", NULL);
	bool link = of_property_read_bool(dn, "link");

	dp->dn = dn;

	if (ethernet) {
		struct net_device *master;
		const char *user_protocol;

		master = of_find_net_device_by_node(ethernet);
		if (!master)
			return -EPROBE_DEFER;

		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
		return dsa_port_parse_cpu(dp, master, user_protocol);
	}

	if (link)
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}
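
/* Illustrative sketch (not part of the original file): the per-port nodes
 * parsed above live under a "ports" (or "ethernet-ports") container whose
 * "reg" values index into ds->num_ports. A hypothetical fragment:
 *
 *	ports {
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		port@0 {
 *			reg = <0>;
 *			label = "lan1";
 *		};
 *
 *		port@5 {
 *			reg = <5>;
 *			ethernet = <&gmac0>;
 *		};
 *	};
 *
 * A port with an "ethernet" phandle becomes a CPU port, one with a "link"
 * phandle becomes a DSA port, and anything else becomes a user port named
 * after its "label".
 */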

static int dsa_switch_parse_member_of(struct dsa_switch *ds,
				      struct device_node *dn)
{
	u32 m[2] = { 0, 0 };
	int sz;

	/* Don't error out if this optional property isn't found */
	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
	if (sz < 0 && sz != -EINVAL)
		return sz;

	ds->index = m[1];

	ds->dst = dsa_tree_touch(m[0]);
	if (!ds->dst)
		return -ENOMEM;

	if (dsa_switch_find(ds->dst->index, ds->index)) {
		dev_err(ds->dev,
			"A DSA switch with index %d already exists in tree %d\n",
			ds->index, ds->dst->index);
		return -EEXIST;
	}

	return 0;
}
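
/* Illustrative sketch (not part of the original file): in multi-chip setups,
 * each switch node carries a "dsa,member" cell pair of <tree switch>:
 *
 *	switch0: switch@0 {
 *		dsa,member = <0 0>;
 *	};
 *
 *	switch1: switch@1 {
 *		dsa,member = <0 1>;
 *	};
 *
 * Switches without the property default to switch 0 of tree 0, since m[]
 * above is zero-initialized.
 */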

static int dsa_switch_touch_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		dp = dsa_port_touch(ds, port);
		if (!dp)
			return -ENOMEM;
	}

	return 0;
}

static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (err)
		return err;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports_of(ds, dn);
}

static int dsa_port_parse(struct dsa_port *dp, const char *name,
			  struct device *dev)
{
	if (!strcmp(name, "cpu")) {
		struct net_device *master;

		master = dsa_dev_to_net_device(dev);
		if (!master)
			return -EPROBE_DEFER;

		dev_put(master);

		return dsa_port_parse_cpu(dp, master, NULL);
	}

	if (!strcmp(name, "dsa"))
		return dsa_port_parse_dsa(dp);

	return dsa_port_parse_user(dp, name);
}

static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}

static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
{
	int err;

	ds->cd = cd;

	/* We don't support interconnected switches nor multiple trees via
	 * platform data, so this is the unique switch of the tree.
	 */
	ds->index = 0;
	ds->dst = dsa_tree_touch(0);
	if (!ds->dst)
		return -ENOMEM;

	err = dsa_switch_touch_ports(ds);
	if (err)
		return err;

	return dsa_switch_parse_ports(ds, cd);
}

static void dsa_switch_release_ports(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *dp, *next;

	list_for_each_entry_safe(dp, next, &dst->ports, list) {
		if (dp->ds != ds)
			continue;
		list_del(&dp->list);
		kfree(dp);
	}
}

static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}

int dsa_register_switch(struct dsa_switch *ds)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = dsa_switch_probe(ds);
	dsa_tree_put(ds->dst);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
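
/* Illustrative sketch (not part of the original file): a switch driver's
 * probe path typically fills in a struct dsa_switch and hands it over here
 * (hypothetical MDIO-attached driver):
 *
 *	static int foo_probe(struct mdio_device *mdiodev)
 *	{
 *		struct dsa_switch *ds;
 *
 *		ds = devm_kzalloc(&mdiodev->dev, sizeof(*ds), GFP_KERNEL);
 *		if (!ds)
 *			return -ENOMEM;
 *
 *		ds->dev = &mdiodev->dev;
 *		ds->num_ports = 6;
 *		ds->ops = &foo_switch_ops;
 *
 *		return dsa_register_switch(ds);
 *	}
 *
 * The matching remove path calls dsa_unregister_switch() below.
 */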

static void dsa_switch_remove(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst = ds->dst;

	dsa_tree_teardown(dst);
	dsa_switch_release_ports(ds);
	dsa_tree_put(dst);
}

void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);