/* Source: Linux kernel v5.14.15 (web-extraction navigation text removed) */
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
   4 * Copyright (c) 2008-2009 Marvell Semiconductor
   5 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
   6 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
   7 */
   8
   9#include <linux/device.h>
  10#include <linux/err.h>
  11#include <linux/list.h>
  12#include <linux/netdevice.h>
  13#include <linux/slab.h>
  14#include <linux/rtnetlink.h>
  15#include <linux/of.h>
  16#include <linux/of_net.h>
  17#include <net/devlink.h>
  18
  19#include "dsa_priv.h"
  20
  21static DEFINE_MUTEX(dsa2_mutex);
  22LIST_HEAD(dsa_tree_list);
  23
  24/**
  25 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
  26 * @dst: collection of struct dsa_switch devices to notify.
  27 * @e: event, must be of type DSA_NOTIFIER_*
  28 * @v: event-specific value.
  29 *
  30 * Given a struct dsa_switch_tree, this can be used to run a function once for
  31 * each member DSA switch. The other alternative of traversing the tree is only
  32 * through its ports list, which does not uniquely list the switches.
  33 */
  34int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
  35{
  36	struct raw_notifier_head *nh = &dst->nh;
  37	int err;
  38
  39	err = raw_notifier_call_chain(nh, e, v);
  40
  41	return notifier_to_errno(err);
  42}
  43
  44/**
  45 * dsa_broadcast - Notify all DSA trees in the system.
  46 * @e: event, must be of type DSA_NOTIFIER_*
  47 * @v: event-specific value.
  48 *
  49 * Can be used to notify the switching fabric of events such as cross-chip
  50 * bridging between disjoint trees (such as islands of tagger-compatible
  51 * switches bridged by an incompatible middle switch).
  52 */
  53int dsa_broadcast(unsigned long e, void *v)
  54{
  55	struct dsa_switch_tree *dst;
  56	int err = 0;
  57
  58	list_for_each_entry(dst, &dsa_tree_list, list) {
  59		err = dsa_tree_notify(dst, e, v);
  60		if (err)
  61			break;
  62	}
  63
  64	return err;
  65}
  66
  67/**
  68 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
  69 * @dst: Tree in which to record the mapping.
  70 * @lag: Netdev that is to be mapped to an ID.
  71 *
  72 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
  73 * two spaces. The size of the mapping space is determined by the
  74 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
  75 * it unset if it is not needed, in which case these functions become
  76 * no-ops.
  77 */
  78void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
  79{
  80	unsigned int id;
  81
  82	if (dsa_lag_id(dst, lag) >= 0)
  83		/* Already mapped */
  84		return;
  85
  86	for (id = 0; id < dst->lags_len; id++) {
  87		if (!dsa_lag_dev(dst, id)) {
  88			dst->lags[id] = lag;
  89			return;
  90		}
  91	}
  92
  93	/* No IDs left, which is OK. Some drivers do not need it. The
  94	 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
  95	 * returns an error for this device when joining the LAG. The
  96	 * driver can then return -EOPNOTSUPP back to DSA, which will
  97	 * fall back to a software LAG.
  98	 */
  99}
 100
 101/**
 102 * dsa_lag_unmap() - Remove a LAG ID mapping
 103 * @dst: Tree in which the mapping is recorded.
 104 * @lag: Netdev that was mapped.
 105 *
 106 * As there may be multiple users of the mapping, it is only removed
 107 * if there are no other references to it.
 108 */
 109void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
 110{
 111	struct dsa_port *dp;
 112	unsigned int id;
 113
 114	dsa_lag_foreach_port(dp, dst, lag)
 115		/* There are remaining users of this mapping */
 116		return;
 117
 118	dsa_lags_foreach_id(id, dst) {
 119		if (dsa_lag_dev(dst, id) == lag) {
 120			dst->lags[id] = NULL;
 121			break;
 122		}
 123	}
 124}
 125
 126struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
 127{
 128	struct dsa_switch_tree *dst;
 129	struct dsa_port *dp;
 130
 131	list_for_each_entry(dst, &dsa_tree_list, list) {
 132		if (dst->index != tree_index)
 133			continue;
 134
 135		list_for_each_entry(dp, &dst->ports, list) {
 136			if (dp->ds->index != sw_index)
 137				continue;
 138
 139			return dp->ds;
 140		}
 141	}
 142
 143	return NULL;
 144}
 145EXPORT_SYMBOL_GPL(dsa_switch_find);
 146
 147static struct dsa_switch_tree *dsa_tree_find(int index)
 148{
 149	struct dsa_switch_tree *dst;
 150
 151	list_for_each_entry(dst, &dsa_tree_list, list)
 152		if (dst->index == index)
 153			return dst;
 154
 155	return NULL;
 156}
 157
 158static struct dsa_switch_tree *dsa_tree_alloc(int index)
 159{
 160	struct dsa_switch_tree *dst;
 161
 162	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
 163	if (!dst)
 164		return NULL;
 165
 166	dst->index = index;
 167
 168	INIT_LIST_HEAD(&dst->rtable);
 169
 170	INIT_LIST_HEAD(&dst->ports);
 171
 172	INIT_LIST_HEAD(&dst->list);
 173	list_add_tail(&dst->list, &dsa_tree_list);
 174
 175	kref_init(&dst->refcount);
 176
 177	return dst;
 178}
 179
/* Unlink the tree from the global list and free it, dropping the tree's
 * reference on its tagging protocol driver if one was taken.
 */
static void dsa_tree_free(struct dsa_switch_tree *dst)
{
	if (dst->tag_ops)
		dsa_tag_driver_put(dst->tag_ops);
	list_del(&dst->list);
	kfree(dst);
}
 187
 188static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
 189{
 190	if (dst)
 191		kref_get(&dst->refcount);
 192
 193	return dst;
 194}
 195
 196static struct dsa_switch_tree *dsa_tree_touch(int index)
 197{
 198	struct dsa_switch_tree *dst;
 199
 200	dst = dsa_tree_find(index);
 201	if (dst)
 202		return dsa_tree_get(dst);
 203	else
 204		return dsa_tree_alloc(index);
 205}
 206
 207static void dsa_tree_release(struct kref *ref)
 208{
 209	struct dsa_switch_tree *dst;
 210
 211	dst = container_of(ref, struct dsa_switch_tree, refcount);
 212
 213	dsa_tree_free(dst);
 214}
 215
 216static void dsa_tree_put(struct dsa_switch_tree *dst)
 217{
 218	if (dst)
 219		kref_put(&dst->refcount, dsa_tree_release);
 220}
 221
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 222static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
 223						   struct device_node *dn)
 224{
 225	struct dsa_port *dp;
 226
 227	list_for_each_entry(dp, &dst->ports, list)
 228		if (dp->dn == dn)
 229			return dp;
 230
 231	return NULL;
 232}
 233
 234static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
 235				       struct dsa_port *link_dp)
 236{
 237	struct dsa_switch *ds = dp->ds;
 238	struct dsa_switch_tree *dst;
 239	struct dsa_link *dl;
 240
 241	dst = ds->dst;
 242
 243	list_for_each_entry(dl, &dst->rtable, list)
 244		if (dl->dp == dp && dl->link_dp == link_dp)
 245			return dl;
 246
 247	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
 248	if (!dl)
 249		return NULL;
 250
 251	dl->dp = dp;
 252	dl->link_dp = link_dp;
 253
 254	INIT_LIST_HEAD(&dl->list);
 255	list_add_tail(&dl->list, &dst->rtable);
 256
 257	return dl;
 258}
 259
/* Populate the tree's routing table from the "link" phandles of DSA
 * port @dp. Returns false when a linked port has not been discovered
 * yet (tree still incomplete) or on allocation failure.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Breaking out of the iterator early: release the
			 * node reference the iterator holds for us.
			 */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
 286
 287static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
 288{
 289	bool complete = true;
 290	struct dsa_port *dp;
 291
 292	list_for_each_entry(dp, &dst->ports, list) {
 293		if (dsa_port_is_dsa(dp)) {
 294			complete = dsa_port_setup_routing_table(dp);
 295			if (!complete)
 296				break;
 297		}
 298	}
 299
 300	return complete;
 301}
 302
 303static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
 304{
 305	struct dsa_port *dp;
 306
 307	list_for_each_entry(dp, &dst->ports, list)
 308		if (dsa_port_is_cpu(dp))
 309			return dp;
 310
 311	return NULL;
 312}
 313
 314static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
 315{
 316	struct dsa_port *cpu_dp, *dp;
 317
 318	cpu_dp = dsa_tree_find_first_cpu(dst);
 319	if (!cpu_dp) {
 320		pr_err("DSA: tree %d has no CPU port\n", dst->index);
 321		return -EINVAL;
 322	}
 323
 324	/* Assign the default CPU port to all ports of the fabric */
 325	list_for_each_entry(dp, &dst->ports, list)
 326		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
 327			dp->cpu_dp = cpu_dp;
 328
 329	return 0;
 330}
 331
 332static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
 333{
 334	struct dsa_port *dp;
 335
 336	list_for_each_entry(dp, &dst->ports, list)
 337		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
 338			dp->cpu_dp = NULL;
 339}
 340
 341static int dsa_port_setup(struct dsa_port *dp)
 342{
 
 
 
 
 343	struct devlink_port *dlp = &dp->devlink_port;
 344	bool dsa_port_link_registered = false;
 345	struct dsa_switch *ds = dp->ds;
 
 
 346	bool dsa_port_enabled = false;
 347	int err = 0;
 348
 
 
 
 
 349	if (dp->setup)
 350		return 0;
 351
 352	INIT_LIST_HEAD(&dp->fdbs);
 353	INIT_LIST_HEAD(&dp->mdbs);
 354
 355	if (ds->ops->port_setup) {
 356		err = ds->ops->port_setup(ds, dp->index);
 357		if (err)
 358			return err;
 359	}
 360
 361	switch (dp->type) {
 362	case DSA_PORT_TYPE_UNUSED:
 363		dsa_port_disable(dp);
 364		break;
 365	case DSA_PORT_TYPE_CPU:
 
 
 
 
 
 
 
 
 366		err = dsa_port_link_register_of(dp);
 367		if (err)
 368			break;
 369		dsa_port_link_registered = true;
 370
 371		err = dsa_port_enable(dp, NULL);
 372		if (err)
 373			break;
 374		dsa_port_enabled = true;
 375
 376		break;
 377	case DSA_PORT_TYPE_DSA:
 
 
 
 
 
 
 
 
 378		err = dsa_port_link_register_of(dp);
 379		if (err)
 380			break;
 381		dsa_port_link_registered = true;
 382
 383		err = dsa_port_enable(dp, NULL);
 384		if (err)
 385			break;
 386		dsa_port_enabled = true;
 387
 388		break;
 389	case DSA_PORT_TYPE_USER:
 390		of_get_mac_address(dp->dn, dp->mac);
 
 
 
 
 
 
 
 
 391		err = dsa_slave_create(dp);
 392		if (err)
 393			break;
 394
 395		devlink_port_type_eth_set(dlp, dp->slave);
 396		break;
 397	}
 398
 399	if (err && dsa_port_enabled)
 400		dsa_port_disable(dp);
 401	if (err && dsa_port_link_registered)
 402		dsa_port_link_unregister_of(dp);
 403	if (err) {
 404		if (ds->ops->port_teardown)
 405			ds->ops->port_teardown(ds, dp->index);
 406		return err;
 407	}
 408
 409	dp->setup = true;
 410
 411	return 0;
 412}
 413
 414static int dsa_port_devlink_setup(struct dsa_port *dp)
 415{
 416	struct devlink_port *dlp = &dp->devlink_port;
 417	struct dsa_switch_tree *dst = dp->ds->dst;
 418	struct devlink_port_attrs attrs = {};
 419	struct devlink *dl = dp->ds->devlink;
 420	const unsigned char *id;
 421	unsigned char len;
 422	int err;
 423
 424	id = (const unsigned char *)&dst->index;
 425	len = sizeof(dst->index);
 426
 427	attrs.phys.port_number = dp->index;
 428	memcpy(attrs.switch_id.id, id, len);
 429	attrs.switch_id.id_len = len;
 430	memset(dlp, 0, sizeof(*dlp));
 431
 432	switch (dp->type) {
 433	case DSA_PORT_TYPE_UNUSED:
 434		attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
 435		break;
 436	case DSA_PORT_TYPE_CPU:
 437		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
 438		break;
 439	case DSA_PORT_TYPE_DSA:
 440		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
 441		break;
 442	case DSA_PORT_TYPE_USER:
 443		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
 444		break;
 445	}
 446
 447	devlink_port_attrs_set(dlp, &attrs);
 448	err = devlink_port_register(dl, dlp, dp->index);
 449
 450	if (!err)
 451		dp->devlink_port_setup = true;
 452
 453	return err;
 454}
 455
 456static void dsa_port_teardown(struct dsa_port *dp)
 457{
 458	struct devlink_port *dlp = &dp->devlink_port;
 459	struct dsa_switch *ds = dp->ds;
 460	struct dsa_mac_addr *a, *tmp;
 461
 462	if (!dp->setup)
 463		return;
 464
 465	if (ds->ops->port_teardown)
 466		ds->ops->port_teardown(ds, dp->index);
 467
 468	devlink_port_type_clear(dlp);
 469
 470	switch (dp->type) {
 471	case DSA_PORT_TYPE_UNUSED:
 472		break;
 473	case DSA_PORT_TYPE_CPU:
 474		dsa_port_disable(dp);
 
 
 475		dsa_port_link_unregister_of(dp);
 476		break;
 477	case DSA_PORT_TYPE_DSA:
 478		dsa_port_disable(dp);
 
 479		dsa_port_link_unregister_of(dp);
 480		break;
 481	case DSA_PORT_TYPE_USER:
 
 482		if (dp->slave) {
 483			dsa_slave_destroy(dp->slave);
 484			dp->slave = NULL;
 485		}
 486		break;
 487	}
 488
 489	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
 490		list_del(&a->list);
 491		kfree(a);
 492	}
 493
 494	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
 495		list_del(&a->list);
 496		kfree(a);
 497	}
 498
 499	dp->setup = false;
 500}
 501
 502static void dsa_port_devlink_teardown(struct dsa_port *dp)
 503{
 504	struct devlink_port *dlp = &dp->devlink_port;
 505
 506	if (dp->devlink_port_setup)
 507		devlink_port_unregister(dlp);
 508	dp->devlink_port_setup = false;
 509}
 510
 511/* Destroy the current devlink port, and create a new one which has the UNUSED
 512 * flavour. At this point, any call to ds->ops->port_setup has been already
 513 * balanced out by a call to ds->ops->port_teardown, so we know that any
 514 * devlink port regions the driver had are now unregistered. We then call its
 515 * ds->ops->port_setup again, in order for the driver to re-create them on the
 516 * new devlink port.
 517 */
 518static int dsa_port_reinit_as_unused(struct dsa_port *dp)
 519{
 520	struct dsa_switch *ds = dp->ds;
 521	int err;
 522
 523	dsa_port_devlink_teardown(dp);
 524	dp->type = DSA_PORT_TYPE_UNUSED;
 525	err = dsa_port_devlink_setup(dp);
 526	if (err)
 527		return err;
 528
 529	if (ds->ops->port_setup) {
 530		/* On error, leave the devlink port registered,
 531		 * dsa_switch_teardown will clean it up later.
 532		 */
 533		err = ds->ops->port_setup(ds, dp->index);
 534		if (err)
 535			return err;
 536	}
 537
 538	return 0;
 539}
 540
 541static int dsa_devlink_info_get(struct devlink *dl,
 542				struct devlink_info_req *req,
 543				struct netlink_ext_ack *extack)
 544{
 545	struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 546
 547	if (ds->ops->devlink_info_get)
 548		return ds->ops->devlink_info_get(ds, req, extack);
 549
 550	return -EOPNOTSUPP;
 551}
 552
 553static int dsa_devlink_sb_pool_get(struct devlink *dl,
 554				   unsigned int sb_index, u16 pool_index,
 555				   struct devlink_sb_pool_info *pool_info)
 556{
 557	struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 558
 559	if (!ds->ops->devlink_sb_pool_get)
 560		return -EOPNOTSUPP;
 561
 562	return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
 563					    pool_info);
 564}
 565
 566static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
 567				   u16 pool_index, u32 size,
 568				   enum devlink_sb_threshold_type threshold_type,
 569				   struct netlink_ext_ack *extack)
 570{
 571	struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 572
 573	if (!ds->ops->devlink_sb_pool_set)
 574		return -EOPNOTSUPP;
 575
 576	return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
 577					    threshold_type, extack);
 578}
 579
 580static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
 581					unsigned int sb_index, u16 pool_index,
 582					u32 *p_threshold)
 583{
 584	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 585	int port = dsa_devlink_port_to_port(dlp);
 586
 587	if (!ds->ops->devlink_sb_port_pool_get)
 588		return -EOPNOTSUPP;
 589
 590	return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
 591						 pool_index, p_threshold);
 592}
 593
 594static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
 595					unsigned int sb_index, u16 pool_index,
 596					u32 threshold,
 597					struct netlink_ext_ack *extack)
 598{
 599	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 600	int port = dsa_devlink_port_to_port(dlp);
 601
 602	if (!ds->ops->devlink_sb_port_pool_set)
 603		return -EOPNOTSUPP;
 604
 605	return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
 606						 pool_index, threshold, extack);
 607}
 608
 609static int
 610dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
 611				unsigned int sb_index, u16 tc_index,
 612				enum devlink_sb_pool_type pool_type,
 613				u16 *p_pool_index, u32 *p_threshold)
 614{
 615	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 616	int port = dsa_devlink_port_to_port(dlp);
 617
 618	if (!ds->ops->devlink_sb_tc_pool_bind_get)
 619		return -EOPNOTSUPP;
 620
 621	return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
 622						    tc_index, pool_type,
 623						    p_pool_index, p_threshold);
 624}
 625
 626static int
 627dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
 628				unsigned int sb_index, u16 tc_index,
 629				enum devlink_sb_pool_type pool_type,
 630				u16 pool_index, u32 threshold,
 631				struct netlink_ext_ack *extack)
 632{
 633	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 634	int port = dsa_devlink_port_to_port(dlp);
 635
 636	if (!ds->ops->devlink_sb_tc_pool_bind_set)
 637		return -EOPNOTSUPP;
 638
 639	return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
 640						    tc_index, pool_type,
 641						    pool_index, threshold,
 642						    extack);
 643}
 644
 645static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
 646				       unsigned int sb_index)
 647{
 648	struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 649
 650	if (!ds->ops->devlink_sb_occ_snapshot)
 651		return -EOPNOTSUPP;
 652
 653	return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
 654}
 655
 656static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
 657					unsigned int sb_index)
 658{
 659	struct dsa_switch *ds = dsa_devlink_to_ds(dl);
 660
 661	if (!ds->ops->devlink_sb_occ_max_clear)
 662		return -EOPNOTSUPP;
 663
 664	return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
 665}
 666
 667static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
 668					    unsigned int sb_index,
 669					    u16 pool_index, u32 *p_cur,
 670					    u32 *p_max)
 671{
 672	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 673	int port = dsa_devlink_port_to_port(dlp);
 674
 675	if (!ds->ops->devlink_sb_occ_port_pool_get)
 676		return -EOPNOTSUPP;
 677
 678	return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
 679						     pool_index, p_cur, p_max);
 680}
 681
 682static int
 683dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
 684				    unsigned int sb_index, u16 tc_index,
 685				    enum devlink_sb_pool_type pool_type,
 686				    u32 *p_cur, u32 *p_max)
 687{
 688	struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
 689	int port = dsa_devlink_port_to_port(dlp);
 690
 691	if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
 692		return -EOPNOTSUPP;
 693
 694	return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
 695							sb_index, tc_index,
 696							pool_type, p_cur,
 697							p_max);
 698}
 699
/* devlink operations shared by all DSA switches. Each callback above is
 * a thin proxy into the corresponding dsa_switch_ops member, returning
 * -EOPNOTSUPP when the driver does not implement it.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
 713
/* Sync the driver with the tree's tagging protocol. If the tree's
 * tagger differs from the switch's default (e.g. a user override), ask
 * the driver to migrate every CPU port via ops->change_tag_protocol().
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	int port, err;

	/* Nothing to do when the tree kept the switch's preferred tagger */
	if (tag_ops->proto == dst->default_proto)
		return 0;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_cpu_port(ds, port))
			continue;

		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

	return 0;
}
 737
/* Bring up a single switch: devlink registration, devlink ports,
 * cross-chip notifier, driver ->setup(), tag protocol sync and, when
 * the driver accesses PHYs itself, a slave MII bus. Errors unwind the
 * completed steps via the goto chain below. Idempotent: returns 0 if
 * the switch is already set up.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	err = devlink_register(ds->devlink, ds->dev);
	if (err)
		goto free_devlink;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	list_for_each_entry(dp, &ds->dst->ports, list) {
		if (dp->ds == ds) {
			err = dsa_port_devlink_setup(dp);
			if (err)
				goto unregister_devlink_ports;
		}
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	/* The driver can only honour a tag protocol override after its
	 * own ->setup() has run; reject the tree if it cannot.
	 */
	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	devlink_params_publish(ds->devlink);

	/* Only allocate a slave MII bus when the driver does its own PHY
	 * access and no bus was provided by the driver already.
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;

	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	list_for_each_entry(dp, &ds->dst->ports, list)
		if (dp->ds == ds)
			dsa_port_devlink_teardown(dp);
	devlink_unregister(ds->devlink);
free_devlink:
	devlink_free(ds->devlink);
	ds->devlink = NULL;

	return err;
}
 831
/* Undo dsa_switch_setup() in reverse order: slave MII bus, notifier,
 * driver ->teardown(), then the devlink ports and devlink instance.
 * Idempotent: does nothing if the switch was never set up.
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	/* The slave MII bus only exists when the driver does its own
	 * PHY access (see dsa_switch_setup()).
	 */
	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	dsa_switch_unregister_notifier(ds);

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	if (ds->devlink) {
		list_for_each_entry(dp, &ds->dst->ports, list)
			if (dp->ds == ds)
				dsa_port_devlink_teardown(dp);
		devlink_unregister(ds->devlink);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}
 861
 862/* First tear down the non-shared, then the shared ports. This ensures that
 863 * all work items scheduled by our switchdev handlers for user ports have
 864 * completed before we destroy the refcounting kept on the shared ports.
 865 */
 866static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
 867{
 868	struct dsa_port *dp;
 869
 870	list_for_each_entry(dp, &dst->ports, list)
 871		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
 872			dsa_port_teardown(dp);
 873
 874	dsa_flush_workqueue();
 875
 876	list_for_each_entry(dp, &dst->ports, list)
 877		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
 878			dsa_port_teardown(dp);
 879}
 880
/* Tear down every switch in the tree. dsa_switch_teardown() bails out
 * when ds->setup is false, so a switch reached through several of its
 * ports is only torn down once.
 */
static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		dsa_switch_teardown(dp->ds);
}
 888
/* Set up all member switches, then all ports, of the tree. A port that
 * fails to set up is downgraded to UNUSED rather than failing the whole
 * tree; only an error from that fallback aborts the setup.
 */
static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_switch_setup(dp->ds);
		if (err)
			goto teardown;
	}

	list_for_each_entry(dp, &dst->ports, list) {
		err = dsa_port_setup(dp);
		if (err) {
			err = dsa_port_reinit_as_unused(dp);
			if (err)
				goto teardown;
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	return err;
}
 918
 
 
 
 
 
 
 
 
 
 
 
 919static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
 920{
 921	struct dsa_port *dp;
 922	int err;
 923
 924	list_for_each_entry(dp, &dst->ports, list) {
 925		if (dsa_port_is_cpu(dp)) {
 926			err = dsa_master_setup(dp->master, dp);
 927			if (err)
 928				return err;
 929		}
 930	}
 931
 932	return 0;
 933}
 934
 935static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
 936{
 937	struct dsa_port *dp;
 938
 939	list_for_each_entry(dp, &dst->ports, list)
 940		if (dsa_port_is_cpu(dp))
 941			dsa_master_teardown(dp->master);
 942}
 943
 944static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
 945{
 946	unsigned int len = 0;
 947	struct dsa_port *dp;
 948
 949	list_for_each_entry(dp, &dst->ports, list) {
 950		if (dp->ds->num_lag_ids > len)
 951			len = dp->ds->num_lag_ids;
 952	}
 953
 954	if (!len)
 955		return 0;
 956
 957	dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
 958	if (!dst->lags)
 959		return -ENOMEM;
 960
 961	dst->lags_len = len;
 962	return 0;
 963}
 964
/* Free the LAG mapping array; kfree(NULL) is a no-op, so this is safe
 * even when dsa_tree_setup_lags() never allocated one.
 */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
 969
/* Bring up a whole switch tree: routing table, default CPU port,
 * switches and ports, masters, then LAG bookkeeping. Returns 0 without
 * doing anything if the routing table is still incomplete (more member
 * switches are expected to probe later). Errors unwind the completed
 * stages via the goto chain.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_default_cpu(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_default_cpu;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_master;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_ports(dst);
	dsa_tree_teardown_switches(dst);
teardown_default_cpu:
	dsa_tree_teardown_default_cpu(dst);

	return err;
}
1017
/* Undo dsa_tree_setup() in reverse stage order, then free the routing
 * table entries. Idempotent: does nothing if the tree never completed
 * setup.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_default_cpu(dst);

	/* Release the cross-chip routing table built by
	 * dsa_tree_setup_routing_table()
	 */
	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}
1044
1045/* Since the dsa/tagging sysfs device attribute is per master, the assumption
1046 * is that all DSA switches within a tree share the same tagger, otherwise
1047 * they would have formed disjoint trees (different "dsa,member" values).
1048 */
/* Switch the whole tree over to @tag_ops. Fails with -EBUSY (via
 * restart_syscall) if the rtnl lock cannot be taken, and refuses to run
 * while the master or any user port is up. On notifier failure, the
 * old tagger is re-broadcast so all switches stay in agreement.
 */
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops)
{
	struct dsa_notifier_tag_proto_info info;
	struct dsa_port *dp;
	int err = -EBUSY;

	/* Trylock + restart_syscall avoids deadlocking against another
	 * task holding rtnl while waiting on us.
	 */
	if (!rtnl_trylock())
		return restart_syscall();

	/* At the moment we don't allow changing the tag protocol under
	 * traffic. The rtnl_mutex also happens to serialize concurrent
	 * attempts to change the tagging protocol. If we ever lift the IFF_UP
	 * restriction, there needs to be another mutex which serializes this.
	 */
	if (master->flags & IFF_UP)
		goto out_unlock;

	list_for_each_entry(dp, &dst->ports, list) {
		if (!dsa_is_user_port(dp->ds, dp->index))
			continue;

		if (dp->slave->flags & IFF_UP)
			goto out_unlock;
	}

	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
	if (err)
		goto out_unwind_tagger;

	dst->tag_ops = tag_ops;

	rtnl_unlock();

	return 0;

out_unwind_tagger:
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
out_unlock:
	rtnl_unlock();
	return err;
}
1095
1096static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1097{
1098	struct dsa_switch_tree *dst = ds->dst;
1099	struct dsa_port *dp;
1100
1101	list_for_each_entry(dp, &dst->ports, list)
1102		if (dp->ds == ds && dp->index == index)
1103			return dp;
1104
1105	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1106	if (!dp)
1107		return NULL;
1108
1109	dp->ds = ds;
1110	dp->index = index;
1111
1112	INIT_LIST_HEAD(&dp->list);
1113	list_add_tail(&dp->list, &dst->ports);
1114
1115	return dp;
1116}
1117
1118static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1119{
1120	if (!name)
1121		name = "eth%d";
1122
1123	dp->type = DSA_PORT_TYPE_USER;
1124	dp->name = name;
1125
1126	return 0;
1127}
1128
/* Mark @dp as a DSA (switch-to-switch) link port. Always succeeds. */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}
1135
/* Ask the switch driver which tagging protocol it prefers for @dp.
 * When @master is itself a DSA user port (stacked trees), the outer
 * switch's protocol is queried first and passed down to the driver's
 * get_tag_protocol(); otherwise DSA_TAG_PROTO_NONE is passed.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
1161
/* Configure @dp as a CPU port attached to @master, choosing the tree's
 * tagging protocol: the driver's preference by default, overridden by
 * @user_protocol (from DT) when given. Enforces a single protocol per
 * tree and keeps the tagger refcounted once per tree, not per port.
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		/* -ENOPROTOOPT presumably means the tagger module is not
		 * available yet, so ask to be probed again later - TODO
		 * confirm against dsa_tag_driver_get()
		 */
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
1239
1240static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1241{
1242	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
1243	const char *name = of_get_property(dn, "label", NULL);
1244	bool link = of_property_read_bool(dn, "link");
1245
1246	dp->dn = dn;
1247
1248	if (ethernet) {
1249		struct net_device *master;
1250		const char *user_protocol;
1251
1252		master = of_find_net_device_by_node(ethernet);
1253		if (!master)
1254			return -EPROBE_DEFER;
1255
1256		user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1257		return dsa_port_parse_cpu(dp, master, user_protocol);
1258	}
1259
1260	if (link)
1261		return dsa_port_parse_dsa(dp);
1262
1263	return dsa_port_parse_user(dp, name);
1264}
1265
1266static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
1267				     struct device_node *dn)
1268{
1269	struct device_node *ports, *port;
1270	struct dsa_port *dp;
1271	int err = 0;
1272	u32 reg;
1273
1274	ports = of_get_child_by_name(dn, "ports");
1275	if (!ports) {
1276		/* The second possibility is "ethernet-ports" */
1277		ports = of_get_child_by_name(dn, "ethernet-ports");
1278		if (!ports) {
1279			dev_err(ds->dev, "no ports child node found\n");
1280			return -EINVAL;
1281		}
1282	}
1283
1284	for_each_available_child_of_node(ports, port) {
1285		err = of_property_read_u32(port, "reg", &reg);
1286		if (err) {
1287			of_node_put(port);
1288			goto out_put_node;
1289		}
1290
1291		if (reg >= ds->num_ports) {
1292			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%zu)\n",
1293				port, reg, ds->num_ports);
1294			of_node_put(port);
1295			err = -EINVAL;
1296			goto out_put_node;
1297		}
1298
1299		dp = dsa_to_port(ds, reg);
1300
1301		err = dsa_port_parse_of(dp, port);
1302		if (err) {
1303			of_node_put(port);
1304			goto out_put_node;
1305		}
1306	}
1307
1308out_put_node:
1309	of_node_put(ports);
1310	return err;
1311}
1312
1313static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1314				      struct device_node *dn)
1315{
1316	u32 m[2] = { 0, 0 };
1317	int sz;
1318
1319	/* Don't error out if this optional property isn't found */
1320	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1321	if (sz < 0 && sz != -EINVAL)
1322		return sz;
1323
1324	ds->index = m[1];
1325
1326	ds->dst = dsa_tree_touch(m[0]);
1327	if (!ds->dst)
1328		return -ENOMEM;
1329
1330	if (dsa_switch_find(ds->dst->index, ds->index)) {
1331		dev_err(ds->dev,
1332			"A DSA switch with index %d already exists in tree %d\n",
1333			ds->index, ds->dst->index);
1334		return -EEXIST;
1335	}
1336
1337	return 0;
1338}
1339
1340static int dsa_switch_touch_ports(struct dsa_switch *ds)
1341{
1342	struct dsa_port *dp;
1343	int port;
1344
1345	for (port = 0; port < ds->num_ports; port++) {
1346		dp = dsa_port_touch(ds, port);
1347		if (!dp)
1348			return -ENOMEM;
1349	}
1350
1351	return 0;
1352}
1353
/* Parse a switch described by device tree: tree membership first, then
 * port allocation, then per-port roles.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (!err)
		err = dsa_switch_touch_ports(ds);
	if (!err)
		err = dsa_switch_parse_ports_of(ds, dn);

	return err;
}
1368
1369static int dsa_port_parse(struct dsa_port *dp, const char *name,
1370			  struct device *dev)
1371{
1372	if (!strcmp(name, "cpu")) {
1373		struct net_device *master;
1374
1375		master = dsa_dev_to_net_device(dev);
1376		if (!master)
1377			return -EPROBE_DEFER;
1378
1379		dev_put(master);
1380
1381		return dsa_port_parse_cpu(dp, master, NULL);
1382	}
1383
1384	if (!strcmp(name, "dsa"))
1385		return dsa_port_parse_dsa(dp);
1386
1387	return dsa_port_parse_user(dp, name);
1388}
1389
1390static int dsa_switch_parse_ports(struct dsa_switch *ds,
1391				  struct dsa_chip_data *cd)
1392{
1393	bool valid_name_found = false;
1394	struct dsa_port *dp;
1395	struct device *dev;
1396	const char *name;
1397	unsigned int i;
1398	int err;
1399
1400	for (i = 0; i < DSA_MAX_PORTS; i++) {
1401		name = cd->port_names[i];
1402		dev = cd->netdev[i];
1403		dp = dsa_to_port(ds, i);
1404
1405		if (!name)
1406			continue;
1407
1408		err = dsa_port_parse(dp, name, dev);
1409		if (err)
1410			return err;
1411
1412		valid_name_found = true;
1413	}
1414
1415	if (!valid_name_found && i == DSA_MAX_PORTS)
1416		return -EINVAL;
1417
1418	return 0;
1419}
1420
1421static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
1422{
1423	int err;
1424
1425	ds->cd = cd;
1426
1427	/* We don't support interconnected switches nor multiple trees via
1428	 * platform data, so this is the unique switch of the tree.
1429	 */
1430	ds->index = 0;
1431	ds->dst = dsa_tree_touch(0);
1432	if (!ds->dst)
1433		return -ENOMEM;
1434
1435	err = dsa_switch_touch_ports(ds);
1436	if (err)
1437		return err;
1438
1439	return dsa_switch_parse_ports(ds, cd);
1440}
1441
1442static void dsa_switch_release_ports(struct dsa_switch *ds)
1443{
1444	struct dsa_switch_tree *dst = ds->dst;
1445	struct dsa_port *dp, *next;
1446
1447	list_for_each_entry_safe(dp, next, &dst->ports, list) {
1448		if (dp->ds != ds)
1449			continue;
1450		list_del(&dp->list);
1451		kfree(dp);
1452	}
1453}
1454
1455static int dsa_switch_probe(struct dsa_switch *ds)
1456{
1457	struct dsa_switch_tree *dst;
1458	struct dsa_chip_data *pdata;
1459	struct device_node *np;
1460	int err;
1461
1462	if (!ds->dev)
1463		return -ENODEV;
1464
1465	pdata = ds->dev->platform_data;
1466	np = ds->dev->of_node;
1467
1468	if (!ds->num_ports)
1469		return -EINVAL;
1470
1471	if (np) {
1472		err = dsa_switch_parse_of(ds, np);
1473		if (err)
1474			dsa_switch_release_ports(ds);
1475	} else if (pdata) {
1476		err = dsa_switch_parse(ds, pdata);
1477		if (err)
1478			dsa_switch_release_ports(ds);
1479	} else {
1480		err = -ENODEV;
1481	}
1482
1483	if (err)
1484		return err;
1485
1486	dst = ds->dst;
1487	dsa_tree_get(dst);
1488	err = dsa_tree_setup(dst);
1489	if (err) {
1490		dsa_switch_release_ports(ds);
1491		dsa_tree_put(dst);
1492	}
1493
1494	return err;
1495}
1496
1497int dsa_register_switch(struct dsa_switch *ds)
1498{
1499	int err;
1500
1501	mutex_lock(&dsa2_mutex);
1502	err = dsa_switch_probe(ds);
1503	dsa_tree_put(ds->dst);
1504	mutex_unlock(&dsa2_mutex);
1505
1506	return err;
1507}
1508EXPORT_SYMBOL_GPL(dsa_register_switch);
1509
1510static void dsa_switch_remove(struct dsa_switch *ds)
1511{
1512	struct dsa_switch_tree *dst = ds->dst;
1513
1514	dsa_tree_teardown(dst);
1515	dsa_switch_release_ports(ds);
1516	dsa_tree_put(dst);
1517}
1518
1519void dsa_unregister_switch(struct dsa_switch *ds)
1520{
1521	mutex_lock(&dsa2_mutex);
1522	dsa_switch_remove(ds);
1523	mutex_unlock(&dsa2_mutex);
1524}
1525EXPORT_SYMBOL_GPL(dsa_unregister_switch);
/* ==================== older revision: v5.9 ==================== */
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
  4 * Copyright (c) 2008-2009 Marvell Semiconductor
  5 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
  6 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
  7 */
  8
  9#include <linux/device.h>
 10#include <linux/err.h>
 11#include <linux/list.h>
 12#include <linux/netdevice.h>
 13#include <linux/slab.h>
 14#include <linux/rtnetlink.h>
 15#include <linux/of.h>
 16#include <linux/of_net.h>
 17#include <net/devlink.h>
 18
 19#include "dsa_priv.h"
 20
 21static DEFINE_MUTEX(dsa2_mutex);
 22LIST_HEAD(dsa_tree_list);
 23
 24static const struct devlink_ops dsa_devlink_ops = {
 25};
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 26
 27struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
 28{
 29	struct dsa_switch_tree *dst;
 30	struct dsa_port *dp;
 31
 32	list_for_each_entry(dst, &dsa_tree_list, list) {
 33		if (dst->index != tree_index)
 34			continue;
 35
 36		list_for_each_entry(dp, &dst->ports, list) {
 37			if (dp->ds->index != sw_index)
 38				continue;
 39
 40			return dp->ds;
 41		}
 42	}
 43
 44	return NULL;
 45}
 46EXPORT_SYMBOL_GPL(dsa_switch_find);
 47
 48static struct dsa_switch_tree *dsa_tree_find(int index)
 49{
 50	struct dsa_switch_tree *dst;
 51
 52	list_for_each_entry(dst, &dsa_tree_list, list)
 53		if (dst->index == index)
 54			return dst;
 55
 56	return NULL;
 57}
 58
 59static struct dsa_switch_tree *dsa_tree_alloc(int index)
 60{
 61	struct dsa_switch_tree *dst;
 62
 63	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
 64	if (!dst)
 65		return NULL;
 66
 67	dst->index = index;
 68
 69	INIT_LIST_HEAD(&dst->rtable);
 70
 71	INIT_LIST_HEAD(&dst->ports);
 72
 73	INIT_LIST_HEAD(&dst->list);
 74	list_add_tail(&dst->list, &dsa_tree_list);
 75
 76	kref_init(&dst->refcount);
 77
 78	return dst;
 79}
 80
 81static void dsa_tree_free(struct dsa_switch_tree *dst)
 82{
 
 
 83	list_del(&dst->list);
 84	kfree(dst);
 85}
 86
 87static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
 88{
 89	if (dst)
 90		kref_get(&dst->refcount);
 91
 92	return dst;
 93}
 94
 95static struct dsa_switch_tree *dsa_tree_touch(int index)
 96{
 97	struct dsa_switch_tree *dst;
 98
 99	dst = dsa_tree_find(index);
100	if (dst)
101		return dsa_tree_get(dst);
102	else
103		return dsa_tree_alloc(index);
104}
105
106static void dsa_tree_release(struct kref *ref)
107{
108	struct dsa_switch_tree *dst;
109
110	dst = container_of(ref, struct dsa_switch_tree, refcount);
111
112	dsa_tree_free(dst);
113}
114
115static void dsa_tree_put(struct dsa_switch_tree *dst)
116{
117	if (dst)
118		kref_put(&dst->refcount, dsa_tree_release);
119}
120
121static bool dsa_port_is_dsa(struct dsa_port *port)
122{
123	return port->type == DSA_PORT_TYPE_DSA;
124}
125
126static bool dsa_port_is_cpu(struct dsa_port *port)
127{
128	return port->type == DSA_PORT_TYPE_CPU;
129}
130
131static bool dsa_port_is_user(struct dsa_port *dp)
132{
133	return dp->type == DSA_PORT_TYPE_USER;
134}
135
136static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
137						   struct device_node *dn)
138{
139	struct dsa_port *dp;
140
141	list_for_each_entry(dp, &dst->ports, list)
142		if (dp->dn == dn)
143			return dp;
144
145	return NULL;
146}
147
148static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
149				       struct dsa_port *link_dp)
150{
151	struct dsa_switch *ds = dp->ds;
152	struct dsa_switch_tree *dst;
153	struct dsa_link *dl;
154
155	dst = ds->dst;
156
157	list_for_each_entry(dl, &dst->rtable, list)
158		if (dl->dp == dp && dl->link_dp == link_dp)
159			return dl;
160
161	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
162	if (!dl)
163		return NULL;
164
165	dl->dp = dp;
166	dl->link_dp = link_dp;
167
168	INIT_LIST_HEAD(&dl->list);
169	list_add_tail(&dl->list, &dst->rtable);
170
171	return dl;
172}
173
174static bool dsa_port_setup_routing_table(struct dsa_port *dp)
175{
176	struct dsa_switch *ds = dp->ds;
177	struct dsa_switch_tree *dst = ds->dst;
178	struct device_node *dn = dp->dn;
179	struct of_phandle_iterator it;
180	struct dsa_port *link_dp;
181	struct dsa_link *dl;
182	int err;
183
184	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
185		link_dp = dsa_tree_find_port_by_node(dst, it.node);
186		if (!link_dp) {
187			of_node_put(it.node);
188			return false;
189		}
190
191		dl = dsa_link_touch(dp, link_dp);
192		if (!dl) {
193			of_node_put(it.node);
194			return false;
195		}
196	}
197
198	return true;
199}
200
201static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
202{
203	bool complete = true;
204	struct dsa_port *dp;
205
206	list_for_each_entry(dp, &dst->ports, list) {
207		if (dsa_port_is_dsa(dp)) {
208			complete = dsa_port_setup_routing_table(dp);
209			if (!complete)
210				break;
211		}
212	}
213
214	return complete;
215}
216
217static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
218{
219	struct dsa_port *dp;
220
221	list_for_each_entry(dp, &dst->ports, list)
222		if (dsa_port_is_cpu(dp))
223			return dp;
224
225	return NULL;
226}
227
228static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
229{
230	struct dsa_port *cpu_dp, *dp;
231
232	cpu_dp = dsa_tree_find_first_cpu(dst);
233	if (!cpu_dp) {
234		pr_err("DSA: tree %d has no CPU port\n", dst->index);
235		return -EINVAL;
236	}
237
238	/* Assign the default CPU port to all ports of the fabric */
239	list_for_each_entry(dp, &dst->ports, list)
240		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
241			dp->cpu_dp = cpu_dp;
242
243	return 0;
244}
245
246static void dsa_tree_teardown_default_cpu(struct dsa_switch_tree *dst)
247{
248	struct dsa_port *dp;
249
250	list_for_each_entry(dp, &dst->ports, list)
251		if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
252			dp->cpu_dp = NULL;
253}
254
255static int dsa_port_setup(struct dsa_port *dp)
256{
257	struct dsa_switch *ds = dp->ds;
258	struct dsa_switch_tree *dst = ds->dst;
259	const unsigned char *id = (const unsigned char *)&dst->index;
260	const unsigned char len = sizeof(dst->index);
261	struct devlink_port *dlp = &dp->devlink_port;
262	bool dsa_port_link_registered = false;
263	bool devlink_port_registered = false;
264	struct devlink_port_attrs attrs = {};
265	struct devlink *dl = ds->devlink;
266	bool dsa_port_enabled = false;
267	int err = 0;
268
269	attrs.phys.port_number = dp->index;
270	memcpy(attrs.switch_id.id, id, len);
271	attrs.switch_id.id_len = len;
272
273	if (dp->setup)
274		return 0;
275
 
 
 
 
 
 
 
 
 
276	switch (dp->type) {
277	case DSA_PORT_TYPE_UNUSED:
278		dsa_port_disable(dp);
279		break;
280	case DSA_PORT_TYPE_CPU:
281		memset(dlp, 0, sizeof(*dlp));
282		attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
283		devlink_port_attrs_set(dlp, &attrs);
284		err = devlink_port_register(dl, dlp, dp->index);
285		if (err)
286			break;
287		devlink_port_registered = true;
288
289		err = dsa_port_link_register_of(dp);
290		if (err)
291			break;
292		dsa_port_link_registered = true;
293
294		err = dsa_port_enable(dp, NULL);
295		if (err)
296			break;
297		dsa_port_enabled = true;
298
299		break;
300	case DSA_PORT_TYPE_DSA:
301		memset(dlp, 0, sizeof(*dlp));
302		attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
303		devlink_port_attrs_set(dlp, &attrs);
304		err = devlink_port_register(dl, dlp, dp->index);
305		if (err)
306			break;
307		devlink_port_registered = true;
308
309		err = dsa_port_link_register_of(dp);
310		if (err)
311			break;
312		dsa_port_link_registered = true;
313
314		err = dsa_port_enable(dp, NULL);
315		if (err)
316			break;
317		dsa_port_enabled = true;
318
319		break;
320	case DSA_PORT_TYPE_USER:
321		memset(dlp, 0, sizeof(*dlp));
322		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
323		devlink_port_attrs_set(dlp, &attrs);
324		err = devlink_port_register(dl, dlp, dp->index);
325		if (err)
326			break;
327		devlink_port_registered = true;
328
329		dp->mac = of_get_mac_address(dp->dn);
330		err = dsa_slave_create(dp);
331		if (err)
332			break;
333
334		devlink_port_type_eth_set(dlp, dp->slave);
335		break;
336	}
337
338	if (err && dsa_port_enabled)
339		dsa_port_disable(dp);
340	if (err && dsa_port_link_registered)
341		dsa_port_link_unregister_of(dp);
342	if (err && devlink_port_registered)
343		devlink_port_unregister(dlp);
344	if (err)
345		return err;
 
346
347	dp->setup = true;
348
349	return 0;
350}
351
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
352static void dsa_port_teardown(struct dsa_port *dp)
353{
354	struct devlink_port *dlp = &dp->devlink_port;
 
 
355
356	if (!dp->setup)
357		return;
358
 
 
 
 
 
359	switch (dp->type) {
360	case DSA_PORT_TYPE_UNUSED:
361		break;
362	case DSA_PORT_TYPE_CPU:
363		dsa_port_disable(dp);
364		dsa_tag_driver_put(dp->tag_ops);
365		devlink_port_unregister(dlp);
366		dsa_port_link_unregister_of(dp);
367		break;
368	case DSA_PORT_TYPE_DSA:
369		dsa_port_disable(dp);
370		devlink_port_unregister(dlp);
371		dsa_port_link_unregister_of(dp);
372		break;
373	case DSA_PORT_TYPE_USER:
374		devlink_port_unregister(dlp);
375		if (dp->slave) {
376			dsa_slave_destroy(dp->slave);
377			dp->slave = NULL;
378		}
379		break;
380	}
381
 
 
 
 
 
 
 
 
 
 
382	dp->setup = false;
383}
384
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
385static int dsa_switch_setup(struct dsa_switch *ds)
386{
387	struct dsa_devlink_priv *dl_priv;
 
388	int err;
389
390	if (ds->setup)
391		return 0;
392
393	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
394	 * driver and before ops->setup() has run, since the switch drivers and
395	 * the slave MDIO bus driver rely on these values for probing PHY
396	 * devices or not
397	 */
398	ds->phys_mii_mask |= dsa_user_ports(ds);
399
400	/* Add the switch to devlink before calling setup, so that setup can
401	 * add dpipe tables
402	 */
403	ds->devlink = devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv));
404	if (!ds->devlink)
405		return -ENOMEM;
406	dl_priv = devlink_priv(ds->devlink);
407	dl_priv->ds = ds;
408
409	err = devlink_register(ds->devlink, ds->dev);
410	if (err)
411		goto free_devlink;
412
 
 
 
 
 
 
 
 
 
 
 
413	err = dsa_switch_register_notifier(ds);
414	if (err)
415		goto unregister_devlink;
 
 
416
417	err = ds->ops->setup(ds);
418	if (err < 0)
419		goto unregister_notifier;
420
 
 
 
 
421	devlink_params_publish(ds->devlink);
422
423	if (!ds->slave_mii_bus && ds->ops->phy_read) {
424		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
425		if (!ds->slave_mii_bus) {
426			err = -ENOMEM;
427			goto unregister_notifier;
428		}
429
430		dsa_slave_mii_bus_init(ds);
431
432		err = mdiobus_register(ds->slave_mii_bus);
433		if (err < 0)
434			goto unregister_notifier;
435	}
436
437	ds->setup = true;
438
439	return 0;
440
 
 
 
 
 
 
441unregister_notifier:
442	dsa_switch_unregister_notifier(ds);
443unregister_devlink:
 
 
 
444	devlink_unregister(ds->devlink);
445free_devlink:
446	devlink_free(ds->devlink);
447	ds->devlink = NULL;
448
449	return err;
450}
451
452static void dsa_switch_teardown(struct dsa_switch *ds)
453{
 
 
454	if (!ds->setup)
455		return;
456
457	if (ds->slave_mii_bus && ds->ops->phy_read)
458		mdiobus_unregister(ds->slave_mii_bus);
 
 
 
459
460	dsa_switch_unregister_notifier(ds);
461
462	if (ds->ops->teardown)
463		ds->ops->teardown(ds);
464
465	if (ds->devlink) {
 
 
 
466		devlink_unregister(ds->devlink);
467		devlink_free(ds->devlink);
468		ds->devlink = NULL;
469	}
470
471	ds->setup = false;
472}
473
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
474static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
475{
476	struct dsa_port *dp;
477	int err;
478
479	list_for_each_entry(dp, &dst->ports, list) {
480		err = dsa_switch_setup(dp->ds);
481		if (err)
482			goto teardown;
483	}
484
485	list_for_each_entry(dp, &dst->ports, list) {
486		err = dsa_port_setup(dp);
487		if (err)
488			continue;
 
 
 
489	}
490
491	return 0;
492
493teardown:
494	list_for_each_entry(dp, &dst->ports, list)
495		dsa_port_teardown(dp);
496
497	list_for_each_entry(dp, &dst->ports, list)
498		dsa_switch_teardown(dp->ds);
499
500	return err;
501}
502
503static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
504{
505	struct dsa_port *dp;
506
507	list_for_each_entry(dp, &dst->ports, list)
508		dsa_port_teardown(dp);
509
510	list_for_each_entry(dp, &dst->ports, list)
511		dsa_switch_teardown(dp->ds);
512}
513
514static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
515{
516	struct dsa_port *dp;
517	int err;
518
519	list_for_each_entry(dp, &dst->ports, list) {
520		if (dsa_port_is_cpu(dp)) {
521			err = dsa_master_setup(dp->master, dp);
522			if (err)
523				return err;
524		}
525	}
526
527	return 0;
528}
529
530static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
531{
532	struct dsa_port *dp;
533
534	list_for_each_entry(dp, &dst->ports, list)
535		if (dsa_port_is_cpu(dp))
536			dsa_master_teardown(dp->master);
537}
538
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
539static int dsa_tree_setup(struct dsa_switch_tree *dst)
540{
541	bool complete;
542	int err;
543
544	if (dst->setup) {
545		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
546		       dst->index);
547		return -EEXIST;
548	}
549
550	complete = dsa_tree_setup_routing_table(dst);
551	if (!complete)
552		return 0;
553
554	err = dsa_tree_setup_default_cpu(dst);
555	if (err)
556		return err;
557
558	err = dsa_tree_setup_switches(dst);
559	if (err)
560		goto teardown_default_cpu;
561
562	err = dsa_tree_setup_master(dst);
563	if (err)
564		goto teardown_switches;
565
 
 
 
 
566	dst->setup = true;
567
568	pr_info("DSA: tree %d setup\n", dst->index);
569
570	return 0;
571
 
 
572teardown_switches:
 
573	dsa_tree_teardown_switches(dst);
574teardown_default_cpu:
575	dsa_tree_teardown_default_cpu(dst);
576
577	return err;
578}
579
580static void dsa_tree_teardown(struct dsa_switch_tree *dst)
581{
582	struct dsa_link *dl, *next;
583
584	if (!dst->setup)
585		return;
586
 
 
587	dsa_tree_teardown_master(dst);
588
 
 
589	dsa_tree_teardown_switches(dst);
590
591	dsa_tree_teardown_default_cpu(dst);
592
593	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
594		list_del(&dl->list);
595		kfree(dl);
596	}
597
598	pr_info("DSA: tree %d torn down\n", dst->index);
599
600	dst->setup = false;
601}
602
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
603static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
604{
605	struct dsa_switch_tree *dst = ds->dst;
606	struct dsa_port *dp;
607
608	list_for_each_entry(dp, &dst->ports, list)
609		if (dp->ds == ds && dp->index == index)
610			return dp;
611
612	dp = kzalloc(sizeof(*dp), GFP_KERNEL);
613	if (!dp)
614		return NULL;
615
616	dp->ds = ds;
617	dp->index = index;
618
619	INIT_LIST_HEAD(&dp->list);
620	list_add_tail(&dp->list, &dst->ports);
621
622	return dp;
623}
624
625static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
626{
627	if (!name)
628		name = "eth%d";
629
630	dp->type = DSA_PORT_TYPE_USER;
631	dp->name = name;
632
633	return 0;
634}
635
636static int dsa_port_parse_dsa(struct dsa_port *dp)
637{
638	dp->type = DSA_PORT_TYPE_DSA;
639
640	return 0;
641}
642
643static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
644						  struct net_device *master)
645{
646	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
647	struct dsa_switch *mds, *ds = dp->ds;
648	unsigned int mdp_upstream;
649	struct dsa_port *mdp;
650
651	/* It is possible to stack DSA switches onto one another when that
652	 * happens the switch driver may want to know if its tagging protocol
653	 * is going to work in such a configuration.
654	 */
655	if (dsa_slave_dev_check(master)) {
656		mdp = dsa_slave_to_port(master);
657		mds = mdp->ds;
658		mdp_upstream = dsa_upstream_port(mds, mdp->index);
659		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
660							  DSA_TAG_PROTO_NONE);
661	}
662
663	/* If the master device is not itself a DSA slave in a disjoint DSA
664	 * tree, then return immediately.
665	 */
666	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
667}
668
669static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master)
 
670{
671	struct dsa_switch *ds = dp->ds;
672	struct dsa_switch_tree *dst = ds->dst;
673	const struct dsa_device_ops *tag_ops;
674	enum dsa_tag_protocol tag_protocol;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
675
676	tag_protocol = dsa_get_tag_protocol(dp, master);
677	tag_ops = dsa_tag_driver_get(tag_protocol);
678	if (IS_ERR(tag_ops)) {
679		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
680			return -EPROBE_DEFER;
 
681		dev_warn(ds->dev, "No tagger for this switch\n");
682		dp->master = NULL;
683		return PTR_ERR(tag_ops);
684	}
685
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
686	dp->master = master;
687	dp->type = DSA_PORT_TYPE_CPU;
688	dp->filter = tag_ops->filter;
689	dp->rcv = tag_ops->rcv;
690	dp->tag_ops = tag_ops;
691	dp->dst = dst;
692
 
 
 
 
 
 
 
 
 
 
 
 
 
693	return 0;
694}
695
696static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
697{
698	struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
699	const char *name = of_get_property(dn, "label", NULL);
700	bool link = of_property_read_bool(dn, "link");
701
702	dp->dn = dn;
703
704	if (ethernet) {
705		struct net_device *master;
 
706
707		master = of_find_net_device_by_node(ethernet);
708		if (!master)
709			return -EPROBE_DEFER;
710
711		return dsa_port_parse_cpu(dp, master);
 
712	}
713
714	if (link)
715		return dsa_port_parse_dsa(dp);
716
717	return dsa_port_parse_user(dp, name);
718}
719
720static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
721				     struct device_node *dn)
722{
723	struct device_node *ports, *port;
724	struct dsa_port *dp;
725	int err = 0;
726	u32 reg;
727
728	ports = of_get_child_by_name(dn, "ports");
729	if (!ports) {
730		/* The second possibility is "ethernet-ports" */
731		ports = of_get_child_by_name(dn, "ethernet-ports");
732		if (!ports) {
733			dev_err(ds->dev, "no ports child node found\n");
734			return -EINVAL;
735		}
736	}
737
738	for_each_available_child_of_node(ports, port) {
739		err = of_property_read_u32(port, "reg", &reg);
740		if (err)
 
741			goto out_put_node;
 
742
743		if (reg >= ds->num_ports) {
 
 
 
744			err = -EINVAL;
745			goto out_put_node;
746		}
747
748		dp = dsa_to_port(ds, reg);
749
750		err = dsa_port_parse_of(dp, port);
751		if (err)
 
752			goto out_put_node;
 
753	}
754
755out_put_node:
756	of_node_put(ports);
757	return err;
758}
759
760static int dsa_switch_parse_member_of(struct dsa_switch *ds,
761				      struct device_node *dn)
762{
763	u32 m[2] = { 0, 0 };
764	int sz;
765
766	/* Don't error out if this optional property isn't found */
767	sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
768	if (sz < 0 && sz != -EINVAL)
769		return sz;
770
771	ds->index = m[1];
772
773	ds->dst = dsa_tree_touch(m[0]);
774	if (!ds->dst)
775		return -ENOMEM;
776
 
 
 
 
 
 
 
777	return 0;
778}
779
780static int dsa_switch_touch_ports(struct dsa_switch *ds)
781{
782	struct dsa_port *dp;
783	int port;
784
785	for (port = 0; port < ds->num_ports; port++) {
786		dp = dsa_port_touch(ds, port);
787		if (!dp)
788			return -ENOMEM;
789	}
790
791	return 0;
792}
793
794static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
795{
796	int err;
797
798	err = dsa_switch_parse_member_of(ds, dn);
799	if (err)
800		return err;
801
802	err = dsa_switch_touch_ports(ds);
803	if (err)
804		return err;
805
806	return dsa_switch_parse_ports_of(ds, dn);
807}
808
809static int dsa_port_parse(struct dsa_port *dp, const char *name,
810			  struct device *dev)
811{
812	if (!strcmp(name, "cpu")) {
813		struct net_device *master;
814
815		master = dsa_dev_to_net_device(dev);
816		if (!master)
817			return -EPROBE_DEFER;
818
819		dev_put(master);
820
821		return dsa_port_parse_cpu(dp, master);
822	}
823
824	if (!strcmp(name, "dsa"))
825		return dsa_port_parse_dsa(dp);
826
827	return dsa_port_parse_user(dp, name);
828}
829
830static int dsa_switch_parse_ports(struct dsa_switch *ds,
831				  struct dsa_chip_data *cd)
832{
833	bool valid_name_found = false;
834	struct dsa_port *dp;
835	struct device *dev;
836	const char *name;
837	unsigned int i;
838	int err;
839
840	for (i = 0; i < DSA_MAX_PORTS; i++) {
841		name = cd->port_names[i];
842		dev = cd->netdev[i];
843		dp = dsa_to_port(ds, i);
844
845		if (!name)
846			continue;
847
848		err = dsa_port_parse(dp, name, dev);
849		if (err)
850			return err;
851
852		valid_name_found = true;
853	}
854
855	if (!valid_name_found && i == DSA_MAX_PORTS)
856		return -EINVAL;
857
858	return 0;
859}
860
861static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
862{
863	int err;
864
865	ds->cd = cd;
866
867	/* We don't support interconnected switches nor multiple trees via
868	 * platform data, so this is the unique switch of the tree.
869	 */
870	ds->index = 0;
871	ds->dst = dsa_tree_touch(0);
872	if (!ds->dst)
873		return -ENOMEM;
874
875	err = dsa_switch_touch_ports(ds);
876	if (err)
877		return err;
878
879	return dsa_switch_parse_ports(ds, cd);
880}
881
882static void dsa_switch_release_ports(struct dsa_switch *ds)
883{
884	struct dsa_switch_tree *dst = ds->dst;
885	struct dsa_port *dp, *next;
886
887	list_for_each_entry_safe(dp, next, &dst->ports, list) {
888		if (dp->ds != ds)
889			continue;
890		list_del(&dp->list);
891		kfree(dp);
892	}
893}
894
895static int dsa_switch_probe(struct dsa_switch *ds)
896{
897	struct dsa_switch_tree *dst;
898	struct dsa_chip_data *pdata;
899	struct device_node *np;
900	int err;
901
902	if (!ds->dev)
903		return -ENODEV;
904
905	pdata = ds->dev->platform_data;
906	np = ds->dev->of_node;
907
908	if (!ds->num_ports)
909		return -EINVAL;
910
911	if (np) {
912		err = dsa_switch_parse_of(ds, np);
913		if (err)
914			dsa_switch_release_ports(ds);
915	} else if (pdata) {
916		err = dsa_switch_parse(ds, pdata);
917		if (err)
918			dsa_switch_release_ports(ds);
919	} else {
920		err = -ENODEV;
921	}
922
923	if (err)
924		return err;
925
926	dst = ds->dst;
927	dsa_tree_get(dst);
928	err = dsa_tree_setup(dst);
929	if (err) {
930		dsa_switch_release_ports(ds);
931		dsa_tree_put(dst);
932	}
933
934	return err;
935}
936
937int dsa_register_switch(struct dsa_switch *ds)
938{
939	int err;
940
941	mutex_lock(&dsa2_mutex);
942	err = dsa_switch_probe(ds);
943	dsa_tree_put(ds->dst);
944	mutex_unlock(&dsa2_mutex);
945
946	return err;
947}
948EXPORT_SYMBOL_GPL(dsa_register_switch);
949
950static void dsa_switch_remove(struct dsa_switch *ds)
951{
952	struct dsa_switch_tree *dst = ds->dst;
953
954	dsa_tree_teardown(dst);
955	dsa_switch_release_ports(ds);
956	dsa_tree_put(dst);
957}
958
959void dsa_unregister_switch(struct dsa_switch *ds)
960{
961	mutex_lock(&dsa2_mutex);
962	dsa_switch_remove(ds);
963	mutex_unlock(&dsa2_mutex);
964}
965EXPORT_SYMBOL_GPL(dsa_unregister_switch);