v5.14.15
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
   4 * Copyright (c) 2008-2009 Marvell Semiconductor
   5 */
   6
   7#ifndef __LINUX_NET_DSA_H
   8#define __LINUX_NET_DSA_H
   9
  10#include <linux/if.h>
  11#include <linux/if_ether.h>
  12#include <linux/list.h>
  13#include <linux/notifier.h>
  14#include <linux/timer.h>
  15#include <linux/workqueue.h>
  16#include <linux/of.h>
  17#include <linux/ethtool.h>
  18#include <linux/net_tstamp.h>
  19#include <linux/phy.h>
  20#include <linux/platform_data/dsa.h>
  21#include <linux/phylink.h>
  22#include <net/devlink.h>
  23#include <net/switchdev.h>
  24
  25struct tc_action;
  26struct phy_device;
  27struct fixed_phy_status;
  28struct phylink_link_state;
  29
  30#define DSA_TAG_PROTO_NONE_VALUE		0
  31#define DSA_TAG_PROTO_BRCM_VALUE		1
  32#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE	2
  33#define DSA_TAG_PROTO_DSA_VALUE			3
  34#define DSA_TAG_PROTO_EDSA_VALUE		4
  35#define DSA_TAG_PROTO_GSWIP_VALUE		5
  36#define DSA_TAG_PROTO_KSZ9477_VALUE		6
  37#define DSA_TAG_PROTO_KSZ9893_VALUE		7
  38#define DSA_TAG_PROTO_LAN9303_VALUE		8
  39#define DSA_TAG_PROTO_MTK_VALUE			9
  40#define DSA_TAG_PROTO_QCA_VALUE			10
  41#define DSA_TAG_PROTO_TRAILER_VALUE		11
  42#define DSA_TAG_PROTO_8021Q_VALUE		12
  43#define DSA_TAG_PROTO_SJA1105_VALUE		13
  44#define DSA_TAG_PROTO_KSZ8795_VALUE		14
  45#define DSA_TAG_PROTO_OCELOT_VALUE		15
  46#define DSA_TAG_PROTO_AR9331_VALUE		16
  47#define DSA_TAG_PROTO_RTL4_A_VALUE		17
  48#define DSA_TAG_PROTO_HELLCREEK_VALUE		18
  49#define DSA_TAG_PROTO_XRS700X_VALUE		19
  50#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE	20
  51#define DSA_TAG_PROTO_SEVILLE_VALUE		21
  52#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE		22
  53#define DSA_TAG_PROTO_SJA1110_VALUE		23
  54
  55enum dsa_tag_protocol {
  56	DSA_TAG_PROTO_NONE		= DSA_TAG_PROTO_NONE_VALUE,
  57	DSA_TAG_PROTO_BRCM		= DSA_TAG_PROTO_BRCM_VALUE,
  58	DSA_TAG_PROTO_BRCM_LEGACY	= DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
  59	DSA_TAG_PROTO_BRCM_PREPEND	= DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
  60	DSA_TAG_PROTO_DSA		= DSA_TAG_PROTO_DSA_VALUE,
  61	DSA_TAG_PROTO_EDSA		= DSA_TAG_PROTO_EDSA_VALUE,
  62	DSA_TAG_PROTO_GSWIP		= DSA_TAG_PROTO_GSWIP_VALUE,
  63	DSA_TAG_PROTO_KSZ9477		= DSA_TAG_PROTO_KSZ9477_VALUE,
  64	DSA_TAG_PROTO_KSZ9893		= DSA_TAG_PROTO_KSZ9893_VALUE,
  65	DSA_TAG_PROTO_LAN9303		= DSA_TAG_PROTO_LAN9303_VALUE,
  66	DSA_TAG_PROTO_MTK		= DSA_TAG_PROTO_MTK_VALUE,
  67	DSA_TAG_PROTO_QCA		= DSA_TAG_PROTO_QCA_VALUE,
  68	DSA_TAG_PROTO_TRAILER		= DSA_TAG_PROTO_TRAILER_VALUE,
  69	DSA_TAG_PROTO_8021Q		= DSA_TAG_PROTO_8021Q_VALUE,
  70	DSA_TAG_PROTO_SJA1105		= DSA_TAG_PROTO_SJA1105_VALUE,
  71	DSA_TAG_PROTO_KSZ8795		= DSA_TAG_PROTO_KSZ8795_VALUE,
  72	DSA_TAG_PROTO_OCELOT		= DSA_TAG_PROTO_OCELOT_VALUE,
  73	DSA_TAG_PROTO_AR9331		= DSA_TAG_PROTO_AR9331_VALUE,
  74	DSA_TAG_PROTO_RTL4_A		= DSA_TAG_PROTO_RTL4_A_VALUE,
  75	DSA_TAG_PROTO_HELLCREEK		= DSA_TAG_PROTO_HELLCREEK_VALUE,
  76	DSA_TAG_PROTO_XRS700X		= DSA_TAG_PROTO_XRS700X_VALUE,
  77	DSA_TAG_PROTO_OCELOT_8021Q	= DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
  78	DSA_TAG_PROTO_SEVILLE		= DSA_TAG_PROTO_SEVILLE_VALUE,
  79	DSA_TAG_PROTO_SJA1110		= DSA_TAG_PROTO_SJA1110_VALUE,
  80};
  81
  82struct packet_type;
  83struct dsa_switch;
  84
  85struct dsa_device_ops {
  86	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
  87	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
  88			       struct packet_type *pt);
  89	void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
  90			     int *offset);
  91	/* Used to determine which traffic should match the DSA filter in
  92	 * eth_type_trans, and which, if any, should bypass it and be processed
  93	 * as regular on the master net device.
  94	 */
  95	bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
  96	unsigned int needed_headroom;
  97	unsigned int needed_tailroom;
  98	const char *name;
  99	enum dsa_tag_protocol proto;
 100	/* Some tagging protocols either mangle or shift the destination MAC
 101	 * address, in which case the DSA master would drop packets on ingress
 102	 * if what it understands out of the destination MAC address is not in
 103	 * its RX filter.
 104	 */
 105	bool promisc_on_master;
 106};
 107
 108/* This structure defines the control interfaces that are overlayed by the
 109 * DSA layer on top of the DSA CPU/management net_device instance. This is
 110 * used by the core net_device layer while calling various net_device_ops
 111 * function pointers.
 112 */
 113struct dsa_netdevice_ops {
 114	int (*ndo_do_ioctl)(struct net_device *dev, struct ifreq *ifr,
 115			    int cmd);
 116};
 117
 118#define DSA_TAG_DRIVER_ALIAS "dsa_tag-"
 119#define MODULE_ALIAS_DSA_TAG_DRIVER(__proto)				\
 120	MODULE_ALIAS(DSA_TAG_DRIVER_ALIAS __stringify(__proto##_VALUE))
 121
 122struct dsa_switch_tree {
 123	struct list_head	list;
 124
 125	/* Notifier chain for switch-wide events */
 126	struct raw_notifier_head	nh;
 127
 128	/* Tree identifier */
 129	unsigned int index;
 130
 131	/* Number of switches attached to this tree */
 132	struct kref refcount;
 133
 134	/* Has this tree been applied to the hardware? */
 135	bool setup;
 136
 137	/* Tagging protocol operations */
 138	const struct dsa_device_ops *tag_ops;
 139
 140	/* Default tagging protocol preferred by the switches in this
 141	 * tree.
 142	 */
 143	enum dsa_tag_protocol default_proto;
 144
 145	/*
 146	 * Configuration data for the platform device that owns
 147	 * this dsa switch tree instance.
 148	 */
 149	struct dsa_platform_data	*pd;
 150
 151	/* List of switch ports */
 152	struct list_head ports;
 153
 154	/* List of DSA links composing the routing table */
 155	struct list_head rtable;
 156
 157	/* Maps offloaded LAG netdevs to a zero-based linear ID for
 158	 * drivers that need it.
 159	 */
 160	struct net_device **lags;
 161	unsigned int lags_len;
 162};
 163
 164#define dsa_lags_foreach_id(_id, _dst)				\
 165	for ((_id) = 0; (_id) < (_dst)->lags_len; (_id)++)	\
 166		if ((_dst)->lags[(_id)])
 167
 168#define dsa_lag_foreach_port(_dp, _dst, _lag)			\
 169	list_for_each_entry((_dp), &(_dst)->ports, list)	\
 170		if ((_dp)->lag_dev == (_lag))
 171
 172#define dsa_hsr_foreach_port(_dp, _ds, _hsr)			\
 173	list_for_each_entry((_dp), &(_ds)->dst->ports, list)	\
 174		if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
 175
 176static inline struct net_device *dsa_lag_dev(struct dsa_switch_tree *dst,
 177					     unsigned int id)
 178{
 179	return dst->lags[id];
 180}
 181
 182static inline int dsa_lag_id(struct dsa_switch_tree *dst,
 183			     struct net_device *lag)
 184{
 185	unsigned int id;
 186
 187	dsa_lags_foreach_id(id, dst) {
 188		if (dsa_lag_dev(dst, id) == lag)
 189			return id;
 190	}
 191
 192	return -ENODEV;
 193}
 194
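/* Illustrative sketch, not part of this header: one way a switch driver could
 * use the LAG helpers above to compute the member-port mask of an offloaded
 * LAG netdev. Only dsa_lag_foreach_port() and the dsa_port fields come from
 * this file; example_lag_port_mask() itself is made up.
 */
static u32 example_lag_port_mask(struct dsa_switch_tree *dst,
				 struct net_device *lag)
{
	struct dsa_port *dp;
	u32 mask = 0;

	dsa_lag_foreach_port(dp, dst, lag)	/* dp->lag_dev == lag */
		mask |= BIT(dp->index);

	return mask;
}
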
 195/* TC matchall action types */
 196enum dsa_port_mall_action_type {
 197	DSA_PORT_MALL_MIRROR,
 198	DSA_PORT_MALL_POLICER,
 199};
 200
 201/* TC mirroring entry */
 202struct dsa_mall_mirror_tc_entry {
 203	u8 to_local_port;
 204	bool ingress;
 205};
 206
 207/* TC port policer entry */
 208struct dsa_mall_policer_tc_entry {
 209	u32 burst;
 210	u64 rate_bytes_per_sec;
 211};
 212
 213/* TC matchall entry */
 214struct dsa_mall_tc_entry {
 215	struct list_head list;
 216	unsigned long cookie;
 217	enum dsa_port_mall_action_type type;
 218	union {
 219		struct dsa_mall_mirror_tc_entry mirror;
 220		struct dsa_mall_policer_tc_entry policer;
 221	};
 222};
 223
 224
 225struct dsa_port {
 226	/* A CPU port is physically connected to a master device.
 227	 * A user port exposed to userspace has a slave device.
 228	 */
 229	union {
 230		struct net_device *master;
 231		struct net_device *slave;
 232	};
 233
 234	/* Copy of the tagging protocol operations, for quicker access
 235	 * in the data path. Valid only for the CPU ports.
 236	 */
 237	const struct dsa_device_ops *tag_ops;
 238
 239	/* Copies for faster access in master receive hot path */
 240	struct dsa_switch_tree *dst;
 241	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev,
 242			       struct packet_type *pt);
 243	bool (*filter)(const struct sk_buff *skb, struct net_device *dev);
 244
 245	enum {
 246		DSA_PORT_TYPE_UNUSED = 0,
 247		DSA_PORT_TYPE_CPU,
 248		DSA_PORT_TYPE_DSA,
 249		DSA_PORT_TYPE_USER,
 250	} type;
 251
 252	struct dsa_switch	*ds;
 253	unsigned int		index;
 254	const char		*name;
 255	struct dsa_port		*cpu_dp;
 256	u8			mac[ETH_ALEN];
 257	struct device_node	*dn;
 258	unsigned int		ageing_time;
 259	bool			vlan_filtering;
 260	u8			stp_state;
 261	struct net_device	*bridge_dev;
 262	struct devlink_port	devlink_port;
 263	bool			devlink_port_setup;
 264	struct phylink		*pl;
 265	struct phylink_config	pl_config;
 266	struct net_device	*lag_dev;
 267	bool			lag_tx_enabled;
 268	struct net_device	*hsr_dev;
 269
 270	struct list_head list;
 271
 272	/*
 273	 * Give the switch driver somewhere to hang its per-port private data
 274	 * structures (accessible from the tagger).
 275	 */
 276	void *priv;
 277
 278	/*
 279	 * Original copy of the master netdev ethtool_ops
 280	 */
 281	const struct ethtool_ops *orig_ethtool_ops;
 282
 283	/*
 284	 * Original copy of the master netdev net_device_ops
 285	 */
 286	const struct dsa_netdevice_ops *netdev_ops;
 287
 288	/* List of MAC addresses that must be forwarded on this port.
 289	 * These are only valid on CPU ports and DSA links.
 290	 */
 291	struct list_head	fdbs;
 292	struct list_head	mdbs;
 293
 294	bool setup;
 295};
 296
 297/* TODO: ideally DSA ports would have a single dp->link_dp member,
 298 * and no dst->rtable nor this struct dsa_link would be needed,
 299 * but this would require some more complex tree walking,
 300 * so keep it stupid at the moment and list them all.
 301 */
 302struct dsa_link {
 303	struct dsa_port *dp;
 304	struct dsa_port *link_dp;
 305	struct list_head list;
 306};
 307
 308struct dsa_mac_addr {
 309	unsigned char addr[ETH_ALEN];
 310	u16 vid;
 311	refcount_t refcount;
 312	struct list_head list;
 313};
 314
 315struct dsa_switch {
 316	bool setup;
 317
 318	struct device *dev;
 319
 320	/*
 321	 * Parent switch tree, and switch index.
 322	 */
 323	struct dsa_switch_tree	*dst;
 324	unsigned int		index;
 325
 326	/* Listener for switch fabric events */
 327	struct notifier_block	nb;
 328
 329	/*
 330	 * Give the switch driver somewhere to hang its private data
 331	 * structure.
 332	 */
 333	void *priv;
 334
 335	/*
 336	 * Configuration data for this switch.
 337	 */
 338	struct dsa_chip_data	*cd;
 339
 340	/*
 341	 * The switch operations.
 342	 */
 343	const struct dsa_switch_ops	*ops;
 344
 345	/*
 346	 * Slave mii_bus and devices for the individual ports.
 347	 */
 348	u32			phys_mii_mask;
 349	struct mii_bus		*slave_mii_bus;
 350
 351	/* Ageing Time limits in msecs */
 352	unsigned int ageing_time_min;
 353	unsigned int ageing_time_max;
 354
 355	/* devlink used to represent this switch device */
 356	struct devlink		*devlink;
 357
 358	/* Number of switch port queues */
 359	unsigned int		num_tx_queues;
 360
 361	/* Disallow bridge core from requesting different VLAN awareness
 362	 * settings on ports if not hardware-supported
 363	 */
 364	bool			vlan_filtering_is_global;
 365
 366	/* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
 367	 * that have vlan_filtering=0. All drivers should ideally set this (and
 368	 * then the option would get removed), but it is unknown whether this
 369	 * would break things or not.
 370	 */
 371	bool			configure_vlan_while_not_filtering;
 372
 373	/* If the switch driver always programs the CPU port as egress tagged
 374	 * despite the VLAN configuration indicating otherwise, then setting
 375	 * @untag_bridge_pvid will force the DSA receive path to pop the bridge's
 376	 * default_pvid VLAN tagged frames to offer a consistent behavior
 377	 * between a vlan_filtering=0 and vlan_filtering=1 bridge device.
 378	 */
 379	bool			untag_bridge_pvid;
 380
 381	/* Let DSA manage the FDB entries towards the CPU, based on the
 382	 * software bridge database.
 383	 */
 384	bool			assisted_learning_on_cpu_port;
 385
 386	/* In case vlan_filtering_is_global is set, the VLAN awareness state
 387	 * should be retrieved from here and not from the per-port settings.
 388	 */
 389	bool			vlan_filtering;
 390
 391	/* MAC PCS does not provide link state change interrupt, and requires
 392	 * polling. Flag passed on to PHYLINK.
 393	 */
 394	bool			pcs_poll;
 395
 396	/* For switches that only have the MRU configurable. To ensure the
 397	 * configured MTU is not exceeded, normalization of MRU on all bridged
 398	 * interfaces is needed.
 399	 */
 400	bool			mtu_enforcement_ingress;
 401
 402	/* Drivers that benefit from having an ID associated with each
 403	 * offloaded LAG should set this to the maximum number of
 404	 * supported IDs. DSA will then maintain a mapping of _at
 405	 * least_ these many IDs, accessible to drivers via
 406	 * dsa_lag_id().
 407	 */
 408	unsigned int		num_lag_ids;
 409
 410	size_t num_ports;
 411};
 412
 413static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
 414{
 415	struct dsa_switch_tree *dst = ds->dst;
 416	struct dsa_port *dp;
 417
 418	list_for_each_entry(dp, &dst->ports, list)
 419		if (dp->ds == ds && dp->index == p)
 420			return dp;
 421
 422	return NULL;
 423}
 424
 425static inline bool dsa_port_is_dsa(struct dsa_port *port)
 426{
 427	return port->type == DSA_PORT_TYPE_DSA;
 428}
 429
 430static inline bool dsa_port_is_cpu(struct dsa_port *port)
 431{
 432	return port->type == DSA_PORT_TYPE_CPU;
 433}
 434
 435static inline bool dsa_port_is_user(struct dsa_port *dp)
 436{
 437	return dp->type == DSA_PORT_TYPE_USER;
 438}
 439
 440static inline bool dsa_port_is_unused(struct dsa_port *dp)
 441{
 442	return dp->type == DSA_PORT_TYPE_UNUSED;
 443}
 444
 445static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
 446{
 447	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
 448}
 449
 450static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
 451{
 452	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
 453}
 454
 455static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
 456{
 457	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
 458}
 459
 460static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
 461{
 462	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
 463}
 464
 465static inline u32 dsa_user_ports(struct dsa_switch *ds)
 466{
 467	u32 mask = 0;
 468	int p;
 469
 470	for (p = 0; p < ds->num_ports; p++)
 471		if (dsa_is_user_port(ds, p))
 472			mask |= BIT(p);
 473
 474	return mask;
 475}
 476
 477/* Return the local port used to reach an arbitrary switch device */
 478static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
 479{
 480	struct dsa_switch_tree *dst = ds->dst;
 481	struct dsa_link *dl;
 482
 483	list_for_each_entry(dl, &dst->rtable, list)
 484		if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
 485			return dl->dp->index;
 486
 487	return ds->num_ports;
 488}
 489
 490/* Return the local port used to reach an arbitrary switch port */
 491static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
 492					    int port)
 493{
 494	if (device == ds->index)
 495		return port;
 496	else
 497		return dsa_routing_port(ds, device);
 498}
 499
 500/* Return the local port used to reach the dedicated CPU port */
 501static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
 502{
 503	const struct dsa_port *dp = dsa_to_port(ds, port);
 504	const struct dsa_port *cpu_dp = dp->cpu_dp;
 505
 506	if (!cpu_dp)
 507		return port;
 508
 509	return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
 510}
 511
 512/* Return true if this is the local port used to reach the CPU port */
 513static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
 514{
 515	if (dsa_is_unused_port(ds, port))
 516		return false;
 517
 518	return port == dsa_upstream_port(ds, port);
 519}
 520
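/* Illustrative sketch, not part of this header: a hypothetical driver using
 * dsa_is_user_port() and dsa_upstream_port() to restrict each user port to
 * talking only towards its CPU-facing (upstream) port.
 * example_set_allowed_port_mask() is a made-up register accessor.
 */
static void example_isolate_user_ports(struct dsa_switch *ds)
{
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (!dsa_is_user_port(ds, port))
			continue;

		example_set_allowed_port_mask(ds, port,
					      BIT(dsa_upstream_port(ds, port)));
	}
}
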
 521/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
 522 * that the routing port from @downstream_ds to @upstream_ds is also the port
 523 * which @downstream_ds uses to reach its dedicated CPU.
 524 */
 525static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
 526					     struct dsa_switch *downstream_ds)
 527{
 528	int routing_port;
 529
 530	if (upstream_ds == downstream_ds)
 531		return true;
 532
 533	routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
 534
 535	return dsa_is_upstream_port(downstream_ds, routing_port);
 536}
 537
 538static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
 539{
 540	const struct dsa_switch *ds = dp->ds;
 541
 542	if (ds->vlan_filtering_is_global)
 543		return ds->vlan_filtering;
 544	else
 545		return dp->vlan_filtering;
 546}
 547
 548static inline
 549struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
 550{
 551	if (!dp->bridge_dev)
 552		return NULL;
 553
 554	if (dp->lag_dev)
 555		return dp->lag_dev;
 556	else if (dp->hsr_dev)
 557		return dp->hsr_dev;
 558
 559	return dp->slave;
 560}
 561
 562typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
 563			      bool is_static, void *data);
 564struct dsa_switch_ops {
 565	/*
 566	 * Tagging protocol helpers called for the CPU ports and DSA links.
 567	 * @get_tag_protocol retrieves the initial tagging protocol and is
 568	 * mandatory. Switches which can operate using multiple tagging
 569	 * protocols should implement @change_tag_protocol and report in
 570	 * @get_tag_protocol the tagger in current use.
 571	 */
 572	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
 573						  int port,
 574						  enum dsa_tag_protocol mprot);
 575	int	(*change_tag_protocol)(struct dsa_switch *ds, int port,
 576				       enum dsa_tag_protocol proto);
 577
 578	/* Optional switch-wide initialization and destruction methods */
 579	int	(*setup)(struct dsa_switch *ds);
 580	void	(*teardown)(struct dsa_switch *ds);
 581
 582	/* Per-port initialization and destruction methods. Mandatory if the
 583	 * driver registers devlink port regions, optional otherwise.
 584	 */
 585	int	(*port_setup)(struct dsa_switch *ds, int port);
 586	void	(*port_teardown)(struct dsa_switch *ds, int port);
 587
 588	u32	(*get_phy_flags)(struct dsa_switch *ds, int port);
 589
 590	/*
 591	 * Access to the switch's PHY registers.
 592	 */
 593	int	(*phy_read)(struct dsa_switch *ds, int port, int regnum);
 594	int	(*phy_write)(struct dsa_switch *ds, int port,
 595			     int regnum, u16 val);
 596
 597	/*
 598	 * Link state adjustment (called from libphy)
 599	 */
 600	void	(*adjust_link)(struct dsa_switch *ds, int port,
 601				struct phy_device *phydev);
 602	void	(*fixed_link_update)(struct dsa_switch *ds, int port,
 603				struct fixed_phy_status *st);
 604
 605	/*
 606	 * PHYLINK integration
 607	 */
 608	void	(*phylink_validate)(struct dsa_switch *ds, int port,
 609				    unsigned long *supported,
 610				    struct phylink_link_state *state);
 611	int	(*phylink_mac_link_state)(struct dsa_switch *ds, int port,
 612					  struct phylink_link_state *state);
 613	void	(*phylink_mac_config)(struct dsa_switch *ds, int port,
 614				      unsigned int mode,
 615				      const struct phylink_link_state *state);
 616	void	(*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
 617	void	(*phylink_mac_link_down)(struct dsa_switch *ds, int port,
 618					 unsigned int mode,
 619					 phy_interface_t interface);
 620	void	(*phylink_mac_link_up)(struct dsa_switch *ds, int port,
 621				       unsigned int mode,
 622				       phy_interface_t interface,
 623				       struct phy_device *phydev,
 624				       int speed, int duplex,
 625				       bool tx_pause, bool rx_pause);
 626	void	(*phylink_fixed_state)(struct dsa_switch *ds, int port,
 627				       struct phylink_link_state *state);
 628	/*
 629	 * Port statistics counters.
 630	 */
 631	void	(*get_strings)(struct dsa_switch *ds, int port,
 632			       u32 stringset, uint8_t *data);
 633	void	(*get_ethtool_stats)(struct dsa_switch *ds,
 634				     int port, uint64_t *data);
 635	int	(*get_sset_count)(struct dsa_switch *ds, int port, int sset);
 636	void	(*get_ethtool_phy_stats)(struct dsa_switch *ds,
 637					 int port, uint64_t *data);
 638	void	(*get_stats64)(struct dsa_switch *ds, int port,
 639				   struct rtnl_link_stats64 *s);
 640	void	(*self_test)(struct dsa_switch *ds, int port,
 641			     struct ethtool_test *etest, u64 *data);
 642
 643	/*
 644	 * ethtool Wake-on-LAN
 645	 */
 646	void	(*get_wol)(struct dsa_switch *ds, int port,
 647			   struct ethtool_wolinfo *w);
 648	int	(*set_wol)(struct dsa_switch *ds, int port,
 649			   struct ethtool_wolinfo *w);
 650
 651	/*
 652	 * ethtool timestamp info
 653	 */
 654	int	(*get_ts_info)(struct dsa_switch *ds, int port,
 655			       struct ethtool_ts_info *ts);
 656
 657	/*
 658	 * Suspend and resume
 659	 */
 660	int	(*suspend)(struct dsa_switch *ds);
 661	int	(*resume)(struct dsa_switch *ds);
 662
 663	/*
 664	 * Port enable/disable
 665	 */
 666	int	(*port_enable)(struct dsa_switch *ds, int port,
 667			       struct phy_device *phy);
 668	void	(*port_disable)(struct dsa_switch *ds, int port);
 669
 670	/*
 671	 * Port's MAC EEE settings
 672	 */
 673	int	(*set_mac_eee)(struct dsa_switch *ds, int port,
 674			       struct ethtool_eee *e);
 675	int	(*get_mac_eee)(struct dsa_switch *ds, int port,
 676			       struct ethtool_eee *e);
 677
 678	/* EEPROM access */
 679	int	(*get_eeprom_len)(struct dsa_switch *ds);
 680	int	(*get_eeprom)(struct dsa_switch *ds,
 681			      struct ethtool_eeprom *eeprom, u8 *data);
 682	int	(*set_eeprom)(struct dsa_switch *ds,
 683			      struct ethtool_eeprom *eeprom, u8 *data);
 684
 685	/*
 686	 * Register access.
 687	 */
 688	int	(*get_regs_len)(struct dsa_switch *ds, int port);
 689	void	(*get_regs)(struct dsa_switch *ds, int port,
 690			    struct ethtool_regs *regs, void *p);
 691
 692	/*
 693	 * Upper device tracking.
 694	 */
 695	int	(*port_prechangeupper)(struct dsa_switch *ds, int port,
 696				       struct netdev_notifier_changeupper_info *info);
 697
 698	/*
 699	 * Bridge integration
 700	 */
 701	int	(*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
 702	int	(*port_bridge_join)(struct dsa_switch *ds, int port,
 703				    struct net_device *bridge);
 704	void	(*port_bridge_leave)(struct dsa_switch *ds, int port,
 705				     struct net_device *bridge);
 706	void	(*port_stp_state_set)(struct dsa_switch *ds, int port,
 707				      u8 state);
 708	void	(*port_fast_age)(struct dsa_switch *ds, int port);
 709	int	(*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
 710					 struct switchdev_brport_flags flags,
 711					 struct netlink_ext_ack *extack);
 712	int	(*port_bridge_flags)(struct dsa_switch *ds, int port,
 713				     struct switchdev_brport_flags flags,
 714				     struct netlink_ext_ack *extack);
 715
 716	/*
 717	 * VLAN support
 718	 */
 719	int	(*port_vlan_filtering)(struct dsa_switch *ds, int port,
 720				       bool vlan_filtering,
 721				       struct netlink_ext_ack *extack);
 722	int	(*port_vlan_add)(struct dsa_switch *ds, int port,
 723				 const struct switchdev_obj_port_vlan *vlan,
 724				 struct netlink_ext_ack *extack);
 725	int	(*port_vlan_del)(struct dsa_switch *ds, int port,
 726				 const struct switchdev_obj_port_vlan *vlan);
 727	/*
 728	 * Forwarding database
 729	 */
 730	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
 731				const unsigned char *addr, u16 vid);
 732	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
 733				const unsigned char *addr, u16 vid);
 734	int	(*port_fdb_dump)(struct dsa_switch *ds, int port,
 735				 dsa_fdb_dump_cb_t *cb, void *data);
 736
 737	/*
 738	 * Multicast database
 739	 */
 740	int	(*port_mdb_add)(struct dsa_switch *ds, int port,
 741				const struct switchdev_obj_port_mdb *mdb);
 742	int	(*port_mdb_del)(struct dsa_switch *ds, int port,
 743				const struct switchdev_obj_port_mdb *mdb);
 744	/*
 745	 * RXNFC
 746	 */
 747	int	(*get_rxnfc)(struct dsa_switch *ds, int port,
 748			     struct ethtool_rxnfc *nfc, u32 *rule_locs);
 749	int	(*set_rxnfc)(struct dsa_switch *ds, int port,
 750			     struct ethtool_rxnfc *nfc);
 751
 752	/*
 753	 * TC integration
 754	 */
 755	int	(*cls_flower_add)(struct dsa_switch *ds, int port,
 756				  struct flow_cls_offload *cls, bool ingress);
 757	int	(*cls_flower_del)(struct dsa_switch *ds, int port,
 758				  struct flow_cls_offload *cls, bool ingress);
 759	int	(*cls_flower_stats)(struct dsa_switch *ds, int port,
 760				    struct flow_cls_offload *cls, bool ingress);
 761	int	(*port_mirror_add)(struct dsa_switch *ds, int port,
 762				   struct dsa_mall_mirror_tc_entry *mirror,
 763				   bool ingress);
 764	void	(*port_mirror_del)(struct dsa_switch *ds, int port,
 765				   struct dsa_mall_mirror_tc_entry *mirror);
 766	int	(*port_policer_add)(struct dsa_switch *ds, int port,
 767				    struct dsa_mall_policer_tc_entry *policer);
 768	void	(*port_policer_del)(struct dsa_switch *ds, int port);
 769	int	(*port_setup_tc)(struct dsa_switch *ds, int port,
 770				 enum tc_setup_type type, void *type_data);
 771
 772	/*
 773	 * Cross-chip operations
 774	 */
 775	int	(*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
 776					 int sw_index, int port,
 777					 struct net_device *br);
 778	void	(*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
 779					  int sw_index, int port,
 780					  struct net_device *br);
 781	int	(*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
 782					int port);
 783	int	(*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
 784				      int port, struct net_device *lag,
 785				      struct netdev_lag_upper_info *info);
 786	int	(*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
 787				       int port, struct net_device *lag);
 788
 789	/*
 790	 * PTP functionality
 791	 */
 792	int	(*port_hwtstamp_get)(struct dsa_switch *ds, int port,
 793				     struct ifreq *ifr);
 794	int	(*port_hwtstamp_set)(struct dsa_switch *ds, int port,
 795				     struct ifreq *ifr);
 796	void	(*port_txtstamp)(struct dsa_switch *ds, int port,
 797				 struct sk_buff *skb);
 798	bool	(*port_rxtstamp)(struct dsa_switch *ds, int port,
 799				 struct sk_buff *skb, unsigned int type);
 800
 801	/* Devlink parameters, etc */
 802	int	(*devlink_param_get)(struct dsa_switch *ds, u32 id,
 803				     struct devlink_param_gset_ctx *ctx);
 804	int	(*devlink_param_set)(struct dsa_switch *ds, u32 id,
 805				     struct devlink_param_gset_ctx *ctx);
 806	int	(*devlink_info_get)(struct dsa_switch *ds,
 807				    struct devlink_info_req *req,
 808				    struct netlink_ext_ack *extack);
 809	int	(*devlink_sb_pool_get)(struct dsa_switch *ds,
 810				       unsigned int sb_index, u16 pool_index,
 811				       struct devlink_sb_pool_info *pool_info);
 812	int	(*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
 813				       u16 pool_index, u32 size,
 814				       enum devlink_sb_threshold_type threshold_type,
 815				       struct netlink_ext_ack *extack);
 816	int	(*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
 817					    unsigned int sb_index, u16 pool_index,
 818					    u32 *p_threshold);
 819	int	(*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
 820					    unsigned int sb_index, u16 pool_index,
 821					    u32 threshold,
 822					    struct netlink_ext_ack *extack);
 823	int	(*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
 824					       unsigned int sb_index, u16 tc_index,
 825					       enum devlink_sb_pool_type pool_type,
 826					       u16 *p_pool_index, u32 *p_threshold);
 827	int	(*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
 828					       unsigned int sb_index, u16 tc_index,
 829					       enum devlink_sb_pool_type pool_type,
 830					       u16 pool_index, u32 threshold,
 831					       struct netlink_ext_ack *extack);
 832	int	(*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
 833					   unsigned int sb_index);
 834	int	(*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
 835					    unsigned int sb_index);
 836	int	(*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
 837						unsigned int sb_index, u16 pool_index,
 838						u32 *p_cur, u32 *p_max);
 839	int	(*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
 840						   unsigned int sb_index, u16 tc_index,
 841						   enum devlink_sb_pool_type pool_type,
 842						   u32 *p_cur, u32 *p_max);
 843
 844	/*
 845	 * MTU change functionality. Switches can also adjust their MRU through
 846	 * this method. By MTU, one understands the SDU (L2 payload) length.
 847	 * If the switch needs to account for the DSA tag on the CPU port, this
 848	 * method needs to do so privately.
 849	 */
 850	int	(*port_change_mtu)(struct dsa_switch *ds, int port,
 851				   int new_mtu);
 852	int	(*port_max_mtu)(struct dsa_switch *ds, int port);
 853
 854	/*
 855	 * LAG integration
 856	 */
 857	int	(*port_lag_change)(struct dsa_switch *ds, int port);
 858	int	(*port_lag_join)(struct dsa_switch *ds, int port,
 859				 struct net_device *lag,
 860				 struct netdev_lag_upper_info *info);
 861	int	(*port_lag_leave)(struct dsa_switch *ds, int port,
 862				  struct net_device *lag);
 863
 864	/*
 865	 * HSR integration
 866	 */
 867	int	(*port_hsr_join)(struct dsa_switch *ds, int port,
 868				 struct net_device *hsr);
 869	int	(*port_hsr_leave)(struct dsa_switch *ds, int port,
 870				  struct net_device *hsr);
 871
 872	/*
 873	 * MRP integration
 874	 */
 875	int	(*port_mrp_add)(struct dsa_switch *ds, int port,
 876				const struct switchdev_obj_mrp *mrp);
 877	int	(*port_mrp_del)(struct dsa_switch *ds, int port,
 878				const struct switchdev_obj_mrp *mrp);
 879	int	(*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
 880					  const struct switchdev_obj_ring_role_mrp *mrp);
 881	int	(*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
 882					  const struct switchdev_obj_ring_role_mrp *mrp);
 883};
 884
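/* Illustrative sketch, not part of this header: the smallest set of
 * dsa_switch_ops a driver typically provides. Per the comment at the top of
 * the structure, .get_tag_protocol is mandatory; the rest is optional.
 * All example_* names are hypothetical.
 */
static enum dsa_tag_protocol
example_get_tag_protocol(struct dsa_switch *ds, int port,
			 enum dsa_tag_protocol mprot)
{
	/* placeholder; a real driver returns the protocol its tagger uses */
	return DSA_TAG_PROTO_NONE;
}

static int example_setup(struct dsa_switch *ds)
{
	/* program switch-wide defaults here */
	return 0;
}

static const struct dsa_switch_ops example_switch_ops = {
	.get_tag_protocol	= example_get_tag_protocol,
	.setup			= example_setup,
};

/* The driver would then set ds->ops = &example_switch_ops, fill in ds->dev
 * and ds->num_ports, and call dsa_register_switch(ds) (declared below).
 */
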
 885#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)		\
 886	DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes,		\
 887			     dsa_devlink_param_get, dsa_devlink_param_set, NULL)
 888
 889int dsa_devlink_param_get(struct devlink *dl, u32 id,
 890			  struct devlink_param_gset_ctx *ctx);
 891int dsa_devlink_param_set(struct devlink *dl, u32 id,
 892			  struct devlink_param_gset_ctx *ctx);
 893int dsa_devlink_params_register(struct dsa_switch *ds,
 894				const struct devlink_param *params,
 895				size_t params_count);
 896void dsa_devlink_params_unregister(struct dsa_switch *ds,
 897				   const struct devlink_param *params,
 898				   size_t params_count);
 899int dsa_devlink_resource_register(struct dsa_switch *ds,
 900				  const char *resource_name,
 901				  u64 resource_size,
 902				  u64 resource_id,
 903				  u64 parent_resource_id,
 904				  const struct devlink_resource_size_params *size_params);
 905
 906void dsa_devlink_resources_unregister(struct dsa_switch *ds);
 907
 908void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
 909					   u64 resource_id,
 910					   devlink_resource_occ_get_t *occ_get,
 911					   void *occ_get_priv);
 912void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
 913					     u64 resource_id);
 914struct devlink_region *
 915dsa_devlink_region_create(struct dsa_switch *ds,
 916			  const struct devlink_region_ops *ops,
 917			  u32 region_max_snapshots, u64 region_size);
 918struct devlink_region *
 919dsa_devlink_port_region_create(struct dsa_switch *ds,
 920			       int port,
 921			       const struct devlink_port_region_ops *ops,
 922			       u32 region_max_snapshots, u64 region_size);
 923void dsa_devlink_region_destroy(struct devlink_region *region);
 924
 925struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
 926
 927struct dsa_devlink_priv {
 928	struct dsa_switch *ds;
 929};
 930
 931static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
 932{
 933	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
 934
 935	return dl_priv->ds;
 936}
 937
 938static inline
 939struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
 940{
 941	struct devlink *dl = port->devlink;
 942	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
 943
 944	return dl_priv->ds;
 945}
 946
 947static inline int dsa_devlink_port_to_port(struct devlink_port *port)
 948{
 949	return port->index;
 950}
 951
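/* Illustrative sketch, not part of this header: devlink callbacks that only
 * receive a struct devlink handle can map it back to the switch, and from
 * there to the driver's private data, with dsa_devlink_to_ds().
 * struct example_priv is hypothetical.
 */
static struct example_priv *example_priv_from_devlink(struct devlink *dl)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	return ds->priv;
}
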
 952struct dsa_switch_driver {
 953	struct list_head	list;
 954	const struct dsa_switch_ops *ops;
 955};
 956
 957struct net_device *dsa_dev_to_net_device(struct device *dev);
 958
 959/* Keep inline for faster access in hot path */
 960static inline bool netdev_uses_dsa(const struct net_device *dev)
 961{
 962#if IS_ENABLED(CONFIG_NET_DSA)
 963	return dev->dsa_ptr && dev->dsa_ptr->rcv;
 964#endif
 965	return false;
 966}
 967
 968static inline bool dsa_can_decode(const struct sk_buff *skb,
 969				  struct net_device *dev)
 970{
 971#if IS_ENABLED(CONFIG_NET_DSA)
 972	return !dev->dsa_ptr->filter || dev->dsa_ptr->filter(skb, dev);
 973#endif
 974	return false;
 975}
 976
 977/* All DSA tags that push the EtherType to the right (basically all except tail
 978 * tags, which don't break dissection) can be treated the same from the
 979 * perspective of the flow dissector.
 980 *
 981 * We need to return:
 982 *  - offset: the (B - A) difference between:
 983 *    A. the position of the real EtherType and
 984 *    B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
 985 *       after the normal EtherType was supposed to be)
 986 *    The offset in bytes is exactly equal to the tagger overhead (and half of
 987 *    that, in __be16 shorts).
 988 *
 989 *  - proto: the value of the real EtherType.
 990 */
 991static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
 992						__be16 *proto, int *offset)
 993{
 994#if IS_ENABLED(CONFIG_NET_DSA)
 995	const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
 996	int tag_len = ops->needed_headroom;
 997
 998	*offset = tag_len;
 999	*proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
1000#endif
1001}
1002
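/* Worked example (illustrative, not part of this header): for a tagger with a
 * 4-byte header (needed_headroom = 4), the helper above reports *offset = 4
 * and reads the real EtherType from ((__be16 *)skb->data)[1], i.e. bytes 2-3
 * relative to the current skb->data.
 */
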
1003#if IS_ENABLED(CONFIG_NET_DSA)
1004static inline int __dsa_netdevice_ops_check(struct net_device *dev)
1005{
1006	int err = -EOPNOTSUPP;
1007
1008	if (!dev->dsa_ptr)
1009		return err;
1010
1011	if (!dev->dsa_ptr->netdev_ops)
1012		return err;
1013
1014	return 0;
1015}
1016
1017static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
1018				   int cmd)
1019{
1020	const struct dsa_netdevice_ops *ops;
1021	int err;
1022
1023	err = __dsa_netdevice_ops_check(dev);
1024	if (err)
1025		return err;
1026
1027	ops = dev->dsa_ptr->netdev_ops;
1028
1029	return ops->ndo_do_ioctl(dev, ifr, cmd);
1030}
1031#else
1032static inline int dsa_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr,
1033				   int cmd)
1034{
1035	return -EOPNOTSUPP;
1036}
1037#endif
1038
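/* Illustrative sketch, not part of this header: a hypothetical caller can try
 * the DSA ioctl overlay first and treat -EOPNOTSUPP as "not handled by DSA",
 * falling back to its own handling. example_fallback_ioctl() is made up.
 */
static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err;

	err = dsa_ndo_do_ioctl(dev, ifr, cmd);
	if (err != -EOPNOTSUPP)
		return err;

	return example_fallback_ioctl(dev, ifr, cmd);
}
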
1039void dsa_unregister_switch(struct dsa_switch *ds);
1040int dsa_register_switch(struct dsa_switch *ds);
1041struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
1042#ifdef CONFIG_PM_SLEEP
1043int dsa_switch_suspend(struct dsa_switch *ds);
1044int dsa_switch_resume(struct dsa_switch *ds);
1045#else
1046static inline int dsa_switch_suspend(struct dsa_switch *ds)
1047{
1048	return 0;
1049}
1050static inline int dsa_switch_resume(struct dsa_switch *ds)
1051{
1052	return 0;
1053}
1054#endif /* CONFIG_PM_SLEEP */
1055
1056#if IS_ENABLED(CONFIG_NET_DSA)
1057bool dsa_slave_dev_check(const struct net_device *dev);
1058#else
1059static inline bool dsa_slave_dev_check(const struct net_device *dev)
1060{
1061	return false;
1062}
1063#endif
1064
1065netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
1066int dsa_port_get_phy_strings(struct dsa_port *dp, uint8_t *data);
1067int dsa_port_get_ethtool_phy_stats(struct dsa_port *dp, uint64_t *data);
1068int dsa_port_get_phy_sset_count(struct dsa_port *dp);
1069void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
1070
1071struct dsa_tag_driver {
1072	const struct dsa_device_ops *ops;
1073	struct list_head list;
1074	struct module *owner;
1075};
1076
1077void dsa_tag_drivers_register(struct dsa_tag_driver *dsa_tag_driver_array[],
1078			      unsigned int count,
1079			      struct module *owner);
1080void dsa_tag_drivers_unregister(struct dsa_tag_driver *dsa_tag_driver_array[],
1081				unsigned int count);
1082
1083#define dsa_tag_driver_module_drivers(__dsa_tag_drivers_array, __count)	\
1084static int __init dsa_tag_driver_module_init(void)			\
1085{									\
1086	dsa_tag_drivers_register(__dsa_tag_drivers_array, __count,	\
1087				 THIS_MODULE);				\
1088	return 0;							\
1089}									\
1090module_init(dsa_tag_driver_module_init);				\
1091									\
1092static void __exit dsa_tag_driver_module_exit(void)			\
1093{									\
1094	dsa_tag_drivers_unregister(__dsa_tag_drivers_array, __count);	\
1095}									\
1096module_exit(dsa_tag_driver_module_exit)
1097
1098/**
1099 * module_dsa_tag_drivers() - Helper macro for registering DSA tag
1100 * drivers
 1101 * @__ops_array: Array of tag driver structures
1102 *
1103 * Helper macro for DSA tag drivers which do not do anything special
1104 * in module init/exit. Each module may only use this macro once, and
1105 * calling it replaces module_init() and module_exit().
1106 */
1107#define module_dsa_tag_drivers(__ops_array)				\
1108dsa_tag_driver_module_drivers(__ops_array, ARRAY_SIZE(__ops_array))
1109
1110#define DSA_TAG_DRIVER_NAME(__ops) dsa_tag_driver ## _ ## __ops
1111
1112/* Create a static structure we can build a linked list of dsa_tag
1113 * drivers
1114 */
1115#define DSA_TAG_DRIVER(__ops)						\
1116static struct dsa_tag_driver DSA_TAG_DRIVER_NAME(__ops) = {		\
1117	.ops = &__ops,							\
1118}
1119
1120/**
1121 * module_dsa_tag_driver() - Helper macro for registering a single DSA tag
1122 * driver
 1123 * @__ops: Single tag driver structure
1124 *
1125 * Helper macro for DSA tag drivers which do not do anything special
1126 * in module init/exit. Each module may only use this macro once, and
1127 * calling it replaces module_init() and module_exit().
1128 */
1129#define module_dsa_tag_driver(__ops)					\
1130DSA_TAG_DRIVER(__ops);							\
1131									\
1132static struct dsa_tag_driver *dsa_tag_driver_array[] =	{		\
1133	&DSA_TAG_DRIVER_NAME(__ops)					\
1134};									\
1135module_dsa_tag_drivers(dsa_tag_driver_array)
1136#endif
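
/* Illustrative sketch, not part of this header: the rough shape of a tag
 * driver module built on the macros above. All example_* names are
 * hypothetical; only the dsa_device_ops fields, MODULE_ALIAS_DSA_TAG_DRIVER()
 * and module_dsa_tag_driver() come from this file.
 */
static struct sk_buff *example_tag_xmit(struct sk_buff *skb,
					struct net_device *dev)
{
	/* a real tagger pushes its header into the needed_headroom here */
	return skb;
}

static struct sk_buff *example_tag_rcv(struct sk_buff *skb,
				       struct net_device *dev,
				       struct packet_type *pt)
{
	/* a real tagger parses its header and picks the slave netdev here */
	return skb;
}

static const struct dsa_device_ops example_tag_ops = {
	.name			= "example",
	.proto			= DSA_TAG_PROTO_NONE,
	.xmit			= example_tag_xmit,
	.rcv			= example_tag_rcv,
	.needed_headroom	= 4,
};

MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_NONE);
module_dsa_tag_driver(example_tag_ops);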
1137
v6.2
   1/* SPDX-License-Identifier: GPL-2.0-or-later */
   2/*
   3 * include/net/dsa.h - Driver for Distributed Switch Architecture switch chips
   4 * Copyright (c) 2008-2009 Marvell Semiconductor
   5 */
   6
   7#ifndef __LINUX_NET_DSA_H
   8#define __LINUX_NET_DSA_H
   9
  10#include <linux/if.h>
  11#include <linux/if_ether.h>
  12#include <linux/list.h>
  13#include <linux/notifier.h>
  14#include <linux/timer.h>
  15#include <linux/workqueue.h>
  16#include <linux/of.h>
  17#include <linux/ethtool.h>
  18#include <linux/net_tstamp.h>
  19#include <linux/phy.h>
  20#include <linux/platform_data/dsa.h>
  21#include <linux/phylink.h>
  22#include <net/devlink.h>
  23#include <net/switchdev.h>
  24
  25struct dsa_8021q_context;
  26struct tc_action;
  27struct phy_device;
  28struct fixed_phy_status;
  29struct phylink_link_state;
  30
  31#define DSA_TAG_PROTO_NONE_VALUE		0
  32#define DSA_TAG_PROTO_BRCM_VALUE		1
  33#define DSA_TAG_PROTO_BRCM_PREPEND_VALUE	2
  34#define DSA_TAG_PROTO_DSA_VALUE			3
  35#define DSA_TAG_PROTO_EDSA_VALUE		4
  36#define DSA_TAG_PROTO_GSWIP_VALUE		5
  37#define DSA_TAG_PROTO_KSZ9477_VALUE		6
  38#define DSA_TAG_PROTO_KSZ9893_VALUE		7
  39#define DSA_TAG_PROTO_LAN9303_VALUE		8
  40#define DSA_TAG_PROTO_MTK_VALUE			9
  41#define DSA_TAG_PROTO_QCA_VALUE			10
  42#define DSA_TAG_PROTO_TRAILER_VALUE		11
  43#define DSA_TAG_PROTO_8021Q_VALUE		12
  44#define DSA_TAG_PROTO_SJA1105_VALUE		13
  45#define DSA_TAG_PROTO_KSZ8795_VALUE		14
  46#define DSA_TAG_PROTO_OCELOT_VALUE		15
  47#define DSA_TAG_PROTO_AR9331_VALUE		16
  48#define DSA_TAG_PROTO_RTL4_A_VALUE		17
  49#define DSA_TAG_PROTO_HELLCREEK_VALUE		18
  50#define DSA_TAG_PROTO_XRS700X_VALUE		19
  51#define DSA_TAG_PROTO_OCELOT_8021Q_VALUE	20
  52#define DSA_TAG_PROTO_SEVILLE_VALUE		21
  53#define DSA_TAG_PROTO_BRCM_LEGACY_VALUE		22
  54#define DSA_TAG_PROTO_SJA1110_VALUE		23
  55#define DSA_TAG_PROTO_RTL8_4_VALUE		24
  56#define DSA_TAG_PROTO_RTL8_4T_VALUE		25
  57#define DSA_TAG_PROTO_RZN1_A5PSW_VALUE		26
  58#define DSA_TAG_PROTO_LAN937X_VALUE		27
  59
  60enum dsa_tag_protocol {
  61	DSA_TAG_PROTO_NONE		= DSA_TAG_PROTO_NONE_VALUE,
  62	DSA_TAG_PROTO_BRCM		= DSA_TAG_PROTO_BRCM_VALUE,
  63	DSA_TAG_PROTO_BRCM_LEGACY	= DSA_TAG_PROTO_BRCM_LEGACY_VALUE,
  64	DSA_TAG_PROTO_BRCM_PREPEND	= DSA_TAG_PROTO_BRCM_PREPEND_VALUE,
  65	DSA_TAG_PROTO_DSA		= DSA_TAG_PROTO_DSA_VALUE,
  66	DSA_TAG_PROTO_EDSA		= DSA_TAG_PROTO_EDSA_VALUE,
  67	DSA_TAG_PROTO_GSWIP		= DSA_TAG_PROTO_GSWIP_VALUE,
  68	DSA_TAG_PROTO_KSZ9477		= DSA_TAG_PROTO_KSZ9477_VALUE,
  69	DSA_TAG_PROTO_KSZ9893		= DSA_TAG_PROTO_KSZ9893_VALUE,
  70	DSA_TAG_PROTO_LAN9303		= DSA_TAG_PROTO_LAN9303_VALUE,
  71	DSA_TAG_PROTO_MTK		= DSA_TAG_PROTO_MTK_VALUE,
  72	DSA_TAG_PROTO_QCA		= DSA_TAG_PROTO_QCA_VALUE,
  73	DSA_TAG_PROTO_TRAILER		= DSA_TAG_PROTO_TRAILER_VALUE,
  74	DSA_TAG_PROTO_8021Q		= DSA_TAG_PROTO_8021Q_VALUE,
  75	DSA_TAG_PROTO_SJA1105		= DSA_TAG_PROTO_SJA1105_VALUE,
  76	DSA_TAG_PROTO_KSZ8795		= DSA_TAG_PROTO_KSZ8795_VALUE,
  77	DSA_TAG_PROTO_OCELOT		= DSA_TAG_PROTO_OCELOT_VALUE,
  78	DSA_TAG_PROTO_AR9331		= DSA_TAG_PROTO_AR9331_VALUE,
  79	DSA_TAG_PROTO_RTL4_A		= DSA_TAG_PROTO_RTL4_A_VALUE,
  80	DSA_TAG_PROTO_HELLCREEK		= DSA_TAG_PROTO_HELLCREEK_VALUE,
  81	DSA_TAG_PROTO_XRS700X		= DSA_TAG_PROTO_XRS700X_VALUE,
  82	DSA_TAG_PROTO_OCELOT_8021Q	= DSA_TAG_PROTO_OCELOT_8021Q_VALUE,
  83	DSA_TAG_PROTO_SEVILLE		= DSA_TAG_PROTO_SEVILLE_VALUE,
  84	DSA_TAG_PROTO_SJA1110		= DSA_TAG_PROTO_SJA1110_VALUE,
  85	DSA_TAG_PROTO_RTL8_4		= DSA_TAG_PROTO_RTL8_4_VALUE,
  86	DSA_TAG_PROTO_RTL8_4T		= DSA_TAG_PROTO_RTL8_4T_VALUE,
  87	DSA_TAG_PROTO_RZN1_A5PSW	= DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
  88	DSA_TAG_PROTO_LAN937X		= DSA_TAG_PROTO_LAN937X_VALUE,
  89};
  90
  91struct dsa_switch;
  92
  93struct dsa_device_ops {
  94	struct sk_buff *(*xmit)(struct sk_buff *skb, struct net_device *dev);
  95	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
  96	void (*flow_dissect)(const struct sk_buff *skb, __be16 *proto,
  97			     int *offset);
  98	int (*connect)(struct dsa_switch *ds);
  99	void (*disconnect)(struct dsa_switch *ds);
 100	unsigned int needed_headroom;
 101	unsigned int needed_tailroom;
 102	const char *name;
 103	enum dsa_tag_protocol proto;
 104	/* Some tagging protocols either mangle or shift the destination MAC
 105	 * address, in which case the DSA master would drop packets on ingress
 106	 * if what it understands out of the destination MAC address is not in
 107	 * its RX filter.
 108	 */
 109	bool promisc_on_master;
 110};
 111
 112/* This structure defines the control interfaces that are overlayed by the
 113 * DSA layer on top of the DSA CPU/management net_device instance. This is
 114 * used by the core net_device layer while calling various net_device_ops
 115 * function pointers.
 116 */
 117struct dsa_netdevice_ops {
 118	int (*ndo_eth_ioctl)(struct net_device *dev, struct ifreq *ifr,
 119			     int cmd);
 120};
 121
 122struct dsa_lag {
 123	struct net_device *dev;
 124	unsigned int id;
 125	struct mutex fdb_lock;
 126	struct list_head fdbs;
 127	refcount_t refcount;
 128};
 129
 130struct dsa_switch_tree {
 131	struct list_head	list;
 132
 133	/* List of switch ports */
 134	struct list_head ports;
 135
 136	/* Notifier chain for switch-wide events */
 137	struct raw_notifier_head	nh;
 138
 139	/* Tree identifier */
 140	unsigned int index;
 141
 142	/* Number of switches attached to this tree */
 143	struct kref refcount;
 144
 145	/* Maps offloaded LAG netdevs to a zero-based linear ID for
 146	 * drivers that need it.
 147	 */
 148	struct dsa_lag **lags;
 149
 150	/* Tagging protocol operations */
 151	const struct dsa_device_ops *tag_ops;
 152
 153	/* Default tagging protocol preferred by the switches in this
 154	 * tree.
 155	 */
 156	enum dsa_tag_protocol default_proto;
 157
 158	/* Has this tree been applied to the hardware? */
 159	bool setup;
 160
 161	/*
 162	 * Configuration data for the platform device that owns
 163	 * this dsa switch tree instance.
 164	 */
 165	struct dsa_platform_data	*pd;
 166
 167	/* List of DSA links composing the routing table */
 168	struct list_head rtable;
 169
 170	/* Length of "lags" array */
 171	unsigned int lags_len;
 172
 173	/* Track the largest switch index within a tree */
 174	unsigned int last_switch;
 175};
 176
 177/* LAG IDs are one-based, the dst->lags array is zero-based */
 178#define dsa_lags_foreach_id(_id, _dst)				\
 179	for ((_id) = 1; (_id) <= (_dst)->lags_len; (_id)++)	\
 180		if ((_dst)->lags[(_id) - 1])
 181
 182#define dsa_lag_foreach_port(_dp, _dst, _lag)			\
 183	list_for_each_entry((_dp), &(_dst)->ports, list)	\
 184		if (dsa_port_offloads_lag((_dp), (_lag)))
 185
 186#define dsa_hsr_foreach_port(_dp, _ds, _hsr)			\
 187	list_for_each_entry((_dp), &(_ds)->dst->ports, list)	\
 188		if ((_dp)->ds == (_ds) && (_dp)->hsr_dev == (_hsr))
 189
 190static inline struct dsa_lag *dsa_lag_by_id(struct dsa_switch_tree *dst,
 191					    unsigned int id)
 192{
 193	/* DSA LAG IDs are one-based, dst->lags is zero-based */
 194	return dst->lags[id - 1];
 195}
 196
 197static inline int dsa_lag_id(struct dsa_switch_tree *dst,
 198			     struct net_device *lag_dev)
 199{
 200	unsigned int id;
 201
 202	dsa_lags_foreach_id(id, dst) {
 203		struct dsa_lag *lag = dsa_lag_by_id(dst, id);
 204
 205		if (lag->dev == lag_dev)
 206			return lag->id;
 207	}
 208
 209	return -ENODEV;
 210}
 211
 212/* TC matchall action types */
 213enum dsa_port_mall_action_type {
 214	DSA_PORT_MALL_MIRROR,
 215	DSA_PORT_MALL_POLICER,
 216};
 217
 218/* TC mirroring entry */
 219struct dsa_mall_mirror_tc_entry {
 220	u8 to_local_port;
 221	bool ingress;
 222};
 223
 224/* TC port policer entry */
 225struct dsa_mall_policer_tc_entry {
 226	u32 burst;
 227	u64 rate_bytes_per_sec;
 228};
 229
 230/* TC matchall entry */
 231struct dsa_mall_tc_entry {
 232	struct list_head list;
 233	unsigned long cookie;
 234	enum dsa_port_mall_action_type type;
 235	union {
 236		struct dsa_mall_mirror_tc_entry mirror;
 237		struct dsa_mall_policer_tc_entry policer;
 238	};
 239};
 240
 241struct dsa_bridge {
 242	struct net_device *dev;
 243	unsigned int num;
 244	bool tx_fwd_offload;
 245	refcount_t refcount;
 246};
 247
 248struct dsa_port {
 249	/* A CPU port is physically connected to a master device.
 250	 * A user port exposed to userspace has a slave device.
 251	 */
 252	union {
 253		struct net_device *master;
 254		struct net_device *slave;
 255	};
 256
 257	/* Copy of the tagging protocol operations, for quicker access
 258	 * in the data path. Valid only for the CPU ports.
 259	 */
 260	const struct dsa_device_ops *tag_ops;
 261
 262	/* Copies for faster access in master receive hot path */
 263	struct dsa_switch_tree *dst;
 264	struct sk_buff *(*rcv)(struct sk_buff *skb, struct net_device *dev);
 265
 266	struct dsa_switch	*ds;
 267
 268	unsigned int		index;
 269
 270	enum {
 271		DSA_PORT_TYPE_UNUSED = 0,
 272		DSA_PORT_TYPE_CPU,
 273		DSA_PORT_TYPE_DSA,
 274		DSA_PORT_TYPE_USER,
 275	} type;
 276
 277	const char		*name;
 278	struct dsa_port		*cpu_dp;
 279	u8			mac[ETH_ALEN];
 280
 281	u8			stp_state;
 282
 283	/* Warning: the following bit fields are not atomic, and updating them
 284	 * can only be done from code paths where concurrency is not possible
 285	 * (probe time or under rtnl_lock).
 286	 */
 287	u8			vlan_filtering:1;
 288
 289	/* Managed by DSA on user ports and by drivers on CPU and DSA ports */
 290	u8			learning:1;
 291
 292	u8			lag_tx_enabled:1;
 293
 294	/* Master state bits, valid only on CPU ports */
 295	u8			master_admin_up:1;
 296	u8			master_oper_up:1;
 297
 298	/* Valid only on user ports */
 299	u8			cpu_port_in_lag:1;
 300
 301	u8			setup:1;
 302
 303	struct device_node	*dn;
 304	unsigned int		ageing_time;
 305
 306	struct dsa_bridge	*bridge;
 307	struct devlink_port	devlink_port;
 308	struct phylink		*pl;
 309	struct phylink_config	pl_config;
 310	struct dsa_lag		*lag;
 311	struct net_device	*hsr_dev;
 312
 313	struct list_head list;
 314
 315	/*
 316	 * Original copy of the master netdev ethtool_ops
 317	 */
 318	const struct ethtool_ops *orig_ethtool_ops;
 319
 320	/*
 321	 * Original copy of the master netdev net_device_ops
 322	 */
 323	const struct dsa_netdevice_ops *netdev_ops;
 324
 325	/* List of MAC addresses that must be forwarded on this port.
 326	 * These are only valid on CPU ports and DSA links.
 327	 */
 328	struct mutex		addr_lists_lock;
 329	struct list_head	fdbs;
 330	struct list_head	mdbs;
 331
 332	/* List of VLANs that CPU and DSA ports are members of. */
 333	struct mutex		vlans_lock;
 334	struct list_head	vlans;
 335};
 336
 337/* TODO: ideally DSA ports would have a single dp->link_dp member,
 338 * and no dst->rtable nor this struct dsa_link would be needed,
 339 * but this would require some more complex tree walking,
 340 * so keep it stupid at the moment and list them all.
 341 */
 342struct dsa_link {
 343	struct dsa_port *dp;
 344	struct dsa_port *link_dp;
 345	struct list_head list;
 346};
 347
 348enum dsa_db_type {
 349	DSA_DB_PORT,
 350	DSA_DB_LAG,
 351	DSA_DB_BRIDGE,
 352};
 353
 354struct dsa_db {
 355	enum dsa_db_type type;
 356
 357	union {
 358		const struct dsa_port *dp;
 359		struct dsa_lag lag;
 360		struct dsa_bridge bridge;
 361	};
 362};
 363
 364struct dsa_mac_addr {
 365	unsigned char addr[ETH_ALEN];
 366	u16 vid;
 367	refcount_t refcount;
 368	struct list_head list;
 369	struct dsa_db db;
 370};
 371
 372struct dsa_vlan {
 373	u16 vid;
 374	refcount_t refcount;
 375	struct list_head list;
 376};
 377
 378struct dsa_switch {
 379	struct device *dev;
 380
 381	/*
 382	 * Parent switch tree, and switch index.
 383	 */
 384	struct dsa_switch_tree	*dst;
 385	unsigned int		index;
 386
 387	/* Warning: the following bit fields are not atomic, and updating them
 388	 * can only be done from code paths where concurrency is not possible
 389	 * (probe time or under rtnl_lock).
 390	 */
 391	u32			setup:1;
 392
 393	/* Disallow bridge core from requesting different VLAN awareness
 394	 * settings on ports if not hardware-supported
 395	 */
 396	u32			vlan_filtering_is_global:1;
 397
 398	/* Keep VLAN filtering enabled on ports not offloading any upper */
 399	u32			needs_standalone_vlan_filtering:1;
 400
 401	/* Pass .port_vlan_add and .port_vlan_del to drivers even for bridges
 402	 * that have vlan_filtering=0. All drivers should ideally set this (and
 403	 * then the option would get removed), but it is unknown whether this
 404	 * would break things or not.
 405	 */
 406	u32			configure_vlan_while_not_filtering:1;
 407
 408	/* If the switch driver always programs the CPU port as egress tagged
 409	 * despite the VLAN configuration indicating otherwise, then setting
 410	 * @untag_bridge_pvid will force the DSA receive path to pop the
 411	 * bridge's default_pvid VLAN tagged frames to offer a consistent
 412	 * behavior between a vlan_filtering=0 and vlan_filtering=1 bridge
 413	 * device.
 414	 */
 415	u32			untag_bridge_pvid:1;
 416
 417	/* Let DSA manage the FDB entries towards the
 418	 * CPU, based on the software bridge database.
 419	 */
 420	u32			assisted_learning_on_cpu_port:1;
 421
 422	/* In case vlan_filtering_is_global is set, the VLAN awareness state
 423	 * should be retrieved from here and not from the per-port settings.
 424	 */
 425	u32			vlan_filtering:1;
 426
 427	/* For switches that only have the MRU configurable. To ensure the
 428	 * configured MTU is not exceeded, normalization of MRU on all bridged
 429	 * interfaces is needed.
 430	 */
 431	u32			mtu_enforcement_ingress:1;
 432
 433	/* Drivers that isolate the FDBs of multiple bridges must set this
 434	 * to true to receive the bridge as an argument in .port_fdb_{add,del}
 435	 * and .port_mdb_{add,del}. Otherwise, the bridge.num will always be
 436	 * passed as zero.
 437	 */
 438	u32			fdb_isolation:1;
 439
 440	/* Listener for switch fabric events */
 441	struct notifier_block	nb;
 442
 443	/*
 444	 * Give the switch driver somewhere to hang its private data
 445	 * structure.
 446	 */
 447	void *priv;
 448
 449	void *tagger_data;
 450
 451	/*
 452	 * Configuration data for this switch.
 453	 */
 454	struct dsa_chip_data	*cd;
 455
 456	/*
 457	 * The switch operations.
 458	 */
 459	const struct dsa_switch_ops	*ops;
 460
 461	/*
 462	 * Slave mii_bus and devices for the individual ports.
 463	 */
 464	u32			phys_mii_mask;
 465	struct mii_bus		*slave_mii_bus;
 466
 467	/* Ageing Time limits in msecs */
 468	unsigned int ageing_time_min;
 469	unsigned int ageing_time_max;
 470
 471	/* Storage for drivers using tag_8021q */
 472	struct dsa_8021q_context *tag_8021q_ctx;
 473
 474	/* devlink used to represent this switch device */
 475	struct devlink		*devlink;
 476
 477	/* Number of switch port queues */
 478	unsigned int		num_tx_queues;
 479
 480	/* Drivers that benefit from having an ID associated with each
 481	 * offloaded LAG should set this to the maximum number of
 482	 * supported IDs. DSA will then maintain a mapping of _at
  483	 * least_ this many IDs, accessible to drivers via
 484	 * dsa_lag_id().
 485	 */
 486	unsigned int		num_lag_ids;
 487
 488	/* Drivers that support bridge forwarding offload or FDB isolation
 489	 * should set this to the maximum number of bridges spanning the same
 490	 * switch tree (or all trees, in the case of cross-tree bridging
 491	 * support) that can be offloaded.
 492	 */
 493	unsigned int		max_num_bridges;
 494
 495	unsigned int		num_ports;
 496};
 497
 498static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
 499{
 500	struct dsa_switch_tree *dst = ds->dst;
 501	struct dsa_port *dp;
 502
 503	list_for_each_entry(dp, &dst->ports, list)
 504		if (dp->ds == ds && dp->index == p)
 505			return dp;
 506
 507	return NULL;
 508}
 509
 510static inline bool dsa_port_is_dsa(struct dsa_port *port)
 511{
 512	return port->type == DSA_PORT_TYPE_DSA;
 513}
 514
 515static inline bool dsa_port_is_cpu(struct dsa_port *port)
 516{
 517	return port->type == DSA_PORT_TYPE_CPU;
 518}
 519
 520static inline bool dsa_port_is_user(struct dsa_port *dp)
 521{
 522	return dp->type == DSA_PORT_TYPE_USER;
 523}
 524
 525static inline bool dsa_port_is_unused(struct dsa_port *dp)
 526{
 527	return dp->type == DSA_PORT_TYPE_UNUSED;
 528}
 529
 530static inline bool dsa_port_master_is_operational(struct dsa_port *dp)
 531{
 532	return dsa_port_is_cpu(dp) && dp->master_admin_up &&
 533	       dp->master_oper_up;
 534}
 535
 536static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
 537{
 538	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_UNUSED;
 539}
 540
 541static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
 542{
 543	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_CPU;
 544}
 545
 546static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
 547{
 548	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_DSA;
 549}
 550
 551static inline bool dsa_is_user_port(struct dsa_switch *ds, int p)
 552{
 553	return dsa_to_port(ds, p)->type == DSA_PORT_TYPE_USER;
 554}
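
/*
 * Illustrative sketch: a hypothetical .setup implementation branching on
 * port role with the dsa_is_*_port() helpers above. foo_enable_tagging()
 * and foo_setup_user_port() are assumed driver-internal helpers.
 */
static int foo_setup(struct dsa_switch *ds)
{
	int port;

	for (port = 0; port < ds->num_ports; port++) {
		if (dsa_is_unused_port(ds, port))
			continue;

		if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port))
			foo_enable_tagging(ds, port);
		else
			foo_setup_user_port(ds, port);
	}

	return 0;
}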
 555
 556#define dsa_tree_for_each_user_port(_dp, _dst) \
 557	list_for_each_entry((_dp), &(_dst)->ports, list) \
 558		if (dsa_port_is_user((_dp)))
 559
 560#define dsa_tree_for_each_user_port_continue_reverse(_dp, _dst) \
 561	list_for_each_entry_continue_reverse((_dp), &(_dst)->ports, list) \
 562		if (dsa_port_is_user((_dp)))
 563
 564#define dsa_tree_for_each_cpu_port(_dp, _dst) \
 565	list_for_each_entry((_dp), &(_dst)->ports, list) \
 566		if (dsa_port_is_cpu((_dp)))
 567
 568#define dsa_switch_for_each_port(_dp, _ds) \
 569	list_for_each_entry((_dp), &(_ds)->dst->ports, list) \
 570		if ((_dp)->ds == (_ds))
 571
 572#define dsa_switch_for_each_port_safe(_dp, _next, _ds) \
 573	list_for_each_entry_safe((_dp), (_next), &(_ds)->dst->ports, list) \
 574		if ((_dp)->ds == (_ds))
 575
 576#define dsa_switch_for_each_port_continue_reverse(_dp, _ds) \
 577	list_for_each_entry_continue_reverse((_dp), &(_ds)->dst->ports, list) \
 578		if ((_dp)->ds == (_ds))
 579
 580#define dsa_switch_for_each_available_port(_dp, _ds) \
 581	dsa_switch_for_each_port((_dp), (_ds)) \
 582		if (!dsa_port_is_unused((_dp)))
 583
 584#define dsa_switch_for_each_user_port(_dp, _ds) \
 585	dsa_switch_for_each_port((_dp), (_ds)) \
 586		if (dsa_port_is_user((_dp)))
 587
 588#define dsa_switch_for_each_cpu_port(_dp, _ds) \
 589	dsa_switch_for_each_port((_dp), (_ds)) \
 590		if (dsa_port_is_cpu((_dp)))
 591
 592#define dsa_switch_for_each_cpu_port_continue_reverse(_dp, _ds) \
 593	dsa_switch_for_each_port_continue_reverse((_dp), (_ds)) \
 594		if (dsa_port_is_cpu((_dp)))
 595
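/*
 * Illustrative sketch: unlike the index-based helpers above, these macros
 * iterate over struct dsa_port objects. foo_fast_age() is an assumed
 * driver-internal helper.
 */
static void foo_flush_user_ports(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	dsa_switch_for_each_user_port(dp, ds)
		foo_fast_age(ds, dp->index);
}
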
 596static inline u32 dsa_user_ports(struct dsa_switch *ds)
 597{
 598	struct dsa_port *dp;
 599	u32 mask = 0;
 600
 601	dsa_switch_for_each_user_port(dp, ds)
 602		mask |= BIT(dp->index);
 603
 604	return mask;
 605}
 606
 607static inline u32 dsa_cpu_ports(struct dsa_switch *ds)
 608{
 609	struct dsa_port *cpu_dp;
 610	u32 mask = 0;
 611
 612	dsa_switch_for_each_cpu_port(cpu_dp, ds)
 613		mask |= BIT(cpu_dp->index);
 614
 615	return mask;
 616}
 617
 618/* Return the local port used to reach an arbitrary switch device */
 619static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
 620{
 621	struct dsa_switch_tree *dst = ds->dst;
 622	struct dsa_link *dl;
 623
 624	list_for_each_entry(dl, &dst->rtable, list)
 625		if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
 626			return dl->dp->index;
 627
 628	return ds->num_ports;
 629}
 630
 631/* Return the local port used to reach an arbitrary switch port */
 632static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
 633					    int port)
 634{
 635	if (device == ds->index)
 636		return port;
 637	else
 638		return dsa_routing_port(ds, device);
 639}
 640
 641/* Return the local port used to reach the dedicated CPU port */
 642static inline unsigned int dsa_upstream_port(struct dsa_switch *ds, int port)
 643{
 644	const struct dsa_port *dp = dsa_to_port(ds, port);
 645	const struct dsa_port *cpu_dp = dp->cpu_dp;
 646
 647	if (!cpu_dp)
 648		return port;
 649
 650	return dsa_towards_port(ds, cpu_dp->ds->index, cpu_dp->index);
 651}
 652
 653/* Return true if this is the local port used to reach the CPU port */
 654static inline bool dsa_is_upstream_port(struct dsa_switch *ds, int port)
 655{
 656	if (dsa_is_unused_port(ds, port))
 657		return false;
 658
 659	return port == dsa_upstream_port(ds, port);
 660}
 661
 662/* Return true if this is a DSA port leading away from the CPU */
 663static inline bool dsa_is_downstream_port(struct dsa_switch *ds, int port)
 664{
 665	return dsa_is_dsa_port(ds, port) && !dsa_is_upstream_port(ds, port);
 666}
 667
 668/* Return the local port used to reach the CPU port */
 669static inline unsigned int dsa_switch_upstream_port(struct dsa_switch *ds)
 670{
 671	struct dsa_port *dp;
 672
 673	dsa_switch_for_each_available_port(dp, ds) {
 674		return dsa_upstream_port(ds, dp->index);
 675	}
 676
 677	return ds->num_ports;
 678}
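
/*
 * Illustrative sketch: building a forwarding destination mask for a user
 * port out of the helpers above - every other user port plus the port
 * leading towards the CPU. Hypothetical; a real driver would also factor
 * in bridging state.
 */
static u32 foo_port_fwd_mask(struct dsa_switch *ds, int port)
{
	u32 mask = dsa_user_ports(ds);

	mask &= ~BIT(port);				/* never back to itself */
	mask |= BIT(dsa_upstream_port(ds, port));	/* towards the CPU */

	return mask;
}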
 679
 680/* Return true if @upstream_ds is an upstream switch of @downstream_ds, meaning
 681 * that the routing port from @downstream_ds to @upstream_ds is also the port
 682 * which @downstream_ds uses to reach its dedicated CPU.
 683 */
 684static inline bool dsa_switch_is_upstream_of(struct dsa_switch *upstream_ds,
 685					     struct dsa_switch *downstream_ds)
 686{
 687	int routing_port;
 688
 689	if (upstream_ds == downstream_ds)
 690		return true;
 691
 692	routing_port = dsa_routing_port(downstream_ds, upstream_ds->index);
 693
 694	return dsa_is_upstream_port(downstream_ds, routing_port);
 695}
 696
 697static inline bool dsa_port_is_vlan_filtering(const struct dsa_port *dp)
 698{
 699	const struct dsa_switch *ds = dp->ds;
 700
 701	if (ds->vlan_filtering_is_global)
 702		return ds->vlan_filtering;
 703	else
 704		return dp->vlan_filtering;
 705}
 706
 707static inline unsigned int dsa_port_lag_id_get(struct dsa_port *dp)
 708{
 709	return dp->lag ? dp->lag->id : 0;
 710}
 711
 712static inline struct net_device *dsa_port_lag_dev_get(struct dsa_port *dp)
 713{
 714	return dp->lag ? dp->lag->dev : NULL;
 715}
 716
 717static inline bool dsa_port_offloads_lag(struct dsa_port *dp,
 718					 const struct dsa_lag *lag)
 719{
 720	return dsa_port_lag_dev_get(dp) == lag->dev;
 721}
 722
 723static inline struct net_device *dsa_port_to_master(const struct dsa_port *dp)
 724{
 725	if (dp->cpu_port_in_lag)
 726		return dsa_port_lag_dev_get(dp->cpu_dp);
 727
 728	return dp->cpu_dp->master;
 729}
 730
 731static inline
 732struct net_device *dsa_port_to_bridge_port(const struct dsa_port *dp)
 733{
 734	if (!dp->bridge)
 735		return NULL;
 736
 737	if (dp->lag)
 738		return dp->lag->dev;
 739	else if (dp->hsr_dev)
 740		return dp->hsr_dev;
 741
 742	return dp->slave;
 743}
 744
 745static inline struct net_device *
 746dsa_port_bridge_dev_get(const struct dsa_port *dp)
 747{
 748	return dp->bridge ? dp->bridge->dev : NULL;
 749}
 750
 751static inline unsigned int dsa_port_bridge_num_get(struct dsa_port *dp)
 752{
 753	return dp->bridge ? dp->bridge->num : 0;
 754}
 755
 756static inline bool dsa_port_bridge_same(const struct dsa_port *a,
 757					const struct dsa_port *b)
 758{
 759	struct net_device *br_a = dsa_port_bridge_dev_get(a);
 760	struct net_device *br_b = dsa_port_bridge_dev_get(b);
 761
 762	/* Standalone ports are not in the same bridge with one another */
 763	return (!br_a || !br_b) ? false : (br_a == br_b);
 764}
 765
 766static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
 767						 const struct net_device *dev)
 768{
 769	return dsa_port_to_bridge_port(dp) == dev;
 770}
 771
 772static inline bool
 773dsa_port_offloads_bridge_dev(struct dsa_port *dp,
 774			     const struct net_device *bridge_dev)
 775{
 776	/* DSA ports connected to a bridge, and event was emitted
 777	 * for the bridge.
 778	 */
 779	return dsa_port_bridge_dev_get(dp) == bridge_dev;
 780}
 781
 782static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
 783					    const struct dsa_bridge *bridge)
 784{
 785	return dsa_port_bridge_dev_get(dp) == bridge->dev;
 786}
 787
 788/* Returns true if any port of this tree offloads the given net_device */
 789static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
 790						 const struct net_device *dev)
 791{
 792	struct dsa_port *dp;
 793
 794	list_for_each_entry(dp, &dst->ports, list)
 795		if (dsa_port_offloads_bridge_port(dp, dev))
 796			return true;
 797
 798	return false;
 799}
 800
 801/* Returns true if any port of this tree offloads the given bridge */
 802static inline bool
 803dsa_tree_offloads_bridge_dev(struct dsa_switch_tree *dst,
 804			     const struct net_device *bridge_dev)
 805{
 806	struct dsa_port *dp;
 807
 808	list_for_each_entry(dp, &dst->ports, list)
 809		if (dsa_port_offloads_bridge_dev(dp, bridge_dev))
 810			return true;
 811
 812	return false;
 813}
 814
 815static inline bool dsa_port_tree_same(const struct dsa_port *a,
 816				      const struct dsa_port *b)
 817{
 818	return a->ds->dst == b->ds->dst;
 819}
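
/*
 * Illustrative sketch: narrowing a bridge-level event down to the ports
 * of one switch that actually offload that bridge. foo_port_refresh() is
 * an assumed driver-internal helper.
 */
static void foo_handle_bridge_event(struct dsa_switch *ds,
				    const struct net_device *br)
{
	struct dsa_port *dp;

	dsa_switch_for_each_user_port(dp, ds)
		if (dsa_port_offloads_bridge_dev(dp, br))
			foo_port_refresh(ds, dp->index);
}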
 820
 821typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
 822			      bool is_static, void *data);
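
/*
 * Illustrative sketch of a .port_fdb_dump implementation: walk the
 * hardware FDB and report every entry through the callback, stopping on
 * the first error. struct foo_fdb_entry and foo_for_each_fdb_entry() are
 * assumed driver-internal constructs.
 */
static int foo_port_fdb_dump(struct dsa_switch *ds, int port,
			     dsa_fdb_dump_cb_t *cb, void *data)
{
	struct foo_fdb_entry ent;
	int err;

	foo_for_each_fdb_entry(ds, port, &ent) {
		err = cb(ent.addr, ent.vid, ent.is_static, data);
		if (err)
			return err;
	}

	return 0;
}
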
 823struct dsa_switch_ops {
 824	/*
 825	 * Tagging protocol helpers called for the CPU ports and DSA links.
 826	 * @get_tag_protocol retrieves the initial tagging protocol and is
 827	 * mandatory. Switches which can operate using multiple tagging
 828	 * protocols should implement @change_tag_protocol and report in
 829	 * @get_tag_protocol the tagger in current use.
 830	 */
 831	enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
 832						  int port,
 833						  enum dsa_tag_protocol mprot);
 834	int	(*change_tag_protocol)(struct dsa_switch *ds,
 835				       enum dsa_tag_protocol proto);
 836	/*
 837	 * Method for switch drivers to connect to the tagging protocol driver
 838	 * in current use. The switch driver can provide handlers for certain
 839	 * types of packets for switch management.
 840	 */
 841	int	(*connect_tag_protocol)(struct dsa_switch *ds,
 842					enum dsa_tag_protocol proto);
 843
 844	int	(*port_change_master)(struct dsa_switch *ds, int port,
 845				      struct net_device *master,
 846				      struct netlink_ext_ack *extack);
 847
 848	/* Optional switch-wide initialization and destruction methods */
 849	int	(*setup)(struct dsa_switch *ds);
 850	void	(*teardown)(struct dsa_switch *ds);
 851
 852	/* Per-port initialization and destruction methods. Mandatory if the
 853	 * driver registers devlink port regions, optional otherwise.
 854	 */
 855	int	(*port_setup)(struct dsa_switch *ds, int port);
 856	void	(*port_teardown)(struct dsa_switch *ds, int port);
 857
 858	u32	(*get_phy_flags)(struct dsa_switch *ds, int port);
 859
 860	/*
 861	 * Access to the switch's PHY registers.
 862	 */
 863	int	(*phy_read)(struct dsa_switch *ds, int port, int regnum);
 864	int	(*phy_write)(struct dsa_switch *ds, int port,
 865			     int regnum, u16 val);
 866
 867	/*
 868	 * Link state adjustment (called from libphy)
 869	 */
 870	void	(*adjust_link)(struct dsa_switch *ds, int port,
 871				struct phy_device *phydev);
 872	void	(*fixed_link_update)(struct dsa_switch *ds, int port,
 873				struct fixed_phy_status *st);
 874
 875	/*
 876	 * PHYLINK integration
 877	 */
 878	void	(*phylink_get_caps)(struct dsa_switch *ds, int port,
 879				    struct phylink_config *config);
 880	struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds,
 881						      int port,
 882						      phy_interface_t iface);
 883	int	(*phylink_mac_link_state)(struct dsa_switch *ds, int port,
 884					  struct phylink_link_state *state);
 885	void	(*phylink_mac_config)(struct dsa_switch *ds, int port,
 886				      unsigned int mode,
 887				      const struct phylink_link_state *state);
 888	void	(*phylink_mac_an_restart)(struct dsa_switch *ds, int port);
 889	void	(*phylink_mac_link_down)(struct dsa_switch *ds, int port,
 890					 unsigned int mode,
 891					 phy_interface_t interface);
 892	void	(*phylink_mac_link_up)(struct dsa_switch *ds, int port,
 893				       unsigned int mode,
 894				       phy_interface_t interface,
 895				       struct phy_device *phydev,
 896				       int speed, int duplex,
 897				       bool tx_pause, bool rx_pause);
 898	void	(*phylink_fixed_state)(struct dsa_switch *ds, int port,
 899				       struct phylink_link_state *state);
 900	/*
 901	 * Port statistics counters.
 902	 */
 903	void	(*get_strings)(struct dsa_switch *ds, int port,
 904			       u32 stringset, uint8_t *data);
 905	void	(*get_ethtool_stats)(struct dsa_switch *ds,
 906				     int port, uint64_t *data);
 907	int	(*get_sset_count)(struct dsa_switch *ds, int port, int sset);
 908	void	(*get_ethtool_phy_stats)(struct dsa_switch *ds,
 909					 int port, uint64_t *data);
 910	void	(*get_eth_phy_stats)(struct dsa_switch *ds, int port,
 911				     struct ethtool_eth_phy_stats *phy_stats);
 912	void	(*get_eth_mac_stats)(struct dsa_switch *ds, int port,
 913				     struct ethtool_eth_mac_stats *mac_stats);
 914	void	(*get_eth_ctrl_stats)(struct dsa_switch *ds, int port,
 915				      struct ethtool_eth_ctrl_stats *ctrl_stats);
 916	void	(*get_rmon_stats)(struct dsa_switch *ds, int port,
 917				  struct ethtool_rmon_stats *rmon_stats,
 918				  const struct ethtool_rmon_hist_range **ranges);
 919	void	(*get_stats64)(struct dsa_switch *ds, int port,
 920				   struct rtnl_link_stats64 *s);
 921	void	(*get_pause_stats)(struct dsa_switch *ds, int port,
 922				   struct ethtool_pause_stats *pause_stats);
 923	void	(*self_test)(struct dsa_switch *ds, int port,
 924			     struct ethtool_test *etest, u64 *data);
 925
 926	/*
 927	 * ethtool Wake-on-LAN
 928	 */
 929	void	(*get_wol)(struct dsa_switch *ds, int port,
 930			   struct ethtool_wolinfo *w);
 931	int	(*set_wol)(struct dsa_switch *ds, int port,
 932			   struct ethtool_wolinfo *w);
 933
 934	/*
 935	 * ethtool timestamp info
 936	 */
 937	int	(*get_ts_info)(struct dsa_switch *ds, int port,
 938			       struct ethtool_ts_info *ts);
 939
 940	/*
 941	 * DCB ops
 942	 */
 943	int	(*port_get_default_prio)(struct dsa_switch *ds, int port);
 944	int	(*port_set_default_prio)(struct dsa_switch *ds, int port,
 945					 u8 prio);
 946	int	(*port_get_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp);
 947	int	(*port_add_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
 948				      u8 prio);
 949	int	(*port_del_dscp_prio)(struct dsa_switch *ds, int port, u8 dscp,
 950				      u8 prio);
 951
 952	/*
 953	 * Suspend and resume
 954	 */
 955	int	(*suspend)(struct dsa_switch *ds);
 956	int	(*resume)(struct dsa_switch *ds);
 957
 958	/*
 959	 * Port enable/disable
 960	 */
 961	int	(*port_enable)(struct dsa_switch *ds, int port,
 962			       struct phy_device *phy);
 963	void	(*port_disable)(struct dsa_switch *ds, int port);
 964
 965	/*
 966	 * Port's MAC EEE settings
 967	 */
 968	int	(*set_mac_eee)(struct dsa_switch *ds, int port,
 969			       struct ethtool_eee *e);
 970	int	(*get_mac_eee)(struct dsa_switch *ds, int port,
 971			       struct ethtool_eee *e);
 972
 973	/* EEPROM access */
 974	int	(*get_eeprom_len)(struct dsa_switch *ds);
 975	int	(*get_eeprom)(struct dsa_switch *ds,
 976			      struct ethtool_eeprom *eeprom, u8 *data);
 977	int	(*set_eeprom)(struct dsa_switch *ds,
 978			      struct ethtool_eeprom *eeprom, u8 *data);
 979
 980	/*
 981	 * Register access.
 982	 */
 983	int	(*get_regs_len)(struct dsa_switch *ds, int port);
 984	void	(*get_regs)(struct dsa_switch *ds, int port,
 985			    struct ethtool_regs *regs, void *p);
 986
 987	/*
 988	 * Upper device tracking.
 989	 */
 990	int	(*port_prechangeupper)(struct dsa_switch *ds, int port,
 991				       struct netdev_notifier_changeupper_info *info);
 992
 993	/*
 994	 * Bridge integration
 995	 */
 996	int	(*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
 997	int	(*port_bridge_join)(struct dsa_switch *ds, int port,
 998				    struct dsa_bridge bridge,
 999				    bool *tx_fwd_offload,
1000				    struct netlink_ext_ack *extack);
1001	void	(*port_bridge_leave)(struct dsa_switch *ds, int port,
1002				     struct dsa_bridge bridge);
1003	void	(*port_stp_state_set)(struct dsa_switch *ds, int port,
1004				      u8 state);
1005	int	(*port_mst_state_set)(struct dsa_switch *ds, int port,
1006				      const struct switchdev_mst_state *state);
1007	void	(*port_fast_age)(struct dsa_switch *ds, int port);
1008	int	(*port_vlan_fast_age)(struct dsa_switch *ds, int port, u16 vid);
1009	int	(*port_pre_bridge_flags)(struct dsa_switch *ds, int port,
1010					 struct switchdev_brport_flags flags,
1011					 struct netlink_ext_ack *extack);
1012	int	(*port_bridge_flags)(struct dsa_switch *ds, int port,
1013				     struct switchdev_brport_flags flags,
1014				     struct netlink_ext_ack *extack);
1015	void	(*port_set_host_flood)(struct dsa_switch *ds, int port,
1016				       bool uc, bool mc);
1017
1018	/*
1019	 * VLAN support
1020	 */
1021	int	(*port_vlan_filtering)(struct dsa_switch *ds, int port,
1022				       bool vlan_filtering,
1023				       struct netlink_ext_ack *extack);
1024	int	(*port_vlan_add)(struct dsa_switch *ds, int port,
1025				 const struct switchdev_obj_port_vlan *vlan,
1026				 struct netlink_ext_ack *extack);
1027	int	(*port_vlan_del)(struct dsa_switch *ds, int port,
1028				 const struct switchdev_obj_port_vlan *vlan);
1029	int	(*vlan_msti_set)(struct dsa_switch *ds, struct dsa_bridge bridge,
1030				 const struct switchdev_vlan_msti *msti);
1031
1032	/*
1033	 * Forwarding database
1034	 */
1035	int	(*port_fdb_add)(struct dsa_switch *ds, int port,
1036				const unsigned char *addr, u16 vid,
1037				struct dsa_db db);
1038	int	(*port_fdb_del)(struct dsa_switch *ds, int port,
1039				const unsigned char *addr, u16 vid,
1040				struct dsa_db db);
1041	int	(*port_fdb_dump)(struct dsa_switch *ds, int port,
1042				 dsa_fdb_dump_cb_t *cb, void *data);
1043	int	(*lag_fdb_add)(struct dsa_switch *ds, struct dsa_lag lag,
1044			       const unsigned char *addr, u16 vid,
1045			       struct dsa_db db);
1046	int	(*lag_fdb_del)(struct dsa_switch *ds, struct dsa_lag lag,
1047			       const unsigned char *addr, u16 vid,
1048			       struct dsa_db db);
1049
1050	/*
1051	 * Multicast database
1052	 */
1053	int	(*port_mdb_add)(struct dsa_switch *ds, int port,
1054				const struct switchdev_obj_port_mdb *mdb,
1055				struct dsa_db db);
1056	int	(*port_mdb_del)(struct dsa_switch *ds, int port,
1057				const struct switchdev_obj_port_mdb *mdb,
1058				struct dsa_db db);
1059	/*
1060	 * RXNFC
1061	 */
1062	int	(*get_rxnfc)(struct dsa_switch *ds, int port,
1063			     struct ethtool_rxnfc *nfc, u32 *rule_locs);
1064	int	(*set_rxnfc)(struct dsa_switch *ds, int port,
1065			     struct ethtool_rxnfc *nfc);
1066
1067	/*
1068	 * TC integration
1069	 */
1070	int	(*cls_flower_add)(struct dsa_switch *ds, int port,
1071				  struct flow_cls_offload *cls, bool ingress);
1072	int	(*cls_flower_del)(struct dsa_switch *ds, int port,
1073				  struct flow_cls_offload *cls, bool ingress);
1074	int	(*cls_flower_stats)(struct dsa_switch *ds, int port,
1075				    struct flow_cls_offload *cls, bool ingress);
1076	int	(*port_mirror_add)(struct dsa_switch *ds, int port,
1077				   struct dsa_mall_mirror_tc_entry *mirror,
1078				   bool ingress, struct netlink_ext_ack *extack);
1079	void	(*port_mirror_del)(struct dsa_switch *ds, int port,
1080				   struct dsa_mall_mirror_tc_entry *mirror);
1081	int	(*port_policer_add)(struct dsa_switch *ds, int port,
1082				    struct dsa_mall_policer_tc_entry *policer);
1083	void	(*port_policer_del)(struct dsa_switch *ds, int port);
1084	int	(*port_setup_tc)(struct dsa_switch *ds, int port,
1085				 enum tc_setup_type type, void *type_data);
1086
1087	/*
1088	 * Cross-chip operations
1089	 */
1090	int	(*crosschip_bridge_join)(struct dsa_switch *ds, int tree_index,
1091					 int sw_index, int port,
1092					 struct dsa_bridge bridge,
1093					 struct netlink_ext_ack *extack);
1094	void	(*crosschip_bridge_leave)(struct dsa_switch *ds, int tree_index,
1095					  int sw_index, int port,
1096					  struct dsa_bridge bridge);
1097	int	(*crosschip_lag_change)(struct dsa_switch *ds, int sw_index,
1098					int port);
1099	int	(*crosschip_lag_join)(struct dsa_switch *ds, int sw_index,
1100				      int port, struct dsa_lag lag,
1101				      struct netdev_lag_upper_info *info,
1102				      struct netlink_ext_ack *extack);
1103	int	(*crosschip_lag_leave)(struct dsa_switch *ds, int sw_index,
1104				       int port, struct dsa_lag lag);
1105
1106	/*
1107	 * PTP functionality
1108	 */
1109	int	(*port_hwtstamp_get)(struct dsa_switch *ds, int port,
1110				     struct ifreq *ifr);
1111	int	(*port_hwtstamp_set)(struct dsa_switch *ds, int port,
1112				     struct ifreq *ifr);
1113	void	(*port_txtstamp)(struct dsa_switch *ds, int port,
1114				 struct sk_buff *skb);
1115	bool	(*port_rxtstamp)(struct dsa_switch *ds, int port,
1116				 struct sk_buff *skb, unsigned int type);
1117
1118	/* Devlink parameters, etc */
1119	int	(*devlink_param_get)(struct dsa_switch *ds, u32 id,
1120				     struct devlink_param_gset_ctx *ctx);
1121	int	(*devlink_param_set)(struct dsa_switch *ds, u32 id,
1122				     struct devlink_param_gset_ctx *ctx);
1123	int	(*devlink_info_get)(struct dsa_switch *ds,
1124				    struct devlink_info_req *req,
1125				    struct netlink_ext_ack *extack);
1126	int	(*devlink_sb_pool_get)(struct dsa_switch *ds,
1127				       unsigned int sb_index, u16 pool_index,
1128				       struct devlink_sb_pool_info *pool_info);
1129	int	(*devlink_sb_pool_set)(struct dsa_switch *ds, unsigned int sb_index,
1130				       u16 pool_index, u32 size,
1131				       enum devlink_sb_threshold_type threshold_type,
1132				       struct netlink_ext_ack *extack);
1133	int	(*devlink_sb_port_pool_get)(struct dsa_switch *ds, int port,
1134					    unsigned int sb_index, u16 pool_index,
1135					    u32 *p_threshold);
1136	int	(*devlink_sb_port_pool_set)(struct dsa_switch *ds, int port,
1137					    unsigned int sb_index, u16 pool_index,
1138					    u32 threshold,
1139					    struct netlink_ext_ack *extack);
1140	int	(*devlink_sb_tc_pool_bind_get)(struct dsa_switch *ds, int port,
1141					       unsigned int sb_index, u16 tc_index,
1142					       enum devlink_sb_pool_type pool_type,
1143					       u16 *p_pool_index, u32 *p_threshold);
1144	int	(*devlink_sb_tc_pool_bind_set)(struct dsa_switch *ds, int port,
1145					       unsigned int sb_index, u16 tc_index,
1146					       enum devlink_sb_pool_type pool_type,
1147					       u16 pool_index, u32 threshold,
1148					       struct netlink_ext_ack *extack);
1149	int	(*devlink_sb_occ_snapshot)(struct dsa_switch *ds,
1150					   unsigned int sb_index);
1151	int	(*devlink_sb_occ_max_clear)(struct dsa_switch *ds,
1152					    unsigned int sb_index);
1153	int	(*devlink_sb_occ_port_pool_get)(struct dsa_switch *ds, int port,
1154						unsigned int sb_index, u16 pool_index,
1155						u32 *p_cur, u32 *p_max);
1156	int	(*devlink_sb_occ_tc_port_bind_get)(struct dsa_switch *ds, int port,
1157						   unsigned int sb_index, u16 tc_index,
1158						   enum devlink_sb_pool_type pool_type,
1159						   u32 *p_cur, u32 *p_max);
1160
1161	/*
1162	 * MTU change functionality. Switches can also adjust their MRU through
1163	 * this method. Here, MTU means the SDU (L2 payload) length.
1164	 * If the switch needs to account for the DSA tag on the CPU port, this
1165	 * method needs to do so privately.
1166	 */
1167	int	(*port_change_mtu)(struct dsa_switch *ds, int port,
1168				   int new_mtu);
1169	int	(*port_max_mtu)(struct dsa_switch *ds, int port);
1170
1171	/*
1172	 * LAG integration
1173	 */
1174	int	(*port_lag_change)(struct dsa_switch *ds, int port);
1175	int	(*port_lag_join)(struct dsa_switch *ds, int port,
1176				 struct dsa_lag lag,
1177				 struct netdev_lag_upper_info *info,
1178				 struct netlink_ext_ack *extack);
1179	int	(*port_lag_leave)(struct dsa_switch *ds, int port,
1180				  struct dsa_lag lag);
1181
1182	/*
1183	 * HSR integration
1184	 */
1185	int	(*port_hsr_join)(struct dsa_switch *ds, int port,
1186				 struct net_device *hsr);
1187	int	(*port_hsr_leave)(struct dsa_switch *ds, int port,
1188				  struct net_device *hsr);
1189
1190	/*
1191	 * MRP integration
1192	 */
1193	int	(*port_mrp_add)(struct dsa_switch *ds, int port,
1194				const struct switchdev_obj_mrp *mrp);
1195	int	(*port_mrp_del)(struct dsa_switch *ds, int port,
1196				const struct switchdev_obj_mrp *mrp);
1197	int	(*port_mrp_add_ring_role)(struct dsa_switch *ds, int port,
1198					  const struct switchdev_obj_ring_role_mrp *mrp);
1199	int	(*port_mrp_del_ring_role)(struct dsa_switch *ds, int port,
1200					  const struct switchdev_obj_ring_role_mrp *mrp);
1201
1202	/*
1203	 * tag_8021q operations
1204	 */
1205	int	(*tag_8021q_vlan_add)(struct dsa_switch *ds, int port, u16 vid,
1206				      u16 flags);
1207	int	(*tag_8021q_vlan_del)(struct dsa_switch *ds, int port, u16 vid);
1208
1209	/*
1210	 * DSA master tracking operations
1211	 */
1212	void	(*master_state_change)(struct dsa_switch *ds,
1213				       const struct net_device *master,
1214				       bool operational);
1215};
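
/*
 * Illustrative sketch: only .get_tag_protocol is mandatory; a minimal,
 * hypothetical dsa_switch_ops could look like this, reusing the foo_*
 * sketches shown earlier in this file.
 */
static enum dsa_tag_protocol foo_get_tag_protocol(struct dsa_switch *ds,
						  int port,
						  enum dsa_tag_protocol mprot)
{
	return DSA_TAG_PROTO_NONE;	/* or whichever tagger the hardware needs */
}

static const struct dsa_switch_ops foo_switch_ops = {
	.get_tag_protocol	= foo_get_tag_protocol,
	.setup			= foo_setup,
	.port_fdb_dump		= foo_port_fdb_dump,
};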
1216
1217#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes)		\
1218	DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes,		\
1219			     dsa_devlink_param_get, dsa_devlink_param_set, NULL)
1220
1221int dsa_devlink_param_get(struct devlink *dl, u32 id,
1222			  struct devlink_param_gset_ctx *ctx);
1223int dsa_devlink_param_set(struct devlink *dl, u32 id,
1224			  struct devlink_param_gset_ctx *ctx);
1225int dsa_devlink_params_register(struct dsa_switch *ds,
1226				const struct devlink_param *params,
1227				size_t params_count);
1228void dsa_devlink_params_unregister(struct dsa_switch *ds,
1229				   const struct devlink_param *params,
1230				   size_t params_count);
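
/*
 * Illustrative sketch: registering a driver-specific devlink parameter
 * through the DSA wrappers. The macro above routes .get/.set to the
 * driver's .devlink_param_get/.devlink_param_set ops. The foo_* names and
 * the "atu_hash" parameter are assumptions.
 */
enum foo_devlink_param_id {
	FOO_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	FOO_DEVLINK_PARAM_ID_ATU_HASH,
};

static const struct devlink_param foo_devlink_params[] = {
	DSA_DEVLINK_PARAM_DRIVER(FOO_DEVLINK_PARAM_ID_ATU_HASH,
				 "atu_hash", DEVLINK_PARAM_TYPE_U8,
				 BIT(DEVLINK_PARAM_CMODE_RUNTIME)),
};

static int foo_setup_devlink_params(struct dsa_switch *ds)
{
	return dsa_devlink_params_register(ds, foo_devlink_params,
					   ARRAY_SIZE(foo_devlink_params));
}
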
1231int dsa_devlink_resource_register(struct dsa_switch *ds,
1232				  const char *resource_name,
1233				  u64 resource_size,
1234				  u64 resource_id,
1235				  u64 parent_resource_id,
1236				  const struct devlink_resource_size_params *size_params);
1237
1238void dsa_devlink_resources_unregister(struct dsa_switch *ds);
1239
1240void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
1241					   u64 resource_id,
1242					   devlink_resource_occ_get_t *occ_get,
1243					   void *occ_get_priv);
1244void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
1245					     u64 resource_id);
1246struct devlink_region *
1247dsa_devlink_region_create(struct dsa_switch *ds,
1248			  const struct devlink_region_ops *ops,
1249			  u32 region_max_snapshots, u64 region_size);
1250struct devlink_region *
1251dsa_devlink_port_region_create(struct dsa_switch *ds,
1252			       int port,
1253			       const struct devlink_port_region_ops *ops,
1254			       u32 region_max_snapshots, u64 region_size);
1255void dsa_devlink_region_destroy(struct devlink_region *region);
1256
1257struct dsa_port *dsa_port_from_netdev(struct net_device *netdev);
1258
1259struct dsa_devlink_priv {
1260	struct dsa_switch *ds;
1261};
1262
1263static inline struct dsa_switch *dsa_devlink_to_ds(struct devlink *dl)
1264{
1265	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
1266
1267	return dl_priv->ds;
1268}
1269
1270static inline
1271struct dsa_switch *dsa_devlink_port_to_ds(struct devlink_port *port)
1272{
1273	struct devlink *dl = port->devlink;
1274	struct dsa_devlink_priv *dl_priv = devlink_priv(dl);
1275
1276	return dl_priv->ds;
1277}
1278
1279static inline int dsa_devlink_port_to_port(struct devlink_port *port)
1280{
1281	return port->index;
1282}
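
/*
 * Illustrative sketch: driver code handed a struct devlink * (e.g. from a
 * shared buffer or region callback) can recover its own state through the
 * conversions above. struct foo_priv is hypothetical.
 */
static struct foo_priv *foo_devlink_priv(struct devlink *dl)
{
	struct dsa_switch *ds = dsa_devlink_to_ds(dl);

	return ds->priv;
}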
1283
1284struct dsa_switch_driver {
1285	struct list_head	list;
1286	const struct dsa_switch_ops *ops;
1287};
1288
1289bool dsa_fdb_present_in_other_db(struct dsa_switch *ds, int port,
1290				 const unsigned char *addr, u16 vid,
1291				 struct dsa_db db);
1292bool dsa_mdb_present_in_other_db(struct dsa_switch *ds, int port,
1293				 const struct switchdev_obj_port_mdb *mdb,
1294				 struct dsa_db db);
1295
1296/* Keep inline for faster access in hot path */
1297static inline bool netdev_uses_dsa(const struct net_device *dev)
1298{
1299#if IS_ENABLED(CONFIG_NET_DSA)
1300	return dev->dsa_ptr && dev->dsa_ptr->rcv;
1301#endif
1302	return false;
1303}
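
/*
 * Illustrative sketch: a master interface driver might use this check to
 * account for the tag the switch will add on transmit. Hypothetical
 * helper; guarded the same way as netdev_uses_dsa() above.
 */
static inline unsigned int foo_master_tag_headroom(const struct net_device *dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	if (netdev_uses_dsa(dev))
		return dev->dsa_ptr->tag_ops->needed_headroom;
#endif
	return 0;
}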
1304
1305/* All DSA tags that push the EtherType to the right (basically all except tail
1306 * tags, which don't break dissection) can be treated the same from the
1307 * perspective of the flow dissector.
1308 *
1309 * We need to return:
1310 *  - offset: the (B - A) difference between:
1311 *    A. the position of the real EtherType and
1312 *    B. the current skb->data (aka ETH_HLEN bytes into the frame, aka 2 bytes
1313 *       after the normal EtherType was supposed to be)
1314 *    The offset in bytes is exactly equal to the tagger overhead (and half of
1315 *    that, in __be16 shorts).
1316 *
1317 *  - proto: the value of the real EtherType.
1318 */
1319static inline void dsa_tag_generic_flow_dissect(const struct sk_buff *skb,
1320						__be16 *proto, int *offset)
1321{
1322#if IS_ENABLED(CONFIG_NET_DSA)
1323	const struct dsa_device_ops *ops = skb->dev->dsa_ptr->tag_ops;
1324	int tag_len = ops->needed_headroom;
1325
1326	*offset = tag_len;
1327	*proto = ((__be16 *)skb->data)[(tag_len / 2) - 1];
1328#endif
1329}
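
/* Worked example (hypothetical tagger with a 4-byte header inserted before
 * the EtherType): needed_headroom = 4, so the dissector is told offset = 4
 * and the real EtherType is read from ((__be16 *)skb->data)[4 / 2 - 1],
 * i.e. the two bytes at skb->data + 2.
 */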
1330
1331#if IS_ENABLED(CONFIG_NET_DSA)
1332static inline int __dsa_netdevice_ops_check(struct net_device *dev)
1333{
1334	int err = -EOPNOTSUPP;
1335
1336	if (!dev->dsa_ptr)
1337		return err;
1338
1339	if (!dev->dsa_ptr->netdev_ops)
1340		return err;
1341
1342	return 0;
1343}
1344
1345static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
1346				    int cmd)
1347{
1348	const struct dsa_netdevice_ops *ops;
1349	int err;
1350
1351	err = __dsa_netdevice_ops_check(dev);
1352	if (err)
1353		return err;
1354
1355	ops = dev->dsa_ptr->netdev_ops;
1356
1357	return ops->ndo_eth_ioctl(dev, ifr, cmd);
1358}
1359#else
1360static inline int dsa_ndo_eth_ioctl(struct net_device *dev, struct ifreq *ifr,
1361				    int cmd)
1362{
1363	return -EOPNOTSUPP;
1364}
1365#endif
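
/*
 * Illustrative sketch of the calling convention: let the switch attached
 * to this master see the ioctl first, and fall back to the master's own
 * handler only when DSA reports -EOPNOTSUPP. foo_own_eth_ioctl() is an
 * assumed fallback; the real call site lives in the core ioctl path.
 */
static int foo_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	int err;

	err = dsa_ndo_eth_ioctl(dev, ifr, cmd);
	if (err != -EOPNOTSUPP)
		return err;

	return foo_own_eth_ioctl(dev, ifr, cmd);
}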
1366
1367void dsa_unregister_switch(struct dsa_switch *ds);
1368int dsa_register_switch(struct dsa_switch *ds);
1369void dsa_switch_shutdown(struct dsa_switch *ds);
1370struct dsa_switch *dsa_switch_find(int tree_index, int sw_index);
1371void dsa_flush_workqueue(void);
1372#ifdef CONFIG_PM_SLEEP
1373int dsa_switch_suspend(struct dsa_switch *ds);
1374int dsa_switch_resume(struct dsa_switch *ds);
1375#else
1376static inline int dsa_switch_suspend(struct dsa_switch *ds)
1377{
1378	return 0;
1379}
1380static inline int dsa_switch_resume(struct dsa_switch *ds)
1381{
1382	return 0;
1383}
1384#endif /* CONFIG_PM_SLEEP */
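
/*
 * Illustrative sketch: a switch driver's PM callbacks typically just
 * funnel into the helpers above. Assumes (hypothetically) that the device
 * drvdata points to the struct dsa_switch.
 */
static int __maybe_unused foo_suspend(struct device *dev)
{
	struct dsa_switch *ds = dev_get_drvdata(dev);

	return dsa_switch_suspend(ds);
}

static int __maybe_unused foo_resume(struct device *dev)
{
	struct dsa_switch *ds = dev_get_drvdata(dev);

	return dsa_switch_resume(ds);
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);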
1385
1386#if IS_ENABLED(CONFIG_NET_DSA)
1387bool dsa_slave_dev_check(const struct net_device *dev);
1388#else
1389static inline bool dsa_slave_dev_check(const struct net_device *dev)
1390{
1391	return false;
1392}
1393#endif
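
/*
 * Illustrative sketch: netdevice notifier code commonly uses this check to
 * ignore events for interfaces that are not DSA user ports. Hypothetical
 * handler.
 */
static int foo_netdevice_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (!dsa_slave_dev_check(dev))
		return NOTIFY_DONE;

	/* ... react to the DSA user port event ... */
	return NOTIFY_OK;
}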
1394
1395netdev_tx_t dsa_enqueue_skb(struct sk_buff *skb, struct net_device *dev);
1396void dsa_port_phylink_mac_change(struct dsa_switch *ds, int port, bool up);
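
/*
 * Illustrative sketch: a driver servicing a link-status interrupt can
 * notify phylink of the out-of-band change through the helper above.
 * foo_irq_to_port() and foo_port_link_is_up() are assumed driver-internal
 * helpers; irqreturn_t/IRQ_HANDLED come from <linux/interrupt.h>.
 */
static irqreturn_t foo_link_irq(int irq, void *dev_id)
{
	struct dsa_switch *ds = dev_id;
	int port = foo_irq_to_port(ds, irq);
	bool up = foo_port_link_is_up(ds, port);

	dsa_port_phylink_mac_change(ds, port, up);

	return IRQ_HANDLED;
}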
1397
1398#endif