Linux Audio

Check our new training course

Real-Time Linux with PREEMPT_RT training

Feb 18-20, 2025
Register
Loading...
   1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
   2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
   3
   4#include <linux/kernel.h>
   5#include <linux/types.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/slab.h>
   9#include <linux/device.h>
  10#include <linux/skbuff.h>
  11#include <linux/if_vlan.h>
  12#include <linux/if_bridge.h>
  13#include <linux/workqueue.h>
  14#include <linux/jiffies.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/netlink.h>
  17#include <net/switchdev.h>
  18#include <net/vxlan.h>
  19
  20#include "spectrum_span.h"
  21#include "spectrum_switchdev.h"
  22#include "spectrum.h"
  23#include "core.h"
  24#include "reg.h"
  25
  26struct mlxsw_sp_bridge_ops;
  27
/* Driver-wide bridge offload state, one instance per mlxsw_sp device. */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		/* Delayed work polling the device for learned FDB entries,
		 * re-armed every 'interval' milliseconds.
		 */
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time; /* FDB ageing time, seconds. */
	/* Only one VLAN-aware bridge may be offloaded at a time; this
	 * tracks whether one already exists.
	 */
	bool vlan_enabled_exists;
	struct list_head bridges_list; /* List of mlxsw_sp_bridge_device. */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	/* Ops selected per bridge according to its VLAN mode / protocol. */
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
  46
/* State for one offloaded bridge netdevice. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev; /* The bridge netdevice itself. */
	struct list_head list; /* Member of 'bridges_list'. */
	struct list_head ports_list; /* List of mlxsw_sp_bridge_port. */
	struct list_head mdb_list; /* List of mlxsw_sp_mdb_entry. */
	struct rhashtable mdb_ht; /* Keyed by mlxsw_sp_mdb_entry_key. */
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops;
};
  58
/* State for one port of an offloaded bridge; the port may be backed by a
 * single physical port or by a LAG.
 */
struct mlxsw_sp_bridge_port {
	struct net_device *dev; /* The bridge port netdevice. */
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list; /* Member of bridge_device's 'ports_list'. */
	struct list_head vlans_list; /* List of mlxsw_sp_bridge_vlan. */
	refcount_t ref_count;
	u8 stp_state;
	unsigned long flags; /* Cached BR_* bridge port flags. */
	bool mrouter;
	bool lagged;
	union {
		/* Interpretation selected by 'lagged'. */
		u16 lag_id;
		u16 system_port;
	};
};
  74
/* One VLAN configured on a bridge port. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list; /* Member of bridge_port's 'vlans_list'. */
	struct list_head port_vlan_list; /* Port-vlans in this VLAN. */
	u16 vid;
};
  80
/* Per-bridge-model operations; separate implementations are referenced
 * for 802.1q, 802.1d and 802.1ad bridges (see struct mlxsw_sp_bridge).
 */
struct mlxsw_sp_bridge_ops {
	/* Attach an mlxsw port to the bridge. */
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	/* Detach an mlxsw port from the bridge. */
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	/* Offload a VxLAN device enslaved to the bridge for 'vid'. */
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	/* Resolve the FID used for 'vid' on this bridge. */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	/* Like fid_get(), but presumably without creating the FID --
	 * semantics defined by the per-model implementations elsewhere.
	 */
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	/* Map a FID back to the VID it represents on this bridge. */
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
 101
/* ASIC-generation specific switchdev hooks. */
struct mlxsw_sp_switchdev_ops {
	void (*init)(struct mlxsw_sp *mlxsw_sp);
};
 105
/* Hash key of an MDB entry: multicast MAC address within a FID. */
struct mlxsw_sp_mdb_entry_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};
 110
/* One multicast forwarding database entry of a bridge device. */
struct mlxsw_sp_mdb_entry {
	struct list_head list; /* Member of bridge_device's 'mdb_list'. */
	struct rhash_head ht_node; /* Node in bridge_device's 'mdb_ht'. */
	struct mlxsw_sp_mdb_entry_key key;
	u16 mid; /* Multicast ID allocated from 'mids_bitmap'. */
	struct list_head ports_list; /* List of mlxsw_sp_mdb_entry_port. */
	u16 ports_count;
};
 119
/* One local port member of an MDB entry. */
struct mlxsw_sp_mdb_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
	refcount_t refcount;
	bool mrouter; /* Port is member due to multicast router state. */
};
 126
/* Hashtable layout for MDB entries, keyed by {MAC, FID}. */
static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
};
 132
 133static int
 134mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
 135			       struct mlxsw_sp_bridge_port *bridge_port,
 136			       u16 fid_index);
 137
 138static void
 139mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
 140			       struct mlxsw_sp_bridge_port *bridge_port,
 141			       u16 fid_index);
 142
 143static int
 144mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
 145				   struct mlxsw_sp_bridge_device
 146				   *bridge_device, bool mc_enabled);
 147
 148static void
 149mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
 150				 struct mlxsw_sp_bridge_port *bridge_port,
 151				 bool add);
 152
 153static struct mlxsw_sp_bridge_device *
 154mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
 155			    const struct net_device *br_dev)
 156{
 157	struct mlxsw_sp_bridge_device *bridge_device;
 158
 159	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
 160		if (bridge_device->dev == br_dev)
 161			return bridge_device;
 162
 163	return NULL;
 164}
 165
 166bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
 167					 const struct net_device *br_dev)
 168{
 169	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
 170}
 171
 172static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
 173						    struct netdev_nested_priv *priv)
 174{
 175	struct mlxsw_sp *mlxsw_sp = priv->data;
 176
 177	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
 178	return 0;
 179}
 180
/* Destroy the RIF of 'dev' itself and of every netdevice stacked above
 * it, as the bridge device is being torn down.
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	/* NOTE(review): the _rcu upper-device walk is presumably safe here
	 * because callers hold RTNL -- confirm against the call sites.
	 */
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}
 193
/* Offload every running VxLAN device already enslaved to 'br_dev'. On
 * failure, unoffload the devices processed before the failing one and
 * return the error.
 */
static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	/* Re-walk the lowers in the same order and undo the joins that
	 * succeeded; 'stop_dev' marks where the forward walk failed.
	 */
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}
 226
 227static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
 228					      struct net_device *br_dev)
 229{
 230	struct net_device *dev;
 231	struct list_head *iter;
 232
 233	netdev_for_each_lower_dev(br_dev, dev, iter) {
 234		if (netif_is_vxlan(dev) && netif_running(dev))
 235			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
 236	}
 237}
 238
 239static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
 240					      bool no_delay)
 241{
 242	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
 243	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
 244
 245	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
 246			       msecs_to_jiffies(interval));
 247}
 248
/* Allocate and initialize offload state for the bridge netdevice
 * 'br_dev', selecting the ops according to its VLAN mode and protocol.
 * Returns the new bridge device or an ERR_PTR().
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	/* Only a single VLAN-aware bridge can be offloaded. */
	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_mdb_rhashtable_init;

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		u16 proto;

		bridge->vlan_enabled_exists = true;
		/* Choose ops by the bridge's VLAN protocol. */
		br_vlan_get_proto(br_dev, &proto);
		if (proto == ETH_P_8021AD)
			bridge_device->ops = bridge->bridge_8021ad_ops;
		else
			bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mdb_list);

	/* Start FDB notification polling with the first offloaded bridge. */
	if (list_empty(&bridge->bridges_list))
		mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	/* NOTE(review): the polling work scheduled above is not cancelled
	 * on this path even when this was the only bridge -- confirm the
	 * work tolerates running with an empty bridges_list.
	 */
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	rhashtable_destroy(&bridge_device->mdb_ht);
err_mdb_rhashtable_init:
	kfree(bridge_device);
	return ERR_PTR(err);
}
 315
/* Tear down an offloaded bridge device: unoffload its VxLAN lowers,
 * destroy associated router interfaces and free the state. All ports and
 * MDB entries are expected to be gone by now.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	/* Stop FDB notification polling when the last bridge goes away. */
	if (list_empty(&bridge->bridges_list))
		cancel_delayed_work(&bridge->fdb_notify.dw);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mdb_list));
	rhashtable_destroy(&bridge_device->mdb_ht);
	kfree(bridge_device);
}
 333
 334static struct mlxsw_sp_bridge_device *
 335mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
 336			   struct net_device *br_dev,
 337			   struct netlink_ext_ack *extack)
 338{
 339	struct mlxsw_sp_bridge_device *bridge_device;
 340
 341	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
 342	if (bridge_device)
 343		return bridge_device;
 344
 345	return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
 346}
 347
 348static void
 349mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
 350			   struct mlxsw_sp_bridge_device *bridge_device)
 351{
 352	if (list_empty(&bridge_device->ports_list))
 353		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
 354}
 355
 356static struct mlxsw_sp_bridge_port *
 357__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
 358			    const struct net_device *brport_dev)
 359{
 360	struct mlxsw_sp_bridge_port *bridge_port;
 361
 362	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
 363		if (bridge_port->dev == brport_dev)
 364			return bridge_port;
 365	}
 366
 367	return NULL;
 368}
 369
 370struct mlxsw_sp_bridge_port *
 371mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
 372			  struct net_device *brport_dev)
 373{
 374	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
 375	struct mlxsw_sp_bridge_device *bridge_device;
 376
 377	if (!br_dev)
 378		return NULL;
 379
 380	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
 381	if (!bridge_device)
 382		return NULL;
 383
 384	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
 385}
 386
 387static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
 388				 const struct switchdev_obj *obj,
 389				 struct netlink_ext_ack *extack);
 390static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
 391				 const struct switchdev_obj *obj);
 392
/* Context passed through the switchdev replay notifiers below. */
struct mlxsw_sp_bridge_port_replay_switchdev_objs {
	struct net_device *brport_dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int done; /* Objects for which an add was attempted; bounds unreplay. */
};
 398
/* Switchdev replay notifier: re-applies each object destined to
 * 'brport_dev' onto the underlying mlxsw port when the port joins a
 * bridge. Counts attempted adds in rso->done so a failure can be unwound
 * by the unreplay notifier.
 */
static int
mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct netlink_ext_ack *extack = port_obj_info->info.extack;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
	int err = 0;

	rso = (void *)port_obj_info->info.ctx;

	/* Only PORT_OBJ_ADD events for the port being replayed matter. */
	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		goto out;

	/* When a port is joining the bridge through a LAG, there likely are
	 * VLANs configured on that LAG already. The replay will thus attempt to
	 * have the given port-vlans join the corresponding FIDs. But the LAG
	 * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN
	 * memberships, back before CHANGEUPPER was distributed and netdevice
	 * master set. So now before propagating the VLAN events further, we
	 * first need to kill the corresponding VID at the mlxsw_sp_port.
	 *
	 * Note that this doesn't need to be rolled back on failure -- if the
	 * replay fails, the enslavement is off, and the VIDs would be killed by
	 * LAG anyway as part of its rollback.
	 */
	if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) {
		u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid;

		err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid);
		if (err)
			goto out;
	}

	/* Count before the add so the failing object is included in the
	 * unreplay bound.
	 */
	++rso->done;
	err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL,
				    port_obj_info->obj, extack);

out:
	return notifier_from_errno(err);
}
 442
/* Notifier block driving the object replay above. */
static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs,
};
 446
/* Inverse of the replay notifier: deletes the objects previously added,
 * stopping once rso->done attempts have been consumed.
 */
static int
mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;

	rso = (void *)port_obj_info->info.ctx;

	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		return NOTIFY_DONE;
	/* Post-decrement: a zero count means every replayed object has
	 * already been removed, so stop the walk.
	 */
	if (!rso->done--)
		return NOTIFY_STOP;

	mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL,
			      port_obj_info->obj);
	return NOTIFY_DONE;
}
 467
/* Notifier block driving the object unreplay above. */
static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs,
};
 471
/* Allocate a bridge port for 'brport_dev', link it to the bridge device
 * and mark it offloaded towards the bridge driver. Returns the new port
 * (with one reference) or an ERR_PTR().
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return ERR_PTR(-ENOMEM);

	/* NOTE(review): the lower-device lookup result is dereferenced
	 * unchecked -- presumably callers guarantee an mlxsw port exists
	 * below 'brport_dev' at this point; confirm.
	 */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = br_port_get_stp_state(brport_dev);
	/* Defaults mirroring the bridge driver's initial port flags. */
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	refcount_set(&bridge_port->ref_count, 1);

	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
					    NULL, NULL, NULL, false, extack);
	if (err)
		goto err_switchdev_offload;

	return bridge_port;

err_switchdev_offload:
	list_del(&bridge_port->list);
	kfree(bridge_port);
	return ERR_PTR(err);
}
 512
/* Unoffload, unlink and free a bridge port; its VLANs must already have
 * been removed.
 */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
 521
/* Get a referenced bridge port for 'brport_dev', creating both the port
 * and (when needed) its bridge device on first use. Must be balanced by
 * mlxsw_sp_bridge_port_put().
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* Fast path: the port already exists, just take a reference. */
	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		refcount_inc(&bridge_port->ref_count);
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
						  extack);
	if (IS_ERR(bridge_port)) {
		err = PTR_ERR(bridge_port);
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	/* Drop the bridge device if we were the one who created it. */
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
 555
 556static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
 557				     struct mlxsw_sp_bridge_port *bridge_port)
 558{
 559	struct mlxsw_sp_bridge_device *bridge_device;
 560
 561	if (!refcount_dec_and_test(&bridge_port->ref_count))
 562		return;
 563	bridge_device = bridge_port->bridge_device;
 564	mlxsw_sp_bridge_port_destroy(bridge_port);
 565	mlxsw_sp_bridge_device_put(bridge, bridge_device);
 566}
 567
 568static struct mlxsw_sp_port_vlan *
 569mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
 570				  const struct mlxsw_sp_bridge_device *
 571				  bridge_device,
 572				  u16 vid)
 573{
 574	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 575
 576	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
 577			    list) {
 578		if (!mlxsw_sp_port_vlan->bridge_port)
 579			continue;
 580		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
 581		    bridge_device)
 582			continue;
 583		if (bridge_device->vlan_enabled &&
 584		    mlxsw_sp_port_vlan->vid != vid)
 585			continue;
 586		return mlxsw_sp_port_vlan;
 587	}
 588
 589	return NULL;
 590}
 591
 592static struct mlxsw_sp_port_vlan*
 593mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
 594			       u16 fid_index)
 595{
 596	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 597
 598	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
 599			    list) {
 600		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
 601
 602		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
 603			return mlxsw_sp_port_vlan;
 604	}
 605
 606	return NULL;
 607}
 608
 609static struct mlxsw_sp_bridge_vlan *
 610mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
 611			  u16 vid)
 612{
 613	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 614
 615	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
 616		if (bridge_vlan->vid == vid)
 617			return bridge_vlan;
 618	}
 619
 620	return NULL;
 621}
 622
 623static struct mlxsw_sp_bridge_vlan *
 624mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 625{
 626	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 627
 628	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
 629	if (!bridge_vlan)
 630		return NULL;
 631
 632	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
 633	bridge_vlan->vid = vid;
 634	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
 635
 636	return bridge_vlan;
 637}
 638
 639static void
 640mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
 641{
 642	list_del(&bridge_vlan->list);
 643	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
 644	kfree(bridge_vlan);
 645}
 646
 647static struct mlxsw_sp_bridge_vlan *
 648mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 649{
 650	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 651
 652	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
 653	if (bridge_vlan)
 654		return bridge_vlan;
 655
 656	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
 657}
 658
 659static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
 660{
 661	if (list_empty(&bridge_vlan->port_vlan_list))
 662		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
 663}
 664
/* Set STP 'state' for 'bridge_vlan''s VID on the given port, if this
 * port has a port-vlan in that bridge VLAN; a no-op (success) otherwise.
 * Only the first matching port-vlan is acted upon.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}
 682
/* Apply a new STP 'state' across all VLANs of the port's bridge port,
 * rolling back to the previous state on failure.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the cached state on the VLANs updated so far. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
 717
/* Set or clear 'packet_type' flood membership for this port's port-vlan
 * in 'bridge_vlan', if it exists; a no-op (success) otherwise. Only the
 * first matching port-vlan is acted upon.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}
 738
/* Update 'packet_type' flood membership of the given port across every
 * VLAN of the bridge port, undoing partial updates on failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Flip the VLANs updated so far back to the opposite membership. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
 766
 767static int
 768mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
 769				enum mlxsw_sp_flood_type packet_type,
 770				bool member)
 771{
 772	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 773	int err;
 774
 775	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
 776			    bridge_vlan_node) {
 777		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
 778
 779		err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
 780					     packet_type, local_port, member);
 781		if (err)
 782			goto err_fid_flood_set;
 783	}
 784
 785	return 0;
 786
 787err_fid_flood_set:
 788	list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
 789					     &bridge_vlan->port_vlan_list,
 790					     list) {
 791		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
 792
 793		mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
 794				       local_port, !member);
 795	}
 796
 797	return err;
 798}
 799
/* Update 'packet_type' flood membership for all port-vlans of every VLAN
 * of the bridge port, undoing partial updates on failure.
 */
static int
mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
				      enum mlxsw_sp_flood_type packet_type,
				      bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						      member);
		if (err)
			goto err_bridge_vlans_flood_set;
	}

	return 0;

err_bridge_vlans_flood_set:
	/* Flip the VLANs updated so far back to the opposite membership. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						!member);
	return err;
}
 824
/* Enable/disable learning on 'bridge_vlan''s VID for the given port, if
 * the port has a port-vlan in that bridge VLAN; a no-op (success)
 * otherwise. Only the first matching port-vlan is acted upon.
 */
static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}
 842
/* Enable/disable learning on the port across every VLAN of the bridge
 * port, undoing partial updates on failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	/* Flip the VLANs updated so far back to the opposite setting. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
 867
/* Validate requested bridge port flags before they are committed: only a
 * subset of flags is offloadable, and BR_PORT_LOCKED cannot coexist with
 * VLAN uppers in either direction.
 */
static int
mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    const struct net_device *orig_dev,
				    struct switchdev_brport_flags flags,
				    struct netlink_ext_ack *extack)
{
	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
			   BR_PORT_LOCKED | BR_PORT_MAB)) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported bridge port flag");
		return -EINVAL;
	}

	if ((flags.mask & BR_PORT_LOCKED) && is_vlan_dev(orig_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a VLAN upper");
		return -EINVAL;
	}

	if ((flags.mask & BR_PORT_LOCKED) && vlan_uses_dev(orig_dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a bridge port that has VLAN uppers");
		return -EINVAL;
	}

	return 0;
}
 892
/* Commit bridge port flags (validated earlier by the pre_flags handler)
 * to the device and cache them on the bridge port.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *orig_dev,
					   struct switchdev_brport_flags flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* The port may not be offloaded (e.g. enslavement failed), in
	 * which case there is nothing to do.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (flags.mask & BR_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_UC,
							   flags.val & BR_FLOOD);
		if (err)
			return err;
	}

	if (flags.mask & BR_LEARNING) {
		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
							bridge_port,
							flags.val & BR_LEARNING);
		if (err)
			return err;
	}

	if (flags.mask & BR_PORT_LOCKED) {
		err = mlxsw_sp_port_security_set(mlxsw_sp_port,
						 flags.val & BR_PORT_LOCKED);
		if (err)
			return err;
	}

	/* With multicast enabled, MC flooding is driven by the mrouter
	 * state instead (see mlxsw_sp_mc_flood()), so skip it here.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	if (flags.mask & BR_MCAST_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_MC,
							   flags.val & BR_MCAST_FLOOD);
		if (err)
			return err;
	}

out:
	/* NOTE(review): this overwrites the entire cached flag word with
	 * 'flags.val' instead of merging only the bits in 'flags.mask' --
	 * confirm callers always populate unmasked bits of 'val' too.
	 */
	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
	return 0;
}
 945
 946static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
 947{
 948	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
 949	int err;
 950
 951	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
 952	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
 953	if (err)
 954		return err;
 955	mlxsw_sp->bridge->ageing_time = ageing_time;
 956	return 0;
 957}
 958
 959static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
 960					    unsigned long ageing_clock_t)
 961{
 962	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 963	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
 964	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
 965
 966	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
 967	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
 968		return -ERANGE;
 969
 970	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
 971}
 972
 973static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
 974					  struct net_device *orig_dev,
 975					  bool vlan_enabled)
 976{
 977	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 978	struct mlxsw_sp_bridge_device *bridge_device;
 979
 980	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
 981	if (WARN_ON(!bridge_device))
 982		return -EINVAL;
 983
 984	if (bridge_device->vlan_enabled == vlan_enabled)
 985		return 0;
 986
 987	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
 988	return -EINVAL;
 989}
 990
 991static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
 992						struct net_device *orig_dev,
 993						u16 vlan_proto)
 994{
 995	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 996	struct mlxsw_sp_bridge_device *bridge_device;
 997
 998	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
 999	if (WARN_ON(!bridge_device))
1000		return -EINVAL;
1001
1002	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
1003	return -EINVAL;
1004}
1005
1006static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1007					  struct net_device *orig_dev,
1008					  bool is_port_mrouter)
1009{
1010	struct mlxsw_sp_bridge_port *bridge_port;
1011	int err;
1012
1013	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
1014						orig_dev);
1015	if (!bridge_port)
1016		return 0;
1017
1018	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
1019					 is_port_mrouter);
1020
1021	if (!bridge_port->bridge_device->multicast_enabled)
1022		goto out;
1023
1024	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
1025						   MLXSW_SP_FLOOD_TYPE_MC,
1026						   is_port_mrouter);
1027	if (err)
1028		return err;
1029
1030out:
1031	bridge_port->mrouter = is_port_mrouter;
1032	return 0;
1033}
1034
1035static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
1036{
1037	const struct mlxsw_sp_bridge_device *bridge_device;
1038
1039	bridge_device = bridge_port->bridge_device;
1040	return bridge_device->multicast_enabled ? bridge_port->mrouter :
1041					bridge_port->flags & BR_MCAST_FLOOD;
1042}
1043
/* Toggle multicast awareness of a bridge (IGMP snooping on/off). Syncs the
 * MDB entries with the device and re-evaluates the multicast flood-table
 * membership of every bridge port, rolling back already-updated ports and
 * the enable state on failure.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* No state change requested; nothing to do. */
	if (bridge_device->multicast_enabled == !mc_disabled)
		return 0;

	/* Flip the state first: mlxsw_sp_mc_flood() below depends on it. */
	bridge_device->multicast_enabled = !mc_disabled;
	err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
						 !mc_disabled);
	if (err)
		goto err_mc_enable_sync;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
							    packet_type,
							    member);
		if (err)
			goto err_flood_table_set;
	}

	return 0;

err_flood_table_set:
	/* Undo the ports updated so far, in reverse order.
	 * NOTE(review): the rollback writes the inverse of the *new*
	 * membership; for ports where old and new membership coincide this
	 * differs from the pre-change state - confirm this is intended.
	 */
	list_for_each_entry_continue_reverse(bridge_port,
					     &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
						      !member);
	}
	mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
					   mc_disabled);
err_mc_enable_sync:
	bridge_device->multicast_enabled = mc_disabled;
	return err;
}
1096
1097static struct mlxsw_sp_mdb_entry_port *
1098mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
1099			       u16 local_port)
1100{
1101	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1102
1103	list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
1104		if (mdb_entry_port->local_port == local_port)
1105			return mdb_entry_port;
1106	}
1107
1108	return NULL;
1109}
1110
/* Take a regular (non-mrouter) reference on @local_port's membership in an
 * MDB entry, creating the record and programming the port into the PGT
 * entry on first use.
 */
static struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* A port that so far only held an mrouter reference (see
		 * mlxsw_sp_mdb_entry_mrouter_port_get(), which does not bump
		 * ports_count) starts counting once a regular reference is
		 * taken.
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count++;

		refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* Program the hardware first so that a kzalloc() failure can be
	 * unwound without leaving the PGT entry stale.
	 */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
	mdb_entry->ports_count++;

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1152
/* Drop a regular reference on @local_port's membership in an MDB entry.
 * @force removes the record regardless of remaining references. When the
 * last reference goes away, the record is freed and the port is removed
 * from the PGT entry.
 */
static void
mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port, bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
		/* Mirror of the _get() path: once only the mrouter reference
		 * remains, the port no longer counts toward ports_count.
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count--;
		return;
	}

	mdb_entry->ports_count--;
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1177
/* Take the (single) mrouter reference on @local_port's membership in an MDB
 * entry. Unlike mlxsw_sp_mdb_entry_port_get(), an mrouter-only membership is
 * deliberately not counted in ports_count. The __always_unused annotation
 * suppresses unused warnings; presumably callers exist outside this chunk.
 */
static __always_unused struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* Only take one reference per port for all mrouter users. */
		if (!mdb_entry_port->mrouter)
			refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* Program hardware first so a kzalloc() failure is easy to unwind. */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	mdb_entry_port->mrouter = true;
	/* Note: ports_count is intentionally not incremented here. */
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1216
/* Drop the mrouter reference on @local_port's membership in an MDB entry.
 * A no-op when the port is not a member or holds no mrouter reference.
 * Frees the record and clears the PGT bit when this was the last reference.
 */
static __always_unused void
mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!mdb_entry_port->mrouter)
		return;

	mdb_entry_port->mrouter = false;
	if (!refcount_dec_and_test(&mdb_entry_port->refcount))
		return;

	/* Note: ports_count is not touched - mrouter-only memberships were
	 * never counted in it (see mlxsw_sp_mdb_entry_mrouter_port_get()).
	 */
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1240
1241static void
1242mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
1243				   struct mlxsw_sp_bridge_device *bridge_device,
1244				   bool add)
1245{
1246	u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
1247	struct mlxsw_sp_mdb_entry *mdb_entry;
1248
1249	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
1250		if (add)
1251			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
1252							    local_port);
1253		else
1254			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
1255							    local_port);
1256	}
1257}
1258
1259static int
1260mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1261				  struct net_device *orig_dev,
1262				  bool is_mrouter)
1263{
1264	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1265	struct mlxsw_sp_bridge_device *bridge_device;
1266
1267	/* It's possible we failed to enslave the port, yet this
1268	 * operation is executed due to it being deferred.
1269	 */
1270	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
1271	if (!bridge_device)
1272		return 0;
1273
1274	if (bridge_device->mrouter != is_mrouter)
1275		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
1276						   is_mrouter);
1277	bridge_device->mrouter = is_mrouter;
1278	return 0;
1279}
1280
/* switchdev port attribute handler: dispatch each SWITCHDEV_ATTR_ID_* to
 * its setter. SPAN state is re-evaluated afterwards regardless of the
 * result, since the attribute change may have affected mirroring.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  attr->orig_dev,
							  attr->u.brport_flags,
							  extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
							   attr->orig_dev,
							   attr->u.vlan_protocol);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1343
/* Bind a {port, VLAN} to the FID of the bridge it joins: take a FID
 * reference, set UC/MC/BC flood membership for the port, and map the
 * {port, VID} pair to the FID. Unwinds in reverse order on failure.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	/* The bridge type (802.1Q/D/AD) determines how the FID is derived. */
	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unknown-unicast flooding follows the BR_FLOOD port flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* Multicast flooding depends on mrouter state / BR_MCAST_FLOOD. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge ports. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
1394
/* Undo mlxsw_sp_port_vlan_fid_join() in exact reverse order: unmap the
 * {port, VID} pair, clear BC/MC/UC flood membership and release the FID
 * reference.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Clear the association before tearing down the hardware state. */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
1410
1411static u16
1412mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1413			     u16 vid, bool is_pvid)
1414{
1415	if (is_pvid)
1416		return vid;
1417	else if (mlxsw_sp_port->pvid == vid)
1418		return 0;	/* Dis-allow untagged packets */
1419	else
1420		return mlxsw_sp_port->pvid;
1421}
1422
/* Associate a {port, VLAN} with a bridge port: join the bridge's FID,
 * program learning and STP state for the VID, and link the port-vlan into
 * the bridge-vlan's list. Unwinds on failure in reverse order.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	/* Hardware learning follows the bridge port's BR_LEARNING flag. */
	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	/* Inherit the bridge port's current STP state for this VID. */
	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Take a reference on the bridge port; released in _bridge_leave(). */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1475
/* Detach a {port, VLAN} from its bridge port: reverse of
 * mlxsw_sp_port_vlan_bridge_join(). Flushes the FDB for the FID when this
 * was the last port-vlan using it, always flushes this port's MDB entries,
 * leaves the FID and drops the bridge port reference.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port;

	/* Only bridge-type FIDs are expected here; router FIDs are not. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* NOTE(review): bridge_vlan is dereferenced without a NULL check -
	 * presumably _bridge_join() guarantees it exists; confirm.
	 */
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	/* Flush learned FDB entries only once the last user is gone. */
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));

	mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
				       mlxsw_sp_fid_index(fid));

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1511
/* Add a VLAN to a bridge port on behalf of a switchdev VLAN object: create
 * the port-vlan if needed, program VLAN membership and PVID, and join the
 * bridge. Unwinds on failure, restoring the previous PVID.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	u16 proto;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	/* Make the port a (possibly untagged) member of the VLAN. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	/* PVID programming needs the bridge's VLAN protocol (1Q vs. 1AD). */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1565
1566static int
1567mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1568			    struct net_device *br_dev,
1569			    const struct switchdev_obj_port_vlan *vlan,
1570			    struct netlink_ext_ack *extack)
1571{
1572	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1573
1574	return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid,
1575					       flag_pvid, extack);
1576}
1577
/* switchdev handler for adding VLAN objects to a port (or to the bridge
 * device itself).
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* A VLAN on the bridge device may move its RIF to another
		 * VLAN; only relevant for VLAN-aware bridges.
		 */
		if (br_vlan_enabled(orig_dev))
			err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev,
							  vlan, extack);
		/* Success is deliberately turned into -EOPNOTSUPP -
		 * presumably to tell switchdev the object itself is not
		 * offloaded on the bridge master; confirm against callers.
		 */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLAN-unaware bridges carry all traffic on a single FID. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
					     vlan->vid, flag_untagged,
					     flag_pvid, extack);
}
1610
1611static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1612{
1613	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1614			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1615}
1616
1617static int
1618mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1619			       struct mlxsw_sp_bridge_port *bridge_port,
1620			       u16 fid_index)
1621{
1622	bool lagged = bridge_port->lagged;
1623	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1624	u16 system_port;
1625
1626	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1627	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1628	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1629	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1630
1631	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1632}
1633
1634static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1635{
1636	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1637			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1638}
1639
1640static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1641{
1642	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1643			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1644}
1645
/* Add or remove a unicast FDB record pointing at an IPv4 tunnel (NVE)
 * underlay address via the SFD register.
 */
static int
mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
			     const char *mac, u16 fid, __be32 addr, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	/* SFD payload is too large for the stack; allocate it. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	uip = be32_to_cpu(addr);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
				      mlxsw_sp_sfd_rec_policy(dynamic), mac,
				      fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
	/* The device updates num_rec on write; a mismatch with the number we
	 * packed means the record was not processed.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1676
/* Write (or remove) a unicast FDB record pointing at an IPv6 tunnel
 * underlay address. The IPv6 address itself lives in KVDL and is referenced
 * by @kvdl_index.
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
						  const char *mac, u16 fid,
						  u32 kvdl_index, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD payload is too large for the stack; allocate it. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
	/* Device rewrites num_rec; mismatch means the record was ignored. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1704
/* Add a unicast FDB record over an IPv6 tunnel: allocate a KVDL entry for
 * the underlay address, write the FDB record referencing it, and record the
 * {MAC, FID} -> address mapping. Unwinds in reverse order on failure.
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
					    const char *mac, u16 fid,
					    const struct in6_addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
	if (err)
		return err;

	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
						     kvdl_index, true);
	if (err)
		goto err_sfd_write;

	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
	if (err)
		/* Replace can fail only for creating new mapping, so removing
		 * the FDB entry in the error path is OK.
		 */
		goto err_addr_replace;

	return 0;

err_addr_replace:
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
					       false);
err_sfd_write:
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
	return err;
}
1737
/* Tear down an IPv6 tunnel FDB record: reverse order of _op6_add() -
 * drop the address mapping, remove the FDB record, release the KVDL entry.
 * The kvdl_index is irrelevant on removal, hence 0.
 */
static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
					     const char *mac, u16 fid,
					     const struct in6_addr *addr)
{
	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
}
1746
1747static int
1748mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1749			     u16 fid, const struct in6_addr *addr, bool adding)
1750{
1751	if (adding)
1752		return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1753							addr);
1754
1755	mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1756	return 0;
1757}
1758
1759static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1760					  const char *mac, u16 fid,
1761					  enum mlxsw_sp_l3proto proto,
1762					  const union mlxsw_sp_l3addr *addr,
1763					  bool adding, bool dynamic)
1764{
1765	switch (proto) {
1766	case MLXSW_SP_L3_PROTO_IPV4:
1767		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
1768						    addr->addr4, adding);
1769	case MLXSW_SP_L3_PROTO_IPV6:
1770		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
1771						    &addr->addr6, adding);
1772	default:
1773		WARN_ON(1);
1774		return -EOPNOTSUPP;
1775	}
1776}
1777
/* Core helper for adding/removing a unicast FDB record for a single port
 * via the SFD register, with caller-chosen forwarding action and ageing
 * policy.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				     const char *mac, u16 fid, u16 vid,
				     bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     enum mlxsw_reg_sfd_rec_policy policy)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD payload is too large for the stack; allocate it. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
			      local_port);
	/* Device rewrites num_rec; mismatch means the record was ignored. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1807
1808static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1809				   const char *mac, u16 fid, u16 vid,
1810				   bool adding, bool dynamic)
1811{
1812	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
1813					 adding, MLXSW_REG_SFD_REC_ACTION_NOP,
1814					 mlxsw_sp_sfd_rec_policy(dynamic));
1815}
1816
1817int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1818			bool adding)
1819{
1820	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
1821					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1822					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1823}
1824
/* Add or remove a unicast FDB record pointing at a LAG (rather than a
 * single port) via the SFD register.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD payload is too large for the stack; allocate it. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	/* Device rewrites num_rec; mismatch means the record was ignored. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1853
1854static int
1855mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
1856		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
1857{
1858	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1859	struct net_device *orig_dev = fdb_info->info.dev;
1860	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1861	struct mlxsw_sp_bridge_device *bridge_device;
1862	struct mlxsw_sp_bridge_port *bridge_port;
1863	u16 fid_index, vid;
1864
1865	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
1866	if (!bridge_port)
1867		return -EINVAL;
1868
1869	bridge_device = bridge_port->bridge_device;
1870	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
1871							       bridge_device,
1872							       fdb_info->vid);
1873	if (!mlxsw_sp_port_vlan)
1874		return 0;
1875
1876	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
1877	vid = mlxsw_sp_port_vlan->vid;
1878
1879	if (!bridge_port->lagged)
1880		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
1881					       bridge_port->system_port,
1882					       fdb_info->addr, fid_index, vid,
1883					       adding, false);
1884	else
1885		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
1886						   bridge_port->lag_id,
1887						   fdb_info->addr, fid_index,
1888						   vid, adding, false);
1889}
1890
/* Add or remove the multicast FDB record for an MDB entry, binding its
 * {MAC, FID} key to the entry's MID (PGT index) via the SFD register.
 */
static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_mdb_entry *mdb_entry,
				    bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD payload is too large for the stack; allocate it. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
			      mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mdb_entry->mid);
	/* Device rewrites num_rec; mismatch means the record was ignored. */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1919
1920static void
1921mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1922				      struct mlxsw_sp_bridge_port *bridge_port,
1923				      struct mlxsw_sp_ports_bitmap *ports_bm)
1924{
1925	struct mlxsw_sp_port *mlxsw_sp_port;
1926	u64 max_lag_members, i;
1927	int lag_id;
1928
1929	if (!bridge_port->lagged) {
1930		set_bit(bridge_port->system_port, ports_bm->bitmap);
1931	} else {
1932		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1933						     MAX_LAG_MEMBERS);
1934		lag_id = bridge_port->lag_id;
1935		for (i = 0; i < max_lag_members; i++) {
1936			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1937								 lag_id, i);
1938			if (mlxsw_sp_port)
1939				set_bit(mlxsw_sp_port->local_port,
1940					ports_bm->bitmap);
1941		}
1942	}
1943}
1944
1945static void
1946mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
1947				struct mlxsw_sp_bridge_device *bridge_device,
1948				struct mlxsw_sp *mlxsw_sp)
1949{
1950	struct mlxsw_sp_bridge_port *bridge_port;
1951
1952	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1953		if (bridge_port->mrouter) {
1954			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1955							      bridge_port,
1956							      flood_bm);
1957		}
1958	}
1959}
1960
/* Take an mrouter reference on every port set in @ports_bm for the given
 * MDB entry. On failure, releases only the references taken so far.
 */
static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ports_bitmap *ports_bm,
					struct mlxsw_sp_mdb_entry *mdb_entry)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	unsigned int nbits = ports_bm->nbits;
	int i;

	for_each_set_bit(i, ports_bm->bitmap, nbits) {
		mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
								     mdb_entry,
								     i);
		if (IS_ERR(mdb_entry_port)) {
			/* Truncate the rollback walk to the bits that were
			 * successfully added (those below the failing bit).
			 */
			nbits = i;
			goto err_mrouter_port_get;
		}
	}

	return 0;

err_mrouter_port_get:
	for_each_set_bit(i, ports_bm->bitmap, nbits)
		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
	return PTR_ERR(mdb_entry_port);
}
1986
1987static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
1988					 struct mlxsw_sp_ports_bitmap *ports_bm,
1989					 struct mlxsw_sp_mdb_entry *mdb_entry)
1990{
1991	int i;
1992
1993	for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
1994		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
1995}
1996
/* Add (@add == true) or remove all of the bridge's multicast router ports
 * to / from the MDB entry. Removal cannot fail, so a non-zero return can
 * only come from bitmap allocation or from the add path.
 */
static int
mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_bridge_device *bridge_device,
			     struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
{
	struct mlxsw_sp_ports_bitmap ports_bm;
	int err;

	err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
	if (err)
		return err;

	mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);

	if (add)
		err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
						   mdb_entry);
	else
		mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);

	mlxsw_sp_port_bitmap_fini(&ports_bm);
	return err;
}
2020
/* Allocate and set up a new MDB entry for {@addr, @fid}: allocate a PGT MID,
 * pre-populate the entry with the bridge's multicast router ports, add
 * @local_port as a member and - if multicast is enabled - write the entry to
 * the device, before publishing it in the bridge's hash table and list.
 * Returns the new entry or an ERR_PTR().
 */
static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   const unsigned char *addr, u16 fid, u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
	if (!mdb_entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(mdb_entry->key.addr, addr);
	mdb_entry->key.fid = fid;
	err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
	if (err)
		goto err_pgt_mid_alloc;

	INIT_LIST_HEAD(&mdb_entry->ports_list);

	err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
					   true);
	if (err)
		goto err_mdb_mrouters_set;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
						     local_port);
	if (IS_ERR(mdb_entry_port)) {
		err = PTR_ERR(mdb_entry_port);
		goto err_mdb_entry_port_get;
	}

	/* With multicast disabled, entries are only kept in the driver; they
	 * are written to the device when it is enabled again (see
	 * mlxsw_sp_bridge_mdb_mc_enable_sync()).
	 */
	if (bridge_device->multicast_enabled) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
		if (err)
			goto err_mdb_entry_write;
	}

	err = rhashtable_insert_fast(&bridge_device->mdb_ht,
				     &mdb_entry->ht_node,
				     mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);

	return mdb_entry;

	/* Unwind in exact reverse order of the setup above. */
err_rhashtable_insert:
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
err_mdb_entry_write:
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
err_mdb_entry_port_get:
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
err_mdb_mrouters_set:
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
err_pgt_mid_alloc:
	kfree(mdb_entry);
	return ERR_PTR(err);
}
2083
/* Destroy an MDB entry: unlink it, remove it from the device (if multicast
 * is enabled) and only then release its member ports and PGT MID. See the
 * comment in mlxsw_sp_mc_mdb_entry_put() for why the entry must be deleted
 * before the PGT resources are released.
 */
static void
mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_mdb_entry *mdb_entry,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   u16 local_port, bool force)
{
	list_del(&mdb_entry->list);
	rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
			       mlxsw_sp_mdb_ht_params);
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
	/* At this point all port references should be gone. */
	WARN_ON(!list_empty(&mdb_entry->ports_list));
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
	kfree(mdb_entry);
}
2101
/* Look up the MDB entry for {@addr, @fid} and take a reference on it for
 * @local_port, creating the entry if it does not exist yet. Returns the
 * entry or an ERR_PTR().
 */
static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_bridge_device *bridge_device,
			  const unsigned char *addr, u16 fid, u16 local_port)
{
	struct mlxsw_sp_mdb_entry_key key = {};
	struct mlxsw_sp_mdb_entry *mdb_entry;

	ether_addr_copy(key.addr, addr);
	key.fid = fid;
	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
					   mlxsw_sp_mdb_ht_params);
	if (mdb_entry) {
		struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

		/* Entry already exists; only add / reference the port. */
		mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
							     mdb_entry,
							     local_port);
		if (IS_ERR(mdb_entry_port))
			return ERR_CAST(mdb_entry_port);

		return mdb_entry;
	}

	return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
					  local_port);
}
2129
2130static bool
2131mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
2132			     struct mlxsw_sp_mdb_entry_port *removed_entry_port,
2133			     bool force)
2134{
2135	if (mdb_entry->ports_count > 1)
2136		return false;
2137
2138	if (force)
2139		return true;
2140
2141	if (!removed_entry_port->mrouter &&
2142	    refcount_read(&removed_entry_port->refcount) > 1)
2143		return false;
2144
2145	if (removed_entry_port->mrouter &&
2146	    refcount_read(&removed_entry_port->refcount) > 2)
2147		return false;
2148
2149	return true;
2150}
2151
/* Release @local_port's reference on an MDB entry, destroying the entry when
 * it was the last port using it. With @force, the port's references are
 * dropped unconditionally.
 */
static void
mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_bridge_device *bridge_device,
			  struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
			  bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	/* Avoid a temporary situation in which the MDB entry points to an empty
	 * PGT entry, as otherwise packets will be temporarily dropped instead
	 * of being flooded. Instead, in this situation, call
	 * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
	 * then releases the PGT entry.
	 */
	if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
		mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
					   local_port, force);
	else
		mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
					    force);
}
2177
/* Handle addition of a SWITCHDEV_OBJ_ID_PORT_MDB object on the port.
 * Returns 0 without doing anything when the bridge port or the matching
 * port VLAN cannot be found, i.e. the entry is simply not offloaded.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
					      mdb->addr, fid_index,
					      mlxsw_sp_port->local_port);
	if (IS_ERR(mdb_entry))
		return PTR_ERR(mdb_entry);

	return 0;
}
2210
/* Write all of the bridge's MDB entries to the device when multicast is
 * enabled, or remove them when it is disabled. On failure, the entries
 * already written are reverted.
 */
static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool mc_enabled)
{
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
		if (err)
			goto err_mdb_entry_write;
	}
	return 0;

err_mdb_entry_write:
	/* Roll back the entries written before the failure. */
	list_for_each_entry_continue_reverse(mdb_entry,
					     &bridge_device->mdb_list, list)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
	return err;
}
2232
/* Add (@add == true) or remove the port as a multicast router port of every
 * MDB entry of the bridge it belongs to.
 */
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;

	bridge_device = bridge_port->bridge_device;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		if (add)
			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
							    local_port);
		else
			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
							    local_port);
	}
}
2254
/* switchdev object-add handler for the port. Only VLAN and MDB objects are
 * offloaded; everything else is rejected with -EOPNOTSUPP.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);

		/* The event is emitted before the changes are actually
		 * applied to the bridge. Therefore schedule the respin
		 * call for later, so that the respin logic sees the
		 * updated bridge state.
		 */
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
2287
/* Remove @vid from the bridge port: detach the port VLAN from the bridge,
 * update the PVID, remove the VLAN from the port and destroy the port VLAN.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* If the VLAN being deleted is the current PVID, the PVID is cleared;
	 * otherwise it is left unchanged.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 proto;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
2306
2307static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
2308				   const struct switchdev_obj_port_vlan *vlan)
2309{
2310	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2311	struct net_device *orig_dev = vlan->obj.orig_dev;
2312	struct mlxsw_sp_bridge_port *bridge_port;
2313
2314	if (netif_is_bridge_master(orig_dev))
2315		return -EOPNOTSUPP;
2316
2317	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
2318	if (WARN_ON(!bridge_port))
2319		return -EINVAL;
2320
2321	if (!bridge_port->bridge_device->vlan_enabled)
2322		return 0;
2323
2324	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);
2325
2326	return 0;
2327}
2328
/* Handle deletion of a SWITCHDEV_OBJ_ID_PORT_MDB object on the port.
 * Returns 0 when the bridge port or port VLAN cannot be found (the entry
 * was never offloaded), -EINVAL when the MDB entry itself is missing.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry_key key = {};
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	ether_addr_copy(key.addr, mdb->addr);
	key.fid = fid_index;
	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
					   mlxsw_sp_mdb_ht_params);
	if (!mdb_entry) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
				  mlxsw_sp_port->local_port, false);
	return 0;
}
2368
/* Remove the port from all MDB entries of the given FID, forcefully
 * dropping its references.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
	u16 local_port = mlxsw_sp_port->local_port;

	bridge_device = bridge_port->bridge_device;

	list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
				 list) {
		if (mdb_entry->key.fid != fid_index)
			continue;

		/* A multicast router bridge port holds an extra mrouter
		 * reference on each entry; drop it first.
		 */
		if (bridge_port->mrouter)
			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
							    mdb_entry,
							    local_port);

		mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
					  local_port, true);
	}
}
2395
/* switchdev object-delete handler for the port. Mirrors
 * mlxsw_sp_port_obj_add(); a SPAN respin is scheduled unconditionally after
 * the deletion has been handled.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
2420
2421static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
2422						   u16 lag_id)
2423{
2424	struct mlxsw_sp_port *mlxsw_sp_port;
2425	u64 max_lag_members;
2426	int i;
2427
2428	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
2429					     MAX_LAG_MEMBERS);
2430	for (i = 0; i < max_lag_members; i++) {
2431		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2432		if (mlxsw_sp_port)
2433			return mlxsw_sp_port;
2434	}
2435	return NULL;
2436}
2437
/* Replay the switchdev objects already configured on the bridge port so the
 * newly joining port picks them up. On failure, the objects replayed so far
 * are un-replayed.
 */
static int
mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = {
		.brport_dev = bridge_port->dev,
		.mlxsw_sp_port = mlxsw_sp_port,
	};
	struct notifier_block *nb;
	int err;

	nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb;
	err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
					   &rso, NULL, nb, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	/* Replay again with the "unreplay" notifier to undo partial state. */
	nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb;
	switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
				     &rso, NULL, nb, extack);
	return err;
}
2464
/* Common join handling for ports enslaved to a VLAN-aware (802.1Q/802.1ad)
 * bridge. VLAN uppers cannot be enslaved to such a bridge.
 */
static int
mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netlink_ext_ack *extack)
{
	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
}
2481
/* 802.1Q bridge ops: delegate port join to the common VLAN-aware handler. */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						    extack);
}
2491
/* Common leave handling for ports of a VLAN-aware bridge: restore the
 * default PVID.
 */
static void
mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
2499
/* 802.1Q bridge ops: delegate port leave to the common VLAN-aware handler. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
2507
/* Attach a VxLAN device to a VLAN-aware bridge: resolve the VLAN, take a
 * reference on the corresponding 802.1Q FID and enable NVE on it with the
 * VxLAN device's VNI.
 */
static int
mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				      const struct net_device *vxlan_dev,
				      u16 vid, u16 ethertype,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
		.ethertype = ethertype,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
		return PTR_ERR(fid);
	}

	/* A FID can carry at most one VNI. */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2558
/* 802.1Q bridge ops: VxLAN join with 802.1Q as the decapsulation EtherType. */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021Q, extack);
}
2567
2568static struct net_device *
2569mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2570{
2571	struct net_device *dev;
2572	struct list_head *iter;
2573
2574	netdev_for_each_lower_dev(br_dev, dev, iter) {
2575		u16 pvid;
2576		int err;
2577
2578		if (!netif_is_vxlan(dev))
2579			continue;
2580
2581		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2582		if (err || pvid != vid)
2583			continue;
2584
2585		return dev;
2586	}
2587
2588	return NULL;
2589}
2590
/* 802.1Q bridge ops: take a reference on the 802.1Q FID of @vid. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
2599
/* 802.1Q bridge ops: look up the 802.1Q FID of @vid without creating it. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
2608
/* 802.1Q bridge ops: map a FID back to its VLAN ID. */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2615
/* Bridge ops for a VLAN-aware (802.1Q) bridge. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2624
2625static bool
2626mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2627			   const struct net_device *br_dev)
2628{
2629	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2630
2631	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2632			    list) {
2633		if (mlxsw_sp_port_vlan->bridge_port &&
2634		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2635		    br_dev)
2636			return true;
2637	}
2638
2639	return false;
2640}
2641
/* 802.1D (VLAN-unaware) bridge ops: join the port (or its VLAN upper) to the
 * bridge by attaching the matching port VLAN and replaying bridge port
 * objects.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;
	int err;

	/* A VLAN upper is enslaved via its VLAN ID; a plain port uses the
	 * default VLAN.
	 */
	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	return err;
}
2682
/* 802.1D bridge ops: detach the port VLAN that was enslaved to the bridge.
 * Silently returns if the port VLAN is gone or was never bridged.
 */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
2699
2700static int
2701mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2702				 const struct net_device *vxlan_dev, u16 vid,
2703				 struct netlink_ext_ack *extack)
2704{
2705	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2706	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2707	struct mlxsw_sp_nve_params params = {
2708		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2709		.vni = vxlan->cfg.vni,
2710		.dev = vxlan_dev,
2711		.ethertype = ETH_P_8021Q,
2712	};
2713	struct mlxsw_sp_fid *fid;
2714	int err;
2715
2716	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2717	if (IS_ERR(fid)) {
2718		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2719		return -EINVAL;
2720	}
2721
2722	if (mlxsw_sp_fid_vni_is_set(fid)) {
2723		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2724		err = -EINVAL;
2725		goto err_vni_exists;
2726	}
2727
2728	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2729	if (err)
2730		goto err_nve_fid_enable;
2731
2732	return 0;
2733
2734err_nve_fid_enable:
2735err_vni_exists:
2736	mlxsw_sp_fid_put(fid);
2737	return err;
2738}
2739
/* 802.1D bridge ops: take a reference on the bridge's 802.1D FID, keyed by
 * the bridge device's ifindex. @vid is unused for a VLAN-unaware bridge.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
2748
/* 802.1D bridge ops: look up the bridge's 802.1D FID without creating it. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
	if (vid)
		return NULL;

	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
}
2761
/* 802.1D bridge ops: a VLAN-unaware bridge has no VLANs, so always 0. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2768
/* Bridge ops for a VLAN-unaware (802.1D) bridge. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2777
/* 802.1ad bridge ops: switch the port to S-tag VLAN classification before
 * performing the common VLAN-aware join; revert the classification on
 * failure.
 */
static int
mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
	if (err)
		return err;

	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						   extack);
	if (err)
		goto err_bridge_vlan_aware_port_join;

	return 0;

err_bridge_vlan_aware_port_join:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	return err;
}
2801
/* 802.1ad bridge ops: common VLAN-aware leave, then restore the default
 * VLAN classification set up in mlxsw_sp_bridge_8021ad_port_join().
 */
static void
mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
}
2810
/* 802.1ad bridge ops: VxLAN join with 802.1ad as the decapsulation
 * EtherType.
 */
static int
mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021AD, extack);
}
2819
/* Bridge ops for an 802.1ad bridge on Spectrum-1 devices. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2828
/* Spectrum-2 variant of the 802.1ad port join: additionally set the port's
 * egress EtherType, reverting it if the common join fails.
 */
static int
mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct netlink_ext_ack *extack)
{
	int err;

	/* The EtherType of decapsulated packets is determined at the egress
	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
	 * co-exist.
	 */
	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
					       mlxsw_sp_port, extack);
	if (err)
		goto err_bridge_8021ad_port_join;

	return 0;

err_bridge_8021ad_port_join:
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
	return err;
}
2856
/* Spectrum-2 variant of the 802.1ad port leave: common leave, then restore
 * the default 802.1Q egress EtherType.
 */
static void
mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				   struct mlxsw_sp_bridge_port *bridge_port,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
					  mlxsw_sp_port);
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
}
2866
/* Bridge ops for an 802.1ad bridge on Spectrum-2 and later devices. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2875
/* Enslave @brport_dev (the port or one of its uppers) to bridge @br_dev:
 * take a reference on the bridge port, run the bridge-type specific join
 * and replay the enslavement-related netdev events. Rolls back on failure.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
					       extack);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
2910
/* Undo mlxsw_sp_port_bridge_join(): run the bridge-type specific leave,
 * disable port security and drop the bridge port reference. Silently
 * returns if the bridge device or bridge port is not known to the driver.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_port_security_set(mlxsw_sp_port, false);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2931
/* Attach a VxLAN device to the bridge via the bridge-type specific
 * vxlan_join operation.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2946
/* Detach a VxLAN device from its bridge: disable NVE on the FID carrying
 * the device's VNI, if such a FID exists.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop both the reference we just took during lookup and the reference
	 * the VXLAN device took.
	 */
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
}
2965
2966static void
2967mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2968				      enum mlxsw_sp_l3proto *proto,
2969				      union mlxsw_sp_l3addr *addr)
2970{
2971	if (vxlan_addr->sa.sa_family == AF_INET) {
2972		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2973		*proto = MLXSW_SP_L3_PROTO_IPV4;
2974	} else {
2975		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2976		*proto = MLXSW_SP_L3_PROTO_IPV6;
2977	}
2978}
2979
2980static void
2981mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2982				      const union mlxsw_sp_l3addr *addr,
2983				      union vxlan_addr *vxlan_addr)
2984{
2985	switch (proto) {
2986	case MLXSW_SP_L3_PROTO_IPV4:
2987		vxlan_addr->sa.sa_family = AF_INET;
2988		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2989		break;
2990	case MLXSW_SP_L3_PROTO_IPV6:
2991		vxlan_addr->sa.sa_family = AF_INET6;
2992		vxlan_addr->sin6.sin6_addr = addr->addr6;
2993		break;
2994	}
2995}
2996
/* Notify the VxLAN driver about a learned / aged-out tunnel FDB entry,
 * so its software FDB stays in sync with the device.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	/* Only default destination port and no local interface are
	 * supported (see mlxsw_sp_switchdev_vxlan_work_prepare()).
	 */
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.vni = vni;
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}
3018
3019static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
3020					    const char *mac,
3021					    enum mlxsw_sp_l3proto proto,
3022					    union mlxsw_sp_l3addr *addr,
3023					    __be32 vni,
3024					    bool adding)
3025{
3026	if (netif_is_vxlan(dev))
3027		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
3028						  adding);
3029}
3030
3031static void
3032mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
3033			    const char *mac, u16 vid,
3034			    struct net_device *dev, bool offloaded, bool locked)
3035{
3036	struct switchdev_notifier_fdb_info info = {};
3037
3038	info.addr = mac;
3039	info.vid = vid;
3040	info.offloaded = offloaded;
3041	info.locked = locked;
3042	call_switchdev_notifiers(type, dev, &info.info, NULL);
3043}
3044
/* Process one learned / aged-out MAC record from the SFN register:
 * mirror the hardware FDB change into the bridge via switchdev
 * notifiers and re-write the entry to the device so it stops
 * re-sending the same notification.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port, vid, fid, evid = 0;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge is notified with VID 0; the egress VID
	 * programmed to the device is always the {Port, VID} one.
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	evid = mlxsw_sp_port_vlan->vid;

	/* On a security-enabled (locked) port do not write the entry to
	 * the device; only report the MAC to the bridge, marked locked.
	 */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Record could not be resolved to a bridge port: remove the
	 * entry from the device without notifying anyone.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3112
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the record carries
 * a LAG ID instead of a local port, and the entry is re-written with
 * the LAG-specific FDB operation.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* A VLAN-unaware bridge is notified with VID 0; the LAG VID used
	 * towards the device is always the {Port, VID} one.
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

	/* On a security-enabled (locked) port do not write the entry to
	 * the device; only report the MAC to the bridge, marked locked.
	 */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Record could not be resolved to a bridge port: remove the
	 * entry from the device without notifying anyone.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3179
/* Resolve a tunnel FDB record's FID to its NVE device, bridge VLAN and
 * VNI. Returns 0 on success, or -errno when the record should not be
 * processed (NVE device gone or down, learning disabled, no offloaded
 * bridge on top, etc.). On success *nve_dev, *p_vid and *p_vni are
 * valid; *nve_dev may be set even on some failure paths.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Do not learn if the bridge port has learning disabled. */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* Likewise if the VxLAN device itself has learning disabled. */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
3230
/* Process one learned / aged-out unicast tunnel record: resolve the
 * underlay IP, re-write the entry to the device and notify both the
 * NVE driver and the bridge. On any failure the entry is removed from
 * the device so it does not keep re-sending the notification.
 */
static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
						      char *sfn_pl,
						      int rec_index,
						      bool adding)
{
	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
	enum switchdev_notifier_type type;
	struct net_device *nve_dev;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	char mac[ETH_ALEN];
	u16 fid_index, vid;
	__be32 vni;
	u32 uip;
	int err;

	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
				       &uip, &sfn_proto);

	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
	if (!fid)
		goto err_fid_lookup;

	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
					      (enum mlxsw_sp_l3proto) sfn_proto,
					      &addr);
	if (err)
		goto err_ip_resolve;

	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
							  &nve_dev, &vid, &vni);
	if (err)
		goto err_fdb_process;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
					     (enum mlxsw_sp_l3proto) sfn_proto,
					     &addr, adding, true);
	if (err)
		goto err_fdb_op;

	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
					(enum mlxsw_sp_l3proto) sfn_proto,
					&addr, vni, adding);

	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding, false);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_op:
err_fdb_process:
err_ip_resolve:
	mlxsw_sp_fid_put(fid);
err_fid_lookup:
	/* Remove an FDB entry in case we cannot process it. Otherwise the
	 * device will keep sending the same notification over and over again.
	 * NOTE(review): on the err_fid_lookup / err_ip_resolve paths 'addr'
	 * has not been resolved and is passed uninitialized; presumably it
	 * is ignored by the removal operation — confirm against the op.
	 */
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
				       false, true);
}
3295
/* Dispatch a single SFN record to its type-specific handler. Records of
 * unknown type are silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, false);
		break;
	}
}
3326
3327#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
3328
/* Periodic work that polls the device for FDB notifications (SFN
 * register) and processes them under RTNL. Reschedules itself as long
 * as at least one bridge is offloaded; schedules the next run sooner
 * when the queries budget was exhausted (i.e. more records may be
 * pending).
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	bool reschedule = false;
	char *sfn_pl;
	int queries;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	/* No offloaded bridges - nothing to poll for, do not reschedule. */
	if (list_empty(&bridge->bridges_list))
		goto out;
	reschedule = true;
	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
	while (queries > 0) {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			goto out;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
		/* A partial batch means the device queue is drained. */
		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
			goto out;
		queries--;
	}

out:
	rtnl_unlock();
	kfree(sfn_pl);
	if (!reschedule)
		return;
	/* !queries => budget exhausted, poll again without delay. */
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
3374
/* Deferred-work context for a switchdev FDB event. Allocated in atomic
 * notifier context and processed later in process context.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	netdevice_tracker dev_tracker;
	/* Only one member is valid, selected by 'event'. */
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;	/* Device the event was reported on; held. */
	unsigned long event;
};
3385
/* Handle a bridge FDB add/del targeting a VxLAN device: look up the
 * matching unicast entry in the VxLAN driver's FDB, program / remove
 * the tunnel entry in the device, and report offload status back to
 * both the VxLAN driver and the bridge.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Removal is best-effort; offload status is cleared even
		 * if the device operation failed.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
3434
3435static void
3436mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
3437					switchdev_work)
3438{
3439	struct mlxsw_sp_bridge_device *bridge_device;
3440	struct net_device *dev = switchdev_work->dev;
3441	struct net_device *br_dev;
3442	struct mlxsw_sp *mlxsw_sp;
3443	struct mlxsw_sp_fid *fid;
3444	__be32 vni;
3445	int err;
3446
3447	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
3448	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
3449		return;
3450
3451	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
3452	    (!switchdev_work->fdb_info.added_by_user ||
3453	     switchdev_work->fdb_info.is_local))
3454		return;
3455
3456	if (!netif_running(dev))
3457		return;
3458	br_dev = netdev_master_upper_dev_get(dev);
3459	if (!br_dev)
3460		return;
3461	if (!netif_is_bridge_master(br_dev))
3462		return;
3463	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3464	if (!mlxsw_sp)
3465		return;
3466	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3467	if (!bridge_device)
3468		return;
3469
3470	fid = bridge_device->ops->fid_lookup(bridge_device,
3471					     switchdev_work->fdb_info.vid);
3472	if (!fid)
3473		return;
3474
3475	err = mlxsw_sp_fid_vni(fid, &vni);
3476	if (err)
3477		goto out;
3478
3479	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
3480						  vni);
3481
3482out:
3483	mlxsw_sp_fid_put(fid);
3484}
3485
/* Process-context handler for a deferred bridge FDB event. Holds RTNL
 * for the duration, then releases the resources taken by
 * mlxsw_sp_switchdev_event() (MAC copy, netdev reference, work item).
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* Tunnel devices take a different path - they are not our lowers. */
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only offload entries a user added; local stay in SW. */
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);
}
3537
3538static void
3539mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
3540				 struct mlxsw_sp_switchdev_event_work *
3541				 switchdev_work)
3542{
3543	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3544	struct mlxsw_sp_bridge_device *bridge_device;
3545	struct net_device *dev = switchdev_work->dev;
3546	enum mlxsw_sp_l3proto proto;
3547	union mlxsw_sp_l3addr addr;
3548	struct net_device *br_dev;
3549	struct mlxsw_sp_fid *fid;
3550	u16 vid;
3551	int err;
3552
3553	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3554	br_dev = netdev_master_upper_dev_get(dev);
3555
3556	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3557	if (!bridge_device)
3558		return;
3559
3560	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3561	if (!fid)
3562		return;
3563
3564	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3565					      &proto, &addr);
3566
3567	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
3568		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
3569		if (err) {
3570			mlxsw_sp_fid_put(fid);
3571			return;
3572		}
3573		vxlan_fdb_info->offloaded = true;
3574		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3575					 &vxlan_fdb_info->info, NULL);
3576		mlxsw_sp_fid_put(fid);
3577		return;
3578	}
3579
3580	/* The device has a single FDB table, whereas Linux has two - one
3581	 * in the bridge driver and another in the VxLAN driver. We only
3582	 * program an entry to the device if the MAC points to the VxLAN
3583	 * device in the bridge's FDB table
3584	 */
3585	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3586	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
3587		goto err_br_fdb_find;
3588
3589	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3590					     mlxsw_sp_fid_index(fid), proto,
3591					     &addr, true, false);
3592	if (err)
3593		goto err_fdb_tunnel_uc_op;
3594	vxlan_fdb_info->offloaded = true;
3595	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
3596				 &vxlan_fdb_info->info, NULL);
3597	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3598				    vxlan_fdb_info->eth_addr, vid, dev, true,
3599				    false);
3600
3601	mlxsw_sp_fid_put(fid);
3602
3603	return;
3604
3605err_fdb_tunnel_uc_op:
3606err_br_fdb_find:
3607	mlxsw_sp_fid_put(fid);
3608}
3609
3610static void
3611mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
3612				 struct mlxsw_sp_switchdev_event_work *
3613				 switchdev_work)
3614{
3615	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3616	struct mlxsw_sp_bridge_device *bridge_device;
3617	struct net_device *dev = switchdev_work->dev;
3618	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3619	enum mlxsw_sp_l3proto proto;
3620	union mlxsw_sp_l3addr addr;
3621	struct mlxsw_sp_fid *fid;
3622	u16 vid;
3623
3624	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
3625	if (!vxlan_fdb_info->offloaded)
3626		return;
3627
3628	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3629	if (!bridge_device)
3630		return;
3631
3632	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3633	if (!fid)
3634		return;
3635
3636	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3637					      &proto, &addr);
3638
3639	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
3640		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3641		mlxsw_sp_fid_put(fid);
3642		return;
3643	}
3644
3645	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3646				       mlxsw_sp_fid_index(fid), proto, &addr,
3647				       false, false);
3648	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3649	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3650				    vxlan_fdb_info->eth_addr, vid, dev, false,
3651				    false);
3652
3653	mlxsw_sp_fid_put(fid);
3654}
3655
/* Process-context handler for a deferred VxLAN FDB event. Re-validates
 * under RTNL that the device is still running and enslaved to an
 * offloaded bridge before dispatching, then drops the references taken
 * in mlxsw_sp_switchdev_event().
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);
}
3691
3692static int
3693mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3694				      switchdev_work,
3695				      struct switchdev_notifier_info *info)
3696{
3697	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3698	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3699	struct vxlan_config *cfg = &vxlan->cfg;
3700	struct netlink_ext_ack *extack;
3701
3702	extack = switchdev_notifier_info_to_extack(info);
3703	vxlan_fdb_info = container_of(info,
3704				      struct switchdev_notifier_vxlan_fdb_info,
3705				      info);
3706
3707	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3708		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3709		return -EOPNOTSUPP;
3710	}
3711	if (vxlan_fdb_info->remote_vni != cfg->vni ||
3712	    vxlan_fdb_info->vni != cfg->vni) {
3713		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3714		return -EOPNOTSUPP;
3715	}
3716	if (vxlan_fdb_info->remote_ifindex) {
3717		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3718		return -EOPNOTSUPP;
3719	}
3720	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3721		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3722		return -EOPNOTSUPP;
3723	}
3724	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3725		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
3726		return -EOPNOTSUPP;
3727	}
3728
3729	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3730
3731	return 0;
3732}
3733
/* Called under rcu_read_lock() */
/* Atomic switchdev notifier: handles PORT_ATTR_SET inline, and defers
 * FDB events to a workqueue after copying their payload (the notifier
 * data is only valid during this call) and taking a reference on the
 * device.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The copied info still points at the caller's MAC buffer;
		 * duplicate it so the deferred work owns its own copy.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containing mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3815
/* Atomic (non-blocking) switchdev notifier block. */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3819
3820static int
3821mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
3822				  struct mlxsw_sp_bridge_device *bridge_device,
3823				  const struct net_device *vxlan_dev, u16 vid,
3824				  bool flag_untagged, bool flag_pvid,
3825				  struct netlink_ext_ack *extack)
3826{
3827	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3828	__be32 vni = vxlan->cfg.vni;
3829	struct mlxsw_sp_fid *fid;
3830	u16 old_vid;
3831	int err;
3832
3833	/* We cannot have the same VLAN as PVID and egress untagged on multiple
3834	 * VxLAN devices. Note that we get this notification before the VLAN is
3835	 * actually added to the bridge's database, so it is not possible for
3836	 * the lookup function to return 'vxlan_dev'
3837	 */
3838	if (flag_untagged && flag_pvid &&
3839	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
3840		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
3841		return -EINVAL;
3842	}
3843
3844	if (!netif_running(vxlan_dev))
3845		return 0;
3846
3847	/* First case: FID is not associated with this VNI, but the new VLAN
3848	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
3849	 * it exists
3850	 */
3851	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3852	if (!fid) {
3853		if (!flag_untagged || !flag_pvid)
3854			return 0;
3855		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
3856						      vid, extack);
3857	}
3858
3859	/* Second case: FID is associated with the VNI and the VLAN associated
3860	 * with the FID is the same as the notified VLAN. This means the flags
3861	 * (PVID / egress untagged) were toggled and that NVE should be
3862	 * disabled on the FID
3863	 */
3864	old_vid = mlxsw_sp_fid_8021q_vid(fid);
3865	if (vid == old_vid) {
3866		if (WARN_ON(flag_untagged && flag_pvid)) {
3867			mlxsw_sp_fid_put(fid);
3868			return -EINVAL;
3869		}
3870		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3871		mlxsw_sp_fid_put(fid);
3872		return 0;
3873	}
3874
3875	/* Third case: A new VLAN was configured on the VxLAN device, but this
3876	 * VLAN is not PVID, so there is nothing to do.
3877	 */
3878	if (!flag_pvid) {
3879		mlxsw_sp_fid_put(fid);
3880		return 0;
3881	}
3882
3883	/* Fourth case: Thew new VLAN is PVID, which means the VLAN currently
3884	 * mapped to the VNI should be unmapped
3885	 */
3886	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3887	mlxsw_sp_fid_put(fid);
3888
3889	/* Fifth case: The new VLAN is also egress untagged, which means the
3890	 * VLAN needs to be mapped to the VNI
3891	 */
3892	if (!flag_untagged)
3893		return 0;
3894
3895	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
3896	if (err)
3897		goto err_vxlan_join;
3898
3899	return 0;
3900
3901err_vxlan_join:
3902	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
3903	return err;
3904}
3905
3906static void
3907mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3908				  struct mlxsw_sp_bridge_device *bridge_device,
3909				  const struct net_device *vxlan_dev, u16 vid)
3910{
3911	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3912	__be32 vni = vxlan->cfg.vni;
3913	struct mlxsw_sp_fid *fid;
3914
3915	if (!netif_running(vxlan_dev))
3916		return;
3917
3918	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3919	if (!fid)
3920		return;
3921
3922	/* A different VLAN than the one mapped to the VNI is deleted */
3923	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3924		goto out;
3925
3926	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3927
3928out:
3929	mlxsw_sp_fid_put(fid);
3930}
3931
3932static int
3933mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3934				   struct switchdev_notifier_port_obj_info *
3935				   port_obj_info)
3936{
3937	struct switchdev_obj_port_vlan *vlan =
3938		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3939	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3940	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3941	struct mlxsw_sp_bridge_device *bridge_device;
3942	struct netlink_ext_ack *extack;
3943	struct mlxsw_sp *mlxsw_sp;
3944	struct net_device *br_dev;
3945
3946	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3947	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3948	if (!br_dev)
3949		return 0;
3950
3951	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3952	if (!mlxsw_sp)
3953		return 0;
3954
3955	port_obj_info->handled = true;
3956
3957	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3958	if (!bridge_device)
3959		return -EINVAL;
3960
3961	if (!bridge_device->vlan_enabled)
3962		return 0;
3963
3964	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3965						 vxlan_dev, vlan->vid,
3966						 flag_untagged,
3967						 flag_pvid, extack);
3968}
3969
3970static void
3971mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3972				   struct switchdev_notifier_port_obj_info *
3973				   port_obj_info)
3974{
3975	struct switchdev_obj_port_vlan *vlan =
3976		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3977	struct mlxsw_sp_bridge_device *bridge_device;
3978	struct mlxsw_sp *mlxsw_sp;
3979	struct net_device *br_dev;
3980
3981	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3982	if (!br_dev)
3983		return;
3984
3985	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3986	if (!mlxsw_sp)
3987		return;
3988
3989	port_obj_info->handled = true;
3990
3991	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3992	if (!bridge_device)
3993		return;
3994
3995	if (!bridge_device->vlan_enabled)
3996		return;
3997
3998	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
3999					  vlan->vid);
4000}
4001
4002static int
4003mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
4004					struct switchdev_notifier_port_obj_info *
4005					port_obj_info)
4006{
4007	int err = 0;
4008
4009	switch (port_obj_info->obj->id) {
4010	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4011		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
4012							 port_obj_info);
4013		break;
4014	default:
4015		break;
4016	}
4017
4018	return err;
4019}
4020
4021static void
4022mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
4023					struct switchdev_notifier_port_obj_info *
4024					port_obj_info)
4025{
4026	switch (port_obj_info->obj->id) {
4027	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4028		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
4029		break;
4030	default:
4031		break;
4032	}
4033}
4034
/* Blocking switchdev notifier: route object add/del and attribute set
 * events either to the VXLAN-specific handlers or to the generic port
 * handlers, depending on the target device type.
 */
static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err = 0;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		if (netif_is_vxlan(dev))
			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
		else
			err = switchdev_handle_port_obj_add(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		if (netif_is_vxlan(dev))
			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
		else
			err = switchdev_handle_port_obj_del(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
4071
/* Return the STP state last recorded for @bridge_port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
4077
/* One-time FDB setup: program the default ageing time, register both
 * switchdev notifiers and prepare (but do not schedule) the FDB
 * notification work item.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	struct notifier_block *nb;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	nb = &mlxsw_sp_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	/* The work is first scheduled when a bridge device is created. */
	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	return err;
}
4111
4112static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
4113{
4114	struct notifier_block *nb;
4115
4116	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
4117
4118	nb = &mlxsw_sp_switchdev_blocking_notifier;
4119	unregister_switchdev_blocking_notifier(nb);
4120
4121	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
4122}
4123
/* Spectrum-1: select the generation-specific 802.1ad bridge ops. */
static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
}

const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
	.init	= mlxsw_sp1_switchdev_init,
};
4132
/* Spectrum-2 and later: select the generation-specific 802.1ad bridge ops. */
static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
}

const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
	.init	= mlxsw_sp2_switchdev_init,
};
4141
4142int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
4143{
4144	struct mlxsw_sp_bridge *bridge;
4145
4146	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
4147	if (!bridge)
4148		return -ENOMEM;
4149	mlxsw_sp->bridge = bridge;
4150	bridge->mlxsw_sp = mlxsw_sp;
4151
4152	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
4153
4154	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
4155	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
4156
4157	mlxsw_sp->switchdev_ops->init(mlxsw_sp);
4158
4159	return mlxsw_sp_fdb_init(mlxsw_sp);
4160}
4161
/* Counterpart of mlxsw_sp_switchdev_init(); all bridges must be gone. */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
4168
   1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
   2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
   3
   4#include <linux/kernel.h>
   5#include <linux/types.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/slab.h>
   9#include <linux/device.h>
  10#include <linux/skbuff.h>
  11#include <linux/if_vlan.h>
  12#include <linux/if_bridge.h>
  13#include <linux/workqueue.h>
  14#include <linux/jiffies.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/netlink.h>
  17#include <net/switchdev.h>
  18#include <net/vxlan.h>
  19
  20#include "spectrum_span.h"
  21#include "spectrum_switchdev.h"
  22#include "spectrum.h"
  23#include "core.h"
  24#include "reg.h"
  25
  26struct mlxsw_sp_bridge_ops;
  27
/* Per-ASIC bridge driver state, allocated in mlxsw_sp_switchdev_init(). */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;	/* FDB notification work */
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;
	/* Only one VLAN-aware bridge is supported at a time. */
	bool vlan_enabled_exists;
	struct list_head bridges_list;
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
  46
/* State for one offloaded bridge netdevice. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list;		/* member of bridge->bridges_list */
	struct list_head ports_list;
	struct list_head mdb_list;
	struct rhashtable mdb_ht;	/* keyed by mlxsw_sp_mdb_entry_key */
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops;
};
  58
/* State for one member (front-panel port or LAG) of an offloaded bridge. */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* member of bridge_device->ports_list */
	struct list_head vlans_list;
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags;		/* BR_* bridge port flags */
	bool mrouter;
	bool lagged;			/* selects which union member is valid */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
  74
/* One VLAN configured on a bridge port. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* member of bridge_port->vlans_list */
	struct list_head port_vlan_list;
	u16 vid;
};
  80
/* Operations that differ between bridge flavors (802.1Q, 802.1D and
 * 802.1AD); see the bridge_8021{q,d,ad}_ops pointers above.
 */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
 101
/* ASIC-generation specific hook; see mlxsw_sp1/2_switchdev_ops below. */
struct mlxsw_sp_switchdev_ops {
	void (*init)(struct mlxsw_sp *mlxsw_sp);
};
 105
/* Lookup key for MDB entries: multicast MAC plus FID. */
struct mlxsw_sp_mdb_entry_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};

/* One multicast group entry of a bridge device. */
struct mlxsw_sp_mdb_entry {
	struct list_head list;		/* member of bridge_device->mdb_list */
	struct rhash_head ht_node;	/* member of bridge_device->mdb_ht */
	struct mlxsw_sp_mdb_entry_key key;
	u16 mid;
	struct list_head ports_list;
	u16 ports_count;
};

/* One port member of an MDB entry. */
struct mlxsw_sp_mdb_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
	refcount_t refcount;
	bool mrouter;
};
 126
/* rhashtable layout for bridge_device->mdb_ht. */
static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
};
 132
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device
				   *bridge_device, bool mc_enabled);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
 152
 153static struct mlxsw_sp_bridge_device *
 154mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
 155			    const struct net_device *br_dev)
 156{
 157	struct mlxsw_sp_bridge_device *bridge_device;
 158
 159	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
 160		if (bridge_device->dev == br_dev)
 161			return bridge_device;
 162
 163	return NULL;
 164}
 165
/* Return true if @br_dev is a bridge currently offloaded by @mlxsw_sp. */
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
					 const struct net_device *br_dev)
{
	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}
 171
/* netdev_walk_all_upper_dev_rcu() callback: destroy the RIF of @dev. */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    struct netdev_nested_priv *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv->data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
 180
/* Destroy the RIF of @dev itself and of all devices stacked above it. */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}
 193
/* Offload all running VXLAN devices already enslaved to @br_dev. On
 * failure, devices processed before the failing one are rolled back.
 */
static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	/* Walk the same list again and undo everything up to (but not
	 * including) the device that failed.
	 */
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}
 226
 227static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
 228					      struct net_device *br_dev)
 229{
 230	struct net_device *dev;
 231	struct list_head *iter;
 232
 233	netdev_for_each_lower_dev(br_dev, dev, iter) {
 234		if (netif_is_vxlan(dev) && netif_running(dev))
 235			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
 236	}
 237}
 238
 239static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
 240					      bool no_delay)
 241{
 242	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
 243	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
 244
 245	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
 246			       msecs_to_jiffies(interval));
 247}
 248
/* Allocate and register state for a newly offloaded bridge netdevice.
 * Only a single VLAN-aware bridge is supported at a time. Returns the
 * new device or an ERR_PTR().
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_mdb_rhashtable_init;

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		u16 proto;

		bridge->vlan_enabled_exists = true;
		/* The VLAN protocol selects between 802.1Q and 802.1AD ops. */
		br_vlan_get_proto(br_dev, &proto);
		if (proto == ETH_P_8021AD)
			bridge_device->ops = bridge->bridge_8021ad_ops;
		else
			bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mdb_list);

	/* Start FDB polling when the first bridge appears. */
	if (list_empty(&bridge->bridges_list))
		mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	rhashtable_destroy(&bridge_device->mdb_ht);
err_mdb_rhashtable_init:
	kfree(bridge_device);
	return ERR_PTR(err);
}
 315
/* Tear down an offloaded bridge: unoffload its VXLAN devices, destroy
 * its RIFs and stop FDB polling when the last bridge goes away.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (list_empty(&bridge->bridges_list))
		cancel_delayed_work(&bridge->fdb_notify.dw);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mdb_list));
	rhashtable_destroy(&bridge_device->mdb_ht);
	kfree(bridge_device);
}
 333
 334static struct mlxsw_sp_bridge_device *
 335mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
 336			   struct net_device *br_dev,
 337			   struct netlink_ext_ack *extack)
 338{
 339	struct mlxsw_sp_bridge_device *bridge_device;
 340
 341	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
 342	if (bridge_device)
 343		return bridge_device;
 344
 345	return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
 346}
 347
/* Destroy the bridge device once its last port is gone. */
static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
			   struct mlxsw_sp_bridge_device *bridge_device)
{
	if (list_empty(&bridge_device->ports_list))
		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}
 355
 356static struct mlxsw_sp_bridge_port *
 357__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
 358			    const struct net_device *brport_dev)
 359{
 360	struct mlxsw_sp_bridge_port *bridge_port;
 361
 362	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
 363		if (bridge_port->dev == brport_dev)
 364			return bridge_port;
 365	}
 366
 367	return NULL;
 368}
 369
 370struct mlxsw_sp_bridge_port *
 371mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
 372			  struct net_device *brport_dev)
 373{
 374	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
 375	struct mlxsw_sp_bridge_device *bridge_device;
 376
 377	if (!br_dev)
 378		return NULL;
 379
 380	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
 381	if (!bridge_device)
 382		return NULL;
 383
 384	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
 385}
 386
/* Defined further below; needed by the replay/unreplay callbacks. */
static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack);
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj);

/* Context passed to the replay notifiers through info.ctx. */
struct mlxsw_sp_bridge_port_replay_switchdev_objs {
	struct net_device *brport_dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int done;	/* replay progress counter, consumed on rollback */
};
 398
/* Replay notifier callback: apply a previously-configured switchdev
 * object to @rso->mlxsw_sp_port, tracking progress in rso->done so a
 * failed replay can be unwound.
 */
static int
mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct netlink_ext_ack *extack = port_obj_info->info.extack;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
	int err = 0;

	rso = (void *)port_obj_info->info.ctx;

	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		goto out;

	/* When a port is joining the bridge through a LAG, there likely are
	 * VLANs configured on that LAG already. The replay will thus attempt to
	 * have the given port-vlans join the corresponding FIDs. But the LAG
	 * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN
	 * memberships, back before CHANGEUPPER was distributed and netdevice
	 * master set. So now before propagating the VLAN events further, we
	 * first need to kill the corresponding VID at the mlxsw_sp_port.
	 *
	 * Note that this doesn't need to be rolled back on failure -- if the
	 * replay fails, the enslavement is off, and the VIDs would be killed by
	 * LAG anyway as part of its rollback.
	 */
	if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) {
		u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid;

		err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid);
		if (err)
			goto out;
	}

	++rso->done;
	err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL,
				    port_obj_info->obj, extack);

out:
	return notifier_from_errno(err);
}

static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs,
};
 446
/* Rollback counterpart of the replay callback: delete the first
 * rso->done objects that were added, then stop the walk.
 */
static int
mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;

	rso = (void *)port_obj_info->info.ctx;

	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		return NOTIFY_DONE;
	/* Stop once all objects the replay managed to add are removed. */
	if (!rso->done--)
		return NOTIFY_STOP;

	mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL,
			      port_obj_info->obj);
	return NOTIFY_DONE;
}

static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs,
};
 471
/* Allocate bridge-port state for @brport_dev, link it to the bridge
 * device and mark the port as offloaded towards the bridge driver.
 * Returns the new port (ref_count == 1) or an ERR_PTR().
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return ERR_PTR(-ENOMEM);

	/* NOTE(review): the result of mlxsw_sp_port_dev_lower_find() is
	 * dereferenced without a NULL check — callers are assumed to only
	 * reach here with an mlxsw port below @brport_dev; verify.
	 */
	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = br_port_get_stp_state(brport_dev);
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
					    NULL, NULL, NULL, false, extack);
	if (err)
		goto err_switchdev_offload;

	return bridge_port;

err_switchdev_offload:
	list_del(&bridge_port->list);
	kfree(bridge_port);
	return ERR_PTR(err);
}
 512
/* Counterpart of mlxsw_sp_bridge_port_create(); all VLANs must be gone. */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
 521
/* Return a referenced bridge port for @brport_dev, creating both the
 * port and (if needed) its bridge device on first use. Returns an
 * ERR_PTR() on failure.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
						  extack);
	if (IS_ERR(bridge_port)) {
		err = PTR_ERR(bridge_port);
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	/* Drop the bridge device if we were the one that created it. */
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
 555
 556static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
 557				     struct mlxsw_sp_bridge_port *bridge_port)
 558{
 559	struct mlxsw_sp_bridge_device *bridge_device;
 560
 561	if (--bridge_port->ref_count != 0)
 562		return;
 563	bridge_device = bridge_port->bridge_device;
 564	mlxsw_sp_bridge_port_destroy(bridge_port);
 565	mlxsw_sp_bridge_device_put(bridge, bridge_device);
 566}
 567
 568static struct mlxsw_sp_port_vlan *
 569mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
 570				  const struct mlxsw_sp_bridge_device *
 571				  bridge_device,
 572				  u16 vid)
 573{
 574	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 575
 576	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
 577			    list) {
 578		if (!mlxsw_sp_port_vlan->bridge_port)
 579			continue;
 580		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
 581		    bridge_device)
 582			continue;
 583		if (bridge_device->vlan_enabled &&
 584		    mlxsw_sp_port_vlan->vid != vid)
 585			continue;
 586		return mlxsw_sp_port_vlan;
 587	}
 588
 589	return NULL;
 590}
 591
 592static struct mlxsw_sp_port_vlan*
 593mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
 594			       u16 fid_index)
 595{
 596	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 597
 598	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
 599			    list) {
 600		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
 601
 602		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
 603			return mlxsw_sp_port_vlan;
 604	}
 605
 606	return NULL;
 607}
 608
 609static struct mlxsw_sp_bridge_vlan *
 610mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
 611			  u16 vid)
 612{
 613	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 614
 615	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
 616		if (bridge_vlan->vid == vid)
 617			return bridge_vlan;
 618	}
 619
 620	return NULL;
 621}
 622
/* Allocate a bridge VLAN and link it to @bridge_port. Returns NULL on
 * allocation failure.
 */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
	if (!bridge_vlan)
		return NULL;

	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
	bridge_vlan->vid = vid;
	list_add(&bridge_vlan->list, &bridge_port->vlans_list);

	return bridge_vlan;
}
 638
/* Unlink and free a bridge VLAN; its port-VLAN list must be empty. */
static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}
 646
 647static struct mlxsw_sp_bridge_vlan *
 648mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 649{
 650	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 651
 652	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
 653	if (bridge_vlan)
 654		return bridge_vlan;
 655
 656	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
 657}
 658
/* Destroy the bridge VLAN once no port VLAN is attached to it. */
static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	if (list_empty(&bridge_vlan->port_vlan_list))
		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}
 664
/* Program the STP state of this port's VID on @bridge_vlan, if the
 * port is a member; otherwise do nothing and return 0.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}
 682
/* Apply a new STP state to all VLANs of @orig_dev on this port,
 * restoring the previous state on failure.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Roll back already-updated VLANs to the previous state. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
 717
/* Update flood table membership of @packet_type for this port's VID on
 * @bridge_vlan, if the port is a member; otherwise return 0.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}
 738
/* Update flood table membership of @packet_type for all VLANs of the
 * bridge port on this port, rolling back on failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	/* Restore the inverse membership on already-updated VLANs. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
 766
 767static int
 768mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
 769				enum mlxsw_sp_flood_type packet_type,
 770				bool member)
 771{
 772	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 773	int err;
 774
 775	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
 776			    bridge_vlan_node) {
 777		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
 778
 779		err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
 780					     packet_type, local_port, member);
 781		if (err)
 782			goto err_fid_flood_set;
 783	}
 784
 785	return 0;
 786
 787err_fid_flood_set:
 788	list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
 789					     &bridge_vlan->port_vlan_list,
 790					     list) {
 791		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
 792
 793		mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
 794				       local_port, !member);
 795	}
 796
 797	return err;
 798}
 799
/* Update flood table membership of @packet_type for all VLANs (and all
 * their member ports) of the bridge port, rolling back on failure.
 */
static int
mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
				      enum mlxsw_sp_flood_type packet_type,
				      bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						      member);
		if (err)
			goto err_bridge_vlans_flood_set;
	}

	return 0;

err_bridge_vlans_flood_set:
	/* Restore the inverse membership on already-updated VLANs. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						!member);
	return err;
}
 824
/* Enable/disable learning for this port's VID on @bridge_vlan, if the
 * port is a member; otherwise do nothing and return 0.
 */
static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}
 842
 843static int
 844mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
 845				  struct mlxsw_sp_bridge_port *bridge_port,
 846				  bool set)
 847{
 848	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 849	int err;
 850
 851	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
 852		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
 853							     bridge_vlan, set);
 854		if (err)
 855			goto err_port_bridge_vlan_learning_set;
 856	}
 857
 858	return 0;
 859
 860err_port_bridge_vlan_learning_set:
 861	list_for_each_entry_continue_reverse(bridge_vlan,
 862					     &bridge_port->vlans_list, list)
 863		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
 864						       bridge_vlan, !set);
 865	return err;
 866}
 867
 868static int
 869mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 870				    const struct net_device *orig_dev,
 871				    struct switchdev_brport_flags flags,
 872				    struct netlink_ext_ack *extack)
 873{
 874	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
 875			   BR_PORT_LOCKED | BR_PORT_MAB)) {
 876		NL_SET_ERR_MSG_MOD(extack, "Unsupported bridge port flag");
 877		return -EINVAL;
 878	}
 879
 880	if ((flags.mask & BR_PORT_LOCKED) && is_vlan_dev(orig_dev)) {
 881		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a VLAN upper");
 882		return -EINVAL;
 883	}
 884
 885	if ((flags.mask & BR_PORT_LOCKED) && vlan_uses_dev(orig_dev)) {
 886		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a bridge port that has VLAN uppers");
 887		return -EINVAL;
 888	}
 889
 890	return 0;
 891}
 892
/* Commit bridge port flags previously validated by
 * mlxsw_sp_port_attr_br_pre_flags_set(): program UC flooding, learning,
 * port security (locked) and - when multicast snooping is disabled -
 * MC flooding, then cache the new flag values on the bridge port.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct net_device *orig_dev,
					   struct switchdev_brport_flags flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* The bridge port may be unknown, e.g. if enslavement previously
	 * failed but this deferred operation still runs - then do nothing.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	if (flags.mask & BR_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_UC,
							   flags.val & BR_FLOOD);
		if (err)
			return err;
	}

	if (flags.mask & BR_LEARNING) {
		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
							bridge_port,
							flags.val & BR_LEARNING);
		if (err)
			return err;
	}

	if (flags.mask & BR_PORT_LOCKED) {
		err = mlxsw_sp_port_security_set(mlxsw_sp_port,
						 flags.val & BR_PORT_LOCKED);
		if (err)
			return err;
	}

	/* With multicast snooping enabled, MC flooding is governed by the
	 * mrouter state (see mlxsw_sp_mc_flood()), so BR_MCAST_FLOOD is only
	 * programmed when snooping is disabled.
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	if (flags.mask & BR_MCAST_FLOOD) {
		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   MLXSW_SP_FLOOD_TYPE_MC,
							   flags.val & BR_MCAST_FLOOD);
		if (err)
			return err;
	}

out:
	/* Cache the flags; later joins and mlxsw_sp_mc_flood() consult them. */
	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
	return 0;
}
 945
 946static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
 947{
 948	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
 949	int err;
 950
 951	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
 952	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
 953	if (err)
 954		return err;
 955	mlxsw_sp->bridge->ageing_time = ageing_time;
 956	return 0;
 957}
 958
 959static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
 960					    unsigned long ageing_clock_t)
 961{
 962	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 963	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
 964	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
 965
 966	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
 967	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
 968		return -ERANGE;
 969
 970	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
 971}
 972
 973static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
 974					  struct net_device *orig_dev,
 975					  bool vlan_enabled)
 976{
 977	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 978	struct mlxsw_sp_bridge_device *bridge_device;
 979
 980	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
 981	if (WARN_ON(!bridge_device))
 982		return -EINVAL;
 983
 984	if (bridge_device->vlan_enabled == vlan_enabled)
 985		return 0;
 986
 987	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
 988	return -EINVAL;
 989}
 990
 991static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
 992						struct net_device *orig_dev,
 993						u16 vlan_proto)
 994{
 995	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 996	struct mlxsw_sp_bridge_device *bridge_device;
 997
 998	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
 999	if (WARN_ON(!bridge_device))
1000		return -EINVAL;
1001
1002	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
1003	return -EINVAL;
1004}
1005
1006static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1007					  struct net_device *orig_dev,
1008					  bool is_port_mrouter)
1009{
1010	struct mlxsw_sp_bridge_port *bridge_port;
1011	int err;
1012
1013	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
1014						orig_dev);
1015	if (!bridge_port)
1016		return 0;
1017
1018	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
1019					 is_port_mrouter);
1020
1021	if (!bridge_port->bridge_device->multicast_enabled)
1022		goto out;
1023
1024	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
1025						   MLXSW_SP_FLOOD_TYPE_MC,
1026						   is_port_mrouter);
1027	if (err)
1028		return err;
1029
1030out:
1031	bridge_port->mrouter = is_port_mrouter;
1032	return 0;
1033}
1034
1035static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
1036{
1037	const struct mlxsw_sp_bridge_device *bridge_device;
1038
1039	bridge_device = bridge_port->bridge_device;
1040	return bridge_device->multicast_enabled ? bridge_port->mrouter :
1041					bridge_port->flags & BR_MCAST_FLOOD;
1042}
1043
/* Handle toggling of multicast snooping (IGMP/MLD) on the bridge: sync the
 * MDB entries to the new mode and recompute MC flooding for every bridge
 * port. On failure, all already-updated ports and the MDB sync are rolled
 * back and the cached state is restored.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* No-op if the requested mode is already in effect. */
	if (bridge_device->multicast_enabled == !mc_disabled)
		return 0;

	/* Update the cached state first; mlxsw_sp_mc_flood() below depends
	 * on it when computing each port's flood membership.
	 */
	bridge_device->multicast_enabled = !mc_disabled;
	err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
						 !mc_disabled);
	if (err)
		goto err_mc_enable_sync;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
							    packet_type,
							    member);
		if (err)
			goto err_flood_table_set;
	}

	return 0;

err_flood_table_set:
	/* Undo the ports updated so far, then the MDB sync and the cached
	 * state, in reverse order of the forward path.
	 */
	list_for_each_entry_continue_reverse(bridge_port,
					     &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
						      !member);
	}
	mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
					   mc_disabled);
err_mc_enable_sync:
	bridge_device->multicast_enabled = mc_disabled;
	return err;
}
1096
1097static struct mlxsw_sp_mdb_entry_port *
1098mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
1099			       u16 local_port)
1100{
1101	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1102
1103	list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
1104		if (mdb_entry_port->local_port == local_port)
1105			return mdb_entry_port;
1106	}
1107
1108	return NULL;
1109}
1110
/* Take a regular (non-mrouter) reference on @local_port's membership in
 * @mdb_entry, creating the member and programming the PGT entry if needed.
 * Returns the member or an ERR_PTR().
 */
static struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* A member held only by the mrouter role (refcount == 1 and
		 * mrouter set) is not accounted in ports_count; the first
		 * regular reference makes it count.
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count++;

		refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* New member: program the hardware replication entry before
	 * tracking it in software.
	 */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
	mdb_entry->ports_count++;

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1152
/* Drop a regular reference on @local_port's membership in @mdb_entry. When
 * the last reference goes away (or @force is set), the member is removed
 * from the list and the PGT entry is cleared. Mirrors
 * mlxsw_sp_mdb_entry_port_get().
 */
static void
mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port, bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
		/* If only the mrouter reference remains, the port no longer
		 * counts as a regular member - mirror the increment done in
		 * mlxsw_sp_mdb_entry_port_get().
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count--;
		return;
	}

	/* Last reference (or forced removal): untrack and clear hardware. */
	mdb_entry->ports_count--;
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1177
1178static __always_unused struct mlxsw_sp_mdb_entry_port *
1179mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
1180				    struct mlxsw_sp_mdb_entry *mdb_entry,
1181				    u16 local_port)
1182{
1183	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1184	int err;
1185
1186	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
1187	if (mdb_entry_port) {
1188		if (!mdb_entry_port->mrouter)
1189			refcount_inc(&mdb_entry_port->refcount);
1190		return mdb_entry_port;
1191	}
1192
1193	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
1194					  mdb_entry->key.fid, local_port, true);
1195	if (err)
1196		return ERR_PTR(err);
1197
1198	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
1199	if (!mdb_entry_port) {
1200		err = -ENOMEM;
1201		goto err_mdb_entry_port_alloc;
1202	}
1203
1204	mdb_entry_port->local_port = local_port;
1205	refcount_set(&mdb_entry_port->refcount, 1);
1206	mdb_entry_port->mrouter = true;
1207	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
1208
1209	return mdb_entry_port;
1210
1211err_mdb_entry_port_alloc:
1212	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
1213				    mdb_entry->key.fid, local_port, false);
1214	return ERR_PTR(err);
1215}
1216
/* Drop the mrouter reference on @local_port's membership in @mdb_entry.
 * Removes the member and clears the PGT entry when that was the last
 * reference. Counterpart of mlxsw_sp_mdb_entry_mrouter_port_get().
 */
static __always_unused void
mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	/* Nothing to drop if the member holds no mrouter reference. */
	if (!mdb_entry_port->mrouter)
		return;

	mdb_entry_port->mrouter = false;
	if (!refcount_dec_and_test(&mdb_entry_port->refcount))
		return;

	/* Last reference: untrack the member and clear hardware state.
	 * ports_count is untouched - mrouter-only members were never
	 * accounted in it.
	 */
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1240
1241static void
1242mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
1243				   struct mlxsw_sp_bridge_device *bridge_device,
1244				   bool add)
1245{
1246	u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
1247	struct mlxsw_sp_mdb_entry *mdb_entry;
1248
1249	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
1250		if (add)
1251			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
1252							    local_port);
1253		else
1254			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
1255							    local_port);
1256	}
1257}
1258
1259static int
1260mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
1261				  struct net_device *orig_dev,
1262				  bool is_mrouter)
1263{
1264	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1265	struct mlxsw_sp_bridge_device *bridge_device;
1266
1267	/* It's possible we failed to enslave the port, yet this
1268	 * operation is executed due to it being deferred.
1269	 */
1270	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
1271	if (!bridge_device)
1272		return 0;
1273
1274	if (bridge_device->mrouter != is_mrouter)
1275		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
1276						   is_mrouter);
1277	bridge_device->mrouter = is_mrouter;
1278	return 0;
1279}
1280
/* switchdev attr_set entry point: dispatch each supported bridge/port
 * attribute to its handler. Unsupported attributes return -EOPNOTSUPP.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  attr->orig_dev,
							  attr->u.brport_flags,
							  extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
							   attr->orig_dev,
							   attr->u.vlan_protocol);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* NOTE(review): re-resolve SPAN (mirroring) state after any attribute
	 * change, even a failed one - presumably mirroring may depend on the
	 * new bridge configuration; confirm against spectrum_span.
	 */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1343
/* Join a port-VLAN to the FID of its bridge: resolve the FID via the bridge
 * type's ops, program UC/MC flooding according to the bridge port flags,
 * always enable BC flooding, and map {port, vid} to the FID. On success the
 * FID reference is stored in the port-VLAN; on failure everything is undone
 * in reverse order.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	/* The FID lookup/creation is bridge-type specific (802.1Q/D/AD). */
	bridge_device = bridge_port->bridge_device;
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* MC flooding depends on snooping/mrouter state, not just the flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast always floods. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
1394
/* Undo mlxsw_sp_port_vlan_fid_join() in reverse order: unmap {port, vid},
 * clear BC/MC/UC flooding and release the FID reference.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Clear the back-pointer first; teardown proceeds on the local 'fid'. */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
1410
1411static u16
1412mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1413			     u16 vid, bool is_pvid)
1414{
1415	if (is_pvid)
1416		return vid;
1417	else if (mlxsw_sp_port->pvid == vid)
1418		return 0;	/* Dis-allow untagged packets */
1419	else
1420		return mlxsw_sp_port->pvid;
1421}
1422
/* Attach a port-VLAN to its bridge port: join the FID, program learning and
 * STP state for the VID, link the port-VLAN into the bridge VLAN and take a
 * reference on the bridge port. Idempotent for an already-joined port-VLAN.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	/* Get (or create) the bridge VLAN this port-VLAN belongs to. */
	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold a reference on the bridge port for the lifetime of the join;
	 * released in mlxsw_sp_port_vlan_bridge_leave().
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1475
/* Detach a port-VLAN from its bridge port: unlink it from the bridge VLAN,
 * disable learning and STP for the VID, flush FDB (when this was the last
 * port in the bridge VLAN) and MDB entries, leave the FID and drop the
 * bridge port reference taken in mlxsw_sp_port_vlan_bridge_join().
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port;

	/* Only bridge FID types are expected here. */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	/* Record whether we are the last member before unlinking. */
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	/* FDB entries are per {port/LAG, FID}; flush only when no other
	 * port-VLAN of this bridge port still uses the FID.
	 */
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));

	mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
				       mlxsw_sp_fid_index(fid));

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1511
/* Add (or update the flags of) a VLAN on a bridge port: create the port-VLAN
 * if needed, program tagging and PVID, and join the VLAN to the bridge.
 * On failure, PVID and VLAN membership are restored and the port-VLAN is
 * destroyed.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	u16 proto;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	/* Program VLAN membership and untagged state for the VID. */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	/* The PVID is programmed with the bridge's VLAN protocol
	 * (802.1Q or 802.1ad).
	 */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1565
1566static int
1567mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1568			    struct net_device *br_dev,
1569			    const struct switchdev_obj_port_vlan *vlan,
1570			    struct netlink_ext_ack *extack)
1571{
1572	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
1573
1574	return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid,
1575					       flag_pvid, extack);
1576}
1577
/* switchdev handler for adding a VLAN object to a bridge port (or to the
 * bridge device itself, which is only reflected to the router and otherwise
 * reported as not offloaded).
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		/* A VLAN on the bridge device may change the bridge RIF's
		 * PVID; propagate it to the router.
		 */
		if (br_vlan_enabled(orig_dev))
			err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev,
							  vlan, extack);
		/* The VLAN object itself is deliberately not offloaded for
		 * the bridge device; report -EOPNOTSUPP unless the PVID
		 * change already failed.
		 */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* In a VLAN-unaware bridge, VLANs are not offloaded per-port. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
					     vlan->vid, flag_untagged,
					     flag_pvid, extack);
}
1610
1611static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1612{
1613	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1614			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1615}
1616
1617static int
1618mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1619			       struct mlxsw_sp_bridge_port *bridge_port,
1620			       u16 fid_index)
1621{
1622	bool lagged = bridge_port->lagged;
1623	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1624	u16 system_port;
1625
1626	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1627	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1628	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1629	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1630
1631	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1632}
1633
1634static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1635{
1636	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1637			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1638}
1639
1640static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1641{
1642	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1643			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1644}
1645
/* Add or remove a unicast FDB record that forwards to an IPv4 underlay
 * tunnel endpoint, via a single SFD register write.
 */
static int
mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
			     const char *mac, u16 fid, __be32 addr, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	/* SFD is too large for the stack; allocate the payload buffer. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	/* The register expects the underlay IP in host byte order. */
	uip = be32_to_cpu(addr);
	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
				      mlxsw_sp_sfd_rec_policy(dynamic), mac,
				      fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A change in num_rec means the device did not consume the record. */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1676
/* Write (add/remove) a unicast FDB record that forwards to an IPv6 underlay
 * tunnel endpoint; the IPv6 address is referenced indirectly through a KVDL
 * entry (@kvdl_index).
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
						  const char *mac, u16 fid,
						  u32 kvdl_index, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD is too large for the stack; allocate the payload buffer. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A change in num_rec means the device did not consume the record. */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1704
/* Add an IPv6-tunnel unicast FDB entry: allocate a KVDL entry for the IPv6
 * address, write the FDB record that references it, and record the
 * {mac, fid} -> address mapping. Undone in reverse order on failure.
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
					    const char *mac, u16 fid,
					    const struct in6_addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
	if (err)
		return err;

	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
						     kvdl_index, true);
	if (err)
		goto err_sfd_write;

	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
	if (err)
		/* Replace can fail only for creating new mapping, so removing
		 * the FDB entry in the error path is OK.
		 */
		goto err_addr_replace;

	return 0;

err_addr_replace:
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
					       false);
err_sfd_write:
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
	return err;
}
1737
/* Remove an IPv6-tunnel unicast FDB entry, tearing down the address mapping,
 * the FDB record and the KVDL entry in reverse order of
 * mlxsw_sp_port_fdb_tun_uc_op6_add(). The KVDL index is irrelevant for a
 * remove, hence 0.
 */
static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
					     const char *mac, u16 fid,
					     const struct in6_addr *addr)
{
	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
}
1746
1747static int
1748mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1749			     u16 fid, const struct in6_addr *addr, bool adding)
1750{
1751	if (adding)
1752		return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1753							addr);
1754
1755	mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1756	return 0;
1757}
1758
1759static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1760					  const char *mac, u16 fid,
1761					  enum mlxsw_sp_l3proto proto,
1762					  const union mlxsw_sp_l3addr *addr,
1763					  bool adding, bool dynamic)
1764{
1765	switch (proto) {
1766	case MLXSW_SP_L3_PROTO_IPV4:
1767		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
1768						    addr->addr4, adding);
1769	case MLXSW_SP_L3_PROTO_IPV6:
1770		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
1771						    &addr->addr6, adding);
1772	default:
1773		WARN_ON(1);
1774		return -EOPNOTSUPP;
1775	}
1776}
1777
/* Add or remove a unicast FDB record for a physical port via a single SFD
 * register write, with caller-supplied action and ageing policy.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				     const char *mac, u16 fid, u16 vid,
				     bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     enum mlxsw_reg_sfd_rec_policy policy)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* SFD is too large for the stack; allocate the payload buffer. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
			      local_port);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A change in num_rec means the device did not consume the record. */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1807
1808static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1809				   const char *mac, u16 fid, u16 vid,
1810				   bool adding, bool dynamic)
1811{
1812	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
1813					 adding, MLXSW_REG_SFD_REC_ACTION_NOP,
1814					 mlxsw_sp_sfd_rec_policy(dynamic));
1815}
1816
/* Add or remove the static FDB entry that traps a router interface's MAC to
 * the IP router. Uses local_port 0 and VID 0 since the entry is per-FID.
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
1824
/* Like __mlxsw_sp_port_fdb_uc_op(), but the FDB record points at a LAG
 * instead of a single local port. @lag_vid identifies the VLAN within the
 * LAG.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* Heap-allocate the SFD register payload. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A record count mismatch after the write means the device did not
	 * commit the record.
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1853
/* Reflect a bridge FDB notification into the device, programming the entry
 * on either the port itself or, if the port is a LAG member, on its LAG.
 * Returns 0 without doing anything when the notification is not relevant to
 * this port.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	/* Port is not a member of the VLAN the entry belongs to - nothing to
	 * offload.
	 */
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index, vid,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
1890
/* Write (or remove) a multicast FDB record to the device, binding the
 * entry's {MAC, FID} key to its PGT MID via the SFD register.
 */
static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
				    const struct mlxsw_sp_mdb_entry *mdb_entry,
				    bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	/* Heap-allocate the SFD register payload. */
	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
			      mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
			      mdb_entry->mid);
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	/* A record count mismatch after the write means the device did not
	 * commit the record.
	 */
	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1919
/* Set the bits of all local ports represented by @bridge_port in @ports_bm:
 * a single bit for a regular port, or one bit per current member for a LAG
 * port.
 */
static void
mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_bridge_port *bridge_port,
				      struct mlxsw_sp_ports_bitmap *ports_bm)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u64 max_lag_members, i;
	int lag_id;

	if (!bridge_port->lagged) {
		set_bit(bridge_port->system_port, ports_bm->bitmap);
	} else {
		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
						     MAX_LAG_MEMBERS);
		lag_id = bridge_port->lag_id;
		/* Walk all possible member slots; unoccupied ones return
		 * NULL.
		 */
		for (i = 0; i < max_lag_members; i++) {
			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
								 lag_id, i);
			if (mlxsw_sp_port)
				set_bit(mlxsw_sp_port->local_port,
					ports_bm->bitmap);
		}
	}
}
1944
1945static void
1946mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
1947				struct mlxsw_sp_bridge_device *bridge_device,
1948				struct mlxsw_sp *mlxsw_sp)
1949{
1950	struct mlxsw_sp_bridge_port *bridge_port;
1951
1952	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1953		if (bridge_port->mrouter) {
1954			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1955							      bridge_port,
1956							      flood_bm);
1957		}
1958	}
1959}
1960
/* Take an mrouter reference on @mdb_entry for every port set in @ports_bm.
 * On failure, the references taken so far are released before returning the
 * error.
 */
static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ports_bitmap *ports_bm,
					struct mlxsw_sp_mdb_entry *mdb_entry)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	unsigned int nbits = ports_bm->nbits;
	int i;

	for_each_set_bit(i, ports_bm->bitmap, nbits) {
		mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
								     mdb_entry,
								     i);
		if (IS_ERR(mdb_entry_port)) {
			/* Limit the rollback loop below to the ports that
			 * were successfully added (bits before bit i).
			 */
			nbits = i;
			goto err_mrouter_port_get;
		}
	}

	return 0;

err_mrouter_port_get:
	for_each_set_bit(i, ports_bm->bitmap, nbits)
		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
	return PTR_ERR(mdb_entry_port);
}
1986
1987static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
1988					 struct mlxsw_sp_ports_bitmap *ports_bm,
1989					 struct mlxsw_sp_mdb_entry *mdb_entry)
1990{
1991	int i;
1992
1993	for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
1994		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
1995}
1996
/* Add (@add = true) or remove all of the bridge's current mrouter ports
 * on @mdb_entry, so the entry also forwards towards multicast router ports.
 */
static int
mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_bridge_device *bridge_device,
			     struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
{
	struct mlxsw_sp_ports_bitmap ports_bm;
	int err;

	err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
	if (err)
		return err;

	mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);

	/* Only the add path can fail; the del path always succeeds and
	 * leaves err at 0.
	 */
	if (add)
		err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
						   mdb_entry);
	else
		mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);

	mlxsw_sp_port_bitmap_fini(&ports_bm);
	return err;
}
2020
/* Allocate and install a new MDB entry for {@addr, @fid}: reserve a PGT MID,
 * populate it with the bridge's mrouter ports and the requesting
 * @local_port, write the entry to the device (only while multicast snooping
 * is enabled) and link it into the bridge's hash table and list.
 * Errors unwind the steps in reverse order.
 */
static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   const unsigned char *addr, u16 fid, u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
	if (!mdb_entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(mdb_entry->key.addr, addr);
	mdb_entry->key.fid = fid;
	err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
	if (err)
		goto err_pgt_mid_alloc;

	INIT_LIST_HEAD(&mdb_entry->ports_list);

	err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
					   true);
	if (err)
		goto err_mdb_mrouters_set;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
						     local_port);
	if (IS_ERR(mdb_entry_port)) {
		err = PTR_ERR(mdb_entry_port);
		goto err_mdb_entry_port_get;
	}

	/* With multicast snooping disabled, the entry is kept in software
	 * only and written to the device when snooping is enabled.
	 */
	if (bridge_device->multicast_enabled) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
		if (err)
			goto err_mdb_entry_write;
	}

	err = rhashtable_insert_fast(&bridge_device->mdb_ht,
				     &mdb_entry->ht_node,
				     mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);

	return mdb_entry;

err_rhashtable_insert:
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
err_mdb_entry_write:
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
err_mdb_entry_port_get:
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
err_mdb_mrouters_set:
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
err_pgt_mid_alloc:
	kfree(mdb_entry);
	return ERR_PTR(err);
}
2083
/* Destroy an MDB entry, unwinding mlxsw_sp_mc_mdb_entry_init() in reverse.
 * Note that the entry is removed from the device before its PGT MID is
 * freed, so the hardware never sees an MDB entry pointing at a freed PGT
 * entry.
 */
static void
mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_mdb_entry *mdb_entry,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   u16 local_port, bool force)
{
	list_del(&mdb_entry->list);
	rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
			       mlxsw_sp_mdb_ht_params);
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
	/* All port references should be gone by now. */
	WARN_ON(!list_empty(&mdb_entry->ports_list));
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
	kfree(mdb_entry);
}
2101
2102static struct mlxsw_sp_mdb_entry *
2103mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
2104			  struct mlxsw_sp_bridge_device *bridge_device,
2105			  const unsigned char *addr, u16 fid, u16 local_port)
2106{
2107	struct mlxsw_sp_mdb_entry_key key = {};
2108	struct mlxsw_sp_mdb_entry *mdb_entry;
2109
2110	ether_addr_copy(key.addr, addr);
2111	key.fid = fid;
2112	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
2113					   mlxsw_sp_mdb_ht_params);
2114	if (mdb_entry) {
2115		struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
2116
2117		mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
2118							     mdb_entry,
2119							     local_port);
2120		if (IS_ERR(mdb_entry_port))
2121			return ERR_CAST(mdb_entry_port);
2122
2123		return mdb_entry;
2124	}
2125
2126	return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
2127					  local_port);
2128}
2129
/* Decide whether dropping @removed_entry_port's reference should destroy the
 * whole MDB entry, i.e. whether this port is the last user of the entry.
 */
static bool
mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
			     struct mlxsw_sp_mdb_entry_port *removed_entry_port,
			     bool force)
{
	/* Other ports still reference the entry. */
	if (mdb_entry->ports_count > 1)
		return false;

	/* Forced removal (flush): destroy regardless of remaining refs. */
	if (force)
		return true;

	/* The last port still holds additional references of its own. */
	if (!removed_entry_port->mrouter &&
	    refcount_read(&removed_entry_port->refcount) > 1)
		return false;

	/* An mrouter port holds one extra reference for its mrouter role, so
	 * tolerate one more reference before considering the entry in use.
	 */
	if (removed_entry_port->mrouter &&
	    refcount_read(&removed_entry_port->refcount) > 2)
		return false;

	return true;
}
2151
/* Drop @local_port's reference on @mdb_entry, destroying the entry when the
 * last user goes away.
 */
static void
mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_bridge_device *bridge_device,
			  struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
			  bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	/* Avoid a temporary situation in which the MDB entry points to an empty
	 * PGT entry, as otherwise packets will be temporarily dropped instead
	 * of being flooded. Instead, in this situation, call
	 * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
	 * then releases the PGT entry.
	 */
	if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
		mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
					   local_port, force);
	else
		mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
					    force);
}
2177
2178static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
2179				 const struct switchdev_obj_port_mdb *mdb)
2180{
2181	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2182	struct net_device *orig_dev = mdb->obj.orig_dev;
2183	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2184	struct mlxsw_sp_bridge_device *bridge_device;
2185	struct mlxsw_sp_bridge_port *bridge_port;
2186	struct mlxsw_sp_mdb_entry *mdb_entry;
2187	u16 fid_index;
2188
2189	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
2190	if (!bridge_port)
2191		return 0;
2192
2193	bridge_device = bridge_port->bridge_device;
2194	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
2195							       bridge_device,
2196							       mdb->vid);
2197	if (!mlxsw_sp_port_vlan)
2198		return 0;
2199
2200	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
2201
2202	mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
2203					      mdb->addr, fid_index,
2204					      mlxsw_sp_port->local_port);
2205	if (IS_ERR(mdb_entry))
2206		return PTR_ERR(mdb_entry);
2207
2208	return 0;
2209}
2210
/* Write all of the bridge's MDB entries to the device (or remove them) when
 * multicast snooping is toggled. On failure, entries written so far are
 * rolled back to their previous state.
 */
static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool mc_enabled)
{
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
		if (err)
			goto err_mdb_entry_write;
	}
	return 0;

err_mdb_entry_write:
	/* Undo only the entries processed before the failing one. */
	list_for_each_entry_continue_reverse(mdb_entry,
					     &bridge_device->mdb_list, list)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
	return err;
}
2232
/* Add or remove the port as an mrouter port on every MDB entry of the
 * bridge, in response to a change in the port's mrouter state.
 */
static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;

	bridge_device = bridge_port->bridge_device;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		if (add)
			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
							    local_port);
		else
			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
							    local_port);
	}
}
2254
/* switchdev callback: offload addition of a port object (VLAN or MDB
 * entry) to the device.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);

		/* The event is emitted before the changes are actually
		 * applied to the bridge. Therefore schedule the respin
		 * call for later, so that the respin logic sees the
		 * updated bridge state.
		 */
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		/* Other object types are not offloaded. */
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
2287
/* Remove @vid from a bridge port: take the port-VLAN out of the bridge,
 * restore the PVID, drop the VLAN from the port and destroy the port-VLAN.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* If the deleted VLAN is the current PVID, the port is left with no
	 * PVID; otherwise the PVID is unchanged.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 proto;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
2306
/* Offload deletion of a VLAN from a bridge port. */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	/* VLANs on the bridge device itself are not offloaded. */
	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* Nothing to do for a VLAN-unaware bridge. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);

	return 0;
}
2328
/* Offload a SWITCHDEV_OBJ_ID_PORT_MDB deletion: drop this port's reference
 * on the matching MDB entry. Notifications not relevant to this port are
 * silently ignored.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry_key key = {};
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	/* MDB entries are keyed by {MAC, FID}. */
	ether_addr_copy(key.addr, mdb->addr);
	key.fid = fid_index;
	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
					   mlxsw_sp_mdb_ht_params);
	if (!mdb_entry) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
				  mlxsw_sp_port->local_port, false);
	return 0;
}
2368
/* Remove the port from every MDB entry of @fid_index, forcing entry
 * destruction regardless of remaining references. The port's mrouter
 * reference, if any, is dropped first.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
	u16 local_port = mlxsw_sp_port->local_port;

	bridge_device = bridge_port->bridge_device;

	/* _safe iteration - entries may be destroyed while walking. */
	list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
				 list) {
		if (mdb_entry->key.fid != fid_index)
			continue;

		if (bridge_port->mrouter)
			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
							    mdb_entry,
							    local_port);

		mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
					  local_port, true);
	}
}
2395
/* switchdev callback: offload deletion of a port object (VLAN or MDB
 * entry) from the device.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		/* Other object types are not offloaded. */
		err = -EOPNOTSUPP;
		break;
	}

	/* Re-evaluate mirroring sessions after the bridge change. */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
2420
2421static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
2422						   u16 lag_id)
2423{
2424	struct mlxsw_sp_port *mlxsw_sp_port;
2425	u64 max_lag_members;
2426	int i;
2427
2428	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
2429					     MAX_LAG_MEMBERS);
2430	for (i = 0; i < max_lag_members; i++) {
2431		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2432		if (mlxsw_sp_port)
2433			return mlxsw_sp_port;
2434	}
2435	return NULL;
2436}
2437
/* Replay the bridge port's switchdev objects onto @mlxsw_sp_port when it
 * joins the bridge. On failure, already replayed objects are un-replayed
 * with the unreplay notifier.
 */
static int
mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = {
		.brport_dev = bridge_port->dev,
		.mlxsw_sp_port = mlxsw_sp_port,
	};
	struct notifier_block *nb;
	int err;

	nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb;
	err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
					   &rso, NULL, nb, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb;
	switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
				     &rso, NULL, nb, extack);
	return err;
}
2464
/* Common join logic for VLAN-aware (802.1Q / 802.1AD) bridges. */
static int
mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netlink_ext_ack *extack)
{
	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
}
2481
/* 802.1Q bridge ops: join is exactly the common VLAN-aware join. */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						    extack);
}
2491
/* Common leave logic for VLAN-aware bridges: restore the default PVID. */
static void
mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
2499
/* 802.1Q bridge ops: leave is exactly the common VLAN-aware leave. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
2507
/* Join a VxLAN device to a VLAN-aware bridge: resolve the VLAN mapped to
 * the VxLAN device if needed, take the corresponding 802.1Q FID and bind
 * the device's VNI to it.
 */
static int
mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				      const struct net_device *vxlan_dev,
				      u16 vid, u16 ethertype,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
		.ethertype = ethertype,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
		return PTR_ERR(fid);
	}

	/* A FID can be bound to at most one VNI at a time. */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2558
/* 802.1Q bridge ops: VxLAN join with an 802.1Q EtherType. */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021Q, extack);
}
2567
2568static struct net_device *
2569mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2570{
2571	struct net_device *dev;
2572	struct list_head *iter;
2573
2574	netdev_for_each_lower_dev(br_dev, dev, iter) {
2575		u16 pvid;
2576		int err;
2577
2578		if (!netif_is_vxlan(dev))
2579			continue;
2580
2581		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2582		if (err || pvid != vid)
2583			continue;
2584
2585		return dev;
2586	}
2587
2588	return NULL;
2589}
2590
/* 802.1Q bridge ops: a VLAN maps directly to an 802.1Q FID. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
2599
/* 802.1Q bridge ops: look up the existing 802.1Q FID of @vid, if any. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
2608
/* 802.1Q bridge ops: the VID of a FID is the VLAN it represents. */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2615
/* Bridge ops for VLAN-aware (802.1Q) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2624
2625static bool
2626mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2627			   const struct net_device *br_dev)
2628{
2629	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2630
2631	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2632			    list) {
2633		if (mlxsw_sp_port_vlan->bridge_port &&
2634		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2635		    br_dev)
2636			return true;
2637	}
2638
2639	return false;
2640}
2641
/* Join a port (or a VLAN upper of a port) to a VLAN-unaware bridge. The
 * joining VID is the VLAN upper's VID, or the default VID when the port
 * itself is enslaved.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;
	int err;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	return err;
}
2682
/* Remove a port (or its VLAN upper) from a VLAN-unaware bridge. */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	/* The port-VLAN may already be gone or never have joined. */
	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
2699
2700static int
2701mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2702				 const struct net_device *vxlan_dev, u16 vid,
2703				 struct netlink_ext_ack *extack)
2704{
2705	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2706	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2707	struct mlxsw_sp_nve_params params = {
2708		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2709		.vni = vxlan->cfg.vni,
2710		.dev = vxlan_dev,
2711		.ethertype = ETH_P_8021Q,
2712	};
2713	struct mlxsw_sp_fid *fid;
2714	int err;
2715
2716	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2717	if (IS_ERR(fid)) {
2718		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2719		return -EINVAL;
2720	}
2721
2722	if (mlxsw_sp_fid_vni_is_set(fid)) {
2723		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2724		err = -EINVAL;
2725		goto err_vni_exists;
2726	}
2727
2728	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2729	if (err)
2730		goto err_nve_fid_enable;
2731
2732	return 0;
2733
2734err_nve_fid_enable:
2735err_vni_exists:
2736	mlxsw_sp_fid_put(fid);
2737	return err;
2738}
2739
/* 802.1D bridge ops: the whole bridge maps to a single FID keyed by the
 * bridge device's ifindex; @vid is ignored.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
2748
/* 802.1D bridge ops: look up the bridge's existing FID by ifindex. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
	if (vid)
		return NULL;

	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
}
2761
/* 802.1D bridge ops: a VLAN-unaware bridge has no VIDs, so always 0. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2768
/* Bridge ops for VLAN-unaware (802.1D) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2777
/* Join a port to an 802.1AD bridge: switch the port's VLAN classification
 * first, then perform the common VLAN-aware join. Classification is
 * restored on failure.
 */
static int
mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
	if (err)
		return err;

	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						   extack);
	if (err)
		goto err_bridge_vlan_aware_port_join;

	return 0;

err_bridge_vlan_aware_port_join:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	return err;
}
2801
/* Leave an 802.1AD bridge: common VLAN-aware leave, then restore the
 * port's default VLAN classification.
 */
static void
mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
}
2810
/* 802.1AD bridge ops: VxLAN join with an 802.1AD EtherType. */
static int
mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021AD, extack);
}
2819
/* Bridge ops for 802.1AD bridges on Spectrum-1; FID handling is shared
 * with the 802.1Q (VLAN-aware) implementation.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2828
2829static int
2830mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
2831				  struct mlxsw_sp_bridge_port *bridge_port,
2832				  struct mlxsw_sp_port *mlxsw_sp_port,
2833				  struct netlink_ext_ack *extack)
2834{
2835	int err;
2836
2837	/* The EtherType of decapsulated packets is determined at the egress
2838	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
2839	 * co-exist.
2840	 */
2841	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
2842	if (err)
2843		return err;
2844
2845	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
2846					       mlxsw_sp_port, extack);
2847	if (err)
2848		goto err_bridge_8021ad_port_join;
2849
2850	return 0;
2851
2852err_bridge_8021ad_port_join:
2853	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
2854	return err;
2855}
2856
/* Undo mlxsw_sp2_bridge_8021ad_port_join() in reverse order: common 802.1ad
 * leave first, then restore the default (802.1q) egress EtherType.
 */
static void
mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				   struct mlxsw_sp_bridge_port *bridge_port,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
					  mlxsw_sp_port);
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
}
2866
/* 802.1ad bridge ops for Spectrum-2 and later: same as Spectrum-1 except
 * for the egress-EtherType handling in join/leave.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2875
/* Enslave 'brport_dev' (the port or an upper of it) to bridge 'br_dev' on
 * behalf of 'mlxsw_sp_port'. Takes a reference on the bridge port object;
 * released by mlxsw_sp_port_bridge_leave() or on error.
 *
 * Returns 0 on success or a negative errno.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* Looks up or creates the bridge port; holds a reference on it. */
	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
					       extack);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	/* Delegate to the 802.1q / 802.1d / 802.1ad specific join. */
	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	/* Replay configuration (e.g., addresses) from the bridge device. */
	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
2910
/* Reverse of mlxsw_sp_port_bridge_join(): detach 'brport_dev' from
 * 'br_dev', disable port security and drop the bridge port reference.
 * Silently returns if the bridge or bridge port is unknown.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_port_security_set(mlxsw_sp_port, false);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2931
/* Attach 'vxlan_dev' to bridge 'br_dev' for VLAN 'vid' by delegating to the
 * bridge-type specific vxlan_join op. The bridge is expected to be known to
 * the driver; a missing bridge device is a driver bug (hence the WARN_ON).
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2946
/* Detach 'vxlan_dev' from its FID: disable NVE on the FID looked up by the
 * device's VNI. The two fid_put() calls are intentional - see comment below.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop both the reference we just took during lookup and the reference
	 * the VXLAN device took.
	 */
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
}
2965
2966static void
2967mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2968				      enum mlxsw_sp_l3proto *proto,
2969				      union mlxsw_sp_l3addr *addr)
2970{
2971	if (vxlan_addr->sa.sa_family == AF_INET) {
2972		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2973		*proto = MLXSW_SP_L3_PROTO_IPV4;
2974	} else {
2975		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2976		*proto = MLXSW_SP_L3_PROTO_IPV6;
2977	}
2978}
2979
2980static void
2981mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2982				      const union mlxsw_sp_l3addr *addr,
2983				      union vxlan_addr *vxlan_addr)
2984{
2985	switch (proto) {
2986	case MLXSW_SP_L3_PROTO_IPV4:
2987		vxlan_addr->sa.sa_family = AF_INET;
2988		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2989		break;
2990	case MLXSW_SP_L3_PROTO_IPV6:
2991		vxlan_addr->sa.sa_family = AF_INET6;
2992		vxlan_addr->sin6.sin6_addr = addr->addr6;
2993		break;
2994	}
2995}
2996
/* Notify the VXLAN driver about an FDB entry learned/aged-out by the device,
 * so the software FDB stays in sync with the hardware one.
 */
static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
					      const char *mac,
					      enum mlxsw_sp_l3proto proto,
					      union mlxsw_sp_l3addr *addr,
					      __be32 vni, bool adding)
{
	struct switchdev_notifier_vxlan_fdb_info info;
	struct vxlan_dev *vxlan = netdev_priv(dev);
	enum switchdev_notifier_type type;

	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
	info.remote_port = vxlan->cfg.dst_port;
	info.remote_vni = vni;
	info.remote_ifindex = 0;
	ether_addr_copy(info.eth_addr, mac);
	info.vni = vni;
	/* Entry learned by the device is by definition offloaded. */
	info.offloaded = adding;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}
3018
3019static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
3020					    const char *mac,
3021					    enum mlxsw_sp_l3proto proto,
3022					    union mlxsw_sp_l3addr *addr,
3023					    __be32 vni,
3024					    bool adding)
3025{
3026	if (netif_is_vxlan(dev))
3027		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
3028						  adding);
3029}
3030
3031static void
3032mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
3033			    const char *mac, u16 vid,
3034			    struct net_device *dev, bool offloaded, bool locked)
3035{
3036	struct switchdev_notifier_fdb_info info = {};
3037
3038	info.addr = mac;
3039	info.vid = vid;
3040	info.offloaded = offloaded;
3041	info.locked = locked;
3042	call_switchdev_notifiers(type, dev, &info.info, NULL);
3043}
3044
/* Process a learned/aged-out MAC record from the SFN register for a
 * non-LAG port: refresh/remove the hardware FDB entry and notify the
 * bridge. If the record cannot be matched to a known {port, FID}, the
 * entry is removed from the device without notification ('just_remove'),
 * otherwise the device keeps re-reporting it.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port, vid, fid, evid = 0;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	evid = mlxsw_sp_port_vlan->vid;

	/* On a locked (security-enabled) port, do not install the entry;
	 * only inform the bridge so it can create a locked FDB entry.
	 */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Remove the entry silently so the device stops re-reporting it. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3112
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): the record carries a
 * LAG ID instead of a local port; a representor port is used for lookups
 * and the FDB operation is programmed against the LAG.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges report VID 0 to the bridge layer. */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

	/* On a locked (security-enabled) port, do not install the entry;
	 * only inform the bridge so it can create a locked FDB entry.
	 */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Remove the entry silently so the device stops re-reporting it. */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3179
/* Resolve the NVE device, bridge VID and VNI for a unicast tunnel FDB
 * record on 'fid'. Validates that learning is allowed on the NVE device
 * and its bridge port when 'adding'. On success fills 'nve_dev', 'p_vid'
 * and 'p_vni'; returns 0 or a negative errno.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Do not learn if the bridge port has learning disabled. */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* Likewise if the VXLAN device itself has learning disabled. */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
3230
/* Process a learned/aged-out unicast tunnel record from the SFN register:
 * refresh/remove the tunnel FDB entry and notify both the NVE (VXLAN)
 * driver and the bridge. On any failure, the entry is removed from the
 * device so it does not keep re-reporting the same notification.
 */
static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
						      char *sfn_pl,
						      int rec_index,
						      bool adding)
{
	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
	enum switchdev_notifier_type type;
	struct net_device *nve_dev;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	char mac[ETH_ALEN];
	u16 fid_index, vid;
	__be32 vni;
	u32 uip;
	int err;

	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
				       &uip, &sfn_proto);

	/* Takes a reference on the FID; dropped on all paths below. */
	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
	if (!fid)
		goto err_fid_lookup;

	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
					      (enum mlxsw_sp_l3proto) sfn_proto,
					      &addr);
	if (err)
		goto err_ip_resolve;

	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
							  &nve_dev, &vid, &vni);
	if (err)
		goto err_fdb_process;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
					     (enum mlxsw_sp_l3proto) sfn_proto,
					     &addr, adding, true);
	if (err)
		goto err_fdb_op;

	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
					(enum mlxsw_sp_l3proto) sfn_proto,
					&addr, vni, adding);

	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding, false);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_op:
err_fdb_process:
err_ip_resolve:
	mlxsw_sp_fid_put(fid);
err_fid_lookup:
	/* Remove an FDB entry in case we cannot process it. Otherwise the
	 * device will keep sending the same notification over and over again.
	 */
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
				       false, true);
}
3295
/* Dispatch one SFN record to the proper handler; 'true'/'false' selects
 * learned vs. aged-out. Unknown record types are silently ignored.
 */
static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index)
{
	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
						rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
						    rec_index, false);
		break;
	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, true);
		break;
	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
							  rec_index, false);
		break;
	}
}
3326
3327#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
3328
/* Delayed work that polls the device's FDB notification (SFN) register.
 * Runs under RTNL. Performs up to MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION
 * queries per invocation, and reschedules itself as long as at least one
 * bridge exists. If the last query returned a full page ('queries' hit 0),
 * reschedule without delay to drain the backlog.
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	bool reschedule = false;
	char *sfn_pl;
	int queries;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	/* No bridges - nothing to poll and no need to reschedule. */
	if (list_empty(&bridge->bridges_list))
		goto out;
	reschedule = true;
	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
	while (queries > 0) {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			goto out;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
		/* A partial page means the queue is drained. */
		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
			goto out;
		queries--;
	}

out:
	rtnl_unlock();
	kfree(sfn_pl);
	if (!reschedule)
		return;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
3374
/* Deferred-work context for a switchdev FDB event. Which union member is
 * valid depends on 'event' (bridge FDB vs. VXLAN FDB notifications).
 * 'dev' is reference-counted via 'dev_tracker' for the work's lifetime.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	netdevice_tracker dev_tracker;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;
	unsigned long event;
};
3385
/* Handle a bridge FDB add/del event that points at a VXLAN device: look up
 * the matching unicast entry in the VXLAN driver's FDB, program/remove the
 * tunnel FDB entry in the device, and mark the entry (un)offloaded.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Removal is best-effort; the offloaded notification is sent
		 * regardless of the outcome.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
3434
/* Handle a deferred bridge FDB event whose target device is an NVE (VXLAN)
 * device: validate the event and device state, resolve the FID and VNI,
 * and delegate to mlxsw_sp_switchdev_bridge_vxlan_fdb_event().
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only offload entries the user added; local entries stay in SW. */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    (!switchdev_work->fdb_info.added_by_user ||
	     switchdev_work->fdb_info.is_local))
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* Takes a reference on the FID; dropped at 'out'. */
	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
3485
/* Process-context handler for a deferred bridge FDB event. Runs under RTNL.
 * VXLAN targets are handled separately; otherwise the FDB entry is
 * programmed/removed on the matching port and SPAN mirrors are respun.
 * Frees the work item and drops the device reference before returning.
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only offload entries the user added; skip local ones. */
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);
}
3537
/* Handle a VXLAN FDB add event: for the all-zeros MAC, program the remote
 * IP into the FID's flood list; for a unicast MAC, program a tunnel FDB
 * entry - but only if the bridge's own FDB also points the MAC at the
 * VXLAN device. Emits offloaded notifications on success.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* Takes a reference on the FID; dropped on every path below. */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* The all-zeros MAC denotes the default remote - flood destination. */
	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info, NULL);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info, NULL);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true,
				    false);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
3609
/* Handle a VXLAN FDB delete event: undo mlxsw_sp_switchdev_vxlan_fdb_add()
 * for entries previously marked offloaded - remove the flood IP for the
 * all-zeros MAC, or the unicast tunnel FDB entry otherwise.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	/* Nothing to undo if the entry was never offloaded. */
	if (!vxlan_fdb_info->offloaded)
		return;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* Takes a reference on the FID; dropped on every path below. */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* The all-zeros MAC denotes the default remote - flood destination. */
	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false,
				    false);

	mlxsw_sp_fid_put(fid);
}
3655
/* Process-context handler for a deferred VXLAN FDB event. Runs under RTNL;
 * re-validates that the device is up and enslaved to an offloaded bridge
 * (state may have changed since the event fired), then dispatches to the
 * add/del handler. Frees the work item and drops the device reference.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);
}
3691
/* Validate a VXLAN FDB notification against the device's offload
 * limitations and, if acceptable, copy it into the work item. Returns
 * -EOPNOTSUPP (with an extack message) for unsupported configurations.
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;
	struct netlink_ext_ack *extack;

	extack = switchdev_notifier_info_to_extack(info);
	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_vni != cfg->vni ||
	    vxlan_fdb_info->vni != cfg->vni) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
		return -EOPNOTSUPP;
	}
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
		return -EOPNOTSUPP;
	}

	/* Copy by value - the notifier's info is not valid after we return. */
	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}
3733
/* Called under rcu_read_lock() */
/* Switchdev notifier entry point (atomic context). Attribute sets are
 * handled synchronously; FDB events are validated, copied into a work
 * item (which takes a reference on 'dev') and deferred to process context.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* Atomic context - GFP_ATOMIC for all allocations below. */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's MAC buffer is not valid after we return;
		 * keep a private copy, freed by the work handler.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3815
/* Non-blocking (atomic) switchdev notifier. The handler defers actual FDB /
 * VxLAN FDB processing to a work item scheduled via mlxsw_core_schedule_work().
 * Registered in mlxsw_sp_fdb_init().
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3819
/* Handle the addition of VLAN 'vid' (with the given PVID / egress-untagged
 * flags) on a VxLAN device enslaved to a VLAN-aware bridge. NVE can only be
 * enabled on the FID of the VLAN that is both PVID and egress untagged on
 * the VxLAN device, so flag changes may require mapping or unmapping the
 * VNI. Returns 0 on success or a negative errno (with 'extack' set).
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
						      vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		/* Both flags still set would mean the first guard above was
		 * bypassed - should not happen.
		 */
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Roll back to the previously mapped VLAN on failure */
	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);
	return err;
}
3905
3906static void
3907mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3908				  struct mlxsw_sp_bridge_device *bridge_device,
3909				  const struct net_device *vxlan_dev, u16 vid)
3910{
3911	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3912	__be32 vni = vxlan->cfg.vni;
3913	struct mlxsw_sp_fid *fid;
3914
3915	if (!netif_running(vxlan_dev))
3916		return;
3917
3918	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3919	if (!fid)
3920		return;
3921
3922	/* A different VLAN than the one mapped to the VNI is deleted */
3923	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3924		goto out;
3925
3926	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3927
3928out:
3929	mlxsw_sp_fid_put(fid);
3930}
3931
3932static int
3933mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3934				   struct switchdev_notifier_port_obj_info *
3935				   port_obj_info)
3936{
3937	struct switchdev_obj_port_vlan *vlan =
3938		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3939	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3940	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3941	struct mlxsw_sp_bridge_device *bridge_device;
3942	struct netlink_ext_ack *extack;
3943	struct mlxsw_sp *mlxsw_sp;
3944	struct net_device *br_dev;
3945
3946	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3947	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3948	if (!br_dev)
3949		return 0;
3950
3951	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3952	if (!mlxsw_sp)
3953		return 0;
3954
3955	port_obj_info->handled = true;
3956
3957	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3958	if (!bridge_device)
3959		return -EINVAL;
3960
3961	if (!bridge_device->vlan_enabled)
3962		return 0;
3963
3964	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3965						 vxlan_dev, vlan->vid,
3966						 flag_untagged,
3967						 flag_pvid, extack);
3968}
3969
3970static void
3971mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3972				   struct switchdev_notifier_port_obj_info *
3973				   port_obj_info)
3974{
3975	struct switchdev_obj_port_vlan *vlan =
3976		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3977	struct mlxsw_sp_bridge_device *bridge_device;
3978	struct mlxsw_sp *mlxsw_sp;
3979	struct net_device *br_dev;
3980
3981	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3982	if (!br_dev)
3983		return;
3984
3985	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3986	if (!mlxsw_sp)
3987		return;
3988
3989	port_obj_info->handled = true;
3990
3991	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3992	if (!bridge_device)
3993		return;
3994
3995	if (!bridge_device->vlan_enabled)
3996		return;
3997
3998	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
3999					  vlan->vid);
4000}
4001
4002static int
4003mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
4004					struct switchdev_notifier_port_obj_info *
4005					port_obj_info)
4006{
4007	int err = 0;
4008
4009	switch (port_obj_info->obj->id) {
4010	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4011		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
4012							 port_obj_info);
4013		break;
4014	default:
4015		break;
4016	}
4017
4018	return err;
4019}
4020
4021static void
4022mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
4023					struct switchdev_notifier_port_obj_info *
4024					port_obj_info)
4025{
4026	switch (port_obj_info->obj->id) {
4027	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4028		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
4029		break;
4030	default:
4031		break;
4032	}
4033}
4034
4035static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
4036					     unsigned long event, void *ptr)
4037{
4038	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
4039	int err = 0;
4040
4041	switch (event) {
4042	case SWITCHDEV_PORT_OBJ_ADD:
4043		if (netif_is_vxlan(dev))
4044			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
4045		else
4046			err = switchdev_handle_port_obj_add(dev, ptr,
4047							mlxsw_sp_port_dev_check,
4048							mlxsw_sp_port_obj_add);
4049		return notifier_from_errno(err);
4050	case SWITCHDEV_PORT_OBJ_DEL:
4051		if (netif_is_vxlan(dev))
4052			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
4053		else
4054			err = switchdev_handle_port_obj_del(dev, ptr,
4055							mlxsw_sp_port_dev_check,
4056							mlxsw_sp_port_obj_del);
4057		return notifier_from_errno(err);
4058	case SWITCHDEV_PORT_ATTR_SET:
4059		err = switchdev_handle_port_attr_set(dev, ptr,
4060						     mlxsw_sp_port_dev_check,
4061						     mlxsw_sp_port_attr_set);
4062		return notifier_from_errno(err);
4063	}
4064
4065	return NOTIFY_DONE;
4066}
4067
/* Blocking switchdev notifier for port object / attribute events;
 * registered in mlxsw_sp_fdb_init().
 */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
4071
/* Return the STP state stored on the given bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
4077
4078static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
4079{
4080	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
4081	struct notifier_block *nb;
4082	int err;
4083
4084	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
4085	if (err) {
4086		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
4087		return err;
4088	}
4089
4090	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
4091	if (err) {
4092		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
4093		return err;
4094	}
4095
4096	nb = &mlxsw_sp_switchdev_blocking_notifier;
4097	err = register_switchdev_blocking_notifier(nb);
4098	if (err) {
4099		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
4100		goto err_register_switchdev_blocking_notifier;
4101	}
4102
4103	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
4104	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
4105	return 0;
4106
4107err_register_switchdev_blocking_notifier:
4108	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
4109	return err;
4110}
4111
4112static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
4113{
4114	struct notifier_block *nb;
4115
4116	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
4117
4118	nb = &mlxsw_sp_switchdev_blocking_notifier;
4119	unregister_switchdev_blocking_notifier(nb);
4120
4121	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
4122}
4123
/* Spectrum-1 specific switchdev init: install the ASIC-specific 802.1ad
 * bridge ops. The 802.1q/802.1d ops are common and set elsewhere.
 */
static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
}

const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
	.init	= mlxsw_sp1_switchdev_init,
};

/* Spectrum-2 variant: same as above with the Spectrum-2 802.1ad ops. */
static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
}

const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
	.init	= mlxsw_sp2_switchdev_init,
};
4141
4142int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
4143{
4144	struct mlxsw_sp_bridge *bridge;
4145
4146	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
4147	if (!bridge)
4148		return -ENOMEM;
4149	mlxsw_sp->bridge = bridge;
4150	bridge->mlxsw_sp = mlxsw_sp;
4151
4152	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
4153
4154	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
4155	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
4156
4157	mlxsw_sp->switchdev_ops->init(mlxsw_sp);
4158
4159	return mlxsw_sp_fdb_init(mlxsw_sp);
4160}
4161
4162void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
4163{
4164	mlxsw_sp_fdb_fini(mlxsw_sp);
4165	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
4166	kfree(mlxsw_sp->bridge);
4167}
4168