Linux Audio

Check our new training course

Yocto / OpenEmbedded training

Mar 24-27, 2025, special US time zones
Register
Loading...
v6.8
   1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
   2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
   3
   4#include <linux/kernel.h>
   5#include <linux/types.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/slab.h>
   9#include <linux/device.h>
  10#include <linux/skbuff.h>
  11#include <linux/if_vlan.h>
  12#include <linux/if_bridge.h>
  13#include <linux/workqueue.h>
  14#include <linux/jiffies.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/netlink.h>
  17#include <net/switchdev.h>
  18#include <net/vxlan.h>
  19
  20#include "spectrum_span.h"
  21#include "spectrum_switchdev.h"
  22#include "spectrum.h"
  23#include "core.h"
  24#include "reg.h"
  25
struct mlxsw_sp_bridge_ops;

/* Per-ASIC bridge state: tracks all offloaded bridge devices plus the
 * shared FDB notification machinery.
 */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time; /* FDB ageing time, in seconds */
	bool vlan_enabled_exists; /* only one VLAN-aware bridge is supported */
	struct list_head bridges_list; /* of struct mlxsw_sp_bridge_device */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021ad_ops;
};
  46
/* An offloaded bridge netdevice together with its model-specific ops
 * (802.1Q / 802.1D / 802.1AD).
 */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;
	struct list_head list; /* member of mlxsw_sp_bridge::bridges_list */
	struct list_head ports_list; /* of struct mlxsw_sp_bridge_port */
	struct list_head mdb_list; /* of struct mlxsw_sp_mdb_entry */
	struct rhashtable mdb_ht; /* MDB entries keyed by {MAC, FID} */
	u8 vlan_enabled:1,
	   multicast_enabled:1,
	   mrouter:1;
	const struct mlxsw_sp_bridge_ops *ops;
};
  58
/* A netdevice (front-panel port or LAG) enslaved to an offloaded bridge. */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list; /* member of bridge_device->ports_list */
	struct list_head vlans_list; /* of struct mlxsw_sp_bridge_vlan */
	unsigned int ref_count;
	u8 stp_state;
	unsigned long flags; /* BR_* bridge port flags */
	bool mrouter;
	bool lagged; /* selects which union member below is valid */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
  74
/* A VLAN configured on a bridge port. */
struct mlxsw_sp_bridge_vlan {
	struct list_head list; /* member of bridge_port->vlans_list */
	struct list_head port_vlan_list; /* of struct mlxsw_sp_port_vlan */
	u16 vid;
};
  80
/* Bridge-model specific operations; the 802.1Q/D/AD flavors differ in
 * how ports join/leave and how FIDs are derived from VIDs.
 */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
 101
/* ASIC-generation specific switchdev initialization hook. */
struct mlxsw_sp_switchdev_ops {
	void (*init)(struct mlxsw_sp *mlxsw_sp);
};
 105
/* Hash table key of an MDB entry: a multicast MAC within a FID. */
struct mlxsw_sp_mdb_entry_key {
	unsigned char addr[ETH_ALEN];
	u16 fid;
};
 110
/* An offloaded multicast group and the local ports subscribed to it. */
struct mlxsw_sp_mdb_entry {
	struct list_head list; /* member of bridge_device->mdb_list */
	struct rhash_head ht_node; /* member of bridge_device->mdb_ht */
	struct mlxsw_sp_mdb_entry_key key;
	u16 mid;
	struct list_head ports_list; /* of struct mlxsw_sp_mdb_entry_port */
	u16 ports_count;
};
 119
/* A local port's reference-counted membership in an MDB entry. */
struct mlxsw_sp_mdb_entry_port {
	struct list_head list; /* Member of 'ports_list'. */
	u16 local_port;
	refcount_t refcount;
	bool mrouter;
};
 126
/* MDB entries are hashed by their full {MAC, FID} key. */
static const struct rhashtable_params mlxsw_sp_mdb_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_mdb_entry, key),
	.head_offset = offsetof(struct mlxsw_sp_mdb_entry, ht_node),
	.key_len = sizeof(struct mlxsw_sp_mdb_entry_key),
};
 132
/* Forward declarations for helpers defined later in this file. */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index);

static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device
				   *bridge_device, bool mc_enabled);

static void
mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 bool add);
 152
 153static struct mlxsw_sp_bridge_device *
 154mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
 155			    const struct net_device *br_dev)
 156{
 157	struct mlxsw_sp_bridge_device *bridge_device;
 158
 159	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
 160		if (bridge_device->dev == br_dev)
 161			return bridge_device;
 162
 163	return NULL;
 164}
 165
 166bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
 167					 const struct net_device *br_dev)
 168{
 169	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
 170}
 171
 172static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
 173						    struct netdev_nested_priv *priv)
 174{
 175	struct mlxsw_sp *mlxsw_sp = priv->data;
 176
 177	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
 178	return 0;
 179}
 180
/* Destroy the RIF of the bridge device itself and then of every device
 * stacked on top of it (e.g. VLAN uppers).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	struct netdev_nested_priv priv = {
		.data = (void *)mlxsw_sp,
	};

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      &priv);
}
 193
/* Offload all running VXLAN devices already enslaved to @br_dev. On
 * failure, detach the devices offloaded so far and return the error.
 */
static int mlxsw_sp_bridge_device_vxlan_init(struct mlxsw_sp_bridge *bridge,
					     struct net_device *br_dev,
					     struct netlink_ext_ack *extack)
{
	struct net_device *dev, *stop_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			err = mlxsw_sp_bridge_vxlan_join(bridge->mlxsw_sp,
							 br_dev, dev, 0,
							 extack);
			if (err) {
				stop_dev = dev;
				goto err_vxlan_join;
			}
		}
	}

	return 0;

err_vxlan_join:
	/* Walk the lowers again in the same order and undo the joins
	 * performed before the failing device was reached.
	 */
	netdev_for_each_lower_dev(br_dev, dev, iter) {
		if (netif_is_vxlan(dev) && netif_running(dev)) {
			if (stop_dev == dev)
				break;
			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
		}
	}
	return err;
}
 226
 227static void mlxsw_sp_bridge_device_vxlan_fini(struct mlxsw_sp_bridge *bridge,
 228					      struct net_device *br_dev)
 229{
 230	struct net_device *dev;
 231	struct list_head *iter;
 232
 233	netdev_for_each_lower_dev(br_dev, dev, iter) {
 234		if (netif_is_vxlan(dev) && netif_running(dev))
 235			mlxsw_sp_bridge_vxlan_leave(bridge->mlxsw_sp, dev);
 236	}
 237}
 238
 239static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp,
 240					      bool no_delay)
 241{
 242	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
 243	unsigned int interval = no_delay ? 0 : bridge->fdb_notify.interval;
 244
 245	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
 246			       msecs_to_jiffies(interval));
 247}
 248
/* Allocate and initialize the tracking record for a newly offloaded
 * bridge netdevice. Returns the record or an ERR_PTR. Only a single
 * VLAN-aware bridge can be offloaded at a time.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);
	int err;

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		NL_SET_ERR_MSG_MOD(extack, "Only one VLAN-aware bridge is supported");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	err = rhashtable_init(&bridge_device->mdb_ht, &mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_mdb_rhashtable_init;

	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		u16 proto;

		bridge->vlan_enabled_exists = true;
		/* The ops are chosen by the VLAN protocol of the bridge. */
		br_vlan_get_proto(br_dev, &proto);
		if (proto == ETH_P_8021AD)
			bridge_device->ops = bridge->bridge_8021ad_ops;
		else
			bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mdb_list);

	/* FDB notification processing only runs while at least one bridge
	 * is offloaded; start it when the first bridge appears.
	 */
	if (list_empty(&bridge->bridges_list))
		mlxsw_sp_fdb_notify_work_schedule(bridge->mlxsw_sp, false);
	list_add(&bridge_device->list, &bridge->bridges_list);

	/* It is possible we already have VXLAN devices enslaved to the bridge.
	 * In which case, we need to replay their configuration as if they were
	 * just now enslaved to the bridge.
	 */
	err = mlxsw_sp_bridge_device_vxlan_init(bridge, br_dev, extack);
	if (err)
		goto err_vxlan_init;

	return bridge_device;

err_vxlan_init:
	list_del(&bridge_device->list);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	rhashtable_destroy(&bridge_device->mdb_ht);
err_mdb_rhashtable_init:
	kfree(bridge_device);
	return ERR_PTR(err);
}
 315
/* Tear down the tracking record of an offloaded bridge: detach VXLAN
 * lowers, destroy the RIFs of the bridge and its uppers, and stop FDB
 * notification processing if this was the last offloaded bridge.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_vxlan_fini(bridge, bridge_device->dev);
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	if (list_empty(&bridge->bridges_list))
		cancel_delayed_work(&bridge->fdb_notify.dw);
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	/* All ports and MDB entries must be gone by now. */
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mdb_list));
	rhashtable_destroy(&bridge_device->mdb_ht);
	kfree(bridge_device);
}
 333
 334static struct mlxsw_sp_bridge_device *
 335mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
 336			   struct net_device *br_dev,
 337			   struct netlink_ext_ack *extack)
 338{
 339	struct mlxsw_sp_bridge_device *bridge_device;
 340
 341	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
 342	if (bridge_device)
 343		return bridge_device;
 344
 345	return mlxsw_sp_bridge_device_create(bridge, br_dev, extack);
 346}
 347
 348static void
 349mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
 350			   struct mlxsw_sp_bridge_device *bridge_device)
 351{
 352	if (list_empty(&bridge_device->ports_list))
 353		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
 354}
 355
 356static struct mlxsw_sp_bridge_port *
 357__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
 358			    const struct net_device *brport_dev)
 359{
 360	struct mlxsw_sp_bridge_port *bridge_port;
 361
 362	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
 363		if (bridge_port->dev == brport_dev)
 364			return bridge_port;
 365	}
 366
 367	return NULL;
 368}
 369
 370struct mlxsw_sp_bridge_port *
 371mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
 372			  struct net_device *brport_dev)
 373{
 374	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
 375	struct mlxsw_sp_bridge_device *bridge_device;
 376
 377	if (!br_dev)
 378		return NULL;
 379
 380	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
 381	if (!bridge_device)
 382		return NULL;
 383
 384	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
 385}
 386
/* Defined later; needed by the switchdev object replay helpers below. */
static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack);
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj);
 392
/* Context for replaying switchdev objects onto a port joining a bridge;
 * 'done' counts the objects successfully replayed so that exactly those
 * can be undone later.
 */
struct mlxsw_sp_bridge_port_replay_switchdev_objs {
	struct net_device *brport_dev;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int done;
};
 398
/* Notifier callback that replays switchdev object additions of
 * @brport_dev onto the underlying mlxsw port. Events for other devices
 * or of other types are ignored.
 */
static int
mlxsw_sp_bridge_port_replay_switchdev_objs(struct notifier_block *nb,
					   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct netlink_ext_ack *extack = port_obj_info->info.extack;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;
	int err = 0;

	rso = (void *)port_obj_info->info.ctx;

	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		goto out;

	/* When a port is joining the bridge through a LAG, there likely are
	 * VLANs configured on that LAG already. The replay will thus attempt to
	 * have the given port-vlans join the corresponding FIDs. But the LAG
	 * netdevice has already called the ndo_vlan_rx_add_vid NDO for its VLAN
	 * memberships, back before CHANGEUPPER was distributed and netdevice
	 * master set. So now before propagating the VLAN events further, we
	 * first need to kill the corresponding VID at the mlxsw_sp_port.
	 *
	 * Note that this doesn't need to be rolled back on failure -- if the
	 * replay fails, the enslavement is off, and the VIDs would be killed by
	 * LAG anyway as part of its rollback.
	 */
	if (port_obj_info->obj->id == SWITCHDEV_OBJ_ID_PORT_VLAN) {
		u16 vid = SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj)->vid;

		err = mlxsw_sp_port_kill_vid(rso->mlxsw_sp_port->dev, 0, vid);
		if (err)
			goto out;
	}

	/* Count the object as replayed before the add so that the undo
	 * path also covers a partially applied object.
	 */
	++rso->done;
	err = mlxsw_sp_port_obj_add(rso->mlxsw_sp_port->dev, NULL,
				    port_obj_info->obj, extack);

out:
	return notifier_from_errno(err);
}
 442
/* Dispatches switchdev object adds to the replay handler above. */
static struct notifier_block mlxsw_sp_bridge_port_replay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_replay_switchdev_objs,
};
 446
/* Mirror image of the replay callback: deletes the first 'done' objects
 * that the replay previously added, stopping once the count is
 * exhausted.
 */
static int
mlxsw_sp_bridge_port_unreplay_switchdev_objs(struct notifier_block *nb,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct switchdev_notifier_port_obj_info *port_obj_info = ptr;
	struct mlxsw_sp_bridge_port_replay_switchdev_objs *rso;

	rso = (void *)port_obj_info->info.ctx;

	if (event != SWITCHDEV_PORT_OBJ_ADD ||
	    dev != rso->brport_dev)
		return NOTIFY_DONE;
	/* Stop once every object added by the replay has been removed. */
	if (!rso->done--)
		return NOTIFY_STOP;

	mlxsw_sp_port_obj_del(rso->mlxsw_sp_port->dev, NULL,
			      port_obj_info->obj);
	return NOTIFY_DONE;
}
 467
/* Dispatches switchdev object adds to the unreplay handler above. */
static struct notifier_block mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb = {
	.notifier_call = mlxsw_sp_bridge_port_unreplay_switchdev_objs,
};
 471
/* Allocate a bridge port record for @brport_dev, link it to the bridge
 * device and mark the port as offloaded towards the bridge driver.
 * Returns the record (with a reference count of one) or an ERR_PTR.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
			    struct net_device *brport_dev,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
	if (!bridge_port)
		return ERR_PTR(-ENOMEM);

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
	bridge_port->lagged = mlxsw_sp_port->lagged;
	if (bridge_port->lagged)
		bridge_port->lag_id = mlxsw_sp_port->lag_id;
	else
		bridge_port->system_port = mlxsw_sp_port->local_port;
	bridge_port->dev = brport_dev;
	bridge_port->bridge_device = bridge_device;
	bridge_port->stp_state = br_port_get_stp_state(brport_dev);
	/* Default bridge port flags until user space overrides them. */
	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
			     BR_MCAST_FLOOD;
	INIT_LIST_HEAD(&bridge_port->vlans_list);
	list_add(&bridge_port->list, &bridge_device->ports_list);
	bridge_port->ref_count = 1;

	err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev,
					    NULL, NULL, NULL, false, extack);
	if (err)
		goto err_switchdev_offload;

	return bridge_port;

err_switchdev_offload:
	list_del(&bridge_port->list);
	kfree(bridge_port);
	return ERR_PTR(err);
}
 512
/* Tell the bridge driver the port is no longer offloaded, then unlink
 * and free our record. All VLANs must have been removed beforehand.
 */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL);
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
 521
/* Look up the bridge port record for @brport_dev, taking a reference,
 * or create it (and, if needed, its bridge device) on first use.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev,
			 struct netlink_ext_ack *extack)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev, extack);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev,
						  extack);
	if (IS_ERR(bridge_port)) {
		err = PTR_ERR(bridge_port);
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	/* Drops the bridge device again if it has no other ports. */
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
 555
 556static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
 557				     struct mlxsw_sp_bridge_port *bridge_port)
 558{
 559	struct mlxsw_sp_bridge_device *bridge_device;
 560
 561	if (--bridge_port->ref_count != 0)
 562		return;
 563	bridge_device = bridge_port->bridge_device;
 564	mlxsw_sp_bridge_port_destroy(bridge_port);
 565	mlxsw_sp_bridge_device_put(bridge, bridge_device);
 566}
 567
 568static struct mlxsw_sp_port_vlan *
 569mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
 570				  const struct mlxsw_sp_bridge_device *
 571				  bridge_device,
 572				  u16 vid)
 573{
 574	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 575
 576	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
 577			    list) {
 578		if (!mlxsw_sp_port_vlan->bridge_port)
 579			continue;
 580		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
 581		    bridge_device)
 582			continue;
 583		if (bridge_device->vlan_enabled &&
 584		    mlxsw_sp_port_vlan->vid != vid)
 585			continue;
 586		return mlxsw_sp_port_vlan;
 587	}
 588
 589	return NULL;
 590}
 591
 592static struct mlxsw_sp_port_vlan*
 593mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
 594			       u16 fid_index)
 595{
 596	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 597
 598	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
 599			    list) {
 600		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
 601
 602		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
 603			return mlxsw_sp_port_vlan;
 604	}
 605
 606	return NULL;
 607}
 608
 609static struct mlxsw_sp_bridge_vlan *
 610mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
 611			  u16 vid)
 612{
 613	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 614
 615	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
 616		if (bridge_vlan->vid == vid)
 617			return bridge_vlan;
 618	}
 619
 620	return NULL;
 621}
 622
 623static struct mlxsw_sp_bridge_vlan *
 624mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 625{
 626	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 627
 628	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
 629	if (!bridge_vlan)
 630		return NULL;
 631
 632	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
 633	bridge_vlan->vid = vid;
 634	list_add(&bridge_vlan->list, &bridge_port->vlans_list);
 635
 636	return bridge_vlan;
 637}
 638
 639static void
 640mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
 641{
 642	list_del(&bridge_vlan->list);
 643	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
 644	kfree(bridge_vlan);
 645}
 646
 647static struct mlxsw_sp_bridge_vlan *
 648mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
 649{
 650	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 651
 652	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
 653	if (bridge_vlan)
 654		return bridge_vlan;
 655
 656	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
 657}
 658
 659static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
 660{
 661	if (list_empty(&bridge_vlan->port_vlan_list))
 662		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
 663}
 664
 665static int
 666mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
 667				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
 668				  u8 state)
 669{
 670	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 671
 672	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
 673			    bridge_vlan_node) {
 674		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
 675			continue;
 676		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
 677						 bridge_vlan->vid, state);
 678	}
 679
 680	return 0;
 681}
 682
 683static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 684					    struct net_device *orig_dev,
 685					    u8 state)
 686{
 687	struct mlxsw_sp_bridge_port *bridge_port;
 688	struct mlxsw_sp_bridge_vlan *bridge_vlan;
 689	int err;
 690
 
 
 
 691	/* It's possible we failed to enslave the port, yet this
 692	 * operation is executed due to it being deferred.
 693	 */
 694	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
 695						orig_dev);
 696	if (!bridge_port)
 697		return 0;
 698
 699	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
 700		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
 701							bridge_vlan, state);
 702		if (err)
 703			goto err_port_bridge_vlan_stp_set;
 704	}
 705
 706	bridge_port->stp_state = state;
 707
 708	return 0;
 709
 710err_port_bridge_vlan_stp_set:
 711	list_for_each_entry_continue_reverse(bridge_vlan,
 712					     &bridge_port->vlans_list, list)
 713		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
 714						  bridge_port->stp_state);
 715	return err;
 716}
 717
 718static int
 719mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
 720				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
 721				    enum mlxsw_sp_flood_type packet_type,
 722				    bool member)
 723{
 724	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 725
 726	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
 727			    bridge_vlan_node) {
 728		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
 729			continue;
 730		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
 731					      packet_type,
 732					      mlxsw_sp_port->local_port,
 733					      member);
 734	}
 735
 736	return 0;
 737}
 738
/* Set flood membership of @packet_type for this port on every VLAN of
 * the bridge port; on failure, restore the previous membership on the
 * VLANs already updated.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
 766
 767static int
 768mlxsw_sp_bridge_vlans_flood_set(struct mlxsw_sp_bridge_vlan *bridge_vlan,
 769				enum mlxsw_sp_flood_type packet_type,
 770				bool member)
 771{
 772	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 773	int err;
 774
 775	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
 776			    bridge_vlan_node) {
 777		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
 778
 779		err = mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
 780					     packet_type, local_port, member);
 781		if (err)
 782			goto err_fid_flood_set;
 783	}
 784
 785	return 0;
 786
 787err_fid_flood_set:
 788	list_for_each_entry_continue_reverse(mlxsw_sp_port_vlan,
 789					     &bridge_vlan->port_vlan_list,
 790					     list) {
 791		u16 local_port = mlxsw_sp_port_vlan->mlxsw_sp_port->local_port;
 792
 793		mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid, packet_type,
 794				       local_port, !member);
 795	}
 796
 797	return err;
 798}
 799
/* Set flood membership of @packet_type for all ports on all VLANs of
 * the bridge port; on failure, undo the VLANs already updated.
 */
static int
mlxsw_sp_bridge_ports_flood_table_set(struct mlxsw_sp_bridge_port *bridge_port,
				      enum mlxsw_sp_flood_type packet_type,
				      bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						      member);
		if (err)
			goto err_bridge_vlans_flood_set;
	}

	return 0;

err_bridge_vlans_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_bridge_vlans_flood_set(bridge_vlan, packet_type,
						!member);
	return err;
}
 824
 825static int
 826mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
 827				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
 828				       bool set)
 829{
 830	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
 831	u16 vid = bridge_vlan->vid;
 832
 833	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
 834			    bridge_vlan_node) {
 835		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
 836			continue;
 837		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
 838	}
 839
 840	return 0;
 841}
 842
/* Toggle learning for this port on every VLAN of the bridge port; on
 * failure, restore the previous setting on the VLANs already updated.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
 867
 868static int
 869mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 870				    const struct net_device *orig_dev,
 871				    struct switchdev_brport_flags flags,
 872				    struct netlink_ext_ack *extack)
 873{
 874	if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
 875			   BR_PORT_LOCKED | BR_PORT_MAB)) {
 876		NL_SET_ERR_MSG_MOD(extack, "Unsupported bridge port flag");
 877		return -EINVAL;
 878	}
 879
 880	if ((flags.mask & BR_PORT_LOCKED) && is_vlan_dev(orig_dev)) {
 881		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a VLAN upper");
 882		return -EINVAL;
 883	}
 884
 885	if ((flags.mask & BR_PORT_LOCKED) && vlan_uses_dev(orig_dev)) {
 886		NL_SET_ERR_MSG_MOD(extack, "Locked flag cannot be set on a bridge port that has VLAN uppers");
 887		return -EINVAL;
 888	}
 889
 890	return 0;
 891}
 892
 893static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 894					   struct net_device *orig_dev,
 895					   struct switchdev_brport_flags flags)
 896{
 897	struct mlxsw_sp_bridge_port *bridge_port;
 898	int err;
 899
 
 
 
 900	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
 901						orig_dev);
 902	if (!bridge_port)
 903		return 0;
 904
 905	if (flags.mask & BR_FLOOD) {
 906		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
 907							   bridge_port,
 908							   MLXSW_SP_FLOOD_TYPE_UC,
 909							   flags.val & BR_FLOOD);
 910		if (err)
 911			return err;
 912	}
 913
 914	if (flags.mask & BR_LEARNING) {
 915		err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port,
 916							bridge_port,
 917							flags.val & BR_LEARNING);
 918		if (err)
 919			return err;
 920	}
 921
 922	if (flags.mask & BR_PORT_LOCKED) {
 923		err = mlxsw_sp_port_security_set(mlxsw_sp_port,
 924						 flags.val & BR_PORT_LOCKED);
 925		if (err)
 926			return err;
 927	}
 928
 929	if (bridge_port->bridge_device->multicast_enabled)
 930		goto out;
 931
 932	if (flags.mask & BR_MCAST_FLOOD) {
 933		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
 934							   bridge_port,
 935							   MLXSW_SP_FLOOD_TYPE_MC,
 936							   flags.val & BR_MCAST_FLOOD);
 937		if (err)
 938			return err;
 939	}
 940
 941out:
 942	memcpy(&bridge_port->flags, &flags.val, sizeof(flags.val));
 943	return 0;
 944}
 945
 946static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
 947{
 948	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
 949	int err;
 950
 951	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
 952	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
 953	if (err)
 954		return err;
 955	mlxsw_sp->bridge->ageing_time = ageing_time;
 956	return 0;
 957}
 958
 959static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 960					    unsigned long ageing_clock_t)
 961{
 962	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 963	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
 964	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
 965
 966	if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
 967	    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
 968		return -ERANGE;
 
 
 
 
 969
 970	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
 971}
 972
 973static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
 974					  struct net_device *orig_dev,
 975					  bool vlan_enabled)
 976{
 977	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 978	struct mlxsw_sp_bridge_device *bridge_device;
 979
 
 
 
 980	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
 981	if (WARN_ON(!bridge_device))
 982		return -EINVAL;
 983
 984	if (bridge_device->vlan_enabled == vlan_enabled)
 985		return 0;
 986
 987	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
 988	return -EINVAL;
 989}
 990
 991static int mlxsw_sp_port_attr_br_vlan_proto_set(struct mlxsw_sp_port *mlxsw_sp_port,
 992						struct net_device *orig_dev,
 993						u16 vlan_proto)
 994{
 995	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 996	struct mlxsw_sp_bridge_device *bridge_device;
 997
 998	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
 999	if (WARN_ON(!bridge_device))
1000		return -EINVAL;
1001
1002	netdev_err(bridge_device->dev, "VLAN protocol can't be changed on existing bridge\n");
1003	return -EINVAL;
1004}
1005
1006static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
1007					  struct net_device *orig_dev,
1008					  bool is_port_mrouter)
1009{
1010	struct mlxsw_sp_bridge_port *bridge_port;
1011	int err;
1012
 
 
 
1013	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
1014						orig_dev);
1015	if (!bridge_port)
1016		return 0;
1017
1018	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
1019					 is_port_mrouter);
1020
1021	if (!bridge_port->bridge_device->multicast_enabled)
1022		goto out;
1023
1024	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
1025						   MLXSW_SP_FLOOD_TYPE_MC,
1026						   is_port_mrouter);
1027	if (err)
1028		return err;
1029
 
 
1030out:
1031	bridge_port->mrouter = is_port_mrouter;
1032	return 0;
1033}
1034
1035static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
1036{
1037	const struct mlxsw_sp_bridge_device *bridge_device;
1038
1039	bridge_device = bridge_port->bridge_device;
1040	return bridge_device->multicast_enabled ? bridge_port->mrouter :
1041					bridge_port->flags & BR_MCAST_FLOOD;
1042}
1043
/* Handle SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: toggle multicast snooping
 * for the whole bridge. Syncs MDB offload state and recomputes every
 * port's multicast flood membership; on failure, rolls both back.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	if (bridge_device->multicast_enabled == !mc_disabled)
		return 0;

	/* Flip the state first: mlxsw_sp_mc_flood() below reads it to
	 * compute each port's new flood membership.
	 */
	bridge_device->multicast_enabled = !mc_disabled;
	err = mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
						 !mc_disabled);
	if (err)
		goto err_mc_enable_sync;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_ports_flood_table_set(bridge_port,
							    packet_type,
							    member);
		if (err)
			goto err_flood_table_set;
	}

	return 0;

err_flood_table_set:
	/* Undo flood membership only for ports updated before the failure. */
	list_for_each_entry_continue_reverse(bridge_port,
					     &bridge_device->ports_list, list) {
		bool member = mlxsw_sp_mc_flood(bridge_port);

		mlxsw_sp_bridge_ports_flood_table_set(bridge_port, packet_type,
						      !member);
	}
	mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp, bridge_device,
					   mc_disabled);
err_mc_enable_sync:
	/* Restore the original snooping state. */
	bridge_device->multicast_enabled = mc_disabled;
	return err;
}
1096
1097static struct mlxsw_sp_mdb_entry_port *
1098mlxsw_sp_mdb_entry_port_lookup(struct mlxsw_sp_mdb_entry *mdb_entry,
1099			       u16 local_port)
1100{
1101	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
1102
1103	list_for_each_entry(mdb_entry_port, &mdb_entry->ports_list, list) {
1104		if (mdb_entry_port->local_port == local_port)
1105			return mdb_entry_port;
1106	}
1107
1108	return NULL;
1109}
1110
/* Take a regular (non-mrouter) reference on @local_port's membership in
 * @mdb_entry, creating the membership and programming the port into the
 * PGT entry on first use. Returns the member or an ERR_PTR().
 */
static struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_port_get(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* A member whose only reference so far is the mrouter one is
		 * not accounted in ports_count (see the mrouter get/put
		 * helpers below); a first regular reference makes it count.
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count++;

		refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* Program the hardware before exposing the new member in the list. */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);
	mdb_entry->ports_count++;

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1152
/* Release a regular reference on @local_port's membership in @mdb_entry.
 * With @force, the membership is torn down regardless of the refcount.
 * The last (or forced) release unlinks the member and removes the port
 * from the PGT entry.
 */
static void
mlxsw_sp_mdb_entry_port_put(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mdb_entry *mdb_entry,
			    u16 local_port, bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!force && !refcount_dec_and_test(&mdb_entry_port->refcount)) {
		/* Mirror of the accounting in mlxsw_sp_mdb_entry_port_get():
		 * once only the mrouter reference remains, the port no
		 * longer counts as a regular member.
		 */
		if (mdb_entry_port->mrouter &&
		    refcount_read(&mdb_entry_port->refcount) == 1)
			mdb_entry->ports_count--;
		return;
	}

	mdb_entry->ports_count--;
	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
}
1177
/* Take the (single) mrouter reference on @local_port's membership in
 * @mdb_entry, creating the membership on first use. Unlike the regular
 * getter, a freshly created mrouter-only member is not added to
 * ports_count. Returns the member or an ERR_PTR().
 */
static __always_unused struct mlxsw_sp_mdb_entry_port *
mlxsw_sp_mdb_entry_mrouter_port_get(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	int err;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (mdb_entry_port) {
		/* The mrouter role holds at most one reference; only take it
		 * the first time the port becomes an mrouter member.
		 */
		if (!mdb_entry_port->mrouter)
			refcount_inc(&mdb_entry_port->refcount);
		return mdb_entry_port;
	}

	/* Program the hardware before exposing the new member in the list. */
	err = mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
					  mdb_entry->key.fid, local_port, true);
	if (err)
		return ERR_PTR(err);

	mdb_entry_port = kzalloc(sizeof(*mdb_entry_port), GFP_KERNEL);
	if (!mdb_entry_port) {
		err = -ENOMEM;
		goto err_mdb_entry_port_alloc;
	}

	mdb_entry_port->local_port = local_port;
	refcount_set(&mdb_entry_port->refcount, 1);
	mdb_entry_port->mrouter = true;
	list_add(&mdb_entry_port->list, &mdb_entry->ports_list);

	return mdb_entry_port;

err_mdb_entry_port_alloc:
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);
	return ERR_PTR(err);
}
1216
/* Drop the mrouter reference on @local_port's membership in @mdb_entry.
 * A no-op if the port is not currently an mrouter member. If that was the
 * last reference, the member is unlinked and the port is removed from the
 * PGT entry.
 */
static __always_unused void
mlxsw_sp_mdb_entry_mrouter_port_put(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_mdb_entry *mdb_entry,
				    u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	if (!mdb_entry_port->mrouter)
		return;

	mdb_entry_port->mrouter = false;
	if (!refcount_dec_and_test(&mdb_entry_port->refcount))
		return;

	list_del(&mdb_entry_port->list);
	kfree(mdb_entry_port);
	mlxsw_sp_pgt_entry_port_set(mlxsw_sp, mdb_entry->mid,
				    mdb_entry->key.fid, local_port, false);

}
1240
1241static void
1242mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
1243				   struct mlxsw_sp_bridge_device *bridge_device,
1244				   bool add)
1245{
1246	u16 local_port = mlxsw_sp_router_port(mlxsw_sp);
1247	struct mlxsw_sp_mdb_entry *mdb_entry;
1248
1249	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
1250		if (add)
1251			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
1252							    local_port);
1253		else
1254			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
1255							    local_port);
1256	}
1257}
1258
1259static int
1260mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
 
1261				  struct net_device *orig_dev,
1262				  bool is_mrouter)
1263{
1264	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1265	struct mlxsw_sp_bridge_device *bridge_device;
1266
 
 
 
1267	/* It's possible we failed to enslave the port, yet this
1268	 * operation is executed due to it being deferred.
1269	 */
1270	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
1271	if (!bridge_device)
1272		return 0;
1273
1274	if (bridge_device->mrouter != is_mrouter)
1275		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
1276						   is_mrouter);
1277	bridge_device->mrouter = is_mrouter;
1278	return 0;
1279}
1280
/* switchdev attribute-set entry point for mlxsw ports: dispatch each
 * supported attribute to its handler; unknown attributes get -EOPNOTSUPP.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev, const void *ctx,
				  const struct switchdev_attr *attr,
				  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  attr->orig_dev,
							  attr->u.brport_flags,
							  extack);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_PROTOCOL:
		err = mlxsw_sp_port_attr_br_vlan_proto_set(mlxsw_sp_port,
							   attr->orig_dev,
							   attr->u.vlan_protocol);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Attribute changes can affect mirroring; re-evaluate SPAN state
	 * regardless of the handler's result.
	 */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1343
/* Bind a {port, VLAN} to the FID of its bridge: look up/create the FID,
 * set up UC/MC/BC flood membership for the port, and map the port's VLAN
 * to the FID. All steps are unwound in reverse order on failure.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	/* FID resolution is bridge-type specific (802.1Q/D/AD ops). */
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	/* Unicast flooding follows the BR_FLOOD port flag. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	/* Multicast flooding depends on snooping state and mrouter/flags. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast is always flooded to bridge ports. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
1394
/* Undo mlxsw_sp_port_vlan_fid_join(): unmap the port's VLAN from the FID,
 * clear all flood memberships and drop the FID reference, in the reverse
 * order of setup.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u16 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	/* Clear the association before tearing down the hardware state. */
	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
1410
1411static u16
1412mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
1413			     u16 vid, bool is_pvid)
1414{
1415	if (is_pvid)
1416		return vid;
1417	else if (mlxsw_sp_port->pvid == vid)
1418		return 0;	/* Dis-allow untagged packets */
1419	else
1420		return mlxsw_sp_port->pvid;
1421}
1422
/* Attach a {port, VLAN} to a bridge port: join the FID, apply learning
 * and STP state for the VLAN, and link the port-VLAN into the bridge-VLAN
 * tracking structures. Idempotent for flag-only changes.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Take a reference on the bridge port for the lifetime of this
	 * association; released in mlxsw_sp_port_vlan_bridge_leave().
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev, extack);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1475
/* Detach a {port, VLAN} from its bridge port, undoing
 * mlxsw_sp_port_vlan_bridge_join(): unlink from bridge-VLAN tracking,
 * reset learning/STP, flush FDB/MDB state and leave the FID.
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port;

	/* Only bridge FID types are expected here; router FIDs etc. would
	 * indicate a caller bug.
	 */
	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;

	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	/* Check before unlinking whether we are the last user of this
	 * bridge VLAN; the FDB flush below is only needed then.
	 */
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));

	mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port,
				       mlxsw_sp_fid_index(fid));

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	/* Drop the reference taken in mlxsw_sp_port_vlan_bridge_join(). */
	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1511
/* Add (or update the flags of) VLAN @vid on a bridge port: create the
 * port-VLAN if needed, program VLAN membership and PVID, then join the
 * VLAN to the bridge. Unwound in reverse order on failure.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;
	u16 proto;
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	/* The PVID must be set with the bridge's VLAN protocol (802.1Q or
	 * 802.1AD).
	 */
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid, proto);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1565
1566static int
1567mlxsw_sp_br_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1568			    struct net_device *br_dev,
1569			    const struct switchdev_obj_port_vlan *vlan,
1570			    struct netlink_ext_ack *extack)
1571{
1572	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
 
 
 
 
 
 
 
 
 
1573
1574	return mlxsw_sp_router_bridge_vlan_add(mlxsw_sp, br_dev, vlan->vid,
1575					       flag_pvid, extack);
 
 
 
 
 
 
 
 
 
 
 
 
 
1576}
1577
/* switchdev SWITCHDEV_OBJ_ID_PORT_VLAN add handler for mlxsw ports. */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if (br_vlan_enabled(orig_dev))
			err = mlxsw_sp_br_rif_pvid_change(mlxsw_sp, orig_dev,
							  vlan, extack);
		/* NOTE(review): intentionally returns -EOPNOTSUPP on the
		 * success path — presumably so the VLAN on the bridge device
		 * itself is not marked as offloaded; confirm against the
		 * switchdev notifier handling before changing.
		 */
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	/* VLANs on a VLAN-unaware bridge are not offloaded per-VLAN. */
	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
					     vlan->vid, flag_untagged,
					     flag_pvid, extack);
}
1610
1611static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1612{
1613	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1614			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1615}
1616
1617static int
1618mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
1619			       struct mlxsw_sp_bridge_port *bridge_port,
1620			       u16 fid_index)
1621{
1622	bool lagged = bridge_port->lagged;
1623	char sfdf_pl[MLXSW_REG_SFDF_LEN];
1624	u16 system_port;
1625
1626	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
1627	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
1628	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
1629	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);
1630
1631	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
1632}
1633
1634static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1635{
1636	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1637			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1638}
1639
1640static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1641{
1642	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1643			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1644}
1645
1646static int
1647mlxsw_sp_port_fdb_tun_uc_op4(struct mlxsw_sp *mlxsw_sp, bool dynamic,
1648			     const char *mac, u16 fid, __be32 addr, bool adding)
 
 
1649{
 
1650	char *sfd_pl;
1651	u8 num_rec;
1652	u32 uip;
1653	int err;
1654
1655	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1656	if (!sfd_pl)
1657		return -ENOMEM;
1658
1659	uip = be32_to_cpu(addr);
1660	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1661	mlxsw_reg_sfd_uc_tunnel_pack4(sfd_pl, 0,
1662				      mlxsw_sp_sfd_rec_policy(dynamic), mac,
1663				      fid, MLXSW_REG_SFD_REC_ACTION_NOP, uip);
1664	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1665	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1666	if (err)
1667		goto out;
1668
1669	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1670		err = -EBUSY;
1671
1672out:
1673	kfree(sfd_pl);
1674	return err;
1675}
1676
1677static int mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(struct mlxsw_sp *mlxsw_sp,
1678						  const char *mac, u16 fid,
1679						  u32 kvdl_index, bool adding)
1680{
1681	char *sfd_pl;
1682	u8 num_rec;
1683	int err;
1684
1685	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1686	if (!sfd_pl)
1687		return -ENOMEM;
1688
1689	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1690	mlxsw_reg_sfd_uc_tunnel_pack6(sfd_pl, 0, mac, fid,
1691				      MLXSW_REG_SFD_REC_ACTION_NOP, kvdl_index);
 
 
1692	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1693	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1694	if (err)
1695		goto out;
1696
1697	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1698		err = -EBUSY;
1699
1700out:
1701	kfree(sfd_pl);
1702	return err;
1703}
1704
/* Add a unicast FDB record pointing at an IPv6 NVE tunnel: allocate a
 * KVDL entry for the underlay address, write the SFD record, then record
 * the {MAC, FID} -> address mapping. Unwound in reverse order on failure.
 */
static int mlxsw_sp_port_fdb_tun_uc_op6_add(struct mlxsw_sp *mlxsw_sp,
					    const char *mac, u16 fid,
					    const struct in6_addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_nve_ipv6_addr_kvdl_set(mlxsw_sp, addr, &kvdl_index);
	if (err)
		return err;

	err = mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid,
						     kvdl_index, true);
	if (err)
		goto err_sfd_write;

	err = mlxsw_sp_nve_ipv6_addr_map_replace(mlxsw_sp, mac, fid, addr);
	if (err)
		/* Replace can fail only for creating new mapping, so removing
		 * the FDB entry in the error path is OK.
		 */
		goto err_addr_replace;

	return 0;

err_addr_replace:
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, kvdl_index,
					       false);
err_sfd_write:
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
	return err;
}
1737
/* Remove an IPv6 NVE tunnel FDB record, tearing down the state created by
 * mlxsw_sp_port_fdb_tun_uc_op6_add() in reverse order: drop the address
 * mapping, remove the SFD record, then free the KVDL entry.
 */
static void mlxsw_sp_port_fdb_tun_uc_op6_del(struct mlxsw_sp *mlxsw_sp,
					     const char *mac, u16 fid,
					     const struct in6_addr *addr)
{
	mlxsw_sp_nve_ipv6_addr_map_del(mlxsw_sp, mac, fid);
	mlxsw_sp_port_fdb_tun_uc_op6_sfd_write(mlxsw_sp, mac, fid, 0, false);
	mlxsw_sp_nve_ipv6_addr_kvdl_unset(mlxsw_sp, addr);
}
1746
1747static int
1748mlxsw_sp_port_fdb_tun_uc_op6(struct mlxsw_sp *mlxsw_sp, const char *mac,
1749			     u16 fid, const struct in6_addr *addr, bool adding)
1750{
1751	if (adding)
1752		return mlxsw_sp_port_fdb_tun_uc_op6_add(mlxsw_sp, mac, fid,
1753							addr);
1754
1755	mlxsw_sp_port_fdb_tun_uc_op6_del(mlxsw_sp, mac, fid, addr);
1756	return 0;
1757}
1758
1759static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
1760					  const char *mac, u16 fid,
1761					  enum mlxsw_sp_l3proto proto,
1762					  const union mlxsw_sp_l3addr *addr,
1763					  bool adding, bool dynamic)
1764{
1765	switch (proto) {
1766	case MLXSW_SP_L3_PROTO_IPV4:
1767		return mlxsw_sp_port_fdb_tun_uc_op4(mlxsw_sp, dynamic, mac, fid,
1768						    addr->addr4, adding);
1769	case MLXSW_SP_L3_PROTO_IPV6:
1770		return mlxsw_sp_port_fdb_tun_uc_op6(mlxsw_sp, mac, fid,
1771						    &addr->addr6, adding);
1772	default:
1773		WARN_ON(1);
1774		return -EOPNOTSUPP;
1775	}
1776}
1777
1778static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1779				     const char *mac, u16 fid, u16 vid,
1780				     bool adding,
1781				     enum mlxsw_reg_sfd_rec_action action,
1782				     enum mlxsw_reg_sfd_rec_policy policy)
1783{
1784	char *sfd_pl;
1785	u8 num_rec;
1786	int err;
1787
1788	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1789	if (!sfd_pl)
1790		return -ENOMEM;
1791
1792	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1793	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, vid, action,
1794			      local_port);
1795	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1796	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1797	if (err)
1798		goto out;
1799
1800	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1801		err = -EBUSY;
1802
1803out:
1804	kfree(sfd_pl);
1805	return err;
1806}
1807
1808static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u16 local_port,
1809				   const char *mac, u16 fid, u16 vid,
1810				   bool adding, bool dynamic)
1811{
1812	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, vid,
1813					 adding, MLXSW_REG_SFD_REC_ACTION_NOP,
1814					 mlxsw_sp_sfd_rec_policy(dynamic));
1815}
1816
1817int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
1818			bool adding)
1819{
1820	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, 0, adding,
1821					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
1822					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
1823}
1824
1825static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
1826				       const char *mac, u16 fid, u16 lag_vid,
1827				       bool adding, bool dynamic)
1828{
1829	char *sfd_pl;
1830	u8 num_rec;
1831	int err;
1832
1833	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1834	if (!sfd_pl)
1835		return -ENOMEM;
1836
1837	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1838	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
1839				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1840				  lag_vid, lag_id);
1841	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1842	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1843	if (err)
1844		goto out;
1845
1846	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1847		err = -EBUSY;
1848
1849out:
1850	kfree(sfd_pl);
1851	return err;
1852}
1853
/* Program an FDB entry reported by a switchdev FDB notification, using
 * the port or LAG variant depending on the bridge port type. Entries for
 * {port, VLAN}s that are not offloaded are silently ignored.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	/* The VLAN is not offloaded on this port; nothing to program. */
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index, vid,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
1890
1891static int mlxsw_sp_mdb_entry_write(struct mlxsw_sp *mlxsw_sp,
1892				    const struct mlxsw_sp_mdb_entry *mdb_entry,
1893				    bool adding)
1894{
1895	char *sfd_pl;
1896	u8 num_rec;
1897	int err;
1898
1899	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
1900	if (!sfd_pl)
1901		return -ENOMEM;
1902
1903	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
1904	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, mdb_entry->key.addr,
1905			      mdb_entry->key.fid, MLXSW_REG_SFD_REC_ACTION_NOP,
1906			      mdb_entry->mid);
1907	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
1908	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
1909	if (err)
1910		goto out;
1911
1912	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
1913		err = -EBUSY;
1914
1915out:
1916	kfree(sfd_pl);
1917	return err;
1918}
1919
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1920static void
1921mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1922				      struct mlxsw_sp_bridge_port *bridge_port,
1923				      struct mlxsw_sp_ports_bitmap *ports_bm)
1924{
1925	struct mlxsw_sp_port *mlxsw_sp_port;
1926	u64 max_lag_members, i;
1927	int lag_id;
1928
1929	if (!bridge_port->lagged) {
1930		set_bit(bridge_port->system_port, ports_bm->bitmap);
1931	} else {
1932		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1933						     MAX_LAG_MEMBERS);
1934		lag_id = bridge_port->lag_id;
1935		for (i = 0; i < max_lag_members; i++) {
1936			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1937								 lag_id, i);
1938			if (mlxsw_sp_port)
1939				set_bit(mlxsw_sp_port->local_port,
1940					ports_bm->bitmap);
1941		}
1942	}
1943}
1944
1945static void
1946mlxsw_sp_mc_get_mrouters_bitmap(struct mlxsw_sp_ports_bitmap *flood_bm,
1947				struct mlxsw_sp_bridge_device *bridge_device,
1948				struct mlxsw_sp *mlxsw_sp)
1949{
1950	struct mlxsw_sp_bridge_port *bridge_port;
1951
1952	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1953		if (bridge_port->mrouter) {
1954			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1955							      bridge_port,
1956							      flood_bm);
1957		}
1958	}
1959}
1960
/* Add every port set in @ports_bm to @mdb_entry as an mrouter member.
 * On failure, the mrouter reference is dropped from the ports added so
 * far.
 */
static int mlxsw_sp_mc_mdb_mrouters_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_ports_bitmap *ports_bm,
					struct mlxsw_sp_mdb_entry *mdb_entry)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	unsigned int nbits = ports_bm->nbits;
	int i;

	for_each_set_bit(i, ports_bm->bitmap, nbits) {
		mdb_entry_port = mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp,
								     mdb_entry,
								     i);
		if (IS_ERR(mdb_entry_port)) {
			/* Truncate the scan range so the rollback below only
			 * visits the bits processed before the failure.
			 */
			nbits = i;
			goto err_mrouter_port_get;
		}
	}

	return 0;

err_mrouter_port_get:
	for_each_set_bit(i, ports_bm->bitmap, nbits)
		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
	return PTR_ERR(mdb_entry_port);
}
1986
1987static void mlxsw_sp_mc_mdb_mrouters_del(struct mlxsw_sp *mlxsw_sp,
1988					 struct mlxsw_sp_ports_bitmap *ports_bm,
1989					 struct mlxsw_sp_mdb_entry *mdb_entry)
1990{
1991	int i;
1992
1993	for_each_set_bit(i, ports_bm->bitmap, ports_bm->nbits)
1994		mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry, i);
1995}
1996
/* Add (@add == true) or remove all of the bridge's current mrouter ports
 * to/from @mdb_entry, using a temporary ports bitmap as a snapshot of the
 * mrouter set. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_mc_mdb_mrouters_set(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_bridge_device *bridge_device,
			     struct mlxsw_sp_mdb_entry *mdb_entry, bool add)
{
	struct mlxsw_sp_ports_bitmap ports_bm;
	int err;

	err = mlxsw_sp_port_bitmap_init(mlxsw_sp, &ports_bm);
	if (err)
		return err;

	mlxsw_sp_mc_get_mrouters_bitmap(&ports_bm, bridge_device, mlxsw_sp);

	if (add)
		err = mlxsw_sp_mc_mdb_mrouters_add(mlxsw_sp, &ports_bm,
						   mdb_entry);
	else
		/* Removal cannot fail; err remains 0 from the init above. */
		mlxsw_sp_mc_mdb_mrouters_del(mlxsw_sp, &ports_bm, mdb_entry);

	mlxsw_sp_port_bitmap_fini(&ports_bm);
	return err;
}
2020
/* Allocate and fully set up a new MDB entry keyed by {addr, fid}:
 * allocate a PGT MID, populate the entry with the bridge's mrouter ports
 * and the requesting @local_port, program it to hardware (only if multicast
 * is enabled on the bridge) and link it into the bridge's hash table and
 * list. Returns the new entry or ERR_PTR() on failure, unwinding in reverse
 * order of construction.
 */
static struct mlxsw_sp_mdb_entry *
mlxsw_sp_mc_mdb_entry_init(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   const unsigned char *addr, u16 fid, u16 local_port)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	mdb_entry = kzalloc(sizeof(*mdb_entry), GFP_KERNEL);
	if (!mdb_entry)
		return ERR_PTR(-ENOMEM);

	ether_addr_copy(mdb_entry->key.addr, addr);
	mdb_entry->key.fid = fid;
	err = mlxsw_sp_pgt_mid_alloc(mlxsw_sp, &mdb_entry->mid);
	if (err)
		goto err_pgt_mid_alloc;

	INIT_LIST_HEAD(&mdb_entry->ports_list);

	/* Seed the entry with all current multicast router ports. */
	err = mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry,
					   true);
	if (err)
		goto err_mdb_mrouters_set;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp, mdb_entry,
						     local_port);
	if (IS_ERR(mdb_entry_port)) {
		err = PTR_ERR(mdb_entry_port);
		goto err_mdb_entry_port_get;
	}

	/* When multicast is disabled, entries are kept in software only and
	 * written to hardware later by mlxsw_sp_bridge_mdb_mc_enable_sync().
	 */
	if (bridge_device->multicast_enabled) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, true);
		if (err)
			goto err_mdb_entry_write;
	}

	err = rhashtable_insert_fast(&bridge_device->mdb_ht,
				     &mdb_entry->ht_node,
				     mlxsw_sp_mdb_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&mdb_entry->list, &bridge_device->mdb_list);

	return mdb_entry;

err_rhashtable_insert:
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
err_mdb_entry_write:
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, false);
err_mdb_entry_port_get:
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
err_mdb_mrouters_set:
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
err_pgt_mid_alloc:
	kfree(mdb_entry);
	return ERR_PTR(err);
}
2083
/* Tear down an MDB entry in the reverse order of
 * mlxsw_sp_mc_mdb_entry_init(): unlink it from the bridge's list and hash
 * table, remove it from hardware (if multicast is enabled), release the
 * last port and mrouter references, and free the PGT MID and the entry.
 * Note the MDB entry is removed from hardware before the PGT entry is
 * released (see comment in mlxsw_sp_mc_mdb_entry_put()).
 */
static void
mlxsw_sp_mc_mdb_entry_fini(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_mdb_entry *mdb_entry,
			   struct mlxsw_sp_bridge_device *bridge_device,
			   u16 local_port, bool force)
{
	list_del(&mdb_entry->list);
	rhashtable_remove_fast(&bridge_device->mdb_ht, &mdb_entry->ht_node,
			       mlxsw_sp_mdb_ht_params);
	if (bridge_device->multicast_enabled)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, false);
	mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port, force);
	mlxsw_sp_mc_mdb_mrouters_set(mlxsw_sp, bridge_device, mdb_entry, false);
	/* All references should be gone by now; a non-empty list indicates a
	 * reference-counting bug.
	 */
	WARN_ON(!list_empty(&mdb_entry->ports_list));
	mlxsw_sp_pgt_mid_free(mlxsw_sp, mdb_entry->mid);
	kfree(mdb_entry);
}
2101
2102static struct mlxsw_sp_mdb_entry *
2103mlxsw_sp_mc_mdb_entry_get(struct mlxsw_sp *mlxsw_sp,
2104			  struct mlxsw_sp_bridge_device *bridge_device,
2105			  const unsigned char *addr, u16 fid, u16 local_port)
2106{
2107	struct mlxsw_sp_mdb_entry_key key = {};
2108	struct mlxsw_sp_mdb_entry *mdb_entry;
2109
2110	ether_addr_copy(key.addr, addr);
2111	key.fid = fid;
2112	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
2113					   mlxsw_sp_mdb_ht_params);
2114	if (mdb_entry) {
2115		struct mlxsw_sp_mdb_entry_port *mdb_entry_port;
2116
2117		mdb_entry_port = mlxsw_sp_mdb_entry_port_get(mlxsw_sp,
2118							     mdb_entry,
2119							     local_port);
2120		if (IS_ERR(mdb_entry_port))
2121			return ERR_CAST(mdb_entry_port);
2122
2123		return mdb_entry;
2124	}
 
2125
2126	return mlxsw_sp_mc_mdb_entry_init(mlxsw_sp, bridge_device, addr, fid,
2127					  local_port);
2128}
2129
2130static bool
2131mlxsw_sp_mc_mdb_entry_remove(struct mlxsw_sp_mdb_entry *mdb_entry,
2132			     struct mlxsw_sp_mdb_entry_port *removed_entry_port,
2133			     bool force)
2134{
2135	if (mdb_entry->ports_count > 1)
2136		return false;
2137
2138	if (force)
2139		return true;
2140
2141	if (!removed_entry_port->mrouter &&
2142	    refcount_read(&removed_entry_port->refcount) > 1)
2143		return false;
2144
2145	if (removed_entry_port->mrouter &&
2146	    refcount_read(&removed_entry_port->refcount) > 2)
2147		return false;
2148
2149	return true;
 
 
 
 
2150}
2151
/* Release @local_port's reference on @mdb_entry, deleting the whole entry
 * when this was its last user (see mlxsw_sp_mc_mdb_entry_remove()).
 */
static void
mlxsw_sp_mc_mdb_entry_put(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_bridge_device *bridge_device,
			  struct mlxsw_sp_mdb_entry *mdb_entry, u16 local_port,
			  bool force)
{
	struct mlxsw_sp_mdb_entry_port *mdb_entry_port;

	mdb_entry_port = mlxsw_sp_mdb_entry_port_lookup(mdb_entry, local_port);
	if (!mdb_entry_port)
		return;

	/* Avoid a temporary situation in which the MDB entry points to an empty
	 * PGT entry, as otherwise packets will be temporarily dropped instead
	 * of being flooded. Instead, in this situation, call
	 * mlxsw_sp_mc_mdb_entry_fini(), which first deletes the MDB entry and
	 * then releases the PGT entry.
	 */
	if (mlxsw_sp_mc_mdb_entry_remove(mdb_entry, mdb_entry_port, force))
		mlxsw_sp_mc_mdb_entry_fini(mlxsw_sp, mdb_entry, bridge_device,
					   local_port, force);
	else
		mlxsw_sp_mdb_entry_port_put(mlxsw_sp, mdb_entry, local_port,
					    force);
}
2177
/* SWITCHDEV_OBJ_ID_PORT_MDB add handler: take a reference on (or create)
 * the MDB entry matching the notification's group address and FID for this
 * port. Returns 0 when the notification does not concern an offloaded
 * bridge port or VLAN, so unrelated events are silently ignored.
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mdb_entry = mlxsw_sp_mc_mdb_entry_get(mlxsw_sp, bridge_device,
					      mdb->addr, fid_index,
					      mlxsw_sp_port->local_port);
	if (IS_ERR(mdb_entry))
		return PTR_ERR(mdb_entry);

	return 0;
}
2210
/* Write (@mc_enabled == true) or remove all of the bridge's MDB entries
 * to/from hardware when multicast snooping is toggled. On failure, entries
 * already written are reverted in reverse order.
 */
static int
mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool mc_enabled)
{
	struct mlxsw_sp_mdb_entry *mdb_entry;
	int err;

	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
		err = mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, mc_enabled);
		if (err)
			goto err_mdb_entry_write;
	}
	return 0;

err_mdb_entry_write:
	list_for_each_entry_continue_reverse(mdb_entry,
					     &bridge_device->mdb_list, list)
		mlxsw_sp_mdb_entry_write(mlxsw_sp, mdb_entry, !mc_enabled);
	return err;
}
2232
2233static void
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2234mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
2235				 struct mlxsw_sp_bridge_port *bridge_port,
2236				 bool add)
2237{
2238	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2239	struct mlxsw_sp_bridge_device *bridge_device;
2240	u16 local_port = mlxsw_sp_port->local_port;
2241	struct mlxsw_sp_mdb_entry *mdb_entry;
2242
2243	bridge_device = bridge_port->bridge_device;
2244
2245	list_for_each_entry(mdb_entry, &bridge_device->mdb_list, list) {
2246		if (add)
2247			mlxsw_sp_mdb_entry_mrouter_port_get(mlxsw_sp, mdb_entry,
2248							    local_port);
2249		else
2250			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp, mdb_entry,
2251							    local_port);
2252	}
2253}
2254
/* switchdev object-add handler for Spectrum ports. Only VLAN and MDB
 * objects are offloaded; anything else is reported as unsupported.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);

		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, extack);

		/* The event is emitted before the changes are actually
		 * applied to the bridge. Therefore schedule the respin
		 * call for later, so that the respin logic sees the
		 * updated bridge state.
		 */
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
2287
/* Remove @vid from a bridge port: detach the {port, VLAN} from the bridge,
 * fix up the PVID, remove the VLAN from the port and destroy the
 * mlxsw_sp_port_vlan object.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	/* If the VLAN being deleted is the port's PVID, clear the PVID;
	 * otherwise keep the current one.
	 */
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 proto;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	br_vlan_get_proto(bridge_port->bridge_device->dev, &proto);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid, proto);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
2306
/* SWITCHDEV_OBJ_ID_PORT_VLAN delete handler. VLAN deletion on the bridge
 * device itself is not offloaded, and VLAN-unaware bridges have nothing to
 * do here.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vlan->vid);

	return 0;
}
2328
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* SWITCHDEV_OBJ_ID_PORT_MDB delete handler: drop this port's reference on
 * the MDB entry matching the notification's group address and FID.
 * Returns 0 when the notification does not concern an offloaded bridge
 * port or VLAN.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mdb_entry_key key = {};
	struct mlxsw_sp_mdb_entry *mdb_entry;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	ether_addr_copy(key.addr, mdb->addr);
	key.fid = fid_index;
	mdb_entry = rhashtable_lookup_fast(&bridge_device->mdb_ht, &key,
					   mlxsw_sp_mdb_ht_params);
	if (!mdb_entry) {
		/* Deleting an entry that was never offloaded indicates a
		 * state mismatch with the bridge driver.
		 */
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
				  mlxsw_sp_port->local_port, false);
	return 0;
}
2368
/* Remove this port from every MDB entry of the given FID, e.g. when the
 * port leaves the bridge. References are dropped with force=true so the
 * entry is deleted even if the port still holds multiple references.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mdb_entry *mdb_entry, *tmp;
	u16 local_port = mlxsw_sp_port->local_port;

	bridge_device = bridge_port->bridge_device;

	/* _safe variant because entries may be freed while iterating. */
	list_for_each_entry_safe(mdb_entry, tmp, &bridge_device->mdb_list,
				 list) {
		if (mdb_entry->key.fid != fid_index)
			continue;

		/* Also drop the extra mrouter reference the port holds. */
		if (bridge_port->mrouter)
			mlxsw_sp_mdb_entry_mrouter_port_put(mlxsw_sp,
							    mdb_entry,
							    local_port);

		mlxsw_sp_mc_mdb_entry_put(mlxsw_sp, bridge_device, mdb_entry,
					  local_port, true);
	}
}
2395
/* switchdev object-delete handler for Spectrum ports; counterpart of
 * mlxsw_sp_port_obj_add(). The SPAN respin is scheduled unconditionally
 * since deletion may have changed mirroring topology.
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev, const void *ctx,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
2420
2421static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
2422						   u16 lag_id)
2423{
2424	struct mlxsw_sp_port *mlxsw_sp_port;
2425	u64 max_lag_members;
2426	int i;
2427
2428	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
2429					     MAX_LAG_MEMBERS);
2430	for (i = 0; i < max_lag_members; i++) {
2431		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
2432		if (mlxsw_sp_port)
2433			return mlxsw_sp_port;
2434	}
2435	return NULL;
2436}
2437
/* Replay the bridge port's existing switchdev objects (VLANs, MDB, ...)
 * towards this port so hardware state matches the software bridge. On
 * failure, a second replay pass with the "unreplay" notifier undoes the
 * objects that were applied before the error.
 */
static int
mlxsw_sp_bridge_port_replay(struct mlxsw_sp_bridge_port *bridge_port,
			    struct mlxsw_sp_port *mlxsw_sp_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_port_replay_switchdev_objs rso = {
		.brport_dev = bridge_port->dev,
		.mlxsw_sp_port = mlxsw_sp_port,
	};
	struct notifier_block *nb;
	int err;

	nb = &mlxsw_sp_bridge_port_replay_switchdev_objs_nb;
	err = switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
					   &rso, NULL, nb, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	nb = &mlxsw_sp_bridge_port_unreplay_switchdev_objs_nb;
	switchdev_bridge_port_replay(bridge_port->dev, mlxsw_sp_port->dev,
				     &rso, NULL, nb, extack);
	return err;
}
2464
/* Common join path for VLAN-aware (802.1Q and 802.1ad) bridges: reject
 * VLAN uppers, detach the port's default VLAN from the router and replay
 * the bridge's existing configuration onto the port.
 */
static int
mlxsw_sp_bridge_vlan_aware_port_join(struct mlxsw_sp_bridge_port *bridge_port,
				     struct mlxsw_sp_port *mlxsw_sp_port,
				     struct netlink_ext_ack *extack)
{
	if (is_vlan_dev(bridge_port->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port->default_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

	return mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
}
2481
/* 802.1Q bridge_ops->port_join: identical to the generic VLAN-aware join. */
static int
mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						    extack);
}
2491
/* Common leave path for VLAN-aware bridges: restore the default PVID. */
static void
mlxsw_sp_bridge_vlan_aware_port_leave(struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
			       ETH_P_8021Q);
}
2499
/* 802.1Q bridge_ops->port_leave: identical to the generic VLAN-aware leave. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
}
2507
/* Attach a VxLAN device to a VLAN-aware bridge: resolve the mapped VLAN,
 * take a reference on the corresponding 802.1Q FID and enable NVE
 * (VNI-to-FID) on it. The FID reference is kept while NVE is enabled.
 */
static int
mlxsw_sp_bridge_vlan_aware_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				      const struct net_device *vxlan_dev,
				      u16 vid, u16 ethertype,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
		.ethertype = ethertype,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1Q FID");
		return PTR_ERR(fid);
	}

	/* A FID can be bound to at most one VNI. */
	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	return 0;

err_nve_fid_enable:
err_vni_exists:
	mlxsw_sp_fid_put(fid);
	return err;
}
2558
/* 802.1Q bridge_ops->vxlan_join: VLAN-aware join with 802.1Q EtherType. */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021Q, extack);
}
2567
2568static struct net_device *
2569mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2570{
2571	struct net_device *dev;
2572	struct list_head *iter;
2573
2574	netdev_for_each_lower_dev(br_dev, dev, iter) {
2575		u16 pvid;
2576		int err;
2577
2578		if (!netif_is_vxlan(dev))
2579			continue;
2580
2581		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2582		if (err || pvid != vid)
2583			continue;
2584
2585		return dev;
2586	}
2587
2588	return NULL;
2589}
2590
/* 802.1Q bridge_ops->fid_get: take a reference on the 802.1Q FID of @vid. */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
}
2599
/* 802.1Q bridge_ops->fid_lookup: look up the 802.1Q FID of @vid without
 * creating it.
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
				 u16 vid)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
}
2608
/* 802.1Q bridge_ops->fid_vid: map a FID back to its VLAN ID. */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2615
/* Operations for VLAN-aware (802.1Q) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2624
2625static bool
2626mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2627			   const struct net_device *br_dev)
2628{
2629	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2630
2631	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2632			    list) {
2633		if (mlxsw_sp_port_vlan->bridge_port &&
2634		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2635		    br_dev)
2636			return true;
2637	}
2638
2639	return false;
2640}
2641
/* 802.1D (VLAN-unaware) bridge_ops->port_join: bind the {port, VLAN}
 * (the VLAN upper's VID, or the default VID for a plain port) to the
 * bridge, then replay the bridge's existing configuration.
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;
	int err;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_replay(bridge_port, mlxsw_sp_port, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	return err;
}
2682
/* 802.1D bridge_ops->port_leave: detach the {port, VLAN} from the bridge.
 * Silently returns if the VLAN is already gone or not bridged.
 */
static void
mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
}
2699
2700static int
2701mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
2702				 const struct net_device *vxlan_dev, u16 vid,
2703				 struct netlink_ext_ack *extack)
2704{
2705	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2706	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
2707	struct mlxsw_sp_nve_params params = {
2708		.type = MLXSW_SP_NVE_TYPE_VXLAN,
2709		.vni = vxlan->cfg.vni,
2710		.dev = vxlan_dev,
2711		.ethertype = ETH_P_8021Q,
2712	};
2713	struct mlxsw_sp_fid *fid;
2714	int err;
2715
2716	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
2717	if (IS_ERR(fid)) {
2718		NL_SET_ERR_MSG_MOD(extack, "Failed to create 802.1D FID");
2719		return -EINVAL;
2720	}
2721
2722	if (mlxsw_sp_fid_vni_is_set(fid)) {
2723		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
2724		err = -EINVAL;
2725		goto err_vni_exists;
2726	}
2727
2728	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
2729	if (err)
2730		goto err_nve_fid_enable;
2731
 
 
 
 
 
2732	return 0;
2733
2734err_nve_fid_enable:
2735err_vni_exists:
2736	mlxsw_sp_fid_put(fid);
2737	return err;
2738}
2739
/* 802.1D bridge_ops->fid_get: take a reference on the per-bridge 802.1D
 * FID, keyed by the bridge device's ifindex (the @vid argument is unused
 * for VLAN-unaware bridges).
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);

	return mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
}
2748
2749static struct mlxsw_sp_fid *
2750mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2751				 u16 vid)
2752{
2753	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2754
2755	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2756	if (vid)
2757		return NULL;
2758
2759	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2760}
2761
/* 802.1D bridge_ops->fid_vid: VLAN-unaware bridges always map to VLAN 0. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2768
/* Operations for VLAN-unaware (802.1D) bridges. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2777
/* 802.1ad bridge_ops->port_join: switch the port to S-tag VLAN
 * classification before the common VLAN-aware join; revert on failure.
 */
static int
mlxsw_sp_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, false);
	if (err)
		return err;

	err = mlxsw_sp_bridge_vlan_aware_port_join(bridge_port, mlxsw_sp_port,
						   extack);
	if (err)
		goto err_bridge_vlan_aware_port_join;

	return 0;

err_bridge_vlan_aware_port_join:
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	return err;
}
2801
/* 802.1ad bridge_ops->port_leave: undo mlxsw_sp_bridge_8021ad_port_join(). */
static void
mlxsw_sp_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_vlan_aware_port_leave(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
}
2810
/* 802.1ad bridge_ops->vxlan_join: VLAN-aware join with 802.1ad EtherType. */
static int
mlxsw_sp_bridge_8021ad_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  struct netlink_ext_ack *extack)
{
	return mlxsw_sp_bridge_vlan_aware_vxlan_join(bridge_device, vxlan_dev,
						     vid, ETH_P_8021AD, extack);
}
2819
/* Operations for 802.1ad bridges on Spectrum-1; FID handling is shared
 * with 802.1Q since both are VLAN-aware.
 */
static const struct mlxsw_sp_bridge_ops mlxsw_sp1_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2828
/* Spectrum-2+ variant of the 802.1ad port join: additionally set the
 * port's egress EtherType to 802.1ad; revert to 802.1Q on failure.
 */
static int
mlxsw_sp2_bridge_8021ad_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct netlink_ext_ack *extack)
{
	int err;

	/* The EtherType of decapsulated packets is determined at the egress
	 * port to allow 802.1d and 802.1ad bridges with VXLAN devices to
	 * co-exist.
	 */
	err = mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021AD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_8021ad_port_join(bridge_device, bridge_port,
					       mlxsw_sp_port, extack);
	if (err)
		goto err_bridge_8021ad_port_join;

	return 0;

err_bridge_8021ad_port_join:
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
	return err;
}
2856
/* Spectrum-2+ variant of the 802.1ad port leave: also restore the port's
 * egress EtherType to 802.1Q.
 */
static void
mlxsw_sp2_bridge_8021ad_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				   struct mlxsw_sp_bridge_port *bridge_port,
				   struct mlxsw_sp_port *mlxsw_sp_port)
{
	mlxsw_sp_bridge_8021ad_port_leave(bridge_device, bridge_port,
					  mlxsw_sp_port);
	mlxsw_sp_port_egress_ethtype_set(mlxsw_sp_port, ETH_P_8021Q);
}
2866
/* Operations for 802.1ad bridges on Spectrum-2 and later. */
static const struct mlxsw_sp_bridge_ops mlxsw_sp2_bridge_8021ad_ops = {
	.port_join	= mlxsw_sp2_bridge_8021ad_port_join,
	.port_leave	= mlxsw_sp2_bridge_8021ad_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021ad_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2875
/* Enslave a Spectrum port to a bridge: get (or create) the bridge port
 * object, run the bridge-type-specific join and replay the enslavement
 * related configuration. Returns 0 or a negative errno; unwinds on error.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev,
					       extack);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, br_dev, extack);
	if (err)
		goto err_replay;

	return 0;

err_replay:
	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
2910
/* Release a Spectrum port from a bridge: run the bridge-type-specific
 * leave, clear port security and drop the bridge port reference. Silently
 * returns if the bridge or bridge port is not tracked by the driver.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	mlxsw_sp_port_security_set(mlxsw_sp_port, false);
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2931
/* Offload a VxLAN device enslaved to @br_dev by dispatching to the
 * bridge-type-specific vxlan_join operation.
 */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2946
/* Stop offloading a VxLAN device: disable NVE on the FID bound to its VNI
 * and release the FID references taken during join.
 */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop both the reference we just took during lookup and the reference
	 * the VXLAN device took.
	 */
	mlxsw_sp_fid_put(fid);
	mlxsw_sp_fid_put(fid);
}
2965
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2966static void
2967mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2968				      enum mlxsw_sp_l3proto *proto,
2969				      union mlxsw_sp_l3addr *addr)
2970{
2971	if (vxlan_addr->sa.sa_family == AF_INET) {
2972		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2973		*proto = MLXSW_SP_L3_PROTO_IPV4;
2974	} else {
2975		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2976		*proto = MLXSW_SP_L3_PROTO_IPV6;
2977	}
2978}
2979
2980static void
2981mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2982				      const union mlxsw_sp_l3addr *addr,
2983				      union vxlan_addr *vxlan_addr)
2984{
2985	switch (proto) {
2986	case MLXSW_SP_L3_PROTO_IPV4:
2987		vxlan_addr->sa.sa_family = AF_INET;
2988		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2989		break;
2990	case MLXSW_SP_L3_PROTO_IPV6:
2991		vxlan_addr->sa.sa_family = AF_INET6;
2992		vxlan_addr->sin6.sin6_addr = addr->addr6;
2993		break;
2994	}
2995}
2996
2997static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2998					      const char *mac,
2999					      enum mlxsw_sp_l3proto proto,
3000					      union mlxsw_sp_l3addr *addr,
3001					      __be32 vni, bool adding)
3002{
3003	struct switchdev_notifier_vxlan_fdb_info info;
3004	struct vxlan_dev *vxlan = netdev_priv(dev);
3005	enum switchdev_notifier_type type;
3006
3007	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
3008			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
3009	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
3010	info.remote_port = vxlan->cfg.dst_port;
3011	info.remote_vni = vni;
3012	info.remote_ifindex = 0;
3013	ether_addr_copy(info.eth_addr, mac);
3014	info.vni = vni;
3015	info.offloaded = adding;
3016	call_switchdev_notifiers(type, dev, &info.info, NULL);
3017}
3018
3019static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
3020					    const char *mac,
3021					    enum mlxsw_sp_l3proto proto,
3022					    union mlxsw_sp_l3addr *addr,
3023					    __be32 vni,
3024					    bool adding)
3025{
3026	if (netif_is_vxlan(dev))
3027		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
3028						  adding);
3029}
3030
3031static void
3032mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
3033			    const char *mac, u16 vid,
3034			    struct net_device *dev, bool offloaded, bool locked)
3035{
3036	struct switchdev_notifier_fdb_info info = {};
3037
3038	info.addr = mac;
3039	info.vid = vid;
3040	info.offloaded = offloaded;
3041	info.locked = locked;
3042	call_switchdev_notifiers(type, dev, &info.info, NULL);
3043}
3044
/* Process one learned / aged-out MAC record from an SFN query.
 *
 * On success the unicast FDB entry is (re-)programmed to the device and the
 * bridge is notified so its software FDB stays in sync. If the record cannot
 * be mapped to a known {port, FID}, the entry is deleted from the device
 * without any notification ("just_remove"), otherwise the device would keep
 * re-reporting it.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u16 local_port, vid, fid, evid = 0;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];

	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);

	if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
		return;
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* Towards the bridge, a VLAN-unaware bridge uses VID 0, while the
	 * device is always programmed with the {port, VID} pair's VID (evid).
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	evid = mlxsw_sp_port_vlan->vid;

	/* On a locked (security-enabled) port, do not program the learned
	 * entry; only report it to the bridge with the 'locked' flag set.
	 */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, evid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Unusable record: silently delete the entry from the device */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3112
/* LAG flavor of mlxsw_sp_fdb_notify_mac_process(): the record is keyed by
 * a LAG ID instead of a local port, and the entry is programmed via the
 * LAG-specific FDB operation. Unmappable records are silently removed from
 * the device ("just_remove") so it stops re-reporting them.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	bridge_device = bridge_port->bridge_device;
	/* VLAN-unaware bridges are reported to the bridge layer with VID 0;
	 * the device itself is programmed with the {port, VID} pair's VID.
	 */
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_port_vlan->vid;

	/* On a locked (security-enabled) port, only report the learned entry
	 * to the bridge with the 'locked' flag set; do not program it.
	 */
	if (adding && mlxsw_sp_port->security) {
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE, mac,
					    vid, bridge_port->dev, false, true);
		return;
	}

do_fdb_op:
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding,
				    false);

	return;

just_remove:
	/* Unusable record: silently delete the entry from the device */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
3179
/* Resolve the NVE device, bridge VLAN and VNI for a unicast tunnel FDB
 * record keyed by @fid.
 *
 * Returns 0 with @nve_dev, @p_vid and @p_vni filled in, or -errno when the
 * record must not be processed: the FID has no NVE/VNI, the NVE device is
 * gone or down, or (for additions) learning is disabled on the bridge port
 * or the VxLAN device.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Do not learn on behalf of a bridge port with learning disabled */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	/* ...nor on behalf of a VxLAN device with learning disabled */
	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
3230
3231static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
3232						      char *sfn_pl,
3233						      int rec_index,
3234						      bool adding)
3235{
3236	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
3237	enum switchdev_notifier_type type;
3238	struct net_device *nve_dev;
3239	union mlxsw_sp_l3addr addr;
3240	struct mlxsw_sp_fid *fid;
3241	char mac[ETH_ALEN];
3242	u16 fid_index, vid;
3243	__be32 vni;
3244	u32 uip;
3245	int err;
3246
3247	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
3248				       &uip, &sfn_proto);
3249
3250	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
3251	if (!fid)
3252		goto err_fid_lookup;
3253
3254	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
3255					      (enum mlxsw_sp_l3proto) sfn_proto,
3256					      &addr);
3257	if (err)
3258		goto err_ip_resolve;
3259
3260	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
3261							  &nve_dev, &vid, &vni);
3262	if (err)
3263		goto err_fdb_process;
3264
3265	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
3266					     (enum mlxsw_sp_l3proto) sfn_proto,
3267					     &addr, adding, true);
3268	if (err)
3269		goto err_fdb_op;
3270
3271	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
3272					(enum mlxsw_sp_l3proto) sfn_proto,
3273					&addr, vni, adding);
3274
3275	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
3276			SWITCHDEV_FDB_DEL_TO_BRIDGE;
3277	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding, false);
3278
3279	mlxsw_sp_fid_put(fid);
3280
3281	return;
3282
3283err_fdb_op:
3284err_fdb_process:
3285err_ip_resolve:
3286	mlxsw_sp_fid_put(fid);
3287err_fid_lookup:
3288	/* Remove an FDB entry in case we cannot process it. Otherwise the
3289	 * device will keep sending the same notification over and over again.
3290	 */
3291	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
3292				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
3293				       false, true);
3294}
3295
3296static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
3297					    char *sfn_pl, int rec_index)
3298{
3299	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
3300	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
3301		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3302						rec_index, true);
3303		break;
3304	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
3305		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
3306						rec_index, false);
3307		break;
3308	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
3309		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3310						    rec_index, true);
3311		break;
3312	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
3313		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
3314						    rec_index, false);
3315		break;
3316	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
3317		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3318							  rec_index, true);
3319		break;
3320	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
3321		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
3322							  rec_index, false);
3323		break;
3324	}
3325}
3326
3327#define MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION 10
 
 
 
 
 
 
3328
/* Periodic work that polls the device for FDB notifications (learn /
 * age-out records) via the SFN register, processing at most
 * MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION full queries per run.
 *
 * Runs under RTNL because record processing looks up bridge state and
 * emits switchdev notifiers. The work is rescheduled only while at least
 * one bridge exists; the second argument to the reschedule helper flags
 * whether the query budget was exhausted (more records likely pending).
 */
static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
{
	struct mlxsw_sp_bridge *bridge;
	struct mlxsw_sp *mlxsw_sp;
	bool reschedule = false;
	char *sfn_pl;
	int queries;
	u8 num_rec;
	int i;
	int err;

	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
	if (!sfn_pl)
		return;

	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
	mlxsw_sp = bridge->mlxsw_sp;

	rtnl_lock();
	/* No bridges means nothing can be learned; stop polling entirely */
	if (list_empty(&bridge->bridges_list))
		goto out;
	reschedule = true;
	queries = MLXSW_SP_FDB_SFN_QUERIES_PER_SESSION;
	while (queries > 0) {
		mlxsw_reg_sfn_pack(sfn_pl);
		err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
		if (err) {
			dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
			goto out;
		}
		num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
		/* A partially-filled response means the queue was drained */
		if (num_rec != MLXSW_REG_SFN_REC_MAX_COUNT)
			goto out;
		queries--;
	}

out:
	rtnl_unlock();
	kfree(sfn_pl);
	if (!reschedule)
		return;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp, !queries);
}
3374
/* Deferred-work context carrying a switchdev FDB event from atomic
 * notifier context to process context. The union mirrors the two notifier
 * info types this driver handles (bridge FDB vs. VxLAN FDB).
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	netdevice_tracker dev_tracker;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;	/* referenced via dev_tracker until the work runs */
	unsigned long event;
};
3385
/* Reflect a bridge FDB add/del that points at a VxLAN device into the
 * device's tunnel FDB. The remote IP is obtained from the VxLAN driver's
 * own FDB entry for the MAC; on success, offload notifications are sent
 * towards both the VxLAN driver and the bridge.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	fdb_info = &switchdev_work->fdb_info;
	/* Without a matching unicast entry in the VxLAN FDB there is no
	 * remote IP to program; nothing to do.
	 */
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Removal is best-effort; the offload notification is sent
		 * regardless of the op's outcome.
		 */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
3434
/* Validate and process a bridge FDB event whose target is an NVE (VxLAN)
 * device: check the event is relevant (user-added, device running, bridged
 * above one of our ports, FID mapped to a VNI) and then hand it to
 * mlxsw_sp_switchdev_bridge_vxlan_fdb_event().
 */
static void
mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
					switchdev_work)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_fid *fid;
	__be32 vni;
	int err;

	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
		return;

	/* Only user-added, non-local entries are offloaded on addition */
	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
	    (!switchdev_work->fdb_info.added_by_user ||
	     switchdev_work->fdb_info.is_local))
		return;

	if (!netif_running(dev))
		return;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return;
	if (!netif_is_bridge_master(br_dev))
		return;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = bridge_device->ops->fid_lookup(bridge_device,
					     switchdev_work->fdb_info.vid);
	if (!fid)
		return;

	/* A FID without a VNI cannot be the target of a tunnel FDB entry */
	err = mlxsw_sp_fid_vni(fid, &vni);
	if (err)
		goto out;

	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
						  vni);

out:
	mlxsw_sp_fid_put(fid);
}
3485
/* Process-context handler for deferred bridge FDB events (queued by
 * mlxsw_sp_switchdev_event()). Programs/removes the FDB entry in the
 * device and releases the resources taken when the work was queued (the
 * copied MAC address and the device reference).
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* VxLAN targets take a different path than front-panel ports */
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-added, non-local entries are offloaded */
		if (!fdb_info->added_by_user || fdb_info->is_local)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true, false);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);

}
3537
/* Handle SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE for @switchdev_work->dev.
 *
 * An all-zeroes MAC denotes the VxLAN default remote, which is programmed
 * as a flood IP on the FID. A unicast MAC is programmed as a tunnel FDB
 * entry, but only if the bridge's own FDB also points that MAC at the
 * VxLAN device (the device has a single FDB table, Linux has two).
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;

	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	/* No FID with this VNI means the VxLAN device is not offloaded */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info, NULL);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info, NULL);
	/* Also mark the entry as offloaded in the bridge's FDB */
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true,
				    false);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
3609
/* Handle SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE for @switchdev_work->dev:
 * remove the previously-offloaded flood IP (all-zeroes MAC, i.e. the
 * default remote) or unicast tunnel FDB entry. Entries that were never
 * marked offloaded are ignored.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	struct net_device *br_dev = netdev_master_upper_dev_get(dev);

	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	u16 vid;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	if (!vxlan_fdb_info->offloaded)
		return;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	if (is_zero_ether_addr(vxlan_fdb_info->eth_addr)) {
		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
		mlxsw_sp_fid_put(fid);
		return;
	}

	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
				       mlxsw_sp_fid_index(fid), proto, &addr,
				       false, false);
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	/* Clear the offloaded mark in the bridge's FDB as well */
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, false,
				    false);

	mlxsw_sp_fid_put(fid);
}
3655
/* Process-context handler for deferred VxLAN FDB events. Re-validates the
 * device topology under RTNL (it may have changed since the event was
 * queued in atomic context) before dispatching to the add/del handler, and
 * finally drops the device reference taken at queue time.
 */
static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	rtnl_lock();

	if (!netif_running(dev))
		goto out;
	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		goto out;
	if (!netif_is_bridge_master(br_dev))
		goto out;
	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
		break;
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
		break;
	}

out:
	rtnl_unlock();
	netdev_put(dev, &switchdev_work->dev_tracker);
	kfree(switchdev_work);

}
3691
/* Validate a VxLAN FDB notification against the device's offload
 * limitations (default remote port and VNI only, no local interface, no
 * multicast MAC or destination IP) and, if acceptable, copy the notifier
 * info into the deferred work item. Returns -EOPNOTSUPP with an extack
 * message otherwise.
 */
static int
mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
				      switchdev_work,
				      struct switchdev_notifier_info *info)
{
	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct vxlan_config *cfg = &vxlan->cfg;
	struct netlink_ext_ack *extack;

	extack = switchdev_notifier_info_to_extack(info);
	vxlan_fdb_info = container_of(info,
				      struct switchdev_notifier_vxlan_fdb_info,
				      info);

	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_vni != cfg->vni ||
	    vxlan_fdb_info->vni != cfg->vni) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_fdb_info->remote_ifindex) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
		return -EOPNOTSUPP;
	}
	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
		return -EOPNOTSUPP;
	}
	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
		return -EOPNOTSUPP;
	}

	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;

	return 0;
}
3733
/* Called under rcu_read_lock() */
/* Atomic switchdev notifier: port attribute changes are handled inline;
 * FDB events relevant to one of our ports (or a tunnel device bridged
 * above one) are copied into a work item and deferred to process context,
 * where RTNL can be taken.
 */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* Atomic context: GFP_ATOMIC allocations, no blocking */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
	case SWITCHDEV_FDB_ADD_TO_BRIDGE:
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's MAC buffer is not valid after we return;
		 * take a private copy for the deferred work.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		netdev_hold(dev, &switchdev_work->dev_tracker, GFP_ATOMIC);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3815
/* Notifier block registered for (atomic) switchdev events */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3819
/* Handle the addition of VLAN @vid on a VxLAN device enslaved to a
 * VLAN-aware bridge, reconciling the VLAN's PVID / egress-untagged flags
 * with the VLAN-to-VNI mapping. The five possible cases are spelled out
 * in the inline comments below.
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev,
						      vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: The new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, extack);

	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Roll back to the previous VLAN-to-VNI mapping */
	bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, old_vid, NULL);

	return err;
}
3905
3906static void
3907mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3908				  struct mlxsw_sp_bridge_device *bridge_device,
3909				  const struct net_device *vxlan_dev, u16 vid)
3910{
3911	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3912	__be32 vni = vxlan->cfg.vni;
3913	struct mlxsw_sp_fid *fid;
3914
3915	if (!netif_running(vxlan_dev))
3916		return;
3917
3918	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3919	if (!fid)
3920		return;
3921
3922	/* A different VLAN than the one mapped to the VNI is deleted */
3923	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3924		goto out;
3925
3926	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3927
3928out:
3929	mlxsw_sp_fid_put(fid);
3930}
3931
/* Switchdev PORT_VLAN add object handler for VxLAN devices. Marks the
 * event handled once it is established that the bridge above the VxLAN
 * device belongs to us, then defers the per-VLAN work to
 * mlxsw_sp_switchdev_vxlan_vlan_add(). VLAN-unaware bridges need no
 * VLAN bookkeeping.
 */
static int
mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;

	struct mlxsw_sp_bridge_device *bridge_device;
	struct netlink_ext_ack *extack;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;


	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
	br_dev = netdev_master_upper_dev_get(vxlan_dev);
	if (!br_dev)
		return 0;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return 0;

	port_obj_info->handled = true;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	if (!bridge_device->vlan_enabled)
		return 0;

	return mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
						 vxlan_dev, vlan->vid,
						 flag_untagged,
						 flag_pvid, extack);
}
3969
/* SWITCHDEV_OBJ_ID_PORT_VLAN deletion on a VXLAN device; mirror of
 * mlxsw_sp_switchdev_vxlan_vlans_add(), but deletion cannot fail.
 */
static void
mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
				   struct switchdev_notifier_port_obj_info *
				   port_obj_info)
{
	struct switchdev_obj_port_vlan *vlan =
		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp *mlxsw_sp;
	struct net_device *br_dev;

	br_dev = netdev_master_upper_dev_get(vxlan_dev);
	if (!br_dev)
		return;

	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
	if (!mlxsw_sp)
		return;

	port_obj_info->handled = true;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	if (!bridge_device->vlan_enabled)
		return;

	mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, vxlan_dev,
					  vlan->vid);
}
4001
4002static int
4003mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
4004					struct switchdev_notifier_port_obj_info *
4005					port_obj_info)
4006{
4007	int err = 0;
4008
4009	switch (port_obj_info->obj->id) {
4010	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4011		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
4012							 port_obj_info);
4013		break;
4014	default:
4015		break;
4016	}
4017
4018	return err;
4019}
4020
4021static void
4022mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
4023					struct switchdev_notifier_port_obj_info *
4024					port_obj_info)
4025{
4026	switch (port_obj_info->obj->id) {
4027	case SWITCHDEV_OBJ_ID_PORT_VLAN:
4028		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
4029		break;
4030	default:
4031		break;
4032	}
4033}
4034
/* Blocking switchdev notifier: routes object add/del and attribute set
 * events either to the VXLAN-specific handlers or to the generic switchdev
 * helpers that walk down to mlxsw ports.
 */
static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err = 0;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		if (netif_is_vxlan(dev))
			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
		else
			err = switchdev_handle_port_obj_add(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_add);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		/* VXLAN deletion cannot fail; err stays 0 in that branch. */
		if (netif_is_vxlan(dev))
			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
		else
			err = switchdev_handle_port_obj_del(dev, ptr,
							mlxsw_sp_port_dev_check,
							mlxsw_sp_port_obj_del);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}
4067
/* Registered in mlxsw_sp_fdb_init(); may sleep (blocking chain). */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
4071
/* Accessor for the cached STP state (BR_STATE_*) of a bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
4077
/* Set the default ageing time in hardware and register the switchdev
 * (atomic and blocking) notifiers. The FDB notification work is initialized
 * here but, as visible in this function, not yet scheduled.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	struct notifier_block *nb;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	nb = &mlxsw_sp_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	return err;
}
4111
/* Tear down in reverse order of mlxsw_sp_fdb_init(): stop the FDB
 * notification work, then unregister both switchdev notifiers.
 */
static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct notifier_block *nb;

	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);

	nb = &mlxsw_sp_switchdev_blocking_notifier;
	unregister_switchdev_blocking_notifier(nb);

	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
}
4123
/* Spectrum-1: install the ASIC-generation specific 802.1ad bridge ops. */
static void mlxsw_sp1_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp1_bridge_8021ad_ops;
}
4128
/* Per-ASIC switchdev ops table for Spectrum-1. */
const struct mlxsw_sp_switchdev_ops mlxsw_sp1_switchdev_ops = {
	.init	= mlxsw_sp1_switchdev_init,
};
4132
/* Spectrum-2 and later: install the matching 802.1ad bridge ops. */
static void mlxsw_sp2_switchdev_init(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp->bridge->bridge_8021ad_ops = &mlxsw_sp2_bridge_8021ad_ops;
}
4137
/* Per-ASIC switchdev ops table for Spectrum-2 and later. */
const struct mlxsw_sp_switchdev_ops mlxsw_sp2_switchdev_ops = {
	.init	= mlxsw_sp2_switchdev_init,
};
4141
4142int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
4143{
4144	struct mlxsw_sp_bridge *bridge;
4145
4146	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
4147	if (!bridge)
4148		return -ENOMEM;
4149	mlxsw_sp->bridge = bridge;
4150	bridge->mlxsw_sp = mlxsw_sp;
4151
4152	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
4153
4154	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
4155	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
4156
4157	mlxsw_sp->switchdev_ops->init(mlxsw_sp);
4158
4159	return mlxsw_sp_fdb_init(mlxsw_sp);
4160}
4161
/* Reverse of mlxsw_sp_switchdev_init(); all bridges must be gone by now. */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
4168
v5.4
   1// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
   2/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
   3
   4#include <linux/kernel.h>
   5#include <linux/types.h>
   6#include <linux/netdevice.h>
   7#include <linux/etherdevice.h>
   8#include <linux/slab.h>
   9#include <linux/device.h>
  10#include <linux/skbuff.h>
  11#include <linux/if_vlan.h>
  12#include <linux/if_bridge.h>
  13#include <linux/workqueue.h>
  14#include <linux/jiffies.h>
  15#include <linux/rtnetlink.h>
  16#include <linux/netlink.h>
  17#include <net/switchdev.h>
  18#include <net/vxlan.h>
  19
  20#include "spectrum_span.h"
  21#include "spectrum_switchdev.h"
  22#include "spectrum.h"
  23#include "core.h"
  24#include "reg.h"
  25
  26struct mlxsw_sp_bridge_ops;
  27
/* Per-ASIC bridge bookkeeping shared by all offloaded bridges. */
struct mlxsw_sp_bridge {
	struct mlxsw_sp *mlxsw_sp;
	struct {
		struct delayed_work dw;	/* periodic FDB notification work */
#define MLXSW_SP_DEFAULT_LEARNING_INTERVAL 100
		unsigned int interval; /* ms */
	} fdb_notify;
#define MLXSW_SP_MIN_AGEING_TIME 10
#define MLXSW_SP_MAX_AGEING_TIME 1000000
#define MLXSW_SP_DEFAULT_AGEING_TIME 300
	u32 ageing_time;	/* seconds, see mlxsw_sp_ageing_set() */
	/* Only one VLAN-aware bridge may be offloaded at a time. */
	bool vlan_enabled_exists;
	struct list_head bridges_list;	/* of mlxsw_sp_bridge_device */
	DECLARE_BITMAP(mids_bitmap, MLXSW_SP_MID_MAX);
	const struct mlxsw_sp_bridge_ops *bridge_8021q_ops;
	const struct mlxsw_sp_bridge_ops *bridge_8021d_ops;
};
  45
/* State for one offloaded Linux bridge device. */
struct mlxsw_sp_bridge_device {
	struct net_device *dev;		/* the bridge netdev */
	struct list_head list;		/* node in bridge->bridges_list */
	struct list_head ports_list;	/* of mlxsw_sp_bridge_port */
	struct list_head mids_list;	/* multicast group (MID) entries */
	u8 vlan_enabled:1,		/* VLAN-aware (802.1Q) bridge */
	   multicast_enabled:1,		/* IGMP/MLD snooping enabled */
	   mrouter:1;			/* bridge itself is an mrouter */
	const struct mlxsw_sp_bridge_ops *ops;	/* 802.1Q or 802.1D ops */
};
  56
/* One netdev (port or LAG) enslaved to an offloaded bridge. */
struct mlxsw_sp_bridge_port {
	struct net_device *dev;		/* the bridge port netdev */
	struct mlxsw_sp_bridge_device *bridge_device;
	struct list_head list;		/* node in bridge_device->ports_list */
	struct list_head vlans_list;	/* of mlxsw_sp_bridge_vlan */
	unsigned int ref_count;		/* see mlxsw_sp_bridge_port_get/put */
	u8 stp_state;			/* cached BR_STATE_* */
	unsigned long flags;		/* BR_LEARNING, BR_FLOOD, ... */
	bool mrouter;
	bool lagged;			/* selects the union member below */
	union {
		u16 lag_id;
		u16 system_port;
	};
};
  72
/* One VLAN configured on a bridge port; links the port VLANs of all member
 * ports that share this VID.
 */
struct mlxsw_sp_bridge_vlan {
	struct list_head list;		/* node in bridge_port->vlans_list */
	struct list_head port_vlan_list; /* of mlxsw_sp_port_vlan */
	u16 vid;
};
  78
/* Operations that differ between VLAN-aware (802.1Q) and VLAN-unaware
 * (802.1D) bridges: port/VXLAN membership and FID resolution.
 */
struct mlxsw_sp_bridge_ops {
	int (*port_join)(struct mlxsw_sp_bridge_device *bridge_device,
			 struct mlxsw_sp_bridge_port *bridge_port,
			 struct mlxsw_sp_port *mlxsw_sp_port,
			 struct netlink_ext_ack *extack);
	void (*port_leave)(struct mlxsw_sp_bridge_device *bridge_device,
			   struct mlxsw_sp_bridge_port *bridge_port,
			   struct mlxsw_sp_port *mlxsw_sp_port);
	int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device,
			  const struct net_device *vxlan_dev, u16 vid,
			  struct netlink_ext_ack *extack);
	/* fid_get creates/references the FID for a VID; fid_lookup only
	 * finds an existing one.
	 */
	struct mlxsw_sp_fid *
		(*fid_get)(struct mlxsw_sp_bridge_device *bridge_device,
			   u16 vid, struct netlink_ext_ack *extack);
	struct mlxsw_sp_fid *
		(*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid);
	u16 (*fid_vid)(struct mlxsw_sp_bridge_device *bridge_device,
		       const struct mlxsw_sp_fid *fid);
};
  99
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 100static int
 101mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
 102			       struct mlxsw_sp_bridge_port *bridge_port,
 103			       u16 fid_index);
 104
 105static void
 106mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
 107			       struct mlxsw_sp_bridge_port *bridge_port);
 
 108
 109static void
 110mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
 111				   struct mlxsw_sp_bridge_device
 112				   *bridge_device);
 113
 114static void
 115mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
 116				 struct mlxsw_sp_bridge_port *bridge_port,
 117				 bool add);
 118
/* Find the offloaded bridge state for a bridge netdev, or NULL. */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_find(const struct mlxsw_sp_bridge *bridge,
			    const struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	list_for_each_entry(bridge_device, &bridge->bridges_list, list)
		if (bridge_device->dev == br_dev)
			return bridge_device;

	return NULL;
}
 131
/* True if the given bridge netdev is currently offloaded by this ASIC. */
bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp,
					 const struct net_device *br_dev)
{
	return !!mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
}
 137
/* netdev_walk_all_upper_dev_rcu() callback: destroy the RIF (if any) of an
 * upper device. Always returns 0 so the walk continues.
 */
static int mlxsw_sp_bridge_device_upper_rif_destroy(struct net_device *dev,
						    void *data)
{
	struct mlxsw_sp *mlxsw_sp = data;

	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	return 0;
}
 146
/* Destroy the router interface of the bridge device itself and of all of
 * its upper devices (e.g. VLAN uppers).
 */
static void mlxsw_sp_bridge_device_rifs_destroy(struct mlxsw_sp *mlxsw_sp,
						struct net_device *dev)
{
	mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, dev);
	netdev_walk_all_upper_dev_rcu(dev,
				      mlxsw_sp_bridge_device_upper_rif_destroy,
				      mlxsw_sp);
}
 155
/* Allocate and register offload state for a bridge netdev. Only a single
 * VLAN-aware bridge can be offloaded at a time. Returns the new device or
 * an ERR_PTR.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_create(struct mlxsw_sp_bridge *bridge,
			      struct net_device *br_dev)
{
	struct device *dev = bridge->mlxsw_sp->bus_info->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	bool vlan_enabled = br_vlan_enabled(br_dev);

	if (vlan_enabled && bridge->vlan_enabled_exists) {
		dev_err(dev, "Only one VLAN-aware bridge is supported\n");
		return ERR_PTR(-EINVAL);
	}

	bridge_device = kzalloc(sizeof(*bridge_device), GFP_KERNEL);
	if (!bridge_device)
		return ERR_PTR(-ENOMEM);

	/* Snapshot the bridge's current configuration. */
	bridge_device->dev = br_dev;
	bridge_device->vlan_enabled = vlan_enabled;
	bridge_device->multicast_enabled = br_multicast_enabled(br_dev);
	bridge_device->mrouter = br_multicast_router(br_dev);
	INIT_LIST_HEAD(&bridge_device->ports_list);
	if (vlan_enabled) {
		bridge->vlan_enabled_exists = true;
		bridge_device->ops = bridge->bridge_8021q_ops;
	} else {
		bridge_device->ops = bridge->bridge_8021d_ops;
	}
	INIT_LIST_HEAD(&bridge_device->mids_list);
	list_add(&bridge_device->list, &bridge->bridges_list);

	return bridge_device;
}
 189
/* Unregister and free a bridge device. All ports and MDB entries must have
 * been removed; associated RIFs are torn down here.
 */
static void
mlxsw_sp_bridge_device_destroy(struct mlxsw_sp_bridge *bridge,
			       struct mlxsw_sp_bridge_device *bridge_device)
{
	mlxsw_sp_bridge_device_rifs_destroy(bridge->mlxsw_sp,
					    bridge_device->dev);
	list_del(&bridge_device->list);
	/* Allow a new VLAN-aware bridge to be offloaded. */
	if (bridge_device->vlan_enabled)
		bridge->vlan_enabled_exists = false;
	WARN_ON(!list_empty(&bridge_device->ports_list));
	WARN_ON(!list_empty(&bridge_device->mids_list));
	kfree(bridge_device);
}
 203
/* Find an existing bridge device or create a new one. Returns a valid
 * pointer or an ERR_PTR from the create path.
 */
static struct mlxsw_sp_bridge_device *
mlxsw_sp_bridge_device_get(struct mlxsw_sp_bridge *bridge,
			   struct net_device *br_dev)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (bridge_device)
		return bridge_device;

	return mlxsw_sp_bridge_device_create(bridge, br_dev);
}
 216
/* Release a bridge device; it is destroyed once no ports reference it. */
static void
mlxsw_sp_bridge_device_put(struct mlxsw_sp_bridge *bridge,
			   struct mlxsw_sp_bridge_device *bridge_device)
{
	if (list_empty(&bridge_device->ports_list))
		mlxsw_sp_bridge_device_destroy(bridge, bridge_device);
}
 224
/* Linear scan of a bridge device's port list for a bridge-port netdev. */
static struct mlxsw_sp_bridge_port *
__mlxsw_sp_bridge_port_find(const struct mlxsw_sp_bridge_device *bridge_device,
			    const struct net_device *brport_dev)
{
	struct mlxsw_sp_bridge_port *bridge_port;

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		if (bridge_port->dev == brport_dev)
			return bridge_port;
	}

	return NULL;
}
 238
/* Resolve a bridge-port netdev to its offload state via its bridge master.
 * Returns NULL if the netdev has no master or the bridge is not offloaded.
 */
struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge,
			  struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!br_dev)
		return NULL;

	bridge_device = mlxsw_sp_bridge_device_find(bridge, br_dev);
	if (!bridge_device)
		return NULL;

	return __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
}
 255
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 256static struct mlxsw_sp_bridge_port *
 257mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device,
 258			    struct net_device *brport_dev)
 
 259{
 260	struct mlxsw_sp_bridge_port *bridge_port;
 261	struct mlxsw_sp_port *mlxsw_sp_port;
 
 262
 263	bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL);
 264	if (!bridge_port)
 265		return NULL;
 266
 267	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev);
 268	bridge_port->lagged = mlxsw_sp_port->lagged;
 269	if (bridge_port->lagged)
 270		bridge_port->lag_id = mlxsw_sp_port->lag_id;
 271	else
 272		bridge_port->system_port = mlxsw_sp_port->local_port;
 273	bridge_port->dev = brport_dev;
 274	bridge_port->bridge_device = bridge_device;
 275	bridge_port->stp_state = BR_STATE_DISABLED;
 276	bridge_port->flags = BR_LEARNING | BR_FLOOD | BR_LEARNING_SYNC |
 277			     BR_MCAST_FLOOD;
 278	INIT_LIST_HEAD(&bridge_port->vlans_list);
 279	list_add(&bridge_port->list, &bridge_device->ports_list);
 280	bridge_port->ref_count = 1;
 281
 
 
 
 
 
 282	return bridge_port;
 
 
 
 
 
 283}
 284
/* Unlink and free a bridge port; all of its VLANs must be gone. */
static void
mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port)
{
	list_del(&bridge_port->list);
	WARN_ON(!list_empty(&bridge_port->vlans_list));
	kfree(bridge_port);
}
 292
/* Get a referenced bridge port for a bridge-port netdev, creating both the
 * bridge device and the port on first use. Returns ERR_PTR on failure.
 */
static struct mlxsw_sp_bridge_port *
mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge,
			 struct net_device *brport_dev)
{
	struct net_device *br_dev = netdev_master_upper_dev_get(brport_dev);
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_find(bridge, brport_dev);
	if (bridge_port) {
		bridge_port->ref_count++;
		return bridge_port;
	}

	bridge_device = mlxsw_sp_bridge_device_get(bridge, br_dev);
	if (IS_ERR(bridge_device))
		return ERR_CAST(bridge_device);

	bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev);
	if (!bridge_port) {
		err = -ENOMEM;
		goto err_bridge_port_create;
	}

	return bridge_port;

err_bridge_port_create:
	/* Drops the bridge device again if we were its first user. */
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
	return ERR_PTR(err);
}
 324
/* Drop a reference on a bridge port; on last put, destroy it and release
 * the owning bridge device as well.
 */
static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge,
				     struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	if (--bridge_port->ref_count != 0)
		return;
	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_bridge_port_destroy(bridge_port);
	mlxsw_sp_bridge_device_put(bridge, bridge_device);
}
 336
/* Find the port VLAN of a port that is a member of the given bridge; for a
 * VLAN-aware bridge, the VID must also match (a VLAN-unaware bridge has a
 * single port VLAN per port, so the VID is ignored).
 */
static struct mlxsw_sp_port_vlan *
mlxsw_sp_port_vlan_find_by_bridge(struct mlxsw_sp_port *mlxsw_sp_port,
				  const struct mlxsw_sp_bridge_device *
				  bridge_device,
				  u16 vid)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		if (!mlxsw_sp_port_vlan->bridge_port)
			continue;
		if (mlxsw_sp_port_vlan->bridge_port->bridge_device !=
		    bridge_device)
			continue;
		if (bridge_device->vlan_enabled &&
		    mlxsw_sp_port_vlan->vid != vid)
			continue;
		return mlxsw_sp_port_vlan;
	}

	return NULL;
}
 360
/* Find the port VLAN whose FID has the given hardware index, or NULL. */
static struct mlxsw_sp_port_vlan*
mlxsw_sp_port_vlan_find_by_fid(struct mlxsw_sp_port *mlxsw_sp_port,
			       u16 fid_index)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
			    list) {
		struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;

		/* A port VLAN may not have a FID attached yet. */
		if (fid && mlxsw_sp_fid_index(fid) == fid_index)
			return mlxsw_sp_port_vlan;
	}

	return NULL;
}
 377
/* Find a VLAN entry on a bridge port by VID, or NULL. */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_find(const struct mlxsw_sp_bridge_port *bridge_port,
			  u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		if (bridge_vlan->vid == vid)
			return bridge_vlan;
	}

	return NULL;
}
 391
/* Allocate a VLAN entry and link it to the bridge port. NULL on OOM. */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_create(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = kzalloc(sizeof(*bridge_vlan), GFP_KERNEL);
	if (!bridge_vlan)
		return NULL;

	INIT_LIST_HEAD(&bridge_vlan->port_vlan_list);
	bridge_vlan->vid = vid;
	list_add(&bridge_vlan->list, &bridge_port->vlans_list);

	return bridge_vlan;
}
 407
/* Unlink and free a bridge VLAN; no port VLANs may still reference it. */
static void
mlxsw_sp_bridge_vlan_destroy(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	list_del(&bridge_vlan->list);
	WARN_ON(!list_empty(&bridge_vlan->port_vlan_list));
	kfree(bridge_vlan);
}
 415
/* Find or create the VLAN entry for a VID on a bridge port. */
static struct mlxsw_sp_bridge_vlan *
mlxsw_sp_bridge_vlan_get(struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;

	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	if (bridge_vlan)
		return bridge_vlan;

	return mlxsw_sp_bridge_vlan_create(bridge_port, vid);
}
 427
/* Destroy a bridge VLAN once no port VLANs are attached to it. */
static void mlxsw_sp_bridge_vlan_put(struct mlxsw_sp_bridge_vlan *bridge_vlan)
{
	if (list_empty(&bridge_vlan->port_vlan_list))
		mlxsw_sp_bridge_vlan_destroy(bridge_vlan);
}
 433
/* Apply an STP state to the given port's membership of one bridge VLAN.
 * A no-op (returns 0) if the port has no port VLAN on this bridge VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_vlan *bridge_vlan,
				  u8 state)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		/* At most one port VLAN per port on a bridge VLAN. */
		return mlxsw_sp_port_vid_stp_set(mlxsw_sp_port,
						 bridge_vlan->vid, state);
	}

	return 0;
}
 451
/* SWITCHDEV_ATTR_ID_PORT_STP_STATE handler. Applies the new state to every
 * VLAN of the bridge port and rolls back to the previous state on failure.
 */
static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
					    struct switchdev_trans *trans,
					    struct net_device *orig_dev,
					    u8 state)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	/* Hardware is only touched in the commit phase. */
	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port,
							bridge_vlan, state);
		if (err)
			goto err_port_bridge_vlan_stp_set;
	}

	bridge_port->stp_state = state;

	return 0;

err_port_bridge_vlan_stp_set:
	/* Restore the cached state on the VLANs already updated. */
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_stp_set(mlxsw_sp_port, bridge_vlan,
						  bridge_port->stp_state);
	return err;
}
 490
/* Set flood membership for one packet type on the port's FID of one bridge
 * VLAN. A no-op (returns 0) if the port is not a member of this VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    struct mlxsw_sp_bridge_vlan *bridge_vlan,
				    enum mlxsw_sp_flood_type packet_type,
				    bool member)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_fid_flood_set(mlxsw_sp_port_vlan->fid,
					      packet_type,
					      mlxsw_sp_port->local_port,
					      member);
	}

	return 0;
}
 511
/* Set flood membership for one packet type on every VLAN of a bridge port,
 * undoing already-applied VLANs (by setting the opposite membership) on
 * failure.
 */
static int
mlxsw_sp_bridge_port_flood_table_set(struct mlxsw_sp_port *mlxsw_sp_port,
				     struct mlxsw_sp_bridge_port *bridge_port,
				     enum mlxsw_sp_flood_type packet_type,
				     bool member)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port,
							  bridge_vlan,
							  packet_type,
							  member);
		if (err)
			goto err_port_bridge_vlan_flood_set;
	}

	return 0;

err_port_bridge_vlan_flood_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_flood_set(mlxsw_sp_port, bridge_vlan,
						    packet_type, !member);
	return err;
}
 539
/* Enable/disable learning for the given port's membership of one bridge
 * VLAN. A no-op (returns 0) if the port is not a member of this VLAN.
 */
static int
mlxsw_sp_port_bridge_vlan_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				       struct mlxsw_sp_bridge_vlan *bridge_vlan,
				       bool set)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 vid = bridge_vlan->vid;

	list_for_each_entry(mlxsw_sp_port_vlan, &bridge_vlan->port_vlan_list,
			    bridge_vlan_node) {
		if (mlxsw_sp_port_vlan->mlxsw_sp_port != mlxsw_sp_port)
			continue;
		return mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, set);
	}

	return 0;
}
 557
/* Enable/disable learning on every VLAN of a bridge port, reverting the
 * already-updated VLANs on failure.
 */
static int
mlxsw_sp_bridge_port_learning_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct mlxsw_sp_bridge_port *bridge_port,
				  bool set)
{
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	int err;

	list_for_each_entry(bridge_vlan, &bridge_port->vlans_list, list) {
		err = mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
							     bridge_vlan, set);
		if (err)
			goto err_port_bridge_vlan_learning_set;
	}

	return 0;

err_port_bridge_vlan_learning_set:
	list_for_each_entry_continue_reverse(bridge_vlan,
					     &bridge_port->vlans_list, list)
		mlxsw_sp_port_bridge_vlan_learning_set(mlxsw_sp_port,
						       bridge_vlan, !set);
	return err;
}
 582
/* PRE_BRIDGE_FLAGS handler: reject any bridge port flag this driver cannot
 * offload, so the bridge falls back to software for unsupported flags.
 */
static int mlxsw_sp_port_attr_br_pre_flags_set(struct mlxsw_sp_port
					       *mlxsw_sp_port,
					       struct switchdev_trans *trans,
					       unsigned long brport_flags)
{
	if (brport_flags & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD))
		return -EINVAL;

	return 0;
}
 593
/* BRIDGE_FLAGS handler: program unicast flood, learning and (when multicast
 * snooping is disabled on the bridge) multicast flood, then cache the flags.
 */
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
					   struct switchdev_trans *trans,
					   struct net_device *orig_dev,
					   unsigned long brport_flags)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* Port may have failed enslavement; the deferred op still runs. */
	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_UC,
						   brport_flags & BR_FLOOD);
	if (err)
		return err;

	err = mlxsw_sp_bridge_port_learning_set(mlxsw_sp_port, bridge_port,
						brport_flags & BR_LEARNING);
	if (err)
		return err;

	/* With snooping enabled, MC flood is driven by mrouter state
	 * instead of BR_MCAST_FLOOD (see mlxsw_sp_mc_flood()).
	 */
	if (bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   brport_flags &
						   BR_MCAST_FLOOD);
	if (err)
		return err;

out:
	memcpy(&bridge_port->flags, &brport_flags, sizeof(brport_flags));
	return 0;
}
 635
 636static int mlxsw_sp_ageing_set(struct mlxsw_sp *mlxsw_sp, u32 ageing_time)
 637{
 638	char sfdat_pl[MLXSW_REG_SFDAT_LEN];
 639	int err;
 640
 641	mlxsw_reg_sfdat_pack(sfdat_pl, ageing_time);
 642	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdat), sfdat_pl);
 643	if (err)
 644		return err;
 645	mlxsw_sp->bridge->ageing_time = ageing_time;
 646	return 0;
 647}
 648
 649static int mlxsw_sp_port_attr_br_ageing_set(struct mlxsw_sp_port *mlxsw_sp_port,
 650					    struct switchdev_trans *trans,
 651					    unsigned long ageing_clock_t)
 652{
 653	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 654	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
 655	u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000;
 656
 657	if (switchdev_trans_ph_prepare(trans)) {
 658		if (ageing_time < MLXSW_SP_MIN_AGEING_TIME ||
 659		    ageing_time > MLXSW_SP_MAX_AGEING_TIME)
 660			return -ERANGE;
 661		else
 662			return 0;
 663	}
 664
 665	return mlxsw_sp_ageing_set(mlxsw_sp, ageing_time);
 666}
 667
/* BRIDGE_VLAN_FILTERING handler: toggling VLAN filtering on an offloaded
 * bridge is not supported, so only the no-change case succeeds. Validation
 * happens in the prepare phase; commit is a no-op.
 */
static int mlxsw_sp_port_attr_br_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool vlan_enabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (!switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	if (bridge_device->vlan_enabled == vlan_enabled)
		return 0;

	netdev_err(bridge_device->dev, "VLAN filtering can't be changed for existing bridge\n");
	return -EINVAL;
}
 689
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* PORT_MROUTER handler: when multicast snooping is enabled, an mrouter port
 * floods all multicast and is added to existing MDB entries; the state is
 * always cached for later snooping toggles.
 */
static int mlxsw_sp_port_attr_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
					  struct switchdev_trans *trans,
					  struct net_device *orig_dev,
					  bool is_port_mrouter)
{
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp_port->mlxsw_sp->bridge,
						orig_dev);
	if (!bridge_port)
		return 0;

	/* With snooping disabled, MC flood follows BR_MCAST_FLOOD instead;
	 * only remember the mrouter state.
	 */
	if (!bridge_port->bridge_device->multicast_enabled)
		goto out;

	err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port, bridge_port,
						   MLXSW_SP_FLOOD_TYPE_MC,
						   is_port_mrouter);
	if (err)
		return err;

	mlxsw_sp_port_mrouter_update_mdb(mlxsw_sp_port, bridge_port,
					 is_port_mrouter);
out:
	bridge_port->mrouter = is_port_mrouter;
	return 0;
}
 721
 722static bool mlxsw_sp_mc_flood(const struct mlxsw_sp_bridge_port *bridge_port)
 723{
 724	const struct mlxsw_sp_bridge_device *bridge_device;
 725
 726	bridge_device = bridge_port->bridge_device;
 727	return bridge_device->multicast_enabled ? bridge_port->mrouter :
 728					bridge_port->flags & BR_MCAST_FLOOD;
 729}
 730
/* MC_DISABLED handler: toggle multicast snooping on the bridge, resync the
 * MDB and recompute multicast flooding for every member port.
 */
static int mlxsw_sp_port_mc_disabled_set(struct mlxsw_sp_port *mlxsw_sp_port,
					 struct switchdev_trans *trans,
					 struct net_device *orig_dev,
					 bool mc_disabled)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Flip the flag before the loop so mlxsw_sp_mc_flood() below sees
	 * the new snooping state when recomputing flood membership.
	 */
	if (bridge_device->multicast_enabled != !mc_disabled) {
		bridge_device->multicast_enabled = !mc_disabled;
		mlxsw_sp_bridge_mdb_mc_enable_sync(mlxsw_sp_port,
						   bridge_device);
	}

	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
		enum mlxsw_sp_flood_type packet_type = MLXSW_SP_FLOOD_TYPE_MC;
		bool member = mlxsw_sp_mc_flood(bridge_port);

		err = mlxsw_sp_bridge_port_flood_table_set(mlxsw_sp_port,
							   bridge_port,
							   packet_type, member);
		if (err)
			return err;
	}

	bridge_device->multicast_enabled = !mc_disabled;

	return 0;
}
 772
/* Add or remove the router port from a multicast group (MID) entry via the
 * SMID register. Returns 0 or a negative errno.
 */
static int mlxsw_sp_smid_router_port_set(struct mlxsw_sp *mlxsw_sp,
					 u16 mid_idx, bool add)
{
	char *smid_pl;
	int err;

	/* SMID payload is too large for the stack. */
	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx,
			    mlxsw_sp_router_port(mlxsw_sp), add);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
 789
/* Add or remove the router port from every MDB entry of a bridge; errors
 * from individual SMID writes are intentionally ignored (best effort).
 */
static void
mlxsw_sp_bridge_mrouter_update_mdb(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_bridge_device *bridge_device,
				   bool add)
{
	struct mlxsw_sp_mid *mid;

	list_for_each_entry(mid, &bridge_device->mids_list, list)
		mlxsw_sp_smid_router_port_set(mlxsw_sp, mid->mid, add);
}
 800
/* Handle the SWITCHDEV_ATTR_ID_BRIDGE_MROUTER attribute: record the
 * bridge device's multicast-router state and, when it changes, update
 * all of its MDB entries. Only acts in the commit phase.
 */
static int
mlxsw_sp_port_attr_br_mrouter_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  struct switchdev_trans *trans,
				  struct net_device *orig_dev,
				  bool is_mrouter)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;

	if (switchdev_trans_ph_prepare(trans))
		return 0;

	/* It's possible we failed to enslave the port, yet this
	 * operation is executed due to it being deferred.
	 */
	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_device)
		return 0;

	/* Only touch hardware when the state actually changes. */
	if (bridge_device->mrouter != is_mrouter)
		mlxsw_sp_bridge_mrouter_update_mdb(mlxsw_sp, bridge_device,
						   is_mrouter);
	bridge_device->mrouter = is_mrouter;
	return 0;
}
 826
/* switchdev port attribute handler: dispatch each supported attribute
 * to its dedicated setter; unknown attributes yield -EOPNOTSUPP. After
 * the commit phase, SPAN (mirroring) entries are re-resolved, since
 * attribute changes may affect mirroring.
 */
static int mlxsw_sp_port_attr_set(struct net_device *dev,
				  const struct switchdev_attr *attr,
				  struct switchdev_trans *trans)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		err = mlxsw_sp_port_attr_stp_state_set(mlxsw_sp_port, trans,
						       attr->orig_dev,
						       attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_pre_flags_set(mlxsw_sp_port,
							  trans,
							  attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		err = mlxsw_sp_port_attr_br_flags_set(mlxsw_sp_port, trans,
						      attr->orig_dev,
						      attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		err = mlxsw_sp_port_attr_br_ageing_set(mlxsw_sp_port, trans,
						       attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		err = mlxsw_sp_port_attr_br_vlan_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.vlan_filtering);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		err = mlxsw_sp_port_attr_mrouter_set(mlxsw_sp_port, trans,
						     attr->orig_dev,
						     attr->u.mrouter);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED:
		err = mlxsw_sp_port_mc_disabled_set(mlxsw_sp_port, trans,
						    attr->orig_dev,
						    attr->u.mc_disabled);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_MROUTER:
		err = mlxsw_sp_port_attr_br_mrouter_set(mlxsw_sp_port, trans,
							attr->orig_dev,
							attr->u.mrouter);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	if (switchdev_trans_ph_commit(trans))
		mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

	return err;
}
 884
/* Join a {Port, VID} to the FID backing its bridge: resolve the FID,
 * enable UC/MC/BC flooding for the local port according to the bridge
 * port flags, and map the {Port, VID} to the FID. On failure, the
 * flood tables and the FID reference are rolled back in reverse order.
 */
static int
mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			    struct mlxsw_sp_bridge_port *bridge_port,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_device *bridge_device;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	struct mlxsw_sp_fid *fid;
	int err;

	bridge_device = bridge_port->bridge_device;
	/* Takes a reference on the FID; dropped in the error path here
	 * and in mlxsw_sp_port_vlan_fid_leave().
	 */
	fid = bridge_device->ops->fid_get(bridge_device, vid, extack);
	if (IS_ERR(fid))
		return PTR_ERR(fid);

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port,
				     bridge_port->flags & BR_FLOOD);
	if (err)
		goto err_fid_uc_flood_set;

	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port,
				     mlxsw_sp_mc_flood(bridge_port));
	if (err)
		goto err_fid_mc_flood_set;

	/* Broadcast flooding is unconditionally enabled. */
	err = mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port,
				     true);
	if (err)
		goto err_fid_bc_flood_set;

	err = mlxsw_sp_fid_port_vid_map(fid, mlxsw_sp_port, vid);
	if (err)
		goto err_fid_port_vid_map;

	mlxsw_sp_port_vlan->fid = fid;

	return 0;

err_fid_port_vid_map:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
err_fid_bc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
err_fid_mc_flood_set:
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
err_fid_uc_flood_set:
	mlxsw_sp_fid_put(fid);
	return err;
}
 935
/* Reverse of mlxsw_sp_port_vlan_fid_join(): unmap the {Port, VID} from
 * its FID, disable all flooding for the local port, and drop the FID
 * reference. Teardown is in strict reverse order of the join.
 */
static void
mlxsw_sp_port_vlan_fid_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	u8 local_port = mlxsw_sp_port->local_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan->fid = NULL;
	mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_BC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_MC, local_port, false);
	mlxsw_sp_fid_flood_set(fid, MLXSW_SP_FLOOD_TYPE_UC, local_port, false);
	mlxsw_sp_fid_put(fid);
}
 951
 952static u16
 953mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port,
 954			     u16 vid, bool is_pvid)
 955{
 956	if (is_pvid)
 957		return vid;
 958	else if (mlxsw_sp_port->pvid == vid)
 959		return 0;	/* Dis-allow untagged packets */
 960	else
 961		return mlxsw_sp_port->pvid;
 962}
 963
/* Attach a {Port, VID} to its bridge port: join the FID, sync learning
 * and STP state with the bridge port, and link the port-vlan into the
 * bridge VLAN's list. Errors are unwound in reverse order.
 */
static int
mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	u16 vid = mlxsw_sp_port_vlan->vid;
	int err;

	/* No need to continue if only VLAN flags were changed */
	if (mlxsw_sp_port_vlan->bridge_port)
		return 0;

	err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port,
					  extack);
	if (err)
		return err;

	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid,
					     bridge_port->flags & BR_LEARNING);
	if (err)
		goto err_port_vid_learning_set;

	err = mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid,
					bridge_port->stp_state);
	if (err)
		goto err_port_vid_stp_set;

	/* NULL here indicates allocation failure. */
	bridge_vlan = mlxsw_sp_bridge_vlan_get(bridge_port, vid);
	if (!bridge_vlan) {
		err = -ENOMEM;
		goto err_bridge_vlan_get;
	}

	list_add(&mlxsw_sp_port_vlan->bridge_vlan_node,
		 &bridge_vlan->port_vlan_list);

	/* Hold a reference on the bridge port for the lifetime of the
	 * association; dropped in mlxsw_sp_port_vlan_bridge_leave().
	 */
	mlxsw_sp_bridge_port_get(mlxsw_sp_port->mlxsw_sp->bridge,
				 bridge_port->dev);
	mlxsw_sp_port_vlan->bridge_port = bridge_port;

	return 0;

err_bridge_vlan_get:
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
err_port_vid_stp_set:
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
err_port_vid_learning_set:
	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);
	return err;
}
1016
/* Detach a {Port, VID} from its bridge port: unlink it from the bridge
 * VLAN, reset STP/learning for the VID, flush FDB/MDB state when the
 * last user goes away, leave the FID and drop the bridge port
 * reference taken in mlxsw_sp_port_vlan_bridge_join().
 */
void
mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
	struct mlxsw_sp_bridge_vlan *bridge_vlan;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid = mlxsw_sp_port_vlan->vid;
	bool last_port, last_vlan;

	if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021Q &&
		    mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_8021D))
		return;

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	/* Sample "last" conditions before unlinking, while the lists
	 * still contain this port-vlan.
	 */
	last_vlan = list_is_singular(&bridge_port->vlans_list);
	bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid);
	last_port = list_is_singular(&bridge_vlan->port_vlan_list);

	list_del(&mlxsw_sp_port_vlan->bridge_vlan_node);
	mlxsw_sp_bridge_vlan_put(bridge_vlan);
	mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_DISABLED);
	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
	if (last_port)
		mlxsw_sp_bridge_port_fdb_flush(mlxsw_sp_port->mlxsw_sp,
					       bridge_port,
					       mlxsw_sp_fid_index(fid));
	if (last_vlan)
		mlxsw_sp_bridge_port_mdb_flush(mlxsw_sp_port, bridge_port);

	mlxsw_sp_port_vlan_fid_leave(mlxsw_sp_port_vlan);

	mlxsw_sp_bridge_port_put(mlxsw_sp_port->mlxsw_sp->bridge, bridge_port);
	mlxsw_sp_port_vlan->bridge_port = NULL;
}
1052
/* Add a single VID to a bridge port: create the port-vlan if needed,
 * program VLAN membership and PVID, and join the bridge. Errors roll
 * back the PVID and VLAN membership and destroy the port-vlan.
 */
static int
mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port,
			      u16 vid, bool is_untagged, bool is_pvid,
			      struct netlink_ext_ack *extack)
{
	u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid);
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	u16 old_pvid = mlxsw_sp_port->pvid;	/* saved for rollback */
	int err;

	/* The only valid scenario in which a port-vlan already exists, is if
	 * the VLAN flags were changed and the port-vlan is associated with the
	 * correct bridge port
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (mlxsw_sp_port_vlan &&
	    mlxsw_sp_port_vlan->bridge_port != bridge_port)
		return -EEXIST;

	if (!mlxsw_sp_port_vlan) {
		mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
							       vid);
		if (IS_ERR(mlxsw_sp_port_vlan))
			return PTR_ERR(mlxsw_sp_port_vlan);
	}

	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true,
				     is_untagged);
	if (err)
		goto err_port_vlan_set;

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	if (err)
		goto err_port_pvid_set;

	err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					     extack);
	if (err)
		goto err_port_vlan_bridge_join;

	return 0;

err_port_vlan_bridge_join:
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, old_pvid);
err_port_pvid_set:
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
err_port_vlan_set:
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
	return err;
}
1104
1105static int
1106mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp,
1107				const struct net_device *br_dev,
1108				const struct switchdev_obj_port_vlan *vlan)
 
1109{
1110	struct mlxsw_sp_rif *rif;
1111	struct mlxsw_sp_fid *fid;
1112	u16 pvid;
1113	u16 vid;
1114
1115	rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, br_dev);
1116	if (!rif)
1117		return 0;
1118	fid = mlxsw_sp_rif_fid(rif);
1119	pvid = mlxsw_sp_fid_8021q_vid(fid);
1120
1121	for (vid = vlan->vid_begin; vid <= vlan->vid_end; ++vid) {
1122		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1123			if (vid != pvid) {
1124				netdev_err(br_dev, "Can't change PVID, it's used by router interface\n");
1125				return -EBUSY;
1126			}
1127		} else {
1128			if (vid == pvid) {
1129				netdev_err(br_dev, "Can't remove PVID, it's used by router interface\n");
1130				return -EBUSY;
1131			}
1132		}
1133	}
1134
1135	return 0;
1136}
1137
/* switchdev VLAN add handler. VLAN entries on the bridge master itself
 * are not offloaded (-EOPNOTSUPP), but a PVID change that would break a
 * router interface is vetoed first, in the prepare phase. For bridge
 * ports, the whole VID range is programmed during the prepare phase;
 * VLAN-unaware bridges ignore the event.
 */
static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan,
				   struct switchdev_trans *trans,
				   struct netlink_ext_ack *extack)
{
	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev)) {
		int err = 0;

		if ((vlan->flags & BRIDGE_VLAN_INFO_BRENTRY) &&
		    br_vlan_enabled(orig_dev) &&
		    switchdev_trans_ph_prepare(trans))
			err = mlxsw_sp_br_ban_rif_pvid_change(mlxsw_sp,
							      orig_dev, vlan);
		if (!err)
			err = -EOPNOTSUPP;
		return err;
	}

	/* All the work is done in the prepare phase. */
	if (switchdev_trans_ph_commit(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		int err;

		err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port,
						    vid, flag_untagged,
						    flag_pvid, extack);
		if (err)
			return err;
	}

	return 0;
}
1185
1186static enum mlxsw_reg_sfdf_flush_type mlxsw_sp_fdb_flush_type(bool lagged)
1187{
1188	return lagged ? MLXSW_REG_SFDF_FLUSH_PER_LAG_AND_FID :
1189			MLXSW_REG_SFDF_FLUSH_PER_PORT_AND_FID;
1190}
1191
/* Flush the FDB records of @bridge_port in FID @fid_index via the SFDF
 * register, using the per-LAG or per-port flush scope depending on the
 * bridge port type.
 */
static int
mlxsw_sp_bridge_port_fdb_flush(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_bridge_port *bridge_port,
			       u16 fid_index)
{
	bool lagged = bridge_port->lagged;
	char sfdf_pl[MLXSW_REG_SFDF_LEN];
	u16 system_port;

	/* The same register field carries either the LAG ID or the
	 * system port, depending on the flush type.
	 */
	system_port = lagged ? bridge_port->lag_id : bridge_port->system_port;
	mlxsw_reg_sfdf_pack(sfdf_pl, mlxsw_sp_fdb_flush_type(lagged));
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_sfdf_port_fid_system_port_set(sfdf_pl, system_port);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
1208
1209static enum mlxsw_reg_sfd_rec_policy mlxsw_sp_sfd_rec_policy(bool dynamic)
1210{
1211	return dynamic ? MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_INGRESS :
1212			 MLXSW_REG_SFD_REC_POLICY_DYNAMIC_ENTRY_MLAG;
1213}
1214
1215static enum mlxsw_reg_sfd_op mlxsw_sp_sfd_op(bool adding)
1216{
1217	return adding ? MLXSW_REG_SFD_OP_WRITE_EDIT :
1218			MLXSW_REG_SFD_OP_WRITE_REMOVE;
1219}
1220
/* Program (adding=true) or remove a unicast tunnel FDB record for
 * {mac, fid} pointing at underlay IP @addr. Only an IPv4 underlay is
 * supported; IPv6 returns -EOPNOTSUPP. -EBUSY indicates the device
 * processed fewer records than requested.
 */
static int mlxsw_sp_port_fdb_tunnel_uc_op(struct mlxsw_sp *mlxsw_sp,
					  const char *mac, u16 fid,
					  enum mlxsw_sp_l3proto proto,
					  const union mlxsw_sp_l3addr *addr,
					  bool adding, bool dynamic)
{
	enum mlxsw_reg_sfd_uc_tunnel_protocol sfd_proto;
	char *sfd_pl;
	u8 num_rec;
	u32 uip;
	int err;

	switch (proto) {
	case MLXSW_SP_L3_PROTO_IPV4:
		uip = be32_to_cpu(addr->addr4);
		sfd_proto = MLXSW_REG_SFD_UC_TUNNEL_PROTOCOL_IPV4;
		break;
	case MLXSW_SP_L3_PROTO_IPV6: /* fall through */
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_tunnel_pack(sfd_pl, 0,
				     mlxsw_sp_sfd_rec_policy(dynamic), mac, fid,
				     MLXSW_REG_SFD_REC_ACTION_NOP, uip,
				     sfd_proto);
	/* num_rec is re-read after the write; a mismatch means the
	 * device did not consume all requested records.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1265
/* Program (adding=true) or remove a unicast FDB record {mac, fid} on
 * @local_port via the SFD register, with the given action and policy.
 * The payload is heap-allocated (MLXSW_REG_SFD_LEN is too large for the
 * stack). -EBUSY indicates the device processed fewer records than
 * requested.
 */
static int __mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
				     const char *mac, u16 fid, bool adding,
				     enum mlxsw_reg_sfd_rec_action action,
				     enum mlxsw_reg_sfd_rec_policy policy)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_pack(sfd_pl, 0, policy, mac, fid, action, local_port);
	/* num_rec is re-read after the write; a mismatch means the
	 * device did not consume all requested records.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1293
1294static int mlxsw_sp_port_fdb_uc_op(struct mlxsw_sp *mlxsw_sp, u8 local_port,
1295				   const char *mac, u16 fid, bool adding,
1296				   bool dynamic)
1297{
1298	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid, adding,
1299					 MLXSW_REG_SFD_REC_ACTION_NOP,
1300					 mlxsw_sp_sfd_rec_policy(dynamic));
1301}
1302
/* Program (adding=true) or remove a static FDB record for a router
 * interface MAC, with the "forward to IP router" action on local port 0
 * (NOTE(review): presumably the CPU port rather than a front-panel
 * port -- confirm against the SFD register description).
 */
int mlxsw_sp_rif_fdb_op(struct mlxsw_sp *mlxsw_sp, const char *mac, u16 fid,
			bool adding)
{
	return __mlxsw_sp_port_fdb_uc_op(mlxsw_sp, 0, mac, fid, adding,
					 MLXSW_REG_SFD_REC_ACTION_FORWARD_IP_ROUTER,
					 MLXSW_REG_SFD_REC_POLICY_STATIC_ENTRY);
}
1310
/* Program (adding=true) or remove a unicast FDB record {mac, fid} that
 * points at LAG @lag_id rather than a physical port. @lag_vid is
 * carried in the LAG record. -EBUSY indicates the device processed
 * fewer records than requested.
 */
static int mlxsw_sp_port_fdb_uc_lag_op(struct mlxsw_sp *mlxsw_sp, u16 lag_id,
				       const char *mac, u16 fid, u16 lag_vid,
				       bool adding, bool dynamic)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_uc_lag_pack(sfd_pl, 0, mlxsw_sp_sfd_rec_policy(dynamic),
				  mac, fid, MLXSW_REG_SFD_REC_ACTION_NOP,
				  lag_vid, lag_id);
	/* num_rec is re-read after the write; a mismatch means the
	 * device did not consume all requested records.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1339
/* Reflect a software FDB notification in hardware: resolve the bridge
 * port and the {Port, VID} to a FID index and program a unicast FDB
 * record, using the LAG variant for LAG ports. Returns 0 without doing
 * anything when the VLAN is not offloaded on this port.
 */
static int
mlxsw_sp_port_fdb_set(struct mlxsw_sp_port *mlxsw_sp_port,
		      struct switchdev_notifier_fdb_info *fdb_info, bool adding)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = fdb_info->info.dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 fid_index, vid;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return -EINVAL;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       fdb_info->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);
	vid = mlxsw_sp_port_vlan->vid;

	if (!bridge_port->lagged)
		return mlxsw_sp_port_fdb_uc_op(mlxsw_sp,
					       bridge_port->system_port,
					       fdb_info->addr, fid_index,
					       adding, false);
	else
		return mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp,
						   bridge_port->lag_id,
						   fdb_info->addr, fid_index,
						   vid, adding, false);
}
1376
/* Add or remove a multicast SFD record binding {addr, fid} to MID index
 * @mid_idx. -EBUSY indicates the device processed fewer records than
 * requested.
 */
static int mlxsw_sp_port_mdb_op(struct mlxsw_sp *mlxsw_sp, const char *addr,
				u16 fid, u16 mid_idx, bool adding)
{
	char *sfd_pl;
	u8 num_rec;
	int err;

	sfd_pl = kmalloc(MLXSW_REG_SFD_LEN, GFP_KERNEL);
	if (!sfd_pl)
		return -ENOMEM;

	mlxsw_reg_sfd_pack(sfd_pl, mlxsw_sp_sfd_op(adding), 0);
	mlxsw_reg_sfd_mc_pack(sfd_pl, 0, addr, fid,
			      MLXSW_REG_SFD_REC_ACTION_NOP, mid_idx);
	/* num_rec is re-read after the write; a mismatch means the
	 * device did not consume all requested records.
	 */
	num_rec = mlxsw_reg_sfd_num_rec_get(sfd_pl);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfd), sfd_pl);
	if (err)
		goto out;

	if (num_rec != mlxsw_reg_sfd_num_rec_get(sfd_pl))
		err = -EBUSY;

out:
	kfree(sfd_pl);
	return err;
}
1403
/* Write a complete SMID entry for MID index @mid_idx: member ports are
 * taken from @ports_bitmap and router-port membership from
 * @set_router_port. The per-port mask bits are set for every existing
 * port (the loop starts at 1, skipping local port 0) and for the router
 * port, so the write owns those ports' membership state.
 */
static int mlxsw_sp_port_smid_full_entry(struct mlxsw_sp *mlxsw_sp, u16 mid_idx,
					 long *ports_bitmap,
					 bool set_router_port)
{
	char *smid_pl;
	int err, i;

	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
	if (!smid_pl)
		return -ENOMEM;

	mlxsw_reg_smid_pack(smid_pl, mid_idx, 0, false);
	for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++) {
		if (mlxsw_sp->ports[i])
			mlxsw_reg_smid_port_mask_set(smid_pl, i, 1);
	}

	mlxsw_reg_smid_port_mask_set(smid_pl,
				     mlxsw_sp_router_port(mlxsw_sp), 1);

	/* Actual membership bits for the flooded ports. */
	for_each_set_bit(i, ports_bitmap, mlxsw_core_max_ports(mlxsw_sp->core))
		mlxsw_reg_smid_port_set(smid_pl, i, 1);

	mlxsw_reg_smid_port_set(smid_pl, mlxsw_sp_router_port(mlxsw_sp),
				set_router_port);

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
	kfree(smid_pl);
	return err;
}
1434
1435static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port,
1436				  u16 mid_idx, bool add)
1437{
1438	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1439	char *smid_pl;
1440	int err;
1441
1442	smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
1443	if (!smid_pl)
1444		return -ENOMEM;
1445
1446	mlxsw_reg_smid_pack(smid_pl, mid_idx, mlxsw_sp_port->local_port, add);
1447	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(smid), smid_pl);
1448	kfree(smid_pl);
1449	return err;
1450}
1451
1452static struct
1453mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp_bridge_device *bridge_device,
1454				const unsigned char *addr,
1455				u16 fid)
1456{
1457	struct mlxsw_sp_mid *mid;
1458
1459	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1460		if (ether_addr_equal(mid->addr, addr) && mid->fid == fid)
1461			return mid;
1462	}
1463	return NULL;
1464}
1465
1466static void
1467mlxsw_sp_bridge_port_get_ports_bitmap(struct mlxsw_sp *mlxsw_sp,
1468				      struct mlxsw_sp_bridge_port *bridge_port,
1469				      unsigned long *ports_bitmap)
1470{
1471	struct mlxsw_sp_port *mlxsw_sp_port;
1472	u64 max_lag_members, i;
1473	int lag_id;
1474
1475	if (!bridge_port->lagged) {
1476		set_bit(bridge_port->system_port, ports_bitmap);
1477	} else {
1478		max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1479						     MAX_LAG_MEMBERS);
1480		lag_id = bridge_port->lag_id;
1481		for (i = 0; i < max_lag_members; i++) {
1482			mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp,
1483								 lag_id, i);
1484			if (mlxsw_sp_port)
1485				set_bit(mlxsw_sp_port->local_port,
1486					ports_bitmap);
1487		}
1488	}
1489}
1490
1491static void
1492mlxsw_sp_mc_get_mrouters_bitmap(unsigned long *flood_bitmap,
1493				struct mlxsw_sp_bridge_device *bridge_device,
1494				struct mlxsw_sp *mlxsw_sp)
1495{
1496	struct mlxsw_sp_bridge_port *bridge_port;
1497
1498	list_for_each_entry(bridge_port, &bridge_device->ports_list, list) {
1499		if (bridge_port->mrouter) {
1500			mlxsw_sp_bridge_port_get_ports_bitmap(mlxsw_sp,
1501							      bridge_port,
1502							      flood_bitmap);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1503		}
1504	}
 
 
 
 
 
 
 
1505}
1506
/* Program an MDB entry into hardware: pick a free MID index, build the
 * egress flood bitmap from the entry's member ports plus all mrouter
 * ports, write the SMID entry and then the matching MC SFD record.
 * Returns true on success. The MID index is only marked used in
 * mids_bitmap after both writes succeed.
 *
 * NOTE(review): when mlxsw_sp_port_mdb_op() fails, the SMID entry just
 * written is not torn down here -- confirm whether that is handled
 * elsewhere or a latent leak.
 */
static bool
mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp,
			    struct mlxsw_sp_mid *mid,
			    struct mlxsw_sp_bridge_device *bridge_device)
{
	long *flood_bitmap;
	int num_of_ports;
	int alloc_size;
	u16 mid_idx;
	int err;

	mid_idx = find_first_zero_bit(mlxsw_sp->bridge->mids_bitmap,
				      MLXSW_SP_MID_MAX);
	if (mid_idx == MLXSW_SP_MID_MAX)
		return false;	/* no free MID indexes left */

	num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports);
	flood_bitmap = kzalloc(alloc_size, GFP_KERNEL);
	if (!flood_bitmap)
		return false;

	/* Flood to explicit members plus all multicast-router ports. */
	bitmap_copy(flood_bitmap,  mid->ports_in_mid, num_of_ports);
	mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp);

	mid->mid = mid_idx;
	err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap,
					    bridge_device->mrouter);
	kfree(flood_bitmap);
	if (err)
		return false;

	err = mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid_idx,
				   true);
	if (err)
		return false;

	set_bit(mid_idx, mlxsw_sp->bridge->mids_bitmap);
	mid->in_hw = true;
	return true;
}
1548
1549static int mlxsw_sp_mc_remove_mdb_entry(struct mlxsw_sp *mlxsw_sp,
1550					struct mlxsw_sp_mid *mid)
1551{
1552	if (!mid->in_hw)
1553		return 0;
1554
1555	clear_bit(mid->mid, mlxsw_sp->bridge->mids_bitmap);
1556	mid->in_hw = false;
1557	return mlxsw_sp_port_mdb_op(mlxsw_sp, mid->addr, mid->fid, mid->mid,
1558				    false);
 
 
 
 
 
 
 
 
1559}
1560
/* Allocate a new MDB entry for {addr, fid} and link it into the bridge
 * device's list. The entry is only programmed into hardware when
 * multicast is enabled on the bridge; otherwise it stays software-only
 * until mlxsw_sp_bridge_mdb_mc_enable_sync() runs. Returns NULL on any
 * failure.
 */
static struct
mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const unsigned char *addr,
				  u16 fid)
{
	struct mlxsw_sp_mid *mid;
	size_t alloc_size;

	mid = kzalloc(sizeof(*mid), GFP_KERNEL);
	if (!mid)
		return NULL;

	/* One bit per possible local port. */
	alloc_size = sizeof(unsigned long) *
		     BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));

	mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
	if (!mid->ports_in_mid)
		goto err_ports_in_mid_alloc;

	ether_addr_copy(mid->addr, addr);
	mid->fid = fid;
	mid->in_hw = false;

	if (!bridge_device->multicast_enabled)
		goto out;

	if (!mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid, bridge_device))
		goto err_write_mdb_entry;

out:
	list_add_tail(&mid->list, &bridge_device->mids_list);
	return mid;

err_write_mdb_entry:
	kfree(mid->ports_in_mid);
err_ports_in_mid_alloc:
	kfree(mid);
	return NULL;
}
1601
1602static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
1603					 struct mlxsw_sp_mid *mid)
 
 
 
1604{
1605	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1606	int err = 0;
 
 
 
1607
1608	clear_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);
1609	if (bitmap_empty(mid->ports_in_mid,
1610			 mlxsw_core_max_ports(mlxsw_sp->core))) {
1611		err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1612		list_del(&mid->list);
1613		kfree(mid->ports_in_mid);
1614		kfree(mid);
1615	}
1616	return err;
 
 
 
1617}
1618
/* switchdev MDB add handler: look up (or allocate) the MDB entry for
 * {group address, FID} and add the port to its membership bitmap. The
 * port's SMID bit is only programmed when multicast snooping is enabled
 * and the port is not an mrouter (mrouter ports are already flooded to
 * via the mrouters bitmap).
 */
static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb,
				 struct switchdev_trans *trans)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;
	int err = 0;

	/* All the work is done in the prepare phase. */
	if (switchdev_trans_ph_commit(trans))
		return 0;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		mid = __mlxsw_sp_mc_alloc(mlxsw_sp, bridge_device, mdb->addr,
					  fid_index);
		if (!mid) {
			netdev_err(dev, "Unable to allocate MC group\n");
			return -ENOMEM;
		}
	}
	set_bit(mlxsw_sp_port->local_port, mid->ports_in_mid);

	if (!bridge_device->multicast_enabled)
		return 0;

	if (bridge_port->mrouter)
		return 0;

	err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, true);
	if (err) {
		netdev_err(dev, "Unable to set SMID\n");
		goto err_out;
	}

	return 0;

err_out:
	mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	return err;
}
1678
1679static void
1680mlxsw_sp_bridge_mdb_mc_enable_sync(struct mlxsw_sp_port *mlxsw_sp_port,
1681				   struct mlxsw_sp_bridge_device
1682				   *bridge_device)
1683{
1684	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1685	struct mlxsw_sp_mid *mid;
1686	bool mc_enabled;
1687
1688	mc_enabled = bridge_device->multicast_enabled;
1689
1690	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1691		if (mc_enabled)
1692			mlxsw_sp_mc_write_mdb_entry(mlxsw_sp, mid,
1693						    bridge_device);
1694		else
1695			mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
1696	}
1697}
1698
1699static void
1700mlxsw_sp_port_mrouter_update_mdb(struct mlxsw_sp_port *mlxsw_sp_port,
1701				 struct mlxsw_sp_bridge_port *bridge_port,
1702				 bool add)
1703{
 
1704	struct mlxsw_sp_bridge_device *bridge_device;
1705	struct mlxsw_sp_mid *mid;
 
1706
1707	bridge_device = bridge_port->bridge_device;
1708
1709	list_for_each_entry(mid, &bridge_device->mids_list, list) {
1710		if (!test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid))
1711			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, add);
 
 
 
 
1712	}
1713}
1714
/* Deferred-work context for re-resolving SPAN (mirroring) entries
 * outside the context that requested the respin. Freed by the work
 * function itself.
 */
struct mlxsw_sp_span_respin_work {
	struct work_struct work;
	struct mlxsw_sp *mlxsw_sp;
};
1719
/* Deferred SPAN respin: takes RTNL around the respin and frees its own
 * work item.
 */
static void mlxsw_sp_span_respin_work(struct work_struct *work)
{
	struct mlxsw_sp_span_respin_work *respin_work =
		container_of(work, struct mlxsw_sp_span_respin_work, work);

	rtnl_lock();
	mlxsw_sp_span_respin(respin_work->mlxsw_sp);
	rtnl_unlock();
	kfree(respin_work);
}
1730
/* Schedule a deferred SPAN respin. GFP_ATOMIC is used so callers need
 * not be able to sleep; allocation failure is silently tolerated (the
 * respin is best-effort).
 */
static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_span_respin_work *respin_work;

	respin_work = kzalloc(sizeof(*respin_work), GFP_ATOMIC);
	if (!respin_work)
		return;

	INIT_WORK(&respin_work->work, mlxsw_sp_span_respin_work);
	respin_work->mlxsw_sp = mlxsw_sp;

	/* respin_work is freed by mlxsw_sp_span_respin_work(). */
	mlxsw_core_schedule_work(&respin_work->work);
}
1744
/* switchdev object add handler for VLAN and MDB objects; everything
 * else is -EOPNOTSUPP.
 */
static int mlxsw_sp_port_obj_add(struct net_device *dev,
				 const struct switchdev_obj *obj,
				 struct switchdev_trans *trans,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	const struct switchdev_obj_port_vlan *vlan;
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
		err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans,
					      extack);

		if (switchdev_trans_ph_prepare(trans)) {
			/* The event is emitted before the changes are actually
			 * applied to the bridge. Therefore schedule the respin
			 * call for later, so that the respin logic sees the
			 * updated bridge state.
			 */
			mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);
		}
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_add(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj),
					    trans);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	return err;
}
1781
/* Remove a single VID from a bridge port and destroy the corresponding
 * port-vlan. When the removed VID is the current PVID, the PVID is
 * reset to 0.
 */
static void
mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct mlxsw_sp_bridge_port *bridge_port, u16 vid)
{
	u16 pvid = mlxsw_sp_port->pvid == vid ? 0 : mlxsw_sp_port->pvid;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return;

	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
}
1798
/* switchdev VLAN del handler. Deletion on the bridge master itself is
 * not offloaded. For a VLAN-aware bridge, every VID in the range is
 * removed from the bridge port; VLAN-unaware bridges ignore the event.
 */
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
				   const struct switchdev_obj_port_vlan *vlan)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = vlan->obj.orig_dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	u16 vid;

	if (netif_is_bridge_master(orig_dev))
		return -EOPNOTSUPP;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (WARN_ON(!bridge_port))
		return -EINVAL;

	if (!bridge_port->bridge_device->vlan_enabled)
		return 0;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
		mlxsw_sp_bridge_port_vlan_del(mlxsw_sp_port, bridge_port, vid);

	return 0;
}
1822
/* Remove @mlxsw_sp_port from multicast group @mid: drop it from the SMID
 * (unless it must stay there as an mrouter port) and from the MC record.
 * Note: only the status of the final removal is returned; an SMID failure
 * is logged and then overwritten by the second operation's result.
 */
static int
__mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
			struct mlxsw_sp_bridge_port *bridge_port,
			struct mlxsw_sp_mid *mid)
{
	struct net_device *dev = mlxsw_sp_port->dev;
	int err;

	/* With multicast enabled, mrouter ports are kept in the SMID, so
	 * only remove the port when it is not an mrouter.
	 */
	if (bridge_port->bridge_device->multicast_enabled &&
	    !bridge_port->mrouter) {
		err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		if (err)
			netdev_err(dev, "Unable to remove port from SMID\n");
	}

	err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
	if (err)
		netdev_err(dev, "Unable to remove MC SFD\n");

	return err;
}
1844
/* switchdev PORT_MDB deletion handler. Silently succeeds when the port is
 * not actually a member of the bridge/VLAN the entry refers to.
 */
static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
				 const struct switchdev_obj_port_mdb *mdb)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct net_device *orig_dev = mdb->obj.orig_dev;
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = mlxsw_sp_port->dev;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_mid *mid;
	u16 fid_index;

	bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
	if (!bridge_port)
		return 0;

	bridge_device = bridge_port->bridge_device;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_bridge(mlxsw_sp_port,
							       bridge_device,
							       mdb->vid);
	if (!mlxsw_sp_port_vlan)
		return 0;

	fid_index = mlxsw_sp_fid_index(mlxsw_sp_port_vlan->fid);

	/* The group should have been created by a prior MDB add */
	mid = __mlxsw_sp_mc_get(bridge_device, mdb->addr, fid_index);
	if (!mid) {
		netdev_err(dev, "Unable to remove port from MC DB\n");
		return -EINVAL;
	}

	return __mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port, mid);
}
1878
/* Flush all multicast group memberships of @mlxsw_sp_port on its bridge,
 * including the implicit memberships an mrouter port holds.
 */
static void
mlxsw_sp_bridge_port_mdb_flush(struct mlxsw_sp_port *mlxsw_sp_port,
			       struct mlxsw_sp_bridge_port *bridge_port)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_mid *mid, *tmp;

	bridge_device = bridge_port->bridge_device;

	/* The _safe iterator is used because entries may be removed from
	 * the list while we walk it.
	 */
	list_for_each_entry_safe(mid, tmp, &bridge_device->mids_list, list) {
		if (test_bit(mlxsw_sp_port->local_port, mid->ports_in_mid)) {
			__mlxsw_sp_port_mdb_del(mlxsw_sp_port, bridge_port,
						mid);
		} else if (bridge_device->multicast_enabled &&
			   bridge_port->mrouter) {
			/* The port is not an explicit member, but as an
			 * mrouter it was added to the group's SMID.
			 */
			mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
		}
	}
}
1898
/* switchdev object deletion handler, counterpart of mlxsw_sp_port_obj_add().
 */
static int mlxsw_sp_port_obj_del(struct net_device *dev,
				 const struct switchdev_obj *obj)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	int err = 0;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = mlxsw_sp_port_vlans_del(mlxsw_sp_port,
					      SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		err = mlxsw_sp_port_mdb_del(mlxsw_sp_port,
					    SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	/* Bridge state changed; re-evaluate mirroring (SPAN) entries */
	mlxsw_sp_span_respin_schedule(mlxsw_sp_port->mlxsw_sp);

	return err;
}
1923
1924static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp,
1925						   u16 lag_id)
1926{
1927	struct mlxsw_sp_port *mlxsw_sp_port;
1928	u64 max_lag_members;
1929	int i;
1930
1931	max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1932					     MAX_LAG_MEMBERS);
1933	for (i = 0; i < max_lag_members; i++) {
1934		mlxsw_sp_port = mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i);
1935		if (mlxsw_sp_port)
1936			return mlxsw_sp_port;
1937	}
1938	return NULL;
1939}
1940
1941static int
1942mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
1943				struct mlxsw_sp_bridge_port *bridge_port,
1944				struct mlxsw_sp_port *mlxsw_sp_port,
1945				struct netlink_ext_ack *extack)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1946{
1947	if (is_vlan_dev(bridge_port->dev)) {
1948		NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
1949		return -EINVAL;
1950	}
1951
1952	/* Port is no longer usable as a router interface */
1953	if (mlxsw_sp_port->default_vlan->fid)
1954		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
1955
1956	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1957}
1958
/* 802.1Q (VLAN-aware) bridge: port leave operation. */
static void
mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
				 struct mlxsw_sp_bridge_port *bridge_port,
				 struct mlxsw_sp_port *mlxsw_sp_port)
{
	/* Make sure untagged frames are allowed to ingress */
	mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID);
}
1967
/* 802.1Q (VLAN-aware) bridge: enable VxLAN (NVE) on the FID that backs the
 * VLAN mapped to @vxlan_dev. Returns 0 when there is nothing to do yet
 * (no mapped VLAN or no FID), an error otherwise.
 */
static int
mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* If the VLAN is 0, we need to find the VLAN that is configured as
	 * PVID and egress untagged on the bridge port of the VxLAN device.
	 * It is possible no such VLAN exists
	 */
	if (!vid) {
		err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid);
		if (err || !vid)
			return err;
	}

	/* If no other port is member in the VLAN, then the FID does not exist.
	 * NVE will be enabled on the FID once a port joins the VLAN
	 */
	fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
	if (!fid)
		return 0;

	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	/* The tunnel port does not hold a reference on the FID. Only
	 * local ports and the router port
	 */
	mlxsw_sp_fid_put(fid);

	return 0;

err_nve_fid_enable:
err_vni_exists:
	/* Drop the reference taken by the lookup above */
	mlxsw_sp_fid_put(fid);
	return err;
}
2022
 
 
 
 
 
 
 
 
 
2023static struct net_device *
2024mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid)
2025{
2026	struct net_device *dev;
2027	struct list_head *iter;
2028
2029	netdev_for_each_lower_dev(br_dev, dev, iter) {
2030		u16 pvid;
2031		int err;
2032
2033		if (!netif_is_vxlan(dev))
2034			continue;
2035
2036		err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
2037		if (err || pvid != vid)
2038			continue;
2039
2040		return dev;
2041	}
2042
2043	return NULL;
2044}
2045
/* 802.1Q (VLAN-aware) bridge: get (and possibly create) the FID for VLAN
 * @vid, enabling NVE on it when a running VxLAN device is mapped to the
 * VLAN. Returns the FID with a reference held, or ERR_PTR().
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid);
	if (IS_ERR(fid))
		return fid;

	/* NVE already enabled on the FID; nothing more to set up */
	if (mlxsw_sp_fid_vni_is_set(fid))
		return fid;

	/* Find the VxLAN device that has the specified VLAN configured as
	 * PVID and egress untagged. There can be at most one such device
	 */
	vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev,
							 vid);
	if (!vxlan_dev)
		return fid;

	if (!netif_running(vxlan_dev))
		return fid;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       extack);
	if (err)
		goto err_vxlan_join;

	return fid;

err_vxlan_join:
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);
}
2084
2085static struct mlxsw_sp_fid *
2086mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2087				 u16 vid)
2088{
2089	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2090
2091	return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid);
2092}
2093
/* 802.1Q (VLAN-aware) bridge: map a FID back to its 802.1Q VLAN. */
static u16
mlxsw_sp_bridge_8021q_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return mlxsw_sp_fid_8021q_vid(fid);
}
2100
/* Bridge operations for VLAN-aware (802.1Q) bridges */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = {
	.port_join	= mlxsw_sp_bridge_8021q_port_join,
	.port_leave	= mlxsw_sp_bridge_8021q_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021q_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021q_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021q_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021q_fid_vid,
};
2109
2110static bool
2111mlxsw_sp_port_is_br_member(const struct mlxsw_sp_port *mlxsw_sp_port,
2112			   const struct net_device *br_dev)
2113{
2114	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2115
2116	list_for_each_entry(mlxsw_sp_port_vlan, &mlxsw_sp_port->vlans_list,
2117			    list) {
2118		if (mlxsw_sp_port_vlan->bridge_port &&
2119		    mlxsw_sp_port_vlan->bridge_port->bridge_device->dev ==
2120		    br_dev)
2121			return true;
2122	}
2123
2124	return false;
2125}
2126
/* 802.1D (VLAN-unaware) bridge: port join operation. The bridged entity is
 * either a VLAN upper (bridged via its VLAN ID) or the port itself (bridged
 * via the default VLAN).
 */
static int
mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
				struct mlxsw_sp_bridge_port *bridge_port,
				struct mlxsw_sp_port *mlxsw_sp_port,
				struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct net_device *dev = bridge_port->dev;
	u16 vid;

	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
	if (WARN_ON(!mlxsw_sp_port_vlan))
		return -EINVAL;

	/* Only one VLAN upper of a given port may be in a given bridge */
	if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
		NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
		return -EINVAL;
	}

	/* Port is no longer usable as a router interface */
	if (mlxsw_sp_port_vlan->fid)
		mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);

	return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port,
					      extack);
}
2154
2155static void
2156mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
2157				 struct mlxsw_sp_bridge_port *bridge_port,
2158				 struct mlxsw_sp_port *mlxsw_sp_port)
2159{
2160	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2161	struct net_device *dev = bridge_port->dev;
2162	u16 vid;
2163
2164	vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID;
2165	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
2166	if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port)
2167		return;
2168
2169	mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
2170}
2171
/* 802.1D (VLAN-unaware) bridge: enable VxLAN (NVE) on the bridge's FID.
 * @vid is unused for VLAN-unaware bridges.
 */
static int
mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device,
				 const struct net_device *vxlan_dev, u16 vid,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_nve_params params = {
		.type = MLXSW_SP_NVE_TYPE_VXLAN,
		.vni = vxlan->cfg.vni,
		.dev = vxlan_dev,
	};
	struct mlxsw_sp_fid *fid;
	int err;

	/* The 802.1D FID is keyed by the bridge's ifindex */
	fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
	if (!fid) {
		NL_SET_ERR_MSG_MOD(extack, "Did not find a corresponding FID");
		return -EINVAL;
	}

	if (mlxsw_sp_fid_vni_is_set(fid)) {
		NL_SET_ERR_MSG_MOD(extack, "VNI is already set on FID");
		err = -EINVAL;
		goto err_vni_exists;
	}

	err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack);
	if (err)
		goto err_nve_fid_enable;

	/* The tunnel port does not hold a reference on the FID. Only
	 * local ports and the router port
	 */
	mlxsw_sp_fid_put(fid);

	return 0;

err_nve_fid_enable:
err_vni_exists:
	/* Drop the reference taken by the lookup above */
	mlxsw_sp_fid_put(fid);
	return err;
}
2215
/* 802.1D (VLAN-unaware) bridge: get (and possibly create) the bridge's FID,
 * enabling NVE on it when a running VxLAN device is enslaved to the bridge.
 * Returns the FID with a reference held, or ERR_PTR().
 */
static struct mlxsw_sp_fid *
mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device,
			      u16 vid, struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
	struct net_device *vxlan_dev;
	struct mlxsw_sp_fid *fid;
	int err;

	fid = mlxsw_sp_fid_8021d_get(mlxsw_sp, bridge_device->dev->ifindex);
	if (IS_ERR(fid))
		return fid;

	/* NVE already enabled on the FID; nothing more to set up */
	if (mlxsw_sp_fid_vni_is_set(fid))
		return fid;

	vxlan_dev = mlxsw_sp_bridge_vxlan_dev_find(bridge_device->dev);
	if (!vxlan_dev)
		return fid;

	if (!netif_running(vxlan_dev))
		return fid;

	err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0,
					       extack);
	if (err)
		goto err_vxlan_join;

	return fid;

err_vxlan_join:
	mlxsw_sp_fid_put(fid);
	return ERR_PTR(err);
}
2250
2251static struct mlxsw_sp_fid *
2252mlxsw_sp_bridge_8021d_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device,
2253				 u16 vid)
2254{
2255	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev);
2256
2257	/* The only valid VLAN for a VLAN-unaware bridge is 0 */
2258	if (vid)
2259		return NULL;
2260
2261	return mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex);
2262}
2263
/* 802.1D (VLAN-unaware) bridge: there is no VLAN; always report VID 0. */
static u16
mlxsw_sp_bridge_8021d_fid_vid(struct mlxsw_sp_bridge_device *bridge_device,
			      const struct mlxsw_sp_fid *fid)
{
	return 0;
}
2270
/* Bridge operations for VLAN-unaware (802.1D) bridges */
static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = {
	.port_join	= mlxsw_sp_bridge_8021d_port_join,
	.port_leave	= mlxsw_sp_bridge_8021d_port_leave,
	.vxlan_join	= mlxsw_sp_bridge_8021d_vxlan_join,
	.fid_get	= mlxsw_sp_bridge_8021d_fid_get,
	.fid_lookup	= mlxsw_sp_bridge_8021d_fid_lookup,
	.fid_vid	= mlxsw_sp_bridge_8021d_fid_vid,
};
2279
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Enslave @mlxsw_sp_port (via @brport_dev) to bridge @br_dev. Takes a
 * reference on the bridge port object and performs the bridge-type
 * specific join; the reference is dropped again on failure.
 */
int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
			      struct net_device *brport_dev,
			      struct net_device *br_dev,
			      struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	int err;

	bridge_port = mlxsw_sp_bridge_port_get(mlxsw_sp->bridge, brport_dev);
	if (IS_ERR(bridge_port))
		return PTR_ERR(bridge_port);
	bridge_device = bridge_port->bridge_device;

	err = bridge_device->ops->port_join(bridge_device, bridge_port,
					    mlxsw_sp_port, extack);
	if (err)
		goto err_port_join;

	return 0;

err_port_join:
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
	return err;
}
2306
/* Release @mlxsw_sp_port from bridge @br_dev, undoing
 * mlxsw_sp_port_bridge_join(). Silently returns if the bridge or bridge
 * port is no longer known to the driver.
 */
void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
				struct net_device *brport_dev,
				struct net_device *br_dev)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;
	bridge_port = __mlxsw_sp_bridge_port_find(bridge_device, brport_dev);
	if (!bridge_port)
		return;

	bridge_device->ops->port_leave(bridge_device, bridge_port,
				       mlxsw_sp_port);
	/* Drop the reference taken by the join */
	mlxsw_sp_bridge_port_put(mlxsw_sp->bridge, bridge_port);
}
2326
/* Dispatch a VxLAN join to the bridge-type specific implementation. */
int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp,
			       const struct net_device *br_dev,
			       const struct net_device *vxlan_dev, u16 vid,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return -EINVAL;

	return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid,
					      extack);
}
2341
/* Disable NVE on the FID associated with @vxlan_dev's VNI, if any. */
void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp,
				 const struct net_device *vxlan_dev)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	struct mlxsw_sp_fid *fid;

	/* If the VxLAN device is down, then the FID does not have a VNI */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni);
	if (!fid)
		return;

	mlxsw_sp_nve_fid_disable(mlxsw_sp, fid);
	/* Drop the reference taken by the lookup above */
	mlxsw_sp_fid_put(fid);
}
2356
/* Dispatch a FID get to the bridge-type specific implementation. The
 * returned FID carries a reference; callers must put it.
 */
struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
					     const struct net_device *br_dev,
					     u16 vid,
					     struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_bridge_device *bridge_device;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (WARN_ON(!bridge_device))
		return ERR_PTR(-EINVAL);

	return bridge_device->ops->fid_get(bridge_device, vid, extack);
}
2370
2371static void
2372mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr,
2373				      enum mlxsw_sp_l3proto *proto,
2374				      union mlxsw_sp_l3addr *addr)
2375{
2376	if (vxlan_addr->sa.sa_family == AF_INET) {
2377		addr->addr4 = vxlan_addr->sin.sin_addr.s_addr;
2378		*proto = MLXSW_SP_L3_PROTO_IPV4;
2379	} else {
2380		addr->addr6 = vxlan_addr->sin6.sin6_addr;
2381		*proto = MLXSW_SP_L3_PROTO_IPV6;
2382	}
2383}
2384
2385static void
2386mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto,
2387				      const union mlxsw_sp_l3addr *addr,
2388				      union vxlan_addr *vxlan_addr)
2389{
2390	switch (proto) {
2391	case MLXSW_SP_L3_PROTO_IPV4:
2392		vxlan_addr->sa.sa_family = AF_INET;
2393		vxlan_addr->sin.sin_addr.s_addr = addr->addr4;
2394		break;
2395	case MLXSW_SP_L3_PROTO_IPV6:
2396		vxlan_addr->sa.sa_family = AF_INET6;
2397		vxlan_addr->sin6.sin6_addr = addr->addr6;
2398		break;
2399	}
2400}
2401
2402static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev,
2403					      const char *mac,
2404					      enum mlxsw_sp_l3proto proto,
2405					      union mlxsw_sp_l3addr *addr,
2406					      __be32 vni, bool adding)
2407{
2408	struct switchdev_notifier_vxlan_fdb_info info;
2409	struct vxlan_dev *vxlan = netdev_priv(dev);
2410	enum switchdev_notifier_type type;
2411
2412	type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE :
2413			SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE;
2414	mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip);
2415	info.remote_port = vxlan->cfg.dst_port;
2416	info.remote_vni = vni;
2417	info.remote_ifindex = 0;
2418	ether_addr_copy(info.eth_addr, mac);
2419	info.vni = vni;
2420	info.offloaded = adding;
2421	call_switchdev_notifiers(type, dev, &info.info, NULL);
2422}
2423
2424static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev,
2425					    const char *mac,
2426					    enum mlxsw_sp_l3proto proto,
2427					    union mlxsw_sp_l3addr *addr,
2428					    __be32 vni,
2429					    bool adding)
2430{
2431	if (netif_is_vxlan(dev))
2432		mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni,
2433						  adding);
2434}
2435
2436static void
2437mlxsw_sp_fdb_call_notifiers(enum switchdev_notifier_type type,
2438			    const char *mac, u16 vid,
2439			    struct net_device *dev, bool offloaded)
2440{
2441	struct switchdev_notifier_fdb_info info;
2442
2443	info.addr = mac;
2444	info.vid = vid;
2445	info.offloaded = offloaded;
 
2446	call_switchdev_notifiers(type, dev, &info.info, NULL);
2447}
2448
/* Process one learned/aged-out MAC record from the device and reflect it
 * to the bridge. If the record cannot be associated with a bridge port,
 * the entry is instead removed from the device (without notifying the
 * bridge) so that it is not re-reported.
 */
static void mlxsw_sp_fdb_notify_mac_process(struct mlxsw_sp *mlxsw_sp,
					    char *sfn_pl, int rec_index,
					    bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u8 local_port;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_unpack(sfn_pl, rec_index, mac, &fid, &local_port);
	mlxsw_sp_port = mlxsw_sp->ports[local_port];
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Incorrect local port in FDB notification\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	/* VLAN-unaware bridges are notified with VID 0 */
	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	/* Commit (or remove) the entry in the device's FDB */
	err = mlxsw_sp_port_fdb_uc_op(mlxsw_sp, local_port, mac, fid,
				      adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Unresolvable record: delete it from the device and skip the
	 * bridge notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2509
/* LAG variant of mlxsw_sp_fdb_notify_mac_process(): process one
 * learned/aged-out MAC record reported against a LAG and reflect it to the
 * bridge, or remove it from the device if it cannot be resolved.
 */
static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp,
						char *sfn_pl, int rec_index,
						bool adding)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct mlxsw_sp_bridge_port *bridge_port;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum switchdev_notifier_type type;
	char mac[ETH_ALEN];
	u16 lag_vid = 0;
	u16 lag_id;
	u16 vid, fid;
	bool do_notification = true;
	int err;

	mlxsw_reg_sfn_mac_lag_unpack(sfn_pl, rec_index, mac, &fid, &lag_id);
	/* Any LAG member port can represent the LAG */
	mlxsw_sp_port = mlxsw_sp_lag_rep_port(mlxsw_sp, lag_id);
	if (!mlxsw_sp_port) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Cannot find port representor for LAG\n");
		goto just_remove;
	}

	if (mlxsw_sp_fid_is_dummy(mlxsw_sp, fid))
		goto just_remove;

	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_fid(mlxsw_sp_port, fid);
	if (!mlxsw_sp_port_vlan) {
		netdev_err(mlxsw_sp_port->dev, "Failed to find a matching {Port, VID} following FDB notification\n");
		goto just_remove;
	}

	bridge_port = mlxsw_sp_port_vlan->bridge_port;
	if (!bridge_port) {
		netdev_err(mlxsw_sp_port->dev, "{Port, VID} not associated with a bridge\n");
		goto just_remove;
	}

	/* VLAN-unaware bridges are notified with VID 0 */
	bridge_device = bridge_port->bridge_device;
	vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0;
	lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ?
		  mlxsw_sp_port_vlan->vid : 0;

do_fdb_op:
	/* Commit (or remove) the entry in the device's FDB */
	err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid,
					  adding, true);
	if (err) {
		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to set FDB entry\n");
		return;
	}

	if (!do_notification)
		return;
	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, bridge_port->dev, adding);

	return;

just_remove:
	/* Unresolvable record: delete it from the device and skip the
	 * bridge notification.
	 */
	adding = false;
	do_notification = false;
	goto do_fdb_op;
}
2573
/* Resolve the NVE device, bridge VID and VNI for a FID on which a unicast
 * tunnel FDB record was reported. Returns -EINVAL (or the helper's error)
 * when the record should not be processed: NVE device gone or not running,
 * or - when adding - learning disabled on the bridge port or VxLAN device.
 */
static int
__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
					    const struct mlxsw_sp_fid *fid,
					    bool adding,
					    struct net_device **nve_dev,
					    u16 *p_vid, __be32 *p_vni)
{
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *br_dev, *dev;
	int nve_ifindex;
	int err;

	err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex);
	if (err)
		return err;

	err = mlxsw_sp_fid_vni(fid, p_vni);
	if (err)
		return err;

	dev = __dev_get_by_index(&init_net, nve_ifindex);
	if (!dev)
		return -EINVAL;
	*nve_dev = dev;

	if (!netif_running(dev))
		return -EINVAL;

	/* Do not add entries the bridge or VxLAN device would not learn */
	if (adding && !br_port_flag_is_set(dev, BR_LEARNING))
		return -EINVAL;

	if (adding && netif_is_vxlan(dev)) {
		struct vxlan_dev *vxlan = netdev_priv(dev);

		if (!(vxlan->cfg.flags & VXLAN_F_LEARN))
			return -EINVAL;
	}

	br_dev = netdev_master_upper_dev_get(dev);
	if (!br_dev)
		return -EINVAL;

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return -EINVAL;

	*p_vid = bridge_device->ops->fid_vid(bridge_device, fid);

	return 0;
}
2624
/* Process one learned/aged-out unicast tunnel record: commit it to the
 * device's FDB and notify both the NVE (VxLAN) device and the bridge.
 * Unresolvable records are deleted from the device so they are not
 * re-reported.
 */
static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp,
						      char *sfn_pl,
						      int rec_index,
						      bool adding)
{
	enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto;
	enum switchdev_notifier_type type;
	struct net_device *nve_dev;
	union mlxsw_sp_l3addr addr;
	struct mlxsw_sp_fid *fid;
	char mac[ETH_ALEN];
	u16 fid_index, vid;
	__be32 vni;
	u32 uip;
	int err;

	mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index,
				       &uip, &sfn_proto);

	fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index);
	if (!fid)
		goto err_fid_lookup;

	err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip,
					      (enum mlxsw_sp_l3proto) sfn_proto,
					      &addr);
	if (err)
		goto err_ip_resolve;

	err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding,
							  &nve_dev, &vid, &vni);
	if (err)
		goto err_fdb_process;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
					     (enum mlxsw_sp_l3proto) sfn_proto,
					     &addr, adding, true);
	if (err)
		goto err_fdb_op;

	mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac,
					(enum mlxsw_sp_l3proto) sfn_proto,
					&addr, vni, adding);

	type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
			SWITCHDEV_FDB_DEL_TO_BRIDGE;
	mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_op:
err_fdb_process:
err_ip_resolve:
	mlxsw_sp_fid_put(fid);
err_fid_lookup:
	/* Remove an FDB entry in case we cannot process it. Otherwise the
	 * device will keep sending the same notification over and over again.
	 */
	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index,
				       (enum mlxsw_sp_l3proto) sfn_proto, &addr,
				       false, true);
}
2689
2690static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp,
2691					    char *sfn_pl, int rec_index)
2692{
2693	switch (mlxsw_reg_sfn_rec_type_get(sfn_pl, rec_index)) {
2694	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC:
2695		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2696						rec_index, true);
2697		break;
2698	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC:
2699		mlxsw_sp_fdb_notify_mac_process(mlxsw_sp, sfn_pl,
2700						rec_index, false);
2701		break;
2702	case MLXSW_REG_SFN_REC_TYPE_LEARNED_MAC_LAG:
2703		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2704						    rec_index, true);
2705		break;
2706	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG:
2707		mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl,
2708						    rec_index, false);
2709		break;
2710	case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL:
2711		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2712							  rec_index, true);
2713		break;
2714	case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL:
2715		mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl,
2716							  rec_index, false);
2717		break;
2718	}
2719}
2720
2721static void mlxsw_sp_fdb_notify_work_schedule(struct mlxsw_sp *mlxsw_sp)
2722{
2723	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
2724
2725	mlxsw_core_schedule_dw(&bridge->fdb_notify.dw,
2726			       msecs_to_jiffies(bridge->fdb_notify.interval));
2727}
2728
2729static void mlxsw_sp_fdb_notify_work(struct work_struct *work)
2730{
2731	struct mlxsw_sp_bridge *bridge;
2732	struct mlxsw_sp *mlxsw_sp;
 
2733	char *sfn_pl;
 
2734	u8 num_rec;
2735	int i;
2736	int err;
2737
2738	sfn_pl = kmalloc(MLXSW_REG_SFN_LEN, GFP_KERNEL);
2739	if (!sfn_pl)
2740		return;
2741
2742	bridge = container_of(work, struct mlxsw_sp_bridge, fdb_notify.dw.work);
2743	mlxsw_sp = bridge->mlxsw_sp;
2744
2745	rtnl_lock();
2746	mlxsw_reg_sfn_pack(sfn_pl);
2747	err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(sfn), sfn_pl);
2748	if (err) {
2749		dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to get FDB notifications\n");
2750		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2751	}
2752	num_rec = mlxsw_reg_sfn_num_rec_get(sfn_pl);
2753	for (i = 0; i < num_rec; i++)
2754		mlxsw_sp_fdb_notify_rec_process(mlxsw_sp, sfn_pl, i);
2755
2756out:
2757	rtnl_unlock();
2758	kfree(sfn_pl);
2759	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
 
 
2760}
2761
/* Deferred-work context for a switchdev FDB event: the event type, target
 * device and a copy of the event's FDB info are stored here so the event
 * can be handled asynchronously from a workqueue.
 */
struct mlxsw_sp_switchdev_event_work {
	struct work_struct work;
	union {
		struct switchdev_notifier_fdb_info fdb_info;
		struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	};
	struct net_device *dev;
	unsigned long event;
};
2771
/* Handle a bridge FDB add/del targeting a VxLAN device: program the
 * corresponding tunnel FDB entry in the device and report the offload
 * state back via the VxLAN and FDB notifier chains.
 */
static void
mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_switchdev_event_work *
					  switchdev_work,
					  struct mlxsw_sp_fid *fid, __be32 vni)
{
	struct switchdev_notifier_vxlan_fdb_info vxlan_fdb_info;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct net_device *dev = switchdev_work->dev;
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	int err;

	/* Look up the matching unicast entry in the VxLAN device's FDB */
	fdb_info = &switchdev_work->fdb_info;
	err = vxlan_fdb_find_uc(dev, fdb_info->addr, vni, &vxlan_fdb_info);
	if (err)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info.remote_ip,
					      &proto, &addr);

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, true, false);
		if (err)
			return;
		vxlan_fdb_info.offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    vxlan_fdb_info.eth_addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		/* Removal failures are not propagated */
		err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp,
						     vxlan_fdb_info.eth_addr,
						     mlxsw_sp_fid_index(fid),
						     proto, &addr, false,
						     false);
		vxlan_fdb_info.offloaded = false;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info.info, NULL);
		break;
	}
}
2820
2821static void
2822mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work *
2823					switchdev_work)
2824{
2825	struct mlxsw_sp_bridge_device *bridge_device;
2826	struct net_device *dev = switchdev_work->dev;
2827	struct net_device *br_dev;
2828	struct mlxsw_sp *mlxsw_sp;
2829	struct mlxsw_sp_fid *fid;
2830	__be32 vni;
2831	int err;
2832
2833	if (switchdev_work->event != SWITCHDEV_FDB_ADD_TO_DEVICE &&
2834	    switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE)
2835		return;
2836
2837	if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE &&
2838	    !switchdev_work->fdb_info.added_by_user)
 
2839		return;
2840
2841	if (!netif_running(dev))
2842		return;
2843	br_dev = netdev_master_upper_dev_get(dev);
2844	if (!br_dev)
2845		return;
2846	if (!netif_is_bridge_master(br_dev))
2847		return;
2848	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
2849	if (!mlxsw_sp)
2850		return;
2851	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
2852	if (!bridge_device)
2853		return;
2854
2855	fid = bridge_device->ops->fid_lookup(bridge_device,
2856					     switchdev_work->fdb_info.vid);
2857	if (!fid)
2858		return;
2859
2860	err = mlxsw_sp_fid_vni(fid, &vni);
2861	if (err)
2862		goto out;
2863
2864	mlxsw_sp_switchdev_bridge_vxlan_fdb_event(mlxsw_sp, switchdev_work, fid,
2865						  vni);
2866
2867out:
2868	mlxsw_sp_fid_put(fid);
2869}
2870
/* Process-context handler for bridge port FDB events queued by
 * mlxsw_sp_switchdev_event(). Runs under RTNL and, on exit, releases
 * the resources taken when the work was queued (MAC buffer, work item,
 * device reference).
 */
static void mlxsw_sp_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct mlxsw_sp_switchdev_event_work *switchdev_work =
		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	int err;

	rtnl_lock();
	/* VxLAN devices are not lowers of our ports; route them to the
	 * NVE-specific handler.
	 */
	if (netif_is_vxlan(dev)) {
		mlxsw_sp_switchdev_bridge_nve_fdb_event(switchdev_work);
		goto out;
	}

	mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
	if (!mlxsw_sp_port)
		goto out;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		/* Only user-configured entries are programmed here. */
		if (!fdb_info->added_by_user)
			break;
		err = mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, true);
		if (err)
			break;
		mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
					    fdb_info->addr,
					    fdb_info->vid, dev, true);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = &switchdev_work->fdb_info;
		mlxsw_sp_port_fdb_set(mlxsw_sp_port, fdb_info, false);
		break;
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		/* These events are only used to potentially update an existing
		 * SPAN mirror.
		 */
		break;
	}

	/* FDB contents may have changed; re-evaluate SPAN sessions. */
	mlxsw_sp_span_respin(mlxsw_sp_port->mlxsw_sp);

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}
2922
/* Program a VxLAN FDB entry addition into the device. An all-zeros MAC
 * denotes the flood entry (remote VTEP for BUM traffic); any other MAC
 * is a unicast tunnel entry, programmed only if the bridge FDB also
 * points the MAC at the VxLAN device. Takes and releases a reference on
 * the FID looked up by VNI.
 */
static void
mlxsw_sp_switchdev_vxlan_fdb_add(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_switchdev_event_work *
				 switchdev_work)
{
	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
	struct mlxsw_sp_bridge_device *bridge_device;
	struct net_device *dev = switchdev_work->dev;
	u8 all_zeros_mac[ETH_ALEN] = { 0 };
	enum mlxsw_sp_l3proto proto;
	union mlxsw_sp_l3addr addr;
	struct net_device *br_dev;
	struct mlxsw_sp_fid *fid;
	u16 vid;
	int err;

	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
	br_dev = netdev_master_upper_dev_get(dev);

	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
	if (!bridge_device)
		return;

	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
	if (!fid)
		return;

	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
					      &proto, &addr);

	/* All-zeros MAC: add the remote IP to the FID's flood list. */
	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
		err = mlxsw_sp_nve_flood_ip_add(mlxsw_sp, fid, proto, &addr);
		if (err) {
			mlxsw_sp_fid_put(fid);
			return;
		}
		vxlan_fdb_info->offloaded = true;
		call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
					 &vxlan_fdb_info->info, NULL);
		mlxsw_sp_fid_put(fid);
		return;
	}

	/* The device has a single FDB table, whereas Linux has two - one
	 * in the bridge driver and another in the VxLAN driver. We only
	 * program an entry to the device if the MAC points to the VxLAN
	 * device in the bridge's FDB table
	 */
	vid = bridge_device->ops->fid_vid(bridge_device, fid);
	if (br_fdb_find_port(br_dev, vxlan_fdb_info->eth_addr, vid) != dev)
		goto err_br_fdb_find;

	err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
					     mlxsw_sp_fid_index(fid), proto,
					     &addr, true, false);
	if (err)
		goto err_fdb_tunnel_uc_op;
	/* Report the entry as offloaded to both the VxLAN and bridge
	 * FDB tables.
	 */
	vxlan_fdb_info->offloaded = true;
	call_switchdev_notifiers(SWITCHDEV_VXLAN_FDB_OFFLOADED, dev,
				 &vxlan_fdb_info->info, NULL);
	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
				    vxlan_fdb_info->eth_addr, vid, dev, true);

	mlxsw_sp_fid_put(fid);

	return;

err_fdb_tunnel_uc_op:
err_br_fdb_find:
	mlxsw_sp_fid_put(fid);
}
2994
2995static void
2996mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp,
2997				 struct mlxsw_sp_switchdev_event_work *
2998				 switchdev_work)
2999{
3000	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3001	struct mlxsw_sp_bridge_device *bridge_device;
3002	struct net_device *dev = switchdev_work->dev;
3003	struct net_device *br_dev = netdev_master_upper_dev_get(dev);
3004	u8 all_zeros_mac[ETH_ALEN] = { 0 };
3005	enum mlxsw_sp_l3proto proto;
3006	union mlxsw_sp_l3addr addr;
3007	struct mlxsw_sp_fid *fid;
3008	u16 vid;
3009
3010	vxlan_fdb_info = &switchdev_work->vxlan_fdb_info;
 
 
3011
3012	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3013	if (!bridge_device)
3014		return;
3015
3016	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan_fdb_info->vni);
3017	if (!fid)
3018		return;
3019
3020	mlxsw_sp_switchdev_vxlan_addr_convert(&vxlan_fdb_info->remote_ip,
3021					      &proto, &addr);
3022
3023	if (ether_addr_equal(vxlan_fdb_info->eth_addr, all_zeros_mac)) {
3024		mlxsw_sp_nve_flood_ip_del(mlxsw_sp, fid, proto, &addr);
3025		mlxsw_sp_fid_put(fid);
3026		return;
3027	}
3028
3029	mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, vxlan_fdb_info->eth_addr,
3030				       mlxsw_sp_fid_index(fid), proto, &addr,
3031				       false, false);
3032	vid = bridge_device->ops->fid_vid(bridge_device, fid);
3033	mlxsw_sp_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED,
3034				    vxlan_fdb_info->eth_addr, vid, dev, false);
 
3035
3036	mlxsw_sp_fid_put(fid);
3037}
3038
3039static void mlxsw_sp_switchdev_vxlan_fdb_event_work(struct work_struct *work)
3040{
3041	struct mlxsw_sp_switchdev_event_work *switchdev_work =
3042		container_of(work, struct mlxsw_sp_switchdev_event_work, work);
3043	struct net_device *dev = switchdev_work->dev;
3044	struct mlxsw_sp *mlxsw_sp;
3045	struct net_device *br_dev;
3046
3047	rtnl_lock();
3048
3049	if (!netif_running(dev))
3050		goto out;
3051	br_dev = netdev_master_upper_dev_get(dev);
3052	if (!br_dev)
3053		goto out;
3054	if (!netif_is_bridge_master(br_dev))
3055		goto out;
3056	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3057	if (!mlxsw_sp)
3058		goto out;
3059
3060	switch (switchdev_work->event) {
3061	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE:
3062		mlxsw_sp_switchdev_vxlan_fdb_add(mlxsw_sp, switchdev_work);
3063		break;
3064	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
3065		mlxsw_sp_switchdev_vxlan_fdb_del(mlxsw_sp, switchdev_work);
3066		break;
3067	}
3068
3069out:
3070	rtnl_unlock();
 
3071	kfree(switchdev_work);
3072	dev_put(dev);
3073}
3074
3075static int
3076mlxsw_sp_switchdev_vxlan_work_prepare(struct mlxsw_sp_switchdev_event_work *
3077				      switchdev_work,
3078				      struct switchdev_notifier_info *info)
3079{
3080	struct vxlan_dev *vxlan = netdev_priv(switchdev_work->dev);
3081	struct switchdev_notifier_vxlan_fdb_info *vxlan_fdb_info;
3082	struct vxlan_config *cfg = &vxlan->cfg;
3083	struct netlink_ext_ack *extack;
3084
3085	extack = switchdev_notifier_info_to_extack(info);
3086	vxlan_fdb_info = container_of(info,
3087				      struct switchdev_notifier_vxlan_fdb_info,
3088				      info);
3089
3090	if (vxlan_fdb_info->remote_port != cfg->dst_port) {
3091		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default remote port is not supported");
3092		return -EOPNOTSUPP;
3093	}
3094	if (vxlan_fdb_info->remote_vni != cfg->vni ||
3095	    vxlan_fdb_info->vni != cfg->vni) {
3096		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Non-default VNI is not supported");
3097		return -EOPNOTSUPP;
3098	}
3099	if (vxlan_fdb_info->remote_ifindex) {
3100		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Local interface is not supported");
3101		return -EOPNOTSUPP;
3102	}
3103	if (is_multicast_ether_addr(vxlan_fdb_info->eth_addr)) {
3104		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast MAC addresses not supported");
3105		return -EOPNOTSUPP;
3106	}
3107	if (vxlan_addr_multicast(&vxlan_fdb_info->remote_ip)) {
3108		NL_SET_ERR_MSG_MOD(extack, "VxLAN: FDB: Multicast destination IP is not supported");
3109		return -EOPNOTSUPP;
3110	}
3111
3112	switchdev_work->vxlan_fdb_info = *vxlan_fdb_info;
3113
3114	return 0;
3115}
3116
/* Called under rcu_read_lock() */
static int mlxsw_sp_switchdev_event(struct notifier_block *unused,
				    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct mlxsw_sp_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct net_device *br_dev;
	int err;

	/* Attribute sets are handled synchronously via the switchdev
	 * helper; everything else is deferred to a work item below.
	 */
	if (event == SWITCHDEV_PORT_ATTR_SET) {
		err = switchdev_handle_port_attr_set(dev, ptr,
						     mlxsw_sp_port_dev_check,
						     mlxsw_sp_port_attr_set);
		return notifier_from_errno(err);
	}

	/* Tunnel devices are not our uppers, so check their master instead */
	br_dev = netdev_master_upper_dev_get_rcu(dev);
	if (!br_dev)
		return NOTIFY_DONE;
	if (!netif_is_bridge_master(br_dev))
		return NOTIFY_DONE;
	if (!mlxsw_sp_port_dev_lower_find_rcu(br_dev))
		return NOTIFY_DONE;

	/* Atomic (RCU) context: GFP_ATOMIC allocation, real work deferred. */
	switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
	if (!switchdev_work)
		return NOTIFY_BAD;

	switchdev_work->dev = dev;
	switchdev_work->event = event;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_DEVICE: /* fall through */
	case SWITCHDEV_FDB_ADD_TO_BRIDGE: /* fall through */
	case SWITCHDEV_FDB_DEL_TO_BRIDGE:
		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		/* The notifier's MAC buffer is not ours to keep; deep-copy
		 * it so the deferred work can use it. Freed by the work
		 * handler.
		 */
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;
		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		/* Take a reference on the device. This can be either
		 * upper device containig mlxsw_sp_port or just a
		 * mlxsw_sp_port
		 */
		dev_hold(dev);
		break;
	case SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE: /* fall through */
	case SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE:
		INIT_WORK(&switchdev_work->work,
			  mlxsw_sp_switchdev_vxlan_fdb_event_work);
		err = mlxsw_sp_switchdev_vxlan_work_prepare(switchdev_work,
							    info);
		if (err)
			goto err_vxlan_work_prepare;
		dev_hold(dev);
		break;
	default:
		kfree(switchdev_work);
		return NOTIFY_DONE;
	}

	mlxsw_core_schedule_work(&switchdev_work->work);

	return NOTIFY_DONE;

err_vxlan_work_prepare:
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}
3198
/* Non-blocking (atomic) switchdev notifier; registered in
 * mlxsw_sp_fdb_init().
 */
struct notifier_block mlxsw_sp_switchdev_notifier = {
	.notifier_call = mlxsw_sp_switchdev_event,
};
3202
/* Handle addition of a VLAN on a VxLAN device enslaved to a VLAN-aware
 * bridge. Depending on the current FID <-> VNI mapping and on the new
 * VLAN's PVID / egress-untagged flags, NVE is enabled, disabled or
 * re-mapped on the relevant FID. The five distinct cases are described
 * inline. Returns 0 on success or a negative errno.
 */
static int
mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_bridge_device *bridge_device,
				  const struct net_device *vxlan_dev, u16 vid,
				  bool flag_untagged, bool flag_pvid,
				  struct netlink_ext_ack *extack)
{
	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
	__be32 vni = vxlan->cfg.vni;
	struct mlxsw_sp_fid *fid;
	u16 old_vid;
	int err;

	/* We cannot have the same VLAN as PVID and egress untagged on multiple
	 * VxLAN devices. Note that we get this notification before the VLAN is
	 * actually added to the bridge's database, so it is not possible for
	 * the lookup function to return 'vxlan_dev'
	 */
	if (flag_untagged && flag_pvid &&
	    mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) {
		NL_SET_ERR_MSG_MOD(extack, "VLAN already mapped to a different VNI");
		return -EINVAL;
	}

	if (!netif_running(vxlan_dev))
		return 0;

	/* First case: FID is not associated with this VNI, but the new VLAN
	 * is both PVID and egress untagged. Need to enable NVE on the FID, if
	 * it exists
	 */
	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
	if (!fid) {
		if (!flag_untagged || !flag_pvid)
			return 0;
		return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device,
							vxlan_dev, vid, extack);
	}

	/* Second case: FID is associated with the VNI and the VLAN associated
	 * with the FID is the same as the notified VLAN. This means the flags
	 * (PVID / egress untagged) were toggled and that NVE should be
	 * disabled on the FID
	 */
	old_vid = mlxsw_sp_fid_8021q_vid(fid);
	if (vid == old_vid) {
		/* Both flags set would have matched the first case above. */
		if (WARN_ON(flag_untagged && flag_pvid)) {
			mlxsw_sp_fid_put(fid);
			return -EINVAL;
		}
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Third case: A new VLAN was configured on the VxLAN device, but this
	 * VLAN is not PVID, so there is nothing to do.
	 */
	if (!flag_pvid) {
		mlxsw_sp_fid_put(fid);
		return 0;
	}

	/* Fourth case: Thew new VLAN is PVID, which means the VLAN currently
	 * mapped to the VNI should be unmapped
	 */
	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
	mlxsw_sp_fid_put(fid);

	/* Fifth case: The new VLAN is also egress untagged, which means the
	 * VLAN needs to be mapped to the VNI
	 */
	if (!flag_untagged)
		return 0;

	err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid,
					       extack);
	if (err)
		goto err_vxlan_join;

	return 0;

err_vxlan_join:
	/* Roll back to the previous VLAN <-> VNI mapping. */
	mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid,
					 NULL);
	return err;
}
3290
3291static void
3292mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp,
3293				  struct mlxsw_sp_bridge_device *bridge_device,
3294				  const struct net_device *vxlan_dev, u16 vid)
3295{
3296	struct vxlan_dev *vxlan = netdev_priv(vxlan_dev);
3297	__be32 vni = vxlan->cfg.vni;
3298	struct mlxsw_sp_fid *fid;
3299
3300	if (!netif_running(vxlan_dev))
3301		return;
3302
3303	fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni);
3304	if (!fid)
3305		return;
3306
3307	/* A different VLAN than the one mapped to the VNI is deleted */
3308	if (mlxsw_sp_fid_8021q_vid(fid) != vid)
3309		goto out;
3310
3311	mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev);
3312
3313out:
3314	mlxsw_sp_fid_put(fid);
3315}
3316
3317static int
3318mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev,
3319				   struct switchdev_notifier_port_obj_info *
3320				   port_obj_info)
3321{
3322	struct switchdev_obj_port_vlan *vlan =
3323		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3324	bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
3325	bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
3326	struct switchdev_trans *trans = port_obj_info->trans;
3327	struct mlxsw_sp_bridge_device *bridge_device;
3328	struct netlink_ext_ack *extack;
3329	struct mlxsw_sp *mlxsw_sp;
3330	struct net_device *br_dev;
3331	u16 vid;
3332
3333	extack = switchdev_notifier_info_to_extack(&port_obj_info->info);
3334	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3335	if (!br_dev)
3336		return 0;
3337
3338	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3339	if (!mlxsw_sp)
3340		return 0;
3341
3342	port_obj_info->handled = true;
3343
3344	if (switchdev_trans_ph_commit(trans))
3345		return 0;
3346
3347	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3348	if (!bridge_device)
3349		return -EINVAL;
3350
3351	if (!bridge_device->vlan_enabled)
3352		return 0;
3353
3354	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
3355		int err;
3356
3357		err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device,
3358							vxlan_dev, vid,
3359							flag_untagged,
3360							flag_pvid, extack);
3361		if (err)
3362			return err;
3363	}
3364
3365	return 0;
3366}
3367
3368static void
3369mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev,
3370				   struct switchdev_notifier_port_obj_info *
3371				   port_obj_info)
3372{
3373	struct switchdev_obj_port_vlan *vlan =
3374		SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj);
3375	struct mlxsw_sp_bridge_device *bridge_device;
3376	struct mlxsw_sp *mlxsw_sp;
3377	struct net_device *br_dev;
3378	u16 vid;
3379
3380	br_dev = netdev_master_upper_dev_get(vxlan_dev);
3381	if (!br_dev)
3382		return;
3383
3384	mlxsw_sp = mlxsw_sp_lower_get(br_dev);
3385	if (!mlxsw_sp)
3386		return;
3387
3388	port_obj_info->handled = true;
3389
3390	bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev);
3391	if (!bridge_device)
3392		return;
3393
3394	if (!bridge_device->vlan_enabled)
3395		return;
3396
3397	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++)
3398		mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device,
3399						  vxlan_dev, vid);
3400}
3401
3402static int
3403mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev,
3404					struct switchdev_notifier_port_obj_info *
3405					port_obj_info)
3406{
3407	int err = 0;
3408
3409	switch (port_obj_info->obj->id) {
3410	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3411		err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev,
3412							 port_obj_info);
3413		break;
3414	default:
3415		break;
3416	}
3417
3418	return err;
3419}
3420
3421static void
3422mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev,
3423					struct switchdev_notifier_port_obj_info *
3424					port_obj_info)
3425{
3426	switch (port_obj_info->obj->id) {
3427	case SWITCHDEV_OBJ_ID_PORT_VLAN:
3428		mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info);
3429		break;
3430	default:
3431		break;
3432	}
3433}
3434
3435static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused,
3436					     unsigned long event, void *ptr)
3437{
3438	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
3439	int err = 0;
3440
3441	switch (event) {
3442	case SWITCHDEV_PORT_OBJ_ADD:
3443		if (netif_is_vxlan(dev))
3444			err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr);
3445		else
3446			err = switchdev_handle_port_obj_add(dev, ptr,
3447							mlxsw_sp_port_dev_check,
3448							mlxsw_sp_port_obj_add);
3449		return notifier_from_errno(err);
3450	case SWITCHDEV_PORT_OBJ_DEL:
3451		if (netif_is_vxlan(dev))
3452			mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr);
3453		else
3454			err = switchdev_handle_port_obj_del(dev, ptr,
3455							mlxsw_sp_port_dev_check,
3456							mlxsw_sp_port_obj_del);
3457		return notifier_from_errno(err);
3458	case SWITCHDEV_PORT_ATTR_SET:
3459		err = switchdev_handle_port_attr_set(dev, ptr,
3460						     mlxsw_sp_port_dev_check,
3461						     mlxsw_sp_port_attr_set);
3462		return notifier_from_errno(err);
3463	}
3464
3465	return NOTIFY_DONE;
3466}
3467
/* Blocking switchdev notifier; registered in mlxsw_sp_fdb_init(). */
static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = {
	.notifier_call = mlxsw_sp_switchdev_blocking_event,
};
3471
/* Return the cached STP state of a bridge port. */
u8
mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port)
{
	return bridge_port->stp_state;
}
3477
/* Initialize FDB offload: set the default ageing time, register both
 * switchdev notifiers and start the periodic FDB notification poll.
 * On failure, anything registered so far is unwound in reverse order.
 */
static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge;
	struct notifier_block *nb;
	int err;

	err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set default ageing time\n");
		return err;
	}

	err = register_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev notifier\n");
		return err;
	}

	nb = &mlxsw_sp_switchdev_blocking_notifier;
	err = register_switchdev_blocking_notifier(nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n");
		goto err_register_switchdev_blocking_notifier;
	}

	/* Start polling the device for FDB notifications. */
	INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work);
	bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL;
	mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp);
	return 0;

err_register_switchdev_blocking_notifier:
	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
	return err;
}
3512
3513static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp)
3514{
3515	struct notifier_block *nb;
3516
3517	cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw);
3518
3519	nb = &mlxsw_sp_switchdev_blocking_notifier;
3520	unregister_switchdev_blocking_notifier(nb);
3521
3522	unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier);
3523}
3524
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3525int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp)
3526{
3527	struct mlxsw_sp_bridge *bridge;
3528
3529	bridge = kzalloc(sizeof(*mlxsw_sp->bridge), GFP_KERNEL);
3530	if (!bridge)
3531		return -ENOMEM;
3532	mlxsw_sp->bridge = bridge;
3533	bridge->mlxsw_sp = mlxsw_sp;
3534
3535	INIT_LIST_HEAD(&mlxsw_sp->bridge->bridges_list);
3536
3537	bridge->bridge_8021q_ops = &mlxsw_sp_bridge_8021q_ops;
3538	bridge->bridge_8021d_ops = &mlxsw_sp_bridge_8021d_ops;
 
 
3539
3540	return mlxsw_sp_fdb_init(mlxsw_sp);
3541}
3542
/* Tear down switchdev support and free the bridge state allocated by
 * mlxsw_sp_switchdev_init(). All offloaded bridges are expected to be
 * gone by now; warn if any remain.
 */
void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_fdb_fini(mlxsw_sp);
	WARN_ON(!list_empty(&mlxsw_sp->bridge->bridges_list));
	kfree(mlxsw_sp->bridge);
}
3549