v4.10.11
   1/*
   2 * net/switchdev/switchdev.c - Switch device API
   3 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
   4 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License as published by
   8 * the Free Software Foundation; either version 2 of the License, or
   9 * (at your option) any later version.
  10 */
  11
  12#include <linux/kernel.h>
  13#include <linux/types.h>
  14#include <linux/init.h>
  15#include <linux/mutex.h>
  16#include <linux/notifier.h>
  17#include <linux/netdevice.h>
  18#include <linux/etherdevice.h>
  19#include <linux/if_bridge.h>
  20#include <linux/list.h>
  21#include <linux/workqueue.h>
  22#include <linux/if_vlan.h>
  23#include <linux/rtnetlink.h>
  24#include <net/switchdev.h>
  25
  26/**
  27 *	switchdev_trans_item_enqueue - Enqueue data item to transaction queue
  28 *
  29 *	@trans: transaction
  30 *	@data: pointer to data being queued
  31 *	@destructor: data destructor
  32 *	@tritem: transaction item being queued
  33 *
   34 *	Enqueue data item to transaction queue. tritem is typically placed in
   35 *	container pointed at by data pointer. Destructor is called on
  36 *	transaction abort and after successful commit phase in case
  37 *	the caller did not dequeue the item before.
  38 */
  39void switchdev_trans_item_enqueue(struct switchdev_trans *trans,
  40				  void *data, void (*destructor)(void const *),
  41				  struct switchdev_trans_item *tritem)
  42{
  43	tritem->data = data;
  44	tritem->destructor = destructor;
  45	list_add_tail(&tritem->list, &trans->item_list);
  46}
  47EXPORT_SYMBOL_GPL(switchdev_trans_item_enqueue);
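
A minimal sketch of how a driver might pair these helpers across the two
transaction phases. Only trans->ph_prepare and the destructor contract
documented above are taken from this file; the context structure and
function names are hypothetical:

struct example_vlan_ctx {
	struct switchdev_trans_item tritem;
	u16 vid;
};

static int example_port_vlan_set(struct net_device *dev, u16 vid,
				 struct switchdev_trans *trans)
{
	struct example_vlan_ctx *ctx;

	if (trans->ph_prepare) {
		/* Phase I: reserve memory only; do not touch hardware. */
		ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;
		ctx->vid = vid;
		/* kfree() matches the destructor signature used above. */
		switchdev_trans_item_enqueue(trans, ctx, kfree, &ctx->tritem);
		return 0;
	}

	/* Phase II: consume the item reserved during the prepare phase. */
	ctx = switchdev_trans_item_dequeue(trans);
	/* ... program the hardware with ctx->vid here ... */
	kfree(ctx);
	return 0;
}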
  48
  49static struct switchdev_trans_item *
  50__switchdev_trans_item_dequeue(struct switchdev_trans *trans)
  51{
  52	struct switchdev_trans_item *tritem;
  53
  54	if (list_empty(&trans->item_list))
  55		return NULL;
  56	tritem = list_first_entry(&trans->item_list,
  57				  struct switchdev_trans_item, list);
  58	list_del(&tritem->list);
  59	return tritem;
  60}
  61
  62/**
  63 *	switchdev_trans_item_dequeue - Dequeue data item from transaction queue
  64 *
  65 *	@trans: transaction
  66 */
  67void *switchdev_trans_item_dequeue(struct switchdev_trans *trans)
  68{
  69	struct switchdev_trans_item *tritem;
  70
  71	tritem = __switchdev_trans_item_dequeue(trans);
  72	BUG_ON(!tritem);
  73	return tritem->data;
  74}
  75EXPORT_SYMBOL_GPL(switchdev_trans_item_dequeue);
  76
  77static void switchdev_trans_init(struct switchdev_trans *trans)
  78{
  79	INIT_LIST_HEAD(&trans->item_list);
  80}
  81
  82static void switchdev_trans_items_destroy(struct switchdev_trans *trans)
  83{
  84	struct switchdev_trans_item *tritem;
  85
  86	while ((tritem = __switchdev_trans_item_dequeue(trans)))
  87		tritem->destructor(tritem->data);
  88}
  89
  90static void switchdev_trans_items_warn_destroy(struct net_device *dev,
  91					       struct switchdev_trans *trans)
  92{
  93	WARN(!list_empty(&trans->item_list), "%s: transaction item queue is not empty.\n",
  94	     dev->name);
  95	switchdev_trans_items_destroy(trans);
  96}
  97
  98static LIST_HEAD(deferred);
  99static DEFINE_SPINLOCK(deferred_lock);
 100
 101typedef void switchdev_deferred_func_t(struct net_device *dev,
 102				       const void *data);
 103
 104struct switchdev_deferred_item {
 105	struct list_head list;
 106	struct net_device *dev;
 107	switchdev_deferred_func_t *func;
 108	unsigned long data[0];
 109};
 110
 111static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
 112{
 113	struct switchdev_deferred_item *dfitem;
 114
 115	spin_lock_bh(&deferred_lock);
 116	if (list_empty(&deferred)) {
 117		dfitem = NULL;
 118		goto unlock;
 119	}
 120	dfitem = list_first_entry(&deferred,
 121				  struct switchdev_deferred_item, list);
 122	list_del(&dfitem->list);
 123unlock:
 124	spin_unlock_bh(&deferred_lock);
 125	return dfitem;
 126}
 127
 128/**
 129 *	switchdev_deferred_process - Process ops in deferred queue
 130 *
 131 *	Called to flush the ops currently queued in deferred ops queue.
 132 *	rtnl_lock must be held.
 133 */
 134void switchdev_deferred_process(void)
 135{
 136	struct switchdev_deferred_item *dfitem;
 137
 138	ASSERT_RTNL();
 139
 140	while ((dfitem = switchdev_deferred_dequeue())) {
 141		dfitem->func(dfitem->dev, dfitem->data);
 142		dev_put(dfitem->dev);
 143		kfree(dfitem);
 144	}
 145}
 146EXPORT_SYMBOL_GPL(switchdev_deferred_process);
 147
 148static void switchdev_deferred_process_work(struct work_struct *work)
 149{
 150	rtnl_lock();
 151	switchdev_deferred_process();
 152	rtnl_unlock();
 153}
 154
 155static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
 156
 157static int switchdev_deferred_enqueue(struct net_device *dev,
 158				      const void *data, size_t data_len,
 159				      switchdev_deferred_func_t *func)
 160{
 161	struct switchdev_deferred_item *dfitem;
 162
 163	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
 164	if (!dfitem)
 165		return -ENOMEM;
 166	dfitem->dev = dev;
 167	dfitem->func = func;
 168	memcpy(dfitem->data, data, data_len);
 169	dev_hold(dev);
 170	spin_lock_bh(&deferred_lock);
 171	list_add_tail(&dfitem->list, &deferred);
 172	spin_unlock_bh(&deferred_lock);
 173	schedule_work(&deferred_process_work);
 174	return 0;
 175}
 176
 177/**
 178 *	switchdev_port_attr_get - Get port attribute
 179 *
 180 *	@dev: port device
 181 *	@attr: attribute to get
 182 */
 183int switchdev_port_attr_get(struct net_device *dev, struct switchdev_attr *attr)
 184{
 185	const struct switchdev_ops *ops = dev->switchdev_ops;
 186	struct net_device *lower_dev;
 187	struct list_head *iter;
 188	struct switchdev_attr first = {
 189		.id = SWITCHDEV_ATTR_ID_UNDEFINED
 190	};
 191	int err = -EOPNOTSUPP;
 192
 193	if (ops && ops->switchdev_port_attr_get)
 194		return ops->switchdev_port_attr_get(dev, attr);
 195
 196	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
 197		return err;
 198
 199	/* Switch device port(s) may be stacked under
 200	 * bond/team/vlan dev, so recurse down to get attr on
 201	 * each port.  Return -ENODATA if attr values don't
 202	 * compare across ports.
 203	 */
 204
 205	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 206		err = switchdev_port_attr_get(lower_dev, attr);
 207		if (err)
 208			break;
 209		if (first.id == SWITCHDEV_ATTR_ID_UNDEFINED)
 210			first = *attr;
 211		else if (memcmp(&first, attr, sizeof(*attr)))
 212			return -ENODATA;
 213	}
 214
 215	return err;
 216}
 217EXPORT_SYMBOL_GPL(switchdev_port_attr_get);
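
As a usage sketch, a caller can fetch a single attribute as below; the
attribute id and the u.ppid member are the same ones used by
switchdev_port_same_parent_id() at the end of this file, while the wrapper
name is hypothetical:

static int example_get_parent_id(struct net_device *dev,
				 struct netdev_phys_item_id *ppid)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
	};
	int err;

	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	*ppid = attr.u.ppid;
	return 0;
}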
 218
 219static int __switchdev_port_attr_set(struct net_device *dev,
 220				     const struct switchdev_attr *attr,
 221				     struct switchdev_trans *trans)
 222{
 223	const struct switchdev_ops *ops = dev->switchdev_ops;
 224	struct net_device *lower_dev;
 225	struct list_head *iter;
 226	int err = -EOPNOTSUPP;
 227
 228	if (ops && ops->switchdev_port_attr_set) {
 229		err = ops->switchdev_port_attr_set(dev, attr, trans);
 230		goto done;
 231	}
 232
 233	if (attr->flags & SWITCHDEV_F_NO_RECURSE)
 234		goto done;
 235
 236	/* Switch device port(s) may be stacked under
 237	 * bond/team/vlan dev, so recurse down to set attr on
 238	 * each port.
 239	 */
 240
 241	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 242		err = __switchdev_port_attr_set(lower_dev, attr, trans);
 243		if (err)
 244			break;
 245	}
 246
 247done:
 248	if (err == -EOPNOTSUPP && attr->flags & SWITCHDEV_F_SKIP_EOPNOTSUPP)
 249		err = 0;
 250
 251	return err;
 252}
 253
 254static int switchdev_port_attr_set_now(struct net_device *dev,
 255				       const struct switchdev_attr *attr)
 256{
 257	struct switchdev_trans trans;
 258	int err;
 259
 260	switchdev_trans_init(&trans);
 261
 262	/* Phase I: prepare for attr set. Driver/device should fail
 263	 * here if there are going to be issues in the commit phase,
 264	 * such as lack of resources or support.  The driver/device
 265	 * should reserve resources needed for the commit phase here,
 266	 * but should not commit the attr.
 267	 */
 268
 269	trans.ph_prepare = true;
 270	err = __switchdev_port_attr_set(dev, attr, &trans);
 271	if (err) {
 272		/* Prepare phase failed: abort the transaction.  Any
 273		 * resources reserved in the prepare phase are
 274		 * released.
 275		 */
 276
 277		if (err != -EOPNOTSUPP)
 278			switchdev_trans_items_destroy(&trans);
 279
 280		return err;
 281	}
 282
 283	/* Phase II: commit attr set.  This cannot fail as a fault
 284	 * of driver/device.  If it does, it's a bug in the driver/device
  285	 * because the driver said everything was OK in phase I.
 286	 */
 287
 288	trans.ph_prepare = false;
 289	err = __switchdev_port_attr_set(dev, attr, &trans);
 290	WARN(err, "%s: Commit of attribute (id=%d) failed.\n",
 291	     dev->name, attr->id);
 292	switchdev_trans_items_warn_destroy(dev, &trans);
 293
 294	return err;
 295}
 296
 297static void switchdev_port_attr_set_deferred(struct net_device *dev,
 298					     const void *data)
 299{
 300	const struct switchdev_attr *attr = data;
 301	int err;
 302
 303	err = switchdev_port_attr_set_now(dev, attr);
 304	if (err && err != -EOPNOTSUPP)
 305		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
 306			   err, attr->id);
 307	if (attr->complete)
 308		attr->complete(dev, err, attr->complete_priv);
 309}
 310
 311static int switchdev_port_attr_set_defer(struct net_device *dev,
 312					 const struct switchdev_attr *attr)
 313{
 314	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
 315					  switchdev_port_attr_set_deferred);
 316}
 317
 318/**
 319 *	switchdev_port_attr_set - Set port attribute
 320 *
 321 *	@dev: port device
 322 *	@attr: attribute to set
 323 *
 324 *	Use a 2-phase prepare-commit transaction model to ensure
 325 *	system is not left in a partially updated state due to
 326 *	failure from driver/device.
 327 *
 328 *	rtnl_lock must be held and must not be in atomic section,
 329 *	in case SWITCHDEV_F_DEFER flag is not set.
 330 */
 331int switchdev_port_attr_set(struct net_device *dev,
 332			    const struct switchdev_attr *attr)
 333{
 334	if (attr->flags & SWITCHDEV_F_DEFER)
 335		return switchdev_port_attr_set_defer(dev, attr);
 336	ASSERT_RTNL();
 337	return switchdev_port_attr_set_now(dev, attr);
 338}
 339EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
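
A sketch of a caller flipping one bridge port flag through this entry
point; it mirrors switchdev_port_br_setflag() further down in this file,
and the wrapper name is hypothetical:

static int example_port_set_learning(struct net_device *dev, bool on)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
	};
	int err;

	/* Read-modify-write the current bridge port flags. */
	err = switchdev_port_attr_get(dev, &attr);
	if (err)
		return err;

	if (on)
		attr.u.brport_flags |= BR_LEARNING;
	else
		attr.u.brport_flags &= ~BR_LEARNING;

	return switchdev_port_attr_set(dev, &attr);
}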
 340
 341static size_t switchdev_obj_size(const struct switchdev_obj *obj)
 342{
 343	switch (obj->id) {
 344	case SWITCHDEV_OBJ_ID_PORT_VLAN:
 345		return sizeof(struct switchdev_obj_port_vlan);
 346	case SWITCHDEV_OBJ_ID_PORT_FDB:
 347		return sizeof(struct switchdev_obj_port_fdb);
 348	case SWITCHDEV_OBJ_ID_PORT_MDB:
 349		return sizeof(struct switchdev_obj_port_mdb);
 350	default:
 351		BUG();
 352	}
 353	return 0;
 354}
 355
 356static int __switchdev_port_obj_add(struct net_device *dev,
 357				    const struct switchdev_obj *obj,
 358				    struct switchdev_trans *trans)
 359{
 360	const struct switchdev_ops *ops = dev->switchdev_ops;
 361	struct net_device *lower_dev;
 362	struct list_head *iter;
 363	int err = -EOPNOTSUPP;
 364
 365	if (ops && ops->switchdev_port_obj_add)
 366		return ops->switchdev_port_obj_add(dev, obj, trans);
 367
 368	/* Switch device port(s) may be stacked under
 369	 * bond/team/vlan dev, so recurse down to add object on
 370	 * each port.
 371	 */
 372
 373	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 374		err = __switchdev_port_obj_add(lower_dev, obj, trans);
 375		if (err)
 376			break;
 377	}
 378
 379	return err;
 380}
 381
 382static int switchdev_port_obj_add_now(struct net_device *dev,
 383				      const struct switchdev_obj *obj)
 384{
 385	struct switchdev_trans trans;
 386	int err;
 387
 388	ASSERT_RTNL();
 389
 390	switchdev_trans_init(&trans);
 391
 392	/* Phase I: prepare for obj add. Driver/device should fail
 393	 * here if there are going to be issues in the commit phase,
 394	 * such as lack of resources or support.  The driver/device
 395	 * should reserve resources needed for the commit phase here,
 396	 * but should not commit the obj.
 397	 */
 398
 399	trans.ph_prepare = true;
 400	err = __switchdev_port_obj_add(dev, obj, &trans);
 401	if (err) {
 402		/* Prepare phase failed: abort the transaction.  Any
 403		 * resources reserved in the prepare phase are
 404		 * released.
 405		 */
 406
 407		if (err != -EOPNOTSUPP)
 408			switchdev_trans_items_destroy(&trans);
 409
 410		return err;
 411	}
 412
 413	/* Phase II: commit obj add.  This cannot fail as a fault
 414	 * of driver/device.  If it does, it's a bug in the driver/device
  415	 * because the driver said everything was OK in phase I.
 416	 */
 417
 418	trans.ph_prepare = false;
 419	err = __switchdev_port_obj_add(dev, obj, &trans);
 420	WARN(err, "%s: Commit of object (id=%d) failed.\n", dev->name, obj->id);
 421	switchdev_trans_items_warn_destroy(dev, &trans);
 422
 423	return err;
 424}
 425
 426static void switchdev_port_obj_add_deferred(struct net_device *dev,
 427					    const void *data)
 428{
 429	const struct switchdev_obj *obj = data;
 430	int err;
 431
 432	err = switchdev_port_obj_add_now(dev, obj);
 433	if (err && err != -EOPNOTSUPP)
 434		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
 435			   err, obj->id);
 436	if (obj->complete)
 437		obj->complete(dev, err, obj->complete_priv);
 438}
 439
 440static int switchdev_port_obj_add_defer(struct net_device *dev,
 441					const struct switchdev_obj *obj)
 442{
 443	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
 444					  switchdev_port_obj_add_deferred);
 445}
 446
 447/**
 448 *	switchdev_port_obj_add - Add port object
 449 *
 450 *	@dev: port device
 452 *	@obj: object to add
 453 *
 454 *	Use a 2-phase prepare-commit transaction model to ensure
 455 *	system is not left in a partially updated state due to
 456 *	failure from driver/device.
 457 *
 458 *	rtnl_lock must be held and must not be in atomic section,
 459 *	in case SWITCHDEV_F_DEFER flag is not set.
 460 */
 461int switchdev_port_obj_add(struct net_device *dev,
 462			   const struct switchdev_obj *obj)
 463{
 464	if (obj->flags & SWITCHDEV_F_DEFER)
 465		return switchdev_port_obj_add_defer(dev, obj);
 466	ASSERT_RTNL();
 467	return switchdev_port_obj_add_now(dev, obj);
 468}
 469EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
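
For illustration, adding a single VLAN through this path can look like the
sketch below; the vid_begin/vid_end/flags fields are the same ones
switchdev_port_br_afspec() fills in later in this file, while the function
name is hypothetical:

static int example_port_vlan_add(struct net_device *dev, u16 vid, u16 flags)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid_begin = vid,
		.vid_end = vid,
	};

	return switchdev_port_obj_add(dev, &vlan.obj);
}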
 470
 471static int switchdev_port_obj_del_now(struct net_device *dev,
 472				      const struct switchdev_obj *obj)
 473{
 474	const struct switchdev_ops *ops = dev->switchdev_ops;
 475	struct net_device *lower_dev;
 476	struct list_head *iter;
 477	int err = -EOPNOTSUPP;
 478
 479	if (ops && ops->switchdev_port_obj_del)
 480		return ops->switchdev_port_obj_del(dev, obj);
 481
 482	/* Switch device port(s) may be stacked under
 483	 * bond/team/vlan dev, so recurse down to delete object on
 484	 * each port.
 485	 */
 486
 487	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 488		err = switchdev_port_obj_del_now(lower_dev, obj);
 489		if (err)
 490			break;
 491	}
 492
 493	return err;
 494}
 495
 496static void switchdev_port_obj_del_deferred(struct net_device *dev,
 497					    const void *data)
 498{
 499	const struct switchdev_obj *obj = data;
 500	int err;
 501
 502	err = switchdev_port_obj_del_now(dev, obj);
 503	if (err && err != -EOPNOTSUPP)
 504		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
 505			   err, obj->id);
 506	if (obj->complete)
 507		obj->complete(dev, err, obj->complete_priv);
 508}
 509
 510static int switchdev_port_obj_del_defer(struct net_device *dev,
 511					const struct switchdev_obj *obj)
 512{
 513	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
 514					  switchdev_port_obj_del_deferred);
 515}
 516
 517/**
 518 *	switchdev_port_obj_del - Delete port object
 519 *
 520 *	@dev: port device
 522 *	@obj: object to delete
 523 *
 524 *	rtnl_lock must be held and must not be in atomic section,
 525 *	in case SWITCHDEV_F_DEFER flag is not set.
 526 */
 527int switchdev_port_obj_del(struct net_device *dev,
 528			   const struct switchdev_obj *obj)
 529{
 530	if (obj->flags & SWITCHDEV_F_DEFER)
 531		return switchdev_port_obj_del_defer(dev, obj);
 532	ASSERT_RTNL();
 533	return switchdev_port_obj_del_now(dev, obj);
 534}
 535EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 536
 537/**
 538 *	switchdev_port_obj_dump - Dump port objects
 539 *
 540 *	@dev: port device
 542 *	@obj: object to dump
 543 *	@cb: function to call with a filled object
 544 *
 545 *	rtnl_lock must be held.
 546 */
 547int switchdev_port_obj_dump(struct net_device *dev, struct switchdev_obj *obj,
 548			    switchdev_obj_dump_cb_t *cb)
 549{
 550	const struct switchdev_ops *ops = dev->switchdev_ops;
 551	struct net_device *lower_dev;
 552	struct list_head *iter;
 553	int err = -EOPNOTSUPP;
 554
 555	ASSERT_RTNL();
 556
 557	if (ops && ops->switchdev_port_obj_dump)
 558		return ops->switchdev_port_obj_dump(dev, obj, cb);
 559
 560	/* Switch device port(s) may be stacked under
 561	 * bond/team/vlan dev, so recurse down to dump objects on
 562	 * first port at bottom of stack.
 563	 */
 564
 565	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 566		err = switchdev_port_obj_dump(lower_dev, obj, cb);
 567		break;
 568	}
 569
 570	return err;
 571}
 572EXPORT_SYMBOL_GPL(switchdev_port_obj_dump);
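
A dump consumer follows the same container_of() pattern as the callbacks
further down in this file; this sketch merely counts the VLANs configured
on a port (all names hypothetical):

struct example_vlan_count {
	struct switchdev_obj_port_vlan vlan;
	int count;
};

static int example_count_vlan_cb(struct switchdev_obj *obj)
{
	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
	struct example_vlan_count *cnt =
		container_of(vlan, struct example_vlan_count, vlan);

	cnt->count += vlan->vid_end - vlan->vid_begin + 1;
	return 0;
}

static int example_count_vlans(struct net_device *dev, int *count)
{
	struct example_vlan_count cnt = {
		.vlan.obj.orig_dev = dev,
		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
	};
	int err;

	err = switchdev_port_obj_dump(dev, &cnt.vlan.obj,
				      example_count_vlan_cb);
	if (!err)
		*count = cnt.count;
	return err;
}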
 573
 574static RAW_NOTIFIER_HEAD(switchdev_notif_chain);
 575
 576/**
 577 *	register_switchdev_notifier - Register notifier
 578 *	@nb: notifier_block
 579 *
 580 *	Register switch device notifier. This should be used by code
  581 *	which needs to monitor events happening on a particular device.
 582 *	Return values are same as for atomic_notifier_chain_register().
 583 */
 584int register_switchdev_notifier(struct notifier_block *nb)
 585{
 586	int err;
 587
 588	rtnl_lock();
 589	err = raw_notifier_chain_register(&switchdev_notif_chain, nb);
 590	rtnl_unlock();
 591	return err;
 592}
 593EXPORT_SYMBOL_GPL(register_switchdev_notifier);
 594
 595/**
 596 *	unregister_switchdev_notifier - Unregister notifier
 597 *	@nb: notifier_block
 598 *
 599 *	Unregister switch device notifier.
 600 *	Return values are same as for atomic_notifier_chain_unregister().
 601 */
 602int unregister_switchdev_notifier(struct notifier_block *nb)
 603{
 604	int err;
 605
 606	rtnl_lock();
 607	err = raw_notifier_chain_unregister(&switchdev_notif_chain, nb);
 608	rtnl_unlock();
 609	return err;
 610}
 611EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
 612
 613/**
 614 *	call_switchdev_notifiers - Call notifiers
 615 *	@val: value passed unmodified to notifier function
 616 *	@dev: port device
 617 *	@info: notifier information data
 618 *
  619 *	Call all network notifier blocks. This should be called by a driver
  620 *	when it needs to propagate a hardware event.
 621 *	Return values are same as for atomic_notifier_call_chain().
 622 *	rtnl_lock must be held.
 623 */
 624int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
 625			     struct switchdev_notifier_info *info)
 626{
 627	ASSERT_RTNL();
 628
 629	info->dev = dev;
 630	return raw_notifier_call_chain(&switchdev_notif_chain, val, info);
 631}
 632EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
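
A consumer of this chain (the bridge, for instance) registers a block such
as the hypothetical sketch below; info->dev is filled in by
call_switchdev_notifiers() above:

static int example_switchdev_event(struct notifier_block *unused,
				   unsigned long event, void *ptr)
{
	struct switchdev_notifier_info *info = ptr;
	struct net_device *dev = info->dev;

	/* ... react to the hardware-originated event for dev ... */
	return NOTIFY_DONE;
}

static struct notifier_block example_switchdev_nb = {
	.notifier_call = example_switchdev_event,
};

/* At init time: register_switchdev_notifier(&example_switchdev_nb); */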
 633
 634struct switchdev_vlan_dump {
 635	struct switchdev_obj_port_vlan vlan;
 636	struct sk_buff *skb;
 637	u32 filter_mask;
 638	u16 flags;
 639	u16 begin;
 640	u16 end;
 641};
 642
 643static int switchdev_port_vlan_dump_put(struct switchdev_vlan_dump *dump)
 644{
 645	struct bridge_vlan_info vinfo;
 646
 647	vinfo.flags = dump->flags;
 648
 649	if (dump->begin == 0 && dump->end == 0) {
 650		return 0;
 651	} else if (dump->begin == dump->end) {
 652		vinfo.vid = dump->begin;
 653		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
 654			    sizeof(vinfo), &vinfo))
 655			return -EMSGSIZE;
 656	} else {
 657		vinfo.vid = dump->begin;
 658		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_BEGIN;
 659		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
 660			    sizeof(vinfo), &vinfo))
 661			return -EMSGSIZE;
 662		vinfo.vid = dump->end;
 663		vinfo.flags &= ~BRIDGE_VLAN_INFO_RANGE_BEGIN;
 664		vinfo.flags |= BRIDGE_VLAN_INFO_RANGE_END;
 665		if (nla_put(dump->skb, IFLA_BRIDGE_VLAN_INFO,
 666			    sizeof(vinfo), &vinfo))
 667			return -EMSGSIZE;
 668	}
 669
 670	return 0;
 671}
 672
 673static int switchdev_port_vlan_dump_cb(struct switchdev_obj *obj)
 674{
 675	struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
 676	struct switchdev_vlan_dump *dump =
 677		container_of(vlan, struct switchdev_vlan_dump, vlan);
 678	int err = 0;
 679
 680	if (vlan->vid_begin > vlan->vid_end)
 681		return -EINVAL;
 682
 683	if (dump->filter_mask & RTEXT_FILTER_BRVLAN) {
 684		dump->flags = vlan->flags;
 685		for (dump->begin = dump->end = vlan->vid_begin;
 686		     dump->begin <= vlan->vid_end;
 687		     dump->begin++, dump->end++) {
 688			err = switchdev_port_vlan_dump_put(dump);
 689			if (err)
 690				return err;
 691		}
 692	} else if (dump->filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) {
 693		if (dump->begin > vlan->vid_begin &&
 694		    dump->begin >= vlan->vid_end) {
 695			if ((dump->begin - 1) == vlan->vid_end &&
 696			    dump->flags == vlan->flags) {
 697				/* prepend */
 698				dump->begin = vlan->vid_begin;
 699			} else {
 700				err = switchdev_port_vlan_dump_put(dump);
 701				dump->flags = vlan->flags;
 702				dump->begin = vlan->vid_begin;
 703				dump->end = vlan->vid_end;
 704			}
 705		} else if (dump->end <= vlan->vid_begin &&
 706		           dump->end < vlan->vid_end) {
 707			if ((dump->end  + 1) == vlan->vid_begin &&
 708			    dump->flags == vlan->flags) {
 709				/* append */
 710				dump->end = vlan->vid_end;
 711			} else {
 712				err = switchdev_port_vlan_dump_put(dump);
 713				dump->flags = vlan->flags;
 714				dump->begin = vlan->vid_begin;
 715				dump->end = vlan->vid_end;
 716			}
 717		} else {
 718			err = -EINVAL;
 719		}
 720	}
 721
 722	return err;
 723}
 724
 725static int switchdev_port_vlan_fill(struct sk_buff *skb, struct net_device *dev,
 726				    u32 filter_mask)
 727{
 728	struct switchdev_vlan_dump dump = {
 729		.vlan.obj.orig_dev = dev,
 730		.vlan.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 731		.skb = skb,
 732		.filter_mask = filter_mask,
 733	};
 734	int err = 0;
 735
 736	if ((filter_mask & RTEXT_FILTER_BRVLAN) ||
 737	    (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) {
 738		err = switchdev_port_obj_dump(dev, &dump.vlan.obj,
 739					      switchdev_port_vlan_dump_cb);
 740		if (err)
 741			goto err_out;
 742		if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)
 743			/* last one */
 744			err = switchdev_port_vlan_dump_put(&dump);
 745	}
 746
 747err_out:
 748	return err == -EOPNOTSUPP ? 0 : err;
 749}
 750
 751/**
 752 *	switchdev_port_bridge_getlink - Get bridge port attributes
 753 *
 754 *	@dev: port device
 755 *
 756 *	Called for SELF on rtnl_bridge_getlink to get bridge port
 757 *	attributes.
 758 */
 759int switchdev_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
 760				  struct net_device *dev, u32 filter_mask,
 761				  int nlflags)
 762{
 763	struct switchdev_attr attr = {
 764		.orig_dev = dev,
 765		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 766	};
 767	u16 mode = BRIDGE_MODE_UNDEF;
 768	u32 mask = BR_LEARNING | BR_LEARNING_SYNC | BR_FLOOD;
 769	int err;
 770
 771	if (!netif_is_bridge_port(dev))
 772		return -EOPNOTSUPP;
 773
 774	err = switchdev_port_attr_get(dev, &attr);
 775	if (err && err != -EOPNOTSUPP)
 776		return err;
 777
 778	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
 779				       attr.u.brport_flags, mask, nlflags,
 780				       filter_mask, switchdev_port_vlan_fill);
 781}
 782EXPORT_SYMBOL_GPL(switchdev_port_bridge_getlink);
 783
 784static int switchdev_port_br_setflag(struct net_device *dev,
 785				     struct nlattr *nlattr,
 786				     unsigned long brport_flag)
 787{
 788	struct switchdev_attr attr = {
 789		.orig_dev = dev,
 790		.id = SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS,
 791	};
 792	u8 flag = nla_get_u8(nlattr);
 793	int err;
 794
 795	err = switchdev_port_attr_get(dev, &attr);
 796	if (err)
 797		return err;
 798
 799	if (flag)
 800		attr.u.brport_flags |= brport_flag;
 801	else
 802		attr.u.brport_flags &= ~brport_flag;
 803
 804	return switchdev_port_attr_set(dev, &attr);
 805}
 806
 807static const struct nla_policy
 808switchdev_port_bridge_policy[IFLA_BRPORT_MAX + 1] = {
 809	[IFLA_BRPORT_STATE]		= { .type = NLA_U8 },
 810	[IFLA_BRPORT_COST]		= { .type = NLA_U32 },
 811	[IFLA_BRPORT_PRIORITY]		= { .type = NLA_U16 },
 812	[IFLA_BRPORT_MODE]		= { .type = NLA_U8 },
 813	[IFLA_BRPORT_GUARD]		= { .type = NLA_U8 },
 814	[IFLA_BRPORT_PROTECT]		= { .type = NLA_U8 },
 815	[IFLA_BRPORT_FAST_LEAVE]	= { .type = NLA_U8 },
 816	[IFLA_BRPORT_LEARNING]		= { .type = NLA_U8 },
 817	[IFLA_BRPORT_LEARNING_SYNC]	= { .type = NLA_U8 },
 818	[IFLA_BRPORT_UNICAST_FLOOD]	= { .type = NLA_U8 },
 819};
 820
 821static int switchdev_port_br_setlink_protinfo(struct net_device *dev,
 822					      struct nlattr *protinfo)
 823{
 824	struct nlattr *attr;
 825	int rem;
 826	int err;
 827
 828	err = nla_validate_nested(protinfo, IFLA_BRPORT_MAX,
 829				  switchdev_port_bridge_policy);
 830	if (err)
 831		return err;
 832
 833	nla_for_each_nested(attr, protinfo, rem) {
 834		switch (nla_type(attr)) {
 835		case IFLA_BRPORT_LEARNING:
 836			err = switchdev_port_br_setflag(dev, attr,
 837							BR_LEARNING);
 838			break;
 839		case IFLA_BRPORT_LEARNING_SYNC:
 840			err = switchdev_port_br_setflag(dev, attr,
 841							BR_LEARNING_SYNC);
 842			break;
 843		case IFLA_BRPORT_UNICAST_FLOOD:
 844			err = switchdev_port_br_setflag(dev, attr, BR_FLOOD);
 845			break;
 846		default:
 847			err = -EOPNOTSUPP;
 848			break;
 849		}
 850		if (err)
 851			return err;
 852	}
 853
 854	return 0;
 855}
 856
 857static int switchdev_port_br_afspec(struct net_device *dev,
 858				    struct nlattr *afspec,
 859				    int (*f)(struct net_device *dev,
 860					     const struct switchdev_obj *obj))
 861{
 862	struct nlattr *attr;
 863	struct bridge_vlan_info *vinfo;
 864	struct switchdev_obj_port_vlan vlan = {
 865		.obj.orig_dev = dev,
 866		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
 867	};
 868	int rem;
 869	int err;
 870
 871	nla_for_each_nested(attr, afspec, rem) {
 872		if (nla_type(attr) != IFLA_BRIDGE_VLAN_INFO)
 873			continue;
 874		if (nla_len(attr) != sizeof(struct bridge_vlan_info))
 875			return -EINVAL;
 876		vinfo = nla_data(attr);
 877		if (!vinfo->vid || vinfo->vid >= VLAN_VID_MASK)
 878			return -EINVAL;
 879		vlan.flags = vinfo->flags;
 880		if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_BEGIN) {
 881			if (vlan.vid_begin)
 882				return -EINVAL;
 883			vlan.vid_begin = vinfo->vid;
 884			/* don't allow range of pvids */
 885			if (vlan.flags & BRIDGE_VLAN_INFO_PVID)
 886				return -EINVAL;
 887		} else if (vinfo->flags & BRIDGE_VLAN_INFO_RANGE_END) {
 888			if (!vlan.vid_begin)
 889				return -EINVAL;
 890			vlan.vid_end = vinfo->vid;
 891			if (vlan.vid_end <= vlan.vid_begin)
 892				return -EINVAL;
 893			err = f(dev, &vlan.obj);
 894			if (err)
 895				return err;
 896			vlan.vid_begin = 0;
 897		} else {
 898			if (vlan.vid_begin)
 899				return -EINVAL;
 900			vlan.vid_begin = vinfo->vid;
 901			vlan.vid_end = vinfo->vid;
 902			err = f(dev, &vlan.obj);
 903			if (err)
 904				return err;
 905			vlan.vid_begin = 0;
 906		}
 907	}
 908
 909	return 0;
 910}
 911
 912/**
 913 *	switchdev_port_bridge_setlink - Set bridge port attributes
 914 *
 915 *	@dev: port device
 916 *	@nlh: netlink header
 917 *	@flags: netlink flags
 918 *
 919 *	Called for SELF on rtnl_bridge_setlink to set bridge port
 920 *	attributes.
 921 */
 922int switchdev_port_bridge_setlink(struct net_device *dev,
 923				  struct nlmsghdr *nlh, u16 flags)
 924{
 925	struct nlattr *protinfo;
 926	struct nlattr *afspec;
 927	int err = 0;
 928
 929	if (!netif_is_bridge_port(dev))
 930		return -EOPNOTSUPP;
 931
 932	protinfo = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
 933				   IFLA_PROTINFO);
 934	if (protinfo) {
 935		err = switchdev_port_br_setlink_protinfo(dev, protinfo);
 936		if (err)
 937			return err;
 938	}
 939
 940	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
 941				 IFLA_AF_SPEC);
 942	if (afspec)
 943		err = switchdev_port_br_afspec(dev, afspec,
 944					       switchdev_port_obj_add);
 945
 946	return err;
 947}
 948EXPORT_SYMBOL_GPL(switchdev_port_bridge_setlink);
 949
 950/**
  951 *	switchdev_port_bridge_dellink - Delete bridge port attributes
 952 *
 953 *	@dev: port device
 954 *	@nlh: netlink header
 955 *	@flags: netlink flags
 956 *
  957 *	Called for SELF on rtnl_bridge_dellink to delete bridge port
 958 *	attributes.
 959 */
 960int switchdev_port_bridge_dellink(struct net_device *dev,
 961				  struct nlmsghdr *nlh, u16 flags)
 962{
 963	struct nlattr *afspec;
 964
 965	if (!netif_is_bridge_port(dev))
 966		return -EOPNOTSUPP;
 967
 968	afspec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg),
 969				 IFLA_AF_SPEC);
 970	if (afspec)
 971		return switchdev_port_br_afspec(dev, afspec,
 972						switchdev_port_obj_del);
 973
 974	return 0;
 975}
 976EXPORT_SYMBOL_GPL(switchdev_port_bridge_dellink);
 977
 978/**
 979 *	switchdev_port_fdb_add - Add FDB (MAC/VLAN) entry to port
 980 *
 981 *	@ndmsg: netlink hdr
 982 *	@nlattr: netlink attributes
 983 *	@dev: port device
 984 *	@addr: MAC address to add
 985 *	@vid: VLAN to add
 986 *
 987 *	Add FDB entry to switch device.
 988 */
 989int switchdev_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 990			   struct net_device *dev, const unsigned char *addr,
 991			   u16 vid, u16 nlm_flags)
 992{
 993	struct switchdev_obj_port_fdb fdb = {
 994		.obj.orig_dev = dev,
 995		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
 996		.vid = vid,
 997	};
 998
 999	ether_addr_copy(fdb.addr, addr);
1000	return switchdev_port_obj_add(dev, &fdb.obj);
1001}
1002EXPORT_SYMBOL_GPL(switchdev_port_fdb_add);
1003
1004/**
1005 *	switchdev_port_fdb_del - Delete FDB (MAC/VLAN) entry from port
1006 *
1007 *	@ndmsg: netlink hdr
1008 *	@nlattr: netlink attributes
1009 *	@dev: port device
1010 *	@addr: MAC address to delete
1011 *	@vid: VLAN to delete
1012 *
1013 *	Delete FDB entry from switch device.
1014 */
1015int switchdev_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
1016			   struct net_device *dev, const unsigned char *addr,
1017			   u16 vid)
1018{
1019	struct switchdev_obj_port_fdb fdb = {
1020		.obj.orig_dev = dev,
1021		.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
1022		.vid = vid,
1023	};
1024
1025	ether_addr_copy(fdb.addr, addr);
1026	return switchdev_port_obj_del(dev, &fdb.obj);
1027}
1028EXPORT_SYMBOL_GPL(switchdev_port_fdb_del);
1029
1030struct switchdev_fdb_dump {
1031	struct switchdev_obj_port_fdb fdb;
1032	struct net_device *dev;
1033	struct sk_buff *skb;
1034	struct netlink_callback *cb;
1035	int idx;
1036};
1037
1038static int switchdev_port_fdb_dump_cb(struct switchdev_obj *obj)
1039{
1040	struct switchdev_obj_port_fdb *fdb = SWITCHDEV_OBJ_PORT_FDB(obj);
1041	struct switchdev_fdb_dump *dump =
1042		container_of(fdb, struct switchdev_fdb_dump, fdb);
1043	u32 portid = NETLINK_CB(dump->cb->skb).portid;
1044	u32 seq = dump->cb->nlh->nlmsg_seq;
1045	struct nlmsghdr *nlh;
1046	struct ndmsg *ndm;
1047
1048	if (dump->idx < dump->cb->args[2])
1049		goto skip;
1050
1051	nlh = nlmsg_put(dump->skb, portid, seq, RTM_NEWNEIGH,
1052			sizeof(*ndm), NLM_F_MULTI);
1053	if (!nlh)
1054		return -EMSGSIZE;
1055
1056	ndm = nlmsg_data(nlh);
1057	ndm->ndm_family  = AF_BRIDGE;
1058	ndm->ndm_pad1    = 0;
1059	ndm->ndm_pad2    = 0;
1060	ndm->ndm_flags   = NTF_SELF;
1061	ndm->ndm_type    = 0;
1062	ndm->ndm_ifindex = dump->dev->ifindex;
1063	ndm->ndm_state   = fdb->ndm_state;
1064
1065	if (nla_put(dump->skb, NDA_LLADDR, ETH_ALEN, fdb->addr))
1066		goto nla_put_failure;
1067
1068	if (fdb->vid && nla_put_u16(dump->skb, NDA_VLAN, fdb->vid))
1069		goto nla_put_failure;
1070
1071	nlmsg_end(dump->skb, nlh);
1072
1073skip:
1074	dump->idx++;
1075	return 0;
1076
1077nla_put_failure:
1078	nlmsg_cancel(dump->skb, nlh);
1079	return -EMSGSIZE;
1080}
1081
1082/**
1083 *	switchdev_port_fdb_dump - Dump port FDB (MAC/VLAN) entries
1084 *
1085 *	@skb: netlink skb
1086 *	@cb: netlink callback
1087 *	@dev: port device
1088 *	@filter_dev: filter device
 1089 *	@idx: index to resume the dump from; updated to the next index on return
1090 *
1091 *	Dump FDB entries from switch device.
1092 */
1093int switchdev_port_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
1094			    struct net_device *dev,
1095			    struct net_device *filter_dev, int *idx)
1096{
1097	struct switchdev_fdb_dump dump = {
1098		.fdb.obj.orig_dev = dev,
1099		.fdb.obj.id = SWITCHDEV_OBJ_ID_PORT_FDB,
1100		.dev = dev,
1101		.skb = skb,
1102		.cb = cb,
1103		.idx = *idx,
1104	};
1105	int err;
1106
1107	err = switchdev_port_obj_dump(dev, &dump.fdb.obj,
1108				      switchdev_port_fdb_dump_cb);
1109	*idx = dump.idx;
1110	return err;
1111}
1112EXPORT_SYMBOL_GPL(switchdev_port_fdb_dump);
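
These FDB and bridge helpers are written so that a driver can plug them
straight into its netdev ops; a hypothetical driver's wiring might look
like this (the ops variable is an assumption, the callbacks are the
exported functions above):

static const struct net_device_ops example_netdev_ops = {
	/* ... the driver's usual ndo callbacks ... */
	.ndo_fdb_add		= switchdev_port_fdb_add,
	.ndo_fdb_del		= switchdev_port_fdb_del,
	.ndo_fdb_dump		= switchdev_port_fdb_dump,
	.ndo_bridge_getlink	= switchdev_port_bridge_getlink,
	.ndo_bridge_setlink	= switchdev_port_bridge_setlink,
	.ndo_bridge_dellink	= switchdev_port_bridge_dellink,
};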
1113
1114bool switchdev_port_same_parent_id(struct net_device *a,
1115				   struct net_device *b)
1116{
1117	struct switchdev_attr a_attr = {
1118		.orig_dev = a,
1119		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1120	};
1121	struct switchdev_attr b_attr = {
1122		.orig_dev = b,
1123		.id = SWITCHDEV_ATTR_ID_PORT_PARENT_ID,
1124	};
1125
1126	if (switchdev_port_attr_get(a, &a_attr) ||
1127	    switchdev_port_attr_get(b, &b_attr))
1128		return false;
1129
1130	return netdev_phys_item_id_same(&a_attr.u.ppid, &b_attr.u.ppid);
1131}
1132EXPORT_SYMBOL_GPL(switchdev_port_same_parent_id);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * net/switchdev/switchdev.c - Switch device API
   4 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
   5 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
   6 */
   7
   8#include <linux/kernel.h>
   9#include <linux/types.h>
  10#include <linux/init.h>
  11#include <linux/mutex.h>
  12#include <linux/notifier.h>
  13#include <linux/netdevice.h>
  14#include <linux/etherdevice.h>
  15#include <linux/if_bridge.h>
  16#include <linux/list.h>
  17#include <linux/workqueue.h>
  18#include <linux/if_vlan.h>
  19#include <linux/rtnetlink.h>
  20#include <net/switchdev.h>
  21
  22static bool switchdev_obj_eq(const struct switchdev_obj *a,
  23			     const struct switchdev_obj *b)
  24{
  25	const struct switchdev_obj_port_vlan *va, *vb;
  26	const struct switchdev_obj_port_mdb *ma, *mb;
  27
  28	if (a->id != b->id || a->orig_dev != b->orig_dev)
  29		return false;
  30
  31	switch (a->id) {
  32	case SWITCHDEV_OBJ_ID_PORT_VLAN:
  33		va = SWITCHDEV_OBJ_PORT_VLAN(a);
  34		vb = SWITCHDEV_OBJ_PORT_VLAN(b);
  35		return va->flags == vb->flags &&
  36			va->vid == vb->vid &&
  37			va->changed == vb->changed;
  38	case SWITCHDEV_OBJ_ID_PORT_MDB:
  39	case SWITCHDEV_OBJ_ID_HOST_MDB:
  40		ma = SWITCHDEV_OBJ_PORT_MDB(a);
  41		mb = SWITCHDEV_OBJ_PORT_MDB(b);
  42		return ma->vid == mb->vid &&
  43			ether_addr_equal(ma->addr, mb->addr);
  44	default:
  45		break;
  46	}
  47
  48	BUG();
  49}
  50
  51static LIST_HEAD(deferred);
  52static DEFINE_SPINLOCK(deferred_lock);
  53
  54typedef void switchdev_deferred_func_t(struct net_device *dev,
  55				       const void *data);
  56
  57struct switchdev_deferred_item {
  58	struct list_head list;
  59	struct net_device *dev;
  60	netdevice_tracker dev_tracker;
  61	switchdev_deferred_func_t *func;
  62	unsigned long data[];
  63};
  64
  65static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
  66{
  67	struct switchdev_deferred_item *dfitem;
  68
  69	spin_lock_bh(&deferred_lock);
  70	if (list_empty(&deferred)) {
  71		dfitem = NULL;
  72		goto unlock;
  73	}
  74	dfitem = list_first_entry(&deferred,
  75				  struct switchdev_deferred_item, list);
  76	list_del(&dfitem->list);
  77unlock:
  78	spin_unlock_bh(&deferred_lock);
  79	return dfitem;
  80}
  81
  82/**
  83 *	switchdev_deferred_process - Process ops in deferred queue
  84 *
  85 *	Called to flush the ops currently queued in deferred ops queue.
  86 *	rtnl_lock must be held.
  87 */
  88void switchdev_deferred_process(void)
  89{
  90	struct switchdev_deferred_item *dfitem;
  91
  92	ASSERT_RTNL();
  93
  94	while ((dfitem = switchdev_deferred_dequeue())) {
  95		dfitem->func(dfitem->dev, dfitem->data);
  96		netdev_put(dfitem->dev, &dfitem->dev_tracker);
  97		kfree(dfitem);
  98	}
  99}
 100EXPORT_SYMBOL_GPL(switchdev_deferred_process);
 101
 102static void switchdev_deferred_process_work(struct work_struct *work)
 103{
 104	rtnl_lock();
 105	switchdev_deferred_process();
 106	rtnl_unlock();
 107}
 108
 109static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);
 110
 111static int switchdev_deferred_enqueue(struct net_device *dev,
 112				      const void *data, size_t data_len,
 113				      switchdev_deferred_func_t *func)
 114{
 115	struct switchdev_deferred_item *dfitem;
 116
 117	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
 118	if (!dfitem)
 119		return -ENOMEM;
 120	dfitem->dev = dev;
 121	dfitem->func = func;
 122	memcpy(dfitem->data, data, data_len);
 123	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
 124	spin_lock_bh(&deferred_lock);
 125	list_add_tail(&dfitem->list, &deferred);
 126	spin_unlock_bh(&deferred_lock);
 127	schedule_work(&deferred_process_work);
 128	return 0;
 129}
 130
 131static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
 132				      struct net_device *dev,
 133				      const struct switchdev_attr *attr,
 134				      struct netlink_ext_ack *extack)
 135{
 136	int err;
 137	int rc;
 138
 139	struct switchdev_notifier_port_attr_info attr_info = {
 140		.attr = attr,
 141		.handled = false,
 142	};
 143
 144	rc = call_switchdev_blocking_notifiers(nt, dev,
 145					       &attr_info.info, extack);
 146	err = notifier_to_errno(rc);
 147	if (err) {
 148		WARN_ON(!attr_info.handled);
 149		return err;
 150	}
 151
 152	if (!attr_info.handled)
 153		return -EOPNOTSUPP;
 154
 155	return 0;
 156}
 157
 158static int switchdev_port_attr_set_now(struct net_device *dev,
 159				       const struct switchdev_attr *attr,
 160				       struct netlink_ext_ack *extack)
 161{
 162	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
 163					  extack);
 164}
 165
 166static void switchdev_port_attr_set_deferred(struct net_device *dev,
 167					     const void *data)
 168{
 169	const struct switchdev_attr *attr = data;
 170	int err;
 171
 172	err = switchdev_port_attr_set_now(dev, attr, NULL);
 173	if (err && err != -EOPNOTSUPP)
 174		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
 175			   err, attr->id);
 176	if (attr->complete)
 177		attr->complete(dev, err, attr->complete_priv);
 178}
 179
 180static int switchdev_port_attr_set_defer(struct net_device *dev,
 181					 const struct switchdev_attr *attr)
 182{
 183	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
 184					  switchdev_port_attr_set_deferred);
 185}
 186
 187/**
 188 *	switchdev_port_attr_set - Set port attribute
 189 *
 190 *	@dev: port device
 191 *	@attr: attribute to set
 192 *	@extack: netlink extended ack, for error message propagation
 193 *
 194 *	rtnl_lock must be held and must not be in atomic section,
 195 *	in case SWITCHDEV_F_DEFER flag is not set.
 196 */
 197int switchdev_port_attr_set(struct net_device *dev,
 198			    const struct switchdev_attr *attr,
 199			    struct netlink_ext_ack *extack)
 200{
 201	if (attr->flags & SWITCHDEV_F_DEFER)
 202		return switchdev_port_attr_set_defer(dev, attr);
 203	ASSERT_RTNL();
 204	return switchdev_port_attr_set_now(dev, attr, extack);
 205}
 206EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
 207
 208static size_t switchdev_obj_size(const struct switchdev_obj *obj)
 209{
 210	switch (obj->id) {
 211	case SWITCHDEV_OBJ_ID_PORT_VLAN:
 212		return sizeof(struct switchdev_obj_port_vlan);
 213	case SWITCHDEV_OBJ_ID_PORT_MDB:
 214		return sizeof(struct switchdev_obj_port_mdb);
 215	case SWITCHDEV_OBJ_ID_HOST_MDB:
 216		return sizeof(struct switchdev_obj_port_mdb);
 217	default:
 218		BUG();
 219	}
 220	return 0;
 221}
 222
 223static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
 224				     struct net_device *dev,
 225				     const struct switchdev_obj *obj,
 226				     struct netlink_ext_ack *extack)
 227{
 228	int rc;
 229	int err;
 230
 231	struct switchdev_notifier_port_obj_info obj_info = {
 232		.obj = obj,
 233		.handled = false,
 234	};
 235
 236	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
 237	err = notifier_to_errno(rc);
 238	if (err) {
 239		WARN_ON(!obj_info.handled);
 240		return err;
 241	}
 242	if (!obj_info.handled)
 243		return -EOPNOTSUPP;
 244	return 0;
 245}
 246
 247static void switchdev_obj_id_to_helpful_msg(struct net_device *dev,
 248					    enum switchdev_obj_id obj_id,
 249					    int err, bool add)
 250{
 251	const char *action = add ? "add" : "del";
 252	const char *reason = "";
 253	const char *problem;
 254	const char *obj_str;
 255
 256	switch (obj_id) {
 257	case SWITCHDEV_OBJ_ID_UNDEFINED:
 258		obj_str = "Undefined object";
 259		problem = "Attempted operation is undefined, indicating a possible programming\n"
 260			  "error.\n";
 261		break;
 262	case SWITCHDEV_OBJ_ID_PORT_VLAN:
 263		obj_str = "VLAN entry";
 264		problem = "Failure in VLAN settings on this port might disrupt network\n"
 265			  "segmentation or traffic isolation, affecting network partitioning.\n";
 266		break;
 267	case SWITCHDEV_OBJ_ID_PORT_MDB:
 268		obj_str = "Port Multicast Database entry";
 269		problem = "Failure in updating the port's Multicast Database could lead to\n"
 270			  "multicast forwarding issues.\n";
 271		break;
 272	case SWITCHDEV_OBJ_ID_HOST_MDB:
 273		obj_str = "Host Multicast Database entry";
 274		problem = "Failure in updating the host's Multicast Database may impact multicast\n"
 275			  "group memberships or traffic delivery, affecting multicast\n"
 276			  "communication.\n";
 277		break;
 278	case SWITCHDEV_OBJ_ID_MRP:
 279		obj_str = "Media Redundancy Protocol configuration for port";
 280		problem = "Failure to set MRP ring ID on this port prevents communication with\n"
 281			  "the specified redundancy ring, resulting in an inability to engage\n"
 282			  "in MRP-based network operations.\n";
 283		break;
 284	case SWITCHDEV_OBJ_ID_RING_TEST_MRP:
 285		obj_str = "MRP Test Frame Operations for port";
 286		problem = "Failure to generate/monitor MRP test frames may lead to inability to\n"
 287			  "assess the ring's operational integrity and fault response, hindering\n"
 288			  "proactive network management.\n";
 289		break;
 290	case SWITCHDEV_OBJ_ID_RING_ROLE_MRP:
 291		obj_str = "MRP Ring Role Configuration";
 292		problem = "Improper MRP ring role configuration may create conflicts in the ring,\n"
 293			  "disrupting communication for all participants, or isolate the local\n"
 294			  "system from the ring, hindering its ability to communicate with other\n"
 295			  "participants.\n";
 296		break;
 297	case SWITCHDEV_OBJ_ID_RING_STATE_MRP:
 298		obj_str = "MRP Ring State Configuration";
 299		problem = "Failure to correctly set the MRP ring state can result in network\n"
 300			  "loops or leave segments without communication. In a Closed state,\n"
 301			  "it maintains loop prevention by blocking one MRM port, while an Open\n"
 302			  "state activates in response to failures, changing port states to\n"
 303			  "preserve network connectivity.\n";
 304		break;
 305	case SWITCHDEV_OBJ_ID_IN_TEST_MRP:
 306		obj_str = "MRP_InTest Frame Generation Configuration";
 307		problem = "Failure in managing MRP_InTest frame generation can misjudge the\n"
 308			  "interconnection ring's state, leading to incorrect blocking or\n"
 309			  "unblocking of the I/C port. This misconfiguration might result\n"
 310			  "in unintended network loops or isolate critical network segments,\n"
 311			  "compromising network integrity and reliability.\n";
 312		break;
 313	case SWITCHDEV_OBJ_ID_IN_ROLE_MRP:
 314		obj_str = "Interconnection Ring Role Configuration";
  315		problem = "Incorrect assignment of interconnection ring roles (MIM/MIC)\n"
  316			  "can impair the formation of the interconnection rings.\n";
 317		break;
 318	case SWITCHDEV_OBJ_ID_IN_STATE_MRP:
 319		obj_str = "Interconnection Ring State Configuration";
  320		problem = "Failure in updating the interconnection ring state can, in\n"
  321			  "the Open state, lead to incorrect blocking or unblocking of the\n"
  322			  "I/C port, resulting in unintended network loops or isolation\n"
  323			  "of critical network segments.\n";
 324		break;
 325	default:
 326		obj_str = "Unknown object";
 327		problem	= "Indicating a possible programming error.\n";
 328	}
 329
 330	switch (err) {
 331	case -ENOSPC:
 332		reason = "Current HW/SW setup lacks sufficient resources.\n";
 333		break;
 334	}
 335
 336	netdev_err(dev, "Failed to %s %s (object id=%d) with error: %pe (%d).\n%s%s\n",
 337		   action, obj_str, obj_id, ERR_PTR(err), err, problem, reason);
 338}
 339
 340static void switchdev_port_obj_add_deferred(struct net_device *dev,
 341					    const void *data)
 342{
 343	const struct switchdev_obj *obj = data;
 344	int err;
 345
 346	ASSERT_RTNL();
 347	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
 348					dev, obj, NULL);
 349	if (err && err != -EOPNOTSUPP)
 350		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, true);
 351	if (obj->complete)
 352		obj->complete(dev, err, obj->complete_priv);
 353}
 354
 355static int switchdev_port_obj_add_defer(struct net_device *dev,
 356					const struct switchdev_obj *obj)
 357{
 358	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
 359					  switchdev_port_obj_add_deferred);
 360}
 361
 362/**
 363 *	switchdev_port_obj_add - Add port object
 364 *
 365 *	@dev: port device
 366 *	@obj: object to add
 367 *	@extack: netlink extended ack
 368 *
 369 *	rtnl_lock must be held and must not be in atomic section,
 370 *	in case SWITCHDEV_F_DEFER flag is not set.
 371 */
 372int switchdev_port_obj_add(struct net_device *dev,
 373			   const struct switchdev_obj *obj,
 374			   struct netlink_ext_ack *extack)
 375{
 376	if (obj->flags & SWITCHDEV_F_DEFER)
 377		return switchdev_port_obj_add_defer(dev, obj);
 378	ASSERT_RTNL();
 379	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
 380					 dev, obj, extack);
 381}
 382EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
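
In this version the VLAN object carries a single vid (as compared by
switchdev_obj_eq() above) and the call takes an extack; a hypothetical
single-VLAN add becomes:

static int example_port_vlan_add(struct net_device *dev, u16 vid, u16 flags,
				 struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = flags,
		.vid = vid,
	};

	return switchdev_port_obj_add(dev, &vlan.obj, extack);
}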
 383
 384static int switchdev_port_obj_del_now(struct net_device *dev,
 385				      const struct switchdev_obj *obj)
 386{
 387	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
 388					 dev, obj, NULL);
 389}
 390
 391static void switchdev_port_obj_del_deferred(struct net_device *dev,
 392					    const void *data)
 393{
 394	const struct switchdev_obj *obj = data;
 395	int err;
 396
 397	err = switchdev_port_obj_del_now(dev, obj);
 398	if (err && err != -EOPNOTSUPP)
 399		switchdev_obj_id_to_helpful_msg(dev, obj->id, err, false);
 400	if (obj->complete)
 401		obj->complete(dev, err, obj->complete_priv);
 402}
 403
 404static int switchdev_port_obj_del_defer(struct net_device *dev,
 405					const struct switchdev_obj *obj)
 406{
 407	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
 408					  switchdev_port_obj_del_deferred);
 409}
 410
 411/**
 412 *	switchdev_port_obj_del - Delete port object
 413 *
 414 *	@dev: port device
 415 *	@obj: object to delete
 416 *
 417 *	rtnl_lock must be held and must not be in atomic section,
 418 *	in case SWITCHDEV_F_DEFER flag is not set.
 419 */
 420int switchdev_port_obj_del(struct net_device *dev,
 421			   const struct switchdev_obj *obj)
 422{
 423	if (obj->flags & SWITCHDEV_F_DEFER)
 424		return switchdev_port_obj_del_defer(dev, obj);
 425	ASSERT_RTNL();
 426	return switchdev_port_obj_del_now(dev, obj);
 427}
 428EXPORT_SYMBOL_GPL(switchdev_port_obj_del);
 429
 430/**
 431 *	switchdev_port_obj_act_is_deferred - Is object action pending?
 432 *
 433 *	@dev: port device
 434 *	@nt: type of action; add or delete
 435 *	@obj: object to test
 436 *
  437 *	Returns true if a deferred item equivalent to the action @nt on
  438 *	the object @obj is pending.
 439 *
 440 *	rtnl_lock must be held.
 441 */
 442bool switchdev_port_obj_act_is_deferred(struct net_device *dev,
 443					enum switchdev_notifier_type nt,
 444					const struct switchdev_obj *obj)
 445{
 446	struct switchdev_deferred_item *dfitem;
 447	bool found = false;
 448
 449	ASSERT_RTNL();
 450
 451	spin_lock_bh(&deferred_lock);
 452
 453	list_for_each_entry(dfitem, &deferred, list) {
 454		if (dfitem->dev != dev)
 455			continue;
 456
 457		if ((dfitem->func == switchdev_port_obj_add_deferred &&
 458		     nt == SWITCHDEV_PORT_OBJ_ADD) ||
 459		    (dfitem->func == switchdev_port_obj_del_deferred &&
 460		     nt == SWITCHDEV_PORT_OBJ_DEL)) {
 461			if (switchdev_obj_eq((const void *)dfitem->data, obj)) {
 462				found = true;
 463				break;
 464			}
 465		}
 466	}
 467
 468	spin_unlock_bh(&deferred_lock);
 469
 470	return found;
 471}
 472EXPORT_SYMBOL_GPL(switchdev_port_obj_act_is_deferred);
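
A caller sketch: before acting on an object, check whether an equivalent
deferred deletion is still queued (the wrapper name is hypothetical; the
notifier type values are the ones tested above):

static bool example_vlan_del_pending(struct net_device *dev,
				     const struct switchdev_obj_port_vlan *vlan)
{
	return switchdev_port_obj_act_is_deferred(dev, SWITCHDEV_PORT_OBJ_DEL,
						  &vlan->obj);
}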
 473
 474static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
 475static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);
 476
 477/**
 478 *	register_switchdev_notifier - Register notifier
 479 *	@nb: notifier_block
 480 *
 481 *	Register switch device notifier.
 482 */
 483int register_switchdev_notifier(struct notifier_block *nb)
 484{
 485	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
 486}
 487EXPORT_SYMBOL_GPL(register_switchdev_notifier);
 488
 489/**
 490 *	unregister_switchdev_notifier - Unregister notifier
 491 *	@nb: notifier_block
 492 *
 493 *	Unregister switch device notifier.
 494 */
 495int unregister_switchdev_notifier(struct notifier_block *nb)
 496{
 497	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
 498}
 499EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);
 500
 501/**
 502 *	call_switchdev_notifiers - Call notifiers
 503 *	@val: value passed unmodified to notifier function
 504 *	@dev: port device
 505 *	@info: notifier information data
 506 *	@extack: netlink extended ack
 507 *	Call all network notifier blocks.
 508 */
 509int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
 510			     struct switchdev_notifier_info *info,
 511			     struct netlink_ext_ack *extack)
 512{
 513	info->dev = dev;
 514	info->extack = extack;
 515	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
 516}
 517EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
 518
 519int register_switchdev_blocking_notifier(struct notifier_block *nb)
 520{
 521	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
 522
 523	return blocking_notifier_chain_register(chain, nb);
 524}
 525EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);
 526
 527int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
 528{
 529	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;
 530
 531	return blocking_notifier_chain_unregister(chain, nb);
 532}
 533EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);
 534
 535int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
 536				      struct switchdev_notifier_info *info,
 537				      struct netlink_ext_ack *extack)
 538{
 539	info->dev = dev;
 540	info->extack = extack;
 541	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
 542					    val, info);
 543}
 544EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);
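
Drivers answer SWITCHDEV_PORT_OBJ_ADD/DEL and SWITCHDEV_PORT_ATTR_SET on
this blocking chain; a hypothetical registration sketch, with the actual
dispatch left out:

static int example_blocking_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
	case SWITCHDEV_PORT_OBJ_DEL:
	case SWITCHDEV_PORT_ATTR_SET:
		/* ... dispatch ptr to driver-specific handlers ... */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_blocking_nb = {
	.notifier_call = example_blocking_event,
};

/* At probe time: register_switchdev_blocking_notifier(&example_blocking_nb); */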
 545
 546struct switchdev_nested_priv {
 547	bool (*check_cb)(const struct net_device *dev);
 548	bool (*foreign_dev_check_cb)(const struct net_device *dev,
 549				     const struct net_device *foreign_dev);
 550	const struct net_device *dev;
 551	struct net_device *lower_dev;
 552};
 553
 554static int switchdev_lower_dev_walk(struct net_device *lower_dev,
 555				    struct netdev_nested_priv *priv)
 556{
 557	struct switchdev_nested_priv *switchdev_priv = priv->data;
 558	bool (*foreign_dev_check_cb)(const struct net_device *dev,
 559				     const struct net_device *foreign_dev);
 560	bool (*check_cb)(const struct net_device *dev);
 561	const struct net_device *dev;
 562
 563	check_cb = switchdev_priv->check_cb;
 564	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
 565	dev = switchdev_priv->dev;
 566
 567	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
 568		switchdev_priv->lower_dev = lower_dev;
 569		return 1;
 570	}
 571
 572	return 0;
 573}
 574
 575static struct net_device *
 576switchdev_lower_dev_find_rcu(struct net_device *dev,
 577			     bool (*check_cb)(const struct net_device *dev),
 578			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
 579							  const struct net_device *foreign_dev))
 580{
 581	struct switchdev_nested_priv switchdev_priv = {
 582		.check_cb = check_cb,
 583		.foreign_dev_check_cb = foreign_dev_check_cb,
 584		.dev = dev,
 585		.lower_dev = NULL,
 586	};
 587	struct netdev_nested_priv priv = {
 588		.data = &switchdev_priv,
 589	};
 590
 591	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);
 592
 593	return switchdev_priv.lower_dev;
 594}
 595
 596static struct net_device *
 597switchdev_lower_dev_find(struct net_device *dev,
 598			 bool (*check_cb)(const struct net_device *dev),
 599			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
 600						      const struct net_device *foreign_dev))
 601{
 602	struct switchdev_nested_priv switchdev_priv = {
 603		.check_cb = check_cb,
 604		.foreign_dev_check_cb = foreign_dev_check_cb,
 605		.dev = dev,
 606		.lower_dev = NULL,
 607	};
 608	struct netdev_nested_priv priv = {
 609		.data = &switchdev_priv,
 610	};
 611
 612	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);
 613
 614	return switchdev_priv.lower_dev;
 615}
 616
 617static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
 618		struct net_device *orig_dev, unsigned long event,
 619		const struct switchdev_notifier_fdb_info *fdb_info,
 620		bool (*check_cb)(const struct net_device *dev),
 621		bool (*foreign_dev_check_cb)(const struct net_device *dev,
 622					     const struct net_device *foreign_dev),
 623		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
 624			      unsigned long event, const void *ctx,
 625			      const struct switchdev_notifier_fdb_info *fdb_info))
 626{
 627	const struct switchdev_notifier_info *info = &fdb_info->info;
 628	struct net_device *br, *lower_dev, *switchdev;
 629	struct list_head *iter;
 630	int err = -EOPNOTSUPP;
 631
 632	if (check_cb(dev))
 633		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);
 634
 635	/* Recurse through lower interfaces in case the FDB entry is pointing
 636	 * towards a bridge or a LAG device.
 637	 */
 638	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 639		/* Do not propagate FDB entries across bridges */
 640		if (netif_is_bridge_master(lower_dev))
 641			continue;
 642
 643		/* Bridge ports might be either us, or LAG interfaces
 644		 * that we offload.
 645		 */
 646		if (!check_cb(lower_dev) &&
 647		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
 648						  foreign_dev_check_cb))
 649			continue;
 650
 651		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
 652							     event, fdb_info, check_cb,
 653							     foreign_dev_check_cb,
 654							     mod_cb);
 655		if (err && err != -EOPNOTSUPP)
 656			return err;
 657	}
 658
 659	/* Event is neither on a bridge nor a LAG. Check whether it is on an
 660	 * interface that is in a bridge with us.
 661	 */
 662	br = netdev_master_upper_dev_get_rcu(dev);
 663	if (!br || !netif_is_bridge_master(br))
 664		return 0;
 665
 666	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
 667	if (!switchdev)
 668		return 0;
 669
 670	if (!foreign_dev_check_cb(switchdev, dev))
 671		return err;
 672
 673	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
 674						      check_cb, foreign_dev_check_cb,
 675						      mod_cb);
 676}
 677
 678int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
 679		const struct switchdev_notifier_fdb_info *fdb_info,
 680		bool (*check_cb)(const struct net_device *dev),
 681		bool (*foreign_dev_check_cb)(const struct net_device *dev,
 682					     const struct net_device *foreign_dev),
 683		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
 684			      unsigned long event, const void *ctx,
 685			      const struct switchdev_notifier_fdb_info *fdb_info))
 686{
 687	int err;
 688
 689	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
 690						     check_cb, foreign_dev_check_cb,
 691						     mod_cb);
 692	if (err == -EOPNOTSUPP)
 693		err = 0;
 694
 695	return err;
 696}
 697EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
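/* A hypothetical usage sketch, not part of this file: a driver for a
 * switch "foo" might dispatch FDB events to the helper above from its
 * atomic switchdev notifier block, registered with
 * register_switchdev_notifier(). foo_netdev_ops is an assumed driver
 * symbol, and the foreign check is deliberately simplistic: every bridge
 * port that is not one of our own ports is considered foreign.
 */
static bool foo_port_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &foo_netdev_ops;
}

static bool foo_foreign_dev_check(const struct net_device *dev,
				  const struct net_device *foreign_dev)
{
	return !foo_port_dev_check(foreign_dev);
}

static int foo_port_fdb_event(struct net_device *dev,
			      struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info)
{
	/* Program the hardware FDB here; stubbed out in this sketch. */
	return -EOPNOTSUPP;
}

static int foo_switchdev_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		err = switchdev_handle_fdb_event_to_device(dev, event, ptr,
							   foo_port_dev_check,
							   foo_foreign_dev_check,
							   foo_port_fdb_event);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}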
 698
 699static int __switchdev_handle_port_obj_add(struct net_device *dev,
 700			struct switchdev_notifier_port_obj_info *port_obj_info,
 701			bool (*check_cb)(const struct net_device *dev),
 702			bool (*foreign_dev_check_cb)(const struct net_device *dev,
 703						     const struct net_device *foreign_dev),
 704			int (*add_cb)(struct net_device *dev, const void *ctx,
 705				      const struct switchdev_obj *obj,
 706				      struct netlink_ext_ack *extack))
 707{
 708	struct switchdev_notifier_info *info = &port_obj_info->info;
 709	struct net_device *br, *lower_dev, *switchdev;
 710	struct netlink_ext_ack *extack;
 711	struct list_head *iter;
 712	int err = -EOPNOTSUPP;
 713
 714	extack = switchdev_notifier_info_to_extack(info);
 715
 716	if (check_cb(dev)) {
 717		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
 718		if (err != -EOPNOTSUPP)
 719			port_obj_info->handled = true;
 720		return err;
 721	}
 722
 723	/* Switch ports might be stacked under e.g. a LAG. Ignore the
 724	 * unsupported devices; another driver might be able to handle them.
 725	 * But propagate any hard errors to the callers.
 726	 *
 727	 * If the driver does its own bookkeeping of stacked ports, it's not
 728	 * necessary to go through this helper.
 729	 */
 730	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 731		if (netif_is_bridge_master(lower_dev))
 732			continue;
 733
 734		/* When searching for switchdev interfaces that are neighbors
 735		 * of foreign ones, and @dev is a bridge, do not recurse on the
 736		 * foreign interface again; it was already visited.
 737		 */
 738		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
 739		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
 740			continue;
 741
 742		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
 743						      check_cb, foreign_dev_check_cb,
 744						      add_cb);
 745		if (err && err != -EOPNOTSUPP)
 746			return err;
 747	}
 748
 749	/* Event is neither on a bridge nor a LAG. Check whether it is on an
 750	 * interface that is in a bridge with us.
 751	 */
 752	if (!foreign_dev_check_cb)
 753		return err;
 754
 755	br = netdev_master_upper_dev_get(dev);
 756	if (!br || !netif_is_bridge_master(br))
 757		return err;
 758
 759	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
 760	if (!switchdev)
 761		return err;
 762
 763	if (!foreign_dev_check_cb(switchdev, dev))
 764		return err;
 765
 766	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
 767					       foreign_dev_check_cb, add_cb);
 768}
 769
 770/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 771 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 772 * bridge or a LAG.
 773 */
 774int switchdev_handle_port_obj_add(struct net_device *dev,
 775			struct switchdev_notifier_port_obj_info *port_obj_info,
 776			bool (*check_cb)(const struct net_device *dev),
 777			int (*add_cb)(struct net_device *dev, const void *ctx,
 778				      const struct switchdev_obj *obj,
 779				      struct netlink_ext_ack *extack))
 780{
 781	int err;
 782
 783	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
 784					      NULL, add_cb);
 785	if (err == -EOPNOTSUPP)
 786		err = 0;
 787	return err;
 788}
 789EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
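/* Continuing the hypothetical sketch above, not part of this file: object
 * additions arrive on the blocking notifier chain, where a driver might
 * dispatch them like below. foo_port_dev_check() is the check sketched
 * earlier and foo_port_obj_add() is an assumed driver callback with the
 * add_cb signature.
 */
static int foo_switchdev_blocking_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = switchdev_handle_port_obj_add(dev, ptr,
						    foo_port_dev_check,
						    foo_port_obj_add);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}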
 790
 791/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 792 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 793 * that pass @check_cb and are in the same bridge as @dev.
 794 */
 795int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
 796			struct switchdev_notifier_port_obj_info *port_obj_info,
 797			bool (*check_cb)(const struct net_device *dev),
 798			bool (*foreign_dev_check_cb)(const struct net_device *dev,
 799						     const struct net_device *foreign_dev),
 800			int (*add_cb)(struct net_device *dev, const void *ctx,
 801				      const struct switchdev_obj *obj,
 802				      struct netlink_ext_ack *extack))
 803{
 804	int err;
 805
 806	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
 807					      foreign_dev_check_cb, add_cb);
 808	if (err == -EOPNOTSUPP)
 809		err = 0;
 810	return err;
 811}
 812EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
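/* In the sketch above, a driver whose ports can share a bridge with
 * foreign interfaces (e.g. a plain NIC) would use the _foreign variant
 * instead, so that objects notified on the foreign bridge port, such as
 * host MDB entries, also reach its own ports in that bridge:
 *
 *	err = switchdev_handle_port_obj_add_foreign(dev, ptr,
 *						    foo_port_dev_check,
 *						    foo_foreign_dev_check,
 *						    foo_port_obj_add);
 */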
 813
 814static int __switchdev_handle_port_obj_del(struct net_device *dev,
 815			struct switchdev_notifier_port_obj_info *port_obj_info,
 816			bool (*check_cb)(const struct net_device *dev),
 817			bool (*foreign_dev_check_cb)(const struct net_device *dev,
 818						     const struct net_device *foreign_dev),
 819			int (*del_cb)(struct net_device *dev, const void *ctx,
 820				      const struct switchdev_obj *obj))
 821{
 822	struct switchdev_notifier_info *info = &port_obj_info->info;
 823	struct net_device *br, *lower_dev, *switchdev;
 824	struct list_head *iter;
 825	int err = -EOPNOTSUPP;
 826
 827	if (check_cb(dev)) {
 828		err = del_cb(dev, info->ctx, port_obj_info->obj);
 829		if (err != -EOPNOTSUPP)
 830			port_obj_info->handled = true;
 831		return err;
 832	}
 833
 834	/* Switch ports might be stacked under e.g. a LAG. Ignore the
 835	 * unsupported devices; another driver might be able to handle them.
 836	 * But propagate any hard errors to the callers.
 837	 *
 838	 * If the driver does its own bookkeeping of stacked ports, it's not
 839	 * necessary to go through this helper.
 840	 */
 841	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 842		if (netif_is_bridge_master(lower_dev))
 843			continue;
 844
 845		/* When searching for switchdev interfaces that are neighbors
 846		 * of foreign ones, and @dev is a bridge, do not recurse on the
 847		 * foreign interface again; it was already visited.
 848		 */
 849		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
 850		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
 851			continue;
 852
 853		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
 854						      check_cb, foreign_dev_check_cb,
 855						      del_cb);
 856		if (err && err != -EOPNOTSUPP)
 857			return err;
 858	}
 859
 860	/* Event is neither on a bridge nor a LAG. Check whether it is on an
 861	 * interface that is in a bridge with us.
 862	 */
 863	if (!foreign_dev_check_cb)
 864		return err;
 865
 866	br = netdev_master_upper_dev_get(dev);
 867	if (!br || !netif_is_bridge_master(br))
 868		return err;
 869
 870	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
 871	if (!switchdev)
 872		return err;
 873
 874	if (!foreign_dev_check_cb(switchdev, dev))
 875		return err;
 876
 877	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
 878					       foreign_dev_check_cb, del_cb);
 879}
 880
 881/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 882 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 883 * bridge or a LAG.
 884 */
 885int switchdev_handle_port_obj_del(struct net_device *dev,
 886			struct switchdev_notifier_port_obj_info *port_obj_info,
 887			bool (*check_cb)(const struct net_device *dev),
 888			int (*del_cb)(struct net_device *dev, const void *ctx,
 889				      const struct switchdev_obj *obj))
 890{
 891	int err;
 892
 893	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
 894					      NULL, del_cb);
 895	if (err == -EOPNOTSUPP)
 896		err = 0;
 897	return err;
 898}
 899EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);
 900
 901/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 902 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 903 * that pass @check_cb and are in the same bridge as @dev.
 904 */
 905int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
 906			struct switchdev_notifier_port_obj_info *port_obj_info,
 907			bool (*check_cb)(const struct net_device *dev),
 908			bool (*foreign_dev_check_cb)(const struct net_device *dev,
 909						     const struct net_device *foreign_dev),
 910			int (*del_cb)(struct net_device *dev, const void *ctx,
 911				      const struct switchdev_obj *obj))
 912{
 913	int err;
 914
 915	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
 916					      foreign_dev_check_cb, del_cb);
 917	if (err == -EOPNOTSUPP)
 918		err = 0;
 919	return err;
 920}
 921EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);
 922
 923static int __switchdev_handle_port_attr_set(struct net_device *dev,
 924			struct switchdev_notifier_port_attr_info *port_attr_info,
 925			bool (*check_cb)(const struct net_device *dev),
 926			int (*set_cb)(struct net_device *dev, const void *ctx,
 927				      const struct switchdev_attr *attr,
 928				      struct netlink_ext_ack *extack))
 929{
 930	struct switchdev_notifier_info *info = &port_attr_info->info;
 931	struct netlink_ext_ack *extack;
 932	struct net_device *lower_dev;
 933	struct list_head *iter;
 934	int err = -EOPNOTSUPP;
 935
 936	extack = switchdev_notifier_info_to_extack(info);
 937
 938	if (check_cb(dev)) {
 939		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
 940		if (err != -EOPNOTSUPP)
 941			port_attr_info->handled = true;
 942		return err;
 943	}
 944
 945	/* Switch ports might be stacked under e.g. a LAG. Ignore the
 946	 * unsupported devices; another driver might be able to handle them.
 947	 * But propagate any hard errors to the callers.
 948	 *
 949	 * If the driver does its own bookkeeping of stacked ports, it's not
 950	 * necessary to go through this helper.
 951	 */
 952	netdev_for_each_lower_dev(dev, lower_dev, iter) {
 953		if (netif_is_bridge_master(lower_dev))
 954			continue;
 955
 956		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
 957						       check_cb, set_cb);
 958		if (err && err != -EOPNOTSUPP)
 959			return err;
 960	}
 961
 962	return err;
 963}
 964
 965int switchdev_handle_port_attr_set(struct net_device *dev,
 966			struct switchdev_notifier_port_attr_info *port_attr_info,
 967			bool (*check_cb)(const struct net_device *dev),
 968			int (*set_cb)(struct net_device *dev, const void *ctx,
 969				      const struct switchdev_attr *attr,
 970				      struct netlink_ext_ack *extack))
 971{
 972	int err;
 973
 974	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
 975					       set_cb);
 976	if (err == -EOPNOTSUPP)
 977		err = 0;
 978	return err;
 979}
 980EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
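/* Attribute sets are dispatched the same way; continuing the hypothetical
 * sketch, the blocking notifier above would gain a case like this, with
 * foo_port_attr_set() an assumed driver callback with the set_cb
 * signature:
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		err = switchdev_handle_port_attr_set(dev, ptr,
 *						     foo_port_dev_check,
 *						     foo_port_attr_set);
 *		return notifier_from_errno(err);
 */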
 981
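/* Notify the bridge layer that @brport_dev is offloaded by the switchdev
 * driver identified by @atomic_nb and @blocking_nb, so that it can start
 * replaying its configuration towards them. Must be called under
 * rtnl_lock(), typically when the port joins the bridge.
 */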
 982int switchdev_bridge_port_offload(struct net_device *brport_dev,
 983				  struct net_device *dev, const void *ctx,
 984				  struct notifier_block *atomic_nb,
 985				  struct notifier_block *blocking_nb,
 986				  bool tx_fwd_offload,
 987				  struct netlink_ext_ack *extack)
 988{
 989	struct switchdev_notifier_brport_info brport_info = {
 990		.brport = {
 991			.dev = dev,
 992			.ctx = ctx,
 993			.atomic_nb = atomic_nb,
 994			.blocking_nb = blocking_nb,
 995			.tx_fwd_offload = tx_fwd_offload,
 996		},
 997	};
 998	int err;
 999
1000	ASSERT_RTNL();
1001
1002	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
1003						brport_dev, &brport_info.info,
1004						extack);
1005	return notifier_to_errno(err);
1006}
1007EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);
1008
1009void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
1010				     const void *ctx,
1011				     struct notifier_block *atomic_nb,
1012				     struct notifier_block *blocking_nb)
1013{
1014	struct switchdev_notifier_brport_info brport_info = {
1015		.brport = {
1016			.ctx = ctx,
1017			.atomic_nb = atomic_nb,
1018			.blocking_nb = blocking_nb,
1019		},
1020	};
1021
1022	ASSERT_RTNL();
1023
1024	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
1025					  brport_dev, &brport_info.info,
1026					  NULL);
1027}
1028EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
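/* Continuing the hypothetical sketch, not part of this file: a driver
 * would pair the two calls above from its NETDEV_CHANGEUPPER handling,
 * when a port joins or leaves a bridge. foo_atomic_nb and foo_blocking_nb
 * are the notifier blocks the assumed "foo" driver registered; here the
 * port is its own bridge port (no LAG in between), so @brport_dev and
 * @dev are the same netdevice.
 */
static int foo_port_bridge_join(struct net_device *dev,
				struct netlink_ext_ack *extack)
{
	return switchdev_bridge_port_offload(dev, dev, NULL,
					     &foo_atomic_nb, &foo_blocking_nb,
					     false, extack);
}

static void foo_port_bridge_leave(struct net_device *dev)
{
	switchdev_bridge_port_unoffload(dev, NULL,
					&foo_atomic_nb, &foo_blocking_nb);
}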
1029
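/* Ask the bridge to replay the objects and attributes it has already
 * configured on @brport_dev towards @blocking_nb and @atomic_nb. Useful
 * when a switchdev port starts offloading a bridge port indirectly, e.g.
 * by joining a LAG that was already bridged, and needs to catch up with
 * the existing configuration. Must be called under rtnl_lock().
 */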
1030int switchdev_bridge_port_replay(struct net_device *brport_dev,
1031				 struct net_device *dev, const void *ctx,
1032				 struct notifier_block *atomic_nb,
1033				 struct notifier_block *blocking_nb,
1034				 struct netlink_ext_ack *extack)
1035{
1036	struct switchdev_notifier_brport_info brport_info = {
1037		.brport = {
1038			.dev = dev,
1039			.ctx = ctx,
1040			.atomic_nb = atomic_nb,
1041			.blocking_nb = blocking_nb,
1042		},
1043	};
1044	int err;
1045
1046	ASSERT_RTNL();
1047
1048	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_REPLAY,
1049						brport_dev, &brport_info.info,
1050						extack);
1051	return notifier_to_errno(err);
1052}
1053EXPORT_SYMBOL_GPL(switchdev_bridge_port_replay);