/* net/switchdev/switchdev.c as shipped in v5.14.15 */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		dev_put(dfitem->dev);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);
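
/* Editor's sketch, not part of the kernel source: callers that are about
 * to tear a port down can flush the deferred queue explicitly instead of
 * waiting for the worker below. example_port_teardown() is a hypothetical
 * driver path; only switchdev_deferred_process() is the real API.
 */
static void example_port_teardown(struct net_device *dev)
{
	ASSERT_RTNL();			/* the helper asserts this too */
	switchdev_deferred_process();	/* run every queued dfitem->func() */
	/* ... driver cleanup may now assume an empty deferred queue ... */
}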

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(sizeof(*dfitem) + data_len, GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	dev_hold(dev);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);
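
/* Editor's sketch, not part of the kernel source: a minimal caller of
 * switchdev_port_attr_set(). SWITCHDEV_F_DEFER requests the deferred path
 * above, which memcpy()s the attr, so a stack-allocated attr is safe even
 * though it is applied later from the work item. All example_* names are
 * hypothetical.
 */
static int example_set_stp_blocking(struct net_device *dev)
{
	struct switchdev_attr attr = {
		.orig_dev = dev,
		.id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
		.flags = SWITCHDEV_F_DEFER,	/* usable in atomic context */
		.u.stp_state = BR_STATE_BLOCKING,
	};

	return switchdev_port_attr_set(dev, &attr, NULL);
}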

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);
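
/* Editor's sketch, not part of the kernel source: adding a port VLAN
 * object. In this kernel version switchdev_obj_port_vlan carries a single
 * VID plus bridge VLAN flags. example_add_pvid() is hypothetical.
 */
static int example_add_pvid(struct net_device *dev, u16 vid,
			    struct netlink_ext_ack *extack)
{
	struct switchdev_obj_port_vlan vlan = {
		.obj.orig_dev = dev,
		.obj.id = SWITCHDEV_OBJ_ID_PORT_VLAN,
		.flags = BRIDGE_VLAN_INFO_PVID | BRIDGE_VLAN_INFO_UNTAGGED,
		.vid = vid,
	};

	/* rtnl_lock must be held, since SWITCHDEV_F_DEFER is not set */
	return switchdev_port_obj_add(dev, &vlan.obj, extack);
}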

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);
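
/* Editor's sketch, not part of the kernel source: the consumer side of the
 * atomic chain. FDB events carry a switchdev_notifier_fdb_info whose first
 * member is the switchdev_notifier_info passed to the chain, so ptr can be
 * reused once the event type is known. example_* names are hypothetical.
 */
static int example_switchdev_event(struct notifier_block *nb,
				   unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	const struct switchdev_notifier_fdb_info *fdb_info;

	switch (event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		fdb_info = ptr;
		/* atomic context: a real driver would queue this to a work
		 * item instead of touching hardware here
		 */
		netdev_dbg(dev, "fdb event %lu %pM vid %u\n",
			   event, fdb_info->addr, fdb_info->vid);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_switchdev_nb = {
	.notifier_call = example_switchdev_event,
};
/* at probe time: register_switchdev_notifier(&example_switchdev_nb); */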

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);
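
/* Editor's sketch, not part of the kernel source: wiring the helper above
 * into a driver's blocking notifier. example_netdev_ops stands in for the
 * driver's real net_device_ops; every example_* name is hypothetical.
 */
static const struct net_device_ops example_netdev_ops;

static bool example_dev_check(const struct net_device *dev)
{
	return dev->netdev_ops == &example_netdev_ops;
}

static int example_port_obj_add(struct net_device *dev, const void *ctx,
				const struct switchdev_obj *obj,
				struct netlink_ext_ack *extack)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		/* program SWITCHDEV_OBJ_PORT_VLAN(obj)->vid here */
		return 0;
	default:
		return -EOPNOTSUPP;	/* lets other drivers try */
	}
}

static int example_switchdev_blocking_event(struct notifier_block *nb,
					    unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		return notifier_from_errno(
			switchdev_handle_port_obj_add(dev, ptr,
						      example_dev_check,
						      example_port_obj_add));
	default:
		return NOTIFY_DONE;
	}
}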

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);
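
/* Editor's sketch, not part of the kernel source: the attribute analogue,
 * reusing the hypothetical example_dev_check() from the sketch above. The
 * helper walks LAG lowers, so the driver only ever sees its own ports.
 */
static int example_port_attr_set(struct net_device *dev, const void *ctx,
				 const struct switchdev_attr *attr,
				 struct netlink_ext_ack *extack)
{
	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		/* apply attr->u.stp_state to the hardware port */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* inside the blocking notifier handler:
 *
 *	case SWITCHDEV_PORT_ATTR_SET:
 *		return notifier_from_errno(
 *			switchdev_handle_port_attr_set(dev, ptr,
 *						       example_dev_check,
 *						       example_port_attr_set));
 */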
/* net/switchdev/switchdev.c as shipped in v6.2 */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/switchdev/switchdev.c - Switch device API
 * Copyright (c) 2014-2015 Jiri Pirko <jiri@resnulli.us>
 * Copyright (c) 2014-2015 Scott Feldman <sfeldma@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <linux/rtnetlink.h>
#include <net/switchdev.h>

static LIST_HEAD(deferred);
static DEFINE_SPINLOCK(deferred_lock);

typedef void switchdev_deferred_func_t(struct net_device *dev,
				       const void *data);

struct switchdev_deferred_item {
	struct list_head list;
	struct net_device *dev;
	netdevice_tracker dev_tracker;
	switchdev_deferred_func_t *func;
	unsigned long data[];
};

static struct switchdev_deferred_item *switchdev_deferred_dequeue(void)
{
	struct switchdev_deferred_item *dfitem;

	spin_lock_bh(&deferred_lock);
	if (list_empty(&deferred)) {
		dfitem = NULL;
		goto unlock;
	}
	dfitem = list_first_entry(&deferred,
				  struct switchdev_deferred_item, list);
	list_del(&dfitem->list);
unlock:
	spin_unlock_bh(&deferred_lock);
	return dfitem;
}

/**
 *	switchdev_deferred_process - Process ops in deferred queue
 *
 *	Called to flush the ops currently queued in deferred ops queue.
 *	rtnl_lock must be held.
 */
void switchdev_deferred_process(void)
{
	struct switchdev_deferred_item *dfitem;

	ASSERT_RTNL();

	while ((dfitem = switchdev_deferred_dequeue())) {
		dfitem->func(dfitem->dev, dfitem->data);
		netdev_put(dfitem->dev, &dfitem->dev_tracker);
		kfree(dfitem);
	}
}
EXPORT_SYMBOL_GPL(switchdev_deferred_process);

static void switchdev_deferred_process_work(struct work_struct *work)
{
	rtnl_lock();
	switchdev_deferred_process();
	rtnl_unlock();
}

static DECLARE_WORK(deferred_process_work, switchdev_deferred_process_work);

static int switchdev_deferred_enqueue(struct net_device *dev,
				      const void *data, size_t data_len,
				      switchdev_deferred_func_t *func)
{
	struct switchdev_deferred_item *dfitem;

	dfitem = kmalloc(struct_size(dfitem, data, data_len), GFP_ATOMIC);
	if (!dfitem)
		return -ENOMEM;
	dfitem->dev = dev;
	dfitem->func = func;
	memcpy(dfitem->data, data, data_len);
	netdev_hold(dev, &dfitem->dev_tracker, GFP_ATOMIC);
	spin_lock_bh(&deferred_lock);
	list_add_tail(&dfitem->list, &deferred);
	spin_unlock_bh(&deferred_lock);
	schedule_work(&deferred_process_work);
	return 0;
}
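
/* Editor's note, not part of the kernel source: relative to v5.14 the
 * enqueue/dequeue pair above sizes the flexible array via struct_size()
 * (which guards the size arithmetic against overflow) and switched from
 * dev_hold()/dev_put() to netdev_hold()/netdev_put() with a
 * netdevice_tracker embedded in the item, so leaked references are
 * attributable under CONFIG_NET_DEV_REFCNT_TRACKER. The tracker pattern
 * in isolation, with a hypothetical example_ref:
 */
struct example_ref {
	struct net_device *dev;
	netdevice_tracker tracker;	/* one tracker per held reference */
};

static void example_ref_take(struct example_ref *ref, struct net_device *dev)
{
	ref->dev = dev;
	netdev_hold(dev, &ref->tracker, GFP_ATOMIC);
}

static void example_ref_release(struct example_ref *ref)
{
	netdev_put(ref->dev, &ref->tracker);	/* must pass the same tracker */
}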

static int switchdev_port_attr_notify(enum switchdev_notifier_type nt,
				      struct net_device *dev,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack)
{
	int err;
	int rc;

	struct switchdev_notifier_port_attr_info attr_info = {
		.attr = attr,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev,
					       &attr_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!attr_info.handled);
		return err;
	}

	if (!attr_info.handled)
		return -EOPNOTSUPP;

	return 0;
}

static int switchdev_port_attr_set_now(struct net_device *dev,
				       const struct switchdev_attr *attr,
				       struct netlink_ext_ack *extack)
{
	return switchdev_port_attr_notify(SWITCHDEV_PORT_ATTR_SET, dev, attr,
					  extack);
}

static void switchdev_port_attr_set_deferred(struct net_device *dev,
					     const void *data)
{
	const struct switchdev_attr *attr = data;
	int err;

	err = switchdev_port_attr_set_now(dev, attr, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
			   err, attr->id);
	if (attr->complete)
		attr->complete(dev, err, attr->complete_priv);
}

static int switchdev_port_attr_set_defer(struct net_device *dev,
					 const struct switchdev_attr *attr)
{
	return switchdev_deferred_enqueue(dev, attr, sizeof(*attr),
					  switchdev_port_attr_set_deferred);
}

/**
 *	switchdev_port_attr_set - Set port attribute
 *
 *	@dev: port device
 *	@attr: attribute to set
 *	@extack: netlink extended ack, for error message propagation
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_attr_set(struct net_device *dev,
			    const struct switchdev_attr *attr,
			    struct netlink_ext_ack *extack)
{
	if (attr->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_attr_set_defer(dev, attr);
	ASSERT_RTNL();
	return switchdev_port_attr_set_now(dev, attr, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_attr_set);

static size_t switchdev_obj_size(const struct switchdev_obj *obj)
{
	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		return sizeof(struct switchdev_obj_port_vlan);
	case SWITCHDEV_OBJ_ID_PORT_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		return sizeof(struct switchdev_obj_port_mdb);
	default:
		BUG();
	}
	return 0;
}

static int switchdev_port_obj_notify(enum switchdev_notifier_type nt,
				     struct net_device *dev,
				     const struct switchdev_obj *obj,
				     struct netlink_ext_ack *extack)
{
	int rc;
	int err;

	struct switchdev_notifier_port_obj_info obj_info = {
		.obj = obj,
		.handled = false,
	};

	rc = call_switchdev_blocking_notifiers(nt, dev, &obj_info.info, extack);
	err = notifier_to_errno(rc);
	if (err) {
		WARN_ON(!obj_info.handled);
		return err;
	}
	if (!obj_info.handled)
		return -EOPNOTSUPP;
	return 0;
}

static void switchdev_port_obj_add_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	ASSERT_RTNL();
	err = switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					dev, obj, NULL);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_add_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_add_deferred);
}

/**
 *	switchdev_port_obj_add - Add port object
 *
 *	@dev: port device
 *	@obj: object to add
 *	@extack: netlink extended ack
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_add(struct net_device *dev,
			   const struct switchdev_obj *obj,
			   struct netlink_ext_ack *extack)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_add_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_ADD,
					 dev, obj, extack);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_add);

static int switchdev_port_obj_del_now(struct net_device *dev,
				      const struct switchdev_obj *obj)
{
	return switchdev_port_obj_notify(SWITCHDEV_PORT_OBJ_DEL,
					 dev, obj, NULL);
}

static void switchdev_port_obj_del_deferred(struct net_device *dev,
					    const void *data)
{
	const struct switchdev_obj *obj = data;
	int err;

	err = switchdev_port_obj_del_now(dev, obj);
	if (err && err != -EOPNOTSUPP)
		netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
			   err, obj->id);
	if (obj->complete)
		obj->complete(dev, err, obj->complete_priv);
}

static int switchdev_port_obj_del_defer(struct net_device *dev,
					const struct switchdev_obj *obj)
{
	return switchdev_deferred_enqueue(dev, obj, switchdev_obj_size(obj),
					  switchdev_port_obj_del_deferred);
}

/**
 *	switchdev_port_obj_del - Delete port object
 *
 *	@dev: port device
 *	@obj: object to delete
 *
 *	rtnl_lock must be held and must not be in atomic section,
 *	in case SWITCHDEV_F_DEFER flag is not set.
 */
int switchdev_port_obj_del(struct net_device *dev,
			   const struct switchdev_obj *obj)
{
	if (obj->flags & SWITCHDEV_F_DEFER)
		return switchdev_port_obj_del_defer(dev, obj);
	ASSERT_RTNL();
	return switchdev_port_obj_del_now(dev, obj);
}
EXPORT_SYMBOL_GPL(switchdev_port_obj_del);

static ATOMIC_NOTIFIER_HEAD(switchdev_notif_chain);
static BLOCKING_NOTIFIER_HEAD(switchdev_blocking_notif_chain);

/**
 *	register_switchdev_notifier - Register notifier
 *	@nb: notifier_block
 *
 *	Register switch device notifier.
 */
int register_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_notifier);

/**
 *	unregister_switchdev_notifier - Unregister notifier
 *	@nb: notifier_block
 *
 *	Unregister switch device notifier.
 */
int unregister_switchdev_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&switchdev_notif_chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_notifier);

/**
 *	call_switchdev_notifiers - Call notifiers
 *	@val: value passed unmodified to notifier function
 *	@dev: port device
 *	@info: notifier information data
 *	@extack: netlink extended ack
 *	Call all network notifier blocks.
 */
int call_switchdev_notifiers(unsigned long val, struct net_device *dev,
			     struct switchdev_notifier_info *info,
			     struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return atomic_notifier_call_chain(&switchdev_notif_chain, val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_notifiers);

int register_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_register(chain, nb);
}
EXPORT_SYMBOL_GPL(register_switchdev_blocking_notifier);

int unregister_switchdev_blocking_notifier(struct notifier_block *nb)
{
	struct blocking_notifier_head *chain = &switchdev_blocking_notif_chain;

	return blocking_notifier_chain_unregister(chain, nb);
}
EXPORT_SYMBOL_GPL(unregister_switchdev_blocking_notifier);

int call_switchdev_blocking_notifiers(unsigned long val, struct net_device *dev,
				      struct switchdev_notifier_info *info,
				      struct netlink_ext_ack *extack)
{
	info->dev = dev;
	info->extack = extack;
	return blocking_notifier_call_chain(&switchdev_blocking_notif_chain,
					    val, info);
}
EXPORT_SYMBOL_GPL(call_switchdev_blocking_notifiers);

struct switchdev_nested_priv {
	bool (*check_cb)(const struct net_device *dev);
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	const struct net_device *dev;
	struct net_device *lower_dev;
};

static int switchdev_lower_dev_walk(struct net_device *lower_dev,
				    struct netdev_nested_priv *priv)
{
	struct switchdev_nested_priv *switchdev_priv = priv->data;
	bool (*foreign_dev_check_cb)(const struct net_device *dev,
				     const struct net_device *foreign_dev);
	bool (*check_cb)(const struct net_device *dev);
	const struct net_device *dev;

	check_cb = switchdev_priv->check_cb;
	foreign_dev_check_cb = switchdev_priv->foreign_dev_check_cb;
	dev = switchdev_priv->dev;

	if (check_cb(lower_dev) && !foreign_dev_check_cb(lower_dev, dev)) {
		switchdev_priv->lower_dev = lower_dev;
		return 1;
	}

	return 0;
}

static struct net_device *
switchdev_lower_dev_find_rcu(struct net_device *dev,
			     bool (*check_cb)(const struct net_device *dev),
			     bool (*foreign_dev_check_cb)(const struct net_device *dev,
							  const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev_rcu(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static struct net_device *
switchdev_lower_dev_find(struct net_device *dev,
			 bool (*check_cb)(const struct net_device *dev),
			 bool (*foreign_dev_check_cb)(const struct net_device *dev,
						      const struct net_device *foreign_dev))
{
	struct switchdev_nested_priv switchdev_priv = {
		.check_cb = check_cb,
		.foreign_dev_check_cb = foreign_dev_check_cb,
		.dev = dev,
		.lower_dev = NULL,
	};
	struct netdev_nested_priv priv = {
		.data = &switchdev_priv,
	};

	netdev_walk_all_lower_dev(dev, switchdev_lower_dev_walk, &priv);

	return switchdev_priv.lower_dev;
}

static int __switchdev_handle_fdb_event_to_device(struct net_device *dev,
		struct net_device *orig_dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	const struct switchdev_notifier_info *info = &fdb_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev))
		return mod_cb(dev, orig_dev, event, info->ctx, fdb_info);

	/* Recurse through lower interfaces in case the FDB entry is pointing
	 * towards a bridge or a LAG device.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		/* Do not propagate FDB entries across bridges */
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* Bridge ports might be either us, or LAG interfaces
		 * that we offload.
		 */
		if (!check_cb(lower_dev) &&
		    !switchdev_lower_dev_find_rcu(lower_dev, check_cb,
						  foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_fdb_event_to_device(lower_dev, orig_dev,
							     event, fdb_info, check_cb,
							     foreign_dev_check_cb,
							     mod_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	br = netdev_master_upper_dev_get_rcu(dev);
	if (!br || !netif_is_bridge_master(br))
		return 0;

	switchdev = switchdev_lower_dev_find_rcu(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return 0;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_fdb_event_to_device(br, orig_dev, event, fdb_info,
						      check_cb, foreign_dev_check_cb,
						      mod_cb);
}

int switchdev_handle_fdb_event_to_device(struct net_device *dev, unsigned long event,
		const struct switchdev_notifier_fdb_info *fdb_info,
		bool (*check_cb)(const struct net_device *dev),
		bool (*foreign_dev_check_cb)(const struct net_device *dev,
					     const struct net_device *foreign_dev),
		int (*mod_cb)(struct net_device *dev, struct net_device *orig_dev,
			      unsigned long event, const void *ctx,
			      const struct switchdev_notifier_fdb_info *fdb_info))
{
	int err;

	err = __switchdev_handle_fdb_event_to_device(dev, dev, event, fdb_info,
						     check_cb, foreign_dev_check_cb,
						     mod_cb);
	if (err == -EOPNOTSUPP)
		err = 0;

	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_fdb_event_to_device);
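
/* Editor's sketch, not part of the kernel source: dispatching bridge FDB
 * events through the helper above. example_dev_check() is the usual "is
 * this my netdev?" placeholder from the earlier sketches, declared here
 * for reference; example_foreign_dev_check() decides which bridge
 * neighbors are foreign, so entries learned on them can still be
 * replicated to this switch. All example_* names are hypothetical.
 */
static bool example_dev_check(const struct net_device *dev);

static bool example_foreign_dev_check(const struct net_device *dev,
				      const struct net_device *foreign_dev)
{
	/* e.g. foreign unless both ports sit on the same switch ASIC */
	return !example_dev_check(foreign_dev);
}

static int example_fdb_event(struct net_device *dev,
			     struct net_device *orig_dev, unsigned long event,
			     const void *ctx,
			     const struct switchdev_notifier_fdb_info *fdb_info)
{
	/* atomic context: defer fdb_info->addr / fdb_info->vid to a work item */
	return 0;
}

/* inside the atomic notifier handler:
 *
 *	case SWITCHDEV_FDB_ADD_TO_DEVICE:
 *	case SWITCHDEV_FDB_DEL_TO_DEVICE:
 *		return notifier_from_errno(
 *			switchdev_handle_fdb_event_to_device(dev, event, ptr,
 *						example_dev_check,
 *						example_foreign_dev_check,
 *						example_fdb_event));
 */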

static int __switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct netlink_ext_ack *extack;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = add_cb(dev, info->ctx, port_obj_info->obj, extack);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_add(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      add_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_add(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, add_cb);
}

/* Pass through a port object addition, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_add(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      NULL, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add);

/* Same as switchdev_handle_port_obj_add(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_add_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*add_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_obj_add(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, add_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_add_foreign);
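
/* Editor's sketch, not part of the kernel source: the _foreign variant is
 * what a driver uses when objects (VLANs, MDBs) added on a software bridge
 * port must also be replicated to the hardware ports sharing that bridge.
 * Hypothetical wiring, reusing the placeholders from the sketches above:
 *
 *	case SWITCHDEV_PORT_OBJ_ADD:
 *		return notifier_from_errno(
 *			switchdev_handle_port_obj_add_foreign(dev, ptr,
 *						example_dev_check,
 *						example_foreign_dev_check,
 *						example_port_obj_add));
 */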

static int __switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	struct switchdev_notifier_info *info = &port_obj_info->info;
	struct net_device *br, *lower_dev, *switchdev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	if (check_cb(dev)) {
		err = del_cb(dev, info->ctx, port_obj_info->obj);
		if (err != -EOPNOTSUPP)
			port_obj_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		/* When searching for switchdev interfaces that are neighbors
		 * of foreign ones, and @dev is a bridge, do not recurse on the
		 * foreign interface again, it was already visited.
		 */
		if (foreign_dev_check_cb && !check_cb(lower_dev) &&
		    !switchdev_lower_dev_find(lower_dev, check_cb, foreign_dev_check_cb))
			continue;

		err = __switchdev_handle_port_obj_del(lower_dev, port_obj_info,
						      check_cb, foreign_dev_check_cb,
						      del_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	/* Event is neither on a bridge nor a LAG. Check whether it is on an
	 * interface that is in a bridge with us.
	 */
	if (!foreign_dev_check_cb)
		return err;

	br = netdev_master_upper_dev_get(dev);
	if (!br || !netif_is_bridge_master(br))
		return err;

	switchdev = switchdev_lower_dev_find(br, check_cb, foreign_dev_check_cb);
	if (!switchdev)
		return err;

	if (!foreign_dev_check_cb(switchdev, dev))
		return err;

	return __switchdev_handle_port_obj_del(br, port_obj_info, check_cb,
					       foreign_dev_check_cb, del_cb);
}

/* Pass through a port object deletion, if @dev passes @check_cb, or replicate
 * it towards all lower interfaces of @dev that pass @check_cb, if @dev is a
 * bridge or a LAG.
 */
int switchdev_handle_port_obj_del(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      NULL, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del);

/* Same as switchdev_handle_port_obj_del(), except if object is notified on a
 * @dev that passes @foreign_dev_check_cb, it is replicated towards all devices
 * that pass @check_cb and are in the same bridge as @dev.
 */
int switchdev_handle_port_obj_del_foreign(struct net_device *dev,
			struct switchdev_notifier_port_obj_info *port_obj_info,
			bool (*check_cb)(const struct net_device *dev),
			bool (*foreign_dev_check_cb)(const struct net_device *dev,
						     const struct net_device *foreign_dev),
			int (*del_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_obj *obj))
{
	int err;

	err = __switchdev_handle_port_obj_del(dev, port_obj_info, check_cb,
					      foreign_dev_check_cb, del_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_obj_del_foreign);

static int __switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	struct switchdev_notifier_info *info = &port_attr_info->info;
	struct netlink_ext_ack *extack;
	struct net_device *lower_dev;
	struct list_head *iter;
	int err = -EOPNOTSUPP;

	extack = switchdev_notifier_info_to_extack(info);

	if (check_cb(dev)) {
		err = set_cb(dev, info->ctx, port_attr_info->attr, extack);
		if (err != -EOPNOTSUPP)
			port_attr_info->handled = true;
		return err;
	}

	/* Switch ports might be stacked under e.g. a LAG. Ignore the
	 * unsupported devices, another driver might be able to handle them. But
	 * propagate to the callers any hard errors.
	 *
	 * If the driver does its own bookkeeping of stacked ports, it's not
	 * necessary to go through this helper.
	 */
	netdev_for_each_lower_dev(dev, lower_dev, iter) {
		if (netif_is_bridge_master(lower_dev))
			continue;

		err = __switchdev_handle_port_attr_set(lower_dev, port_attr_info,
						       check_cb, set_cb);
		if (err && err != -EOPNOTSUPP)
			return err;
	}

	return err;
}

int switchdev_handle_port_attr_set(struct net_device *dev,
			struct switchdev_notifier_port_attr_info *port_attr_info,
			bool (*check_cb)(const struct net_device *dev),
			int (*set_cb)(struct net_device *dev, const void *ctx,
				      const struct switchdev_attr *attr,
				      struct netlink_ext_ack *extack))
{
	int err;

	err = __switchdev_handle_port_attr_set(dev, port_attr_info, check_cb,
					       set_cb);
	if (err == -EOPNOTSUPP)
		err = 0;
	return err;
}
EXPORT_SYMBOL_GPL(switchdev_handle_port_attr_set);

int switchdev_bridge_port_offload(struct net_device *brport_dev,
				  struct net_device *dev, const void *ctx,
				  struct notifier_block *atomic_nb,
				  struct notifier_block *blocking_nb,
				  bool tx_fwd_offload,
				  struct netlink_ext_ack *extack)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.dev = dev,
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
			.tx_fwd_offload = tx_fwd_offload,
		},
	};
	int err;

	ASSERT_RTNL();

	err = call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_OFFLOADED,
						brport_dev, &brport_info.info,
						extack);
	return notifier_to_errno(err);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_offload);

void switchdev_bridge_port_unoffload(struct net_device *brport_dev,
				     const void *ctx,
				     struct notifier_block *atomic_nb,
				     struct notifier_block *blocking_nb)
{
	struct switchdev_notifier_brport_info brport_info = {
		.brport = {
			.ctx = ctx,
			.atomic_nb = atomic_nb,
			.blocking_nb = blocking_nb,
		},
	};

	ASSERT_RTNL();

	call_switchdev_blocking_notifiers(SWITCHDEV_BRPORT_UNOFFLOADED,
					  brport_dev, &brport_info.info,
					  NULL);
}
EXPORT_SYMBOL_GPL(switchdev_bridge_port_unoffload);
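
/* Editor's sketch, not part of the kernel source: a driver typically calls
 * the pair above from its NETDEV_CHANGEUPPER handler. The two notifier
 * blocks stand in for the driver's switchdev atomic and blocking notifiers
 * registered elsewhere; all example_* names are hypothetical.
 */
static struct notifier_block example_switchdev_nb;
static struct notifier_block example_switchdev_blocking_nb;

static int example_port_changeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct netlink_ext_ack *extack;
	int err = 0;

	if (!netif_is_bridge_master(info->upper_dev))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (info->linking)
		/* tx_fwd_offload = false: no TX forwarding offload requested */
		err = switchdev_bridge_port_offload(dev, dev, NULL,
						    &example_switchdev_nb,
						    &example_switchdev_blocking_nb,
						    false, extack);
	else
		switchdev_bridge_port_unoffload(dev, NULL,
						&example_switchdev_nb,
						&example_switchdev_blocking_nb);

	return err;
}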