net/dsa/switch.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Handling of a single switch chip, part of a switch fabric
  4 *
  5 * Copyright (c) 2017 Savoir-faire Linux Inc.
  6 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  7 */
  8
  9#include <linux/if_bridge.h>
 10#include <linux/netdevice.h>
 11#include <linux/notifier.h>
 12#include <linux/if_vlan.h>
 13#include <net/switchdev.h>
 14
 15#include "dsa_priv.h"
 16
 17static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
 18						   unsigned int ageing_time)
 19{
 20	int i;
 21
 22	for (i = 0; i < ds->num_ports; ++i) {
 23		struct dsa_port *dp = dsa_to_port(ds, i);
 24
 25		if (dp->ageing_time && dp->ageing_time < ageing_time)
 26			ageing_time = dp->ageing_time;
 27	}
 28
 29	return ageing_time;
 30}
 31
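/* The ageing time notifier first checks the requested value against the
 * limits advertised by the driver (ds->ageing_time_min/ds->ageing_time_max)
 * and then programs the switch. Since ->set_ageing_time() acts on the whole
 * switch, the smallest (fastest) ageing time requested by any of its ports is
 * the one that ends up in hardware.
 */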
 32static int dsa_switch_ageing_time(struct dsa_switch *ds,
 33				  struct dsa_notifier_ageing_time_info *info)
 34{
 35	unsigned int ageing_time = info->ageing_time;
 36
 37	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
 38		return -ERANGE;
 39
 40	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
 41		return -ERANGE;
 42
 43	/* Program the fastest ageing time in case of multiple bridges */
 44	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
 45
 46	if (ds->ops->set_ageing_time)
 47		return ds->ops->set_ageing_time(ds, ageing_time);
 48
 49	return 0;
 50}
 51
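/* An MTU change matches the targeted port itself and, unless the notifier was
 * targeted at a single switch, also every CPU and DSA port in the tree, so
 * that the links carrying traffic towards the targeted port are sized
 * accordingly.
 */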
 52static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
 53				 struct dsa_notifier_mtu_info *info)
 54{
 55	if (ds->index == info->sw_index && port == info->port)
 56		return true;
 57
 58	/* Do not propagate to other switches in the tree if the notifier was
 59	 * targeted for a single switch.
 60	 */
 61	if (info->targeted_match)
 62		return false;
 63
 64	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
 65		return true;
 66
 67	return false;
 68}
 69
 70static int dsa_switch_mtu(struct dsa_switch *ds,
 71			  struct dsa_notifier_mtu_info *info)
 72{
 73	int port, ret;
 74
 75	if (!ds->ops->port_change_mtu)
 76		return -EOPNOTSUPP;
 77
 78	for (port = 0; port < ds->num_ports; port++) {
 79		if (dsa_switch_mtu_match(ds, port, info)) {
 80			ret = ds->ops->port_change_mtu(ds, port, info->mtu);
 81			if (ret)
 82				return ret;
 83		}
 84	}
 85
 86	return 0;
 87}
 88
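/* Bridge join/leave are cross-chip notifiers: the switch owning the port
 * handles them via ->port_bridge_join()/->port_bridge_leave(), while every
 * other switch in the fabric is offered the ->crosschip_* variants so it can
 * account for the change (e.g. adjust its cross-chip forwarding state).
 */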
 89static int dsa_switch_bridge_join(struct dsa_switch *ds,
 90				  struct dsa_notifier_bridge_info *info)
 91{
 92	struct dsa_switch_tree *dst = ds->dst;
 93
 94	if (dst->index == info->tree_index && ds->index == info->sw_index &&
 95	    ds->ops->port_bridge_join)
 96		return ds->ops->port_bridge_join(ds, info->port, info->br);
 97
 98	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
 99	    ds->ops->crosschip_bridge_join)
100		return ds->ops->crosschip_bridge_join(ds, info->tree_index,
101						      info->sw_index,
102						      info->port, info->br);
103
104	return 0;
105}
106
107static int dsa_switch_bridge_leave(struct dsa_switch *ds,
108				   struct dsa_notifier_bridge_info *info)
109{
110	bool unset_vlan_filtering = br_vlan_enabled(info->br);
111	struct dsa_switch_tree *dst = ds->dst;
112	struct netlink_ext_ack extack = {0};
113	int err, port;
114
115	if (dst->index == info->tree_index && ds->index == info->sw_index &&
116	    ds->ops->port_bridge_leave)
117		ds->ops->port_bridge_leave(ds, info->port, info->br);
118
119	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
120	    ds->ops->crosschip_bridge_leave)
121		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
122						info->sw_index, info->port,
123						info->br);
124
125	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
126	 * event for changing vlan_filtering setting upon slave ports leaving
127	 * it. That is a good thing, because that lets us handle it and also
128	 * handle the case where the switch's vlan_filtering setting is global
129	 * (not per port). When that happens, the correct moment to trigger the
130	 * vlan_filtering callback is only when the last port leaves the last
131	 * VLAN-aware bridge.
132	 */
133	if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
134		for (port = 0; port < ds->num_ports; port++) {
135			struct net_device *bridge_dev;
136
137			bridge_dev = dsa_to_port(ds, port)->bridge_dev;
138
139			if (bridge_dev && br_vlan_enabled(bridge_dev)) {
140				unset_vlan_filtering = false;
141				break;
142			}
143		}
144	}
145	if (unset_vlan_filtering) {
146		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
147					      false, &extack);
148		if (extack._msg)
149			dev_err(ds->dev, "port %d: %s\n", info->port,
150				extack._msg);
151		if (err && err != -EOPNOTSUPP)
152			return err;
153	}
154	return 0;
155}
156
157/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
158 * DSA links) that sit between the targeted port on which the notifier was
159 * emitted and its dedicated CPU port.
160 */
161static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
162					  int info_sw_index, int info_port)
163{
164	struct dsa_port *targeted_dp, *cpu_dp;
165	struct dsa_switch *targeted_ds;
166
167	targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
168	targeted_dp = dsa_to_port(targeted_ds, info_port);
169	cpu_dp = targeted_dp->cpu_dp;
170
171	if (dsa_switch_is_upstream_of(ds, targeted_ds))
172		return port == dsa_towards_port(ds, cpu_dp->ds->index,
173						cpu_dp->index);
174
175	return false;
176}
177
178static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
179					      const unsigned char *addr,
180					      u16 vid)
181{
182	struct dsa_mac_addr *a;
183
184	list_for_each_entry(a, addr_list, list)
185		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
186			return a;
187
188	return NULL;
189}
190
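/* The do_{mdb,fdb}_{add,del} helpers below install addresses on shared (CPU
 * and DSA) ports. Several user ports can request the same address on the same
 * shared port, so each entry is kept on dp->mdbs/dp->fdbs with a refcount and
 * the hardware is only touched on the 0->1 and 1->0 transitions.
 */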
191static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
192				 const struct switchdev_obj_port_mdb *mdb)
193{
194	struct dsa_port *dp = dsa_to_port(ds, port);
195	struct dsa_mac_addr *a;
196	int err;
197
198	/* No need to bother with refcounting for user ports */
199	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
200		return ds->ops->port_mdb_add(ds, port, mdb);
201
202	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
203	if (a) {
204		refcount_inc(&a->refcount);
205		return 0;
206	}
207
208	a = kzalloc(sizeof(*a), GFP_KERNEL);
209	if (!a)
210		return -ENOMEM;
211
212	err = ds->ops->port_mdb_add(ds, port, mdb);
213	if (err) {
214		kfree(a);
215		return err;
216	}
217
218	ether_addr_copy(a->addr, mdb->addr);
219	a->vid = mdb->vid;
220	refcount_set(&a->refcount, 1);
221	list_add_tail(&a->list, &dp->mdbs);
222
223	return 0;
224}
225
226static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
227				 const struct switchdev_obj_port_mdb *mdb)
228{
229	struct dsa_port *dp = dsa_to_port(ds, port);
230	struct dsa_mac_addr *a;
231	int err;
232
233	/* No need to bother with refcounting for user ports */
234	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
235		return ds->ops->port_mdb_del(ds, port, mdb);
236
237	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
238	if (!a)
239		return -ENOENT;
240
241	if (!refcount_dec_and_test(&a->refcount))
242		return 0;
243
244	err = ds->ops->port_mdb_del(ds, port, mdb);
245	if (err) {
246		refcount_inc(&a->refcount);
247		return err;
248	}
249
250	list_del(&a->list);
251	kfree(a);
252
253	return 0;
254}
255
256static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
257				 const unsigned char *addr, u16 vid)
258{
259	struct dsa_port *dp = dsa_to_port(ds, port);
260	struct dsa_mac_addr *a;
261	int err;
262
263	/* No need to bother with refcounting for user ports */
264	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
265		return ds->ops->port_fdb_add(ds, port, addr, vid);
266
267	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
268	if (a) {
269		refcount_inc(&a->refcount);
270		return 0;
271	}
272
273	a = kzalloc(sizeof(*a), GFP_KERNEL);
274	if (!a)
275		return -ENOMEM;
276
277	err = ds->ops->port_fdb_add(ds, port, addr, vid);
278	if (err) {
279		kfree(a);
280		return err;
281	}
282
283	ether_addr_copy(a->addr, addr);
284	a->vid = vid;
285	refcount_set(&a->refcount, 1);
286	list_add_tail(&a->list, &dp->fdbs);
287
288	return 0;
289}
290
291static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
292				 const unsigned char *addr, u16 vid)
293{
294	struct dsa_port *dp = dsa_to_port(ds, port);
295	struct dsa_mac_addr *a;
296	int err;
297
298	/* No need to bother with refcounting for user ports */
299	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
300		return ds->ops->port_fdb_del(ds, port, addr, vid);
301
302	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
303	if (!a)
304		return -ENOENT;
305
306	if (!refcount_dec_and_test(&a->refcount))
307		return 0;
308
309	err = ds->ops->port_fdb_del(ds, port, addr, vid);
310	if (err) {
311		refcount_inc(&a->refcount);
312		return err;
313	}
314
315	list_del(&a->list);
316	kfree(a);
317
318	return 0;
319}
320
321static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
322				   struct dsa_notifier_fdb_info *info)
323{
324	int err = 0;
325	int port;
326
327	if (!ds->ops->port_fdb_add)
328		return -EOPNOTSUPP;
329
330	for (port = 0; port < ds->num_ports; port++) {
331		if (dsa_switch_host_address_match(ds, port, info->sw_index,
332						  info->port)) {
333			err = dsa_switch_do_fdb_add(ds, port, info->addr,
334						    info->vid);
335			if (err)
336				break;
337		}
338	}
339
340	return err;
341}
342
343static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
344				   struct dsa_notifier_fdb_info *info)
345{
346	int err = 0;
347	int port;
348
349	if (!ds->ops->port_fdb_del)
350		return -EOPNOTSUPP;
351
352	for (port = 0; port < ds->num_ports; port++) {
353		if (dsa_switch_host_address_match(ds, port, info->sw_index,
354						  info->port)) {
355			err = dsa_switch_do_fdb_del(ds, port, info->addr,
356						    info->vid);
357			if (err)
358				break;
359		}
360	}
361
362	return err;
363}
364
365static int dsa_switch_fdb_add(struct dsa_switch *ds,
366			      struct dsa_notifier_fdb_info *info)
367{
368	int port = dsa_towards_port(ds, info->sw_index, info->port);
369
370	if (!ds->ops->port_fdb_add)
371		return -EOPNOTSUPP;
372
373	return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
374}
375
376static int dsa_switch_fdb_del(struct dsa_switch *ds,
377			      struct dsa_notifier_fdb_info *info)
378{
379	int port = dsa_towards_port(ds, info->sw_index, info->port);
380
381	if (!ds->ops->port_fdb_del)
382		return -EOPNOTSUPP;
383
384	return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
385}
386
387static int dsa_switch_hsr_join(struct dsa_switch *ds,
388			       struct dsa_notifier_hsr_info *info)
389{
390	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
391		return ds->ops->port_hsr_join(ds, info->port, info->hsr);
392
393	return -EOPNOTSUPP;
394}
395
396static int dsa_switch_hsr_leave(struct dsa_switch *ds,
397				struct dsa_notifier_hsr_info *info)
398{
399	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
400		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
401
402	return -EOPNOTSUPP;
403}
404
405static int dsa_switch_lag_change(struct dsa_switch *ds,
406				 struct dsa_notifier_lag_info *info)
407{
408	if (ds->index == info->sw_index && ds->ops->port_lag_change)
409		return ds->ops->port_lag_change(ds, info->port);
410
411	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
412		return ds->ops->crosschip_lag_change(ds, info->sw_index,
413						     info->port);
414
415	return 0;
416}
417
418static int dsa_switch_lag_join(struct dsa_switch *ds,
419			       struct dsa_notifier_lag_info *info)
420{
421	if (ds->index == info->sw_index && ds->ops->port_lag_join)
422		return ds->ops->port_lag_join(ds, info->port, info->lag,
423					      info->info);
424
425	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
426		return ds->ops->crosschip_lag_join(ds, info->sw_index,
427						   info->port, info->lag,
428						   info->info);
429
430	return -EOPNOTSUPP;
431}
432
433static int dsa_switch_lag_leave(struct dsa_switch *ds,
434				struct dsa_notifier_lag_info *info)
435{
436	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
437		return ds->ops->port_lag_leave(ds, info->port, info->lag);
438
439	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
440		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
441						    info->port, info->lag);
442
443	return -EOPNOTSUPP;
444}
445
446static int dsa_switch_mdb_add(struct dsa_switch *ds,
447			      struct dsa_notifier_mdb_info *info)
448{
449	int port = dsa_towards_port(ds, info->sw_index, info->port);
450
451	if (!ds->ops->port_mdb_add)
452		return -EOPNOTSUPP;
453
454	return dsa_switch_do_mdb_add(ds, port, info->mdb);
455}
456
457static int dsa_switch_mdb_del(struct dsa_switch *ds,
458			      struct dsa_notifier_mdb_info *info)
459{
460	int port = dsa_towards_port(ds, info->sw_index, info->port);
461
462	if (!ds->ops->port_mdb_del)
463		return -EOPNOTSUPP;
464
465	return dsa_switch_do_mdb_del(ds, port, info->mdb);
466}
467
468static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
469				   struct dsa_notifier_mdb_info *info)
470{
471	int err = 0;
472	int port;
473
474	if (!ds->ops->port_mdb_add)
475		return -EOPNOTSUPP;
476
477	for (port = 0; port < ds->num_ports; port++) {
478		if (dsa_switch_host_address_match(ds, port, info->sw_index,
479						  info->port)) {
480			err = dsa_switch_do_mdb_add(ds, port, info->mdb);
481			if (err)
482				break;
483		}
484	}
485
486	return err;
487}
488
489static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
490				   struct dsa_notifier_mdb_info *info)
491{
492	int err = 0;
493	int port;
494
495	if (!ds->ops->port_mdb_del)
496		return -EOPNOTSUPP;
497
498	for (port = 0; port < ds->num_ports; port++) {
499		if (dsa_switch_host_address_match(ds, port, info->sw_index,
500						  info->port)) {
501			err = dsa_switch_do_mdb_del(ds, port, info->mdb);
502			if (err)
503				break;
504		}
505	}
506
507	return err;
508}
509
510static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
511				  struct dsa_notifier_vlan_info *info)
512{
513	if (ds->index == info->sw_index && port == info->port)
514		return true;
515
516	if (dsa_is_dsa_port(ds, port))
517		return true;
518
519	return false;
520}
521
522static int dsa_switch_vlan_add(struct dsa_switch *ds,
523			       struct dsa_notifier_vlan_info *info)
524{
525	int port, err;
526
527	if (!ds->ops->port_vlan_add)
528		return -EOPNOTSUPP;
529
530	for (port = 0; port < ds->num_ports; port++) {
531		if (dsa_switch_vlan_match(ds, port, info)) {
532			err = ds->ops->port_vlan_add(ds, port, info->vlan,
533						     info->extack);
534			if (err)
535				return err;
536		}
537	}
538
539	return 0;
540}
541
542static int dsa_switch_vlan_del(struct dsa_switch *ds,
543			       struct dsa_notifier_vlan_info *info)
544{
545	if (!ds->ops->port_vlan_del)
546		return -EOPNOTSUPP;
547
548	if (ds->index == info->sw_index)
549		return ds->ops->port_vlan_del(ds, info->port, info->vlan);
550
551	/* Do not deprogram the DSA links as they may be used as conduit
552	 * for other VLAN members in the fabric.
553	 */
554	return 0;
555}
556
557static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
558				       struct dsa_notifier_tag_proto_info *info)
559{
560	const struct dsa_device_ops *tag_ops = info->tag_ops;
561	int port, err;
562
563	if (!ds->ops->change_tag_protocol)
564		return -EOPNOTSUPP;
565
566	ASSERT_RTNL();
567
568	for (port = 0; port < ds->num_ports; port++) {
569		if (!dsa_is_cpu_port(ds, port))
570			continue;
571
572		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
573		if (err)
574			return err;
575
576		dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
577	}
578
579	/* Now that changing the tag protocol can no longer fail, let's update
580	 * the remaining bits which are "duplicated for faster access", and the
581	 * bits that depend on the tagger, such as the MTU.
582	 */
583	for (port = 0; port < ds->num_ports; port++) {
584		if (dsa_is_user_port(ds, port)) {
585			struct net_device *slave;
586
587			slave = dsa_to_port(ds, port)->slave;
588			dsa_slave_setup_tagger(slave);
589
590			/* rtnl_mutex is held in dsa_tree_change_tag_proto */
591			dsa_slave_change_mtu(slave, slave->mtu);
592		}
593	}
594
595	return 0;
596}
597
598static int dsa_switch_mrp_add(struct dsa_switch *ds,
599			      struct dsa_notifier_mrp_info *info)
600{
601	if (!ds->ops->port_mrp_add)
602		return -EOPNOTSUPP;
603
604	if (ds->index == info->sw_index)
605		return ds->ops->port_mrp_add(ds, info->port, info->mrp);
606
607	return 0;
608}
609
610static int dsa_switch_mrp_del(struct dsa_switch *ds,
611			      struct dsa_notifier_mrp_info *info)
612{
613	if (!ds->ops->port_mrp_del)
614		return -EOPNOTSUPP;
615
616	if (ds->index == info->sw_index)
617		return ds->ops->port_mrp_del(ds, info->port, info->mrp);
618
619	return 0;
620}
621
622static int
623dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
624			     struct dsa_notifier_mrp_ring_role_info *info)
625{
626	if (!ds->ops->port_mrp_add)
627		return -EOPNOTSUPP;
628
629	if (ds->index == info->sw_index)
630		return ds->ops->port_mrp_add_ring_role(ds, info->port,
631						       info->mrp);
632
633	return 0;
634}
635
636static int
637dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
638			     struct dsa_notifier_mrp_ring_role_info *info)
639{
640	if (!ds->ops->port_mrp_del)
641		return -EOPNOTSUPP;
642
643	if (ds->index == info->sw_index)
644		return ds->ops->port_mrp_del_ring_role(ds, info->port,
645						       info->mrp);
646
647	return 0;
648}
649
650static int dsa_switch_event(struct notifier_block *nb,
651			    unsigned long event, void *info)
652{
653	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
654	int err;
655
656	switch (event) {
657	case DSA_NOTIFIER_AGEING_TIME:
658		err = dsa_switch_ageing_time(ds, info);
659		break;
660	case DSA_NOTIFIER_BRIDGE_JOIN:
661		err = dsa_switch_bridge_join(ds, info);
662		break;
663	case DSA_NOTIFIER_BRIDGE_LEAVE:
664		err = dsa_switch_bridge_leave(ds, info);
665		break;
666	case DSA_NOTIFIER_FDB_ADD:
667		err = dsa_switch_fdb_add(ds, info);
668		break;
669	case DSA_NOTIFIER_FDB_DEL:
670		err = dsa_switch_fdb_del(ds, info);
671		break;
672	case DSA_NOTIFIER_HOST_FDB_ADD:
673		err = dsa_switch_host_fdb_add(ds, info);
674		break;
675	case DSA_NOTIFIER_HOST_FDB_DEL:
676		err = dsa_switch_host_fdb_del(ds, info);
677		break;
678	case DSA_NOTIFIER_HSR_JOIN:
679		err = dsa_switch_hsr_join(ds, info);
680		break;
681	case DSA_NOTIFIER_HSR_LEAVE:
682		err = dsa_switch_hsr_leave(ds, info);
683		break;
684	case DSA_NOTIFIER_LAG_CHANGE:
685		err = dsa_switch_lag_change(ds, info);
686		break;
687	case DSA_NOTIFIER_LAG_JOIN:
688		err = dsa_switch_lag_join(ds, info);
689		break;
690	case DSA_NOTIFIER_LAG_LEAVE:
691		err = dsa_switch_lag_leave(ds, info);
692		break;
693	case DSA_NOTIFIER_MDB_ADD:
694		err = dsa_switch_mdb_add(ds, info);
695		break;
696	case DSA_NOTIFIER_MDB_DEL:
697		err = dsa_switch_mdb_del(ds, info);
698		break;
699	case DSA_NOTIFIER_HOST_MDB_ADD:
700		err = dsa_switch_host_mdb_add(ds, info);
701		break;
702	case DSA_NOTIFIER_HOST_MDB_DEL:
703		err = dsa_switch_host_mdb_del(ds, info);
704		break;
705	case DSA_NOTIFIER_VLAN_ADD:
706		err = dsa_switch_vlan_add(ds, info);
707		break;
708	case DSA_NOTIFIER_VLAN_DEL:
709		err = dsa_switch_vlan_del(ds, info);
710		break;
711	case DSA_NOTIFIER_MTU:
712		err = dsa_switch_mtu(ds, info);
713		break;
714	case DSA_NOTIFIER_TAG_PROTO:
715		err = dsa_switch_change_tag_proto(ds, info);
716		break;
717	case DSA_NOTIFIER_MRP_ADD:
718		err = dsa_switch_mrp_add(ds, info);
719		break;
720	case DSA_NOTIFIER_MRP_DEL:
721		err = dsa_switch_mrp_del(ds, info);
722		break;
723	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
724		err = dsa_switch_mrp_add_ring_role(ds, info);
725		break;
726	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
727		err = dsa_switch_mrp_del_ring_role(ds, info);
728		break;
729	default:
730		err = -EOPNOTSUPP;
731		break;
732	}
733
734	if (err)
735		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
736			event, err);
737
738	return notifier_from_errno(err);
739}
740
741int dsa_switch_register_notifier(struct dsa_switch *ds)
742{
743	ds->nb.notifier_call = dsa_switch_event;
744
745	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
746}
747
748void dsa_switch_unregister_notifier(struct dsa_switch *ds)
749{
750	int err;
751
752	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
753	if (err)
754		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
755}
net/dsa/switch.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Handling of a single switch chip, part of a switch fabric
   4 *
   5 * Copyright (c) 2017 Savoir-faire Linux Inc.
   6 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
   7 */
   8
   9#include <linux/if_bridge.h>
  10#include <linux/netdevice.h>
  11#include <linux/notifier.h>
  12#include <linux/if_vlan.h>
  13#include <net/switchdev.h>
  14
  15#include "dsa.h"
  16#include "netlink.h"
  17#include "port.h"
  18#include "switch.h"
  19#include "tag_8021q.h"
  20#include "trace.h"
  21#include "user.h"
  22
  23static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
  24						   unsigned int ageing_time)
  25{
  26	struct dsa_port *dp;
  27
  28	dsa_switch_for_each_port(dp, ds)
  29		if (dp->ageing_time && dp->ageing_time < ageing_time)
  30			ageing_time = dp->ageing_time;
  31
  32	return ageing_time;
  33}
  34
  35static int dsa_switch_ageing_time(struct dsa_switch *ds,
  36				  struct dsa_notifier_ageing_time_info *info)
  37{
  38	unsigned int ageing_time = info->ageing_time;
  39
  40	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
  41		return -ERANGE;
  42
  43	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
  44		return -ERANGE;
  45
  46	/* Program the fastest ageing time in case of multiple bridges */
  47	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
  48
  49	if (ds->ops->set_ageing_time)
  50		return ds->ops->set_ageing_time(ds, ageing_time);
  51
  52	return 0;
  53}
  54
  55static bool dsa_port_mtu_match(struct dsa_port *dp,
  56			       struct dsa_notifier_mtu_info *info)
  57{
  58	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
  59}
  60
  61static int dsa_switch_mtu(struct dsa_switch *ds,
  62			  struct dsa_notifier_mtu_info *info)
  63{
  64	struct dsa_port *dp;
  65	int ret;
  66
  67	if (!ds->ops->port_change_mtu)
  68		return -EOPNOTSUPP;
  69
  70	dsa_switch_for_each_port(dp, ds) {
  71		if (dsa_port_mtu_match(dp, info)) {
  72			ret = ds->ops->port_change_mtu(ds, dp->index,
  73						       info->mtu);
  74			if (ret)
  75				return ret;
  76		}
  77	}
  78
  79	return 0;
  80}
  81
  82static int dsa_switch_bridge_join(struct dsa_switch *ds,
  83				  struct dsa_notifier_bridge_info *info)
  84{
  85	int err;
  86
  87	if (info->dp->ds == ds) {
  88		if (!ds->ops->port_bridge_join)
  89			return -EOPNOTSUPP;
  90
  91		err = ds->ops->port_bridge_join(ds, info->dp->index,
  92						info->bridge,
  93						&info->tx_fwd_offload,
  94						info->extack);
  95		if (err)
  96			return err;
  97	}
  98
  99	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
 100		err = ds->ops->crosschip_bridge_join(ds,
 101						     info->dp->ds->dst->index,
 102						     info->dp->ds->index,
 103						     info->dp->index,
 104						     info->bridge,
 105						     info->extack);
 106		if (err)
 107			return err;
 108	}
 109
 110	return 0;
 111}
 112
 113static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 114				   struct dsa_notifier_bridge_info *info)
 115{
 116	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
 117		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
 118
 119	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
 120		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
 121						info->dp->ds->index,
 122						info->dp->index,
 123						info->bridge);
 124
 125	return 0;
 126}
 127
 128/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 129 * DSA links) that sit between the targeted port on which the notifier was
 130 * emitted and its dedicated CPU port.
 131 */
 132static bool dsa_port_host_address_match(struct dsa_port *dp,
 133					const struct dsa_port *targeted_dp)
 134{
 135	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
 136
 137	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
 138		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
 139						     cpu_dp->index);
 140
 141	return false;
 142}
 143
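/* Compared with the older version above, addresses are now keyed by
 * (addr, vid, db), where the struct dsa_db argument identifies the database
 * the entry belongs to (roughly: the port itself, a bridge, or a LAG). This
 * is what allows FDB isolation between bridges sharing the same switch.
 */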
 144static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
 145					      const unsigned char *addr, u16 vid,
 146					      struct dsa_db db)
 147{
 148	struct dsa_mac_addr *a;
 149
 150	list_for_each_entry(a, addr_list, list)
 151		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
 152		    dsa_db_equal(&a->db, &db))
 153			return a;
 154
 155	return NULL;
 156}
 157
 158static int dsa_port_do_mdb_add(struct dsa_port *dp,
 159			       const struct switchdev_obj_port_mdb *mdb,
 160			       struct dsa_db db)
 161{
 162	struct dsa_switch *ds = dp->ds;
 163	struct dsa_mac_addr *a;
 164	int port = dp->index;
 165	int err = 0;
 166
 167	/* No need to bother with refcounting for user ports */
 168	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
 169		err = ds->ops->port_mdb_add(ds, port, mdb, db);
 170		trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
 171
 172		return err;
 173	}
 174
 175	mutex_lock(&dp->addr_lists_lock);
 176
 177	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
 178	if (a) {
 179		refcount_inc(&a->refcount);
 180		trace_dsa_mdb_add_bump(dp, mdb->addr, mdb->vid, &db,
 181				       &a->refcount);
 182		goto out;
 183	}
 184
 185	a = kzalloc(sizeof(*a), GFP_KERNEL);
 186	if (!a) {
 187		err = -ENOMEM;
 188		goto out;
 189	}
 190
 191	err = ds->ops->port_mdb_add(ds, port, mdb, db);
 192	trace_dsa_mdb_add_hw(dp, mdb->addr, mdb->vid, &db, err);
 193	if (err) {
 194		kfree(a);
 195		goto out;
 196	}
 197
 198	ether_addr_copy(a->addr, mdb->addr);
 199	a->vid = mdb->vid;
 200	a->db = db;
 201	refcount_set(&a->refcount, 1);
 202	list_add_tail(&a->list, &dp->mdbs);
 203
 204out:
 205	mutex_unlock(&dp->addr_lists_lock);
 206
 207	return err;
 208}
 209
 210static int dsa_port_do_mdb_del(struct dsa_port *dp,
 211			       const struct switchdev_obj_port_mdb *mdb,
 212			       struct dsa_db db)
 213{
 214	struct dsa_switch *ds = dp->ds;
 215	struct dsa_mac_addr *a;
 216	int port = dp->index;
 217	int err = 0;
 218
 219	/* No need to bother with refcounting for user ports */
 220	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
 221		err = ds->ops->port_mdb_del(ds, port, mdb, db);
 222		trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
 223
 224		return err;
 225	}
 226
 227	mutex_lock(&dp->addr_lists_lock);
 228
 229	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
 230	if (!a) {
 231		trace_dsa_mdb_del_not_found(dp, mdb->addr, mdb->vid, &db);
 232		err = -ENOENT;
 233		goto out;
 234	}
 235
 236	if (!refcount_dec_and_test(&a->refcount)) {
 237		trace_dsa_mdb_del_drop(dp, mdb->addr, mdb->vid, &db,
 238				       &a->refcount);
 239		goto out;
 240	}
 241
 242	err = ds->ops->port_mdb_del(ds, port, mdb, db);
 243	trace_dsa_mdb_del_hw(dp, mdb->addr, mdb->vid, &db, err);
 244	if (err) {
 245		refcount_set(&a->refcount, 1);
 246		goto out;
 247	}
 248
 249	list_del(&a->list);
 250	kfree(a);
 251
 252out:
 253	mutex_unlock(&dp->addr_lists_lock);
 254
 255	return err;
 256}
 257
 258static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
 259			       u16 vid, struct dsa_db db)
 260{
 261	struct dsa_switch *ds = dp->ds;
 262	struct dsa_mac_addr *a;
 263	int port = dp->index;
 264	int err = 0;
 265
 266	/* No need to bother with refcounting for user ports */
 267	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
 268		err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
 269		trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
 270
 271		return err;
 272	}
 273
 274	mutex_lock(&dp->addr_lists_lock);
 275
 276	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
 277	if (a) {
 278		refcount_inc(&a->refcount);
 279		trace_dsa_fdb_add_bump(dp, addr, vid, &db, &a->refcount);
 280		goto out;
 281	}
 282
 283	a = kzalloc(sizeof(*a), GFP_KERNEL);
 284	if (!a) {
 285		err = -ENOMEM;
 286		goto out;
 287	}
 288
 289	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
 290	trace_dsa_fdb_add_hw(dp, addr, vid, &db, err);
 291	if (err) {
 292		kfree(a);
 293		goto out;
 294	}
 295
 296	ether_addr_copy(a->addr, addr);
 297	a->vid = vid;
 298	a->db = db;
 299	refcount_set(&a->refcount, 1);
 300	list_add_tail(&a->list, &dp->fdbs);
 301
 302out:
 303	mutex_unlock(&dp->addr_lists_lock);
 304
 305	return err;
 306}
 307
 308static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
 309			       u16 vid, struct dsa_db db)
 310{
 311	struct dsa_switch *ds = dp->ds;
 312	struct dsa_mac_addr *a;
 313	int port = dp->index;
 314	int err = 0;
 315
 316	/* No need to bother with refcounting for user ports */
 317	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
 318		err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
 319		trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
 320
 321		return err;
 322	}
 323
 324	mutex_lock(&dp->addr_lists_lock);
 325
 326	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
 327	if (!a) {
 328		trace_dsa_fdb_del_not_found(dp, addr, vid, &db);
 329		err = -ENOENT;
 330		goto out;
 331	}
 332
 333	if (!refcount_dec_and_test(&a->refcount)) {
 334		trace_dsa_fdb_del_drop(dp, addr, vid, &db, &a->refcount);
 335		goto out;
 336	}
 337
 338	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
 339	trace_dsa_fdb_del_hw(dp, addr, vid, &db, err);
 340	if (err) {
 341		refcount_set(&a->refcount, 1);
 342		goto out;
 343	}
 344
 345	list_del(&a->list);
 346	kfree(a);
 347
 348out:
 349	mutex_unlock(&dp->addr_lists_lock);
 350
 351	return err;
 352}
 353
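/* FDB entries pointing at a LAG are installed once per LAG rather than once
 * per member port. They are refcounted on the struct dsa_lag itself, under
 * lag->fdb_lock, following the same 0->1 / 1->0 scheme as the per-port lists.
 */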
 354static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
 355				     const unsigned char *addr, u16 vid,
 356				     struct dsa_db db)
 357{
 358	struct dsa_mac_addr *a;
 359	int err = 0;
 360
 361	mutex_lock(&lag->fdb_lock);
 362
 363	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
 364	if (a) {
 365		refcount_inc(&a->refcount);
 366		trace_dsa_lag_fdb_add_bump(lag->dev, addr, vid, &db,
 367					   &a->refcount);
 368		goto out;
 369	}
 370
 371	a = kzalloc(sizeof(*a), GFP_KERNEL);
 372	if (!a) {
 373		err = -ENOMEM;
 374		goto out;
 375	}
 376
 377	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
 378	trace_dsa_lag_fdb_add_hw(lag->dev, addr, vid, &db, err);
 379	if (err) {
 380		kfree(a);
 381		goto out;
 382	}
 383
 384	ether_addr_copy(a->addr, addr);
 385	a->vid = vid;
 386	a->db = db;
 387	refcount_set(&a->refcount, 1);
 388	list_add_tail(&a->list, &lag->fdbs);
 389
 390out:
 391	mutex_unlock(&lag->fdb_lock);
 392
 393	return err;
 394}
 395
 396static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
 397				     const unsigned char *addr, u16 vid,
 398				     struct dsa_db db)
 399{
 400	struct dsa_mac_addr *a;
 401	int err = 0;
 402
 403	mutex_lock(&lag->fdb_lock);
 404
 405	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
 406	if (!a) {
 407		trace_dsa_lag_fdb_del_not_found(lag->dev, addr, vid, &db);
 408		err = -ENOENT;
 409		goto out;
 410	}
 411
 412	if (!refcount_dec_and_test(&a->refcount)) {
 413		trace_dsa_lag_fdb_del_drop(lag->dev, addr, vid, &db,
 414					   &a->refcount);
 415		goto out;
 416	}
 417
 418	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
 419	trace_dsa_lag_fdb_del_hw(lag->dev, addr, vid, &db, err);
 420	if (err) {
 421		refcount_set(&a->refcount, 1);
 422		goto out;
 423	}
 424
 425	list_del(&a->list);
 426	kfree(a);
 427
 428out:
 429	mutex_unlock(&lag->fdb_lock);
 430
 431	return err;
 432}
 433
 434static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
 435				   struct dsa_notifier_fdb_info *info)
 436{
 437	struct dsa_port *dp;
 438	int err = 0;
 439
 440	if (!ds->ops->port_fdb_add)
 441		return -EOPNOTSUPP;
 442
 443	dsa_switch_for_each_port(dp, ds) {
 444		if (dsa_port_host_address_match(dp, info->dp)) {
 445			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
 446				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
 447								info->addr,
 448								info->vid,
 449								info->db);
 450			} else {
 451				err = dsa_port_do_fdb_add(dp, info->addr,
 452							  info->vid, info->db);
 453			}
 454			if (err)
 455				break;
 456		}
 457	}
 458
 459	return err;
 460}
 461
 462static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
 463				   struct dsa_notifier_fdb_info *info)
 464{
 465	struct dsa_port *dp;
 466	int err = 0;
 467
 468	if (!ds->ops->port_fdb_del)
 469		return -EOPNOTSUPP;
 470
 471	dsa_switch_for_each_port(dp, ds) {
 472		if (dsa_port_host_address_match(dp, info->dp)) {
 473			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
 474				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
 475								info->addr,
 476								info->vid,
 477								info->db);
 478			} else {
 479				err = dsa_port_do_fdb_del(dp, info->addr,
 480							  info->vid, info->db);
 481			}
 482			if (err)
 483				break;
 484		}
 485	}
 486
 487	return err;
 488}
 489
 490static int dsa_switch_fdb_add(struct dsa_switch *ds,
 491			      struct dsa_notifier_fdb_info *info)
 492{
 493	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 494	struct dsa_port *dp = dsa_to_port(ds, port);
 495
 496	if (!ds->ops->port_fdb_add)
 497		return -EOPNOTSUPP;
 498
 499	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
 500}
 501
 502static int dsa_switch_fdb_del(struct dsa_switch *ds,
 503			      struct dsa_notifier_fdb_info *info)
 504{
 505	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 506	struct dsa_port *dp = dsa_to_port(ds, port);
 507
 508	if (!ds->ops->port_fdb_del)
 509		return -EOPNOTSUPP;
 510
 511	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
 512}
 513
 514static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
 515				  struct dsa_notifier_lag_fdb_info *info)
 516{
 517	struct dsa_port *dp;
 518
 519	if (!ds->ops->lag_fdb_add)
 520		return -EOPNOTSUPP;
 521
 522	/* Notify switch only if it has a port in this LAG */
 523	dsa_switch_for_each_port(dp, ds)
 524		if (dsa_port_offloads_lag(dp, info->lag))
 525			return dsa_switch_do_lag_fdb_add(ds, info->lag,
 526							 info->addr, info->vid,
 527							 info->db);
 528
 529	return 0;
 530}
 531
 532static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
 533				  struct dsa_notifier_lag_fdb_info *info)
 534{
 535	struct dsa_port *dp;
 536
 537	if (!ds->ops->lag_fdb_del)
 538		return -EOPNOTSUPP;
 539
 540	/* Notify switch only if it has a port in this LAG */
 541	dsa_switch_for_each_port(dp, ds)
 542		if (dsa_port_offloads_lag(dp, info->lag))
 543			return dsa_switch_do_lag_fdb_del(ds, info->lag,
 544							 info->addr, info->vid,
 545							 info->db);
 546
 547	return 0;
 548}
 549
 550static int dsa_switch_lag_change(struct dsa_switch *ds,
 551				 struct dsa_notifier_lag_info *info)
 552{
 553	if (info->dp->ds == ds && ds->ops->port_lag_change)
 554		return ds->ops->port_lag_change(ds, info->dp->index);
 555
 556	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
 557		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
 558						     info->dp->index);
 559
 560	return 0;
 561}
 562
 563static int dsa_switch_lag_join(struct dsa_switch *ds,
 564			       struct dsa_notifier_lag_info *info)
 565{
 566	if (info->dp->ds == ds && ds->ops->port_lag_join)
 567		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
 568					      info->info, info->extack);
 569
 570	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
 571		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
 572						   info->dp->index, info->lag,
 573						   info->info, info->extack);
 574
 575	return -EOPNOTSUPP;
 576}
 577
 578static int dsa_switch_lag_leave(struct dsa_switch *ds,
 579				struct dsa_notifier_lag_info *info)
 580{
 581	if (info->dp->ds == ds && ds->ops->port_lag_leave)
 582		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
 583
 584	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
 585		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
 586						    info->dp->index, info->lag);
 587
 588	return -EOPNOTSUPP;
 589}
 590
 591static int dsa_switch_mdb_add(struct dsa_switch *ds,
 592			      struct dsa_notifier_mdb_info *info)
 593{
 594	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 595	struct dsa_port *dp = dsa_to_port(ds, port);
 596
 597	if (!ds->ops->port_mdb_add)
 598		return -EOPNOTSUPP;
 599
 600	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
 601}
 602
 603static int dsa_switch_mdb_del(struct dsa_switch *ds,
 604			      struct dsa_notifier_mdb_info *info)
 605{
 606	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 607	struct dsa_port *dp = dsa_to_port(ds, port);
 608
 609	if (!ds->ops->port_mdb_del)
 610		return -EOPNOTSUPP;
 611
 612	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
 613}
 614
 615static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
 616				   struct dsa_notifier_mdb_info *info)
 617{
 618	struct dsa_port *dp;
 619	int err = 0;
 620
 621	if (!ds->ops->port_mdb_add)
 622		return -EOPNOTSUPP;
 623
 624	dsa_switch_for_each_port(dp, ds) {
 625		if (dsa_port_host_address_match(dp, info->dp)) {
 626			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
 627			if (err)
 628				break;
 629		}
 630	}
 631
 632	return err;
 633}
 634
 635static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
 636				   struct dsa_notifier_mdb_info *info)
 637{
 638	struct dsa_port *dp;
 639	int err = 0;
 640
 641	if (!ds->ops->port_mdb_del)
 642		return -EOPNOTSUPP;
 643
 644	dsa_switch_for_each_port(dp, ds) {
 645		if (dsa_port_host_address_match(dp, info->dp)) {
 646			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
 647			if (err)
 648				break;
 649		}
 650	}
 651
 652	return err;
 653}
 654
 655/* Port VLANs match on the targeted port and on all DSA ports */
 656static bool dsa_port_vlan_match(struct dsa_port *dp,
 657				struct dsa_notifier_vlan_info *info)
 658{
 659	return dsa_port_is_dsa(dp) || dp == info->dp;
 660}
 661
 662/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 663 * (upstream and downstream) of that switch and its upstream switches.
 664 */
 665static bool dsa_port_host_vlan_match(struct dsa_port *dp,
 666				     const struct dsa_port *targeted_dp)
 667{
 668	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
 669
 670	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
 671		return dsa_port_is_dsa(dp) || dp == cpu_dp;
 672
 673	return false;
 674}
 675
 676struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
 677			       const struct switchdev_obj_port_vlan *vlan)
 678{
 679	struct dsa_vlan *v;
 680
 681	list_for_each_entry(v, vlan_list, list)
 682		if (v->vid == vlan->vid)
 683			return v;
 684
 685	return NULL;
 686}
 687
 688static int dsa_port_do_vlan_add(struct dsa_port *dp,
 689				const struct switchdev_obj_port_vlan *vlan,
 690				struct netlink_ext_ack *extack)
 691{
 692	struct dsa_switch *ds = dp->ds;
 693	int port = dp->index;
 694	struct dsa_vlan *v;
 695	int err = 0;
 696
 697	/* No need to bother with refcounting for user ports. */
 698	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
 699		err = ds->ops->port_vlan_add(ds, port, vlan, extack);
 700		trace_dsa_vlan_add_hw(dp, vlan, err);
 701
 702		return err;
 703	}
 704
 705	/* No need to propagate on shared ports the existing VLANs that were
 706	 * re-notified after just the flags have changed. This would cause a
 707	 * refcount bump which we need to avoid, since it unbalances the
 708	 * additions with the deletions.
 709	 */
 710	if (vlan->changed)
 711		return 0;
 712
 713	mutex_lock(&dp->vlans_lock);
 714
 715	v = dsa_vlan_find(&dp->vlans, vlan);
 716	if (v) {
 717		refcount_inc(&v->refcount);
 718		trace_dsa_vlan_add_bump(dp, vlan, &v->refcount);
 719		goto out;
 720	}
 721
 722	v = kzalloc(sizeof(*v), GFP_KERNEL);
 723	if (!v) {
 724		err = -ENOMEM;
 725		goto out;
 726	}
 727
 728	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
 729	trace_dsa_vlan_add_hw(dp, vlan, err);
 730	if (err) {
 731		kfree(v);
 732		goto out;
 733	}
 734
 735	v->vid = vlan->vid;
 736	refcount_set(&v->refcount, 1);
 737	list_add_tail(&v->list, &dp->vlans);
 738
 739out:
 740	mutex_unlock(&dp->vlans_lock);
 741
 742	return err;
 743}
 744
 745static int dsa_port_do_vlan_del(struct dsa_port *dp,
 746				const struct switchdev_obj_port_vlan *vlan)
 747{
 748	struct dsa_switch *ds = dp->ds;
 749	int port = dp->index;
 750	struct dsa_vlan *v;
 751	int err = 0;
 752
 753	/* No need to bother with refcounting for user ports */
 754	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp))) {
 755		err = ds->ops->port_vlan_del(ds, port, vlan);
 756		trace_dsa_vlan_del_hw(dp, vlan, err);
 757
 758		return err;
 759	}
 760
 761	mutex_lock(&dp->vlans_lock);
 762
 763	v = dsa_vlan_find(&dp->vlans, vlan);
 764	if (!v) {
 765		trace_dsa_vlan_del_not_found(dp, vlan);
 766		err = -ENOENT;
 767		goto out;
 768	}
 769
 770	if (!refcount_dec_and_test(&v->refcount)) {
 771		trace_dsa_vlan_del_drop(dp, vlan, &v->refcount);
 772		goto out;
 773	}
 774
 775	err = ds->ops->port_vlan_del(ds, port, vlan);
 776	trace_dsa_vlan_del_hw(dp, vlan, err);
 777	if (err) {
 778		refcount_set(&v->refcount, 1);
 779		goto out;
 780	}
 781
 782	list_del(&v->list);
 783	kfree(v);
 784
 785out:
 786	mutex_unlock(&dp->vlans_lock);
 787
 788	return err;
 789}
 790
 791static int dsa_switch_vlan_add(struct dsa_switch *ds,
 792			       struct dsa_notifier_vlan_info *info)
 793{
 794	struct dsa_port *dp;
 795	int err;
 796
 797	if (!ds->ops->port_vlan_add)
 798		return -EOPNOTSUPP;
 799
 800	dsa_switch_for_each_port(dp, ds) {
 801		if (dsa_port_vlan_match(dp, info)) {
 802			err = dsa_port_do_vlan_add(dp, info->vlan,
 803						   info->extack);
 804			if (err)
 805				return err;
 806		}
 807	}
 808
 809	return 0;
 810}
 811
 812static int dsa_switch_vlan_del(struct dsa_switch *ds,
 813			       struct dsa_notifier_vlan_info *info)
 814{
 815	struct dsa_port *dp;
 816	int err;
 817
 818	if (!ds->ops->port_vlan_del)
 819		return -EOPNOTSUPP;
 820
 821	dsa_switch_for_each_port(dp, ds) {
 822		if (dsa_port_vlan_match(dp, info)) {
 823			err = dsa_port_do_vlan_del(dp, info->vlan);
 824			if (err)
 825				return err;
 826		}
 827	}
 828
 829	return 0;
 830}
 831
 832static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
 833				    struct dsa_notifier_vlan_info *info)
 834{
 835	struct dsa_port *dp;
 836	int err;
 837
 838	if (!ds->ops->port_vlan_add)
 839		return -EOPNOTSUPP;
 840
 841	dsa_switch_for_each_port(dp, ds) {
 842		if (dsa_port_host_vlan_match(dp, info->dp)) {
 843			err = dsa_port_do_vlan_add(dp, info->vlan,
 844						   info->extack);
 845			if (err)
 846				return err;
 847		}
 848	}
 849
 850	return 0;
 851}
 852
 853static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
 854				    struct dsa_notifier_vlan_info *info)
 855{
 856	struct dsa_port *dp;
 857	int err;
 858
 859	if (!ds->ops->port_vlan_del)
 860		return -EOPNOTSUPP;
 861
 862	dsa_switch_for_each_port(dp, ds) {
 863		if (dsa_port_host_vlan_match(dp, info->dp)) {
 864			err = dsa_port_do_vlan_del(dp, info->vlan);
 865			if (err)
 866				return err;
 867		}
 868	}
 869
 870	return 0;
 871}
 872
 873static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
 874				       struct dsa_notifier_tag_proto_info *info)
 875{
 876	const struct dsa_device_ops *tag_ops = info->tag_ops;
 877	struct dsa_port *dp, *cpu_dp;
 878	int err;
 879
 880	if (!ds->ops->change_tag_protocol)
 881		return -EOPNOTSUPP;
 882
 883	ASSERT_RTNL();
 884
 885	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
 886	if (err)
 887		return err;
 888
 889	dsa_switch_for_each_cpu_port(cpu_dp, ds)
 890		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
 891
 892	/* Now that changing the tag protocol can no longer fail, let's update
 893	 * the remaining bits which are "duplicated for faster access", and the
 894	 * bits that depend on the tagger, such as the MTU.
 895	 */
 896	dsa_switch_for_each_user_port(dp, ds) {
 897		struct net_device *user = dp->user;
 898
 899		dsa_user_setup_tagger(user);
 900
 901		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
 902		dsa_user_change_mtu(user, user->mtu);
 903	}
 904
 905	return 0;
 906}
 907
 908/* We use the same cross-chip notifiers to inform both the tagger side, as well
 909 * as the switch side, of connection and disconnection events.
 910 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 911 * switch side doesn't support connecting to this tagger, and therefore, the
 912 * fact that we don't disconnect the tagger side doesn't constitute a memory
 913 * leak: the tagger will still operate with persistent per-switch memory, just
 914 * with the switch side unconnected to it. What does constitute a hard error is
 915 * when the switch side supports connecting but fails.
 916 */
 917static int
 918dsa_switch_connect_tag_proto(struct dsa_switch *ds,
 919			     struct dsa_notifier_tag_proto_info *info)
 920{
 921	const struct dsa_device_ops *tag_ops = info->tag_ops;
 922	int err;
 923
 924	/* Notify the new tagger about the connection to this switch */
 925	if (tag_ops->connect) {
 926		err = tag_ops->connect(ds);
 927		if (err)
 928			return err;
 929	}
 930
 931	if (!ds->ops->connect_tag_protocol)
 932		return -EOPNOTSUPP;
 933
 934	/* Notify the switch about the connection to the new tagger */
 935	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
 936	if (err) {
 937		/* Revert the new tagger's connection to this tree */
 938		if (tag_ops->disconnect)
 939			tag_ops->disconnect(ds);
 940		return err;
 941	}
 942
 943	return 0;
 944}
 945
 946static int
 947dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
 948				struct dsa_notifier_tag_proto_info *info)
 949{
 950	const struct dsa_device_ops *tag_ops = info->tag_ops;
 951
 952	/* Notify the tagger about the disconnection from this switch */
 953	if (tag_ops->disconnect && ds->tagger_data)
 954		tag_ops->disconnect(ds);
 955
 956	/* No need to notify the switch, since it shouldn't have any
 957	 * resources to tear down
 958	 */
 959	return 0;
 960}
 961
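/* "Conduit" is the current name for what older kernels (such as the v5.14.15
 * listing above) called the DSA master: the host Ethernet interface the
 * switch is attached to. This notifier lets drivers react when that
 * interface's operational state changes.
 */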
 962static int
 963dsa_switch_conduit_state_change(struct dsa_switch *ds,
 964				struct dsa_notifier_conduit_state_info *info)
 965{
 966	if (!ds->ops->conduit_state_change)
 967		return 0;
 968
 969	ds->ops->conduit_state_change(ds, info->conduit, info->operational);
 970
 971	return 0;
 972}
 973
 974static int dsa_switch_event(struct notifier_block *nb,
 975			    unsigned long event, void *info)
 976{
 977	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
 978	int err;
 979
 980	switch (event) {
 981	case DSA_NOTIFIER_AGEING_TIME:
 982		err = dsa_switch_ageing_time(ds, info);
 983		break;
 984	case DSA_NOTIFIER_BRIDGE_JOIN:
 985		err = dsa_switch_bridge_join(ds, info);
 986		break;
 987	case DSA_NOTIFIER_BRIDGE_LEAVE:
 988		err = dsa_switch_bridge_leave(ds, info);
 989		break;
 990	case DSA_NOTIFIER_FDB_ADD:
 991		err = dsa_switch_fdb_add(ds, info);
 992		break;
 993	case DSA_NOTIFIER_FDB_DEL:
 994		err = dsa_switch_fdb_del(ds, info);
 995		break;
 996	case DSA_NOTIFIER_HOST_FDB_ADD:
 997		err = dsa_switch_host_fdb_add(ds, info);
 998		break;
 999	case DSA_NOTIFIER_HOST_FDB_DEL:
1000		err = dsa_switch_host_fdb_del(ds, info);
1001		break;
1002	case DSA_NOTIFIER_LAG_FDB_ADD:
1003		err = dsa_switch_lag_fdb_add(ds, info);
1004		break;
1005	case DSA_NOTIFIER_LAG_FDB_DEL:
1006		err = dsa_switch_lag_fdb_del(ds, info);
1007		break;
1008	case DSA_NOTIFIER_LAG_CHANGE:
1009		err = dsa_switch_lag_change(ds, info);
1010		break;
1011	case DSA_NOTIFIER_LAG_JOIN:
1012		err = dsa_switch_lag_join(ds, info);
1013		break;
1014	case DSA_NOTIFIER_LAG_LEAVE:
1015		err = dsa_switch_lag_leave(ds, info);
1016		break;
1017	case DSA_NOTIFIER_MDB_ADD:
1018		err = dsa_switch_mdb_add(ds, info);
1019		break;
1020	case DSA_NOTIFIER_MDB_DEL:
1021		err = dsa_switch_mdb_del(ds, info);
1022		break;
1023	case DSA_NOTIFIER_HOST_MDB_ADD:
1024		err = dsa_switch_host_mdb_add(ds, info);
1025		break;
1026	case DSA_NOTIFIER_HOST_MDB_DEL:
1027		err = dsa_switch_host_mdb_del(ds, info);
1028		break;
1029	case DSA_NOTIFIER_VLAN_ADD:
1030		err = dsa_switch_vlan_add(ds, info);
1031		break;
1032	case DSA_NOTIFIER_VLAN_DEL:
1033		err = dsa_switch_vlan_del(ds, info);
1034		break;
1035	case DSA_NOTIFIER_HOST_VLAN_ADD:
1036		err = dsa_switch_host_vlan_add(ds, info);
1037		break;
1038	case DSA_NOTIFIER_HOST_VLAN_DEL:
1039		err = dsa_switch_host_vlan_del(ds, info);
1040		break;
1041	case DSA_NOTIFIER_MTU:
1042		err = dsa_switch_mtu(ds, info);
1043		break;
1044	case DSA_NOTIFIER_TAG_PROTO:
1045		err = dsa_switch_change_tag_proto(ds, info);
1046		break;
1047	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
1048		err = dsa_switch_connect_tag_proto(ds, info);
1049		break;
1050	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
1051		err = dsa_switch_disconnect_tag_proto(ds, info);
1052		break;
1053	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
1054		err = dsa_switch_tag_8021q_vlan_add(ds, info);
1055		break;
1056	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
1057		err = dsa_switch_tag_8021q_vlan_del(ds, info);
1058		break;
1059	case DSA_NOTIFIER_CONDUIT_STATE_CHANGE:
1060		err = dsa_switch_conduit_state_change(ds, info);
1061		break;
1062	default:
1063		err = -EOPNOTSUPP;
1064		break;
1065	}
1066
1067	if (err)
1068		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
1069			event, err);
1070
1071	return notifier_from_errno(err);
1072}
1073
1074/**
1075 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
1076 * @dst: collection of struct dsa_switch devices to notify.
1077 * @e: event, must be of type DSA_NOTIFIER_*
1078 * @v: event-specific value.
1079 *
1080 * Given a struct dsa_switch_tree, this can be used to run a function once for
1081 * each member DSA switch. The other alternative of traversing the tree is only
1082 * through its ports list, which does not uniquely list the switches.
1083 */
1084int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
1085{
1086	struct raw_notifier_head *nh = &dst->nh;
1087	int err;
1088
1089	err = raw_notifier_call_chain(nh, e, v);
1090
1091	return notifier_to_errno(err);
1092}
1093
1094/**
1095 * dsa_broadcast - Notify all DSA trees in the system.
1096 * @e: event, must be of type DSA_NOTIFIER_*
1097 * @v: event-specific value.
1098 *
1099 * Can be used to notify the switching fabric of events such as cross-chip
1100 * bridging between disjoint trees (such as islands of tagger-compatible
1101 * switches bridged by an incompatible middle switch).
1102 *
1103 * WARNING: this function is not reliable during probe time, because probing
1104 * between trees is asynchronous and not all DSA trees might have probed.
1105 */
1106int dsa_broadcast(unsigned long e, void *v)
1107{
1108	struct dsa_switch_tree *dst;
1109	int err = 0;
1110
1111	list_for_each_entry(dst, &dsa_tree_list, list) {
1112		err = dsa_tree_notify(dst, e, v);
1113		if (err)
1114			break;
1115	}
1116
1117	return err;
1118}
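/* A minimal sketch of how these events are emitted (the real emitters live
 * elsewhere in net/dsa): the caller fills the event-specific info structure
 * and runs it through the tree, roughly:
 *
 *	struct dsa_notifier_mtu_info info = {
 *		.dp = dp,
 *		.mtu = new_mtu,
 *	};
 *
 *	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
 *
 * dsa_switch_event() then runs on every switch in the tree and dispatches the
 * event to dsa_switch_mtu() above.
 */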
1119
1120int dsa_switch_register_notifier(struct dsa_switch *ds)
1121{
1122	ds->nb.notifier_call = dsa_switch_event;
1123
1124	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
1125}
1126
1127void dsa_switch_unregister_notifier(struct dsa_switch *ds)
1128{
1129	int err;
1130
1131	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
1132	if (err)
1133		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
1134}
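
For reference, a deliberately partial, hypothetical driver-side sketch follows. It only uses the callback argument lists visible at the call sites above; the foo_* names are invented for illustration, and a real driver would also have to provide the usual mandatory operations (such as .get_tag_protocol).

static int foo_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	/* Program the switch-wide address ageing interval. */
	return 0;
}

static int foo_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
	/* Resize the per-port maximum frame length. */
	return 0;
}

static int foo_port_fdb_add(struct dsa_switch *ds, int port,
			    const unsigned char *addr, u16 vid,
			    struct dsa_db db)
{
	/* Install a static FDB entry on this port, in database @db. */
	return 0;
}

static const struct dsa_switch_ops foo_switch_ops = {
	.set_ageing_time	= foo_set_ageing_time,
	.port_change_mtu	= foo_port_change_mtu,
	.port_fdb_add		= foo_port_fdb_add,
};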