net/dsa/switch.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Handling of a single switch chip, part of a switch fabric
  4 *
  5 * Copyright (c) 2017 Savoir-faire Linux Inc.
  6 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
  7 */
  8
  9#include <linux/if_bridge.h>
 10#include <linux/netdevice.h>
 11#include <linux/notifier.h>
 12#include <linux/if_vlan.h>
 13#include <net/switchdev.h>
 14
 15#include "dsa_priv.h"
 16
 17static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
 18						   unsigned int ageing_time)
 19{
 20	int i;
 21
 22	for (i = 0; i < ds->num_ports; ++i) {
 23		struct dsa_port *dp = dsa_to_port(ds, i);
 24
 25		if (dp->ageing_time && dp->ageing_time < ageing_time)
 26			ageing_time = dp->ageing_time;
 27	}
 28
 29	return ageing_time;
 30}
 31
 32static int dsa_switch_ageing_time(struct dsa_switch *ds,
 33				  struct dsa_notifier_ageing_time_info *info)
 34{
 35	unsigned int ageing_time = info->ageing_time;
 36
 37	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
 38		return -ERANGE;
 39
 40	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
 41		return -ERANGE;
 42
 43	/* Program the fastest ageing time in case of multiple bridges */
 44	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
 45
 46	if (ds->ops->set_ageing_time)
 47		return ds->ops->set_ageing_time(ds, ageing_time);
 48
 49	return 0;
 50}
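/*
 * Editorial sketch, not part of switch.c: a driver whose ageing time lives in
 * a single per-switch register would wire up the ->set_ageing_time() op that
 * dsa_switch_ageing_time() calls above roughly as follows. The EXAMPLE_*
 * constants and example_write() helper are hypothetical; such a driver would
 * also fill in ds->ageing_time_min/max so the range checks above apply.
 */
static int example_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
{
	/* assume the hardware counts ageing time in whole seconds */
	u32 secs = DIV_ROUND_UP(msecs, 1000);

	return example_write(ds, EXAMPLE_REG_AGEING, secs);
}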
 51
 52static bool dsa_switch_mtu_match(struct dsa_switch *ds, int port,
 53				 struct dsa_notifier_mtu_info *info)
 54{
 55	if (ds->index == info->sw_index && port == info->port)
 56		return true;
 57
 58	/* Do not propagate to other switches in the tree if the notifier was
 59	 * targeted for a single switch.
 60	 */
 61	if (info->targeted_match)
 62		return false;
 63
 64	if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
 65		return true;
 66
 67	return false;
 68}
 69
 70static int dsa_switch_mtu(struct dsa_switch *ds,
 71			  struct dsa_notifier_mtu_info *info)
 72{
 73	int port, ret;
 74
 75	if (!ds->ops->port_change_mtu)
 76		return -EOPNOTSUPP;
 77
 78	for (port = 0; port < ds->num_ports; port++) {
 79		if (dsa_switch_mtu_match(ds, port, info)) {
 80			ret = ds->ops->port_change_mtu(ds, port, info->mtu);
 81			if (ret)
 82				return ret;
 83		}
 84	}
 85
 86	return 0;
 87}
 88
 89static int dsa_switch_bridge_join(struct dsa_switch *ds,
 90				  struct dsa_notifier_bridge_info *info)
 91{
 92	struct dsa_switch_tree *dst = ds->dst;
 93
 94	if (dst->index == info->tree_index && ds->index == info->sw_index &&
 95	    ds->ops->port_bridge_join)
 96		return ds->ops->port_bridge_join(ds, info->port, info->br);
 97
 98	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
 99	    ds->ops->crosschip_bridge_join)
100		return ds->ops->crosschip_bridge_join(ds, info->tree_index,
101						      info->sw_index,
102						      info->port, info->br);
103
104	return 0;
105}
106
107static int dsa_switch_bridge_leave(struct dsa_switch *ds,
108				   struct dsa_notifier_bridge_info *info)
109{
110	bool unset_vlan_filtering = br_vlan_enabled(info->br);
111	struct dsa_switch_tree *dst = ds->dst;
112	struct netlink_ext_ack extack = {0};
113	int err, port;
114
115	if (dst->index == info->tree_index && ds->index == info->sw_index &&
116	    ds->ops->port_bridge_leave)
117		ds->ops->port_bridge_leave(ds, info->port, info->br);
118
119	if ((dst->index != info->tree_index || ds->index != info->sw_index) &&
120	    ds->ops->crosschip_bridge_leave)
121		ds->ops->crosschip_bridge_leave(ds, info->tree_index,
122						info->sw_index, info->port,
123						info->br);
124
125	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
126	 * event for changing vlan_filtering setting upon slave ports leaving
127	 * it. That is a good thing, because that lets us handle it and also
128	 * handle the case where the switch's vlan_filtering setting is global
129	 * (not per port). When that happens, the correct moment to trigger the
130	 * vlan_filtering callback is only when the last port leaves the last
131	 * VLAN-aware bridge.
132	 */
133	if (unset_vlan_filtering && ds->vlan_filtering_is_global) {
134		for (port = 0; port < ds->num_ports; port++) {
135			struct net_device *bridge_dev;
136
137			bridge_dev = dsa_to_port(ds, port)->bridge_dev;
138
139			if (bridge_dev && br_vlan_enabled(bridge_dev)) {
140				unset_vlan_filtering = false;
141				break;
142			}
143		}
144	}
145	if (unset_vlan_filtering) {
146		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
147					      false, &extack);
148		if (extack._msg)
149			dev_err(ds->dev, "port %d: %s\n", info->port,
150				extack._msg);
151		if (err && err != -EOPNOTSUPP)
152			return err;
153	}
154	return 0;
155}
156
157/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
158 * DSA links) that sit between the targeted port on which the notifier was
159 * emitted and its dedicated CPU port.
160 */
161static bool dsa_switch_host_address_match(struct dsa_switch *ds, int port,
162					  int info_sw_index, int info_port)
163{
164	struct dsa_port *targeted_dp, *cpu_dp;
165	struct dsa_switch *targeted_ds;
166
167	targeted_ds = dsa_switch_find(ds->dst->index, info_sw_index);
168	targeted_dp = dsa_to_port(targeted_ds, info_port);
169	cpu_dp = targeted_dp->cpu_dp;
170
171	if (dsa_switch_is_upstream_of(ds, targeted_ds))
172		return port == dsa_towards_port(ds, cpu_dp->ds->index,
173						cpu_dp->index);
174
175	return false;
176}
177
178static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
179					      const unsigned char *addr,
180					      u16 vid)
181{
182	struct dsa_mac_addr *a;
183
184	list_for_each_entry(a, addr_list, list)
185		if (ether_addr_equal(a->addr, addr) && a->vid == vid)
186			return a;
187
188	return NULL;
189}
190
191static int dsa_switch_do_mdb_add(struct dsa_switch *ds, int port,
192				 const struct switchdev_obj_port_mdb *mdb)
193{
194	struct dsa_port *dp = dsa_to_port(ds, port);
195	struct dsa_mac_addr *a;
196	int err;
197
198	/* No need to bother with refcounting for user ports */
199	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
200		return ds->ops->port_mdb_add(ds, port, mdb);
201
202	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
203	if (a) {
204		refcount_inc(&a->refcount);
205		return 0;
206	}
207
208	a = kzalloc(sizeof(*a), GFP_KERNEL);
209	if (!a)
210		return -ENOMEM;
211
212	err = ds->ops->port_mdb_add(ds, port, mdb);
213	if (err) {
214		kfree(a);
215		return err;
216	}
217
218	ether_addr_copy(a->addr, mdb->addr);
219	a->vid = mdb->vid;
220	refcount_set(&a->refcount, 1);
221	list_add_tail(&a->list, &dp->mdbs);
222
223	return 0;
224}
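/*
 * Editorial note: the refcounting above exists because CPU and DSA ports are
 * shared by all user ports behind them; several of those user ports may
 * request the same {address, VID}, so the hardware entry must only be
 * installed on the first request and torn down again when the last reference
 * is dropped in dsa_switch_do_mdb_del(). The FDB helpers below use the same
 * scheme.
 */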
225
226static int dsa_switch_do_mdb_del(struct dsa_switch *ds, int port,
227				 const struct switchdev_obj_port_mdb *mdb)
228{
229	struct dsa_port *dp = dsa_to_port(ds, port);
230	struct dsa_mac_addr *a;
231	int err;
232
233	/* No need to bother with refcounting for user ports */
234	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
235		return ds->ops->port_mdb_del(ds, port, mdb);
236
237	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid);
238	if (!a)
239		return -ENOENT;
240
241	if (!refcount_dec_and_test(&a->refcount))
242		return 0;
243
244	err = ds->ops->port_mdb_del(ds, port, mdb);
245	if (err) {
246		refcount_inc(&a->refcount);
247		return err;
248	}
249
250	list_del(&a->list);
251	kfree(a);
252
253	return 0;
254}
255
256static int dsa_switch_do_fdb_add(struct dsa_switch *ds, int port,
257				 const unsigned char *addr, u16 vid)
258{
259	struct dsa_port *dp = dsa_to_port(ds, port);
260	struct dsa_mac_addr *a;
261	int err;
262
263	/* No need to bother with refcounting for user ports */
264	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
265		return ds->ops->port_fdb_add(ds, port, addr, vid);
266
267	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
268	if (a) {
269		refcount_inc(&a->refcount);
270		return 0;
271	}
272
273	a = kzalloc(sizeof(*a), GFP_KERNEL);
274	if (!a)
275		return -ENOMEM;
276
277	err = ds->ops->port_fdb_add(ds, port, addr, vid);
278	if (err) {
279		kfree(a);
280		return err;
281	}
282
283	ether_addr_copy(a->addr, addr);
284	a->vid = vid;
285	refcount_set(&a->refcount, 1);
286	list_add_tail(&a->list, &dp->fdbs);
287
288	return 0;
289}
290
291static int dsa_switch_do_fdb_del(struct dsa_switch *ds, int port,
292				 const unsigned char *addr, u16 vid)
293{
294	struct dsa_port *dp = dsa_to_port(ds, port);
295	struct dsa_mac_addr *a;
296	int err;
297
298	/* No need to bother with refcounting for user ports */
299	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
300		return ds->ops->port_fdb_del(ds, port, addr, vid);
301
302	a = dsa_mac_addr_find(&dp->fdbs, addr, vid);
303	if (!a)
304		return -ENOENT;
305
306	if (!refcount_dec_and_test(&a->refcount))
307		return 0;
308
309	err = ds->ops->port_fdb_del(ds, port, addr, vid);
310	if (err) {
311		refcount_inc(&a->refcount);
312		return err;
313	}
314
315	list_del(&a->list);
316	kfree(a);
317
318	return 0;
319}
320
321static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
322				   struct dsa_notifier_fdb_info *info)
323{
324	int err = 0;
325	int port;
326
327	if (!ds->ops->port_fdb_add)
328		return -EOPNOTSUPP;
329
330	for (port = 0; port < ds->num_ports; port++) {
331		if (dsa_switch_host_address_match(ds, port, info->sw_index,
332						  info->port)) {
333			err = dsa_switch_do_fdb_add(ds, port, info->addr,
334						    info->vid);
335			if (err)
336				break;
337		}
338	}
339
340	return err;
341}
342
343static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
344				   struct dsa_notifier_fdb_info *info)
345{
346	int err = 0;
347	int port;
348
349	if (!ds->ops->port_fdb_del)
350		return -EOPNOTSUPP;
351
352	for (port = 0; port < ds->num_ports; port++) {
353		if (dsa_switch_host_address_match(ds, port, info->sw_index,
354						  info->port)) {
355			err = dsa_switch_do_fdb_del(ds, port, info->addr,
356						    info->vid);
357			if (err)
358				break;
359		}
360	}
361
362	return err;
363}
364
365static int dsa_switch_fdb_add(struct dsa_switch *ds,
366			      struct dsa_notifier_fdb_info *info)
367{
368	int port = dsa_towards_port(ds, info->sw_index, info->port);
369
370	if (!ds->ops->port_fdb_add)
371		return -EOPNOTSUPP;
372
373	return dsa_switch_do_fdb_add(ds, port, info->addr, info->vid);
374}
375
376static int dsa_switch_fdb_del(struct dsa_switch *ds,
377			      struct dsa_notifier_fdb_info *info)
378{
379	int port = dsa_towards_port(ds, info->sw_index, info->port);
380
381	if (!ds->ops->port_fdb_del)
382		return -EOPNOTSUPP;
383
384	return dsa_switch_do_fdb_del(ds, port, info->addr, info->vid);
385}
386
387static int dsa_switch_hsr_join(struct dsa_switch *ds,
388			       struct dsa_notifier_hsr_info *info)
389{
390	if (ds->index == info->sw_index && ds->ops->port_hsr_join)
391		return ds->ops->port_hsr_join(ds, info->port, info->hsr);
392
393	return -EOPNOTSUPP;
394}
395
396static int dsa_switch_hsr_leave(struct dsa_switch *ds,
397				struct dsa_notifier_hsr_info *info)
398{
399	if (ds->index == info->sw_index && ds->ops->port_hsr_leave)
400		return ds->ops->port_hsr_leave(ds, info->port, info->hsr);
401
402	return -EOPNOTSUPP;
403}
404
405static int dsa_switch_lag_change(struct dsa_switch *ds,
406				 struct dsa_notifier_lag_info *info)
407{
408	if (ds->index == info->sw_index && ds->ops->port_lag_change)
409		return ds->ops->port_lag_change(ds, info->port);
410
411	if (ds->index != info->sw_index && ds->ops->crosschip_lag_change)
412		return ds->ops->crosschip_lag_change(ds, info->sw_index,
413						     info->port);
414
415	return 0;
416}
417
418static int dsa_switch_lag_join(struct dsa_switch *ds,
419			       struct dsa_notifier_lag_info *info)
420{
421	if (ds->index == info->sw_index && ds->ops->port_lag_join)
422		return ds->ops->port_lag_join(ds, info->port, info->lag,
423					      info->info);
424
425	if (ds->index != info->sw_index && ds->ops->crosschip_lag_join)
426		return ds->ops->crosschip_lag_join(ds, info->sw_index,
427						   info->port, info->lag,
428						   info->info);
429
430	return -EOPNOTSUPP;
431}
432
433static int dsa_switch_lag_leave(struct dsa_switch *ds,
434				struct dsa_notifier_lag_info *info)
435{
436	if (ds->index == info->sw_index && ds->ops->port_lag_leave)
437		return ds->ops->port_lag_leave(ds, info->port, info->lag);
438
439	if (ds->index != info->sw_index && ds->ops->crosschip_lag_leave)
440		return ds->ops->crosschip_lag_leave(ds, info->sw_index,
441						    info->port, info->lag);
442
443	return -EOPNOTSUPP;
444}
445
446static int dsa_switch_mdb_add(struct dsa_switch *ds,
447			      struct dsa_notifier_mdb_info *info)
448{
449	int port = dsa_towards_port(ds, info->sw_index, info->port);
450
451	if (!ds->ops->port_mdb_add)
452		return -EOPNOTSUPP;
453
454	return dsa_switch_do_mdb_add(ds, port, info->mdb);
455}
456
457static int dsa_switch_mdb_del(struct dsa_switch *ds,
458			      struct dsa_notifier_mdb_info *info)
459{
460	int port = dsa_towards_port(ds, info->sw_index, info->port);
461
462	if (!ds->ops->port_mdb_del)
463		return -EOPNOTSUPP;
464
465	return dsa_switch_do_mdb_del(ds, port, info->mdb);
466}
467
468static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
469				   struct dsa_notifier_mdb_info *info)
470{
471	int err = 0;
472	int port;
473
474	if (!ds->ops->port_mdb_add)
475		return -EOPNOTSUPP;
476
477	for (port = 0; port < ds->num_ports; port++) {
478		if (dsa_switch_host_address_match(ds, port, info->sw_index,
479						  info->port)) {
480			err = dsa_switch_do_mdb_add(ds, port, info->mdb);
481			if (err)
482				break;
483		}
484	}
485
486	return err;
487}
488
489static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
490				   struct dsa_notifier_mdb_info *info)
491{
492	int err = 0;
493	int port;
494
495	if (!ds->ops->port_mdb_del)
496		return -EOPNOTSUPP;
497
498	for (port = 0; port < ds->num_ports; port++) {
499		if (dsa_switch_host_address_match(ds, port, info->sw_index,
500						  info->port)) {
501			err = dsa_switch_do_mdb_del(ds, port, info->mdb);
502			if (err)
503				break;
504		}
505	}
506
507	return err;
508}
509
510static bool dsa_switch_vlan_match(struct dsa_switch *ds, int port,
511				  struct dsa_notifier_vlan_info *info)
512{
513	if (ds->index == info->sw_index && port == info->port)
514		return true;
515
516	if (dsa_is_dsa_port(ds, port))
517		return true;
518
519	return false;
520}
521
522static int dsa_switch_vlan_add(struct dsa_switch *ds,
523			       struct dsa_notifier_vlan_info *info)
524{
525	int port, err;
526
527	if (!ds->ops->port_vlan_add)
528		return -EOPNOTSUPP;
529
530	for (port = 0; port < ds->num_ports; port++) {
531		if (dsa_switch_vlan_match(ds, port, info)) {
532			err = ds->ops->port_vlan_add(ds, port, info->vlan,
533						     info->extack);
534			if (err)
535				return err;
536		}
537	}
538
539	return 0;
540}
541
542static int dsa_switch_vlan_del(struct dsa_switch *ds,
543			       struct dsa_notifier_vlan_info *info)
544{
545	if (!ds->ops->port_vlan_del)
546		return -EOPNOTSUPP;
547
548	if (ds->index == info->sw_index)
549		return ds->ops->port_vlan_del(ds, info->port, info->vlan);
550
551	/* Do not deprogram the DSA links as they may be used as conduit
552	 * for other VLAN members in the fabric.
553	 */
554	return 0;
555}
556
557static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
558				       struct dsa_notifier_tag_proto_info *info)
559{
560	const struct dsa_device_ops *tag_ops = info->tag_ops;
561	int port, err;
562
563	if (!ds->ops->change_tag_protocol)
564		return -EOPNOTSUPP;
565
566	ASSERT_RTNL();
567
568	for (port = 0; port < ds->num_ports; port++) {
569		if (!dsa_is_cpu_port(ds, port))
570			continue;
571
572		err = ds->ops->change_tag_protocol(ds, port, tag_ops->proto);
573		if (err)
574			return err;
575
576		dsa_port_set_tag_protocol(dsa_to_port(ds, port), tag_ops);
577	}
578
579	/* Now that changing the tag protocol can no longer fail, let's update
580	 * the remaining bits which are "duplicated for faster access", and the
581	 * bits that depend on the tagger, such as the MTU.
582	 */
583	for (port = 0; port < ds->num_ports; port++) {
584		if (dsa_is_user_port(ds, port)) {
585			struct net_device *slave;
586
587			slave = dsa_to_port(ds, port)->slave;
588			dsa_slave_setup_tagger(slave);
589
590			/* rtnl_mutex is held in dsa_tree_change_tag_proto */
591			dsa_slave_change_mtu(slave, slave->mtu);
592		}
593	}
594
595	return 0;
596}
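/*
 * Editorial sketch, not part of switch.c: the ->change_tag_protocol() op
 * invoked per CPU port above is expected to reject taggers the hardware
 * cannot parse and reprogram the CPU port otherwise. The frame-mode helper
 * below is hypothetical; the protocol constants are real enum
 * dsa_tag_protocol values.
 */
static int example_change_tag_protocol(struct dsa_switch *ds, int port,
				       enum dsa_tag_protocol proto)
{
	switch (proto) {
	case DSA_TAG_PROTO_DSA:
	case DSA_TAG_PROTO_EDSA:
		return example_cpu_port_set_frame_mode(ds, port, proto);
	default:
		return -EOPNOTSUPP;
	}
}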
597
598static int dsa_switch_mrp_add(struct dsa_switch *ds,
599			      struct dsa_notifier_mrp_info *info)
600{
601	if (!ds->ops->port_mrp_add)
602		return -EOPNOTSUPP;
603
604	if (ds->index == info->sw_index)
605		return ds->ops->port_mrp_add(ds, info->port, info->mrp);
606
607	return 0;
608}
609
610static int dsa_switch_mrp_del(struct dsa_switch *ds,
611			      struct dsa_notifier_mrp_info *info)
612{
613	if (!ds->ops->port_mrp_del)
614		return -EOPNOTSUPP;
615
616	if (ds->index == info->sw_index)
617		return ds->ops->port_mrp_del(ds, info->port, info->mrp);
618
619	return 0;
620}
621
622static int
623dsa_switch_mrp_add_ring_role(struct dsa_switch *ds,
624			     struct dsa_notifier_mrp_ring_role_info *info)
625{
626	if (!ds->ops->port_mrp_add)
627		return -EOPNOTSUPP;
628
629	if (ds->index == info->sw_index)
630		return ds->ops->port_mrp_add_ring_role(ds, info->port,
631						       info->mrp);
632
633	return 0;
634}
635
636static int
637dsa_switch_mrp_del_ring_role(struct dsa_switch *ds,
638			     struct dsa_notifier_mrp_ring_role_info *info)
639{
640	if (!ds->ops->port_mrp_del)
641		return -EOPNOTSUPP;
642
643	if (ds->index == info->sw_index)
644		return ds->ops->port_mrp_del_ring_role(ds, info->port,
645						       info->mrp);
646
647	return 0;
648}
649
650static int dsa_switch_event(struct notifier_block *nb,
651			    unsigned long event, void *info)
652{
653	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
654	int err;
655
656	switch (event) {
657	case DSA_NOTIFIER_AGEING_TIME:
658		err = dsa_switch_ageing_time(ds, info);
659		break;
660	case DSA_NOTIFIER_BRIDGE_JOIN:
661		err = dsa_switch_bridge_join(ds, info);
662		break;
663	case DSA_NOTIFIER_BRIDGE_LEAVE:
664		err = dsa_switch_bridge_leave(ds, info);
665		break;
666	case DSA_NOTIFIER_FDB_ADD:
667		err = dsa_switch_fdb_add(ds, info);
668		break;
669	case DSA_NOTIFIER_FDB_DEL:
670		err = dsa_switch_fdb_del(ds, info);
671		break;
672	case DSA_NOTIFIER_HOST_FDB_ADD:
673		err = dsa_switch_host_fdb_add(ds, info);
674		break;
675	case DSA_NOTIFIER_HOST_FDB_DEL:
676		err = dsa_switch_host_fdb_del(ds, info);
677		break;
678	case DSA_NOTIFIER_HSR_JOIN:
679		err = dsa_switch_hsr_join(ds, info);
680		break;
681	case DSA_NOTIFIER_HSR_LEAVE:
682		err = dsa_switch_hsr_leave(ds, info);
683		break;
684	case DSA_NOTIFIER_LAG_CHANGE:
685		err = dsa_switch_lag_change(ds, info);
686		break;
687	case DSA_NOTIFIER_LAG_JOIN:
688		err = dsa_switch_lag_join(ds, info);
689		break;
690	case DSA_NOTIFIER_LAG_LEAVE:
691		err = dsa_switch_lag_leave(ds, info);
692		break;
693	case DSA_NOTIFIER_MDB_ADD:
694		err = dsa_switch_mdb_add(ds, info);
695		break;
696	case DSA_NOTIFIER_MDB_DEL:
697		err = dsa_switch_mdb_del(ds, info);
698		break;
699	case DSA_NOTIFIER_HOST_MDB_ADD:
700		err = dsa_switch_host_mdb_add(ds, info);
701		break;
702	case DSA_NOTIFIER_HOST_MDB_DEL:
703		err = dsa_switch_host_mdb_del(ds, info);
704		break;
705	case DSA_NOTIFIER_VLAN_ADD:
706		err = dsa_switch_vlan_add(ds, info);
707		break;
708	case DSA_NOTIFIER_VLAN_DEL:
709		err = dsa_switch_vlan_del(ds, info);
710		break;
711	case DSA_NOTIFIER_MTU:
712		err = dsa_switch_mtu(ds, info);
713		break;
714	case DSA_NOTIFIER_TAG_PROTO:
715		err = dsa_switch_change_tag_proto(ds, info);
716		break;
717	case DSA_NOTIFIER_MRP_ADD:
718		err = dsa_switch_mrp_add(ds, info);
719		break;
720	case DSA_NOTIFIER_MRP_DEL:
721		err = dsa_switch_mrp_del(ds, info);
722		break;
723	case DSA_NOTIFIER_MRP_ADD_RING_ROLE:
724		err = dsa_switch_mrp_add_ring_role(ds, info);
725		break;
726	case DSA_NOTIFIER_MRP_DEL_RING_ROLE:
727		err = dsa_switch_mrp_del_ring_role(ds, info);
728		break;
729	default:
730		err = -EOPNOTSUPP;
731		break;
732	}
733
734	if (err)
735		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
736			event, err);
737
738	return notifier_from_errno(err);
739}
740
741int dsa_switch_register_notifier(struct dsa_switch *ds)
742{
743	ds->nb.notifier_call = dsa_switch_event;
744
745	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
746}
747
748void dsa_switch_unregister_notifier(struct dsa_switch *ds)
749{
750	int err;
751
752	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
753	if (err)
754		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
755}
net/dsa/switch.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Handling of a single switch chip, part of a switch fabric
   4 *
   5 * Copyright (c) 2017 Savoir-faire Linux Inc.
   6 *	Vivien Didelot <vivien.didelot@savoirfairelinux.com>
   7 */
   8
   9#include <linux/if_bridge.h>
  10#include <linux/netdevice.h>
  11#include <linux/notifier.h>
  12#include <linux/if_vlan.h>
  13#include <net/switchdev.h>
  14
  15#include "dsa.h"
  16#include "netlink.h"
  17#include "port.h"
  18#include "slave.h"
  19#include "switch.h"
  20#include "tag_8021q.h"
  21
  22static unsigned int dsa_switch_fastest_ageing_time(struct dsa_switch *ds,
  23						   unsigned int ageing_time)
  24{
  25	struct dsa_port *dp;
  26
  27	dsa_switch_for_each_port(dp, ds)
  28		if (dp->ageing_time && dp->ageing_time < ageing_time)
  29			ageing_time = dp->ageing_time;
  30
  31	return ageing_time;
  32}
  33
  34static int dsa_switch_ageing_time(struct dsa_switch *ds,
  35				  struct dsa_notifier_ageing_time_info *info)
  36{
  37	unsigned int ageing_time = info->ageing_time;
  38
  39	if (ds->ageing_time_min && ageing_time < ds->ageing_time_min)
  40		return -ERANGE;
  41
  42	if (ds->ageing_time_max && ageing_time > ds->ageing_time_max)
  43		return -ERANGE;
  44
  45	/* Program the fastest ageing time in case of multiple bridges */
  46	ageing_time = dsa_switch_fastest_ageing_time(ds, ageing_time);
  47
  48	if (ds->ops->set_ageing_time)
  49		return ds->ops->set_ageing_time(ds, ageing_time);
  50
  51	return 0;
  52}
  53
  54static bool dsa_port_mtu_match(struct dsa_port *dp,
  55			       struct dsa_notifier_mtu_info *info)
  56{
  57	return dp == info->dp || dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp);
  58}
  59
  60static int dsa_switch_mtu(struct dsa_switch *ds,
  61			  struct dsa_notifier_mtu_info *info)
  62{
  63	struct dsa_port *dp;
  64	int ret;
  65
  66	if (!ds->ops->port_change_mtu)
  67		return -EOPNOTSUPP;
  68
  69	dsa_switch_for_each_port(dp, ds) {
  70		if (dsa_port_mtu_match(dp, info)) {
  71			ret = ds->ops->port_change_mtu(ds, dp->index,
  72						       info->mtu);
  73			if (ret)
  74				return ret;
  75		}
  76	}
  77
  78	return 0;
  79}
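/*
 * Editorial sketch, not part of switch.c: the payload consumed by
 * dsa_switch_mtu() above is built by the port-level code and pushed through
 * dsa_tree_notify() (defined near the end of this file). The wrapper name is
 * hypothetical and only the ->dp and ->mtu fields used above are assumed.
 */
static int example_notify_mtu_change(struct dsa_port *dp, int new_mtu)
{
	struct dsa_notifier_mtu_info info = {
		.dp = dp,
		.mtu = new_mtu,
	};

	return dsa_tree_notify(dp->ds->dst, DSA_NOTIFIER_MTU, &info);
}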
  80
  81static int dsa_switch_bridge_join(struct dsa_switch *ds,
  82				  struct dsa_notifier_bridge_info *info)
  83{
  84	int err;
  85
  86	if (info->dp->ds == ds) {
  87		if (!ds->ops->port_bridge_join)
  88			return -EOPNOTSUPP;
  89
  90		err = ds->ops->port_bridge_join(ds, info->dp->index,
  91						info->bridge,
  92						&info->tx_fwd_offload,
  93						info->extack);
  94		if (err)
  95			return err;
  96	}
  97
  98	if (info->dp->ds != ds && ds->ops->crosschip_bridge_join) {
  99		err = ds->ops->crosschip_bridge_join(ds,
 100						     info->dp->ds->dst->index,
 101						     info->dp->ds->index,
 102						     info->dp->index,
 103						     info->bridge,
 104						     info->extack);
 105		if (err)
 106			return err;
 107	}
 108
 109	return 0;
 110}
 111
 112static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 113				   struct dsa_notifier_bridge_info *info)
 114{
 115	if (info->dp->ds == ds && ds->ops->port_bridge_leave)
 116		ds->ops->port_bridge_leave(ds, info->dp->index, info->bridge);
 117
 118	if (info->dp->ds != ds && ds->ops->crosschip_bridge_leave)
 119		ds->ops->crosschip_bridge_leave(ds, info->dp->ds->dst->index,
 120						info->dp->ds->index,
 121						info->dp->index,
 122						info->bridge);
 123
 124	return 0;
 125}
 126
 127/* Matches for all upstream-facing ports (the CPU port and all upstream-facing
 128 * DSA links) that sit between the targeted port on which the notifier was
 129 * emitted and its dedicated CPU port.
 130 */
 131static bool dsa_port_host_address_match(struct dsa_port *dp,
 132					const struct dsa_port *targeted_dp)
 133{
 134	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
 135
 136	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
 137		return dp->index == dsa_towards_port(dp->ds, cpu_dp->ds->index,
 138						     cpu_dp->index);
 139
 140	return false;
 141}
 142
 143static struct dsa_mac_addr *dsa_mac_addr_find(struct list_head *addr_list,
 144					      const unsigned char *addr, u16 vid,
 145					      struct dsa_db db)
 146{
 147	struct dsa_mac_addr *a;
 148
 149	list_for_each_entry(a, addr_list, list)
 150		if (ether_addr_equal(a->addr, addr) && a->vid == vid &&
 151		    dsa_db_equal(&a->db, &db))
 152			return a;
 153
 154	return NULL;
 155}
 156
 157static int dsa_port_do_mdb_add(struct dsa_port *dp,
 158			       const struct switchdev_obj_port_mdb *mdb,
 159			       struct dsa_db db)
 160{
 161	struct dsa_switch *ds = dp->ds;
 162	struct dsa_mac_addr *a;
 163	int port = dp->index;
 164	int err = 0;
 165
 166	/* No need to bother with refcounting for user ports */
 167	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
 168		return ds->ops->port_mdb_add(ds, port, mdb, db);
 169
 170	mutex_lock(&dp->addr_lists_lock);
 171
 172	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
 173	if (a) {
 174		refcount_inc(&a->refcount);
 175		goto out;
 176	}
 177
 178	a = kzalloc(sizeof(*a), GFP_KERNEL);
 179	if (!a) {
 180		err = -ENOMEM;
 181		goto out;
 182	}
 183
 184	err = ds->ops->port_mdb_add(ds, port, mdb, db);
 185	if (err) {
 186		kfree(a);
 187		goto out;
 188	}
 189
 190	ether_addr_copy(a->addr, mdb->addr);
 191	a->vid = mdb->vid;
 192	a->db = db;
 193	refcount_set(&a->refcount, 1);
 194	list_add_tail(&a->list, &dp->mdbs);
 195
 196out:
 197	mutex_unlock(&dp->addr_lists_lock);
 198
 199	return err;
 200}
 201
 202static int dsa_port_do_mdb_del(struct dsa_port *dp,
 203			       const struct switchdev_obj_port_mdb *mdb,
 204			       struct dsa_db db)
 205{
 206	struct dsa_switch *ds = dp->ds;
 207	struct dsa_mac_addr *a;
 208	int port = dp->index;
 209	int err = 0;
 210
 211	/* No need to bother with refcounting for user ports */
 212	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
 213		return ds->ops->port_mdb_del(ds, port, mdb, db);
 214
 215	mutex_lock(&dp->addr_lists_lock);
 216
 217	a = dsa_mac_addr_find(&dp->mdbs, mdb->addr, mdb->vid, db);
 218	if (!a) {
 219		err = -ENOENT;
 220		goto out;
 221	}
 222
 223	if (!refcount_dec_and_test(&a->refcount))
 224		goto out;
 225
 226	err = ds->ops->port_mdb_del(ds, port, mdb, db);
 227	if (err) {
 228		refcount_set(&a->refcount, 1);
 229		goto out;
 230	}
 231
 232	list_del(&a->list);
 233	kfree(a);
 234
 235out:
 236	mutex_unlock(&dp->addr_lists_lock);
 237
 238	return err;
 239}
 240
 241static int dsa_port_do_fdb_add(struct dsa_port *dp, const unsigned char *addr,
 242			       u16 vid, struct dsa_db db)
 243{
 244	struct dsa_switch *ds = dp->ds;
 245	struct dsa_mac_addr *a;
 246	int port = dp->index;
 247	int err = 0;
 248
 249	/* No need to bother with refcounting for user ports */
 250	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
 251		return ds->ops->port_fdb_add(ds, port, addr, vid, db);
 252
 253	mutex_lock(&dp->addr_lists_lock);
 254
 255	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
 256	if (a) {
 257		refcount_inc(&a->refcount);
 258		goto out;
 259	}
 260
 261	a = kzalloc(sizeof(*a), GFP_KERNEL);
 262	if (!a) {
 263		err = -ENOMEM;
 264		goto out;
 265	}
 266
 267	err = ds->ops->port_fdb_add(ds, port, addr, vid, db);
 268	if (err) {
 269		kfree(a);
 270		goto out;
 271	}
 272
 273	ether_addr_copy(a->addr, addr);
 274	a->vid = vid;
 275	a->db = db;
 276	refcount_set(&a->refcount, 1);
 277	list_add_tail(&a->list, &dp->fdbs);
 278
 279out:
 280	mutex_unlock(&dp->addr_lists_lock);
 281
 282	return err;
 283}
 284
 285static int dsa_port_do_fdb_del(struct dsa_port *dp, const unsigned char *addr,
 286			       u16 vid, struct dsa_db db)
 287{
 288	struct dsa_switch *ds = dp->ds;
 289	struct dsa_mac_addr *a;
 290	int port = dp->index;
 291	int err = 0;
 292
 293	/* No need to bother with refcounting for user ports */
 294	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
 295		return ds->ops->port_fdb_del(ds, port, addr, vid, db);
 296
 297	mutex_lock(&dp->addr_lists_lock);
 298
 299	a = dsa_mac_addr_find(&dp->fdbs, addr, vid, db);
 300	if (!a) {
 301		err = -ENOENT;
 302		goto out;
 303	}
 304
 305	if (!refcount_dec_and_test(&a->refcount))
 306		goto out;
 307
 308	err = ds->ops->port_fdb_del(ds, port, addr, vid, db);
 309	if (err) {
 310		refcount_set(&a->refcount, 1);
 311		goto out;
 312	}
 313
 314	list_del(&a->list);
 315	kfree(a);
 316
 317out:
 318	mutex_unlock(&dp->addr_lists_lock);
 319
 320	return err;
 321}
 322
 323static int dsa_switch_do_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag *lag,
 324				     const unsigned char *addr, u16 vid,
 325				     struct dsa_db db)
 326{
 327	struct dsa_mac_addr *a;
 328	int err = 0;
 329
 330	mutex_lock(&lag->fdb_lock);
 331
 332	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
 333	if (a) {
 334		refcount_inc(&a->refcount);
 335		goto out;
 336	}
 337
 338	a = kzalloc(sizeof(*a), GFP_KERNEL);
 339	if (!a) {
 340		err = -ENOMEM;
 341		goto out;
 342	}
 343
 344	err = ds->ops->lag_fdb_add(ds, *lag, addr, vid, db);
 345	if (err) {
 346		kfree(a);
 347		goto out;
 348	}
 349
 350	ether_addr_copy(a->addr, addr);
 351	a->vid = vid;
 352	a->db = db;
 353	refcount_set(&a->refcount, 1);
 354	list_add_tail(&a->list, &lag->fdbs);
 355
 356out:
 357	mutex_unlock(&lag->fdb_lock);
 358
 359	return err;
 360}
 361
 362static int dsa_switch_do_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag *lag,
 363				     const unsigned char *addr, u16 vid,
 364				     struct dsa_db db)
 365{
 366	struct dsa_mac_addr *a;
 367	int err = 0;
 368
 369	mutex_lock(&lag->fdb_lock);
 370
 371	a = dsa_mac_addr_find(&lag->fdbs, addr, vid, db);
 372	if (!a) {
 373		err = -ENOENT;
 374		goto out;
 375	}
 376
 377	if (!refcount_dec_and_test(&a->refcount))
 378		goto out;
 379
 380	err = ds->ops->lag_fdb_del(ds, *lag, addr, vid, db);
 381	if (err) {
 382		refcount_set(&a->refcount, 1);
 383		goto out;
 384	}
 385
 386	list_del(&a->list);
 387	kfree(a);
 388
 389out:
 390	mutex_unlock(&lag->fdb_lock);
 391
 392	return err;
 393}
 394
 395static int dsa_switch_host_fdb_add(struct dsa_switch *ds,
 396				   struct dsa_notifier_fdb_info *info)
 397{
 398	struct dsa_port *dp;
 399	int err = 0;
 400
 401	if (!ds->ops->port_fdb_add)
 402		return -EOPNOTSUPP;
 403
 404	dsa_switch_for_each_port(dp, ds) {
 405		if (dsa_port_host_address_match(dp, info->dp)) {
 406			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
 407				err = dsa_switch_do_lag_fdb_add(ds, dp->lag,
 408								info->addr,
 409								info->vid,
 410								info->db);
 411			} else {
 412				err = dsa_port_do_fdb_add(dp, info->addr,
 413							  info->vid, info->db);
 414			}
 415			if (err)
 416				break;
 417		}
 418	}
 419
 420	return err;
 421}
 422
 423static int dsa_switch_host_fdb_del(struct dsa_switch *ds,
 424				   struct dsa_notifier_fdb_info *info)
 425{
 426	struct dsa_port *dp;
 427	int err = 0;
 428
 429	if (!ds->ops->port_fdb_del)
 430		return -EOPNOTSUPP;
 431
 432	dsa_switch_for_each_port(dp, ds) {
 433		if (dsa_port_host_address_match(dp, info->dp)) {
 434			if (dsa_port_is_cpu(dp) && info->dp->cpu_port_in_lag) {
 435				err = dsa_switch_do_lag_fdb_del(ds, dp->lag,
 436								info->addr,
 437								info->vid,
 438								info->db);
 439			} else {
 440				err = dsa_port_do_fdb_del(dp, info->addr,
 441							  info->vid, info->db);
 442			}
 443			if (err)
 444				break;
 445		}
 446	}
 447
 448	return err;
 449}
 450
 451static int dsa_switch_fdb_add(struct dsa_switch *ds,
 452			      struct dsa_notifier_fdb_info *info)
 453{
 454	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 455	struct dsa_port *dp = dsa_to_port(ds, port);
 456
 457	if (!ds->ops->port_fdb_add)
 458		return -EOPNOTSUPP;
 459
 460	return dsa_port_do_fdb_add(dp, info->addr, info->vid, info->db);
 461}
 462
 463static int dsa_switch_fdb_del(struct dsa_switch *ds,
 464			      struct dsa_notifier_fdb_info *info)
 465{
 466	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 467	struct dsa_port *dp = dsa_to_port(ds, port);
 468
 469	if (!ds->ops->port_fdb_del)
 470		return -EOPNOTSUPP;
 471
 472	return dsa_port_do_fdb_del(dp, info->addr, info->vid, info->db);
 473}
 474
 475static int dsa_switch_lag_fdb_add(struct dsa_switch *ds,
 476				  struct dsa_notifier_lag_fdb_info *info)
 477{
 478	struct dsa_port *dp;
 479
 480	if (!ds->ops->lag_fdb_add)
 481		return -EOPNOTSUPP;
 482
 483	/* Notify switch only if it has a port in this LAG */
 484	dsa_switch_for_each_port(dp, ds)
 485		if (dsa_port_offloads_lag(dp, info->lag))
 486			return dsa_switch_do_lag_fdb_add(ds, info->lag,
 487							 info->addr, info->vid,
 488							 info->db);
 489
 490	return 0;
 491}
 492
 493static int dsa_switch_lag_fdb_del(struct dsa_switch *ds,
 494				  struct dsa_notifier_lag_fdb_info *info)
 495{
 496	struct dsa_port *dp;
 497
 498	if (!ds->ops->lag_fdb_del)
 499		return -EOPNOTSUPP;
 500
 501	/* Notify switch only if it has a port in this LAG */
 502	dsa_switch_for_each_port(dp, ds)
 503		if (dsa_port_offloads_lag(dp, info->lag))
 504			return dsa_switch_do_lag_fdb_del(ds, info->lag,
 505							 info->addr, info->vid,
 506							 info->db);
 507
 508	return 0;
 509}
 510
 511static int dsa_switch_lag_change(struct dsa_switch *ds,
 512				 struct dsa_notifier_lag_info *info)
 513{
 514	if (info->dp->ds == ds && ds->ops->port_lag_change)
 515		return ds->ops->port_lag_change(ds, info->dp->index);
 516
 517	if (info->dp->ds != ds && ds->ops->crosschip_lag_change)
 518		return ds->ops->crosschip_lag_change(ds, info->dp->ds->index,
 519						     info->dp->index);
 520
 521	return 0;
 522}
 523
 524static int dsa_switch_lag_join(struct dsa_switch *ds,
 525			       struct dsa_notifier_lag_info *info)
 526{
 527	if (info->dp->ds == ds && ds->ops->port_lag_join)
 528		return ds->ops->port_lag_join(ds, info->dp->index, info->lag,
 529					      info->info, info->extack);
 530
 531	if (info->dp->ds != ds && ds->ops->crosschip_lag_join)
 532		return ds->ops->crosschip_lag_join(ds, info->dp->ds->index,
 533						   info->dp->index, info->lag,
 534						   info->info, info->extack);
 535
 536	return -EOPNOTSUPP;
 537}
 538
 539static int dsa_switch_lag_leave(struct dsa_switch *ds,
 540				struct dsa_notifier_lag_info *info)
 541{
 542	if (info->dp->ds == ds && ds->ops->port_lag_leave)
 543		return ds->ops->port_lag_leave(ds, info->dp->index, info->lag);
 544
 545	if (info->dp->ds != ds && ds->ops->crosschip_lag_leave)
 546		return ds->ops->crosschip_lag_leave(ds, info->dp->ds->index,
 547						    info->dp->index, info->lag);
 548
 549	return -EOPNOTSUPP;
 550}
 551
 552static int dsa_switch_mdb_add(struct dsa_switch *ds,
 553			      struct dsa_notifier_mdb_info *info)
 554{
 555	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 556	struct dsa_port *dp = dsa_to_port(ds, port);
 557
 558	if (!ds->ops->port_mdb_add)
 559		return -EOPNOTSUPP;
 560
 561	return dsa_port_do_mdb_add(dp, info->mdb, info->db);
 562}
 563
 564static int dsa_switch_mdb_del(struct dsa_switch *ds,
 565			      struct dsa_notifier_mdb_info *info)
 566{
 567	int port = dsa_towards_port(ds, info->dp->ds->index, info->dp->index);
 568	struct dsa_port *dp = dsa_to_port(ds, port);
 569
 570	if (!ds->ops->port_mdb_del)
 571		return -EOPNOTSUPP;
 572
 573	return dsa_port_do_mdb_del(dp, info->mdb, info->db);
 574}
 575
 576static int dsa_switch_host_mdb_add(struct dsa_switch *ds,
 577				   struct dsa_notifier_mdb_info *info)
 578{
 579	struct dsa_port *dp;
 580	int err = 0;
 581
 582	if (!ds->ops->port_mdb_add)
 583		return -EOPNOTSUPP;
 584
 585	dsa_switch_for_each_port(dp, ds) {
 586		if (dsa_port_host_address_match(dp, info->dp)) {
 587			err = dsa_port_do_mdb_add(dp, info->mdb, info->db);
 588			if (err)
 589				break;
 590		}
 591	}
 592
 593	return err;
 594}
 595
 596static int dsa_switch_host_mdb_del(struct dsa_switch *ds,
 597				   struct dsa_notifier_mdb_info *info)
 598{
 599	struct dsa_port *dp;
 600	int err = 0;
 601
 602	if (!ds->ops->port_mdb_del)
 603		return -EOPNOTSUPP;
 604
 605	dsa_switch_for_each_port(dp, ds) {
 606		if (dsa_port_host_address_match(dp, info->dp)) {
 607			err = dsa_port_do_mdb_del(dp, info->mdb, info->db);
 608			if (err)
 609				break;
 610		}
 611	}
 612
 613	return err;
 614}
 615
 616/* Port VLANs match on the targeted port and on all DSA ports */
 617static bool dsa_port_vlan_match(struct dsa_port *dp,
 618				struct dsa_notifier_vlan_info *info)
 619{
 620	return dsa_port_is_dsa(dp) || dp == info->dp;
 621}
 622
 623/* Host VLANs match on the targeted port's CPU port, and on all DSA ports
 624 * (upstream and downstream) of that switch and its upstream switches.
 625 */
 626static bool dsa_port_host_vlan_match(struct dsa_port *dp,
 627				     const struct dsa_port *targeted_dp)
 628{
 629	struct dsa_port *cpu_dp = targeted_dp->cpu_dp;
 630
 631	if (dsa_switch_is_upstream_of(dp->ds, targeted_dp->ds))
 632		return dsa_port_is_dsa(dp) || dp == cpu_dp;
 633
 634	return false;
 635}
 636
 637static struct dsa_vlan *dsa_vlan_find(struct list_head *vlan_list,
 638				      const struct switchdev_obj_port_vlan *vlan)
 639{
 640	struct dsa_vlan *v;
 641
 642	list_for_each_entry(v, vlan_list, list)
 643		if (v->vid == vlan->vid)
 644			return v;
 645
 646	return NULL;
 647}
 648
 649static int dsa_port_do_vlan_add(struct dsa_port *dp,
 650				const struct switchdev_obj_port_vlan *vlan,
 651				struct netlink_ext_ack *extack)
 652{
 653	struct dsa_switch *ds = dp->ds;
 654	int port = dp->index;
 655	struct dsa_vlan *v;
 656	int err = 0;
 657
 658	/* No need to bother with refcounting for user ports. */
 659	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
 660		return ds->ops->port_vlan_add(ds, port, vlan, extack);
 661
 662	/* No need to propagate on shared ports the existing VLANs that were
 663	 * re-notified after just the flags have changed. This would cause a
 664	 * refcount bump which we need to avoid, since it unbalances the
 665	 * additions with the deletions.
 666	 */
 667	if (vlan->changed)
 668		return 0;
 669
 670	mutex_lock(&dp->vlans_lock);
 671
 672	v = dsa_vlan_find(&dp->vlans, vlan);
 673	if (v) {
 674		refcount_inc(&v->refcount);
 675		goto out;
 676	}
 677
 678	v = kzalloc(sizeof(*v), GFP_KERNEL);
 679	if (!v) {
 680		err = -ENOMEM;
 681		goto out;
 682	}
 683
 684	err = ds->ops->port_vlan_add(ds, port, vlan, extack);
 685	if (err) {
 686		kfree(v);
 687		goto out;
 688	}
 689
 690	v->vid = vlan->vid;
 691	refcount_set(&v->refcount, 1);
 692	list_add_tail(&v->list, &dp->vlans);
 693
 694out:
 695	mutex_unlock(&dp->vlans_lock);
 696
 697	return err;
 698}
 699
 700static int dsa_port_do_vlan_del(struct dsa_port *dp,
 701				const struct switchdev_obj_port_vlan *vlan)
 702{
 703	struct dsa_switch *ds = dp->ds;
 704	int port = dp->index;
 705	struct dsa_vlan *v;
 706	int err = 0;
 707
 708	/* No need to bother with refcounting for user ports */
 709	if (!(dsa_port_is_cpu(dp) || dsa_port_is_dsa(dp)))
 710		return ds->ops->port_vlan_del(ds, port, vlan);
 711
 712	mutex_lock(&dp->vlans_lock);
 713
 714	v = dsa_vlan_find(&dp->vlans, vlan);
 715	if (!v) {
 716		err = -ENOENT;
 717		goto out;
 718	}
 719
 720	if (!refcount_dec_and_test(&v->refcount))
 721		goto out;
 722
 723	err = ds->ops->port_vlan_del(ds, port, vlan);
 724	if (err) {
 725		refcount_set(&v->refcount, 1);
 726		goto out;
 727	}
 728
 729	list_del(&v->list);
 730	kfree(v);
 731
 732out:
 733	mutex_unlock(&dp->vlans_lock);
 734
 735	return err;
 736}
 737
 738static int dsa_switch_vlan_add(struct dsa_switch *ds,
 739			       struct dsa_notifier_vlan_info *info)
 740{
 741	struct dsa_port *dp;
 742	int err;
 743
 744	if (!ds->ops->port_vlan_add)
 745		return -EOPNOTSUPP;
 746
 747	dsa_switch_for_each_port(dp, ds) {
 748		if (dsa_port_vlan_match(dp, info)) {
 749			err = dsa_port_do_vlan_add(dp, info->vlan,
 750						   info->extack);
 751			if (err)
 752				return err;
 753		}
 754	}
 755
 756	return 0;
 757}
 758
 759static int dsa_switch_vlan_del(struct dsa_switch *ds,
 760			       struct dsa_notifier_vlan_info *info)
 761{
 762	struct dsa_port *dp;
 763	int err;
 764
 765	if (!ds->ops->port_vlan_del)
 766		return -EOPNOTSUPP;
 767
 768	dsa_switch_for_each_port(dp, ds) {
 769		if (dsa_port_vlan_match(dp, info)) {
 770			err = dsa_port_do_vlan_del(dp, info->vlan);
 771			if (err)
 772				return err;
 773		}
 774	}
 775
 776	return 0;
 777}
 778
 779static int dsa_switch_host_vlan_add(struct dsa_switch *ds,
 780				    struct dsa_notifier_vlan_info *info)
 781{
 782	struct dsa_port *dp;
 783	int err;
 784
 785	if (!ds->ops->port_vlan_add)
 786		return -EOPNOTSUPP;
 787
 788	dsa_switch_for_each_port(dp, ds) {
 789		if (dsa_port_host_vlan_match(dp, info->dp)) {
 790			err = dsa_port_do_vlan_add(dp, info->vlan,
 791						   info->extack);
 792			if (err)
 793				return err;
 794		}
 795	}
 796
 797	return 0;
 798}
 799
 800static int dsa_switch_host_vlan_del(struct dsa_switch *ds,
 801				    struct dsa_notifier_vlan_info *info)
 802{
 803	struct dsa_port *dp;
 804	int err;
 805
 806	if (!ds->ops->port_vlan_del)
 807		return -EOPNOTSUPP;
 808
 809	dsa_switch_for_each_port(dp, ds) {
 810		if (dsa_port_host_vlan_match(dp, info->dp)) {
 811			err = dsa_port_do_vlan_del(dp, info->vlan);
 812			if (err)
 813				return err;
 814		}
 815	}
 816
 817	return 0;
 818}
 819
 820static int dsa_switch_change_tag_proto(struct dsa_switch *ds,
 821				       struct dsa_notifier_tag_proto_info *info)
 822{
 823	const struct dsa_device_ops *tag_ops = info->tag_ops;
 824	struct dsa_port *dp, *cpu_dp;
 825	int err;
 826
 827	if (!ds->ops->change_tag_protocol)
 828		return -EOPNOTSUPP;
 829
 830	ASSERT_RTNL();
 831
 832	err = ds->ops->change_tag_protocol(ds, tag_ops->proto);
 833	if (err)
 834		return err;
 835
 836	dsa_switch_for_each_cpu_port(cpu_dp, ds)
 837		dsa_port_set_tag_protocol(cpu_dp, tag_ops);
 838
 839	/* Now that changing the tag protocol can no longer fail, let's update
 840	 * the remaining bits which are "duplicated for faster access", and the
 841	 * bits that depend on the tagger, such as the MTU.
 842	 */
 843	dsa_switch_for_each_user_port(dp, ds) {
 844		struct net_device *slave = dp->slave;
 845
 846		dsa_slave_setup_tagger(slave);
 847
 848		/* rtnl_mutex is held in dsa_tree_change_tag_proto */
 849		dsa_slave_change_mtu(slave, slave->mtu);
 850	}
 851
 852	return 0;
 853}
 854
 855/* We use the same cross-chip notifiers to inform both the tagger side, as well
 856 * as the switch side, of connection and disconnection events.
 857 * Since ds->tagger_data is owned by the tagger, it isn't a hard error if the
 858 * switch side doesn't support connecting to this tagger, and therefore, the
 859 * fact that we don't disconnect the tagger side doesn't constitute a memory
 860 * leak: the tagger will still operate with persistent per-switch memory, just
 861 * with the switch side unconnected to it. What does constitute a hard error is
 862 * when the switch side supports connecting but fails.
 863 */
 864static int
 865dsa_switch_connect_tag_proto(struct dsa_switch *ds,
 866			     struct dsa_notifier_tag_proto_info *info)
 867{
 868	const struct dsa_device_ops *tag_ops = info->tag_ops;
 869	int err;
 870
 871	/* Notify the new tagger about the connection to this switch */
 872	if (tag_ops->connect) {
 873		err = tag_ops->connect(ds);
 874		if (err)
 875			return err;
 876	}
 877
 878	if (!ds->ops->connect_tag_protocol)
 879		return -EOPNOTSUPP;
 880
 881	/* Notify the switch about the connection to the new tagger */
 882	err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
 883	if (err) {
 884		/* Revert the new tagger's connection to this tree */
 885		if (tag_ops->disconnect)
 886			tag_ops->disconnect(ds);
 887		return err;
 888	}
 889
 890	return 0;
 891}
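/*
 * Editorial sketch, not part of switch.c: the tag_ops->connect()/disconnect()
 * pair used above typically allocates and frees per-switch tagger state in
 * ds->tagger_data. The structure name below is hypothetical.
 */
static int example_tagger_connect(struct dsa_switch *ds)
{
	struct example_tagger_data *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	ds->tagger_data = priv;

	return 0;
}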
 892
 893static int
 894dsa_switch_disconnect_tag_proto(struct dsa_switch *ds,
 895				struct dsa_notifier_tag_proto_info *info)
 896{
 897	const struct dsa_device_ops *tag_ops = info->tag_ops;
 898
 899	/* Notify the tagger about the disconnection from this switch */
 900	if (tag_ops->disconnect && ds->tagger_data)
 901		tag_ops->disconnect(ds);
 902
 903	/* No need to notify the switch, since it shouldn't have any
 904	 * resources to tear down
 905	 */
 906	return 0;
 907}
 908
 909static int
 910dsa_switch_master_state_change(struct dsa_switch *ds,
 911			       struct dsa_notifier_master_state_info *info)
 912{
 913	if (!ds->ops->master_state_change)
 914		return 0;
 915
 916	ds->ops->master_state_change(ds, info->master, info->operational);
 917
 918	return 0;
 919}
 920
 921static int dsa_switch_event(struct notifier_block *nb,
 922			    unsigned long event, void *info)
 923{
 924	struct dsa_switch *ds = container_of(nb, struct dsa_switch, nb);
 925	int err;
 926
 927	switch (event) {
 928	case DSA_NOTIFIER_AGEING_TIME:
 929		err = dsa_switch_ageing_time(ds, info);
 930		break;
 931	case DSA_NOTIFIER_BRIDGE_JOIN:
 932		err = dsa_switch_bridge_join(ds, info);
 933		break;
 934	case DSA_NOTIFIER_BRIDGE_LEAVE:
 935		err = dsa_switch_bridge_leave(ds, info);
 936		break;
 937	case DSA_NOTIFIER_FDB_ADD:
 938		err = dsa_switch_fdb_add(ds, info);
 939		break;
 940	case DSA_NOTIFIER_FDB_DEL:
 941		err = dsa_switch_fdb_del(ds, info);
 942		break;
 943	case DSA_NOTIFIER_HOST_FDB_ADD:
 944		err = dsa_switch_host_fdb_add(ds, info);
 945		break;
 946	case DSA_NOTIFIER_HOST_FDB_DEL:
 947		err = dsa_switch_host_fdb_del(ds, info);
 948		break;
 949	case DSA_NOTIFIER_LAG_FDB_ADD:
 950		err = dsa_switch_lag_fdb_add(ds, info);
 951		break;
 952	case DSA_NOTIFIER_LAG_FDB_DEL:
 953		err = dsa_switch_lag_fdb_del(ds, info);
 954		break;
 955	case DSA_NOTIFIER_LAG_CHANGE:
 956		err = dsa_switch_lag_change(ds, info);
 957		break;
 958	case DSA_NOTIFIER_LAG_JOIN:
 959		err = dsa_switch_lag_join(ds, info);
 960		break;
 961	case DSA_NOTIFIER_LAG_LEAVE:
 962		err = dsa_switch_lag_leave(ds, info);
 963		break;
 964	case DSA_NOTIFIER_MDB_ADD:
 965		err = dsa_switch_mdb_add(ds, info);
 966		break;
 967	case DSA_NOTIFIER_MDB_DEL:
 968		err = dsa_switch_mdb_del(ds, info);
 969		break;
 970	case DSA_NOTIFIER_HOST_MDB_ADD:
 971		err = dsa_switch_host_mdb_add(ds, info);
 972		break;
 973	case DSA_NOTIFIER_HOST_MDB_DEL:
 974		err = dsa_switch_host_mdb_del(ds, info);
 975		break;
 976	case DSA_NOTIFIER_VLAN_ADD:
 977		err = dsa_switch_vlan_add(ds, info);
 978		break;
 979	case DSA_NOTIFIER_VLAN_DEL:
 980		err = dsa_switch_vlan_del(ds, info);
 981		break;
 982	case DSA_NOTIFIER_HOST_VLAN_ADD:
 983		err = dsa_switch_host_vlan_add(ds, info);
 984		break;
 985	case DSA_NOTIFIER_HOST_VLAN_DEL:
 986		err = dsa_switch_host_vlan_del(ds, info);
 987		break;
 988	case DSA_NOTIFIER_MTU:
 989		err = dsa_switch_mtu(ds, info);
 990		break;
 991	case DSA_NOTIFIER_TAG_PROTO:
 992		err = dsa_switch_change_tag_proto(ds, info);
 993		break;
 994	case DSA_NOTIFIER_TAG_PROTO_CONNECT:
 995		err = dsa_switch_connect_tag_proto(ds, info);
 996		break;
 997	case DSA_NOTIFIER_TAG_PROTO_DISCONNECT:
 998		err = dsa_switch_disconnect_tag_proto(ds, info);
 999		break;
1000	case DSA_NOTIFIER_TAG_8021Q_VLAN_ADD:
1001		err = dsa_switch_tag_8021q_vlan_add(ds, info);
1002		break;
1003	case DSA_NOTIFIER_TAG_8021Q_VLAN_DEL:
1004		err = dsa_switch_tag_8021q_vlan_del(ds, info);
1005		break;
1006	case DSA_NOTIFIER_MASTER_STATE_CHANGE:
1007		err = dsa_switch_master_state_change(ds, info);
1008		break;
1009	default:
1010		err = -EOPNOTSUPP;
1011		break;
1012	}
1013
1014	if (err)
1015		dev_dbg(ds->dev, "breaking chain for DSA event %lu (%d)\n",
1016			event, err);
1017
1018	return notifier_from_errno(err);
1019}
1020
1021/**
1022 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
1023 * @dst: collection of struct dsa_switch devices to notify.
1024 * @e: event, must be of type DSA_NOTIFIER_*
1025 * @v: event-specific value.
1026 *
1027 * Given a struct dsa_switch_tree, this can be used to run a function once for
1028 * each member DSA switch. The other alternative of traversing the tree is only
1029 * through its ports list, which does not uniquely list the switches.
1030 */
1031int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
1032{
1033	struct raw_notifier_head *nh = &dst->nh;
1034	int err;
1035
1036	err = raw_notifier_call_chain(nh, e, v);
1037
1038	return notifier_to_errno(err);
1039}
1040
1041/**
1042 * dsa_broadcast - Notify all DSA trees in the system.
1043 * @e: event, must be of type DSA_NOTIFIER_*
1044 * @v: event-specific value.
1045 *
1046 * Can be used to notify the switching fabric of events such as cross-chip
1047 * bridging between disjoint trees (such as islands of tagger-compatible
1048 * switches bridged by an incompatible middle switch).
1049 *
1050 * WARNING: this function is not reliable during probe time, because probing
1051 * between trees is asynchronous and not all DSA trees might have probed.
1052 */
1053int dsa_broadcast(unsigned long e, void *v)
1054{
1055	struct dsa_switch_tree *dst;
1056	int err = 0;
1057
1058	list_for_each_entry(dst, &dsa_tree_list, list) {
1059		err = dsa_tree_notify(dst, e, v);
1060		if (err)
1061			break;
1062	}
1063
1064	return err;
1065}
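/*
 * Editorial sketch, not part of switch.c: bridge join/leave events may span
 * disjoint trees, so their callers use dsa_broadcast() rather than
 * dsa_tree_notify(). The wrapper below is hypothetical and assumes the
 * struct dsa_bridge / dsa_notifier_bridge_info layouts implied by the
 * handlers above.
 */
static int example_broadcast_bridge_leave(struct dsa_port *dp,
					  struct dsa_bridge bridge)
{
	struct dsa_notifier_bridge_info info = {
		.dp = dp,
		.bridge = bridge,
	};

	return dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);
}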
1066
1067int dsa_switch_register_notifier(struct dsa_switch *ds)
1068{
1069	ds->nb.notifier_call = dsa_switch_event;
1070
1071	return raw_notifier_chain_register(&ds->dst->nh, &ds->nb);
1072}
1073
1074void dsa_switch_unregister_notifier(struct dsa_switch *ds)
1075{
1076	int err;
1077
1078	err = raw_notifier_chain_unregister(&ds->dst->nh, &ds->nb);
1079	if (err)
1080		dev_err(ds->dev, "failed to unregister notifier (%d)\n", err);
1081}