v6.8
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

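/* FDB events arrive in atomic notifier context; they are copied into
 * this work item and handled later on the ordered workqueue.
 */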
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

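/* Only the unicast, multicast and broadcast flood flags can be
 * offloaded; any other requested flag is rejected up front.
 */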
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

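/* Apply the flood setting to all IPv4/IPv6 multicast PGIDs. An
 * mrouter port floods regardless of the bridge flag.
 */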
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
	bool should_flood = flood_flag || port->is_mrouter;
	int pgid;

	for (pgid = PGID_IPV4_MC_DATA; pgid <= PGID_IPV6_MC_CTRL; pgid++)
		sparx5_pgid_update_mask(port, pgid, should_flood);
}

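/* Translate bridge port flags into the corresponding flood PGID
 * masks for this port.
 */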
static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD) {
		sparx5_pgid_update_mask(port, PGID_MC_FLOOD, !!(flags.val & BR_MCAST_FLOOD));
		sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
	}

	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port, PGID_UC_FLOOD, !!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_BCAST, !!(flags.val & BR_BCAST_FLOOD));
}

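/* Map the STP state onto the forward/learn port masks: forwarding
 * implies learning, and any other state blocks the port.
 */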
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

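/* The bridge hands over the ageing time in clock_t units; convert it
 * to milliseconds for sparx5_set_ageing().
 */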
static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

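/* Toggle mrouter status for the port: open/close the PGIDs of all
 * known IP multicast groups and re-evaluate multicast flooding.
 */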
static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
					 struct net_device *orig_dev,
					 bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_mdb_entry *e;
	bool flood_flag;

	if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
		return;

	/* Add/del the mrouter port on all active mdb entries in HW.
	 * Don't change the entry port mask, since that represents
	 * ports that actually joined that group.
	 */
	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (!test_bit(port->portno, e->port_mask) &&
		    ether_addr_is_ip_mcast(e->addr))
			sparx5_pgid_update_mask(port, e->pgid_idx, enable);
	}
	mutex_unlock(&sparx5->mdb_lock);

	/* Enable/disable flooding depending on whether the port is an
	 * mrouter port or mcast flooding is enabled.
	 */
	port->is_mrouter = enable;
	flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
	sparx5_port_update_mcast_ip_flood(port, flood_flag);
}

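/* Dispatch a switchdev port attribute to the handlers above. */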
static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when default_pvid is 0, to avoid
		 * collisions with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		sparx5_port_attr_mrouter_set(port,
					     attr->orig_dev,
					     attr->u.mrouter);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

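/* Attach the port to the (single) hardware-offloaded bridge. */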
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else
		if (sparx5->hw_bridge_dev != bridge)
			/* Adding the port to a second bridge is
			 * unsupported.
			 */
			return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, therefore multicast frames no
	 * longer need to be copied to the CPU unless the bridge
	 * requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to CPU */
	sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, 0);

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

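/* Deferred FDB add/del handler; runs on the ordered workqueue with
 * the rtnl lock held while the MAC table is updated.
 */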
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
		sparx5 = switchdev_work->sparx5;
	} else {
		host_addr = false;
		sparx5 = switchdev_work->sparx5;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when default_pvid is 0, to avoid
	 * collisions with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev, PGID_CPU,
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

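/* A VLAN add on the bridge device itself only needs the broadcast
 * address in the MAC table; port devices get a real VLAN entry.
 */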
static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				  v->flags & BRIDGE_VLAN_INFO_PVID,
				  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

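/* Allocate an MDB entry together with a multicast PGID and put it
 * on the driver's list; mdb_lock protects the list.
 */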
static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid,
				  struct sparx5_mdb_entry **entry_out)
{
	struct sparx5_mdb_entry *entry;
	u16 pgid_idx;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
	if (err) {
		kfree(entry);
		return err;
	}

	memcpy(entry->addr, addr, ETH_ALEN);
	entry->vid = vid;
	entry->pgid_idx = pgid_idx;

	mutex_lock(&sparx5->mdb_lock);
	list_add_tail(&entry->list, &sparx5->mdb_entries);
	mutex_unlock(&sparx5->mdb_lock);

	*entry_out = entry;
	return 0;
}

static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid)
{
	struct sparx5_mdb_entry *entry, *tmp;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
		if ((vid == 0 || entry->vid == vid) &&
		    ether_addr_equal(addr, entry->addr)) {
			list_del(&entry->list);

			sparx5_pgid_free(sparx5, entry->pgid_idx);
			kfree(entry);
			goto out;
		}
	}

out:
	mutex_unlock(&sparx5->mdb_lock);
}

static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
						     const unsigned char *addr,
						     u16 vid)
{
	struct sparx5_mdb_entry *e, *found = NULL;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
			found = e;
			goto out;
		}
	}

out:
	mutex_unlock(&sparx5->mdb_lock);
	return found;
}

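/* Enable/disable CPU copy for a PGID, i.e. whether frames hitting
 * this group are also delivered to the CPU port.
 */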
static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
{
	spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
		 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
		 ANA_AC_PGID_MISC_CFG(pgid));
}

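/* Add a port (or the host, for HOST_MDB) to a multicast group,
 * creating the group and its PGID on first use.
 */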
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host, is_new;
	int err, i;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware, the vlan value is not parsed
	 * and we receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	is_new = false;
	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry) {
		err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
		is_new = true;
		if (err)
			return err;
	}

	mutex_lock(&spx5->mdb_lock);

	/* Add any mrouter ports to the new entry */
	if (is_new && ether_addr_is_ip_mcast(v->addr))
		for (i = 0; i < SPX5_PORTS; i++)
			if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
				sparx5_pgid_update_mask(spx5->ports[i],
							entry->pgid_idx,
							true);

	if (is_host && !entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
		entry->cpu_copy = true;
	} else if (!is_host) {
		sparx5_pgid_update_mask(port, entry->pgid_idx, true);
		set_bit(port->portno, entry->port_mask);
	}
	mutex_unlock(&spx5->mdb_lock);

	sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);

	return 0;
}

static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry)
		return 0;

	mutex_lock(&spx5->mdb_lock);
	if (is_host && entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
		entry->cpu_copy = false;
	} else if (!is_host) {
		clear_bit(port->portno, entry->port_mask);

		/* If the port is not an mrouter port, or the address is
		 * L2 mcast, remove the port from the mask.
		 */
		if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
			sparx5_pgid_update_mask(port, entry->pgid_idx, false);
	}
	mutex_unlock(&spx5->mdb_lock);

	if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
		/* Clear the pgid in case mrouter ports exist
		 * that are not part of the group.
		 */
		sparx5_pgid_clear(spx5, entry->pgid_idx);
		sparx5_mact_forget(spx5, entry->addr, entry->vid);
		sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
	}
	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);
	int ret;

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	return 0;
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

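/* Register the netdevice, switchdev and blocking switchdev
 * notifiers, plus the ordered workqueue for deferred FDB work.
 */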
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}
v5.14.15
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
}

static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else
		if (sparx5->hw_bridge_dev != bridge)
			/* Adding the port to a second bridge is
			 * unsupported.
			 */
			return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	/* The port enters bridge mode, therefore multicast frames no
	 * longer need to be copied to the CPU unless the bridge
	 * requests them.
	 */
	__dev_mc_unsync(port->ndev, sparx5_mc_unsync);

	return 0;
}

static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	int err = 0;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev))
		goto out;

	port = netdev_priv(dev);
	sparx5 = port->sparx5;

	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
				      fdb_info->vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

static int sparx5_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

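/* Keep a bridged port's own MAC address in the MAC table in sync
 * with the port's membership of the given VLAN.
 */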
static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      u16 vid, bool add)
{
	if (!port ||
	    !test_bit(port->portno, sparx5->bridge_mask))
		return; /* Skip null/host interfaces */

	/* Bridge connects to vid? */
	if (add) {
		/* Add the port MAC address for the VLAN */
		sparx5_mact_learn(sparx5, PGID_CPU,
				  port->ndev->dev_addr, vid);
	} else {
		/* Control port addr visibility depending on
		 * port VLAN connectivity.
		 */
		if (test_bit(port->portno, sparx5->vlan_mask[vid]))
			sparx5_mact_learn(sparx5, PGID_CPU,
					  port->ndev->dev_addr, vid);
		else
			sparx5_mact_forget(sparx5,
					   port->ndev->dev_addr, vid);
	}
}

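/* Sync the bridge's own unicast and broadcast addresses for a VLAN,
 * then update every bridged port's address entry.
 */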
static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
					struct sparx5 *sparx5,
					u16 vid, bool add)
{
	int i;

	/* First, handle the bridge addresses */
	if (add) {
		sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
				  vid);
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  vid);
	} else {
		sparx5_mact_forget(sparx5, dev->dev_addr, vid);
		sparx5_mact_forget(sparx5, dev->broadcast, vid);
	}

	/* Now look at bridged ports */
	for (i = 0; i < SPX5_PORTS; i++)
		sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
}

static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
			struct sparx5 *sparx5 =
				container_of(nb, struct sparx5,
					     switchdev_blocking_nb);

			sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
		}
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				  v->flags & BRIDGE_VLAN_INFO_PVID,
				  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);
	int ret;

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	/* Delete the port MAC address with the matching VLAN information */
	sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}