v6.13.7
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

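/* Deferred FDB event context: switchdev FDB notifications arrive in
 * atomic context, so the event data is copied into this struct and
 * handled later on the ordered workqueue.
 */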
struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	struct sparx5 *sparx5;
	unsigned long event;
};

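/* Reject any bridge port flags the hardware cannot offload; only
 * unicast, multicast and broadcast flood control are supported.
 */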
static int sparx5_port_attr_pre_bridge_flags(struct sparx5_port *port,
					     struct switchdev_brport_flags flags)
{
	if (flags.mask & ~(BR_FLOOD | BR_MCAST_FLOOD | BR_BCAST_FLOOD))
		return -EINVAL;

	return 0;
}

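/* Set or clear this port in each of the IP multicast flood PGIDs
 * (IPv4/IPv6 data and control). An mrouter port always floods,
 * regardless of the bridge flood flag.
 */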
static void sparx5_port_update_mcast_ip_flood(struct sparx5_port *port, bool flood_flag)
{
	bool should_flood = flood_flag || port->is_mrouter;
	struct sparx5 *sparx5 = port->sparx5;
	int pgid;

	for (pgid = sparx5_get_pgid(sparx5, PGID_IPV4_MC_DATA);
	     pgid <= sparx5_get_pgid(sparx5, PGID_IPV6_MC_CTRL); pgid++)
		sparx5_pgid_update_mask(port, pgid, should_flood);
}

static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (flags.mask & BR_MCAST_FLOOD) {
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_MC_FLOOD),
					!!(flags.val & BR_MCAST_FLOOD));
		sparx5_port_update_mcast_ip_flood(port, !!(flags.val & BR_MCAST_FLOOD));
	}

	if (flags.mask & BR_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_UC_FLOOD),
					!!(flags.val & BR_FLOOD));
	if (flags.mask & BR_BCAST_FLOOD)
		sparx5_pgid_update_mask(port,
					sparx5_get_pgid(sparx5, PGID_BCAST),
					!!(flags.val & BR_BCAST_FLOOD));
}

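/* Map the bridge STP state onto the per-port forwarding and learning
 * masks: forwarding implies learning, learning alone blocks forwarding,
 * and every other state clears both bits.
 */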
static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

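/* Toggle multicast-router behaviour for a port: add it to (or remove
 * it from) the PGID of every known IP multicast group it has not
 * explicitly joined, then recompute the IP multicast flooding.
 */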
static void sparx5_port_attr_mrouter_set(struct sparx5_port *port,
					 struct net_device *orig_dev,
					 bool enable)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct sparx5_mdb_entry *e;
	bool flood_flag;

	if ((enable && port->is_mrouter) || (!enable && !port->is_mrouter))
		return;

	/* Add/del mrouter port on all active mdb entries in HW.
	 * Don't change entry port mask, since that represents
	 * ports that actually joined that group.
	 */
	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (!test_bit(port->portno, e->port_mask) &&
		    ether_addr_is_ip_mcast(e->addr))
			sparx5_pgid_update_mask(port, e->pgid_idx, enable);
	}
	mutex_unlock(&sparx5->mdb_lock);

	/* Enable/disable flooding depending on whether the port is an
	 * mrouter port or mcast flooding is enabled.
	 */
	port->is_mrouter = enable;
	flood_flag = br_port_flag_is_set(port->ndev, BR_MCAST_FLOOD);
	sparx5_port_update_mcast_ip_flood(port, flood_flag);
}

static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
		return sparx5_port_attr_pre_bridge_flags(port,
							 attr->u.brport_flags);
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		/* Use PVID 1 when the default_pvid is 0, to avoid
		 * collisions with non-bridged ports.
		 */
		if (port->pvid == 0)
			port->pvid = 1;
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	case SWITCHDEV_ATTR_ID_PORT_MROUTER:
		sparx5_port_attr_mrouter_set(port,
					     attr->orig_dev,
					     attr->u.mrouter);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

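/* Attach a port to the (single) hardware-offloaded bridge. The first
 * bridged port selects the bridge device; joining any other bridge is
 * rejected, since the switch can offload only one bridge at a time.
 */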
static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge,
				   struct netlink_ext_ack *extack)
{
	struct sparx5 *sparx5 = port->sparx5;
	struct net_device *ndev = port->ndev;
	int err;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else
		if (sparx5->hw_bridge_dev != bridge)
			/* Adding the port to a second bridge is not
			 * supported.
			 */
			return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
					    false, extack);
	if (err)
		goto err_switchdev_offload;

	/* Remove standalone port entry */
	sparx5_mact_forget(sparx5, ndev->dev_addr, 0);

	/* The port enters bridge mode, so multicast frames no longer
	 * need to be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(ndev, sparx5_mc_unsync);

	return 0;

err_switchdev_offload:
	clear_bit(port->portno, sparx5->bridge_mask);
	return err;
}

static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL);

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* Forward frames to CPU */
	sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
			  port->ndev->dev_addr, 0);

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct netlink_ext_ack *extack;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev,
						      extack);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_CPU),
				  port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

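/* Worker for the deferred FDB events: runs with the rtnl lock held and
 * programs the MAC table. Events on foreign (non-sparx5) interfaces are
 * treated as host addresses and copied to the CPU port group.
 */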
static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;
	bool host_addr;
	u16 vid;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev)) {
		host_addr = true;
		sparx5 = switchdev_work->sparx5;
	} else {
		host_addr = false;
		sparx5 = switchdev_work->sparx5;
		port = netdev_priv(dev);
	}

	fdb_info = &switchdev_work->fdb_info;

	/* Use PVID 1 when the default_pvid is 0, to avoid
	 * collisions with non-bridged ports.
	 */
	if (fdb_info->vid == 0)
		vid = 1;
	else
		vid = fdb_info->vid;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (host_addr)
			sparx5_add_mact_entry(sparx5, dev,
					      sparx5_get_pgid(sparx5, PGID_CPU),
					      fdb_info->addr, vid);
		else
			sparx5_add_mact_entry(sparx5, port->ndev, port->portno,
					      fdb_info->addr, vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		sparx5_del_mact_entry(sparx5, fdb_info->addr, vid);
		break;
	}

	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

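/* Atomic switchdev notifier: port attribute changes are handled in
 * place, while FDB add/del events are copied (including the MAC
 * address, allocated with GFP_ATOMIC) and deferred to the ordered
 * workqueue, holding a reference on the netdevice until the work runs.
 */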
static int sparx5_switchdev_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	struct sparx5 *spx5;
	int err;

	spx5 = container_of(nb, struct sparx5, switchdev_nb);

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;
		switchdev_work->sparx5 = spx5;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		/* Flood broadcast to CPU */
		sparx5_mact_learn(sparx5, sparx5_get_pgid(sparx5, PGID_BCAST),
				  dev->broadcast, v->vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				  v->flags & BRIDGE_VLAN_INFO_PVID,
				  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

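/* Allocate an MDB entry together with a free multicast PGID and add it
 * to the driver's list under the mdb_lock.
 */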
static int sparx5_alloc_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid,
				  struct sparx5_mdb_entry **entry_out)
{
	struct sparx5_mdb_entry *entry;
	u16 pgid_idx;
	int err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	err = sparx5_pgid_alloc_mcast(sparx5, &pgid_idx);
	if (err) {
		kfree(entry);
		return err;
	}

	memcpy(entry->addr, addr, ETH_ALEN);
	entry->vid = vid;
	entry->pgid_idx = pgid_idx;

	mutex_lock(&sparx5->mdb_lock);
	list_add_tail(&entry->list, &sparx5->mdb_entries);
	mutex_unlock(&sparx5->mdb_lock);

	*entry_out = entry;
	return 0;
}

static void sparx5_free_mdb_entry(struct sparx5 *sparx5,
				  const unsigned char *addr,
				  u16 vid)
{
	struct sparx5_mdb_entry *entry, *tmp;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry_safe(entry, tmp, &sparx5->mdb_entries, list) {
		if ((vid == 0 || entry->vid == vid) &&
		    ether_addr_equal(addr, entry->addr)) {
			list_del(&entry->list);

			sparx5_pgid_free(sparx5, entry->pgid_idx);
			kfree(entry);
			goto out;
		}
	}

out:
	mutex_unlock(&sparx5->mdb_lock);
}

static struct sparx5_mdb_entry *sparx5_mdb_get_entry(struct sparx5 *sparx5,
						     const unsigned char *addr,
						     u16 vid)
{
	struct sparx5_mdb_entry *e, *found = NULL;

	mutex_lock(&sparx5->mdb_lock);
	list_for_each_entry(e, &sparx5->mdb_entries, list) {
		if (ether_addr_equal(e->addr, addr) && e->vid == vid) {
			found = e;
			goto out;
		}
	}

out:
	mutex_unlock(&sparx5->mdb_lock);
	return found;
}

static void sparx5_cpu_copy_ena(struct sparx5 *spx5, u16 pgid, bool enable)
{
	spx5_rmw(ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA_SET(enable),
		 ANA_AC_PGID_MISC_CFG_PGID_CPU_COPY_ENA, spx5,
		 ANA_AC_PGID_MISC_CFG(pgid));
}

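/* Offload an MDB join: look up (or create) the entry for the group,
 * seed a new entry with any existing mrouter ports, then either enable
 * CPU copy (host join) or add the port to the entry's PGID mask, and
 * finally install the group address in the MAC table.
 */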
static int sparx5_handle_port_mdb_add(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host, is_new;
	int err, i;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	/* When the bridge is VLAN unaware, the VLAN value is not parsed
	 * and we receive vid 0. Fall back to bridge vid 1.
	 */
	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	is_new = false;
	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry) {
		err = sparx5_alloc_mdb_entry(spx5, v->addr, vid, &entry);
		is_new = true;
		if (err)
			return err;
	}

	mutex_lock(&spx5->mdb_lock);

	/* Add any mrouter ports to the new entry */
	if (is_new && ether_addr_is_ip_mcast(v->addr))
		for (i = 0; i < spx5->data->consts->n_ports; i++)
			if (spx5->ports[i] && spx5->ports[i]->is_mrouter)
				sparx5_pgid_update_mask(spx5->ports[i],
							entry->pgid_idx,
							true);

	if (is_host && !entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, true);
		entry->cpu_copy = true;
	} else if (!is_host) {
		sparx5_pgid_update_mask(port, entry->pgid_idx, true);
		set_bit(port->portno, entry->port_mask);
	}
	mutex_unlock(&spx5->mdb_lock);

	sparx5_mact_learn(spx5, entry->pgid_idx, entry->addr, entry->vid);

	return 0;
}

static int sparx5_handle_port_mdb_del(struct net_device *dev,
				      struct notifier_block *nb,
				      const struct switchdev_obj_port_mdb *v)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *spx5 = port->sparx5;
	struct sparx5_mdb_entry *entry;
	bool is_host;
	u16 vid;

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	is_host = netif_is_bridge_master(v->obj.orig_dev);

	if (!br_vlan_enabled(spx5->hw_bridge_dev))
		vid = 1;
	else
		vid = v->vid;

	entry = sparx5_mdb_get_entry(spx5, v->addr, vid);
	if (!entry)
		return 0;

	mutex_lock(&spx5->mdb_lock);
	if (is_host && entry->cpu_copy) {
		sparx5_cpu_copy_ena(spx5, entry->pgid_idx, false);
		entry->cpu_copy = false;
	} else if (!is_host) {
		clear_bit(port->portno, entry->port_mask);

		/* If the port is not an mrouter port, or the address is
		 * L2 mcast, remove the port from the mask.
		 */
		if (!port->is_mrouter || !ether_addr_is_ip_mcast(v->addr))
			sparx5_pgid_update_mask(port, entry->pgid_idx, false);
	}
	mutex_unlock(&spx5->mdb_lock);

	if (bitmap_empty(entry->port_mask, SPX5_PORTS) && !entry->cpu_copy) {
		 /* Clear pgid in case mrouter ports exist
		  * that are not part of the group.
		  */
		sparx5_pgid_clear(spx5, entry->pgid_idx);
		sparx5_mact_forget(spx5, entry->addr, entry->vid);
		sparx5_free_mdb_entry(spx5, entry->addr, entry->vid);
	}
	return 0;
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_add(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);
	int ret;

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_mact_forget(sparx5, dev->broadcast, vid);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	return 0;
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	case SWITCHDEV_OBJ_ID_PORT_MDB:
	case SWITCHDEV_OBJ_ID_HOST_MDB:
		err = sparx5_handle_port_mdb_del(dev, nb,
						 SWITCHDEV_OBJ_PORT_MDB(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

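/* Register the netdevice, switchdev and blocking switchdev notifiers
 * and create the ordered workqueue for deferred FDB events; on any
 * failure, everything registered so far is unwound in reverse order.
 */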
int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}
v5.14.15
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include <linux/if_bridge.h>
#include <net/switchdev.h>

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

static struct workqueue_struct *sparx5_owq;

struct sparx5_switchdev_event_work {
	struct work_struct work;
	struct switchdev_notifier_fdb_info fdb_info;
	struct net_device *dev;
	unsigned long event;
};

static void sparx5_port_attr_bridge_flags(struct sparx5_port *port,
					  struct switchdev_brport_flags flags)
{
	if (flags.mask & BR_MCAST_FLOOD)
		sparx5_pgid_update_mask(port, PGID_MC_FLOOD, true);
}

static void sparx5_attr_stp_state_set(struct sparx5_port *port,
				      u8 state)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (!test_bit(port->portno, sparx5->bridge_mask)) {
		netdev_err(port->ndev,
			   "Controlling non-bridged port %d?\n", port->portno);
		return;
	}

	switch (state) {
	case BR_STATE_FORWARDING:
		set_bit(port->portno, sparx5->bridge_fwd_mask);
		fallthrough;
	case BR_STATE_LEARNING:
		set_bit(port->portno, sparx5->bridge_lrn_mask);
		break;

	default:
		/* All other states treated as blocking */
		clear_bit(port->portno, sparx5->bridge_fwd_mask);
		clear_bit(port->portno, sparx5->bridge_lrn_mask);
		break;
	}

	/* Apply the bridge_fwd_mask to all the ports */
	sparx5_update_fwd(sparx5);
}

static void sparx5_port_attr_ageing_set(struct sparx5_port *port,
					unsigned long ageing_clock_t)
{
	unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t);
	u32 ageing_time = jiffies_to_msecs(ageing_jiffies);

	sparx5_set_ageing(port->sparx5, ageing_time);
}

static int sparx5_port_attr_set(struct net_device *dev, const void *ctx,
				const struct switchdev_attr *attr,
				struct netlink_ext_ack *extack)
{
	struct sparx5_port *port = netdev_priv(dev);

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
		sparx5_port_attr_bridge_flags(port, attr->u.brport_flags);
		break;
	case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
		sparx5_attr_stp_state_set(port, attr->u.stp_state);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME:
		sparx5_port_attr_ageing_set(port, attr->u.ageing_time);
		break;
	case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING:
		port->vlan_aware = attr->u.vlan_filtering;
		sparx5_vlan_port_apply(port->sparx5, port);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

static int sparx5_port_bridge_join(struct sparx5_port *port,
				   struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		/* First bridged port */
		sparx5->hw_bridge_dev = bridge;
	else
		if (sparx5->hw_bridge_dev != bridge)
			/* Adding the port to a second bridge is not
			 * supported.
			 */
			return -ENODEV;

	set_bit(port->portno, sparx5->bridge_mask);

	/* The port enters bridge mode, so multicast frames no longer
	 * need to be copied to the CPU unless the bridge requests them.
	 */
	__dev_mc_unsync(port->ndev, sparx5_mc_unsync);

	return 0;
}

static void sparx5_port_bridge_leave(struct sparx5_port *port,
				     struct net_device *bridge)
{
	struct sparx5 *sparx5 = port->sparx5;

	clear_bit(port->portno, sparx5->bridge_mask);
	if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS))
		sparx5->hw_bridge_dev = NULL;

	/* Clear bridge vlan settings before updating the port settings */
	port->vlan_aware = 0;
	port->pvid = NULL_VID;
	port->vid = NULL_VID;

	/* The port returns to host mode, therefore restore the mc list */
	__dev_mc_sync(port->ndev, sparx5_mc_sync, sparx5_mc_unsync);
}

static int sparx5_port_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct sparx5_port *port = netdev_priv(dev);
	int err = 0;

	if (netif_is_bridge_master(info->upper_dev)) {
		if (info->linking)
			err = sparx5_port_bridge_join(port, info->upper_dev);
		else
			sparx5_port_bridge_leave(port, info->upper_dev);

		sparx5_vlan_port_apply(port->sparx5, port);
	}

	return err;
}

static int sparx5_port_add_addr(struct net_device *dev, bool up)
{
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u16 vid = port->pvid;

	if (up)
		sparx5_mact_learn(sparx5, PGID_CPU, port->ndev->dev_addr, vid);
	else
		sparx5_mact_forget(sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_netdevice_port_event(struct net_device *dev,
				       struct notifier_block *nb,
				       unsigned long event, void *ptr)
{
	int err = 0;

	if (!sparx5_netdevice_check(dev))
		return 0;

	switch (event) {
	case NETDEV_CHANGEUPPER:
		err = sparx5_port_changeupper(dev, ptr);
		break;
	case NETDEV_PRE_UP:
		err = sparx5_port_add_addr(dev, true);
		break;
	case NETDEV_DOWN:
		err = sparx5_port_add_addr(dev, false);
		break;
	}

	return err;
}

static int sparx5_netdevice_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	int ret = 0;

	ret = sparx5_netdevice_port_event(dev, nb, event, ptr);

	return notifier_from_errno(ret);
}

static void sparx5_switchdev_bridge_fdb_event_work(struct work_struct *work)
{
	struct sparx5_switchdev_event_work *switchdev_work =
		container_of(work, struct sparx5_switchdev_event_work, work);
	struct net_device *dev = switchdev_work->dev;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct sparx5_port *port;
	struct sparx5 *sparx5;

	rtnl_lock();
	if (!sparx5_netdevice_check(dev))
		goto out;

	port = netdev_priv(dev);
	sparx5 = port->sparx5;

	fdb_info = &switchdev_work->fdb_info;

	switch (switchdev_work->event) {
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_add_mact_entry(sparx5, port, fdb_info->addr,
				      fdb_info->vid);
		break;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		if (!fdb_info->added_by_user)
			break;
		sparx5_del_mact_entry(sparx5, fdb_info->addr, fdb_info->vid);
		break;
	}

out:
	rtnl_unlock();
	kfree(switchdev_work->fdb_info.addr);
	kfree(switchdev_work);
	dev_put(dev);
}

static void sparx5_schedule_work(struct work_struct *work)
{
	queue_work(sparx5_owq, work);
}

static int sparx5_switchdev_event(struct notifier_block *unused,
				  unsigned long event, void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	struct sparx5_switchdev_event_work *switchdev_work;
	struct switchdev_notifier_fdb_info *fdb_info;
	struct switchdev_notifier_info *info = ptr;
	int err;

	switch (event) {
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	case SWITCHDEV_FDB_ADD_TO_DEVICE:
		fallthrough;
	case SWITCHDEV_FDB_DEL_TO_DEVICE:
		switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
		if (!switchdev_work)
			return NOTIFY_BAD;

		switchdev_work->dev = dev;
		switchdev_work->event = event;

		fdb_info = container_of(info,
					struct switchdev_notifier_fdb_info,
					info);
		INIT_WORK(&switchdev_work->work,
			  sparx5_switchdev_bridge_fdb_event_work);
		memcpy(&switchdev_work->fdb_info, ptr,
		       sizeof(switchdev_work->fdb_info));
		switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
		if (!switchdev_work->fdb_info.addr)
			goto err_addr_alloc;

		ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
				fdb_info->addr);
		dev_hold(dev);

		sparx5_schedule_work(&switchdev_work->work);
		break;
	}

	return NOTIFY_DONE;
err_addr_alloc:
	kfree(switchdev_work);
	return NOTIFY_BAD;
}

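/* Keep a bridged port's own MAC address visible to the CPU per VLAN:
 * on add, learn it towards the CPU port group; on delete, keep or
 * forget it depending on whether the port is still in that VLAN.
 */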
static void sparx5_sync_port_dev_addr(struct sparx5 *sparx5,
				      struct sparx5_port *port,
				      u16 vid, bool add)
{
	if (!port ||
	    !test_bit(port->portno, sparx5->bridge_mask))
		return; /* Skip null/host interfaces */

	/* Bridge connects to vid? */
	if (add) {
		/* Add the port MAC address for this VLAN */
		sparx5_mact_learn(sparx5, PGID_CPU,
				  port->ndev->dev_addr, vid);
	} else {
		/* Control port addr visibility depending on
		 * port VLAN connectivity.
		 */
		if (test_bit(port->portno, sparx5->vlan_mask[vid]))
			sparx5_mact_learn(sparx5, PGID_CPU,
					  port->ndev->dev_addr, vid);
		else
			sparx5_mact_forget(sparx5,
					   port->ndev->dev_addr, vid);
	}
}

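/* Synchronize the bridge device's own unicast and broadcast addresses
 * for a VLAN, then update every bridged port's address visibility.
 */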
static void sparx5_sync_bridge_dev_addr(struct net_device *dev,
					struct sparx5 *sparx5,
					u16 vid, bool add)
{
	int i;

	/* First, handle bridge addresses */
	if (add) {
		sparx5_mact_learn(sparx5, PGID_CPU, dev->dev_addr,
				  vid);
		sparx5_mact_learn(sparx5, PGID_BCAST, dev->broadcast,
				  vid);
	} else {
		sparx5_mact_forget(sparx5, dev->dev_addr, vid);
		sparx5_mact_forget(sparx5, dev->broadcast, vid);
	}

	/* Now look at bridged ports */
	for (i = 0; i < SPX5_PORTS; i++)
		sparx5_sync_port_dev_addr(sparx5, sparx5->ports[i], vid, add);
}

static int sparx5_handle_port_vlan_add(struct net_device *dev,
				       struct notifier_block *nb,
				       const struct switchdev_obj_port_vlan *v)
{
	struct sparx5_port *port = netdev_priv(dev);

	if (netif_is_bridge_master(dev)) {
		if (v->flags & BRIDGE_VLAN_INFO_BRENTRY) {
			struct sparx5 *sparx5 =
				container_of(nb, struct sparx5,
					     switchdev_blocking_nb);

			sparx5_sync_bridge_dev_addr(dev, sparx5, v->vid, true);
		}
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	return sparx5_vlan_vid_add(port, v->vid,
				  v->flags & BRIDGE_VLAN_INFO_PVID,
				  v->flags & BRIDGE_VLAN_INFO_UNTAGGED);
}

static int sparx5_handle_port_obj_add(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_add(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj));
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_handle_port_vlan_del(struct net_device *dev,
				       struct notifier_block *nb,
				       u16 vid)
{
	struct sparx5_port *port = netdev_priv(dev);
	int ret;

	/* Master bridge? */
	if (netif_is_bridge_master(dev)) {
		struct sparx5 *sparx5 =
			container_of(nb, struct sparx5,
				     switchdev_blocking_nb);

		sparx5_sync_bridge_dev_addr(dev, sparx5, vid, false);
		return 0;
	}

	if (!sparx5_netdevice_check(dev))
		return -EOPNOTSUPP;

	ret = sparx5_vlan_vid_del(port, vid);
	if (ret)
		return ret;

	/* Delete the port MAC address with the matching VLAN information */
	sparx5_mact_forget(port->sparx5, port->ndev->dev_addr, vid);

	return 0;
}

static int sparx5_handle_port_obj_del(struct net_device *dev,
				      struct notifier_block *nb,
				      struct switchdev_notifier_port_obj_info *info)
{
	const struct switchdev_obj *obj = info->obj;
	int err;

	switch (obj->id) {
	case SWITCHDEV_OBJ_ID_PORT_VLAN:
		err = sparx5_handle_port_vlan_del(dev, nb,
						  SWITCHDEV_OBJ_PORT_VLAN(obj)->vid);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}

	info->handled = true;
	return err;
}

static int sparx5_switchdev_blocking_event(struct notifier_block *nb,
					   unsigned long event,
					   void *ptr)
{
	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
	int err;

	switch (event) {
	case SWITCHDEV_PORT_OBJ_ADD:
		err = sparx5_handle_port_obj_add(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_OBJ_DEL:
		err = sparx5_handle_port_obj_del(dev, nb, ptr);
		return notifier_from_errno(err);
	case SWITCHDEV_PORT_ATTR_SET:
		err = switchdev_handle_port_attr_set(dev, ptr,
						     sparx5_netdevice_check,
						     sparx5_port_attr_set);
		return notifier_from_errno(err);
	}

	return NOTIFY_DONE;
}

int sparx5_register_notifier_blocks(struct sparx5 *s5)
{
	int err;

	s5->netdevice_nb.notifier_call = sparx5_netdevice_event;
	err = register_netdevice_notifier(&s5->netdevice_nb);
	if (err)
		return err;

	s5->switchdev_nb.notifier_call = sparx5_switchdev_event;
	err = register_switchdev_notifier(&s5->switchdev_nb);
	if (err)
		goto err_switchdev_nb;

	s5->switchdev_blocking_nb.notifier_call = sparx5_switchdev_blocking_event;
	err = register_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	if (err)
		goto err_switchdev_blocking_nb;

	sparx5_owq = alloc_ordered_workqueue("sparx5_order", 0);
	if (!sparx5_owq) {
		err = -ENOMEM;
		goto err_switchdev_blocking_nb;
	}

	return 0;

err_switchdev_blocking_nb:
	unregister_switchdev_notifier(&s5->switchdev_nb);
err_switchdev_nb:
	unregister_netdevice_notifier(&s5->netdevice_nb);

	return err;
}

void sparx5_unregister_notifier_blocks(struct sparx5 *s5)
{
	destroy_workqueue(sparx5_owq);

	unregister_switchdev_blocking_notifier(&s5->switchdev_blocking_nb);
	unregister_switchdev_notifier(&s5->switchdev_nb);
	unregister_netdevice_notifier(&s5->netdevice_nb);
}