// SPDX-License-Identifier: GPL-2.0+

#include <net/switchdev.h>
#include "lan966x_main.h"

#define LAN966X_MAC_COLUMNS		4
#define MACACCESS_CMD_IDLE		0
#define MACACCESS_CMD_LEARN		1
#define MACACCESS_CMD_FORGET		2
#define MACACCESS_CMD_AGE		3
#define MACACCESS_CMD_GET_NEXT		4
#define MACACCESS_CMD_INIT		5
#define MACACCESS_CMD_READ		6
#define MACACCESS_CMD_WRITE		7
#define MACACCESS_CMD_SYNC_GET_NEXT	8

#define LAN966X_MAC_INVALID_ROW		-1

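/* SW bookkeeping for a MAC table entry tracked by the driver. The 'row'
 * field records the MAC table row in which the IRQ scan last saw the entry
 * (LAN966X_MAC_INVALID_ROW until then), and 'lag' is set when the entry was
 * created for a port that is part of a bond.
 */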
struct lan966x_mac_entry {
	struct list_head list;
	unsigned char mac[ETH_ALEN] __aligned(2);
	u16 vid;
	u16 port_index;
	int row;
	bool lag;
};

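/* Raw snapshot of the ANA_MACHDATA, ANA_MACLDATA and ANA_MACACCESS registers
 * for one column of a MAC table row, as read back during the SYNC-GET-NEXT
 * scan in the IRQ handler. 'processed' marks columns that have already been
 * matched against the SW list.
 */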
struct lan966x_mac_raw_entry {
	u32 mach;
	u32 macl;
	u32 maca;
	bool processed;
};

static int lan966x_mac_get_status(struct lan966x *lan966x)
{
	return lan_rd(lan966x, ANA_MACACCESS);
}

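/* Poll ANA_MACACCESS until the MAC_TABLE_CMD field returns to
 * MACACCESS_CMD_IDLE, i.e. until the pending MAC table command has finished.
 * The atomic poll variant is used so this can be called while holding the
 * mac_lock spinlock; it returns 0 on success or -ETIMEDOUT if the command
 * does not complete within TABLE_UPDATE_TIMEOUT_US.
 */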
static int lan966x_mac_wait_for_completion(struct lan966x *lan966x)
{
	u32 val;

	return readx_poll_timeout_atomic(lan966x_mac_get_status,
					 lan966x, val,
					 (ANA_MACACCESS_MAC_TABLE_CMD_GET(val)) ==
					 MACACCESS_CMD_IDLE,
					 TABLE_UPDATE_SLEEP_US,
					 TABLE_UPDATE_TIMEOUT_US);
}

static void lan966x_mac_select(struct lan966x *lan966x,
			       const unsigned char mac[ETH_ALEN],
			       unsigned int vid)
{
	u32 macl = 0, mach = 0;

	/* Write the MAC address to operate on and its associated VLAN in the
	 * format expected by the hardware.
	 */
	mach |= vid    << 16;
	mach |= mac[0] << 8;
	mach |= mac[1] << 0;
	macl |= mac[2] << 24;
	macl |= mac[3] << 16;
	macl |= mac[4] << 8;
	macl |= mac[5] << 0;

	lan_wr(macl, lan966x, ANA_MACLDATA);
	lan_wr(mach, lan966x, ANA_MACHDATA);
}
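
/* For example, with the encoding above, the MAC address 00:11:22:33:44:55 in
 * VLAN 10 is programmed as
 *
 *	mach = (10 << 16) | (0x00 << 8) | 0x11 = 0x000a0011
 *	macl = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55 = 0x22334455
 *
 * into ANA_MACHDATA and ANA_MACLDATA respectively.
 */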

static int __lan966x_mac_learn_locked(struct lan966x *lan966x, int pgid,
				      bool cpu_copy,
				      const unsigned char mac[ETH_ALEN],
				      unsigned int vid,
				      enum macaccess_entry_type type)
{
	lockdep_assert_held(&lan966x->mac_lock);

	lan966x_mac_select(lan966x, mac, vid);

	/* Issue a write command */
	lan_wr(ANA_MACACCESS_VALID_SET(1) |
	       ANA_MACACCESS_CHANGE2SW_SET(0) |
	       ANA_MACACCESS_MAC_CPU_COPY_SET(cpu_copy) |
	       ANA_MACACCESS_DEST_IDX_SET(pgid) |
	       ANA_MACACCESS_ENTRYTYPE_SET(type) |
	       ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN),
	       lan966x, ANA_MACACCESS);

	return lan966x_mac_wait_for_completion(lan966x);
}

static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid,
			       bool cpu_copy,
			       const unsigned char mac[ETH_ALEN],
			       unsigned int vid,
			       enum macaccess_entry_type type)
{
	int ret;

	spin_lock(&lan966x->mac_lock);
	ret = __lan966x_mac_learn_locked(lan966x, pgid, cpu_copy, mac, vid, type);
	spin_unlock(&lan966x->mac_lock);

	return ret;
}

/* The mask of the front ports is encoded inside the mac parameter via a call
 * to lan966x_mdb_encode_mac().
 */
int lan966x_mac_ip_learn(struct lan966x *lan966x,
			 bool cpu_copy,
			 const unsigned char mac[ETH_ALEN],
			 unsigned int vid,
			 enum macaccess_entry_type type)
{
	WARN_ON(type != ENTRYTYPE_MACV4 && type != ENTRYTYPE_MACV6);

	return __lan966x_mac_learn(lan966x, 0, cpu_copy, mac, vid, type);
}

int lan966x_mac_learn(struct lan966x *lan966x, int port,
		      const unsigned char mac[ETH_ALEN],
		      unsigned int vid,
		      enum macaccess_entry_type type)
{
	WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);

	return __lan966x_mac_learn(lan966x, port, false, mac, vid, type);
}

static int lan966x_mac_learn_locked(struct lan966x *lan966x, int port,
				    const unsigned char mac[ETH_ALEN],
				    unsigned int vid,
				    enum macaccess_entry_type type)
{
	WARN_ON(type != ENTRYTYPE_NORMAL && type != ENTRYTYPE_LOCKED);

	return __lan966x_mac_learn_locked(lan966x, port, false, mac, vid, type);
}

static int lan966x_mac_forget_locked(struct lan966x *lan966x,
				     const unsigned char mac[ETH_ALEN],
				     unsigned int vid,
				     enum macaccess_entry_type type)
{
	lockdep_assert_held(&lan966x->mac_lock);

	lan966x_mac_select(lan966x, mac, vid);

	/* Issue a forget command */
	lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
	       ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_FORGET),
	       lan966x, ANA_MACACCESS);

	return lan966x_mac_wait_for_completion(lan966x);
}

int lan966x_mac_forget(struct lan966x *lan966x,
		       const unsigned char mac[ETH_ALEN],
		       unsigned int vid,
		       enum macaccess_entry_type type)
{
	int ret;

	spin_lock(&lan966x->mac_lock);
	ret = lan966x_mac_forget_locked(lan966x, mac, vid, type);
	spin_unlock(&lan966x->mac_lock);

	return ret;
}

int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid)
{
	return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED);
}

int lan966x_mac_cpu_forget(struct lan966x *lan966x, const char *addr, u16 vid)
{
	return lan966x_mac_forget(lan966x, addr, vid, ENTRYTYPE_LOCKED);
}
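
/* Purely illustrative usage sketch (not taken from this file): a caller that
 * wants frames addressed to a given MAC to be redirected to the CPU port
 * could install a locked CPU entry and later remove it again, e.g.
 *
 *	lan966x_mac_cpu_learn(lan966x, addr, vid);
 *	...
 *	lan966x_mac_cpu_forget(lan966x, addr, vid);
 *
 * where 'addr' and 'vid' are assumed to be the address in question and the
 * VLAN it should be matched in.
 */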

void lan966x_mac_set_ageing(struct lan966x *lan966x,
			    u32 ageing)
{
	lan_rmw(ANA_AUTOAGE_AGE_PERIOD_SET(ageing / 2),
		ANA_AUTOAGE_AGE_PERIOD,
		lan966x, ANA_AUTOAGE);
}
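
/* Note that AGE_PERIOD is programmed to half of the requested ageing time,
 * so e.g. an ageing time of 300 results in AGE_PERIOD = 150. Presumably an
 * entry is only removed after it has been found idle in two consecutive
 * aging scans, which is a common scheme in this switch family.
 */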

void lan966x_mac_init(struct lan966x *lan966x)
{
	/* Clear the MAC table */
	lan_wr(MACACCESS_CMD_INIT, lan966x, ANA_MACACCESS);
	lan966x_mac_wait_for_completion(lan966x);

	spin_lock_init(&lan966x->mac_lock);
	INIT_LIST_HEAD(&lan966x->mac_entries);
}

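/* Allocate the SW bookkeeping entry for a MAC table entry. GFP_ATOMIC is
 * used because the callers invoke this while holding the mac_lock spinlock.
 */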
static struct lan966x_mac_entry *lan966x_mac_alloc_entry(struct lan966x_port *port,
							 const unsigned char *mac,
							 u16 vid)
{
	struct lan966x_mac_entry *mac_entry;

	mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC);
	if (!mac_entry)
		return NULL;

	memcpy(mac_entry->mac, mac, ETH_ALEN);
	mac_entry->vid = vid;
	mac_entry->port_index = port->chip_port;
	mac_entry->row = LAN966X_MAC_INVALID_ROW;
	mac_entry->lag = port->bond ? true : false;
	return mac_entry;
}

static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x,
							const unsigned char *mac,
							u16 vid, u16 port_index)
{
	struct lan966x_mac_entry *res = NULL;
	struct lan966x_mac_entry *mac_entry;

	list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
		if (mac_entry->vid == vid &&
		    ether_addr_equal(mac, mac_entry->mac) &&
		    mac_entry->port_index == port_index) {
			res = mac_entry;
			break;
		}
	}

	return res;
}

static int lan966x_mac_lookup(struct lan966x *lan966x,
			      const unsigned char mac[ETH_ALEN],
			      unsigned int vid, enum macaccess_entry_type type)
{
	int ret;

	lan966x_mac_select(lan966x, mac, vid);

	/* Issue a read command */
	lan_wr(ANA_MACACCESS_ENTRYTYPE_SET(type) |
	       ANA_MACACCESS_VALID_SET(1) |
	       ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_READ),
	       lan966x, ANA_MACACCESS);

	ret = lan966x_mac_wait_for_completion(lan966x);
	if (ret)
		return ret;

	return ANA_MACACCESS_VALID_GET(lan_rd(lan966x, ANA_MACACCESS));
}

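/* Raise a switchdev FDB notification for the given address and VLAN.
 * info.offloaded is set so that, for the SWITCHDEV_FDB_OFFLOADED event, the
 * bridge marks the corresponding FDB entry as offloaded to hardware.
 */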
static void lan966x_fdb_call_notifiers(enum switchdev_notifier_type type,
				       const char *mac, u16 vid,
				       struct net_device *dev)
{
	struct switchdev_notifier_fdb_info info = { 0 };

	info.addr = mac;
	info.vid = vid;
	info.offloaded = true;
	call_switchdev_notifiers(type, dev, &info.info, NULL);
}

int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port,
			  const unsigned char *addr, u16 vid)
{
	struct lan966x_mac_entry *mac_entry;

	spin_lock(&lan966x->mac_lock);
	if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) {
		spin_unlock(&lan966x->mac_lock);
		return 0;
	}

	/* In case the entry already exists in SW, don't add it again, just
	 * update HW. The actual HW table has to be checked as well, because
	 * an entry may be learned by HW and, before the interrupt gets
	 * handled, the frame may reach the CPU, which would then add the
	 * entry but without the extern_learn flag.
	 */
	mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port);
	if (mac_entry) {
		spin_unlock(&lan966x->mac_lock);
		goto mac_learn;
	}

	mac_entry = lan966x_mac_alloc_entry(port, addr, vid);
	if (!mac_entry) {
		spin_unlock(&lan966x->mac_lock);
		return -ENOMEM;
	}

	list_add_tail(&mac_entry->list, &lan966x->mac_entries);
	spin_unlock(&lan966x->mac_lock);

	lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid,
				   port->bond ?: port->dev);

mac_learn:
	lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED);

	return 0;
}

int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr,
			  u16 vid)
{
	struct lan966x_mac_entry *mac_entry, *tmp;

	spin_lock(&lan966x->mac_lock);
	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
				 list) {
		if (mac_entry->vid == vid &&
		    ether_addr_equal(addr, mac_entry->mac)) {
			lan966x_mac_forget_locked(lan966x, mac_entry->mac,
						  mac_entry->vid,
						  ENTRYTYPE_LOCKED);

			list_del(&mac_entry->list);
			kfree(mac_entry);
		}
	}
	spin_unlock(&lan966x->mac_lock);

	return 0;
}

void lan966x_mac_lag_replace_port_entry(struct lan966x *lan966x,
					struct lan966x_port *src,
					struct lan966x_port *dst)
{
	struct lan966x_mac_entry *mac_entry;

	spin_lock(&lan966x->mac_lock);
	list_for_each_entry(mac_entry, &lan966x->mac_entries, list) {
		if (mac_entry->port_index == src->chip_port &&
		    mac_entry->lag) {
			lan966x_mac_forget_locked(lan966x, mac_entry->mac,
						  mac_entry->vid,
						  ENTRYTYPE_LOCKED);

			lan966x_mac_learn_locked(lan966x, dst->chip_port,
						 mac_entry->mac, mac_entry->vid,
						 ENTRYTYPE_LOCKED);
			mac_entry->port_index = dst->chip_port;
		}
	}
	spin_unlock(&lan966x->mac_lock);
}

void lan966x_mac_lag_remove_port_entry(struct lan966x *lan966x,
				       struct lan966x_port *src)
{
	struct lan966x_mac_entry *mac_entry, *tmp;

	spin_lock(&lan966x->mac_lock);
	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
				 list) {
		if (mac_entry->port_index == src->chip_port &&
		    mac_entry->lag) {
			lan966x_mac_forget_locked(lan966x, mac_entry->mac,
						  mac_entry->vid,
						  ENTRYTYPE_LOCKED);

			list_del(&mac_entry->list);
			kfree(mac_entry);
		}
	}
	spin_unlock(&lan966x->mac_lock);
}

void lan966x_mac_purge_entries(struct lan966x *lan966x)
{
	struct lan966x_mac_entry *mac_entry, *tmp;

	spin_lock(&lan966x->mac_lock);
	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries,
				 list) {
		lan966x_mac_forget_locked(lan966x, mac_entry->mac,
					  mac_entry->vid, ENTRYTYPE_LOCKED);

		list_del(&mac_entry->list);
		kfree(mac_entry);
	}
	spin_unlock(&lan966x->mac_lock);
}

static void lan966x_mac_notifiers(enum switchdev_notifier_type type,
				  unsigned char *mac, u32 vid,
				  struct net_device *dev)
{
	rtnl_lock();
	lan966x_fdb_call_notifiers(type, mac, vid, dev);
	rtnl_unlock();
}

static void lan966x_mac_process_raw_entry(struct lan966x_mac_raw_entry *raw_entry,
					  u8 *mac, u16 *vid, u32 *dest_idx)
{
	mac[0] = (raw_entry->mach >> 8)  & 0xff;
	mac[1] = (raw_entry->mach >> 0)  & 0xff;
	mac[2] = (raw_entry->macl >> 24) & 0xff;
	mac[3] = (raw_entry->macl >> 16) & 0xff;
	mac[4] = (raw_entry->macl >> 8)  & 0xff;
	mac[5] = (raw_entry->macl >> 0)  & 0xff;

	*vid = (raw_entry->mach >> 16) & 0xfff;
	*dest_idx = ANA_MACACCESS_DEST_IDX_GET(raw_entry->maca);
}
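
/* This is the inverse of the encoding done by lan966x_mac_select(): for
 * example, mach = 0x000a0011 and macl = 0x22334455 decode to the MAC address
 * 00:11:22:33:44:55 and VID 10, while the destination index comes from the
 * DEST_IDX field of the ANA_MACACCESS snapshot.
 */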

static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row,
				    struct lan966x_mac_raw_entry *raw_entries)
{
	struct lan966x_mac_entry *mac_entry, *tmp;
	unsigned char mac[ETH_ALEN] __aligned(2);
	struct list_head mac_deleted_entries;
	struct lan966x_port *port;
	u32 dest_idx;
	u32 column;
	u16 vid;

	INIT_LIST_HEAD(&mac_deleted_entries);

	spin_lock(&lan966x->mac_lock);
	list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) {
		bool found = false;

		if (mac_entry->row != row)
			continue;

		for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
			/* All the valid entries are at the start of the row,
			 * so once an invalid entry is found the rest of the
			 * columns can be skipped.
			 */
			if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
				break;

			lan966x_mac_process_raw_entry(&raw_entries[column],
						      mac, &vid, &dest_idx);
			if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
				continue;

			/* If the entry is found in SW, there is nothing
			 * more to do.
			 */
			if (mac_entry->vid == vid &&
			    ether_addr_equal(mac_entry->mac, mac) &&
			    mac_entry->port_index == dest_idx) {
				raw_entries[column].processed = true;
				found = true;
				break;
			}
		}

		if (!found) {
			list_del(&mac_entry->list);
			/* Move the entry from the SW list to a tmp list so
			 * that it can be deleted later.
			 */
			list_add_tail(&mac_entry->list, &mac_deleted_entries);
		}
	}
	spin_unlock(&lan966x->mac_lock);

	list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) {
		/* Notify the bridge that the entry no longer exists
		 * in the HW.
		 */
		port = lan966x->ports[mac_entry->port_index];
		lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE,
				      mac_entry->mac, mac_entry->vid,
				      port->bond ?: port->dev);
		list_del(&mac_entry->list);
		kfree(mac_entry);
	}

	/* Now go through the columns and check for entries that were not in
	 * the SW list; such entries are new, so the bridge needs to be
	 * notified about them.
	 */
	for (column = 0; column < LAN966X_MAC_COLUMNS; ++column) {
		/* All the valid entries are at the start of the row, so once
		 * an invalid entry is found the rest of the columns can be
		 * skipped.
		 */
		if (!ANA_MACACCESS_VALID_GET(raw_entries[column].maca))
			break;

		/* If the entry already exists, then don't do anything */
		if (raw_entries[column].processed)
			continue;

		lan966x_mac_process_raw_entry(&raw_entries[column],
					      mac, &vid, &dest_idx);
		if (WARN_ON(dest_idx >= lan966x->num_phys_ports))
			continue;

		spin_lock(&lan966x->mac_lock);
		mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx);
		if (mac_entry) {
			spin_unlock(&lan966x->mac_lock);
			continue;
		}

		port = lan966x->ports[dest_idx];
		mac_entry = lan966x_mac_alloc_entry(port, mac, vid);
		if (!mac_entry) {
			spin_unlock(&lan966x->mac_lock);
			return;
		}

		mac_entry->row = row;
		list_add_tail(&mac_entry->list, &lan966x->mac_entries);
		spin_unlock(&lan966x->mac_lock);

		lan966x_mac_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
				      mac, vid, port->bond ?: port->dev);
	}
}

irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x)
{
	struct lan966x_mac_raw_entry entry[LAN966X_MAC_COLUMNS] = { 0 };
	u32 index, column;
	bool stop = true;
	u32 val;

	/* Start the scan from row 0, column 0 */
	lan_wr(ANA_MACTINDX_M_INDEX_SET(0) |
	       ANA_MACTINDX_BUCKET_SET(0),
	       lan966x, ANA_MACTINDX);

	while (1) {
		spin_lock(&lan966x->mac_lock);
		lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT),
			ANA_MACACCESS_MAC_TABLE_CMD,
			lan966x, ANA_MACACCESS);
		lan966x_mac_wait_for_completion(lan966x);

		val = lan_rd(lan966x, ANA_MACTINDX);
		index = ANA_MACTINDX_M_INDEX_GET(val);
		column = ANA_MACTINDX_BUCKET_GET(val);

		/* SYNC-GET-NEXT returns all the entries (4 columns) of a row
		 * that has changed, where a change means that a new entry was
		 * added or an entry was removed because of ageing. It returns
		 * all the columns of that row and then moves on to the next
		 * changed row. The stop condition of SYNC-GET-NEXT is when it
		 * reaches row 0, column 3 'directly'. So if SYNC-GET-NEXT
		 * first returns row 0 and column 0, it is required to keep
		 * reading even when it later reaches row 0 and column 3.
		 */
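		/* For example: if only row 7 changed, the scan returns
		 * columns (7,0)..(7,3) and then reports row 0/column 3,
		 * which ends the loop below. If row 0 itself changed, the
		 * scan reports (0,0) first, 'stop' is cleared, columns
		 * (0,0)..(0,3) are read and processed, 'stop' is set again
		 * and only the next report of row 0/column 3 ends the scan.
		 */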
		if (index == 0 && column == 0)
			stop = false;

		if (column == LAN966X_MAC_COLUMNS - 1 &&
		    index == 0 && stop) {
			spin_unlock(&lan966x->mac_lock);
			break;
		}

		entry[column].mach = lan_rd(lan966x, ANA_MACHDATA);
		entry[column].macl = lan_rd(lan966x, ANA_MACLDATA);
		entry[column].maca = lan_rd(lan966x, ANA_MACACCESS);
		spin_unlock(&lan966x->mac_lock);

		/* Once all the columns have been read, process them */
		if (column == LAN966X_MAC_COLUMNS - 1) {
			lan966x_mac_irq_process(lan966x, index, entry);
			/* A row was processed, so it is safe to assume that
			 * the next row/column can be the stop condition.
			 */
			stop = true;
		}
	}

	lan_rmw(ANA_ANAINTR_INTR_SET(0),
		ANA_ANAINTR_INTR,
		lan966x, ANA_ANAINTR);

	return IRQ_HANDLED;
}