// SPDX-License-Identifier: GPL-2.0+

#include <linux/if_bridge.h>

#include "lan966x_main.h"

/* Recompute the destination and aggregation PGIDs after a change in LAG
 * membership or in the link state of a LAG member.
 */
static void lan966x_lag_set_aggr_pgids(struct lan966x *lan966x)
{
	u32 visited = GENMASK(lan966x->num_phys_ports - 1, 0);
	int p, lag, i;

	/* Reset destination and aggregation PGIDs */
	for (p = 0; p < lan966x->num_phys_ports; ++p)
		lan_wr(ANA_PGID_PGID_SET(BIT(p)),
		       lan966x, ANA_PGID(p));

	for (p = PGID_AGGR; p < PGID_SRC; ++p)
		lan_wr(ANA_PGID_PGID_SET(visited),
		       lan966x, ANA_PGID(p));

	/* The visited ports bitmask holds the list of ports offloading any
	 * bonding interface. Initially we mark all these ports as unvisited,
	 * then every time we visit a port in this bitmask, we know that it is
	 * the lowest numbered port, i.e. the one whose logical ID == physical
	 * port ID == LAG ID. So we mark as visited all further ports in the
	 * bitmask that are offloading the same bonding interface. This way,
	 * we set up the aggregation PGIDs only once per bonding interface.
	 */
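	/* For example, with ports 0 and 1 offloading one bond and ports 2
	 * and 3 offloading another, bits 0-3 are cleared below; the LAG loop
	 * then programs the PGIDs once for lag == 0 and once for lag == 2,
	 * and skips lag == 1 and lag == 3 because they are marked visited
	 * again while their lowest numbered member is handled.
	 */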
	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		struct lan966x_port *port = lan966x->ports[p];

		if (!port || !port->bond)
			continue;

		visited &= ~BIT(p);
	}

	/* Now, set PGIDs for each active LAG */
	for (lag = 0; lag < lan966x->num_phys_ports; ++lag) {
		struct lan966x_port *port = lan966x->ports[lag];
		int num_active_ports = 0;
		struct net_device *bond;
		unsigned long bond_mask;
		u8 aggr_idx[16];

		if (!port || !port->bond || (visited & BIT(lag)))
			continue;

		bond = port->bond;
		bond_mask = lan966x_lag_get_mask(lan966x, bond);

		for_each_set_bit(p, &bond_mask, lan966x->num_phys_ports) {
			struct lan966x_port *port = lan966x->ports[p];

			if (!port)
				continue;

			lan_wr(ANA_PGID_PGID_SET(bond_mask),
			       lan966x, ANA_PGID(p));
			if (port->lag_tx_active)
				aggr_idx[num_active_ports++] = p;
		}

		for (i = PGID_AGGR; i < PGID_SRC; ++i) {
			u32 ac;

			ac = lan_rd(lan966x, ANA_PGID(i));
			ac &= ~bond_mask;
			/* Don't do division by zero if there was no active
			 * port. Just make all aggregation codes zero.
			 */
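			/* With two active ports, for example, the modulo
			 * below alternates between them from one aggregation
			 * code to the next, spreading hashed flows across
			 * the LAG members.
			 */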
			if (num_active_ports)
				ac |= BIT(aggr_idx[i % num_active_ports]);
			lan_wr(ANA_PGID_PGID_SET(ac),
			       lan966x, ANA_PGID(i));
		}

		/* Mark all ports in the same LAG as visited to avoid applying
		 * the same config again.
		 */
		for (p = lag; p < lan966x->num_phys_ports; p++) {
			struct lan966x_port *port = lan966x->ports[p];

			if (!port)
				continue;

			if (port->bond == bond)
				visited |= BIT(p);
		}
	}
}

/* Set the logical port ID of each port: members of a bond share the LAG ID
 * (the lowest numbered member port), standalone ports keep their chip port.
 */
static void lan966x_lag_set_port_ids(struct lan966x *lan966x)
{
	struct lan966x_port *port;
	u32 bond_mask;
	u32 lag_id;
	int p;

	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		port = lan966x->ports[p];
		if (!port)
			continue;

		lag_id = port->chip_port;

		bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
		if (bond_mask)
			lag_id = __ffs(bond_mask);

		lan_rmw(ANA_PORT_CFG_PORTID_VAL_SET(lag_id),
			ANA_PORT_CFG_PORTID_VAL,
			lan966x, ANA_PORT_CFG(port->chip_port));
	}
}

static void lan966x_lag_update_ids(struct lan966x *lan966x)
{
	lan966x_lag_set_port_ids(lan966x);
	lan966x_update_fwd_mask(lan966x);
	lan966x_lag_set_aggr_pgids(lan966x);
}

int lan966x_lag_port_join(struct lan966x_port *port,
			  struct net_device *brport_dev,
			  struct net_device *bond,
			  struct netlink_ext_ack *extack)
{
	struct lan966x *lan966x = port->lan966x;
	struct net_device *dev = port->dev;
	u32 lag_id = -1;
	u32 bond_mask;
	int err;

	bond_mask = lan966x_lag_get_mask(lan966x, bond);
	if (bond_mask)
		lag_id = __ffs(bond_mask);

	port->bond = bond;
	lan966x_lag_update_ids(lan966x);

	err = switchdev_bridge_port_offload(brport_dev, dev, port,
					    &lan966x_switchdev_nb,
					    &lan966x_switchdev_blocking_nb,
					    false, extack);
	if (err)
		goto out;

	lan966x_port_stp_state_set(port, br_port_get_stp_state(brport_dev));

	if (lan966x_lag_first_port(port->bond, port->dev) &&
	    lag_id != -1)
		lan966x_mac_lag_replace_port_entry(lan966x,
						   lan966x->ports[lag_id],
						   port);

	return 0;

out:
	port->bond = NULL;
	lan966x_lag_update_ids(lan966x);

	return err;
}

void lan966x_lag_port_leave(struct lan966x_port *port, struct net_device *bond)
{
	struct lan966x *lan966x = port->lan966x;
	u32 bond_mask;
	u32 lag_id;

	if (lan966x_lag_first_port(port->bond, port->dev)) {
		bond_mask = lan966x_lag_get_mask(lan966x, port->bond);
		bond_mask &= ~BIT(port->chip_port);
		if (bond_mask) {
			lag_id = __ffs(bond_mask);
			lan966x_mac_lag_replace_port_entry(lan966x, port,
							   lan966x->ports[lag_id]);
		} else {
			lan966x_mac_lag_remove_port_entry(lan966x, port);
		}
	}

	port->bond = NULL;
	lan966x_lag_update_ids(lan966x);
	lan966x_port_stp_state_set(port, BR_STATE_FORWARDING);
}

/* Return true if every port that already offloads a bond uses @hash_type;
 * ANA_AGGR_CFG is a single, chip-wide register, so all LAGs must hash the
 * same way.
 */
static bool lan966x_lag_port_check_hash_types(struct lan966x *lan966x,
					      enum netdev_lag_hash hash_type)
{
	int p;

	for (p = 0; p < lan966x->num_phys_ports; ++p) {
		struct lan966x_port *port = lan966x->ports[p];

		if (!port || !port->bond)
			continue;

		if (port->hash_type != hash_type)
			return false;
	}

	return true;
}

int lan966x_lag_port_prechangeupper(struct net_device *dev,
				    struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	struct netdev_lag_upper_info *lui;
	struct netlink_ext_ack *extack;

	extack = netdev_notifier_info_to_extack(&info->info);
	lui = info->upper_info;
	if (!lui) {
		port->hash_type = NETDEV_LAG_HASH_NONE;
		return NOTIFY_DONE;
	}

	if (lui->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
		NL_SET_ERR_MSG_MOD(extack,
				   "LAG device using unsupported Tx type");
		return -EINVAL;
	}

	if (!lan966x_lag_port_check_hash_types(lan966x, lui->hash_type)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "LAG devices can have only the same hash_type");
		return -EINVAL;
	}

	switch (lui->hash_type) {
	case NETDEV_LAG_HASH_L2:
		lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_SMAC_ENA_SET(1),
		       lan966x, ANA_AGGR_CFG);
		break;
	case NETDEV_LAG_HASH_L34:
		lan_wr(ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA_SET(1),
		       lan966x, ANA_AGGR_CFG);
		break;
	case NETDEV_LAG_HASH_L23:
		lan_wr(ANA_AGGR_CFG_AC_DMAC_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_SMAC_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA_SET(1) |
		       ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA_SET(1),
		       lan966x, ANA_AGGR_CFG);
		break;
	default:
		NL_SET_ERR_MSG_MOD(extack,
				   "LAG device using unsupported hash type");
		return -EINVAL;
	}

	port->hash_type = lui->hash_type;

	return NOTIFY_OK;
}

int lan966x_lag_port_changelowerstate(struct net_device *dev,
				      struct netdev_notifier_changelowerstate_info *info)
{
	struct netdev_lag_lower_state_info *lag = info->lower_state_info;
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	bool is_active;

	if (!port->bond)
		return NOTIFY_DONE;

	is_active = lag->link_up && lag->tx_enabled;
	if (port->lag_tx_active == is_active)
		return NOTIFY_DONE;

	port->lag_tx_active = is_active;
	lan966x_lag_set_aggr_pgids(lan966x);

	return NOTIFY_OK;
}

int lan966x_lag_netdev_prechangeupper(struct net_device *dev,
				      struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port;
	struct net_device *lower;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!lan966x_netdevice_check(lower))
			continue;

		port = netdev_priv(lower);
		if (port->bond != dev)
			continue;

		err = lan966x_port_prechangeupper(lower, dev, info);
		if (err)
			return err;
	}

	return NOTIFY_DONE;
}

int lan966x_lag_netdev_changeupper(struct net_device *dev,
				   struct netdev_notifier_changeupper_info *info)
{
	struct lan966x_port *port;
	struct net_device *lower;
	struct list_head *iter;
	int err;

	netdev_for_each_lower_dev(dev, lower, iter) {
		if (!lan966x_netdevice_check(lower))
			continue;

		port = netdev_priv(lower);
		if (port->bond != dev)
			continue;

		err = lan966x_port_changeupper(lower, dev, info);
		if (err)
			return err;
	}

	return NOTIFY_DONE;
}

/* Return true if @dev is the lowest numbered port offloading @lag, i.e. the
 * port whose chip port is used as the LAG ID.
 */
bool lan966x_lag_first_port(struct net_device *lag, struct net_device *dev)
{
	struct lan966x_port *port = netdev_priv(dev);
	struct lan966x *lan966x = port->lan966x;
	unsigned long bond_mask;

	if (port->bond != lag)
		return false;

	bond_mask = lan966x_lag_get_mask(lan966x, lag);
	if (bond_mask && port->chip_port == __ffs(bond_mask))
		return true;

	return false;
}

/* Return a bitmask of the physical ports that currently offload @bond. */
u32 lan966x_lag_get_mask(struct lan966x *lan966x, struct net_device *bond)
{
	struct lan966x_port *port;
	u32 mask = 0;
	int p;

	if (!bond)
		return mask;

	for (p = 0; p < lan966x->num_phys_ports; p++) {
		port = lan966x->ports[p];
		if (!port)
			continue;

		if (port->bond == bond)
			mask |= BIT(p);
	}

	return mask;
}