// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/pkt_cls.h>

#include "sparx5_main.h"
#include "sparx5_qos.h"

/* Calculate new base_time based on cycle_time.
 *
 * The hardware requires a base_time that is always in the future.
 * We define threshold_time as current_time + (2 * cycle_time).
 * If base_time is below threshold_time this function recalculates it to be in
 * the interval:
 * threshold_time <= base_time < (threshold_time + cycle_time)
 *
 * A very simple algorithm would be:
 * new_base_time = org_base_time + N * cycle_time
 * using the lowest N so that (new_base_time >= threshold_time), but stepping
 * one cycle_time at a time can take very long when base_time is far in the
 * past, so the implementation below advances in power-of-two multiples of
 * cycle_time instead.
 */
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
			  const ktime_t org_base_time, ktime_t *new_base_time)
{
	ktime_t current_time, threshold_time, new_time;
	struct timespec64 ts;
	u64 nr_of_cycles_p2;
	u64 nr_of_cycles;
	u64 diff_time;

	new_time = org_base_time;

	sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
	current_time = timespec64_to_ktime(ts);
	threshold_time = current_time + (2 * cycle_time);
	diff_time = threshold_time - new_time;
	nr_of_cycles = div_u64(diff_time, cycle_time);
	nr_of_cycles_p2 = 1; /* Use 2^0 as start value */

	if (new_time >= threshold_time) {
		*new_base_time = new_time;
		return;
	}

	/* Calculate the smallest power of 2 (nr_of_cycles_p2)
	 * that is larger than nr_of_cycles.
	 */
	while (nr_of_cycles_p2 < nr_of_cycles)
		nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */

	/* Add as big chunks (power of 2 * cycle_time)
	 * as possible for each power of 2
	 */
	while (nr_of_cycles_p2) {
		if (new_time < threshold_time) {
			new_time += cycle_time * nr_of_cycles_p2;
			while (new_time < threshold_time)
				new_time += cycle_time * nr_of_cycles_p2;
			new_time -= cycle_time * nr_of_cycles_p2;
		}
		nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
	}
	new_time += cycle_time;
	*new_base_time = new_time;
}

/* Max rates for leak groups */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
	1048568, /*  1.049 Gbps */
	2621420, /*  2.621 Gbps */
	10485680, /* 10.486 Gbps */
	26214200 /* 26.214 Gbps */
};

u32 sparx5_get_hsch_max_group_rate(int grp)
{
	return spx5_hsch_max_group_rate[grp];
}

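/* Software view of the scheduler hierarchy: per-layer leak group state.
 *
 * A leak group is a hardware-maintained linked list of scheduler elements
 * (SEs) that share a common leak (update) rate. A leak time of zero disables
 * the group, which is also how an empty group is represented.
 */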
static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];

static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
	return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
}

static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
				    u32 leak_time)
{
	spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
		HSCH_HSCH_TIMER_CFG(layer, group));
}

static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
	return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
}

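/* Return the leak list successor of SE 'idx'; the last SE in a group links
 * back to itself.
 */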
static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
	return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
}

static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 itr, next;

	itr = sparx5_lg_get_first(sparx5, layer, group);

	for (;;) {
		next = sparx5_lg_get_next(sparx5, layer, group, itr);
		if (itr == next)
			return itr;

		itr = next;
	}
}

static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)
{
	return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
}

static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
			       u32 idx)
{
	return idx == sparx5_lg_get_first(sparx5, layer, group);
}

static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
{
	return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
}

static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
{
	if (sparx5_lg_is_empty(sparx5, layer, group))
		return false;

	return sparx5_lg_get_first(sparx5, layer, group) ==
	       sparx5_lg_get_last(sparx5, layer, group);
}

static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
			     u32 leak_time)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
}

static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, 0);
}

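/* Find the leak group in 'layer' that currently contains SE 'idx', if any */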
static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
					u32 idx, u32 *group)
{
	u32 itr, next;
	int i;

	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
		if (sparx5_lg_is_empty(sparx5, layer, i))
			continue;

		itr = sparx5_lg_get_first(sparx5, layer, i);

		for (;;) {
			next = sparx5_lg_get_next(sparx5, layer, i, itr);

			if (itr == idx) {
				*group = i;
				return 0; /* Found it */
			}
			if (itr == next)
				break; /* Was not found */

			itr = next;
		}
	}

	return -1;
}

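/* Find the first (slowest) leak group whose maximum rate can serve 'rate' */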
static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
{
	struct sparx5_layer *l = &layers[layer];
	struct sparx5_lg *lg;
	u32 i;

	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
		lg = &l->leak_groups[i];
		if (rate <= lg->max_rate) {
			*group = i;
			return 0;
		}
	}

	return -1;
}

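/* Locate SE 'idx' in a leak group and report its predecessor, successor and
 * the first SE of the list.
 */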
static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
				  u32 idx, u32 *prev, u32 *next, u32 *first)
{
	u32 itr;

	*first = sparx5_lg_get_first(sparx5, layer, group);
	*prev = *first;
	*next = *first;
	itr = *first;

	for (;;) {
		*next = sparx5_lg_get_next(sparx5, layer, group, itr);

		if (itr == idx)
			return 0; /* Found it */

		if (itr == *next)
			return -1; /* Was not found */

		*prev = itr;
		itr = *next;
	}

	return -1;
}

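/* Rewrite the leak group list: leaking is stopped while the first element and
 * the link for 'idx' are updated, and restarted unless the group ends up
 * empty.
 */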
static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 se_first, u32 idx, u32 idx_next, bool empty)
{
	u32 leak_time = layers[layer].leak_groups[group].leak_time;

	/* Stop leaking */
	sparx5_lg_disable(sparx5, layer, group);

	if (empty)
		return 0;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Link elements */
	spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
		HSCH_SE_CONNECT(idx));

	/* Set the first element. */
	spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
		 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
		 HSCH_HSCH_LEAK_CFG(layer, group));

	/* Start leaking */
	sparx5_lg_enable(sparx5, layer, group, leak_time);

	return 0;
}

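/* Unlink SE 'idx' from its leak group and repair the list around it */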
static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
{
	u32 first, next, prev;
	bool empty = false;

	/* idx *must* be present in the leak group */
	WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
				       &first) < 0);

	if (sparx5_lg_is_singular(sparx5, layer, group)) {
		empty = true;
	} else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
		/* idx is removed, prev is now last */
		idx = prev;
		next = prev;
	} else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
		/* idx is removed and points to itself, first is next */
		first = next;
		next = idx;
	} else {
		/* Next is not touched */
		idx = prev;
	}

	return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
				  empty);
}

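/* Insert SE 'idx' at the head of 'new_group', moving it out of its current
 * group first if it already belongs to a different one.
 */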
static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
			 u32 idx)
{
	u32 first, next, old_group;

	pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
		 idx);

	/* Is this SE already shaping ? */
	if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
		if (old_group != new_group) {
			/* Delete from old group */
			sparx5_lg_del(sparx5, layer, old_group, idx);
		} else {
			/* Nothing to do here */
			return 0;
		}
	}

	/* We always add to head of the list */
	first = idx;

	if (sparx5_lg_is_empty(sparx5, layer, new_group))
		next = idx;
	else
		next = sparx5_lg_get_first(sparx5, layer, new_group);

	return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
				  false);
}

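/* Program the shaper (frame mode, committed rate and burst) for an SE and add
 * it to, or remove it from, its leak group.
 */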
static int sparx5_shaper_conf_set(struct sparx5_port *port,
				  const struct sparx5_shaper *sh, u32 layer,
				  u32 idx, u32 group)
{
	int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
	struct sparx5 *sparx5 = port->sparx5;

	if (!sh->rate && !sh->burst)
		sparx5_lg_action = &sparx5_lg_del;
	else
		sparx5_lg_action = &sparx5_lg_add;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Set frame mode */
	spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
		 sparx5, HSCH_SE_CFG(idx));

	/* Set committed rate and burst */
	spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
			HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
		sparx5, HSCH_CIR_CFG(idx));

	/* This has to be done after the shaper configuration has been set */
	sparx5_lg_action(sparx5, layer, group, idx);

	return 0;
}

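/* Convert a relative DWRR weight to a hardware cost: the band with the
 * smallest weight gets the highest cost and larger weights get
 * proportionally lower costs. The "<< 4 ... + 8) >> 4" sequence rounds the
 * scaled value to the nearest integer before it is made zero-based.
 */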
static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
{
	return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
	       1;
}

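/* Program DWRR arbitration for the port's SE: select the scheduler layer and
 * SE, set how many inputs take part in DWRR and write the per-input cost.
 */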
static int sparx5_dwrr_conf_set(struct sparx5_port *port,
				struct sparx5_dwrr *dwrr)
{
	u32 layer = is_sparx5(port->sparx5) ? 2 : 1;
	int i;

	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer) |
		 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
		 port->sparx5, HSCH_HSCH_CFG_CFG);

	/* Number of *lower* indexes that are arbitrated dwrr */
	spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
		 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
		 HSCH_SE_CFG(port->portno));

	for (i = 0; i < dwrr->count; i++) {
		spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
			 HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
			 HSCH_DWRR_ENTRY(i));
	}

	return 0;
}

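/* Pre-calculate leak time, resolution and capacity for every leak group in
 * every layer, and leave all groups disabled (empty).
 */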
static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	struct sparx5_layer *layer;
	u32 sys_clk_per_100ps;
	struct sparx5_lg *lg;
	u32 leak_time_us;
	int i, ii;

	sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);

	for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
		layer = &layers[i];
		for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
			lg = &layer->leak_groups[ii];
			lg->max_rate = ops->get_hsch_max_group_rate(ii);

			/* Calculate the leak time in us, to serve a maximum
			 * rate of 'max_rate' for this group
			 */
			leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;

			/* Hardware wants leak time in ns */
			lg->leak_time = 1000 * leak_time_us;

			/* Calculate resolution */
			lg->resolution = 1000 / leak_time_us;

			/* Maximum number of shapers that can be served by
			 * this leak group
			 */
			lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;

			/* Example:
			 * Wanted bandwidth is 100 Mbit:
			 *
			 * 100 Mbit/s can be served by leak group zero.
			 *
			 * leak_time is 125000 ns.
			 * resolution is: 8
			 *
			 * cir          = 100000 / 8 = 12500
			 * leaks_pr_sec = 10^9 / 125000 = 8000
			 * bw           = 12500 * 8000 = 10^8 (100 Mbit)
			 */

			/* Disable by default - this also indicates an empty
			 * leak group
			 */
			sparx5_lg_disable(sparx5, i, ii);
		}
	}

	return 0;
}

int sparx5_qos_init(struct sparx5 *sparx5)
{
	int ret;

	ret = sparx5_leak_groups_init(sparx5);
	if (ret < 0)
		return ret;

	ret = sparx5_dcb_init(sparx5);
	if (ret < 0)
		return ret;

	sparx5_psfp_init(sparx5);

	return 0;
}

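/* Offload mqprio: exactly SPX5_PRIOS traffic classes are supported, each
 * mapped to a single hardware queue.
 */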
int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
{
	int i;

	if (num_tc != SPX5_PRIOS) {
		netdev_err(ndev, "Only %d traffic classes supported\n",
			   SPX5_PRIOS);
		return -EINVAL;
	}

	netdev_set_num_tc(ndev, num_tc);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(ndev, i, 1, i);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}

int sparx5_tc_mqprio_del(struct net_device *ndev)
{
	netdev_reset_tc(ndev);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}

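/* Offload a tc-tbf shaper. The rate is converted from bytes/s to kbit/s,
 * matched against a leak group, and then programmed in units of that group's
 * resolution and of SPX5_SE_BURST_UNIT.
 */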
int sparx5_tc_tbf_add(struct sparx5_port *port,
		      struct tc_tbf_qopt_offload_replace_params *params,
		      u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {
		.mode = SPX5_SE_MODE_DATARATE,
		.rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
		.burst = params->max_size,
	};
	struct sparx5_lg *lg;
	u32 group;

	/* Find suitable group for this se */
	if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
		pr_debug("Could not find leak group for se with rate: %d",
			 sh.rate);
		return -EINVAL;
	}

	lg = &layers[layer].leak_groups[group];

	pr_debug("Found matching group (speed: %d)\n", lg->max_rate);

	if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
		return -EINVAL;

	/* Calculate committed rate and burst */
	sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
	sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);

	if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
		return -EINVAL;

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}

int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {0};
	u32 group;

	sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}

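/* Offload tc-ets: weights are normalized against the smallest DWRR weight,
 * strict bands (zero quanta) are skipped, and band order is reversed to match
 * the hardware's strict-priority direction.
 */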
int sparx5_tc_ets_add(struct sparx5_port *port,
		      struct tc_ets_qopt_offload_replace_params *params)
{
	struct sparx5_dwrr dwrr = {0};
	/* Minimum weight for each iteration */
	unsigned int w_min = 100;
	int i;

	/* Find minimum weight for all dwrr bands */
	for (i = 0; i < SPX5_PRIOS; i++) {
		if (params->quanta[i] == 0)
			continue;
		w_min = min(w_min, params->weights[i]);
	}

	for (i = 0; i < SPX5_PRIOS; i++) {
		/* Strict band; skip */
		if (params->quanta[i] == 0)
			continue;

		dwrr.count++;

		/* On the sparx5, bands with higher indexes are preferred and
		 * arbitrated strict. Strict bands are put in the lower indexes,
		 * by tc, so we reverse the bands here.
		 *
		 * Also convert the weight to something the hardware
		 * understands.
		 */
		dwrr.cost[SPX5_PRIOS - i - 1] =
			sparx5_weight_to_hw_cost(w_min, params->weights[i]);
	}

	return sparx5_dwrr_conf_set(port, &dwrr);
}

int sparx5_tc_ets_del(struct sparx5_port *port)
{
	struct sparx5_dwrr dwrr = {0};

	return sparx5_dwrr_conf_set(port, &dwrr);
}