v6.8
// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
 */

#include <net/pkt_cls.h>

#include "sparx5_main.h"
#include "sparx5_qos.h"

/* Calculate new base_time based on cycle_time.
 *
 * The hardware requires a base_time that is always in the future.
 * We define threshold_time as current_time + (2 * cycle_time).
 * If base_time is below threshold_time this function recalculates it to be in
 * the interval:
 * threshold_time <= base_time < (threshold_time + cycle_time)
 *
 * A very simple algorithm could be like this:
 * new_base_time = org_base_time + N * cycle_time
 * using the lowest N so that new_base_time >= threshold_time.
 */
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
			  const ktime_t org_base_time, ktime_t *new_base_time)
{
	ktime_t current_time, threshold_time, new_time;
	struct timespec64 ts;
	u64 nr_of_cycles_p2;
	u64 nr_of_cycles;
	u64 diff_time;

	new_time = org_base_time;

	sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
	current_time = timespec64_to_ktime(ts);
	threshold_time = current_time + (2 * cycle_time);
	diff_time = threshold_time - new_time;
	nr_of_cycles = div_u64(diff_time, cycle_time);
	nr_of_cycles_p2 = 1; /* Use 2^0 as start value */

	if (new_time >= threshold_time) {
		*new_base_time = new_time;
		return;
	}

	/* Calculate the smallest power of 2 (nr_of_cycles_p2)
	 * that is larger than nr_of_cycles.
	 */
	while (nr_of_cycles_p2 < nr_of_cycles)
		nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */

	/* Add as big chunks (power of 2 * cycle_time)
	 * as possible for each power of 2
	 */
	while (nr_of_cycles_p2) {
		if (new_time < threshold_time) {
			new_time += cycle_time * nr_of_cycles_p2;
			while (new_time < threshold_time)
				new_time += cycle_time * nr_of_cycles_p2;
			new_time -= cycle_time * nr_of_cycles_p2;
		}
		nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
	}
	new_time += cycle_time;
	*new_base_time = new_time;
}
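
/* Illustrative example of the algorithm above (made-up numbers): with
 * cycle_time = 1000 ns, org_base_time = 0 and current_time = 10500 ns,
 * threshold_time = 12500 ns, nr_of_cycles = 12 and nr_of_cycles_p2 = 16.
 * Each power-of-2 chunk (16, 8, 4, 2, 1 cycles) is kept only if adding it
 * leaves new_time below threshold_time, so the 8- and 4-cycle chunks are
 * kept (new_time = 12000 ns) and the final "+ cycle_time" yields 13000 ns,
 * which lies inside [threshold_time, threshold_time + cycle_time).
 */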

/* Max rates for leak groups */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
	1048568, /*  1.049 Gbps */
	2621420, /*  2.621 Gbps */
	10485680, /* 10.486 Gbps */
	26214200 /* 26.214 Gbps */
};
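
/* The group rates above use the same unit as the shaper rates handled in
 * sparx5_tc_tbf_add(), i.e. kbit/s (rate_bytes_ps is divided by 1000 and
 * multiplied by 8 before the leak group lookup), which is why 1048568 reads
 * as roughly 1.049 Gbps.
 */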

static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];

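/* Each leak group is a linked list of scheduler elements (SEs): the head is
 * held in the LEAK_FIRST field of HSCH_HSCH_LEAK_CFG, every element points to
 * its successor via HSCH_SE_CONNECT, and the last element points to itself.
 * A leak_time of zero in HSCH_HSCH_TIMER_CFG marks the group as
 * disabled/empty.
 */
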
static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
	return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
}

static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
				    u32 leak_time)
{
	spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
		HSCH_HSCH_TIMER_CFG(layer, group));
}

static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
	return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
}

static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)
{
	u32 value;

	value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
	return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
}

static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
{
	u32 itr, next;

	itr = sparx5_lg_get_first(sparx5, layer, group);

	for (;;) {
		next = sparx5_lg_get_next(sparx5, layer, group, itr);
		if (itr == next)
			return itr;

		itr = next;
	}
}

static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)
{
	return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
}

static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
			       u32 idx)
{
	return idx == sparx5_lg_get_first(sparx5, layer, group);
}

static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
{
	return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
}

static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
{
	if (sparx5_lg_is_empty(sparx5, layer, group))
		return false;

	return sparx5_lg_get_first(sparx5, layer, group) ==
	       sparx5_lg_get_last(sparx5, layer, group);
}

static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
			     u32 leak_time)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
}

static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, 0);
}

static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
					u32 idx, u32 *group)
{
	u32 itr, next;
	int i;

	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
		if (sparx5_lg_is_empty(sparx5, layer, i))
			continue;

		itr = sparx5_lg_get_first(sparx5, layer, i);

		for (;;) {
			next = sparx5_lg_get_next(sparx5, layer, i, itr);

			if (itr == idx) {
				*group = i;
				return 0; /* Found it */
			}
			if (itr == next)
				break; /* Was not found */

			itr = next;
		}
	}

	return -1;
}

static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
{
	struct sparx5_layer *l = &layers[layer];
	struct sparx5_lg *lg;
	u32 i;

	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
		lg = &l->leak_groups[i];
		if (rate <= lg->max_rate) {
			*group = i;
			return 0;
		}
	}

	return -1;
}

static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
				  u32 idx, u32 *prev, u32 *next, u32 *first)
{
	u32 itr;

	*first = sparx5_lg_get_first(sparx5, layer, group);
	*prev = *first;
	*next = *first;
	itr = *first;

	for (;;) {
		*next = sparx5_lg_get_next(sparx5, layer, group, itr);

		if (itr == idx)
			return 0; /* Found it */

		if (itr == *next)
			return -1; /* Was not found */

		*prev = itr;
		itr = *next;
	}

	return -1;
}

static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 se_first, u32 idx, u32 idx_next, bool empty)
{
	u32 leak_time = layers[layer].leak_groups[group].leak_time;

	/* Stop leaking */
	sparx5_lg_disable(sparx5, layer, group);

	if (empty)
		return 0;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Link elements */
	spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
		HSCH_SE_CONNECT(idx));

	/* Set the first element. */
	spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
		 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
		 HSCH_HSCH_LEAK_CFG(layer, group));

	/* Start leaking */
	sparx5_lg_enable(sparx5, layer, group, leak_time);

	return 0;
}

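/* Unlink an element from a leak group list. Illustrative cases for a group
 * A -> B -> C, where A is first and C (the tail) links to itself:
 * - removing B rewrites A's link to point at C;
 * - removing C makes B link to itself, so B becomes the new tail;
 * - removing A moves the group head to B and lets A link to itself;
 * - removing the only remaining element disables (empties) the group.
 */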
static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
{
	u32 first, next, prev;
	bool empty = false;

	/* idx *must* be present in the leak group */
	WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
				       &first) < 0);

	if (sparx5_lg_is_singular(sparx5, layer, group)) {
		empty = true;
	} else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
		/* idx is removed, prev is now last */
		idx = prev;
		next = prev;
	} else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
		/* idx is removed and points to itself, first is next */
		first = next;
		next = idx;
	} else {
		/* Next is not touched */
		idx = prev;
	}

	return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
				  empty);
}

static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
			 u32 idx)
{
	u32 first, next, old_group;

	pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
		 idx);

	/* Is this SE already shaping? */
	if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
		if (old_group != new_group) {
			/* Delete from old group */
			sparx5_lg_del(sparx5, layer, old_group, idx);
		} else {
			/* Nothing to do here */
			return 0;
		}
	}

	/* We always add to head of the list */
	first = idx;

	if (sparx5_lg_is_empty(sparx5, layer, new_group))
		next = idx;
	else
		next = sparx5_lg_get_first(sparx5, layer, new_group);

	return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
				  false);
}

static int sparx5_shaper_conf_set(struct sparx5_port *port,
				  const struct sparx5_shaper *sh, u32 layer,
				  u32 idx, u32 group)
{
	int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
	struct sparx5 *sparx5 = port->sparx5;

	if (!sh->rate && !sh->burst)
		sparx5_lg_action = &sparx5_lg_del;
	else
		sparx5_lg_action = &sparx5_lg_add;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Set frame mode */
	spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
		 sparx5, HSCH_SE_CFG(idx));

	/* Set committed rate and burst */
	spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
			HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
		sparx5, HSCH_CIR_CFG(idx));

	/* This has to be done after the shaper configuration has been set */
	sparx5_lg_action(sparx5, layer, group, idx);

	return 0;
}

static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
{
	return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
	       1;
}
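
/* Illustrative cost calculation, assuming SPX5_DWRR_COST_MAX is 63: the
 * smallest weight maps to the largest cost, e.g. weight_min = 10 and
 * weight = 10 give (((63 << 4) * 10 / 10 + 8) >> 4) - 1 = 62, while
 * weight = 20 gives (((63 << 4) * 10 / 20 + 8) >> 4) - 1 = 31, i.e. the
 * cost is roughly inversely proportional to the weight; the "+ 8" and
 * ">> 4" round the scaled intermediate value to the nearest integer.
 */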

static int sparx5_dwrr_conf_set(struct sparx5_port *port,
				struct sparx5_dwrr *dwrr)
{
	int i;

	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
		 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
		 port->sparx5, HSCH_HSCH_CFG_CFG);

	/* Number of *lower* indexes that are arbitrated dwrr */
	spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
		 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
		 HSCH_SE_CFG(port->portno));

	for (i = 0; i < dwrr->count; i++) {
		spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
			 HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
			 HSCH_DWRR_ENTRY(i));
	}

	return 0;
}

static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
	struct sparx5_layer *layer;
	u32 sys_clk_per_100ps;
	struct sparx5_lg *lg;
	u32 leak_time_us;
	int i, ii;

	sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);

	for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
		layer = &layers[i];
		for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
			lg = &layer->leak_groups[ii];
			lg->max_rate = spx5_hsch_max_group_rate[ii];

			/* Calculate the leak time in us, to serve a maximum
			 * rate of 'max_rate' for this group
			 */
			leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;

			/* Hardware wants leak time in ns */
			lg->leak_time = 1000 * leak_time_us;

			/* Calculate resolution */
			lg->resolution = 1000 / leak_time_us;

			/* Maximum number of shapers that can be served by
			 * this leak group
			 */
			lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;

			/* Example:
			 * Wanted bandwidth is 100 Mbit/s:
			 *
			 * 100 Mbit/s can be served by leak group zero.
			 *
			 * leak_time is 125000 ns.
			 * resolution is: 8
			 *
			 * cir          = 100000 / 8 = 12500
			 * leaks_pr_sec = 10^9 / 125000 = 8000
			 * bw           = 12500 * 8000 = 10^8 (100 Mbit)
			 */

			/* Disable by default - this also indicates an empty
			 * leak group
			 */
			sparx5_lg_disable(sparx5, i, ii);
		}
	}

	return 0;
}

int sparx5_qos_init(struct sparx5 *sparx5)
{
	int ret;

	ret = sparx5_leak_groups_init(sparx5);
	if (ret < 0)
		return ret;

	ret = sparx5_dcb_init(sparx5);
	if (ret < 0)
		return ret;

	sparx5_psfp_init(sparx5);

	return 0;
}

int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
{
	int i;

	if (num_tc != SPX5_PRIOS) {
		netdev_err(ndev, "Only %d traffic classes supported\n",
			   SPX5_PRIOS);
		return -EINVAL;
	}

	netdev_set_num_tc(ndev, num_tc);

	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(ndev, i, 1, i);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}
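
/* For reference, an offload like the one above is typically triggered from
 * user space with something like (illustrative command only):
 *   tc qdisc replace dev <port> root handle 1: mqprio num_tc 8 \
 *      map 0 1 2 3 4 5 6 7 queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 hw 1
 */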

int sparx5_tc_mqprio_del(struct net_device *ndev)
{
	netdev_reset_tc(ndev);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}

int sparx5_tc_tbf_add(struct sparx5_port *port,
		      struct tc_tbf_qopt_offload_replace_params *params,
		      u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {
		.mode = SPX5_SE_MODE_DATARATE,
		.rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
		.burst = params->max_size,
	};
	struct sparx5_lg *lg;
	u32 group;

	/* Find suitable group for this se */
	if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
		pr_debug("Could not find leak group for se with rate: %d",
			 sh.rate);
		return -EINVAL;
	}

	lg = &layers[layer].leak_groups[group];

	pr_debug("Found matching group (speed: %d)\n", lg->max_rate);

	if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
		return -EINVAL;

	/* Calculate committed rate and burst */
	sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
	sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);

	if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
		return -EINVAL;

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}
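
/* Illustrative numbers for the conversion above: a tbf rate of 100 Mbit/s
 * arrives as rate_bytes_ps = 12500000, giving sh.rate = 100000 (kbit/s).
 * That selects leak group 0 (max_rate 1048568) with resolution 8, so the
 * committed rate programmed into HSCH_CIR_CFG becomes
 * DIV_ROUND_UP(100000, 8) = 12500, matching the example in
 * sparx5_leak_groups_init(). The burst is programmed in units of
 * SPX5_SE_BURST_UNIT bytes.
 */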

int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
{
	struct sparx5_shaper sh = {0};
	u32 group;

	sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);

	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
}

int sparx5_tc_ets_add(struct sparx5_port *port,
		      struct tc_ets_qopt_offload_replace_params *params)
{
	struct sparx5_dwrr dwrr = {0};
	/* Minimum weight among all dwrr bands */
	unsigned int w_min = 100;
	int i;

	/* Find minimum weight for all dwrr bands */
	for (i = 0; i < SPX5_PRIOS; i++) {
		if (params->quanta[i] == 0)
			continue;
		w_min = min(w_min, params->weights[i]);
	}

	for (i = 0; i < SPX5_PRIOS; i++) {
		/* Strict band; skip */
		if (params->quanta[i] == 0)
			continue;

		dwrr.count++;

		/* On the sparx5, bands with higher indexes are preferred and
		 * arbitrated strict. Strict bands are put in the lower indexes,
		 * by tc, so we reverse the bands here.
		 *
		 * Also convert the weight to something the hardware
		 * understands.
		 */
		dwrr.cost[SPX5_PRIOS - i - 1] =
			sparx5_weight_to_hw_cost(w_min, params->weights[i]);
	}

	return sparx5_dwrr_conf_set(port, &dwrr);
}
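
/* Illustrative mapping for the loop above: with bands 0-3 strict (quanta 0)
 * and bands 4-7 carrying weights 10, 20, 30 and 40, w_min is 10, dwrr.count
 * becomes 4 and the converted costs land in dwrr.cost[3], [2], [1] and [0]
 * respectively (tc band i goes to entry SPX5_PRIOS - i - 1). Such a setup
 * would typically be requested with something like (illustrative command
 * only):
 *   tc qdisc replace dev <port> root handle 1: ets bands 8 strict 4 \
 *      quanta 1000 2000 3000 4000
 */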

int sparx5_tc_ets_del(struct sparx5_port *port)
{
	struct sparx5_dwrr dwrr = {0};

	return sparx5_dwrr_conf_set(port, &dwrr);
}