Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0+
  2/* Microchip Sparx5 Switch driver
  3 *
  4 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
  5 */
  6
  7#include <net/pkt_cls.h>
  8
  9#include "sparx5_main.h"
 10#include "sparx5_qos.h"
 11
/* Calculate new base_time based on cycle_time.
 *
 * The hardware requires a base_time that is always in the future.
 * We define threshold_time as current_time + (2 * cycle_time).
 * If base_time is below threshold_time this function recalculates it to be in
 * the interval:
 * threshold_time <= base_time < (threshold_time + cycle_time)
 *
 * A very simple algorithm could be like this:
 * new_base_time = org_base_time + N * cycle_time
 * using the lowest N so (new_base_time >= threshold_time)
 */
void sparx5_new_base_time(struct sparx5 *sparx5, const u32 cycle_time,
			  const ktime_t org_base_time, ktime_t *new_base_time)
{
	ktime_t current_time, threshold_time, new_time;
	struct timespec64 ts;
	u64 nr_of_cycles_p2;
	u64 nr_of_cycles;
	u64 diff_time;

	new_time = org_base_time;

	/* Current time is read from the port-domain PHC */
	sparx5_ptp_gettime64(&sparx5->phc[SPARX5_PHC_PORT].info, &ts);
	current_time = timespec64_to_ktime(ts);
	threshold_time = current_time + (2 * cycle_time);
	diff_time = threshold_time - new_time;
	nr_of_cycles = div_u64(diff_time, cycle_time);
	nr_of_cycles_p2 = 1; /* Use 2^0 as start value */

	/* Already far enough in the future: keep org_base_time as-is */
	if (new_time >= threshold_time) {
		*new_base_time = new_time;
		return;
	}

	/* Calculate the smallest power of 2 (nr_of_cycles_p2)
	 * that is larger than nr_of_cycles.
	 */
	while (nr_of_cycles_p2 < nr_of_cycles)
		nr_of_cycles_p2 <<= 1; /* Next (higher) power of 2 */

	/* Add as big chunks (power of 2 * cycle_time)
	 * as possible for each power of 2
	 */
	while (nr_of_cycles_p2) {
		if (new_time < threshold_time) {
			new_time += cycle_time * nr_of_cycles_p2;
			/* Step past threshold_time with this chunk size, then
			 * back off one chunk so new_time stays just below it.
			 */
			while (new_time < threshold_time)
				new_time += cycle_time * nr_of_cycles_p2;
			new_time -= cycle_time * nr_of_cycles_p2;
		}
		nr_of_cycles_p2 >>= 1; /* Next (lower) power of 2 */
	}
	/* new_time is now the largest org_base_time + N * cycle_time below
	 * threshold_time; one final cycle lands it in the target interval
	 * [threshold_time, threshold_time + cycle_time).
	 */
	new_time += cycle_time;
	*new_base_time = new_time;
}
 68
/* Max rates for leak groups; per the value comments below the unit is
 * kbit/s (e.g. 1048568 -> 1.049 Gbps).
 */
static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
	1048568, /*  1.049 Gbps */
	2621420, /*  2.621 Gbps */
	10485680, /* 10.486 Gbps */
	26214200 /* 26.214 Gbps */
};
 76
/* Return the maximum rate served by leak group @grp.
 *
 * NOTE(review): no bounds check on @grp — callers must pass
 * grp < SPX5_HSCH_LEAK_GRP_CNT.
 */
u32 sparx5_get_hsch_max_group_rate(int grp)
{
	return spx5_hsch_max_group_rate[grp];
}
 81
 82static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
 83
 84static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
 85{
 86	u32 value;
 87
 88	value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
 89	return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
 90}
 91
/* Program the leak time for a leak group.
 *
 * Hardware expects nanoseconds (see sparx5_leak_groups_init()); a value of
 * zero is used throughout this file to mark the group as disabled/empty.
 */
static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
				    u32 leak_time)
{
	spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
		HSCH_HSCH_TIMER_CFG(layer, group));
}
 98
 99static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
100{
101	u32 value;
102
103	value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
104	return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
105}
106
107static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
108			      u32 idx)
109
110{
111	u32 value;
112
113	value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
114	return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
115}
116
117static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
118{
119	u32 itr, next;
120
121	itr = sparx5_lg_get_first(sparx5, layer, group);
122
123	for (;;) {
124		next = sparx5_lg_get_next(sparx5, layer, group, itr);
125		if (itr == next)
126			return itr;
127
128		itr = next;
129	}
130}
131
/* An element is last in its leak group when its link points to itself */
static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 idx)
{
	return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
}
137
/* An element is first in its leak group when it is the list head */
static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
			       u32 idx)
{
	return idx == sparx5_lg_get_first(sparx5, layer, group);
}
143
/* A leak time of zero marks a disabled — and therefore empty — leak group */
static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
{
	return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
}
148
149static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
150{
151	if (sparx5_lg_is_empty(sparx5, layer, group))
152		return false;
153
154	return sparx5_lg_get_first(sparx5, layer, group) ==
155	       sparx5_lg_get_last(sparx5, layer, group);
156}
157
/* Start leaking for a group by programming a non-zero leak time */
static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
			     u32 leak_time)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
}
163
/* Stop leaking for a group; leak time zero also marks the group empty */
static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
{
	sparx5_lg_set_leak_time(sparx5, layer, group, 0);
}
168
169static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
170					u32 idx, u32 *group)
171{
172	u32 itr, next;
173	int i;
174
175	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
176		if (sparx5_lg_is_empty(sparx5, layer, i))
177			continue;
178
179		itr = sparx5_lg_get_first(sparx5, layer, i);
180
181		for (;;) {
182			next = sparx5_lg_get_next(sparx5, layer, i, itr);
183
184			if (itr == idx) {
185				*group = i;
186				return 0; /* Found it */
187			}
188			if (itr == next)
189				break; /* Was not found */
190
191			itr = next;
192		}
193	}
194
195	return -1;
196}
197
198static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
199{
200	struct sparx5_layer *l = &layers[layer];
201	struct sparx5_lg *lg;
202	u32 i;
203
204	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
205		lg = &l->leak_groups[i];
206		if (rate <= lg->max_rate) {
207			*group = i;
208			return 0;
209		}
210	}
211
212	return -1;
213}
214
215static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
216				  u32 idx, u32 *prev, u32 *next, u32 *first)
217{
218	u32 itr;
219
220	*first = sparx5_lg_get_first(sparx5, layer, group);
221	*prev = *first;
222	*next = *first;
223	itr = *first;
224
225	for (;;) {
226		*next = sparx5_lg_get_next(sparx5, layer, group, itr);
227
228		if (itr == idx)
229			return 0; /* Found it */
230
231		if (itr == *next)
232			return -1; /* Was not found */
233
234		*prev = itr;
235		itr = *next;
236	}
237
238	return -1;
239}
240
/* Rewrite the hardware state of a leak group.
 *
 * Leaking is stopped while registers are updated; @se_first becomes the list
 * head and element @idx is linked to @idx_next.  When @empty, the group is
 * simply left disabled (leak time 0 also marks it empty).
 */
static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
			      u32 se_first, u32 idx, u32 idx_next, bool empty)
{
	u32 leak_time = layers[layer].leak_groups[group].leak_time;

	/* Stop leaking */
	sparx5_lg_disable(sparx5, layer, group);

	if (empty)
		return 0;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Link elements */
	spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
		HSCH_SE_CONNECT(idx));

	/* Set the first element. */
	spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
		 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
		 HSCH_HSCH_LEAK_CFG(layer, group));

	/* Start leaking */
	sparx5_lg_enable(sparx5, layer, group, leak_time);

	return 0;
}
270
/* Remove scheduler element @idx from a leak group.
 *
 * The list lives in hardware: each element links to the next and the last
 * element links to itself.  The four cases below choose which element and
 * link must be rewritten by sparx5_lg_conf_set() to unlink @idx.
 */
static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
{
	u32 first, next, prev;
	bool empty = false;

	/* idx *must* be present in the leak group */
	WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
				       &first) < 0);

	if (sparx5_lg_is_singular(sparx5, layer, group)) {
		empty = true;
	} else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
		/* idx is removed, prev is now last */
		idx = prev;
		next = prev;
	} else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
		/* idx is removed and points to itself, first is next */
		first = next;
		next = idx;
	} else {
		/* Next is not touched */
		idx = prev;
	}

	return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
				  empty);
}
298
299static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
300			 u32 idx)
301{
302	u32 first, next, old_group;
303
304	pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
305		 idx);
306
307	/* Is this SE already shaping ? */
308	if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
309		if (old_group != new_group) {
310			/* Delete from old group */
311			sparx5_lg_del(sparx5, layer, old_group, idx);
312		} else {
313			/* Nothing to do here */
314			return 0;
315		}
316	}
317
318	/* We always add to head of the list */
319	first = idx;
320
321	if (sparx5_lg_is_empty(sparx5, layer, new_group))
322		next = idx;
323	else
324		next = sparx5_lg_get_first(sparx5, layer, new_group);
325
326	return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
327				  false);
328}
329
/* Program shaper mode, committed rate and burst for scheduler element @idx,
 * then attach it to leak group @group — or detach it when both rate and
 * burst are zero (shaper disabled).
 */
static int sparx5_shaper_conf_set(struct sparx5_port *port,
				  const struct sparx5_shaper *sh, u32 layer,
				  u32 idx, u32 group)
{
	int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
	struct sparx5 *sparx5 = port->sparx5;

	/* rate == 0 && burst == 0 means "remove shaper" */
	if (!sh->rate && !sh->burst)
		sparx5_lg_action = &sparx5_lg_del;
	else
		sparx5_lg_action = &sparx5_lg_add;

	/* Select layer */
	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);

	/* Set frame mode */
	spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
		 sparx5, HSCH_SE_CFG(idx));

	/* Set committed rate and burst */
	spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
			HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
		sparx5, HSCH_CIR_CFG(idx));

	/* This has to be done after the shaper configuration has been set */
	sparx5_lg_action(sparx5, layer, group, idx);

	return 0;
}
360
361static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
362{
363	return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
364	       1;
365}
366
/* Program the DWRR configuration (band count and per-band costs) for the
 * port's scheduler element.
 */
static int sparx5_dwrr_conf_set(struct sparx5_port *port,
				struct sparx5_dwrr *dwrr)
{
	/* DWRR lives in layer 2 on Sparx5 and layer 1 on the other
	 * supported variant (see is_sparx5()).
	 */
	u32 layer = is_sparx5(port->sparx5) ? 2 : 1;
	int i;

	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer) |
		 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
		 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
		 port->sparx5, HSCH_HSCH_CFG_CFG);

	/* Number of *lower* indexes that are arbitrated dwrr */
	spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
		 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
		 HSCH_SE_CFG(port->portno));

	for (i = 0; i < dwrr->count; i++) {
		spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
			 HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
			 HSCH_DWRR_ENTRY(i));
	}

	return 0;
}
391
/* Derive leak time, resolution and capacity for every leak group of every
 * scheduler layer, then disable all groups (disabled == empty).
 */
static int sparx5_leak_groups_init(struct sparx5 *sparx5)
{
	const struct sparx5_ops *ops = sparx5->data->ops;
	struct sparx5_layer *layer;
	u32 sys_clk_per_100ps;
	struct sparx5_lg *lg;
	u32 leak_time_us;
	int i, ii;

	sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);

	for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
		layer = &layers[i];
		for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
			lg = &layer->leak_groups[ii];
			lg->max_rate = ops->get_hsch_max_group_rate(i);

			/* Calculate the leak time in us, to serve a maximum
			 * rate of 'max_rate' for this group
			 */
			leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;

			/* Hardware wants leak time in ns */
			lg->leak_time = 1000 * leak_time_us;

			/* Calculate resolution.
			 * NOTE(review): assumes leak_time_us <= 1000 so the
			 * result is non-zero; a zero resolution would later
			 * divide by zero in sparx5_tc_tbf_add() — confirm
			 * against SPX5_SE_RATE_MAX and the rate table.
			 */
			lg->resolution = 1000 / leak_time_us;

			/* Maximum number of shapers that can be served by
			 * this leak group
			 */
			lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;

			/* Example:
			 * Wanted bandwidth is 100Mbit:
			 *
			 * 100 mbps can be served by leak group zero.
			 *
			 * leak_time is 125000 ns.
			 * resolution is: 8
			 *
			 * cir          = 100000 / 8 = 12500
			 * leaks_pr_sec = 125000 / 10^9 = 8000
			 * bw           = 12500 * 8000 = 10^8 (100 Mbit)
			 */

			/* Disable by default - this also indicates an empty
			 * leak group
			 */
			sparx5_lg_disable(sparx5, i, ii);
		}
	}

	return 0;
}
447
/* One-time QoS initialization: leak groups, then DCB, then PSFP.
 *
 * Return: 0 on success or a negative error from the leak-group/DCB init.
 * NOTE(review): any error sparx5_psfp_init() might report is not checked
 * here — confirm it cannot fail or should propagate.
 */
int sparx5_qos_init(struct sparx5 *sparx5)
{
	int ret;

	ret = sparx5_leak_groups_init(sparx5);
	if (ret < 0)
		return ret;

	ret = sparx5_dcb_init(sparx5);
	if (ret < 0)
		return ret;

	sparx5_psfp_init(sparx5);

	return 0;
}
464
465int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
466{
467	int i;
468
469	if (num_tc != SPX5_PRIOS) {
470		netdev_err(ndev, "Only %d traffic classes supported\n",
471			   SPX5_PRIOS);
472		return -EINVAL;
473	}
474
475	netdev_set_num_tc(ndev, num_tc);
476
477	for (i = 0; i < num_tc; i++)
478		netdev_set_tc_queue(ndev, i, 1, i);
479
480	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
481		   ndev->num_tc, ndev->real_num_tx_queues);
482
483	return 0;
484}
485
/* Tear down the mqprio offload by resetting the device's TC state */
int sparx5_tc_mqprio_del(struct net_device *ndev)
{
	netdev_reset_tc(ndev);

	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
		   ndev->num_tc, ndev->real_num_tx_queues);

	return 0;
}
495
496int sparx5_tc_tbf_add(struct sparx5_port *port,
497		      struct tc_tbf_qopt_offload_replace_params *params,
498		      u32 layer, u32 idx)
499{
500	struct sparx5_shaper sh = {
501		.mode = SPX5_SE_MODE_DATARATE,
502		.rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
503		.burst = params->max_size,
504	};
505	struct sparx5_lg *lg;
506	u32 group;
507
508	/* Find suitable group for this se */
509	if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
510		pr_debug("Could not find leak group for se with rate: %d",
511			 sh.rate);
512		return -EINVAL;
513	}
514
515	lg = &layers[layer].leak_groups[group];
516
517	pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
518
519	if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
520		return -EINVAL;
521
522	/* Calculate committed rate and burst */
523	sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
524	sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
525
526	if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
527		return -EINVAL;
528
529	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
530}
531
532int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
533{
534	struct sparx5_shaper sh = {0};
535	u32 group;
536
537	sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
538
539	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
540}
541
542int sparx5_tc_ets_add(struct sparx5_port *port,
543		      struct tc_ets_qopt_offload_replace_params *params)
544{
545	struct sparx5_dwrr dwrr = {0};
546	/* Minimum weight for each iteration */
547	unsigned int w_min = 100;
548	int i;
549
550	/* Find minimum weight for all dwrr bands */
551	for (i = 0; i < SPX5_PRIOS; i++) {
552		if (params->quanta[i] == 0)
553			continue;
554		w_min = min(w_min, params->weights[i]);
555	}
556
557	for (i = 0; i < SPX5_PRIOS; i++) {
558		/* Strict band; skip */
559		if (params->quanta[i] == 0)
560			continue;
561
562		dwrr.count++;
563
564		/* On the sparx5, bands with higher indexes are preferred and
565		 * arbitrated strict. Strict bands are put in the lower indexes,
566		 * by tc, so we reverse the bands here.
567		 *
568		 * Also convert the weight to something the hardware
569		 * understands.
570		 */
571		dwrr.cost[SPX5_PRIOS - i - 1] =
572			sparx5_weight_to_hw_cost(w_min, params->weights[i]);
573	}
574
575	return sparx5_dwrr_conf_set(port, &dwrr);
576}
577
578int sparx5_tc_ets_del(struct sparx5_port *port)
579{
580	struct sparx5_dwrr dwrr = {0};
581
582	return sparx5_dwrr_conf_set(port, &dwrr);
583}
v6.2
  1// SPDX-License-Identifier: GPL-2.0+
  2/* Microchip Sparx5 Switch driver
  3 *
  4 * Copyright (c) 2022 Microchip Technology Inc. and its subsidiaries.
  5 */
  6
  7#include <net/pkt_cls.h>
  8
  9#include "sparx5_main.h"
 10#include "sparx5_qos.h"
 11
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 12/* Max rates for leak groups */
 13static const u32 spx5_hsch_max_group_rate[SPX5_HSCH_LEAK_GRP_CNT] = {
 14	1048568, /*  1.049 Gbps */
 15	2621420, /*  2.621 Gbps */
 16	10485680, /* 10.486 Gbps */
 17	26214200 /* 26.214 Gbps */
 18};
 19
 
 
 
 
 
 20static struct sparx5_layer layers[SPX5_HSCH_LAYER_CNT];
 21
 22static u32 sparx5_lg_get_leak_time(struct sparx5 *sparx5, u32 layer, u32 group)
 23{
 24	u32 value;
 25
 26	value = spx5_rd(sparx5, HSCH_HSCH_TIMER_CFG(layer, group));
 27	return HSCH_HSCH_TIMER_CFG_LEAK_TIME_GET(value);
 28}
 29
 30static void sparx5_lg_set_leak_time(struct sparx5 *sparx5, u32 layer, u32 group,
 31				    u32 leak_time)
 32{
 33	spx5_wr(HSCH_HSCH_TIMER_CFG_LEAK_TIME_SET(leak_time), sparx5,
 34		HSCH_HSCH_TIMER_CFG(layer, group));
 35}
 36
 37static u32 sparx5_lg_get_first(struct sparx5 *sparx5, u32 layer, u32 group)
 38{
 39	u32 value;
 40
 41	value = spx5_rd(sparx5, HSCH_HSCH_LEAK_CFG(layer, group));
 42	return HSCH_HSCH_LEAK_CFG_LEAK_FIRST_GET(value);
 43}
 44
 45static u32 sparx5_lg_get_next(struct sparx5 *sparx5, u32 layer, u32 group,
 46			      u32 idx)
 47
 48{
 49	u32 value;
 50
 51	value = spx5_rd(sparx5, HSCH_SE_CONNECT(idx));
 52	return HSCH_SE_CONNECT_SE_LEAK_LINK_GET(value);
 53}
 54
 55static u32 sparx5_lg_get_last(struct sparx5 *sparx5, u32 layer, u32 group)
 56{
 57	u32 itr, next;
 58
 59	itr = sparx5_lg_get_first(sparx5, layer, group);
 60
 61	for (;;) {
 62		next = sparx5_lg_get_next(sparx5, layer, group, itr);
 63		if (itr == next)
 64			return itr;
 65
 66		itr = next;
 67	}
 68}
 69
 70static bool sparx5_lg_is_last(struct sparx5 *sparx5, u32 layer, u32 group,
 71			      u32 idx)
 72{
 73	return idx == sparx5_lg_get_next(sparx5, layer, group, idx);
 74}
 75
 76static bool sparx5_lg_is_first(struct sparx5 *sparx5, u32 layer, u32 group,
 77			       u32 idx)
 78{
 79	return idx == sparx5_lg_get_first(sparx5, layer, group);
 80}
 81
 82static bool sparx5_lg_is_empty(struct sparx5 *sparx5, u32 layer, u32 group)
 83{
 84	return sparx5_lg_get_leak_time(sparx5, layer, group) == 0;
 85}
 86
 87static bool sparx5_lg_is_singular(struct sparx5 *sparx5, u32 layer, u32 group)
 88{
 89	if (sparx5_lg_is_empty(sparx5, layer, group))
 90		return false;
 91
 92	return sparx5_lg_get_first(sparx5, layer, group) ==
 93	       sparx5_lg_get_last(sparx5, layer, group);
 94}
 95
 96static void sparx5_lg_enable(struct sparx5 *sparx5, u32 layer, u32 group,
 97			     u32 leak_time)
 98{
 99	sparx5_lg_set_leak_time(sparx5, layer, group, leak_time);
100}
101
102static void sparx5_lg_disable(struct sparx5 *sparx5, u32 layer, u32 group)
103{
104	sparx5_lg_set_leak_time(sparx5, layer, group, 0);
105}
106
107static int sparx5_lg_get_group_by_index(struct sparx5 *sparx5, u32 layer,
108					u32 idx, u32 *group)
109{
110	u32 itr, next;
111	int i;
112
113	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
114		if (sparx5_lg_is_empty(sparx5, layer, i))
115			continue;
116
117		itr = sparx5_lg_get_first(sparx5, layer, i);
118
119		for (;;) {
120			next = sparx5_lg_get_next(sparx5, layer, i, itr);
121
122			if (itr == idx) {
123				*group = i;
124				return 0; /* Found it */
125			}
126			if (itr == next)
127				break; /* Was not found */
128
129			itr = next;
130		}
131	}
132
133	return -1;
134}
135
136static int sparx5_lg_get_group_by_rate(u32 layer, u32 rate, u32 *group)
137{
138	struct sparx5_layer *l = &layers[layer];
139	struct sparx5_lg *lg;
140	u32 i;
141
142	for (i = 0; i < SPX5_HSCH_LEAK_GRP_CNT; i++) {
143		lg = &l->leak_groups[i];
144		if (rate <= lg->max_rate) {
145			*group = i;
146			return 0;
147		}
148	}
149
150	return -1;
151}
152
153static int sparx5_lg_get_adjacent(struct sparx5 *sparx5, u32 layer, u32 group,
154				  u32 idx, u32 *prev, u32 *next, u32 *first)
155{
156	u32 itr;
157
158	*first = sparx5_lg_get_first(sparx5, layer, group);
159	*prev = *first;
160	*next = *first;
161	itr = *first;
162
163	for (;;) {
164		*next = sparx5_lg_get_next(sparx5, layer, group, itr);
165
166		if (itr == idx)
167			return 0; /* Found it */
168
169		if (itr == *next)
170			return -1; /* Was not found */
171
172		*prev = itr;
173		itr = *next;
174	}
175
176	return -1;
177}
178
179static int sparx5_lg_conf_set(struct sparx5 *sparx5, u32 layer, u32 group,
180			      u32 se_first, u32 idx, u32 idx_next, bool empty)
181{
182	u32 leak_time = layers[layer].leak_groups[group].leak_time;
183
184	/* Stop leaking */
185	sparx5_lg_disable(sparx5, layer, group);
186
187	if (empty)
188		return 0;
189
190	/* Select layer */
191	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
192		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
193
194	/* Link elements */
195	spx5_wr(HSCH_SE_CONNECT_SE_LEAK_LINK_SET(idx_next), sparx5,
196		HSCH_SE_CONNECT(idx));
197
198	/* Set the first element. */
199	spx5_rmw(HSCH_HSCH_LEAK_CFG_LEAK_FIRST_SET(se_first),
200		 HSCH_HSCH_LEAK_CFG_LEAK_FIRST, sparx5,
201		 HSCH_HSCH_LEAK_CFG(layer, group));
202
203	/* Start leaking */
204	sparx5_lg_enable(sparx5, layer, group, leak_time);
205
206	return 0;
207}
208
209static int sparx5_lg_del(struct sparx5 *sparx5, u32 layer, u32 group, u32 idx)
210{
211	u32 first, next, prev;
212	bool empty = false;
213
214	/* idx *must* be present in the leak group */
215	WARN_ON(sparx5_lg_get_adjacent(sparx5, layer, group, idx, &prev, &next,
216				       &first) < 0);
217
218	if (sparx5_lg_is_singular(sparx5, layer, group)) {
219		empty = true;
220	} else if (sparx5_lg_is_last(sparx5, layer, group, idx)) {
221		/* idx is removed, prev is now last */
222		idx = prev;
223		next = prev;
224	} else if (sparx5_lg_is_first(sparx5, layer, group, idx)) {
225		/* idx is removed and points to itself, first is next */
226		first = next;
227		next = idx;
228	} else {
229		/* Next is not touched */
230		idx = prev;
231	}
232
233	return sparx5_lg_conf_set(sparx5, layer, group, first, idx, next,
234				  empty);
235}
236
237static int sparx5_lg_add(struct sparx5 *sparx5, u32 layer, u32 new_group,
238			 u32 idx)
239{
240	u32 first, next, old_group;
241
242	pr_debug("ADD: layer: %d, new_group: %d, idx: %d", layer, new_group,
243		 idx);
244
245	/* Is this SE already shaping ? */
246	if (sparx5_lg_get_group_by_index(sparx5, layer, idx, &old_group) >= 0) {
247		if (old_group != new_group) {
248			/* Delete from old group */
249			sparx5_lg_del(sparx5, layer, old_group, idx);
250		} else {
251			/* Nothing to do here */
252			return 0;
253		}
254	}
255
256	/* We always add to head of the list */
257	first = idx;
258
259	if (sparx5_lg_is_empty(sparx5, layer, new_group))
260		next = idx;
261	else
262		next = sparx5_lg_get_first(sparx5, layer, new_group);
263
264	return sparx5_lg_conf_set(sparx5, layer, new_group, first, idx, next,
265				  false);
266}
267
268static int sparx5_shaper_conf_set(struct sparx5_port *port,
269				  const struct sparx5_shaper *sh, u32 layer,
270				  u32 idx, u32 group)
271{
272	int (*sparx5_lg_action)(struct sparx5 *, u32, u32, u32);
273	struct sparx5 *sparx5 = port->sparx5;
274
275	if (!sh->rate && !sh->burst)
276		sparx5_lg_action = &sparx5_lg_del;
277	else
278		sparx5_lg_action = &sparx5_lg_add;
279
280	/* Select layer */
281	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(layer),
282		 HSCH_HSCH_CFG_CFG_HSCH_LAYER, sparx5, HSCH_HSCH_CFG_CFG);
283
284	/* Set frame mode */
285	spx5_rmw(HSCH_SE_CFG_SE_FRM_MODE_SET(sh->mode), HSCH_SE_CFG_SE_FRM_MODE,
286		 sparx5, HSCH_SE_CFG(idx));
287
288	/* Set committed rate and burst */
289	spx5_wr(HSCH_CIR_CFG_CIR_RATE_SET(sh->rate) |
290			HSCH_CIR_CFG_CIR_BURST_SET(sh->burst),
291		sparx5, HSCH_CIR_CFG(idx));
292
293	/* This has to be done after the shaper configuration has been set */
294	sparx5_lg_action(sparx5, layer, group, idx);
295
296	return 0;
297}
298
299static u32 sparx5_weight_to_hw_cost(u32 weight_min, u32 weight)
300{
301	return ((((SPX5_DWRR_COST_MAX << 4) * weight_min / weight) + 8) >> 4) -
302	       1;
303}
304
305static int sparx5_dwrr_conf_set(struct sparx5_port *port,
306				struct sparx5_dwrr *dwrr)
307{
 
308	int i;
309
310	spx5_rmw(HSCH_HSCH_CFG_CFG_HSCH_LAYER_SET(2) |
311		 HSCH_HSCH_CFG_CFG_CFG_SE_IDX_SET(port->portno),
312		 HSCH_HSCH_CFG_CFG_HSCH_LAYER | HSCH_HSCH_CFG_CFG_CFG_SE_IDX,
313		 port->sparx5, HSCH_HSCH_CFG_CFG);
314
315	/* Number of *lower* indexes that are arbitrated dwrr */
316	spx5_rmw(HSCH_SE_CFG_SE_DWRR_CNT_SET(dwrr->count),
317		 HSCH_SE_CFG_SE_DWRR_CNT, port->sparx5,
318		 HSCH_SE_CFG(port->portno));
319
320	for (i = 0; i < dwrr->count; i++) {
321		spx5_rmw(HSCH_DWRR_ENTRY_DWRR_COST_SET(dwrr->cost[i]),
322			 HSCH_DWRR_ENTRY_DWRR_COST, port->sparx5,
323			 HSCH_DWRR_ENTRY(i));
324	}
325
326	return 0;
327}
328
329static int sparx5_leak_groups_init(struct sparx5 *sparx5)
330{
 
331	struct sparx5_layer *layer;
332	u32 sys_clk_per_100ps;
333	struct sparx5_lg *lg;
334	u32 leak_time_us;
335	int i, ii;
336
337	sys_clk_per_100ps = spx5_rd(sparx5, HSCH_SYS_CLK_PER);
338
339	for (i = 0; i < SPX5_HSCH_LAYER_CNT; i++) {
340		layer = &layers[i];
341		for (ii = 0; ii < SPX5_HSCH_LEAK_GRP_CNT; ii++) {
342			lg = &layer->leak_groups[ii];
343			lg->max_rate = spx5_hsch_max_group_rate[ii];
344
345			/* Calculate the leak time in us, to serve a maximum
346			 * rate of 'max_rate' for this group
347			 */
348			leak_time_us = (SPX5_SE_RATE_MAX * 1000) / lg->max_rate;
349
350			/* Hardware wants leak time in ns */
351			lg->leak_time = 1000 * leak_time_us;
352
353			/* Calculate resolution */
354			lg->resolution = 1000 / leak_time_us;
355
356			/* Maximum number of shapers that can be served by
357			 * this leak group
358			 */
359			lg->max_ses = (1000 * leak_time_us) / sys_clk_per_100ps;
360
361			/* Example:
362			 * Wanted bandwidth is 100Mbit:
363			 *
364			 * 100 mbps can be served by leak group zero.
365			 *
366			 * leak_time is 125000 ns.
367			 * resolution is: 8
368			 *
369			 * cir          = 100000 / 8 = 12500
370			 * leaks_pr_sec = 125000 / 10^9 = 8000
371			 * bw           = 12500 * 8000 = 10^8 (100 Mbit)
372			 */
373
374			/* Disable by default - this also indicates an empty
375			 * leak group
376			 */
377			sparx5_lg_disable(sparx5, i, ii);
378		}
379	}
380
381	return 0;
382}
383
384int sparx5_qos_init(struct sparx5 *sparx5)
385{
386	int ret;
387
388	ret = sparx5_leak_groups_init(sparx5);
389	if (ret < 0)
390		return ret;
391
392	ret = sparx5_dcb_init(sparx5);
393	if (ret < 0)
394		return ret;
 
 
395
396	return 0;
397}
398
399int sparx5_tc_mqprio_add(struct net_device *ndev, u8 num_tc)
400{
401	int i;
402
403	if (num_tc != SPX5_PRIOS) {
404		netdev_err(ndev, "Only %d traffic classes supported\n",
405			   SPX5_PRIOS);
406		return -EINVAL;
407	}
408
409	netdev_set_num_tc(ndev, num_tc);
410
411	for (i = 0; i < num_tc; i++)
412		netdev_set_tc_queue(ndev, i, 1, i);
413
414	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
415		   ndev->num_tc, ndev->real_num_tx_queues);
416
417	return 0;
418}
419
420int sparx5_tc_mqprio_del(struct net_device *ndev)
421{
422	netdev_reset_tc(ndev);
423
424	netdev_dbg(ndev, "dev->num_tc %u dev->real_num_tx_queues %u\n",
425		   ndev->num_tc, ndev->real_num_tx_queues);
426
427	return 0;
428}
429
430int sparx5_tc_tbf_add(struct sparx5_port *port,
431		      struct tc_tbf_qopt_offload_replace_params *params,
432		      u32 layer, u32 idx)
433{
434	struct sparx5_shaper sh = {
435		.mode = SPX5_SE_MODE_DATARATE,
436		.rate = div_u64(params->rate.rate_bytes_ps, 1000) * 8,
437		.burst = params->max_size,
438	};
439	struct sparx5_lg *lg;
440	u32 group;
441
442	/* Find suitable group for this se */
443	if (sparx5_lg_get_group_by_rate(layer, sh.rate, &group) < 0) {
444		pr_debug("Could not find leak group for se with rate: %d",
445			 sh.rate);
446		return -EINVAL;
447	}
448
449	lg = &layers[layer].leak_groups[group];
450
451	pr_debug("Found matching group (speed: %d)\n", lg->max_rate);
452
453	if (sh.rate < SPX5_SE_RATE_MIN || sh.burst < SPX5_SE_BURST_MIN)
454		return -EINVAL;
455
456	/* Calculate committed rate and burst */
457	sh.rate = DIV_ROUND_UP(sh.rate, lg->resolution);
458	sh.burst = DIV_ROUND_UP(sh.burst, SPX5_SE_BURST_UNIT);
459
460	if (sh.rate > SPX5_SE_RATE_MAX || sh.burst > SPX5_SE_BURST_MAX)
461		return -EINVAL;
462
463	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
464}
465
466int sparx5_tc_tbf_del(struct sparx5_port *port, u32 layer, u32 idx)
467{
468	struct sparx5_shaper sh = {0};
469	u32 group;
470
471	sparx5_lg_get_group_by_index(port->sparx5, layer, idx, &group);
472
473	return sparx5_shaper_conf_set(port, &sh, layer, idx, group);
474}
475
476int sparx5_tc_ets_add(struct sparx5_port *port,
477		      struct tc_ets_qopt_offload_replace_params *params)
478{
479	struct sparx5_dwrr dwrr = {0};
480	/* Minimum weight for each iteration */
481	unsigned int w_min = 100;
482	int i;
483
484	/* Find minimum weight for all dwrr bands */
485	for (i = 0; i < SPX5_PRIOS; i++) {
486		if (params->quanta[i] == 0)
487			continue;
488		w_min = min(w_min, params->weights[i]);
489	}
490
491	for (i = 0; i < SPX5_PRIOS; i++) {
492		/* Strict band; skip */
493		if (params->quanta[i] == 0)
494			continue;
495
496		dwrr.count++;
497
498		/* On the sparx5, bands with higher indexes are preferred and
499		 * arbitrated strict. Strict bands are put in the lower indexes,
500		 * by tc, so we reverse the bands here.
501		 *
502		 * Also convert the weight to something the hardware
503		 * understands.
504		 */
505		dwrr.cost[SPX5_PRIOS - i - 1] =
506			sparx5_weight_to_hw_cost(w_min, params->weights[i]);
507	}
508
509	return sparx5_dwrr_conf_set(port, &dwrr);
510}
511
512int sparx5_tc_ets_del(struct sparx5_port *port)
513{
514	struct sparx5_dwrr dwrr = {0};
515
516	return sparx5_dwrr_conf_set(port, &dwrr);
517}