v4.6
  1/*
  2 * net/sched/sch_gred.c	Generic Random Early Detection queue.
  3 *
  4 *
  5 *              This program is free software; you can redistribute it and/or
  6 *              modify it under the terms of the GNU General Public License
  7 *              as published by the Free Software Foundation; either version
  8 *              2 of the License, or (at your option) any later version.
  9 *
 10 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 11 *
 12 *             991129: -  Bug fix with grio mode
 13 *		       - a better sing. AvgQ mode with Grio(WRED)
  14 *		       - A finer grained VQ dequeue based on suggestion
 15 *		         from Ren Liu
 16 *		       - More error checks
 17 *
 18 *  For all the glorious comments look at include/net/red.h
 19 */
 20
 21#include <linux/slab.h>
 22#include <linux/module.h>
 23#include <linux/types.h>
 24#include <linux/kernel.h>
 25#include <linux/skbuff.h>
 26#include <net/pkt_sched.h>
 27#include <net/red.h>
 28
 29#define GRED_DEF_PRIO (MAX_DPs / 2)
 30#define GRED_VQ_MASK (MAX_DPs - 1)
 31
 32struct gred_sched_data;
 33struct gred_sched;
 34
 35struct gred_sched_data {
 36	u32		limit;		/* HARD maximal queue length	*/
 37	u32		DP;		/* the drop parameters */
 38	u32		bytesin;	/* bytes seen on virtualQ so far*/
 39	u32		packetsin;	/* packets seen on virtualQ so far*/
 40	u32		backlog;	/* bytes on the virtualQ */
 41	u8		prio;		/* the prio of this vq */
 42
 43	struct red_parms parms;
 44	struct red_vars  vars;
 45	struct red_stats stats;
 46};
 47
 48enum {
 49	GRED_WRED_MODE = 1,
 50	GRED_RIO_MODE,
 51};
 52
 53struct gred_sched {
 54	struct gred_sched_data *tab[MAX_DPs];
 55	unsigned long	flags;
 56	u32		red_flags;
 57	u32 		DPs;
 58	u32 		def;
 59	struct red_vars wred_set;
 60};
 61
 62static inline int gred_wred_mode(struct gred_sched *table)
 63{
 64	return test_bit(GRED_WRED_MODE, &table->flags);
 65}
 66
 67static inline void gred_enable_wred_mode(struct gred_sched *table)
 68{
 69	__set_bit(GRED_WRED_MODE, &table->flags);
 70}
 71
 72static inline void gred_disable_wred_mode(struct gred_sched *table)
 73{
 74	__clear_bit(GRED_WRED_MODE, &table->flags);
 75}
 76
 77static inline int gred_rio_mode(struct gred_sched *table)
 78{
 79	return test_bit(GRED_RIO_MODE, &table->flags);
 80}
 81
 82static inline void gred_enable_rio_mode(struct gred_sched *table)
 83{
 84	__set_bit(GRED_RIO_MODE, &table->flags);
 85}
 86
 87static inline void gred_disable_rio_mode(struct gred_sched *table)
 88{
 89	__clear_bit(GRED_RIO_MODE, &table->flags);
 90}
 91
 92static inline int gred_wred_mode_check(struct Qdisc *sch)
 93{
 94	struct gred_sched *table = qdisc_priv(sch);
 95	int i;
 96
 97	/* Really ugly O(n^2), but it shouldn't need to run too frequently. */
 98	for (i = 0; i < table->DPs; i++) {
 99		struct gred_sched_data *q = table->tab[i];
100		int n;
101
102		if (q == NULL)
103			continue;
104
105		for (n = i + 1; n < table->DPs; n++)
106			if (table->tab[n] && table->tab[n]->prio == q->prio)
107				return 1;
108	}
109
110	return 0;
111}
112
113static inline unsigned int gred_backlog(struct gred_sched *table,
114					struct gred_sched_data *q,
115					struct Qdisc *sch)
116{
117	if (gred_wred_mode(table))
118		return sch->qstats.backlog;
119	else
120		return q->backlog;
121}
122
123static inline u16 tc_index_to_dp(struct sk_buff *skb)
124{
125	return skb->tc_index & GRED_VQ_MASK;
126}
127
128static inline void gred_load_wred_set(const struct gred_sched *table,
129				      struct gred_sched_data *q)
130{
131	q->vars.qavg = table->wred_set.qavg;
132	q->vars.qidlestart = table->wred_set.qidlestart;
133}
134
135static inline void gred_store_wred_set(struct gred_sched *table,
136				       struct gred_sched_data *q)
137{
138	table->wred_set.qavg = q->vars.qavg;
139	table->wred_set.qidlestart = q->vars.qidlestart;
140}
141
142static inline int gred_use_ecn(struct gred_sched *t)
143{
144	return t->red_flags & TC_RED_ECN;
145}
146
147static inline int gred_use_harddrop(struct gred_sched *t)
148{
149	return t->red_flags & TC_RED_HARDDROP;
150}
151
152static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
153{
154	struct gred_sched_data *q = NULL;
155	struct gred_sched *t = qdisc_priv(sch);
156	unsigned long qavg = 0;
157	u16 dp = tc_index_to_dp(skb);
158
159	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
160		dp = t->def;
161
162		q = t->tab[dp];
163		if (!q) {
164			/* Pass through packets not assigned to a DP
165			 * if no default DP has been configured. This
166			 * allows for DP flows to be left untouched.
167			 */
168			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
169					sch->limit))
170				return qdisc_enqueue_tail(skb, sch);
171			else
172				goto drop;
173		}
174
175		/* fix tc_index? --could be controversial but needed for
176		   requeueing */
177		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
178	}
179
180	/* sum up all the qaves of prios < ours to get the new qave */
181	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
182		int i;
183
184		for (i = 0; i < t->DPs; i++) {
185			if (t->tab[i] && t->tab[i]->prio < q->prio &&
186			    !red_is_idling(&t->tab[i]->vars))
187				qavg += t->tab[i]->vars.qavg;
188		}
189
190	}
191
192	q->packetsin++;
193	q->bytesin += qdisc_pkt_len(skb);
194
195	if (gred_wred_mode(t))
196		gred_load_wred_set(t, q);
197
198	q->vars.qavg = red_calc_qavg(&q->parms,
199				     &q->vars,
200				     gred_backlog(t, q, sch));
201
202	if (red_is_idling(&q->vars))
203		red_end_of_idle_period(&q->vars);
204
205	if (gred_wred_mode(t))
206		gred_store_wred_set(t, q);
207
208	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
209	case RED_DONT_MARK:
210		break;
211
212	case RED_PROB_MARK:
213		qdisc_qstats_overlimit(sch);
214		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
215			q->stats.prob_drop++;
216			goto congestion_drop;
217		}
218
219		q->stats.prob_mark++;
220		break;
221
222	case RED_HARD_MARK:
223		qdisc_qstats_overlimit(sch);
224		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
225		    !INET_ECN_set_ce(skb)) {
226			q->stats.forced_drop++;
227			goto congestion_drop;
228		}
229		q->stats.forced_mark++;
230		break;
231	}
232
233	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
234		q->backlog += qdisc_pkt_len(skb);
235		return qdisc_enqueue_tail(skb, sch);
236	}
237
238	q->stats.pdrop++;
239drop:
240	return qdisc_drop(skb, sch);
241
242congestion_drop:
243	qdisc_drop(skb, sch);
244	return NET_XMIT_CN;
245}
246
247static struct sk_buff *gred_dequeue(struct Qdisc *sch)
248{
249	struct sk_buff *skb;
250	struct gred_sched *t = qdisc_priv(sch);
251
252	skb = qdisc_dequeue_head(sch);
253
254	if (skb) {
255		struct gred_sched_data *q;
256		u16 dp = tc_index_to_dp(skb);
257
258		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
259			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
260					     tc_index_to_dp(skb));
261		} else {
262			q->backlog -= qdisc_pkt_len(skb);
263
264			if (gred_wred_mode(t)) {
265				if (!sch->qstats.backlog)
266					red_start_of_idle_period(&t->wred_set);
267			} else {
268				if (!q->backlog)
269					red_start_of_idle_period(&q->vars);
270			}
271		}
272
273		return skb;
274	}
275
276	return NULL;
277}
278
279static unsigned int gred_drop(struct Qdisc *sch)
280{
281	struct sk_buff *skb;
282	struct gred_sched *t = qdisc_priv(sch);
283
284	skb = qdisc_dequeue_tail(sch);
285	if (skb) {
286		unsigned int len = qdisc_pkt_len(skb);
287		struct gred_sched_data *q;
288		u16 dp = tc_index_to_dp(skb);
289
290		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
291			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
292					     tc_index_to_dp(skb));
293		} else {
294			q->backlog -= len;
295			q->stats.other++;
296
297			if (gred_wred_mode(t)) {
298				if (!sch->qstats.backlog)
299					red_start_of_idle_period(&t->wred_set);
300			} else {
301				if (!q->backlog)
302					red_start_of_idle_period(&q->vars);
303			}
304		}
305
306		qdisc_drop(skb, sch);
307		return len;
308	}
309
310	return 0;
311}
312
313static void gred_reset(struct Qdisc *sch)
314{
315	int i;
316	struct gred_sched *t = qdisc_priv(sch);
317
318	qdisc_reset_queue(sch);
319
320	for (i = 0; i < t->DPs; i++) {
321		struct gred_sched_data *q = t->tab[i];
322
323		if (!q)
324			continue;
325
326		red_restart(&q->vars);
327		q->backlog = 0;
328	}
329}
330
331static inline void gred_destroy_vq(struct gred_sched_data *q)
332{
333	kfree(q);
334}
335
336static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
337{
338	struct gred_sched *table = qdisc_priv(sch);
339	struct tc_gred_sopt *sopt;
340	int i;
341
342	if (dps == NULL)
343		return -EINVAL;
344
345	sopt = nla_data(dps);
346
347	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
348		return -EINVAL;
349
350	sch_tree_lock(sch);
351	table->DPs = sopt->DPs;
352	table->def = sopt->def_DP;
353	table->red_flags = sopt->flags;
354
355	/*
356	 * Every entry point to GRED is synchronized with the above code
357	 * and the DP is checked against DPs, i.e. shadowed VQs can no
358	 * longer be found so we can unlock right here.
359	 */
360	sch_tree_unlock(sch);
361
362	if (sopt->grio) {
363		gred_enable_rio_mode(table);
364		gred_disable_wred_mode(table);
365		if (gred_wred_mode_check(sch))
366			gred_enable_wred_mode(table);
367	} else {
368		gred_disable_rio_mode(table);
369		gred_disable_wred_mode(table);
370	}
371
372	for (i = table->DPs; i < MAX_DPs; i++) {
373		if (table->tab[i]) {
374			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
375				i);
376			gred_destroy_vq(table->tab[i]);
377			table->tab[i] = NULL;
378		}
379	}
380
381	return 0;
382}
383
384static inline int gred_change_vq(struct Qdisc *sch, int dp,
385				 struct tc_gred_qopt *ctl, int prio,
386				 u8 *stab, u32 max_P,
387				 struct gred_sched_data **prealloc)
388{
389	struct gred_sched *table = qdisc_priv(sch);
390	struct gred_sched_data *q = table->tab[dp];
391
392	if (!q) {
393		table->tab[dp] = q = *prealloc;
394		*prealloc = NULL;
395		if (!q)
396			return -ENOMEM;
397	}
398
399	q->DP = dp;
400	q->prio = prio;
401	if (ctl->limit > sch->limit)
402		q->limit = sch->limit;
403	else
404		q->limit = ctl->limit;
405
406	if (q->backlog == 0)
407		red_end_of_idle_period(&q->vars);
408
409	red_set_parms(&q->parms,
410		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
411		      ctl->Scell_log, stab, max_P);
412	red_set_vars(&q->vars);
413	return 0;
414}
415
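/* Note (editor's gloss, based on include/net/red.h): red_set_parms()
 * stores qth_min/qth_max pre-shifted left by Wlog so they compare
 * directly against the fixed-point qavg; gred_dump() below undoes that
 * with q->parms.qth_min >> q->parms.Wlog when reporting to userspace.
 */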
416static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
417	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
418	[TCA_GRED_STAB]		= { .len = 256 },
419	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
420	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
421	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
422};
423
424static int gred_change(struct Qdisc *sch, struct nlattr *opt)
425{
426	struct gred_sched *table = qdisc_priv(sch);
427	struct tc_gred_qopt *ctl;
428	struct nlattr *tb[TCA_GRED_MAX + 1];
429	int err, prio = GRED_DEF_PRIO;
430	u8 *stab;
431	u32 max_P;
432	struct gred_sched_data *prealloc;
433
434	if (opt == NULL)
435		return -EINVAL;
436
437	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
438	if (err < 0)
439		return err;
440
441	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
442		if (tb[TCA_GRED_LIMIT] != NULL)
443			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
444		return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
445	}
446
447	if (tb[TCA_GRED_PARMS] == NULL ||
448	    tb[TCA_GRED_STAB] == NULL ||
449	    tb[TCA_GRED_LIMIT] != NULL)
450		return -EINVAL;
451
452	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
453
454	err = -EINVAL;
455	ctl = nla_data(tb[TCA_GRED_PARMS]);
456	stab = nla_data(tb[TCA_GRED_STAB]);
457
458	if (ctl->DP >= table->DPs)
459		goto errout;
460
461	if (gred_rio_mode(table)) {
462		if (ctl->prio == 0) {
463			int def_prio = GRED_DEF_PRIO;
464
465			if (table->tab[table->def])
466				def_prio = table->tab[table->def]->prio;
467
468			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
469			       "setting default to %d\n", ctl->DP, def_prio);
470
471			prio = def_prio;
472		} else
473			prio = ctl->prio;
474	}
475
476	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
477	sch_tree_lock(sch);
478
479	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
480	if (err < 0)
481		goto errout_locked;
482
483	if (gred_rio_mode(table)) {
484		gred_disable_wred_mode(table);
485		if (gred_wred_mode_check(sch))
486			gred_enable_wred_mode(table);
487	}
488
489	err = 0;
490
491errout_locked:
492	sch_tree_unlock(sch);
493	kfree(prealloc);
494errout:
495	return err;
496}
497
498static int gred_init(struct Qdisc *sch, struct nlattr *opt)
499{
500	struct nlattr *tb[TCA_GRED_MAX + 1];
501	int err;
502
503	if (opt == NULL)
504		return -EINVAL;
505
506	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
507	if (err < 0)
508		return err;
509
510	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
511		return -EINVAL;
512
513	if (tb[TCA_GRED_LIMIT])
514		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
515	else
516		sch->limit = qdisc_dev(sch)->tx_queue_len
517		             * psched_mtu(qdisc_dev(sch));
518
519	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
520}
521
522static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
523{
524	struct gred_sched *table = qdisc_priv(sch);
525	struct nlattr *parms, *opts = NULL;
526	int i;
527	u32 max_p[MAX_DPs];
528	struct tc_gred_sopt sopt = {
529		.DPs	= table->DPs,
530		.def_DP	= table->def,
531		.grio	= gred_rio_mode(table),
532		.flags	= table->red_flags,
533	};
534
535	opts = nla_nest_start(skb, TCA_OPTIONS);
536	if (opts == NULL)
537		goto nla_put_failure;
538	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
539		goto nla_put_failure;
540
541	for (i = 0; i < MAX_DPs; i++) {
542		struct gred_sched_data *q = table->tab[i];
543
544		max_p[i] = q ? q->parms.max_P : 0;
545	}
546	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
547		goto nla_put_failure;
548
549	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
550		goto nla_put_failure;
551
552	parms = nla_nest_start(skb, TCA_GRED_PARMS);
553	if (parms == NULL)
554		goto nla_put_failure;
555
556	for (i = 0; i < MAX_DPs; i++) {
557		struct gred_sched_data *q = table->tab[i];
558		struct tc_gred_qopt opt;
559		unsigned long qavg;
560
561		memset(&opt, 0, sizeof(opt));
562
563		if (!q) {
564			/* hack -- fix at some point with proper message
565			   This is how we indicate to tc that there is no VQ
566			   at this DP */
567
568			opt.DP = MAX_DPs + i;
569			goto append_opt;
570		}
571
572		opt.limit	= q->limit;
573		opt.DP		= q->DP;
574		opt.backlog	= gred_backlog(table, q, sch);
575		opt.prio	= q->prio;
576		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
577		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
578		opt.Wlog	= q->parms.Wlog;
579		opt.Plog	= q->parms.Plog;
580		opt.Scell_log	= q->parms.Scell_log;
581		opt.other	= q->stats.other;
582		opt.early	= q->stats.prob_drop;
583		opt.forced	= q->stats.forced_drop;
584		opt.pdrop	= q->stats.pdrop;
585		opt.packets	= q->packetsin;
586		opt.bytesin	= q->bytesin;
587
588		if (gred_wred_mode(table))
589			gred_load_wred_set(table, q);
590
591		qavg = red_calc_qavg(&q->parms, &q->vars,
592				     q->vars.qavg >> q->parms.Wlog);
593		opt.qave = qavg >> q->parms.Wlog;
594
595append_opt:
596		if (nla_append(skb, sizeof(opt), &opt) < 0)
597			goto nla_put_failure;
598	}
599
600	nla_nest_end(skb, parms);
601
602	return nla_nest_end(skb, opts);
603
604nla_put_failure:
605	nla_nest_cancel(skb, opts);
606	return -EMSGSIZE;
607}
608
609static void gred_destroy(struct Qdisc *sch)
610{
611	struct gred_sched *table = qdisc_priv(sch);
612	int i;
613
614	for (i = 0; i < table->DPs; i++) {
615		if (table->tab[i])
616			gred_destroy_vq(table->tab[i]);
617	}
618}
619
620static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
621	.id		=	"gred",
622	.priv_size	=	sizeof(struct gred_sched),
623	.enqueue	=	gred_enqueue,
624	.dequeue	=	gred_dequeue,
625	.peek		=	qdisc_peek_head,
626	.drop		=	gred_drop,
627	.init		=	gred_init,
628	.reset		=	gred_reset,
629	.destroy	=	gred_destroy,
630	.change		=	gred_change,
631	.dump		=	gred_dump,
632	.owner		=	THIS_MODULE,
633};
634
635static int __init gred_module_init(void)
636{
637	return register_qdisc(&gred_qdisc_ops);
638}
639
640static void __exit gred_module_exit(void)
641{
642	unregister_qdisc(&gred_qdisc_ops);
643}
644
645module_init(gred_module_init)
646module_exit(gred_module_exit)
647
648MODULE_LICENSE("GPL");
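
One aside before the newer listing: both versions delegate the averaging math to include/net/red.h. red_calc_qavg() keeps the average backlog qavg as a fixed-point EWMA scaled by 2^Wlog, so the steady-state update is simply qavg += backlog - (qavg >> Wlog). Below is a minimal standalone sketch of that update; the helper name ewma_update and every number in it are illustrative assumptions, not kernel code.

#include <stdio.h>

/* Fixed-point EWMA in the style of red_calc_qavg_no_idle_time():
 * qavg stays scaled by 2^Wlog, so this one line approximates
 *   avg = (1 - 2^-Wlog) * avg + 2^-Wlog * backlog
 * without any division. Unsigned wraparound in the subtraction
 * cancels out in the addition.
 */
static unsigned long ewma_update(unsigned long qavg, unsigned long backlog,
				 int Wlog)
{
	return qavg + (backlog - (qavg >> Wlog));
}

int main(void)
{
	unsigned long qavg = 0;	/* scaled by 2^Wlog */
	const int Wlog = 9;	/* weight 2^-9, a common RED choice */

	for (int i = 0; i < 5; i++) {
		qavg = ewma_update(qavg, 15000, Wlog);	/* 15000-byte backlog */
		printf("step %d: qavg = %lu (descaled %lu)\n",
		       i, qavg, qavg >> Wlog);
	}
	return 0;
}
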
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/sch_gred.c	Generic Random Early Detection queue.
  4 *
  5 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
  6 *
  7 *             991129: -  Bug fix with grio mode
  8 *		       - a better sing. AvgQ mode with Grio(WRED)
  9 *		       - A finer grained VQ dequeue based on suggestion
 10 *		         from Ren Liu
 11 *		       - More error checks
 12 *
 13 *  For all the glorious comments look at include/net/red.h
 14 */
 15
 16#include <linux/slab.h>
 17#include <linux/module.h>
 18#include <linux/types.h>
 19#include <linux/kernel.h>
 20#include <linux/skbuff.h>
 21#include <net/pkt_cls.h>
 22#include <net/pkt_sched.h>
 23#include <net/red.h>
 24
 25#define GRED_DEF_PRIO (MAX_DPs / 2)
 26#define GRED_VQ_MASK (MAX_DPs - 1)
 27
 28#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)
 29
 30struct gred_sched_data;
 31struct gred_sched;
 32
 33struct gred_sched_data {
 34	u32		limit;		/* HARD maximal queue length	*/
 35	u32		DP;		/* the drop parameters */
 36	u32		red_flags;	/* virtualQ version of red_flags */
 37	u64		bytesin;	/* bytes seen on virtualQ so far*/
 38	u32		packetsin;	/* packets seen on virtualQ so far*/
 39	u32		backlog;	/* bytes on the virtualQ */
 40	u8		prio;		/* the prio of this vq */
 41
 42	struct red_parms parms;
 43	struct red_vars  vars;
 44	struct red_stats stats;
 45};
 46
 47enum {
 48	GRED_WRED_MODE = 1,
 49	GRED_RIO_MODE,
 50};
 51
 52struct gred_sched {
 53	struct gred_sched_data *tab[MAX_DPs];
 54	unsigned long	flags;
 55	u32		red_flags;
 56	u32 		DPs;
 57	u32 		def;
 58	struct red_vars wred_set;
 59	struct tc_gred_qopt_offload *opt;
 60};
 61
 62static inline int gred_wred_mode(struct gred_sched *table)
 63{
 64	return test_bit(GRED_WRED_MODE, &table->flags);
 65}
 66
 67static inline void gred_enable_wred_mode(struct gred_sched *table)
 68{
 69	__set_bit(GRED_WRED_MODE, &table->flags);
 70}
 71
 72static inline void gred_disable_wred_mode(struct gred_sched *table)
 73{
 74	__clear_bit(GRED_WRED_MODE, &table->flags);
 75}
 76
 77static inline int gred_rio_mode(struct gred_sched *table)
 78{
 79	return test_bit(GRED_RIO_MODE, &table->flags);
 80}
 81
 82static inline void gred_enable_rio_mode(struct gred_sched *table)
 83{
 84	__set_bit(GRED_RIO_MODE, &table->flags);
 85}
 86
 87static inline void gred_disable_rio_mode(struct gred_sched *table)
 88{
 89	__clear_bit(GRED_RIO_MODE, &table->flags);
 90}
 91
 92static inline int gred_wred_mode_check(struct Qdisc *sch)
 93{
 94	struct gred_sched *table = qdisc_priv(sch);
 95	int i;
 96
 97	/* Really ugly O(n^2), but it shouldn't need to run too frequently. */
 98	for (i = 0; i < table->DPs; i++) {
 99		struct gred_sched_data *q = table->tab[i];
100		int n;
101
102		if (q == NULL)
103			continue;
104
105		for (n = i + 1; n < table->DPs; n++)
106			if (table->tab[n] && table->tab[n]->prio == q->prio)
107				return 1;
108	}
109
110	return 0;
111}
112
113static inline unsigned int gred_backlog(struct gred_sched *table,
114					struct gred_sched_data *q,
115					struct Qdisc *sch)
116{
117	if (gred_wred_mode(table))
118		return sch->qstats.backlog;
119	else
120		return q->backlog;
121}
122
123static inline u16 tc_index_to_dp(struct sk_buff *skb)
124{
125	return skb->tc_index & GRED_VQ_MASK;
126}
127
128static inline void gred_load_wred_set(const struct gred_sched *table,
129				      struct gred_sched_data *q)
130{
131	q->vars.qavg = table->wred_set.qavg;
132	q->vars.qidlestart = table->wred_set.qidlestart;
133}
134
135static inline void gred_store_wred_set(struct gred_sched *table,
136				       struct gred_sched_data *q)
137{
138	table->wred_set.qavg = q->vars.qavg;
139	table->wred_set.qidlestart = q->vars.qidlestart;
140}
141
142static int gred_use_ecn(struct gred_sched_data *q)
143{
144	return q->red_flags & TC_RED_ECN;
145}
146
147static int gred_use_harddrop(struct gred_sched_data *q)
148{
149	return q->red_flags & TC_RED_HARDDROP;
150}
151
152static bool gred_per_vq_red_flags_used(struct gred_sched *table)
153{
154	unsigned int i;
155
156	/* Local per-vq flags couldn't have been set unless global are 0 */
157	if (table->red_flags)
158		return false;
159	for (i = 0; i < MAX_DPs; i++)
160		if (table->tab[i] && table->tab[i]->red_flags)
161			return true;
162	return false;
163}
164
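/* Note (editor's gloss): per-virtual-queue RED flags and the per-Qdisc
 * sopt->flags are mutually exclusive by design. The helper above
 * reports per-VQ flags as "used" only while the global flags are zero,
 * and gred_change_table_def()/gred_vq_validate() below reject attempts
 * to mix the two schemes.
 */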
165static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
166			struct sk_buff **to_free)
167{
168	struct gred_sched_data *q = NULL;
169	struct gred_sched *t = qdisc_priv(sch);
170	unsigned long qavg = 0;
171	u16 dp = tc_index_to_dp(skb);
172
173	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
174		dp = t->def;
175
176		q = t->tab[dp];
177		if (!q) {
178			/* Pass through packets not assigned to a DP
179			 * if no default DP has been configured. This
180			 * allows for DP flows to be left untouched.
181			 */
182			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
183					sch->limit))
184				return qdisc_enqueue_tail(skb, sch);
185			else
186				goto drop;
187		}
188
189		/* fix tc_index? --could be controversial but needed for
190		   requeueing */
191		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
192	}
193
194	/* sum up all the qaves of prios < ours to get the new qave */
195	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
196		int i;
197
198		for (i = 0; i < t->DPs; i++) {
199			if (t->tab[i] && t->tab[i]->prio < q->prio &&
200			    !red_is_idling(&t->tab[i]->vars))
201				qavg += t->tab[i]->vars.qavg;
202		}
203
204	}
205
206	q->packetsin++;
207	q->bytesin += qdisc_pkt_len(skb);
208
209	if (gred_wred_mode(t))
210		gred_load_wred_set(t, q);
211
212	q->vars.qavg = red_calc_qavg(&q->parms,
213				     &q->vars,
214				     gred_backlog(t, q, sch));
215
216	if (red_is_idling(&q->vars))
217		red_end_of_idle_period(&q->vars);
218
219	if (gred_wred_mode(t))
220		gred_store_wred_set(t, q);
221
222	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
223	case RED_DONT_MARK:
224		break;
225
226	case RED_PROB_MARK:
227		qdisc_qstats_overlimit(sch);
228		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
229			q->stats.prob_drop++;
230			goto congestion_drop;
231		}
232
233		q->stats.prob_mark++;
234		break;
235
236	case RED_HARD_MARK:
237		qdisc_qstats_overlimit(sch);
238		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
239		    !INET_ECN_set_ce(skb)) {
240			q->stats.forced_drop++;
241			goto congestion_drop;
242		}
243		q->stats.forced_mark++;
244		break;
245	}
246
247	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
248		q->backlog += qdisc_pkt_len(skb);
249		return qdisc_enqueue_tail(skb, sch);
250	}
251
252	q->stats.pdrop++;
253drop:
254	return qdisc_drop(skb, sch, to_free);
255
256congestion_drop:
257	qdisc_drop(skb, sch, to_free);
258	return NET_XMIT_CN;
259}
260
261static struct sk_buff *gred_dequeue(struct Qdisc *sch)
262{
263	struct sk_buff *skb;
264	struct gred_sched *t = qdisc_priv(sch);
265
266	skb = qdisc_dequeue_head(sch);
267
268	if (skb) {
269		struct gred_sched_data *q;
270		u16 dp = tc_index_to_dp(skb);
271
272		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
273			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
274					     tc_index_to_dp(skb));
275		} else {
276			q->backlog -= qdisc_pkt_len(skb);
277
278			if (gred_wred_mode(t)) {
279				if (!sch->qstats.backlog)
280					red_start_of_idle_period(&t->wred_set);
281			} else {
282				if (!q->backlog)
283					red_start_of_idle_period(&q->vars);
284			}
285		}
286
287		return skb;
288	}
289
290	return NULL;
291}
292
293static void gred_reset(struct Qdisc *sch)
294{
295	int i;
296	struct gred_sched *t = qdisc_priv(sch);
297
298	qdisc_reset_queue(sch);
299
300	for (i = 0; i < t->DPs; i++) {
301		struct gred_sched_data *q = t->tab[i];
302
303		if (!q)
304			continue;
305
306		red_restart(&q->vars);
307		q->backlog = 0;
308	}
309}
310
311static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
312{
313	struct gred_sched *table = qdisc_priv(sch);
314	struct net_device *dev = qdisc_dev(sch);
315	struct tc_gred_qopt_offload *opt = table->opt;
316
317	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
318		return;
319
320	memset(opt, 0, sizeof(*opt));
321	opt->command = command;
322	opt->handle = sch->handle;
323	opt->parent = sch->parent;
324
325	if (command == TC_GRED_REPLACE) {
326		unsigned int i;
327
328		opt->set.grio_on = gred_rio_mode(table);
329		opt->set.wred_on = gred_wred_mode(table);
330		opt->set.dp_cnt = table->DPs;
331		opt->set.dp_def = table->def;
332
333		for (i = 0; i < table->DPs; i++) {
334			struct gred_sched_data *q = table->tab[i];
335
336			if (!q)
337				continue;
338			opt->set.tab[i].present = true;
339			opt->set.tab[i].limit = q->limit;
340			opt->set.tab[i].prio = q->prio;
341			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
342			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
343			opt->set.tab[i].is_ecn = gred_use_ecn(q);
344			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
345			opt->set.tab[i].probability = q->parms.max_P;
346			opt->set.tab[i].backlog = &q->backlog;
347		}
348		opt->set.qstats = &sch->qstats;
349	}
350
351	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
352}
353
354static int gred_offload_dump_stats(struct Qdisc *sch)
355{
356	struct gred_sched *table = qdisc_priv(sch);
357	struct tc_gred_qopt_offload *hw_stats;
358	u64 bytes = 0, packets = 0;
359	unsigned int i;
360	int ret;
361
362	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
363	if (!hw_stats)
364		return -ENOMEM;
365
366	hw_stats->command = TC_GRED_STATS;
367	hw_stats->handle = sch->handle;
368	hw_stats->parent = sch->parent;
369
370	for (i = 0; i < MAX_DPs; i++) {
371		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
372		if (table->tab[i])
373			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
374	}
375
376	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
377	/* Even if driver returns failure adjust the stats - in case offload
378	 * ended but driver still wants to adjust the values.
379	 */
380	sch_tree_lock(sch);
381	for (i = 0; i < MAX_DPs; i++) {
382		if (!table->tab[i])
383			continue;
384		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
385		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
386		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
387
388		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
389		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
390		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
391		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
392		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
393		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
394		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
395	}
396	_bstats_update(&sch->bstats, bytes, packets);
397	sch_tree_unlock(sch);
398
399	kfree(hw_stats);
400	return ret;
401}
402
403static inline void gred_destroy_vq(struct gred_sched_data *q)
404{
405	kfree(q);
406}
407
408static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
409				 struct netlink_ext_ack *extack)
410{
411	struct gred_sched *table = qdisc_priv(sch);
412	struct tc_gred_sopt *sopt;
413	bool red_flags_changed;
414	int i;
415
416	if (!dps)
417		return -EINVAL;
418
419	sopt = nla_data(dps);
420
421	if (sopt->DPs > MAX_DPs) {
422		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
423		return -EINVAL;
424	}
425	if (sopt->DPs == 0) {
426		NL_SET_ERR_MSG_MOD(extack,
427				   "number of virtual queues can't be 0");
428		return -EINVAL;
429	}
430	if (sopt->def_DP >= sopt->DPs) {
431		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
432		return -EINVAL;
433	}
434	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
435		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
436		return -EINVAL;
437	}
438
439	sch_tree_lock(sch);
440	table->DPs = sopt->DPs;
441	table->def = sopt->def_DP;
442	red_flags_changed = table->red_flags != sopt->flags;
443	table->red_flags = sopt->flags;
444
445	/*
446	 * Every entry point to GRED is synchronized with the above code
447	 * and the DP is checked against DPs, i.e. shadowed VQs can no
448	 * longer be found so we can unlock right here.
449	 */
450	sch_tree_unlock(sch);
451
452	if (sopt->grio) {
453		gred_enable_rio_mode(table);
454		gred_disable_wred_mode(table);
455		if (gred_wred_mode_check(sch))
456			gred_enable_wred_mode(table);
457	} else {
458		gred_disable_rio_mode(table);
459		gred_disable_wred_mode(table);
460	}
461
462	if (red_flags_changed)
463		for (i = 0; i < table->DPs; i++)
464			if (table->tab[i])
465				table->tab[i]->red_flags =
466					table->red_flags & GRED_VQ_RED_FLAGS;
467
468	for (i = table->DPs; i < MAX_DPs; i++) {
469		if (table->tab[i]) {
470			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
471				i);
472			gred_destroy_vq(table->tab[i]);
473			table->tab[i] = NULL;
474		}
475	}
476
477	gred_offload(sch, TC_GRED_REPLACE);
478	return 0;
479}
480
481static inline int gred_change_vq(struct Qdisc *sch, int dp,
482				 struct tc_gred_qopt *ctl, int prio,
483				 u8 *stab, u32 max_P,
484				 struct gred_sched_data **prealloc,
485				 struct netlink_ext_ack *extack)
486{
487	struct gred_sched *table = qdisc_priv(sch);
488	struct gred_sched_data *q = table->tab[dp];
489
490	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
491		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
492		return -EINVAL;
493	}
494
495	if (!q) {
496		table->tab[dp] = q = *prealloc;
497		*prealloc = NULL;
498		if (!q)
499			return -ENOMEM;
500		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
501	}
502
503	q->DP = dp;
504	q->prio = prio;
505	if (ctl->limit > sch->limit)
506		q->limit = sch->limit;
507	else
508		q->limit = ctl->limit;
509
510	if (q->backlog == 0)
511		red_end_of_idle_period(&q->vars);
512
513	red_set_parms(&q->parms,
514		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
515		      ctl->Scell_log, stab, max_P);
516	red_set_vars(&q->vars);
517	return 0;
518}
519
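/* Note (editor's gloss) on the *prealloc handshake above: gred_change()
 * allocates the gred_sched_data with GFP_KERNEL before taking
 * sch_tree_lock(), and gred_change_vq() adopts it (NULLing *prealloc)
 * only if the DP slot was still empty; whatever remains in prealloc is
 * kfree()d by the caller after unlocking. This keeps sleeping
 * allocations out of the locked section.
 */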
520static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
521	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
522	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
523};
524
525static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
526	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
527};
528
529static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
530	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
531	[TCA_GRED_STAB]		= { .len = 256 },
532	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
533	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
534	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
535	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
536};
537
538static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
539{
540	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
541	u32 dp;
542
543	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
544				    gred_vq_policy, NULL);
545
546	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
547
548	if (tb[TCA_GRED_VQ_FLAGS])
549		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
550}
551
552static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
553{
554	const struct nlattr *attr;
555	int rem;
556
557	nla_for_each_nested(attr, vqs, rem) {
558		switch (nla_type(attr)) {
559		case TCA_GRED_VQ_ENTRY:
560			gred_vq_apply(table, attr);
561			break;
562		}
563	}
564}
565
566static int gred_vq_validate(struct gred_sched *table, u32 cdp,
567			    const struct nlattr *entry,
568			    struct netlink_ext_ack *extack)
569{
570	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
571	int err;
572	u32 dp;
573
574	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
575					  gred_vq_policy, extack);
576	if (err < 0)
577		return err;
578
579	if (!tb[TCA_GRED_VQ_DP]) {
580		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
581		return -EINVAL;
582	}
583	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
584	if (dp >= table->DPs) {
585		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
586		return -EINVAL;
587	}
588	if (dp != cdp && !table->tab[dp]) {
589		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
590		return -EINVAL;
591	}
592
593	if (tb[TCA_GRED_VQ_FLAGS]) {
594		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
595
596		if (table->red_flags && table->red_flags != red_flags) {
597			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
598			return -EINVAL;
599		}
600		if (red_flags & ~GRED_VQ_RED_FLAGS) {
601			NL_SET_ERR_MSG_MOD(extack,
602					   "invalid RED flags specified");
603			return -EINVAL;
604		}
605	}
606
607	return 0;
608}
609
610static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
611			     struct nlattr *vqs, struct netlink_ext_ack *extack)
612{
613	const struct nlattr *attr;
614	int rem, err;
615
616	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
617					     gred_vqe_policy, extack);
618	if (err < 0)
619		return err;
620
621	nla_for_each_nested(attr, vqs, rem) {
622		switch (nla_type(attr)) {
623		case TCA_GRED_VQ_ENTRY:
624			err = gred_vq_validate(table, cdp, attr, extack);
625			if (err)
626				return err;
627			break;
628		default:
629			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
630			return -EINVAL;
631		}
632	}
633
634	if (rem > 0) {
635		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
636		return -EINVAL;
637	}
638
639	return 0;
640}
641
642static int gred_change(struct Qdisc *sch, struct nlattr *opt,
643		       struct netlink_ext_ack *extack)
644{
645	struct gred_sched *table = qdisc_priv(sch);
646	struct tc_gred_qopt *ctl;
647	struct nlattr *tb[TCA_GRED_MAX + 1];
648	int err, prio = GRED_DEF_PRIO;
649	u8 *stab;
650	u32 max_P;
651	struct gred_sched_data *prealloc;
652
653	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
654					  extack);
655	if (err < 0)
656		return err;
657
658	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
659		if (tb[TCA_GRED_LIMIT] != NULL)
660			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
661		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
662	}
663
664	if (tb[TCA_GRED_PARMS] == NULL ||
665	    tb[TCA_GRED_STAB] == NULL ||
666	    tb[TCA_GRED_LIMIT] != NULL) {
667		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
668		return -EINVAL;
669	}
670
671	max_P = nla_get_u32_default(tb[TCA_GRED_MAX_P], 0);
672
673	ctl = nla_data(tb[TCA_GRED_PARMS]);
674	stab = nla_data(tb[TCA_GRED_STAB]);
675
676	if (ctl->DP >= table->DPs) {
677		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
678		return -EINVAL;
679	}
680
681	if (tb[TCA_GRED_VQ_LIST]) {
682		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
683					extack);
684		if (err)
685			return err;
686	}
687
688	if (gred_rio_mode(table)) {
689		if (ctl->prio == 0) {
690			int def_prio = GRED_DEF_PRIO;
691
692			if (table->tab[table->def])
693				def_prio = table->tab[table->def]->prio;
694
695			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
696			       "setting default to %d\n", ctl->DP, def_prio);
697
698			prio = def_prio;
699		} else
700			prio = ctl->prio;
701	}
702
703	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
704	sch_tree_lock(sch);
705
706	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
707			     extack);
708	if (err < 0)
709		goto err_unlock_free;
710
711	if (tb[TCA_GRED_VQ_LIST])
712		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
713
714	if (gred_rio_mode(table)) {
715		gred_disable_wred_mode(table);
716		if (gred_wred_mode_check(sch))
717			gred_enable_wred_mode(table);
718	}
719
720	sch_tree_unlock(sch);
721	kfree(prealloc);
722
723	gred_offload(sch, TC_GRED_REPLACE);
724	return 0;
725
726err_unlock_free:
727	sch_tree_unlock(sch);
728	kfree(prealloc);
729	return err;
730}
731
732static int gred_init(struct Qdisc *sch, struct nlattr *opt,
733		     struct netlink_ext_ack *extack)
734{
735	struct gred_sched *table = qdisc_priv(sch);
736	struct nlattr *tb[TCA_GRED_MAX + 1];
737	int err;
738
739	if (!opt)
740		return -EINVAL;
741
742	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
743					  extack);
744	if (err < 0)
745		return err;
746
747	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
748		NL_SET_ERR_MSG_MOD(extack,
749				   "virtual queue configuration can't be specified at initialization time");
750		return -EINVAL;
751	}
752
753	if (tb[TCA_GRED_LIMIT])
754		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
755	else
756		sch->limit = qdisc_dev(sch)->tx_queue_len
757		             * psched_mtu(qdisc_dev(sch));
758
759	if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
760		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
761		if (!table->opt)
762			return -ENOMEM;
763	}
764
765	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
766}
767
768static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
769{
770	struct gred_sched *table = qdisc_priv(sch);
771	struct nlattr *parms, *vqs, *opts = NULL;
772	int i;
773	u32 max_p[MAX_DPs];
774	struct tc_gred_sopt sopt = {
775		.DPs	= table->DPs,
776		.def_DP	= table->def,
777		.grio	= gred_rio_mode(table),
778		.flags	= table->red_flags,
779	};
780
781	if (gred_offload_dump_stats(sch))
782		goto nla_put_failure;
783
784	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
785	if (opts == NULL)
786		goto nla_put_failure;
787	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
788		goto nla_put_failure;
789
790	for (i = 0; i < MAX_DPs; i++) {
791		struct gred_sched_data *q = table->tab[i];
792
793		max_p[i] = q ? q->parms.max_P : 0;
794	}
795	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
796		goto nla_put_failure;
797
798	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
799		goto nla_put_failure;
800
801	/* Old style all-in-one dump of VQs */
802	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
803	if (parms == NULL)
804		goto nla_put_failure;
805
806	for (i = 0; i < MAX_DPs; i++) {
807		struct gred_sched_data *q = table->tab[i];
808		struct tc_gred_qopt opt;
809		unsigned long qavg;
810
811		memset(&opt, 0, sizeof(opt));
812
813		if (!q) {
814			/* hack -- fix at some point with proper message
815			   This is how we indicate to tc that there is no VQ
816			   at this DP */
817
818			opt.DP = MAX_DPs + i;
819			goto append_opt;
820		}
821
822		opt.limit	= q->limit;
823		opt.DP		= q->DP;
824		opt.backlog	= gred_backlog(table, q, sch);
825		opt.prio	= q->prio;
826		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
827		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
828		opt.Wlog	= q->parms.Wlog;
829		opt.Plog	= q->parms.Plog;
830		opt.Scell_log	= q->parms.Scell_log;
831		opt.early	= q->stats.prob_drop;
832		opt.forced	= q->stats.forced_drop;
833		opt.pdrop	= q->stats.pdrop;
834		opt.packets	= q->packetsin;
835		opt.bytesin	= q->bytesin;
836
837		if (gred_wred_mode(table))
838			gred_load_wred_set(table, q);
839
840		qavg = red_calc_qavg(&q->parms, &q->vars,
841				     q->vars.qavg >> q->parms.Wlog);
842		opt.qave = qavg >> q->parms.Wlog;
843
844append_opt:
845		if (nla_append(skb, sizeof(opt), &opt) < 0)
846			goto nla_put_failure;
847	}
848
849	nla_nest_end(skb, parms);
850
851	/* Dump the VQs again, in more structured way */
852	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
853	if (!vqs)
854		goto nla_put_failure;
855
856	for (i = 0; i < MAX_DPs; i++) {
857		struct gred_sched_data *q = table->tab[i];
858		struct nlattr *vq;
859
860		if (!q)
861			continue;
862
863		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
864		if (!vq)
865			goto nla_put_failure;
866
867		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
868			goto nla_put_failure;
869
870		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
871			goto nla_put_failure;
872
873		/* Stats */
874		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
875				      TCA_GRED_VQ_PAD))
876			goto nla_put_failure;
877		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
878			goto nla_put_failure;
879		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
880				gred_backlog(table, q, sch)))
881			goto nla_put_failure;
882		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
883				q->stats.prob_drop))
884			goto nla_put_failure;
885		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
886				q->stats.prob_mark))
887			goto nla_put_failure;
888		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
889				q->stats.forced_drop))
890			goto nla_put_failure;
891		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
892				q->stats.forced_mark))
893			goto nla_put_failure;
894		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
895			goto nla_put_failure;
896
897		nla_nest_end(skb, vq);
898	}
899	nla_nest_end(skb, vqs);
900
901	return nla_nest_end(skb, opts);
902
903nla_put_failure:
904	nla_nest_cancel(skb, opts);
905	return -EMSGSIZE;
906}
907
908static void gred_destroy(struct Qdisc *sch)
909{
910	struct gred_sched *table = qdisc_priv(sch);
911	int i;
912
913	for (i = 0; i < table->DPs; i++)
914		gred_destroy_vq(table->tab[i]);
915
916	gred_offload(sch, TC_GRED_DESTROY);
917	kfree(table->opt);
918}
919
920static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
921	.id		=	"gred",
922	.priv_size	=	sizeof(struct gred_sched),
923	.enqueue	=	gred_enqueue,
924	.dequeue	=	gred_dequeue,
925	.peek		=	qdisc_peek_head,
926	.init		=	gred_init,
927	.reset		=	gred_reset,
928	.destroy	=	gred_destroy,
929	.change		=	gred_change,
930	.dump		=	gred_dump,
931	.owner		=	THIS_MODULE,
932};
933MODULE_ALIAS_NET_SCH("gred");
934
935static int __init gred_module_init(void)
936{
937	return register_qdisc(&gred_qdisc_ops);
938}
939
940static void __exit gred_module_exit(void)
941{
942	unregister_qdisc(&gred_qdisc_ops);
943}
944
945module_init(gred_module_init)
946module_exit(gred_module_exit)
947
948MODULE_LICENSE("GPL");
949MODULE_DESCRIPTION("Generic Random Early Detection qdisc");
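
A closing note: in both versions the virtual queue is chosen purely from the low bits of skb->tc_index (GRED_VQ_MASK = MAX_DPs - 1, with MAX_DPs defined as 16 in the UAPI header), which a classifier is expected to have set earlier in the datapath. A minimal userspace sketch of that mapping follows; the sample values are illustrative.

#include <stdint.h>
#include <stdio.h>

#define MAX_DPs		16		/* from include/uapi/linux/pkt_sched.h */
#define GRED_VQ_MASK	(MAX_DPs - 1)

/* Userspace mirror of the kernel's tc_index_to_dp(). */
static uint16_t tc_index_to_dp(uint16_t tc_index)
{
	return tc_index & GRED_VQ_MASK;
}

int main(void)
{
	const uint16_t samples[] = { 0x0000, 0x0005, 0x0015, 0x00ff };

	for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("tc_index 0x%04x -> DP %u\n",
		       (unsigned)samples[i],
		       (unsigned)tc_index_to_dp(samples[i]));
	return 0;
}
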