v3.5.6
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better sing. AvgQ mode with Grio(WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)
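/* Masking with (MAX_DPs - 1) isolates the DP bits only because MAX_DPs
 * is a power of two; with MAX_DPs == 16 the mask is 0xf, so a tc_index
 * of 0x123 selects DP 0x3.
 */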
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32 		DPs;
	u32 		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be necessary too frequent. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
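
/* Returns 1 as soon as two distinct virtual queues share a priority;
 * the callers use this to flip the table into WRED mode, where all VQs
 * share one average-queue estimate (wred_set).
 */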

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
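
/* skb->tc_index is expected to have been set further up the stack,
 * e.g. by the dsmark qdisc or a classifier; only its low GRED_VQ_MASK
 * bits select the virtual queue here.
 */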

static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
}

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
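
/* Note that the congestion_drop path returns NET_XMIT_CN rather than
 * NET_XMIT_DROP: the packet is gone, but the caller is told this was
 * congestion feedback rather than a hard failure.
 */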

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}

static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warning("GRED: Warning: Destroying "
				   "shadowed VQ 0x%x\n", i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}
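
/* A "shadowed" VQ is one whose index now falls at or beyond the new DP
 * count; e.g. shrinking DPs from 8 to 4 destroys tab[4]..tab[7] above.
 */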

static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
	}

	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}
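
/* The *prealloc handoff exists so that no allocation happens under the
 * qdisc tree lock: gred_change() kzalloc()s the buffer before taking
 * sch_tree_lock() and frees it afterwards if it went unused.
 */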

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
};

static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	kfree(prealloc);
errout:
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
v5.14.15
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better sing. AvgQ mode with Grio(WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32 		DPs;
	u32 		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be necessary too frequent. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless global are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		   requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}

	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
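
/* qdisc_drop() here chains the dropped skb onto *to_free; the caller
 * frees the whole batch after releasing the qdisc root lock.
 */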

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload opt = {
		.command	= command,
		.handle		= sch->handle,
		.parent		= sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt.set.grio_on = gred_rio_mode(table);
		opt.set.wred_on = gred_wred_mode(table);
		opt.set.dp_cnt = table->DPs;
		opt.set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt.set.tab[i].present = true;
			opt.set.tab[i].limit = q->limit;
			opt.set.tab[i].prio = q->prio;
			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt.set.tab[i].is_ecn = gred_use_ecn(q);
			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt.set.tab[i].probability = q->parms.max_P;
			opt.set.tab[i].backlog = &q->backlog;
		}
		opt.set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
}
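
/* Offload is best-effort: drivers opt in by handling TC_SETUP_QDISC_GRED
 * in ->ndo_setup_tc(), and a driver that does not recognize the command
 * simply returns an error while the qdisc keeps running in software.
 */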

static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if driver returns failure adjust the stats - in case offload
	 * ended but driver still wants to adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		_bstats_update(&sch->bstats,
			       hw_stats->stats.bstats[i].bytes,
			       hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};
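
/* Per-virtual-queue configuration arrives as nested attributes:
 *
 *   TCA_GRED_VQ_LIST
 *     TCA_GRED_VQ_ENTRY
 *       TCA_GRED_VQ_DP    (u32, mandatory)
 *       TCA_GRED_VQ_FLAGS (u32, optional)
 *     TCA_GRED_VQ_ENTRY
 *       ...
 */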

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
		             * psched_mtu(qdisc_dev(sch));

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}
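
/* When TCA_GRED_LIMIT is absent, the default byte limit corresponds to
 * tx_queue_len MTU-sized packets, roughly preserving the packet-count
 * cap that gred_enqueue() applied on older kernels.
 */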

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");