net/sched/sch_gred.c, as of v3.5.6:
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *		       - a better single AvgQ mode with grio (WRED)
 *		       - A finer grained VQ dequeue based on a suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

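/*
 * MAX_DPs comes from include/linux/pkt_sched.h (16 in kernels of this
 * vintage) and is a power of two, so GRED_VQ_MASK can extract the virtual
 * queue number from skb->tc_index with a single AND.  A quick sketch,
 * assuming MAX_DPs == 16:
 *
 *	tc_index 0x0123 & GRED_VQ_MASK -> DP 3
 *	tc_index 0x0010 & GRED_VQ_MASK -> DP 0   (upper bits are ignored)
 */
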
struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = 0; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n] != q &&
			    table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
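
/*
 * In other words: WRED mode is engaged when at least two distinct virtual
 * queues share the same priority.  gred_change_table_def() and gred_change()
 * re-run this check after every reconfiguration.  Note that the v3.15
 * listing further down starts the inner loop at i + 1, which halves the
 * comparisons and makes the tab[n] != q self-test unnecessary.
 */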

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}
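
/*
 * skb->tc_index is normally set by a classifier (e.g. the tcindex or dsmark
 * machinery) before a packet reaches this qdisc.  A typical setup, sketched
 * from the tc-gred documentation (device name and numbers are illustrative
 * only):
 *
 *	tc qdisc add dev eth0 root gred setup DPs 4 default 3 grio
 *	tc qdisc change dev eth0 root gred limit 60KB min 15KB max 45KB \
 *		burst 20 avpkt 1000 bandwidth 10Mbit DP 0 probability 0.02 prio 2
 *
 * Packets whose tc_index selects an unconfigured DP fall back to the default
 * DP in gred_enqueue() below.
 */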

static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
}

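/*
 * In WRED mode all virtual queues share one averaged queue state,
 * table->wred_set: each enqueue loads it into the VQ, updates it, and stores
 * it back.  Note the asymmetry in this version: only qavg is written back,
 * while qidlestart is loaded but never stored.  The v3.15 listing below also
 * stores qidlestart, keeping the shared idle timestamp coherent.
 */
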
static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
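
/*
 * The average queue computation in red_calc_qavg() (include/net/red.h) is a
 * fixed-point EWMA.  With W = 2^-Wlog and qavg stored left-shifted by Wlog:
 *
 *	avg <- (1 - W) * avg + W * backlog
 *	i.e.  v->qavg += backlog - (v->qavg >> p->Wlog);
 *
 * red_action() then compares the average against the thresholds: below
 * qth_min nothing happens, between qth_min and qth_max packets are marked or
 * dropped with a probability that grows toward max_P, and above qth_max
 * every packet gets RED_HARD_MARK.  In RIO mode the averages of all VQs with
 * a smaller prio value are added on top first, so lower-priority traffic
 * sees an inflated average and backs off earlier.
 */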

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
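
/*
 * Per-VQ backlog is tracked by hand here because all virtual queues share
 * the one real queue, sch->q; the VQ is recovered from tc_index, which is
 * why gred_enqueue() rewrites tc_index for packets that fell back to the
 * default DP.  In this version the shared WRED idle period only starts on a
 * dequeue attempt against an already-empty queue (the skb == NULL path); the
 * v3.15 listing below instead checks sch->qstats.backlog on every dequeue
 * and starts the idle period as soon as the byte backlog reaches zero.
 */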

static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warning("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				   i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
	}

	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}
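
/*
 * The *prealloc argument exists because this runs under sch_tree_lock(), a
 * spinlock, where a GFP_KERNEL allocation could sleep.  gred_change() below
 * therefore allocates before taking the lock and frees any unused buffer
 * after releasing it.  The shape of the pattern, roughly:
 *
 *	new = kzalloc(sizeof(*new), GFP_KERNEL); // may sleep, lock not held
 *	sch_tree_lock(sch);
 *	...hand "new" off (or not) under the lock...
 *	sch_tree_unlock(sch);
 *	kfree(new);                              // no-op if it was handed off
 */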

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
};

static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	kfree(prealloc);
errout:
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			 * This is how we indicate to tc that there is no VQ
			 * at this DP */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg);

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
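
/*
 * One quirk worth noticing in the qave computation above: qth_min and
 * qth_max are unscaled for the dump (>> Wlog), but qavg is kept internally
 * left-shifted by Wlog, and this version reports it without undoing the
 * shift.  The v3.15 listing below shifts the computed average down by Wlog
 * before filling in opt.qave.
 */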

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");
The same file, as of v3.15:
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: - Bug fix with grio mode
 *		       - a better single AvgQ mode with grio (WRED)
 *		       - A finer grained VQ dequeue based on a suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 *  For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}
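
/*
 * Same predicate as in the v3.5.6 listing above, but the inner loop now
 * starts at i + 1: each unordered pair of VQs is compared exactly once, and
 * the tab[n] != q self-test is no longer needed.
 */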

static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}
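
/*
 * Unlike the v3.5.6 version above, qidlestart is now stored back as well, so
 * the shared WRED state round-trips completely.  This matters to
 * gred_dump(), which calls gred_load_wred_set() and then red_calc_qavg();
 * the latter consults the idle timestamp via red_is_idling().
 */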

static inline int gred_use_ecn(struct gred_sched *t)
{
	return t->red_flags & TC_RED_ECN;
}

static inline int gred_use_harddrop(struct gred_sched *t)
{
	return t->red_flags & TC_RED_HARDDROP;
}

static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (skb_queue_len(&sch->q) < qdisc_dev(sch)->tx_queue_len)
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* fix tc_index? --could be controversial but needed for
		 * requeueing */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		sch->qstats.overlimits++;
		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		sch->qstats.overlimits++;
		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}

static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}
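
/*
 * Compared with the v3.5.6 version, the idle period now starts inside the
 * per-packet branch, as soon as the byte backlog reaches zero (globally in
 * WRED mode, per VQ otherwise), instead of on a later dequeue attempt
 * against an already-empty queue.  The tail check on the skb == NULL path
 * is gone accordingly.
 */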

static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		qdisc_drop(skb, sch);
		return len;
	}

	return 0;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	int i;

	if (dps == NULL)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
		return -EINVAL;

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	return 0;
}

static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
	}

	q->DP = dp;
	q->prio = prio;
	q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
};

static int gred_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL)
		return gred_change_table_def(sch, opt);

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	err = -EINVAL;
	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs)
		goto errout;

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
	if (err < 0)
		goto errout_locked;

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	err = 0;

errout_locked:
	sch_tree_unlock(sch);
	kfree(prealloc);
errout:
	return err;
}

static int gred_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
		return -EINVAL;

	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
}

static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	parms = nla_nest_start(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			 * This is how we indicate to tc that there is no VQ
			 * at this DP */
			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= q->backlog;
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
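
/*
 * The qave reporting differs from the v3.5.6 listing: the stored average is
 * passed in (unscaled) as the instantaneous backlog, so a non-idle queue
 * gets its stored average back unchanged while an idling queue gets the
 * usual idle-time decay, and the result is shifted down by Wlog so that
 * opt.qave is on the same scale as the unscaled qth_min/qth_max above.
 */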

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.drop		=	gred_drop,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");