v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * net/sched/sch_gred.c	Generic Random Early Detection queue.
  4 *
  5 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
  6 *
  7 *             991129: -  Bug fix with grio mode
  8 *		       - a better single AvgQ mode with Grio (WRED)
  9 *		       - A finer-grained VQ dequeue based on a suggestion
 10 *		         from Ren Liu
 11 *		       - More error checks
 12 *
 13 *  For all the glorious comments look at include/net/red.h
 14 */
 15
 16#include <linux/slab.h>
 17#include <linux/module.h>
 18#include <linux/types.h>
 19#include <linux/kernel.h>
 20#include <linux/skbuff.h>
 21#include <net/pkt_cls.h>
 22#include <net/pkt_sched.h>
 23#include <net/red.h>
 24
 25#define GRED_DEF_PRIO (MAX_DPs / 2)
 26#define GRED_VQ_MASK (MAX_DPs - 1)
 27
 28#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)
 29
 30struct gred_sched_data;
 31struct gred_sched;
 32
 33struct gred_sched_data {
 34	u32		limit;		/* HARD maximal queue length	*/
 35	u32		DP;		/* the drop parameters */
 36	u32		red_flags;	/* virtualQ version of red_flags */
 37	u64		bytesin;	/* bytes seen on virtualQ so far*/
 38	u32		packetsin;	/* packets seen on virtualQ so far*/
 39	u32		backlog;	/* bytes on the virtualQ */
 40	u8		prio;		/* the prio of this vq */
 41
 42	struct red_parms parms;
 43	struct red_vars  vars;
 44	struct red_stats stats;
 45};
 46
 47enum {
 48	GRED_WRED_MODE = 1,
 49	GRED_RIO_MODE,
 50};
 51
 52struct gred_sched {
 53	struct gred_sched_data *tab[MAX_DPs];
 54	unsigned long	flags;
 55	u32		red_flags;
 56	u32 		DPs;
 57	u32 		def;
 58	struct red_vars wred_set;
 59};
 60
 61static inline int gred_wred_mode(struct gred_sched *table)
 62{
 63	return test_bit(GRED_WRED_MODE, &table->flags);
 64}
 65
 66static inline void gred_enable_wred_mode(struct gred_sched *table)
 67{
 68	__set_bit(GRED_WRED_MODE, &table->flags);
 69}
 70
 71static inline void gred_disable_wred_mode(struct gred_sched *table)
 72{
 73	__clear_bit(GRED_WRED_MODE, &table->flags);
 74}
 75
 76static inline int gred_rio_mode(struct gred_sched *table)
 77{
 78	return test_bit(GRED_RIO_MODE, &table->flags);
 79}
 80
 81static inline void gred_enable_rio_mode(struct gred_sched *table)
 82{
 83	__set_bit(GRED_RIO_MODE, &table->flags);
 84}
 85
 86static inline void gred_disable_rio_mode(struct gred_sched *table)
 87{
 88	__clear_bit(GRED_RIO_MODE, &table->flags);
 89}
 90
 91static inline int gred_wred_mode_check(struct Qdisc *sch)
 92{
 93	struct gred_sched *table = qdisc_priv(sch);
 94	int i;
 95
  96	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
 97	for (i = 0; i < table->DPs; i++) {
 98		struct gred_sched_data *q = table->tab[i];
 99		int n;
100
101		if (q == NULL)
102			continue;
103
104		for (n = i + 1; n < table->DPs; n++)
105			if (table->tab[n] && table->tab[n]->prio == q->prio)
106				return 1;
107	}
108
109	return 0;
110}
111
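gred_wred_mode_check() returns 1 when two virtual queues share the same priority, which is the condition under which grio runs in WRED mode (one average queue shared across the VQs). A minimal userspace sketch of the same duplicate-priority scan, with a plain array standing in for table->tab[] (names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool has_duplicate_prio(const unsigned char *prio, int n)
{
	for (int i = 0; i < n; i++)
		for (int j = i + 1; j < n; j++)
			if (prio[j] == prio[i])
				return true;	/* two VQs share a prio */
	return false;
}

int main(void)
{
	unsigned char prios[] = { 8, 3, 8, 1 };	/* DPs 0 and 2 collide */

	printf("WRED mode: %s\n", has_duplicate_prio(prios, 4) ? "on" : "off");
	return 0;
}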
112static inline unsigned int gred_backlog(struct gred_sched *table,
113					struct gred_sched_data *q,
114					struct Qdisc *sch)
115{
116	if (gred_wred_mode(table))
117		return sch->qstats.backlog;
118	else
119		return q->backlog;
120}
121
122static inline u16 tc_index_to_dp(struct sk_buff *skb)
123{
124	return skb->tc_index & GRED_VQ_MASK;
125}
126
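tc_index_to_dp() selects the virtual queue purely from the low bits of skb->tc_index, set earlier in the classification path. A userspace sketch of the masking, assuming MAX_DPs is 16 as defined in the UAPI pkt_sched.h:

#include <stdint.h>
#include <stdio.h>

#define MAX_DPs		16		/* from the UAPI pkt_sched.h */
#define GRED_VQ_MASK	(MAX_DPs - 1)

static uint16_t tc_index_to_dp(uint16_t tc_index)
{
	return tc_index & GRED_VQ_MASK;	/* low 4 bits pick the VQ */
}

int main(void)
{
	/* tc_index 0x2a -> DP 10; the high bits are ignored */
	printf("DP = %u\n", tc_index_to_dp(0x2a));
	return 0;
}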
127static inline void gred_load_wred_set(const struct gred_sched *table,
128				      struct gred_sched_data *q)
129{
130	q->vars.qavg = table->wred_set.qavg;
131	q->vars.qidlestart = table->wred_set.qidlestart;
132}
133
134static inline void gred_store_wred_set(struct gred_sched *table,
135				       struct gred_sched_data *q)
136{
137	table->wred_set.qavg = q->vars.qavg;
138	table->wred_set.qidlestart = q->vars.qidlestart;
139}
140
141static int gred_use_ecn(struct gred_sched_data *q)
142{
143	return q->red_flags & TC_RED_ECN;
144}
145
146static int gred_use_harddrop(struct gred_sched_data *q)
147{
148	return q->red_flags & TC_RED_HARDDROP;
149}
150
151static bool gred_per_vq_red_flags_used(struct gred_sched *table)
152{
153	unsigned int i;
154
 155	/* Local per-vq flags couldn't have been set unless the global ones are 0 */
156	if (table->red_flags)
157		return false;
158	for (i = 0; i < MAX_DPs; i++)
159		if (table->tab[i] && table->tab[i]->red_flags)
160			return true;
161	return false;
162}
163
164static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
165			struct sk_buff **to_free)
166{
167	struct gred_sched_data *q = NULL;
168	struct gred_sched *t = qdisc_priv(sch);
169	unsigned long qavg = 0;
170	u16 dp = tc_index_to_dp(skb);
171
172	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
173		dp = t->def;
174
175		q = t->tab[dp];
176		if (!q) {
177			/* Pass through packets not assigned to a DP
178			 * if no default DP has been configured. This
179			 * allows for DP flows to be left untouched.
180			 */
181			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
182					sch->limit))
183				return qdisc_enqueue_tail(skb, sch);
184			else
185				goto drop;
186		}
187
188		/* fix tc_index? --could be controversial but needed for
189		   requeueing */
190		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
191	}
192
193	/* sum up all the qaves of prios < ours to get the new qave */
194	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
195		int i;
196
197		for (i = 0; i < t->DPs; i++) {
198			if (t->tab[i] && t->tab[i]->prio < q->prio &&
199			    !red_is_idling(&t->tab[i]->vars))
200				qavg += t->tab[i]->vars.qavg;
201		}
202
203	}
204
205	q->packetsin++;
206	q->bytesin += qdisc_pkt_len(skb);
207
208	if (gred_wred_mode(t))
209		gred_load_wred_set(t, q);
210
211	q->vars.qavg = red_calc_qavg(&q->parms,
212				     &q->vars,
213				     gred_backlog(t, q, sch));
214
215	if (red_is_idling(&q->vars))
216		red_end_of_idle_period(&q->vars);
217
218	if (gred_wred_mode(t))
219		gred_store_wred_set(t, q);
220
221	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
222	case RED_DONT_MARK:
223		break;
224
225	case RED_PROB_MARK:
226		qdisc_qstats_overlimit(sch);
227		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
228			q->stats.prob_drop++;
229			goto congestion_drop;
230		}
231
232		q->stats.prob_mark++;
233		break;
234
235	case RED_HARD_MARK:
236		qdisc_qstats_overlimit(sch);
237		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
238		    !INET_ECN_set_ce(skb)) {
239			q->stats.forced_drop++;
240			goto congestion_drop;
241		}
242		q->stats.forced_mark++;
243		break;
244	}
245
246	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
247		q->backlog += qdisc_pkt_len(skb);
248		return qdisc_enqueue_tail(skb, sch);
249	}
250
251	q->stats.pdrop++;
252drop:
253	return qdisc_drop(skb, sch, to_free);
254
255congestion_drop:
256	qdisc_drop(skb, sch, to_free);
257	return NET_XMIT_CN;
258}
259
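The marking decision itself lives in include/net/red.h (red_calc_qavg() and red_action()). As a rough illustration of what gred_enqueue() delegates there - a floating-point model of the EWMA average and the two thresholds; the kernel uses fixed-point arithmetic and lookup tables, and also applies idle-time and inter-mark-count corrections omitted here:

#include <stdio.h>

enum red_action { RED_DONT_MARK, RED_PROB_MARK, RED_HARD_MARK };

static double qavg;	/* EWMA of the instantaneous backlog */

static enum red_action red_decide(double backlog, double w,
				  double qth_min, double qth_max,
				  double max_p, double *p)
{
	qavg = (1.0 - w) * qavg + w * backlog;
	*p = 0.0;
	if (qavg < qth_min)
		return RED_DONT_MARK;
	if (qavg >= qth_max)
		return RED_HARD_MARK;	/* mark (or drop) everything */
	/* marking probability ramps linearly from 0 up to max_p */
	*p = max_p * (qavg - qth_min) / (qth_max - qth_min);
	return RED_PROB_MARK;
}

int main(void)
{
	double p;

	for (int i = 1; i <= 40; i++) {
		enum red_action a = red_decide(20000.0, 0.05,
					       5000.0, 15000.0, 0.02, &p);
		if (i % 5 == 0)
			printf("step %2d  qavg=%7.1f  action=%d  p=%.4f\n",
			       i, qavg, a, p);
	}
	return 0;
}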
260static struct sk_buff *gred_dequeue(struct Qdisc *sch)
261{
262	struct sk_buff *skb;
263	struct gred_sched *t = qdisc_priv(sch);
264
265	skb = qdisc_dequeue_head(sch);
266
267	if (skb) {
268		struct gred_sched_data *q;
269		u16 dp = tc_index_to_dp(skb);
270
271		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
272			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
273					     tc_index_to_dp(skb));
274		} else {
275			q->backlog -= qdisc_pkt_len(skb);
276
277			if (gred_wred_mode(t)) {
278				if (!sch->qstats.backlog)
279					red_start_of_idle_period(&t->wred_set);
280			} else {
281				if (!q->backlog)
282					red_start_of_idle_period(&q->vars);
283			}
284		}
285
286		return skb;
287	}
288
289	return NULL;
290}
291
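When a queue drains to empty, gred_dequeue() records the start of an idle period; the next enqueue then ages qavg as if zero-backlog samples had kept arriving while the link sat idle. A conceptual floating-point model (the kernel approximates this with the Scell_log lookup table rather than pow()):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double qavg = 12000.0;	/* average when the queue went idle */
	double w = 0.002;	/* queue weight, i.e. 2^-Wlog */
	double m = 500.0;	/* packets the link could have sent while idle */

	/* every "missed" departure counts as one zero-backlog EWMA sample */
	qavg *= pow(1.0 - w, m);
	printf("qavg after idle period: %.1f\n", qavg);	/* ~4410 */
	return 0;
}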
292static void gred_reset(struct Qdisc *sch)
293{
294	int i;
295	struct gred_sched *t = qdisc_priv(sch);
296
297	qdisc_reset_queue(sch);
298
299	for (i = 0; i < t->DPs; i++) {
300		struct gred_sched_data *q = t->tab[i];
301
302		if (!q)
303			continue;
304
305		red_restart(&q->vars);
306		q->backlog = 0;
307	}
308}
309
310static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
311{
312	struct gred_sched *table = qdisc_priv(sch);
313	struct net_device *dev = qdisc_dev(sch);
314	struct tc_gred_qopt_offload opt = {
315		.command	= command,
316		.handle		= sch->handle,
317		.parent		= sch->parent,
318	};
319
320	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
321		return;
322
323	if (command == TC_GRED_REPLACE) {
324		unsigned int i;
325
326		opt.set.grio_on = gred_rio_mode(table);
327		opt.set.wred_on = gred_wred_mode(table);
328		opt.set.dp_cnt = table->DPs;
329		opt.set.dp_def = table->def;
330
331		for (i = 0; i < table->DPs; i++) {
332			struct gred_sched_data *q = table->tab[i];
333
334			if (!q)
335				continue;
336			opt.set.tab[i].present = true;
337			opt.set.tab[i].limit = q->limit;
338			opt.set.tab[i].prio = q->prio;
339			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
340			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
341			opt.set.tab[i].is_ecn = gred_use_ecn(q);
342			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
343			opt.set.tab[i].probability = q->parms.max_P;
344			opt.set.tab[i].backlog = &q->backlog;
345		}
346		opt.set.qstats = &sch->qstats;
347	}
348
349	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
350}
351
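On the driver side, the offload request arrives through ndo_setup_tc() with type TC_SETUP_QDISC_GRED and the struct built above as type_data. A hedged skeleton of how a driver might dispatch it - the foo_* names are hypothetical, and only the commands and fields used in this file are assumed:

static int foo_setup_tc_gred(struct net_device *dev,
			     struct tc_gred_qopt_offload *opt)
{
	switch (opt->command) {
	case TC_GRED_REPLACE:
		/* program opt->set.dp_cnt virtual queues into the HW,
		 * honouring tab[i].limit/prio/min/max for present VQs */
		return 0;
	case TC_GRED_DESTROY:
		/* tear down the hardware qdisc state */
		return 0;
	case TC_GRED_STATS:
		/* report deltas via opt->stats.bstats/qstats/xstats */
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int foo_ndo_setup_tc(struct net_device *dev, enum tc_setup_type type,
			    void *type_data)
{
	if (type == TC_SETUP_QDISC_GRED)
		return foo_setup_tc_gred(dev, type_data);
	return -EOPNOTSUPP;
}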
352static int gred_offload_dump_stats(struct Qdisc *sch)
353{
354	struct gred_sched *table = qdisc_priv(sch);
355	struct tc_gred_qopt_offload *hw_stats;
356	unsigned int i;
357	int ret;
358
359	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
360	if (!hw_stats)
361		return -ENOMEM;
362
363	hw_stats->command = TC_GRED_STATS;
364	hw_stats->handle = sch->handle;
365	hw_stats->parent = sch->parent;
366
367	for (i = 0; i < MAX_DPs; i++)
368		if (table->tab[i])
369			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
370
371	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
 372	/* Even if the driver returns failure, adjust the stats - in case
 373	 * offload ended but the driver still wants to adjust the values.
 374	 */
375	for (i = 0; i < MAX_DPs; i++) {
376		if (!table->tab[i])
377			continue;
378		table->tab[i]->packetsin += hw_stats->stats.bstats[i].packets;
379		table->tab[i]->bytesin += hw_stats->stats.bstats[i].bytes;
380		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;
381
382		_bstats_update(&sch->bstats,
383			       hw_stats->stats.bstats[i].bytes,
384			       hw_stats->stats.bstats[i].packets);
385		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
386		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
387		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
388		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
389		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
390	}
391
392	kfree(hw_stats);
393	return ret;
394}
395
396static inline void gred_destroy_vq(struct gred_sched_data *q)
397{
398	kfree(q);
399}
400
401static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
402				 struct netlink_ext_ack *extack)
403{
404	struct gred_sched *table = qdisc_priv(sch);
405	struct tc_gred_sopt *sopt;
406	bool red_flags_changed;
407	int i;
408
409	if (!dps)
410		return -EINVAL;
411
412	sopt = nla_data(dps);
413
414	if (sopt->DPs > MAX_DPs) {
415		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
416		return -EINVAL;
417	}
418	if (sopt->DPs == 0) {
419		NL_SET_ERR_MSG_MOD(extack,
420				   "number of virtual queues can't be 0");
421		return -EINVAL;
422	}
423	if (sopt->def_DP >= sopt->DPs) {
424		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
425		return -EINVAL;
426	}
427	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
428		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
429		return -EINVAL;
430	}
431
432	sch_tree_lock(sch);
433	table->DPs = sopt->DPs;
434	table->def = sopt->def_DP;
435	red_flags_changed = table->red_flags != sopt->flags;
436	table->red_flags = sopt->flags;
437
438	/*
439	 * Every entry point to GRED is synchronized with the above code
440	 * and the DP is checked against DPs, i.e. shadowed VQs can no
441	 * longer be found so we can unlock right here.
442	 */
443	sch_tree_unlock(sch);
444
445	if (sopt->grio) {
446		gred_enable_rio_mode(table);
447		gred_disable_wred_mode(table);
448		if (gred_wred_mode_check(sch))
449			gred_enable_wred_mode(table);
450	} else {
451		gred_disable_rio_mode(table);
452		gred_disable_wred_mode(table);
453	}
454
455	if (red_flags_changed)
456		for (i = 0; i < table->DPs; i++)
457			if (table->tab[i])
458				table->tab[i]->red_flags =
459					table->red_flags & GRED_VQ_RED_FLAGS;
460
461	for (i = table->DPs; i < MAX_DPs; i++) {
462		if (table->tab[i]) {
463			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
464				i);
465			gred_destroy_vq(table->tab[i]);
466			table->tab[i] = NULL;
467		}
468	}
469
470	gred_offload(sch, TC_GRED_REPLACE);
471	return 0;
472}
473
474static inline int gred_change_vq(struct Qdisc *sch, int dp,
475				 struct tc_gred_qopt *ctl, int prio,
476				 u8 *stab, u32 max_P,
477				 struct gred_sched_data **prealloc,
478				 struct netlink_ext_ack *extack)
479{
480	struct gred_sched *table = qdisc_priv(sch);
481	struct gred_sched_data *q = table->tab[dp];
482
483	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog)) {
484		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
485		return -EINVAL;
486	}
487
488	if (!q) {
489		table->tab[dp] = q = *prealloc;
490		*prealloc = NULL;
491		if (!q)
492			return -ENOMEM;
493		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
494	}
495
496	q->DP = dp;
497	q->prio = prio;
498	if (ctl->limit > sch->limit)
499		q->limit = sch->limit;
500	else
501		q->limit = ctl->limit;
502
503	if (q->backlog == 0)
504		red_end_of_idle_period(&q->vars);
505
506	red_set_parms(&q->parms,
507		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
508		      ctl->Scell_log, stab, max_P);
509	red_set_vars(&q->vars);
510	return 0;
511}
512
513static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
514	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
515	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
516};
517
518static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
519	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
520};
521
522static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
523	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
524	[TCA_GRED_STAB]		= { .len = 256 },
525	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
526	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
527	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
528	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
529};
530
531static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
532{
533	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
534	u32 dp;
535
536	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
537				    gred_vq_policy, NULL);
538
539	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
540
541	if (tb[TCA_GRED_VQ_FLAGS])
542		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
543}
544
545static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
546{
547	const struct nlattr *attr;
548	int rem;
549
550	nla_for_each_nested(attr, vqs, rem) {
551		switch (nla_type(attr)) {
552		case TCA_GRED_VQ_ENTRY:
553			gred_vq_apply(table, attr);
554			break;
555		}
556	}
557}
558
559static int gred_vq_validate(struct gred_sched *table, u32 cdp,
560			    const struct nlattr *entry,
561			    struct netlink_ext_ack *extack)
562{
563	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
564	int err;
565	u32 dp;
566
567	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
568					  gred_vq_policy, extack);
569	if (err < 0)
570		return err;
571
572	if (!tb[TCA_GRED_VQ_DP]) {
573		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
574		return -EINVAL;
575	}
576	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
577	if (dp >= table->DPs) {
578		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
579		return -EINVAL;
580	}
581	if (dp != cdp && !table->tab[dp]) {
582		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
583		return -EINVAL;
584	}
585
586	if (tb[TCA_GRED_VQ_FLAGS]) {
587		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
588
589		if (table->red_flags && table->red_flags != red_flags) {
590			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
591			return -EINVAL;
592		}
593		if (red_flags & ~GRED_VQ_RED_FLAGS) {
594			NL_SET_ERR_MSG_MOD(extack,
595					   "invalid RED flags specified");
596			return -EINVAL;
597		}
598	}
599
600	return 0;
601}
602
603static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
604			     struct nlattr *vqs, struct netlink_ext_ack *extack)
605{
606	const struct nlattr *attr;
607	int rem, err;
608
609	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
610					     gred_vqe_policy, extack);
611	if (err < 0)
612		return err;
613
614	nla_for_each_nested(attr, vqs, rem) {
615		switch (nla_type(attr)) {
616		case TCA_GRED_VQ_ENTRY:
617			err = gred_vq_validate(table, cdp, attr, extack);
618			if (err)
619				return err;
620			break;
621		default:
622			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
623			return -EINVAL;
624		}
625	}
626
627	if (rem > 0) {
628		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
629		return -EINVAL;
630	}
631
632	return 0;
633}
634
635static int gred_change(struct Qdisc *sch, struct nlattr *opt,
636		       struct netlink_ext_ack *extack)
637{
638	struct gred_sched *table = qdisc_priv(sch);
639	struct tc_gred_qopt *ctl;
640	struct nlattr *tb[TCA_GRED_MAX + 1];
641	int err, prio = GRED_DEF_PRIO;
642	u8 *stab;
643	u32 max_P;
644	struct gred_sched_data *prealloc;
645
646	if (opt == NULL)
647		return -EINVAL;
648
649	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
650					  extack);
651	if (err < 0)
652		return err;
653
654	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
655		if (tb[TCA_GRED_LIMIT] != NULL)
656			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
657		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
658	}
659
660	if (tb[TCA_GRED_PARMS] == NULL ||
661	    tb[TCA_GRED_STAB] == NULL ||
662	    tb[TCA_GRED_LIMIT] != NULL) {
663		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
664		return -EINVAL;
665	}
666
667	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
668
669	ctl = nla_data(tb[TCA_GRED_PARMS]);
670	stab = nla_data(tb[TCA_GRED_STAB]);
671
672	if (ctl->DP >= table->DPs) {
673		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
674		return -EINVAL;
675	}
676
677	if (tb[TCA_GRED_VQ_LIST]) {
678		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
679					extack);
680		if (err)
681			return err;
682	}
683
684	if (gred_rio_mode(table)) {
685		if (ctl->prio == 0) {
686			int def_prio = GRED_DEF_PRIO;
687
688			if (table->tab[table->def])
689				def_prio = table->tab[table->def]->prio;
690
691			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
692			       "setting default to %d\n", ctl->DP, def_prio);
693
694			prio = def_prio;
695		} else
696			prio = ctl->prio;
697	}
698
699	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
700	sch_tree_lock(sch);
701
702	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
703			     extack);
704	if (err < 0)
705		goto err_unlock_free;
706
707	if (tb[TCA_GRED_VQ_LIST])
708		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);
709
710	if (gred_rio_mode(table)) {
711		gred_disable_wred_mode(table);
712		if (gred_wred_mode_check(sch))
713			gred_enable_wred_mode(table);
714	}
715
716	sch_tree_unlock(sch);
717	kfree(prealloc);
718
719	gred_offload(sch, TC_GRED_REPLACE);
720	return 0;
721
722err_unlock_free:
723	sch_tree_unlock(sch);
724	kfree(prealloc);
725	return err;
726}
727
728static int gred_init(struct Qdisc *sch, struct nlattr *opt,
729		     struct netlink_ext_ack *extack)
730{
731	struct nlattr *tb[TCA_GRED_MAX + 1];
732	int err;
733
734	if (!opt)
735		return -EINVAL;
736
737	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
738					  extack);
739	if (err < 0)
740		return err;
741
742	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
743		NL_SET_ERR_MSG_MOD(extack,
744				   "virtual queue configuration can't be specified at initialization time");
745		return -EINVAL;
746	}
747
748	if (tb[TCA_GRED_LIMIT])
749		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
750	else
751		sch->limit = qdisc_dev(sch)->tx_queue_len
752		             * psched_mtu(qdisc_dev(sch));
753
754	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
755}
756
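Absent a TCA_GRED_LIMIT attribute, the byte limit defaults to tx_queue_len packets of psched_mtu() size (device MTU plus the hard header length). A quick numeric sketch with hypothetical example values:

#include <stdio.h>

int main(void)
{
	unsigned int tx_queue_len = 1000;	/* common Ethernet default */
	unsigned int mtu = 1500, hard_header_len = 14;
	unsigned int psched_mtu = mtu + hard_header_len;

	/* the default sch->limit computed by gred_init() */
	printf("limit = %u bytes\n", tx_queue_len * psched_mtu);
	return 0;
}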
757static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
758{
759	struct gred_sched *table = qdisc_priv(sch);
760	struct nlattr *parms, *vqs, *opts = NULL;
761	int i;
762	u32 max_p[MAX_DPs];
763	struct tc_gred_sopt sopt = {
764		.DPs	= table->DPs,
765		.def_DP	= table->def,
766		.grio	= gred_rio_mode(table),
767		.flags	= table->red_flags,
768	};
769
770	if (gred_offload_dump_stats(sch))
771		goto nla_put_failure;
772
773	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
774	if (opts == NULL)
775		goto nla_put_failure;
776	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
777		goto nla_put_failure;
778
779	for (i = 0; i < MAX_DPs; i++) {
780		struct gred_sched_data *q = table->tab[i];
781
782		max_p[i] = q ? q->parms.max_P : 0;
783	}
784	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
785		goto nla_put_failure;
786
787	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
788		goto nla_put_failure;
789
790	/* Old style all-in-one dump of VQs */
791	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
792	if (parms == NULL)
793		goto nla_put_failure;
794
795	for (i = 0; i < MAX_DPs; i++) {
796		struct gred_sched_data *q = table->tab[i];
797		struct tc_gred_qopt opt;
798		unsigned long qavg;
799
800		memset(&opt, 0, sizeof(opt));
801
802		if (!q) {
 803			/* hack -- fix at some point with a proper message.
804			   This is how we indicate to tc that there is no VQ
805			   at this DP */
806
807			opt.DP = MAX_DPs + i;
808			goto append_opt;
809		}
810
811		opt.limit	= q->limit;
812		opt.DP		= q->DP;
813		opt.backlog	= gred_backlog(table, q, sch);
814		opt.prio	= q->prio;
815		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
816		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
817		opt.Wlog	= q->parms.Wlog;
818		opt.Plog	= q->parms.Plog;
819		opt.Scell_log	= q->parms.Scell_log;
820		opt.other	= q->stats.other;
821		opt.early	= q->stats.prob_drop;
822		opt.forced	= q->stats.forced_drop;
823		opt.pdrop	= q->stats.pdrop;
824		opt.packets	= q->packetsin;
825		opt.bytesin	= q->bytesin;
826
827		if (gred_wred_mode(table))
828			gred_load_wred_set(table, q);
829
830		qavg = red_calc_qavg(&q->parms, &q->vars,
831				     q->vars.qavg >> q->parms.Wlog);
832		opt.qave = qavg >> q->parms.Wlog;
833
834append_opt:
835		if (nla_append(skb, sizeof(opt), &opt) < 0)
836			goto nla_put_failure;
837	}
838
839	nla_nest_end(skb, parms);
840
841	/* Dump the VQs again, in more structured way */
842	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
843	if (!vqs)
844		goto nla_put_failure;
845
846	for (i = 0; i < MAX_DPs; i++) {
847		struct gred_sched_data *q = table->tab[i];
848		struct nlattr *vq;
849
850		if (!q)
851			continue;
852
853		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
854		if (!vq)
855			goto nla_put_failure;
856
857		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
858			goto nla_put_failure;
859
860		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
861			goto nla_put_failure;
862
863		/* Stats */
864		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
865				      TCA_GRED_VQ_PAD))
866			goto nla_put_failure;
867		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
868			goto nla_put_failure;
869		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
870				gred_backlog(table, q, sch)))
871			goto nla_put_failure;
872		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
873				q->stats.prob_drop))
874			goto nla_put_failure;
875		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
876				q->stats.prob_mark))
877			goto nla_put_failure;
878		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
879				q->stats.forced_drop))
880			goto nla_put_failure;
881		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
882				q->stats.forced_mark))
883			goto nla_put_failure;
884		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
885			goto nla_put_failure;
886		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
887			goto nla_put_failure;
888
889		nla_nest_end(skb, vq);
890	}
891	nla_nest_end(skb, vqs);
892
893	return nla_nest_end(skb, opts);
894
895nla_put_failure:
896	nla_nest_cancel(skb, opts);
897	return -EMSGSIZE;
898}
899
900static void gred_destroy(struct Qdisc *sch)
901{
902	struct gred_sched *table = qdisc_priv(sch);
903	int i;
904
905	for (i = 0; i < table->DPs; i++) {
906		if (table->tab[i])
907			gred_destroy_vq(table->tab[i]);
908	}
909	gred_offload(sch, TC_GRED_DESTROY);
910}
911
912static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
913	.id		=	"gred",
914	.priv_size	=	sizeof(struct gred_sched),
915	.enqueue	=	gred_enqueue,
916	.dequeue	=	gred_dequeue,
917	.peek		=	qdisc_peek_head,
918	.init		=	gred_init,
919	.reset		=	gred_reset,
920	.destroy	=	gred_destroy,
921	.change		=	gred_change,
922	.dump		=	gred_dump,
923	.owner		=	THIS_MODULE,
924};
925
926static int __init gred_module_init(void)
927{
928	return register_qdisc(&gred_qdisc_ops);
929}
930
931static void __exit gred_module_exit(void)
932{
933	unregister_qdisc(&gred_qdisc_ops);
934}
935
936module_init(gred_module_init)
937module_exit(gred_module_exit)
938
939MODULE_LICENSE("GPL");
v4.6
 
  1/*
  2 * net/sched/sch_gred.c	Generic Random Early Detection queue.
  3 *
  4 *
  5 *              This program is free software; you can redistribute it and/or
  6 *              modify it under the terms of the GNU General Public License
  7 *              as published by the Free Software Foundation; either version
  8 *              2 of the License, or (at your option) any later version.
  9 *
 10 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 11 *
 12 *             991129: -  Bug fix with grio mode
 13 *		       - a better single AvgQ mode with Grio (WRED)
 14 *		       - A finer-grained VQ dequeue based on a suggestion
 15 *		         from Ren Liu
 16 *		       - More error checks
 17 *
 18 *  For all the glorious comments look at include/net/red.h
 19 */
 20
 21#include <linux/slab.h>
 22#include <linux/module.h>
 23#include <linux/types.h>
 24#include <linux/kernel.h>
 25#include <linux/skbuff.h>
 26#include <net/pkt_sched.h>
 27#include <net/red.h>
 28
 29#define GRED_DEF_PRIO (MAX_DPs / 2)
 30#define GRED_VQ_MASK (MAX_DPs - 1)
 31
 32struct gred_sched_data;
 33struct gred_sched;
 34
 35struct gred_sched_data {
 36	u32		limit;		/* HARD maximal queue length	*/
 37	u32		DP;		/* the drop parameters */
 38	u32		bytesin;	/* bytes seen on virtualQ so far*/
 39	u32		packetsin;	/* packets seen on virtualQ so far*/
 40	u32		backlog;	/* bytes on the virtualQ */
 41	u8		prio;		/* the prio of this vq */
 42
 43	struct red_parms parms;
 44	struct red_vars  vars;
 45	struct red_stats stats;
 46};
 47
 48enum {
 49	GRED_WRED_MODE = 1,
 50	GRED_RIO_MODE,
 51};
 52
 53struct gred_sched {
 54	struct gred_sched_data *tab[MAX_DPs];
 55	unsigned long	flags;
 56	u32		red_flags;
 57	u32 		DPs;
 58	u32 		def;
 59	struct red_vars wred_set;
 60};
 61
 62static inline int gred_wred_mode(struct gred_sched *table)
 63{
 64	return test_bit(GRED_WRED_MODE, &table->flags);
 65}
 66
 67static inline void gred_enable_wred_mode(struct gred_sched *table)
 68{
 69	__set_bit(GRED_WRED_MODE, &table->flags);
 70}
 71
 72static inline void gred_disable_wred_mode(struct gred_sched *table)
 73{
 74	__clear_bit(GRED_WRED_MODE, &table->flags);
 75}
 76
 77static inline int gred_rio_mode(struct gred_sched *table)
 78{
 79	return test_bit(GRED_RIO_MODE, &table->flags);
 80}
 81
 82static inline void gred_enable_rio_mode(struct gred_sched *table)
 83{
 84	__set_bit(GRED_RIO_MODE, &table->flags);
 85}
 86
 87static inline void gred_disable_rio_mode(struct gred_sched *table)
 88{
 89	__clear_bit(GRED_RIO_MODE, &table->flags);
 90}
 91
 92static inline int gred_wred_mode_check(struct Qdisc *sch)
 93{
 94	struct gred_sched *table = qdisc_priv(sch);
 95	int i;
 96
 97	/* Really ugly O(n^2), but it shouldn't be needed too frequently. */
 98	for (i = 0; i < table->DPs; i++) {
 99		struct gred_sched_data *q = table->tab[i];
100		int n;
101
102		if (q == NULL)
103			continue;
104
105		for (n = i + 1; n < table->DPs; n++)
106			if (table->tab[n] && table->tab[n]->prio == q->prio)
107				return 1;
108	}
109
110	return 0;
111}
112
113static inline unsigned int gred_backlog(struct gred_sched *table,
114					struct gred_sched_data *q,
115					struct Qdisc *sch)
116{
117	if (gred_wred_mode(table))
118		return sch->qstats.backlog;
119	else
120		return q->backlog;
121}
122
123static inline u16 tc_index_to_dp(struct sk_buff *skb)
124{
125	return skb->tc_index & GRED_VQ_MASK;
126}
127
128static inline void gred_load_wred_set(const struct gred_sched *table,
129				      struct gred_sched_data *q)
130{
131	q->vars.qavg = table->wred_set.qavg;
132	q->vars.qidlestart = table->wred_set.qidlestart;
133}
134
135static inline void gred_store_wred_set(struct gred_sched *table,
136				       struct gred_sched_data *q)
137{
138	table->wred_set.qavg = q->vars.qavg;
139	table->wred_set.qidlestart = q->vars.qidlestart;
140}
141
142static inline int gred_use_ecn(struct gred_sched *t)
143{
144	return t->red_flags & TC_RED_ECN;
145}
146
147static inline int gred_use_harddrop(struct gred_sched *t)
148{
149	return t->red_flags & TC_RED_HARDDROP;
150}
151
152static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
153{
154	struct gred_sched_data *q = NULL;
155	struct gred_sched *t = qdisc_priv(sch);
156	unsigned long qavg = 0;
157	u16 dp = tc_index_to_dp(skb);
158
159	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
160		dp = t->def;
161
162		q = t->tab[dp];
163		if (!q) {
164			/* Pass through packets not assigned to a DP
165			 * if no default DP has been configured. This
166			 * allows for DP flows to be left untouched.
167			 */
168			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
169					sch->limit))
170				return qdisc_enqueue_tail(skb, sch);
171			else
172				goto drop;
173		}
174
175		/* fix tc_index? --could be controversial but needed for
176		   requeueing */
177		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
178	}
179
180	/* sum up all the qaves of prios < ours to get the new qave */
181	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
182		int i;
183
184		for (i = 0; i < t->DPs; i++) {
185			if (t->tab[i] && t->tab[i]->prio < q->prio &&
186			    !red_is_idling(&t->tab[i]->vars))
187				qavg += t->tab[i]->vars.qavg;
188		}
189
190	}
191
192	q->packetsin++;
193	q->bytesin += qdisc_pkt_len(skb);
194
195	if (gred_wred_mode(t))
196		gred_load_wred_set(t, q);
197
198	q->vars.qavg = red_calc_qavg(&q->parms,
199				     &q->vars,
200				     gred_backlog(t, q, sch));
201
202	if (red_is_idling(&q->vars))
203		red_end_of_idle_period(&q->vars);
204
205	if (gred_wred_mode(t))
206		gred_store_wred_set(t, q);
207
208	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
209	case RED_DONT_MARK:
210		break;
211
212	case RED_PROB_MARK:
213		qdisc_qstats_overlimit(sch);
214		if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
215			q->stats.prob_drop++;
216			goto congestion_drop;
217		}
218
219		q->stats.prob_mark++;
220		break;
221
222	case RED_HARD_MARK:
223		qdisc_qstats_overlimit(sch);
224		if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
225		    !INET_ECN_set_ce(skb)) {
226			q->stats.forced_drop++;
227			goto congestion_drop;
228		}
229		q->stats.forced_mark++;
230		break;
231	}
232
233	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
234		q->backlog += qdisc_pkt_len(skb);
235		return qdisc_enqueue_tail(skb, sch);
236	}
237
238	q->stats.pdrop++;
239drop:
240	return qdisc_drop(skb, sch);
241
242congestion_drop:
243	qdisc_drop(skb, sch);
244	return NET_XMIT_CN;
245}
246
247static struct sk_buff *gred_dequeue(struct Qdisc *sch)
248{
249	struct sk_buff *skb;
250	struct gred_sched *t = qdisc_priv(sch);
251
252	skb = qdisc_dequeue_head(sch);
253
254	if (skb) {
255		struct gred_sched_data *q;
256		u16 dp = tc_index_to_dp(skb);
257
258		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
259			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
260					     tc_index_to_dp(skb));
261		} else {
262			q->backlog -= qdisc_pkt_len(skb);
263
264			if (gred_wred_mode(t)) {
265				if (!sch->qstats.backlog)
266					red_start_of_idle_period(&t->wred_set);
267			} else {
268				if (!q->backlog)
269					red_start_of_idle_period(&q->vars);
270			}
271		}
272
273		return skb;
274	}
275
276	return NULL;
277}
278
279static unsigned int gred_drop(struct Qdisc *sch)
280{
281	struct sk_buff *skb;
282	struct gred_sched *t = qdisc_priv(sch);
283
284	skb = qdisc_dequeue_tail(sch);
285	if (skb) {
286		unsigned int len = qdisc_pkt_len(skb);
287		struct gred_sched_data *q;
288		u16 dp = tc_index_to_dp(skb);
289
290		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
291			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x while dropping, screwing up backlog\n",
292					     tc_index_to_dp(skb));
293		} else {
294			q->backlog -= len;
295			q->stats.other++;
296
297			if (gred_wred_mode(t)) {
298				if (!sch->qstats.backlog)
299					red_start_of_idle_period(&t->wred_set);
300			} else {
301				if (!q->backlog)
302					red_start_of_idle_period(&q->vars);
303			}
304		}
305
306		qdisc_drop(skb, sch);
307		return len;
308	}
309
310	return 0;
311}
312
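gred_drop() is the qdisc .drop callback that existed in this era (the op, and this function with it, was removed in later kernels, as the v5.9 listing above shows): it frees space by dropping from the tail and books the drop against the owning VQ's "other" counter rather than a RED counter. A sketch of that bookkeeping over a toy VQ (names hypothetical):

#include <stdio.h>

struct vq { unsigned int backlog, other; };

/* drop one tail packet of 'len' bytes from virtual queue 'q' */
static unsigned int tail_drop(struct vq *q, unsigned int len)
{
	q->backlog -= len;
	q->other++;	/* accounted as an "other", non-RED drop */
	return len;
}

int main(void)
{
	struct vq q = { .backlog = 3000, .other = 0 };
	unsigned int freed = tail_drop(&q, 1500);

	printf("freed %u, backlog %u, other drops %u\n",
	       freed, q.backlog, q.other);
	return 0;
}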
313static void gred_reset(struct Qdisc *sch)
314{
315	int i;
316	struct gred_sched *t = qdisc_priv(sch);
317
318	qdisc_reset_queue(sch);
319
320	for (i = 0; i < t->DPs; i++) {
321		struct gred_sched_data *q = t->tab[i];
322
323		if (!q)
324			continue;
325
326		red_restart(&q->vars);
327		q->backlog = 0;
328	}
329}
330
331static inline void gred_destroy_vq(struct gred_sched_data *q)
332{
333	kfree(q);
334}
335
336static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
337{
338	struct gred_sched *table = qdisc_priv(sch);
339	struct tc_gred_sopt *sopt;
340	int i;
341
342	if (dps == NULL)
343		return -EINVAL;
344
345	sopt = nla_data(dps);
346
347	if (sopt->DPs > MAX_DPs || sopt->DPs == 0 || sopt->def_DP >= sopt->DPs)
348		return -EINVAL;
349
350	sch_tree_lock(sch);
351	table->DPs = sopt->DPs;
352	table->def = sopt->def_DP;
353	table->red_flags = sopt->flags;
354
355	/*
356	 * Every entry point to GRED is synchronized with the above code
357	 * and the DP is checked against DPs, i.e. shadowed VQs can no
358	 * longer be found so we can unlock right here.
359	 */
360	sch_tree_unlock(sch);
361
362	if (sopt->grio) {
363		gred_enable_rio_mode(table);
364		gred_disable_wred_mode(table);
365		if (gred_wred_mode_check(sch))
366			gred_enable_wred_mode(table);
367	} else {
368		gred_disable_rio_mode(table);
369		gred_disable_wred_mode(table);
370	}
371
372	for (i = table->DPs; i < MAX_DPs; i++) {
373		if (table->tab[i]) {
374			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
375				i);
376			gred_destroy_vq(table->tab[i]);
377			table->tab[i] = NULL;
378		}
379	}
380
381	return 0;
382}
383
384static inline int gred_change_vq(struct Qdisc *sch, int dp,
385				 struct tc_gred_qopt *ctl, int prio,
386				 u8 *stab, u32 max_P,
387				 struct gred_sched_data **prealloc)
388{
389	struct gred_sched *table = qdisc_priv(sch);
390	struct gred_sched_data *q = table->tab[dp];
391
392	if (!q) {
393		table->tab[dp] = q = *prealloc;
394		*prealloc = NULL;
395		if (!q)
396			return -ENOMEM;
397	}
398
399	q->DP = dp;
400	q->prio = prio;
401	if (ctl->limit > sch->limit)
402		q->limit = sch->limit;
403	else
404		q->limit = ctl->limit;
405
406	if (q->backlog == 0)
407		red_end_of_idle_period(&q->vars);
408
409	red_set_parms(&q->parms,
410		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
411		      ctl->Scell_log, stab, max_P);
412	red_set_vars(&q->vars);
413	return 0;
414}
415
416static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
417	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
418	[TCA_GRED_STAB]		= { .len = 256 },
419	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
420	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
421	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
422};
423
424static int gred_change(struct Qdisc *sch, struct nlattr *opt)
425{
426	struct gred_sched *table = qdisc_priv(sch);
427	struct tc_gred_qopt *ctl;
428	struct nlattr *tb[TCA_GRED_MAX + 1];
429	int err, prio = GRED_DEF_PRIO;
430	u8 *stab;
431	u32 max_P;
432	struct gred_sched_data *prealloc;
433
434	if (opt == NULL)
435		return -EINVAL;
436
437	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
438	if (err < 0)
439		return err;
440
441	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
442		if (tb[TCA_GRED_LIMIT] != NULL)
443			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
444		return gred_change_table_def(sch, opt);
445	}
446
447	if (tb[TCA_GRED_PARMS] == NULL ||
448	    tb[TCA_GRED_STAB] == NULL ||
449	    tb[TCA_GRED_LIMIT] != NULL)
450		return -EINVAL;
451
452	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
453
454	err = -EINVAL;
455	ctl = nla_data(tb[TCA_GRED_PARMS]);
456	stab = nla_data(tb[TCA_GRED_STAB]);
457
458	if (ctl->DP >= table->DPs)
459		goto errout;
460
461	if (gred_rio_mode(table)) {
462		if (ctl->prio == 0) {
463			int def_prio = GRED_DEF_PRIO;
464
465			if (table->tab[table->def])
466				def_prio = table->tab[table->def]->prio;
467
468			printk(KERN_DEBUG "GRED: DP %u does not have a prio "
469			       "setting default to %d\n", ctl->DP, def_prio);
470
471			prio = def_prio;
472		} else
473			prio = ctl->prio;
474	}
475
476	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
477	sch_tree_lock(sch);
478
479	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc);
480	if (err < 0)
481		goto errout_locked;
482
483	if (gred_rio_mode(table)) {
484		gred_disable_wred_mode(table);
485		if (gred_wred_mode_check(sch))
486			gred_enable_wred_mode(table);
487	}
488
489	err = 0;
490
491errout_locked:
492	sch_tree_unlock(sch);
493	kfree(prealloc);
494errout:
495	return err;
496}
497
498static int gred_init(struct Qdisc *sch, struct nlattr *opt)
499{
500	struct nlattr *tb[TCA_GRED_MAX + 1];
501	int err;
502
503	if (opt == NULL)
504		return -EINVAL;
505
506	err = nla_parse_nested(tb, TCA_GRED_MAX, opt, gred_policy);
507	if (err < 0)
508		return err;
509
510	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB])
511		return -EINVAL;
512
513	if (tb[TCA_GRED_LIMIT])
514		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
515	else
516		sch->limit = qdisc_dev(sch)->tx_queue_len
517		             * psched_mtu(qdisc_dev(sch));
518
519	return gred_change_table_def(sch, tb[TCA_GRED_DPS]);
520}
521
522static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
523{
524	struct gred_sched *table = qdisc_priv(sch);
525	struct nlattr *parms, *opts = NULL;
526	int i;
527	u32 max_p[MAX_DPs];
528	struct tc_gred_sopt sopt = {
529		.DPs	= table->DPs,
530		.def_DP	= table->def,
531		.grio	= gred_rio_mode(table),
532		.flags	= table->red_flags,
533	};
534
535	opts = nla_nest_start(skb, TCA_OPTIONS);
536	if (opts == NULL)
537		goto nla_put_failure;
538	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
539		goto nla_put_failure;
540
541	for (i = 0; i < MAX_DPs; i++) {
542		struct gred_sched_data *q = table->tab[i];
543
544		max_p[i] = q ? q->parms.max_P : 0;
545	}
546	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
547		goto nla_put_failure;
548
549	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
550		goto nla_put_failure;
551
552	parms = nla_nest_start(skb, TCA_GRED_PARMS);
553	if (parms == NULL)
554		goto nla_put_failure;
555
556	for (i = 0; i < MAX_DPs; i++) {
557		struct gred_sched_data *q = table->tab[i];
558		struct tc_gred_qopt opt;
559		unsigned long qavg;
560
561		memset(&opt, 0, sizeof(opt));
562
563		if (!q) {
 564			/* hack -- fix at some point with a proper message.
565			   This is how we indicate to tc that there is no VQ
566			   at this DP */
567
568			opt.DP = MAX_DPs + i;
569			goto append_opt;
570		}
571
572		opt.limit	= q->limit;
573		opt.DP		= q->DP;
574		opt.backlog	= gred_backlog(table, q, sch);
575		opt.prio	= q->prio;
576		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
577		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
578		opt.Wlog	= q->parms.Wlog;
579		opt.Plog	= q->parms.Plog;
580		opt.Scell_log	= q->parms.Scell_log;
581		opt.other	= q->stats.other;
582		opt.early	= q->stats.prob_drop;
583		opt.forced	= q->stats.forced_drop;
584		opt.pdrop	= q->stats.pdrop;
585		opt.packets	= q->packetsin;
586		opt.bytesin	= q->bytesin;
587
588		if (gred_wred_mode(table))
589			gred_load_wred_set(table, q);
590
591		qavg = red_calc_qavg(&q->parms, &q->vars,
592				     q->vars.qavg >> q->parms.Wlog);
593		opt.qave = qavg >> q->parms.Wlog;
594
595append_opt:
596		if (nla_append(skb, sizeof(opt), &opt) < 0)
597			goto nla_put_failure;
598	}
599
600	nla_nest_end(skb, parms);
601
602	return nla_nest_end(skb, opts);
603
604nla_put_failure:
605	nla_nest_cancel(skb, opts);
606	return -EMSGSIZE;
607}
608
609static void gred_destroy(struct Qdisc *sch)
610{
611	struct gred_sched *table = qdisc_priv(sch);
612	int i;
613
614	for (i = 0; i < table->DPs; i++) {
615		if (table->tab[i])
616			gred_destroy_vq(table->tab[i]);
617	}
618}
619
620static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
621	.id		=	"gred",
622	.priv_size	=	sizeof(struct gred_sched),
623	.enqueue	=	gred_enqueue,
624	.dequeue	=	gred_dequeue,
625	.peek		=	qdisc_peek_head,
626	.drop		=	gred_drop,
627	.init		=	gred_init,
628	.reset		=	gred_reset,
629	.destroy	=	gred_destroy,
630	.change		=	gred_change,
631	.dump		=	gred_dump,
632	.owner		=	THIS_MODULE,
633};
634
635static int __init gred_module_init(void)
636{
637	return register_qdisc(&gred_qdisc_ops);
638}
639
640static void __exit gred_module_exit(void)
641{
642	unregister_qdisc(&gred_qdisc_ops);
643}
644
645module_init(gred_module_init)
646module_exit(gred_module_exit)
647
648MODULE_LICENSE("GPL");