net/sched/sch_red.c at v4.6:
 
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
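
/* How the knobs above interact (a rough sketch; see red_calc_qavg() and
 * red_action() in <net/red.h> for the exact fixed-point arithmetic):
 * the average queue length is an EWMA of the backlog, approximately
 *
 *	qavg += (backlog - qavg) >> Wlog;
 *
 * i.e. a smoothing weight of 2^-Wlog, and packets are marked/dropped
 * with a probability that grows roughly linearly from 0 at qth_min to
 * max_P at qth_max; above qth_max every packet is marked
 * (RED_HARD_MARK below). */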

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;
	struct timer_list	adapt_timer;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;
}
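
/* Return-value convention (per the usual qdisc contract): NET_XMIT_CN
 * signals that this qdisc itself dropped the packet for congestion,
 * while failures from the child are propagated via ret and counted as
 * pdrop only when net_xmit_drop_count() reports a real drop. */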

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static unsigned int red_drop(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;

	if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
		q->stats.other++;
		qdisc_qstats_drop(sch);
		sch->q.qlen--;
		return len;
	}

	if (!red_is_idling(&q->vars))
		red_start_of_idle_period(&q->vars);

	return 0;
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	qdisc_destroy(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

static int red_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	struct Qdisc *child = NULL;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit);
		if (IS_ERR(child))
			return PTR_ERR(child);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
					  q->qdisc->qstats.backlog);
		qdisc_destroy(q->qdisc);
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	return 0;
}

static inline void red_adaptative_timer(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct red_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}
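
/* With TC_RED_ADAPTATIVE set, this timer fires every 500 ms so that
 * red_adaptative_algo() can retune max_P and keep the average queue
 * between the configured thresholds, in the spirit of Floyd's
 * "Adaptive RED" (see red_adaptative_algo() in <net/red.h> for the
 * exact policy). */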

static int red_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	setup_timer(&q->adapt_timer, red_adaptative_timer, (unsigned long)sch);
	return red_change(sch, opt);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	sch->qstats.backlog = q->qdisc->qstats.backlog;
	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct tc_red_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_put(struct Qdisc *sch, unsigned long arg)
{
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.get		=	red_get,
	.put		=	red_put,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.drop		=	red_drop,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");
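
The v6.2 version of the same file follows. The differences visible in the code itself include: an SPDX tag replacing the GPL boilerplate, an enqueue path that batches frees through a to_free list and can hand packets to tc qevent blocks, a TC_RED_NODROP flag for ECN-only operation, hardware offload through ndo_setup_tc, and the timer_list/from_timer API replacing setup_timer.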
net/sched/sch_red.c at v6.2:
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

#define TC_RED_SUPPORTED_FLAGS (TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	unsigned int len;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	len = qdisc_pkt_len(skb);
	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->qstats.backlog += len;
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
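
/* The qe_mark/qe_early_drop calls above are the tc "qevent" mechanism:
 * tcf_qevent_handle() runs a user-attached filter block on packets that
 * were just ECN-marked or early-dropped (e.g. to mirror or trap them);
 * a NULL skb coming back means the block consumed the packet, hence
 * the NET_XMIT_CN | ret return. */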

static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	red_restart(&q->vars);
}

static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}
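
/* Note the >> Wlog above: red_set_parms() stores qth_min/qth_max scaled
 * by 2^Wlog (the EWMA fixed-point scale), so the offload path, like
 * red_dump() below, shifts them back to bytes before exposing them. */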

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}
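
/* red_init() and red_change() both funnel into __red_change(); the
 * split appears to exist because qevent blocks can only be bound at
 * init time, so on a change tcf_qevent_validate_change() merely checks
 * that the TCA_RED_*_BLOCK attributes were not modified. */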

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		tc_qdisc_stats_dump(sch, 1, walker);
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");