// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>

/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen > qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
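
/* A configuration sketch, assuming the usual tc(8) "red" syntax; the values
 * are illustrative only. "limit" is the hard byte limit described above,
 * while min/max correspond to qth_min/qth_max:
 *
 *	tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *		avpkt 1000 burst 55 ecn adaptative bandwidth 10Mbit
 */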

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */
	unsigned char		flags;		/* TC_RED_ECN, TC_RED_HARDDROP, ... */
	struct timer_list	adapt_timer;	/* periodic timer for adaptative RED */
	struct Qdisc		*sch;		/* back-pointer for the timer callback */
	struct red_parms	parms;		/* user-supplied RED parameters */
	struct red_vars		vars;		/* RED state: qavg, idle period marks */
	struct red_stats	stats;		/* mark/drop counters for tc -s */
	struct Qdisc		*qdisc;		/* child queue, a bfifo by default */
};

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

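/* Enqueue path: red_calc_qavg() maintains an exponentially weighted moving
 * average of the child backlog (weight 2^-Wlog), then red_action() decides:
 * below qth_min packets pass untouched; between qth_min and qth_max they are
 * probabilistically ECN-marked or dropped; above qth_max they are
 * force-marked, or dropped when ECN is off or harddrop is set.
 */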
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		q->stats.forced_mark++;
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

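/* When the child queue runs empty, record the start of an idle period so
 * that red_calc_qavg() can later decay qavg for the time the link sat idle.
 */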
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

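/* Mirror the configuration into hardware via ndo_setup_tc():
 * TC_RED_REPLACE pushes the current thresholds and flags, TC_RED_DESTROY
 * removes the offloaded instance. The return value is not checked by the
 * caller on the configure path, so the qdisc keeps working in software
 * when the driver returns -EOPNOTSUPP.
 */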
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
};

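/* Parse the TCA_RED_* attributes and (re)configure the qdisc: validate the
 * thresholds, build a bfifo child sized to "limit", swap it in under the
 * tree lock, load the RED parameters, and re-arm the adaptative timer when
 * TC_RED_ADAPTATIVE is set. The old child is released only after the lock
 * is dropped.
 */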
static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	struct tc_red_qopt *ctl;
	int err;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  NULL);
	if (err < 0)
		return err;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);
	q->flags = ctl->flags;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_RED_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;
}

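/* Adaptative RED: every 500ms (HZ/2), red_adaptative_algo() adjusts max_P
 * so that the average queue size stays between the configured thresholds.
 */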
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);
	return red_change(sch, opt, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

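/* For offloaded qdiscs, pull the TC_RED_XSTATS counters from the driver
 * into q->stats before folding them into the tc_red_xstats reply.
 */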
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

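/* Keep the hardware in sync when the child qdisc is replaced: TC_RED_GRAFT
 * hands the new child's handle to the driver through the generic
 * qdisc_offload_graft_helper().
 */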
static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		=	red_graft,
	.leaf		=	red_leaf,
	.find		=	red_find,
	.walk		=	red_walk,
	.dump		=	red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		=	"red",
	.priv_size	=	sizeof(struct red_sched_data),
	.cl_ops		=	&red_class_ops,
	.enqueue	=	red_enqueue,
	.dequeue	=	red_dequeue,
	.peek		=	red_peek,
	.init		=	red_init,
	.reset		=	red_reset,
	.destroy	=	red_destroy,
	.change		=	red_change,
	.dump		=	red_dump,
	.dump_stats	=	red_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");