v3.5.6

/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_keys.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004

 */

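/*
 * A minimal userspace sketch of the decision described above (illustrative
 * only, not from the kernel tree; sim_pkt and choke_sim_decision are made-up
 * names). Once the average queue size exceeds qth_min, one queued packet is
 * drawn at random; if it belongs to the same flow as the arrival, both are
 * dropped, so heavy flows are hit roughly in proportion to their queue
 * occupancy. In the real qdisc this step is choke_match_random() plus
 * choke_drop_by_idx() below.
 */
#include <stdbool.h>
#include <stdlib.h>

struct sim_pkt {
	unsigned int flow_id;	/* whatever identifies a flow */
};

/* Returns true when both the arrival and queue[*victim_idx] should be dropped. */
static bool choke_sim_decision(const struct sim_pkt *arrival,
			       struct sim_pkt *const *queue, unsigned int qlen,
			       unsigned int qavg, unsigned int qth_min,
			       unsigned int *victim_idx)
{
	if (qavg <= qth_min || qlen == 0)
		return false;	/* small queue: plain RED behaviour */

	*victim_idx = (unsigned int)rand() % qlen;	/* random victim */
	return queue[*victim_idx]->flow_id == arrival->flow_id;
}
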
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct tcf_proto *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;          /* Drops due to queue limits */
		u32	other;          /* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* deliver a random number between 0 and N - 1 */
static u32 random_N(unsigned int N)
{
	return reciprocal_divide(random32(), N);
}
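/*
 * reciprocal_divide(X, N) computes (u32)(((u64)X * N) >> 32), so the uniform
 * 32-bit value from random32() is scaled into [0, N) with a single multiply
 * instead of a division or modulo.
 */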

/* number of elements in queue including holes */
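/* e.g. tab_mask = 7, head = 6, tail = 2: (2 - 6) & 7 = 4 slots in flight */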
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_drop(skb, sch);
	qdisc_tree_decrease_qlen(sch, 1);
	--sch->q.qlen;
}

struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
	struct flow_keys	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}

/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect(skb1, &choke_skb_cb(skb1)->keys);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect(skb2, &choke_skb_cb(skb2)->keys);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(struct flow_keys));
}

/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
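			/* fall through */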
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}

/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *   times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (q->filter_list)
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			sch->qstats.overlimits++;
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				sch->qstats.overlimits++;
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		sch->qstats.backlog += qdisc_pkt_len(skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch);

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static unsigned int choke_drop(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (len > 0)
		q->stats.other++;
	else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}

	return len;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

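/* q->tab is sized with kcalloc() or, on failure, vzalloc(); free either way */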
static void choke_free(void *addr)
{
	if (addr) {
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
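	/* e.g. ctl->limit == 1000 gives a 1024-slot table, mask == 1023 */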
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}

static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
				u32 classid)
{
	return 0;
}

static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	if (!arg->stop) {
		if (arg->fn(sch, 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops choke_class_ops = {
	.leaf		=	choke_leaf,
	.get		=	choke_get,
	.put		=	choke_put,
	.tcf_chain	=	choke_find_tcf,
	.bind_tcf	=	choke_bind,
	.unbind_tcf	=	choke_put,
	.dump		=	choke_dump_class,
	.walk		=	choke_walk,
};

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.drop		=	choke_drop,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004

 */

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;          /* Drops due to queue limits */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
}
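/*
 * A note on the to_free argument above: qdisc_drop() here does not free the
 * skb immediately; it chains it onto *to_free so the caller can release the
 * whole batch after the qdisc lock is dropped.
 */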

struct choke_skb_cb {
	u8			keys_valid;
	struct			flow_keys_digest keys;
};
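/*
 * flow_keys_digest is a condensed form of struct flow_keys, kept small so
 * this cb structure passes the size check in qdisc_cb_private_validate().
 */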

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Compare flow of two packets
 *  Returns true only if source and destination address and port match.
 *          false for special cases
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}

/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *   times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + get_random_u32_below(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	if (q->tab)
		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

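/* kvfree() handles both kmalloc()- and vmalloc()-backed pointers */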
static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
					  choke_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = nla_get_u32_default(tb[TCA_CHOKE_MAX_P], 0);

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
	stab = nla_data(tb[TCA_CHOKE_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	WRITE_ONCE(q->flags, ctl->flags);
	WRITE_ONCE(q->limit, ctl->limit);
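	/* paired with READ_ONCE() in choke_dump(), which may read without the qdisc lock */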

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	return choke_change(sch, opt, extack);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	u8 Wlog = READ_ONCE(q->parms.Wlog);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= READ_ONCE(q->limit),
		.flags		= READ_ONCE(q->flags),
		.qth_min	= READ_ONCE(q->parms.qth_min) >> Wlog,
		.qth_max	= READ_ONCE(q->parms.qth_max) >> Wlog,
		.Wlog		= Wlog,
		.Plog		= READ_ONCE(q->parms.Plog),
		.Scell_log	= READ_ONCE(q->parms.Scell_log),
	};

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, READ_ONCE(q->parms.max_P)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("choke");

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Choose and keep responsive flows scheduler");