// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in the queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004

 */
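/*
 * A minimal sketch of the per-packet decision CHOKe layers on top of
 * RED (illustrative pseudocode only; the real logic, including ECN
 * marking and the hard-drop flag, lives in choke_enqueue() below):
 *
 *	if (qavg <= qth_min)
 *		enqueue(skb);			// queue small: admit
 *	else {
 *		victim = random packet in queue;
 *		if (same_flow(skb, victim))
 *			drop both;		// the CHOKe step
 *		else
 *			apply RED mark/drop rules;
 *	}
 */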

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars  vars;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	head;
	unsigned int	tail;

	unsigned int	tab_mask; /* size - 1 */

	struct sk_buff **tab;
};

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
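/*
 * Example of the masked ring arithmetic above (values are illustrative):
 * with tab_mask = 1023, head = 1020 and tail = 4, the unsigned
 * subtraction wraps and (4 - 1020) & 1023 = 8, i.e. eight slots
 * (packets plus holes) are in use.
 */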

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}
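/*
 * The two helpers above keep head from pointing at a hole and pull tail
 * back across trailing holes, so NULL slots should only persist between
 * the two indices (a rough invariant; choke_dequeue() and
 * choke_peek_head() rely on q->tab[q->head] being a real packet
 * whenever head != tail).
 */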

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
}
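/*
 * Note that this drop bypasses the normal dequeue path, so besides the
 * local counters, qdisc_tree_reduce_backlog() is needed to tell any
 * parent qdiscs that a queued packet vanished from this queue.
 */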

struct choke_skb_cb {
	u8		keys_valid;
	struct flow_keys_digest keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * Compare flow of two packets
 * Returns true only if source and destination address and port match;
 * returns false for the special cases where the flows cannot be
 * compared (e.g. differing protocols).
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}
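/*
 * Note on the caching above: the dissected flow tuple is condensed into
 * a fixed-size flow_keys_digest and stashed in the skb control block,
 * so each packet is dissected at most once no matter how many times it
 * is drawn as a comparison victim while it sits in the queue.
 */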

/*
 * Select a packet at random from queue.
 * HACK: since the queue can have holes from previous deletions, retry
 * several times to find a random skb, then give up and return the head.
 * Will return NULL if the queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retries = 3;

	do {
		*pidx = (q->head + get_random_u32_below(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retries > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	return choke_match_flow(oskb, nskb);
}
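/*
 * This comparison is where CHOKe's fairness bias comes from: a flow
 * holding a fraction f of the queued packets is drawn as the victim
 * with probability roughly f, so heavy, unresponsive flows lose packets
 * in matched pairs increasingly often as they crowd the buffer.
 */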

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}
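/*
 * A worked sketch of the qavg logic above (the fixed-point details in
 * include/net/red.h are simplified here): RED keeps an exponentially
 * weighted moving average of the queue length,
 *
 *	qavg += w * (qlen - qavg),	with weight w = 2^-Wlog
 *
 * With Wlog = 9 (w = 1/512, an illustrative value) a brief burst barely
 * moves qavg, while sustained congestion drags it past qth_min
 * (enabling the CHOKe comparison and probabilistic mark/drop) and
 * eventually past qth_max (forced mark/drop).
 */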

static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}
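/*
 * When the queue runs empty, RED is switched into its idle period so
 * that qavg is aged down while no packets arrive; choke_enqueue() ends
 * the idle period again on the next packet (see red_is_idling() /
 * red_end_of_idle_period() above).
 */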

static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	if (q->tab)
		memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};


static void choke_free(void *addr)
{
	kvfree(addr);
}

static int choke_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;
	u8 *stab;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
					  choke_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = nla_get_u32_default(tb[TCA_CHOKE_MAX_P], 0);

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);
	stab = nla_data(tb[TCA_CHOKE_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kvcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned int dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	WRITE_ONCE(q->flags, ctl->flags);
	WRITE_ONCE(q->limit, ctl->limit);

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
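/*
 * Illustrative userspace configuration (values are examples only;
 * parameter names per tc-choke(8), which reuses the RED parameter set
 * that arrives here as struct tc_red_qopt via TCA_CHOKE_PARMS):
 *
 *	tc qdisc add dev eth0 root choke limit 1000 \
 *		min 100 max 300 avpkt 1000 burst 200 \
 *		bandwidth 10mbit probability 0.02 ecn
 *
 * tc, not the kernel, turns min/max/avpkt/burst/probability/bandwidth
 * into the qth_min, qth_max, Wlog, Plog and Scell_log fields checked by
 * red_check_params() above.
 */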
static int choke_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	return choke_change(sch, opt, extack);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	u8 Wlog = READ_ONCE(q->parms.Wlog);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= READ_ONCE(q->limit),
		.flags		= READ_ONCE(q->flags),
		.qth_min	= READ_ONCE(q->parms.qth_min) >> Wlog,
		.qth_max	= READ_ONCE(q->parms.qth_max) >> Wlog,
		.Wlog		= Wlog,
		.Plog		= READ_ONCE(q->parms.Plog),
		.Scell_log	= READ_ONCE(q->parms.Scell_log),
	};

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, READ_ONCE(q->parms.max_P)))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}
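/*
 * Wiring into the qdisc core: enqueue/dequeue/peek form the datapath,
 * while init/change/dump/dump_stats implement netlink configuration and
 * statistics reporting.
 */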
static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("choke");

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Choose and keep responsive flows scheduler");