net/sched/sch_fq_pie.c (Linux v6.13.7)
// SPDX-License-Identifier: GPL-2.0-only
/* Flow Queue PIE discipline
 *
 * Copyright (C) 2019 Mohit P. Tahiliani <tahiliani@nitk.edu.in>
 * Copyright (C) 2019 Sachin D. Patil <sdp.sachin@gmail.com>
 * Copyright (C) 2019 V. Saicharan <vsaicharan1998@gmail.com>
 * Copyright (C) 2019 Mohit Bhasi <mohitbhasi1998@gmail.com>
 * Copyright (C) 2019 Leslie Monis <lesliemonis@gmail.com>
 * Copyright (C) 2019 Gautam Ramakrishnan <gautamramk@gmail.com>
 */

#include <linux/jhash.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/vmalloc.h>
#include <net/pkt_cls.h>
#include <net/pie.h>

/* Flow Queue PIE
 *
 * Principles:
 *   - Packets are classified into flows.
 *   - This is a stochastic model (as we use a hash, several flows might
 *                                 be hashed to the same slot)
 *   - Each flow has a PIE-managed queue.
 *   - Flows are linked onto two (Round Robin) lists,
 *     so that new flows have priority over old ones.
 *   - For a given flow, packets are not reordered.
 *   - Drops happen during enqueue only.
 *   - ECN capability is off by default.
 *   - ECN threshold (if ECN is enabled) is at 10% by default.
 *   - Uses timestamps to calculate queue delay by default.
 */

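/* Usage sketch, not part of the upstream file: assuming the standard
 * iproute2 option names for fq_pie, attaching and tuning this qdisc
 * from user space might look like:
 *
 *   tc qdisc add dev eth0 root fq_pie limit 10240 flows 1024 \
 *           target 15ms tupdate 15ms alpha 2 beta 20 ecn ecn_prob 10
 *
 * Each option corresponds to one TCA_FQ_PIE_* netlink attribute handled
 * in fq_pie_change() below.
 */
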
/**
 * struct fq_pie_flow - contains data for each flow
 * @vars:	pie vars associated with the flow
 * @deficit:	number of remaining byte credits
 * @backlog:	size of data in the flow
 * @qlen:	number of packets in the flow
 * @flowchain:	flowchain for the flow
 * @head:	first packet in the flow
 * @tail:	last packet in the flow
 */
struct fq_pie_flow {
	struct pie_vars vars;
	s32 deficit;
	u32 backlog;
	u32 qlen;
	struct list_head flowchain;
	struct sk_buff *head;
	struct sk_buff *tail;
};

struct fq_pie_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_pie_flow *flows;
	struct Qdisc *sch;
	struct list_head old_flows;
	struct list_head new_flows;
	struct pie_params p_params;
	u32 ecn_prob;
	u32 flows_cnt;
	u32 flows_cursor;
	u32 quantum;
	u32 memory_limit;
	u32 new_flow_count;
	u32 memory_usage;
	u32 overmemory;
	struct pie_stats stats;
	struct timer_list adapt_timer;
};

static unsigned int fq_pie_hash(const struct fq_pie_sched_data *q,
				struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

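/* Note on fq_pie_hash() above: reciprocal_scale(val, n) evaluates to
 * (u32)(((u64)val * n) >> 32), mapping the 32-bit skb hash uniformly
 * onto [0, flows_cnt) without a division. With the default flows_cnt
 * of 1024, distinct transport flows are binned stochastically into
 * 1024 PIE-managed queues, so hash collisions between flows are
 * possible (see the "Principles" comment above).
 */
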
static unsigned int fq_pie_classify(struct sk_buff *skb, struct Qdisc *sch,
				    int *qerr)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_pie_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, NULL, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

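/* Flow selection, illustrated: a packet priority of <handle>:<N> with
 * 1 <= N <= flows_cnt, or an attached filter returning a classid with
 * minor number N, pins the packet to flow N; everything else falls
 * back to the hash. A hedged example, assuming the qdisc was installed
 * with handle 1::
 *
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *           match ip dport 22 0xffff flowid 1:5
 *
 * would steer SSH traffic to flow 5 (array index 4 after the idx--
 * in fq_pie_qdisc_enqueue()).
 */
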
/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_pie_flow *flow,
				  struct sk_buff *skb)
{
	if (!flow->head)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
				struct sk_buff **to_free)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct fq_pie_flow *sel_flow;
	int ret;
	u8 memory_limited = false;
	u8 enqueue = false;
	u32 pkt_len;
	u32 idx;

	/* Classifies packet into corresponding flow */
	idx = fq_pie_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	sel_flow = &q->flows[idx];
	/* Checks whether adding a new packet would exceed memory limit */
	get_pie_cb(skb)->mem_usage = skb->truesize;
	memory_limited = q->memory_usage > q->memory_limit + skb->truesize;

	/* Checks if the qdisc is full */
	if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
		q->stats.overlimit++;
		goto out;
	} else if (unlikely(memory_limited)) {
		q->overmemory++;
	}

	if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
			    sel_flow->backlog, skb->len)) {
		enqueue = true;
	} else if (q->p_params.ecn &&
		   sel_flow->vars.prob <= (MAX_PROB / 100) * q->ecn_prob &&
		   INET_ECN_set_ce(skb)) {
		/* If packet is ecn capable, mark it if drop probability
		 * is lower than the parameter ecn_prob, else drop it.
		 */
		q->stats.ecn_mark++;
		enqueue = true;
	}
	if (enqueue) {
		/* Set enqueue time only when dq_rate_estimator is disabled. */
		if (!q->p_params.dq_rate_estimator)
			pie_set_enqueue_time(skb);

		pkt_len = qdisc_pkt_len(skb);
		q->stats.packets_in++;
		q->memory_usage += skb->truesize;
		sch->qstats.backlog += pkt_len;
		sch->q.qlen++;
		flow_queue_add(sel_flow, skb);
		if (list_empty(&sel_flow->flowchain)) {
			list_add_tail(&sel_flow->flowchain, &q->new_flows);
			q->new_flow_count++;
			sel_flow->deficit = q->quantum;
			sel_flow->qlen = 0;
			sel_flow->backlog = 0;
		}
		sel_flow->qlen++;
		sel_flow->backlog += pkt_len;
		return NET_XMIT_SUCCESS;
	}
out:
	q->stats.dropped++;
	sel_flow->vars.accu_prob = 0;
	__qdisc_drop(skb, to_free);
	qdisc_qstats_drop(sch);
	return NET_XMIT_CN;
}

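/* Worked example for the ECN gate in fq_pie_qdisc_enqueue() above:
 * MAX_PROB is the fixed-point encoding of probability 1.0, so with the
 * default ecn_prob of 10 the test
 * prob <= (MAX_PROB / 100) * ecn_prob marks ECN-capable packets
 * instead of dropping them whenever PIE's drop probability is at or
 * below 10%; above that threshold even ECT packets are dropped.
 */
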
static const struct netlink_range_validation fq_pie_q_range = {
	.min = 1,
	.max = 1 << 20,
};

static const struct nla_policy fq_pie_policy[TCA_FQ_PIE_MAX + 1] = {
	[TCA_FQ_PIE_LIMIT]		= {.type = NLA_U32},
	[TCA_FQ_PIE_FLOWS]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TARGET]		= {.type = NLA_U32},
	[TCA_FQ_PIE_TUPDATE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ALPHA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BETA]		= {.type = NLA_U32},
	[TCA_FQ_PIE_QUANTUM]		=
			NLA_POLICY_FULL_RANGE(NLA_U32, &fq_pie_q_range),
	[TCA_FQ_PIE_MEMORY_LIMIT]	= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN_PROB]		= {.type = NLA_U32},
	[TCA_FQ_PIE_ECN]		= {.type = NLA_U32},
	[TCA_FQ_PIE_BYTEMODE]		= {.type = NLA_U32},
	[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]	= {.type = NLA_U32},
};

static inline struct sk_buff *dequeue_head(struct fq_pie_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb->next = NULL;
	return skb;
}

static struct sk_buff *fq_pie_qdisc_dequeue(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = NULL;
	struct fq_pie_flow *flow;
	struct list_head *head;
	u32 pkt_len;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}

	flow = list_first_entry(head, struct fq_pie_flow, flowchain);
	/* Flow has exhausted all its credits */
	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	if (flow->head) {
		skb = dequeue_head(flow);
		pkt_len = qdisc_pkt_len(skb);
		sch->qstats.backlog -= pkt_len;
		sch->q.qlen--;
		qdisc_bstats_update(sch, skb);
	}

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if (head == &q->new_flows && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}

	flow->qlen--;
	flow->deficit -= pkt_len;
	flow->backlog -= pkt_len;
	q->memory_usage -= get_pie_cb(skb)->mem_usage;
	pie_process_dequeue(skb, &q->p_params, &flow->vars, flow->backlog);
	return skb;
}

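/* Scheduling sketch: the dequeue path above is deficit round robin
 * (DRR). New flows are served before old ones; a flow spends byte
 * credits from its deficit and, once the deficit goes non-positive, is
 * recharged by quantum and rotated onto old_flows. With the default
 * quantum of one MTU (e.g. 1514 bytes), a flow of full-size packets
 * sends roughly one packet per round, giving byte-fair sharing across
 * backlogged flows.
 */
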
static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_PIE_MAX + 1];
	unsigned int len_dropped = 0;
	unsigned int num_dropped = 0;
	int err;

	err = nla_parse_nested(tb, TCA_FQ_PIE_MAX, opt, fq_pie_policy, extack);
	if (err < 0)
		return err;

	sch_tree_lock(sch);
	if (tb[TCA_FQ_PIE_LIMIT]) {
		u32 limit = nla_get_u32(tb[TCA_FQ_PIE_LIMIT]);

		WRITE_ONCE(q->p_params.limit, limit);
		WRITE_ONCE(sch->limit, limit);
	}
	if (tb[TCA_FQ_PIE_FLOWS]) {
		if (q->flows) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows cannot be changed");
			goto flow_error;
		}
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]);
		if (!q->flows_cnt || q->flows_cnt > 65536) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Number of flows must range in [1..65536]");
			goto flow_error;
		}
	}

	/* convert from microseconds to pschedtime */
	if (tb[TCA_FQ_PIE_TARGET]) {
		/* target is in us */
		u32 target = nla_get_u32(tb[TCA_FQ_PIE_TARGET]);

		/* convert to pschedtime */
		WRITE_ONCE(q->p_params.target,
			   PSCHED_NS2TICKS((u64)target * NSEC_PER_USEC));
	}

	/* tupdate is in jiffies */
	if (tb[TCA_FQ_PIE_TUPDATE])
		WRITE_ONCE(q->p_params.tupdate,
			usecs_to_jiffies(nla_get_u32(tb[TCA_FQ_PIE_TUPDATE])));

	if (tb[TCA_FQ_PIE_ALPHA])
		WRITE_ONCE(q->p_params.alpha,
			   nla_get_u32(tb[TCA_FQ_PIE_ALPHA]));

	if (tb[TCA_FQ_PIE_BETA])
		WRITE_ONCE(q->p_params.beta,
			   nla_get_u32(tb[TCA_FQ_PIE_BETA]));

	if (tb[TCA_FQ_PIE_QUANTUM])
		WRITE_ONCE(q->quantum, nla_get_u32(tb[TCA_FQ_PIE_QUANTUM]));

	if (tb[TCA_FQ_PIE_MEMORY_LIMIT])
		WRITE_ONCE(q->memory_limit,
			   nla_get_u32(tb[TCA_FQ_PIE_MEMORY_LIMIT]));

	if (tb[TCA_FQ_PIE_ECN_PROB])
		WRITE_ONCE(q->ecn_prob,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN_PROB]));

	if (tb[TCA_FQ_PIE_ECN])
		WRITE_ONCE(q->p_params.ecn,
			   nla_get_u32(tb[TCA_FQ_PIE_ECN]));

	if (tb[TCA_FQ_PIE_BYTEMODE])
		WRITE_ONCE(q->p_params.bytemode,
			   nla_get_u32(tb[TCA_FQ_PIE_BYTEMODE]));

	if (tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR])
		WRITE_ONCE(q->p_params.dq_rate_estimator,
			   nla_get_u32(tb[TCA_FQ_PIE_DQ_RATE_ESTIMATOR]));

	/* Drop excess packets if new limit is lower */
	while (sch->q.qlen > sch->limit) {
		struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);

		len_dropped += qdisc_pkt_len(skb);
		num_dropped += 1;
		rtnl_kfree_skbs(skb, skb);
	}
	qdisc_tree_reduce_backlog(sch, num_dropped, len_dropped);

	sch_tree_unlock(sch);
	return 0;

flow_error:
	sch_tree_unlock(sch);
	return -EINVAL;
}

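/* Runtime reconfiguration sketch (iproute2 syntax assumed; everything
 * except the number of flows can be changed on a live qdisc):
 *
 *   tc qdisc change dev eth0 root fq_pie memory_limit 16mb ecn_prob 20
 *
 * If the new 'limit' is lower than the current queue length, the drain
 * loop at the end of fq_pie_change() drops the excess immediately.
 */
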
static void fq_pie_timer(struct timer_list *t)
{
	struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
	unsigned long next, tupdate;
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock; /* to lock qdisc for probability calculations */
	int max_cnt, i;

	rcu_read_lock();
	root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	spin_lock(root_lock);

	/* Limit this expensive loop to 2048 flows per round. */
	max_cnt = min_t(int, q->flows_cnt - q->flows_cursor, 2048);
	for (i = 0; i < max_cnt; i++) {
		pie_calculate_probability(&q->p_params,
					  &q->flows[q->flows_cursor].vars,
					  q->flows[q->flows_cursor].backlog);
		q->flows_cursor++;
	}

	tupdate = q->p_params.tupdate;
	next = 0;
	if (q->flows_cursor >= q->flows_cnt) {
		q->flows_cursor = 0;
		next = tupdate;
	}
	if (tupdate)
		mod_timer(&q->adapt_timer, jiffies + next);
	spin_unlock(root_lock);
	rcu_read_unlock();
}

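/* Amortization note: with the maximum of 65536 flows, one timer pass
 * updates at most 2048 PIE probabilities, so a full sweep takes
 * 65536 / 2048 = 32 firings. The timer is re-armed for a full tupdate
 * interval only once flows_cursor wraps; otherwise next stays 0 and
 * the timer refires immediately to continue the sweep.
 */
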
static int fq_pie_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	int err;
	u32 idx;

	pie_params_init(&q->p_params);
	sch->limit = 10 * 1024;
	q->p_params.limit = sch->limit;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->sch = sch;
	q->ecn_prob = 10;
	q->flows_cnt = 1024;
	q->memory_limit = SZ_32M;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	timer_setup(&q->adapt_timer, fq_pie_timer, 0);

	if (opt) {
		err = fq_pie_change(sch, opt, extack);

		if (err)
			return err;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),
			    GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}

	mod_timer(&q->adapt_timer, jiffies + HZ / 2);

	return 0;

init_failure:
	q->flows_cnt = 0;

	return err;
}

static int fq_pie_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (!opts)
		return -EMSGSIZE;

	/* convert target from pschedtime to us */
	if (nla_put_u32(skb, TCA_FQ_PIE_LIMIT, READ_ONCE(sch->limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_FLOWS, READ_ONCE(q->flows_cnt)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TARGET,
			((u32)PSCHED_TICKS2NS(READ_ONCE(q->p_params.target))) /
			NSEC_PER_USEC) ||
	    nla_put_u32(skb, TCA_FQ_PIE_TUPDATE,
			jiffies_to_usecs(READ_ONCE(q->p_params.tupdate))) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ALPHA, READ_ONCE(q->p_params.alpha)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BETA, READ_ONCE(q->p_params.beta)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_QUANTUM, READ_ONCE(q->quantum)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_MEMORY_LIMIT,
			READ_ONCE(q->memory_limit)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN_PROB, READ_ONCE(q->ecn_prob)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_ECN, READ_ONCE(q->p_params.ecn)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_BYTEMODE, READ_ONCE(q->p_params.bytemode)) ||
	    nla_put_u32(skb, TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
			READ_ONCE(q->p_params.dq_rate_estimator)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int fq_pie_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	struct tc_fq_pie_xstats st = {
		.packets_in	= q->stats.packets_in,
		.overlimit	= q->stats.overlimit,
		.overmemory	= q->overmemory,
		.dropped	= q->stats.dropped,
		.ecn_mark	= q->stats.ecn_mark,
		.new_flow_count = q->new_flow_count,
		.memory_usage   = q->memory_usage,
	};
	struct list_head *pos;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

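/* These xstats reach user space through gnet_stats_copy_app(); a hedged
 * way to read them (iproute2 assumed):
 *
 *   tc -s qdisc show dev eth0
 *
 * which prints packets_in, overlimit, dropped, ecn_mark, new_flow_count
 * and the current new/old flow list lengths next to the generic stats.
 */
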
static void fq_pie_reset(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);
	u32 idx;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (idx = 0; idx < q->flows_cnt; idx++) {
		struct fq_pie_flow *flow = q->flows + idx;

		/* Removes all packets from flow */
		rtnl_kfree_skbs(flow->head, flow->tail);
		flow->head = NULL;

		INIT_LIST_HEAD(&flow->flowchain);
		pie_vars_init(&flow->vars);
	}
}

static void fq_pie_destroy(struct Qdisc *sch)
{
	struct fq_pie_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->p_params.tupdate = 0;
	del_timer_sync(&q->adapt_timer);
	kvfree(q->flows);
}

static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = {
	.id		= "fq_pie",
	.priv_size	= sizeof(struct fq_pie_sched_data),
	.enqueue	= fq_pie_qdisc_enqueue,
	.dequeue	= fq_pie_qdisc_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= fq_pie_init,
	.destroy	= fq_pie_destroy,
	.reset		= fq_pie_reset,
	.change		= fq_pie_change,
	.dump		= fq_pie_dump,
	.dump_stats	= fq_pie_dump_stats,
	.owner		= THIS_MODULE,
};
MODULE_ALIAS_NET_SCH("fq_pie");

static int __init fq_pie_module_init(void)
{
	return register_qdisc(&fq_pie_qdisc_ops);
}

static void __exit fq_pie_module_exit(void)
{
	unregister_qdisc(&fq_pie_qdisc_ops);
}

module_init(fq_pie_module_init);
module_exit(fq_pie_module_exit);

MODULE_DESCRIPTION("Flow Queue Proportional Integral controller Enhanced (FQ-PIE)");
MODULE_AUTHOR("Mohit P. Tahiliani");
MODULE_LICENSE("GPL");