Loading...
1/*
2 * net/sched/sch_fifo.c The simplest FIFO queue.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 */
11
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/skbuff.h>
18#include <net/pkt_sched.h>
19
20/* 1 band FIFO pseudo-"scheduler" */
21
22static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
23 struct sk_buff **to_free)
24{
25 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
26 return qdisc_enqueue_tail(skb, sch);
27
28 return qdisc_drop(skb, sch, to_free);
29}
30
31static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
32 struct sk_buff **to_free)
33{
34 if (likely(sch->q.qlen < sch->limit))
35 return qdisc_enqueue_tail(skb, sch);
36
37 return qdisc_drop(skb, sch, to_free);
38}
39
/* "pfifo_head_drop" enqueue: when the queue is full, drop the OLDEST
 * packet (the head) to make room for the new one, so the queue always
 * keeps the freshest traffic.
 */
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	unsigned int prev_backlog;

	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	prev_backlog = sch->qstats.backlog;
	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q, to_free);
	qdisc_qstats_drop(sch);
	qdisc_enqueue_tail(skb, sch);

	/* qlen is net unchanged (one dropped, one enqueued), so report a
	 * packet delta of 0 and only the byte delta between the dropped
	 * head and the newly queued skb to the parent qdiscs.
	 */
	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_CN;
}
57
/* Shared ->init/->change handler for all three FIFO flavors.
 *
 * With no netlink options the limit defaults to the device tx_queue_len,
 * scaled to bytes by the device MTU for bfifo.  TCQ_F_CAN_BYPASS is set
 * when the limit is large enough that a single packet can never be
 * rejected (>= one MTU for bfifo, >= 1 packet otherwise), allowing the
 * core to bypass the qdisc when its queue is empty.
 *
 * Returns 0, or -EINVAL if the supplied attribute is too short.
 */
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	bool bypass;
	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;

	if (opt == NULL) {
		u32 limit = qdisc_dev(sch)->tx_queue_len;

		if (is_bfifo)
			limit *= psched_mtu(qdisc_dev(sch));

		sch->limit = limit;
	} else {
		struct tc_fifo_qopt *ctl = nla_data(opt);

		/* Validate attribute length before trusting ctl->limit. */
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		sch->limit = ctl->limit;
	}

	if (is_bfifo)
		bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
	else
		bypass = sch->limit >= 1;

	if (bypass)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}
91
92static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
93{
94 struct tc_fifo_qopt opt = { .limit = sch->limit };
95
96 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
97 goto nla_put_failure;
98 return skb->len;
99
100nla_put_failure:
101 return -1;
102}
103
/* Packet-count-limited FIFO: enqueue tail-drops on overflow. */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,	/* no private state beyond struct Qdisc */
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change re-runs init on new opts */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
117
/* Byte-limited FIFO: enqueue tail-drops once backlog would exceed limit. */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,	/* no private state beyond struct Qdisc */
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change re-runs init on new opts */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
131
/* Packet-count-limited FIFO that drops the oldest packet when full.
 * Note: unlike pfifo/bfifo above, this one is not EXPORT_SYMBOL'd.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,	/* no private state beyond struct Qdisc */
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
144
/* Pass size change message down to embedded FIFO.
 *
 * Used by parent qdiscs that own an internal FIFO child: builds a minimal
 * netlink attribute carrying the new limit and feeds it to the child's
 * own ->change() handler, exactly as a userspace request would.
 * Returns 0 (including the no-op non-FIFO case), -ENOMEM, or the
 * child's ->change() result.
 */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO: skipping the
	 * first character matches "pfifo", "bfifo", "pfifo_head_drop", ...
	 */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		/* nla_type is not inspected by fifo's ->change handler. */
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla, NULL);
		kfree(nla);
	}
	return ret;
}
EXPORT_SYMBOL(fifo_set_limit);
167
/* Create a child FIFO of type @ops under @sch (minor 1 of sch's handle)
 * and size it to @limit via fifo_set_limit().
 *
 * Returns the new qdisc on success, or an ERR_PTR: -ENOMEM when
 * allocation fails, or the error from the child's ->change() handler
 * (in which case the half-built child is destroyed here).
 */
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack)
{
	struct Qdisc *q;
	int err = -ENOMEM;

	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
			      extack);
	if (q) {
		err = fifo_set_limit(q, limit);
		if (err < 0) {
			qdisc_destroy(q);
			q = NULL;
		}
	}

	/* q on success, otherwise encode err into the returned pointer */
	return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_fifo.c The simplest FIFO queue.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 */
7
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/skbuff.h>
14#include <net/pkt_sched.h>
15#include <net/pkt_cls.h>
16
17/* 1 band FIFO pseudo-"scheduler" */
18
/* Byte-limited FIFO enqueue: accept only while the queued byte count
 * (qstats.backlog) plus this packet stays within sch->limit.
 */
static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}
27
/* Packet-count-limited FIFO enqueue: drop the new packet once the queue
 * already holds sch->limit skbs.
 */
static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	return qdisc_drop(skb, sch, to_free);
}
36
/* "pfifo_head_drop" enqueue: when the queue is full, drop the OLDEST
 * packet (the head) to make room for the new one, so the queue always
 * keeps the freshest traffic.
 */
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	unsigned int prev_backlog;

	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	prev_backlog = sch->qstats.backlog;
	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q, to_free);
	qdisc_qstats_drop(sch);
	qdisc_enqueue_tail(skb, sch);

	/* qlen is net unchanged (one dropped, one enqueued), so report a
	 * packet delta of 0 and only the byte delta between the dropped
	 * head and the newly queued skb to the parent qdiscs.
	 */
	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_CN;
}
54
55static void fifo_offload_init(struct Qdisc *sch)
56{
57 struct net_device *dev = qdisc_dev(sch);
58 struct tc_fifo_qopt_offload qopt;
59
60 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
61 return;
62
63 qopt.command = TC_FIFO_REPLACE;
64 qopt.handle = sch->handle;
65 qopt.parent = sch->parent;
66 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
67}
68
69static void fifo_offload_destroy(struct Qdisc *sch)
70{
71 struct net_device *dev = qdisc_dev(sch);
72 struct tc_fifo_qopt_offload qopt;
73
74 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
75 return;
76
77 qopt.command = TC_FIFO_DESTROY;
78 qopt.handle = sch->handle;
79 qopt.parent = sch->parent;
80 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
81}
82
83static int fifo_offload_dump(struct Qdisc *sch)
84{
85 struct tc_fifo_qopt_offload qopt;
86
87 qopt.command = TC_FIFO_STATS;
88 qopt.handle = sch->handle;
89 qopt.parent = sch->parent;
90 qopt.stats.bstats = &sch->bstats;
91 qopt.stats.qstats = &sch->qstats;
92
93 return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
94}
95
/* Software-side ->init/->change shared by all three FIFO flavors.
 *
 * With no netlink options the limit defaults to the device tx_queue_len,
 * scaled to bytes by the device MTU for bfifo.  TCQ_F_CAN_BYPASS is set
 * when the limit is large enough that a single packet can never be
 * rejected (>= one MTU for bfifo, >= 1 packet otherwise), allowing the
 * core to bypass the qdisc when its queue is empty.
 *
 * Returns 0, or -EINVAL if the supplied attribute is too short.
 */
static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	bool bypass;
	bool is_bfifo = sch->ops == &bfifo_qdisc_ops;

	if (opt == NULL) {
		u32 limit = qdisc_dev(sch)->tx_queue_len;

		if (is_bfifo)
			limit *= psched_mtu(qdisc_dev(sch));

		sch->limit = limit;
	} else {
		struct tc_fifo_qopt *ctl = nla_data(opt);

		/* Validate attribute length before trusting ctl->limit. */
		if (nla_len(opt) < sizeof(*ctl))
			return -EINVAL;

		sch->limit = ctl->limit;
	}

	if (is_bfifo)
		bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
	else
		bypass = sch->limit >= 1;

	if (bypass)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;

	return 0;
}
130
/* ->init/->change for pfifo/bfifo: software setup first, and only if
 * that succeeds mirror the configuration into the hardware.
 */
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err = __fifo_init(sch, opt, extack);

	if (!err)
		fifo_offload_init(sch);

	return err;
}
143
/* ->init/->change for pfifo_head_drop: software setup only, no
 * hardware-offload step (contrast with fifo_init above).
 */
static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}
149
/* ->destroy for pfifo/bfifo: tear down any hardware offload state.
 * There is no software state to free here (priv_size is 0).
 */
static void fifo_destroy(struct Qdisc *sch)
{
	fifo_offload_destroy(sch);
}
154
155static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
156{
157 struct tc_fifo_qopt opt = { .limit = sch->limit };
158
159 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
160 goto nla_put_failure;
161 return skb->len;
162
163nla_put_failure:
164 return -1;
165}
166
/* ->dump for pfifo/bfifo: refresh stats from hardware first; any
 * offload error aborts the dump before the config is emitted.
 */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err = fifo_offload_dump(sch);

	return err ? err : __fifo_dump(sch, skb);
}
177
/* ->dump for pfifo_head_drop: software config only, no hardware stats
 * to merge (this flavor has no offload path).
 */
static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}
182
/* Packet-count-limited FIFO: enqueue tail-drops on overflow. */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,	/* no private state beyond struct Qdisc */
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,	/* releases hw offload state */
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change re-runs init on new opts */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
197
/* Byte-limited FIFO: enqueue tail-drops once backlog would exceed limit. */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,	/* no private state beyond struct Qdisc */
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,	/* releases hw offload state */
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change re-runs init on new opts */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
212
/* Packet-count-limited FIFO that drops the oldest packet when full.
 * Uses the _hd_ handlers (no hardware offload) and has no ->destroy;
 * unlike pfifo/bfifo above, it is not EXPORT_SYMBOL'd.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,	/* no private state beyond struct Qdisc */
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_hd_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_hd_init,
	.dump		=	fifo_hd_dump,
	.owner		=	THIS_MODULE,
};
225
/* Pass size change message down to embedded FIFO.
 *
 * Used by parent qdiscs that own an internal FIFO child: builds a minimal
 * netlink attribute carrying the new limit and feeds it to the child's
 * own ->change() handler, exactly as a userspace request would.
 * Returns 0 (including the no-op non-FIFO case), -ENOMEM, or the
 * child's ->change() result.
 */
int fifo_set_limit(struct Qdisc *q, unsigned int limit)
{
	struct nlattr *nla;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO: skipping the
	 * first character matches "pfifo", "bfifo", "pfifo_head_drop", ...
	 */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (nla) {
		/* nla_type is not inspected by fifo's ->change handler. */
		nla->nla_type = RTM_NEWQDISC;
		nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;

		ret = q->ops->change(q, nla, NULL);
		kfree(nla);
	}
	return ret;
}
EXPORT_SYMBOL(fifo_set_limit);
248
/* Create a child FIFO of type @ops under @sch (minor 1 of sch's handle)
 * and size it to @limit via fifo_set_limit().
 *
 * Returns the new qdisc on success, or an ERR_PTR: -ENOMEM when
 * allocation fails, or the error from the child's ->change() handler
 * (in which case the half-built child is released here).
 */
struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
			       unsigned int limit,
			       struct netlink_ext_ack *extack)
{
	struct Qdisc *q;
	int err = -ENOMEM;

	q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
			      extack);
	if (q) {
		err = fifo_set_limit(q, limit);
		if (err < 0) {
			qdisc_put(q);
			q = NULL;
		}
	}

	/* q on success, otherwise encode err into the returned pointer */
	return q ? : ERR_PTR(err);
}
EXPORT_SYMBOL(fifo_create_dflt);