1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_fifo.c The simplest FIFO queue.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 */
7
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/skbuff.h>
14#include <net/pkt_sched.h>
15
16/* 1 band FIFO pseudo-"scheduler" */
17
18static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
19 struct sk_buff **to_free)
20{
21 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
22 return qdisc_enqueue_tail(skb, sch);
23
24 return qdisc_drop(skb, sch, to_free);
25}
26
27static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
28 struct sk_buff **to_free)
29{
30 if (likely(sch->q.qlen < sch->limit))
31 return qdisc_enqueue_tail(skb, sch);
32
33 return qdisc_drop(skb, sch, to_free);
34}
35
36static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
37 struct sk_buff **to_free)
38{
39 unsigned int prev_backlog;
40
41 if (likely(sch->q.qlen < sch->limit))
42 return qdisc_enqueue_tail(skb, sch);
43
44 prev_backlog = sch->qstats.backlog;
45 /* queue full, remove one skb to fulfill the limit */
46 __qdisc_queue_drop_head(sch, &sch->q, to_free);
47 qdisc_qstats_drop(sch);
48 qdisc_enqueue_tail(skb, sch);
49
50 qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
51 return NET_XMIT_CN;
52}
53
54static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
55 struct netlink_ext_ack *extack)
56{
57 bool bypass;
58 bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
59
60 if (opt == NULL) {
61 u32 limit = qdisc_dev(sch)->tx_queue_len;
62
63 if (is_bfifo)
64 limit *= psched_mtu(qdisc_dev(sch));
65
66 sch->limit = limit;
67 } else {
68 struct tc_fifo_qopt *ctl = nla_data(opt);
69
70 if (nla_len(opt) < sizeof(*ctl))
71 return -EINVAL;
72
73 sch->limit = ctl->limit;
74 }
75
76 if (is_bfifo)
77 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
78 else
79 bypass = sch->limit >= 1;
80
81 if (bypass)
82 sch->flags |= TCQ_F_CAN_BYPASS;
83 else
84 sch->flags &= ~TCQ_F_CAN_BYPASS;
85 return 0;
86}
87
88static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
89{
90 struct tc_fifo_qopt opt = { .limit = sch->limit };
91
92 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
93 goto nla_put_failure;
94 return skb->len;
95
96nla_put_failure:
97 return -1;
98}
99
/* Packet-count limited FIFO: drops new packets once qlen reaches sch->limit. */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* same handler: re-parses the limit */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
113
/* Byte-count limited FIFO: drops new packets once the byte backlog would
 * exceed sch->limit.
 */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* same handler: re-parses the limit */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
127
/* Packet-count limited FIFO that drops the OLDEST packet (head) when full,
 * so the newest packet is always queued.  Not exported: built-in users only.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* same handler: re-parses the limit */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
140
141/* Pass size change message down to embedded FIFO */
142int fifo_set_limit(struct Qdisc *q, unsigned int limit)
143{
144 struct nlattr *nla;
145 int ret = -ENOMEM;
146
147 /* Hack to avoid sending change message to non-FIFO */
148 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
149 return 0;
150
151 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
152 if (nla) {
153 nla->nla_type = RTM_NEWQDISC;
154 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
155 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
156
157 ret = q->ops->change(q, nla, NULL);
158 kfree(nla);
159 }
160 return ret;
161}
162EXPORT_SYMBOL(fifo_set_limit);
163
164struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
165 unsigned int limit,
166 struct netlink_ext_ack *extack)
167{
168 struct Qdisc *q;
169 int err = -ENOMEM;
170
171 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
172 extack);
173 if (q) {
174 err = fifo_set_limit(q, limit);
175 if (err < 0) {
176 qdisc_put(q);
177 q = NULL;
178 }
179 }
180
181 return q ? : ERR_PTR(err);
182}
183EXPORT_SYMBOL(fifo_create_dflt);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_fifo.c The simplest FIFO queue.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 */
7
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/skbuff.h>
14#include <net/pkt_sched.h>
15#include <net/pkt_cls.h>
16
17/* 1 band FIFO pseudo-"scheduler" */
18
19static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
20 struct sk_buff **to_free)
21{
22 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
23 READ_ONCE(sch->limit)))
24 return qdisc_enqueue_tail(skb, sch);
25
26 return qdisc_drop(skb, sch, to_free);
27}
28
29static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
30 struct sk_buff **to_free)
31{
32 if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
33 return qdisc_enqueue_tail(skb, sch);
34
35 return qdisc_drop(skb, sch, to_free);
36}
37
/* Head-drop FIFO enqueue: when the queue is full, the OLDEST packet is
 * dropped so the new one can always be queued.
 */
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	unsigned int prev_backlog;

	/* A zero limit leaves no head to drop: without this guard the
	 * drop-head below would be a no-op on an empty queue and the new
	 * packet would be enqueued past the limit.
	 */
	if (unlikely(READ_ONCE(sch->limit) == 0))
		return qdisc_drop(skb, sch, to_free);

	if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
		return qdisc_enqueue_tail(skb, sch);

	prev_backlog = sch->qstats.backlog;
	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q, to_free);
	qdisc_qstats_drop(sch);
	qdisc_enqueue_tail(skb, sch);

	/* qlen is unchanged (one dropped, one enqueued): report only the
	 * byte delta up the qdisc tree.
	 */
	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_CN;
}
58
59static void fifo_offload_init(struct Qdisc *sch)
60{
61 struct net_device *dev = qdisc_dev(sch);
62 struct tc_fifo_qopt_offload qopt;
63
64 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
65 return;
66
67 qopt.command = TC_FIFO_REPLACE;
68 qopt.handle = sch->handle;
69 qopt.parent = sch->parent;
70 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
71}
72
73static void fifo_offload_destroy(struct Qdisc *sch)
74{
75 struct net_device *dev = qdisc_dev(sch);
76 struct tc_fifo_qopt_offload qopt;
77
78 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
79 return;
80
81 qopt.command = TC_FIFO_DESTROY;
82 qopt.handle = sch->handle;
83 qopt.parent = sch->parent;
84 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
85}
86
87static int fifo_offload_dump(struct Qdisc *sch)
88{
89 struct tc_fifo_qopt_offload qopt;
90
91 qopt.command = TC_FIFO_STATS;
92 qopt.handle = sch->handle;
93 qopt.parent = sch->parent;
94 qopt.stats.bstats = &sch->bstats;
95 qopt.stats.qstats = &sch->qstats;
96
97 return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
98}
99
100static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
101 struct netlink_ext_ack *extack)
102{
103 bool bypass;
104 bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
105
106 if (opt == NULL) {
107 u32 limit = qdisc_dev(sch)->tx_queue_len;
108
109 if (is_bfifo)
110 limit *= psched_mtu(qdisc_dev(sch));
111
112 WRITE_ONCE(sch->limit, limit);
113 } else {
114 struct tc_fifo_qopt *ctl = nla_data(opt);
115
116 if (nla_len(opt) < sizeof(*ctl))
117 return -EINVAL;
118
119 WRITE_ONCE(sch->limit, ctl->limit);
120 }
121
122 if (is_bfifo)
123 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
124 else
125 bypass = sch->limit >= 1;
126
127 if (bypass)
128 sch->flags |= TCQ_F_CAN_BYPASS;
129 else
130 sch->flags &= ~TCQ_F_CAN_BYPASS;
131
132 return 0;
133}
134
/* init/change for pfifo and bfifo: common setup plus offload notification. */
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err = __fifo_init(sch, opt, extack);

	if (!err)
		fifo_offload_init(sch);
	return err;
}
147
/* init/change for pfifo_head_drop, which has no hardware offload path. */
static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}
153
/* Teardown for pfifo/bfifo: only the offload needs undoing. */
static void fifo_destroy(struct Qdisc *sch)
{
	fifo_offload_destroy(sch);
}
158
159static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
160{
161 struct tc_fifo_qopt opt = { .limit = READ_ONCE(sch->limit) };
162
163 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
164 goto nla_put_failure;
165 return skb->len;
166
167nla_put_failure:
168 return -1;
169}
170
/* dump for pfifo/bfifo: refresh offloaded stats first, then emit options. */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err = fifo_offload_dump(sch);

	return err ? err : __fifo_dump(sch, skb);
}
181
/* dump for pfifo_head_drop: no offload, so no stats refresh needed. */
static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}
186
/* Packet-count limited FIFO: drops new packets once qlen reaches sch->limit. */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* same handler: re-parses the limit */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
201
/* Byte-count limited FIFO: drops new packets once the byte backlog would
 * exceed sch->limit.
 */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* same handler: re-parses the limit */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
216
/* Packet-count limited FIFO that drops the OLDEST packet (head) when full.
 * Uses the _hd variants: no offload, so no .destroy and plain init/dump.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_hd_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_hd_init,
	.dump		=	fifo_hd_dump,
	.owner		=	THIS_MODULE,
};
229
230/* Pass size change message down to embedded FIFO */
231int fifo_set_limit(struct Qdisc *q, unsigned int limit)
232{
233 struct nlattr *nla;
234 int ret = -ENOMEM;
235
236 /* Hack to avoid sending change message to non-FIFO */
237 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
238 return 0;
239
240 if (!q->ops->change)
241 return 0;
242
243 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
244 if (nla) {
245 nla->nla_type = RTM_NEWQDISC;
246 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
247 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
248
249 ret = q->ops->change(q, nla, NULL);
250 kfree(nla);
251 }
252 return ret;
253}
254EXPORT_SYMBOL(fifo_set_limit);
255
256struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
257 unsigned int limit,
258 struct netlink_ext_ack *extack)
259{
260 struct Qdisc *q;
261 int err = -ENOMEM;
262
263 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
264 extack);
265 if (q) {
266 err = fifo_set_limit(q, limit);
267 if (err < 0) {
268 qdisc_put(q);
269 q = NULL;
270 }
271 }
272
273 return q ? : ERR_PTR(err);
274}
275EXPORT_SYMBOL(fifo_create_dflt);
276MODULE_DESCRIPTION("Single queue packet and byte based First In First Out(P/BFIFO) scheduler");