/* net/sched/sch_fifo.c — two revisions concatenated below (older API first, newer API second) */
1/*
2 * net/sched/sch_fifo.c The simplest FIFO queue.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
10 */
11
12#include <linux/module.h>
13#include <linux/slab.h>
14#include <linux/types.h>
15#include <linux/kernel.h>
16#include <linux/errno.h>
17#include <linux/skbuff.h>
18#include <net/pkt_sched.h>
19
20/* 1 band FIFO pseudo-"scheduler" */
21
22static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
23{
24 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
25 return qdisc_enqueue_tail(skb, sch);
26
27 return qdisc_reshape_fail(skb, sch);
28}
29
30static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch)
31{
32 if (likely(skb_queue_len(&sch->q) < sch->limit))
33 return qdisc_enqueue_tail(skb, sch);
34
35 return qdisc_reshape_fail(skb, sch);
36}
37
38static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
39{
40 if (likely(skb_queue_len(&sch->q) < sch->limit))
41 return qdisc_enqueue_tail(skb, sch);
42
43 /* queue full, remove one skb to fulfill the limit */
44 __qdisc_queue_drop_head(sch, &sch->q);
45 sch->qstats.drops++;
46 qdisc_enqueue_tail(skb, sch);
47
48 return NET_XMIT_CN;
49}
50
51static int fifo_init(struct Qdisc *sch, struct nlattr *opt)
52{
53 bool bypass;
54 bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
55
56 if (opt == NULL) {
57 u32 limit = qdisc_dev(sch)->tx_queue_len ? : 1;
58
59 if (is_bfifo)
60 limit *= psched_mtu(qdisc_dev(sch));
61
62 sch->limit = limit;
63 } else {
64 struct tc_fifo_qopt *ctl = nla_data(opt);
65
66 if (nla_len(opt) < sizeof(*ctl))
67 return -EINVAL;
68
69 sch->limit = ctl->limit;
70 }
71
72 if (is_bfifo)
73 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
74 else
75 bypass = sch->limit >= 1;
76
77 if (bypass)
78 sch->flags |= TCQ_F_CAN_BYPASS;
79 else
80 sch->flags &= ~TCQ_F_CAN_BYPASS;
81 return 0;
82}
83
/* Write the qdisc's configuration (just the limit) into a netlink dump.
 * Returns the running message length, or -1 when the attribute does not
 * fit in @skb.
 */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct tc_fifo_qopt opt = { .limit = sch->limit };

	/* NLA_PUT jumps to the nla_put_failure label when @skb lacks room */
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}
94
/* Packet-count-limited FIFO: rejects new packets once sch->limit
 * packets are queued (see pfifo_enqueue above).
 */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change reuses init */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
109
/* Byte-count-limited FIFO: rejects new packets once the queued backlog
 * would exceed sch->limit bytes (see bfifo_enqueue above).
 */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change reuses init */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
124
/* Head-drop FIFO: when full it evicts the oldest packet instead of the
 * new one (see pfifo_tail_enqueue); .drop evicts from the head too.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.drop		=	qdisc_queue_drop_head,
	.init		=	fifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change reuses init */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
138
139/* Pass size change message down to embedded FIFO */
140int fifo_set_limit(struct Qdisc *q, unsigned int limit)
141{
142 struct nlattr *nla;
143 int ret = -ENOMEM;
144
145 /* Hack to avoid sending change message to non-FIFO */
146 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
147 return 0;
148
149 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
150 if (nla) {
151 nla->nla_type = RTM_NEWQDISC;
152 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
153 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
154
155 ret = q->ops->change(q, nla);
156 kfree(nla);
157 }
158 return ret;
159}
160EXPORT_SYMBOL(fifo_set_limit);
161
162struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
163 unsigned int limit)
164{
165 struct Qdisc *q;
166 int err = -ENOMEM;
167
168 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1));
169 if (q) {
170 err = fifo_set_limit(q, limit);
171 if (err < 0) {
172 qdisc_destroy(q);
173 q = NULL;
174 }
175 }
176
177 return q ? : ERR_PTR(err);
178}
179EXPORT_SYMBOL(fifo_create_dflt);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * net/sched/sch_fifo.c The simplest FIFO queue.
4 *
5 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
6 */
7
8#include <linux/module.h>
9#include <linux/slab.h>
10#include <linux/types.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
13#include <linux/skbuff.h>
14#include <net/pkt_sched.h>
15#include <net/pkt_cls.h>
16
17/* 1 band FIFO pseudo-"scheduler" */
18
19static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
20 struct sk_buff **to_free)
21{
22 if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
23 return qdisc_enqueue_tail(skb, sch);
24
25 return qdisc_drop(skb, sch, to_free);
26}
27
28static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
29 struct sk_buff **to_free)
30{
31 if (likely(sch->q.qlen < sch->limit))
32 return qdisc_enqueue_tail(skb, sch);
33
34 return qdisc_drop(skb, sch, to_free);
35}
36
/* Head-drop FIFO: a new packet is always accepted; when the queue is
 * full the oldest packet is evicted to make room and NET_XMIT_CN is
 * returned to signal congestion.
 */
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			      struct sk_buff **to_free)
{
	unsigned int prev_backlog;

	if (likely(sch->q.qlen < sch->limit))
		return qdisc_enqueue_tail(skb, sch);

	/* snapshot backlog before the drop/enqueue pair below */
	prev_backlog = sch->qstats.backlog;
	/* queue full, remove one skb to fulfill the limit */
	__qdisc_queue_drop_head(sch, &sch->q, to_free);
	qdisc_qstats_drop(sch);
	qdisc_enqueue_tail(skb, sch);

	/* propagate the net byte change (dropped vs. enqueued skb sizes)
	 * to parent qdiscs; qlen is unchanged, hence the 0 */
	qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);
	return NET_XMIT_CN;
}
54
55static void fifo_offload_init(struct Qdisc *sch)
56{
57 struct net_device *dev = qdisc_dev(sch);
58 struct tc_fifo_qopt_offload qopt;
59
60 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
61 return;
62
63 qopt.command = TC_FIFO_REPLACE;
64 qopt.handle = sch->handle;
65 qopt.parent = sch->parent;
66 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
67}
68
69static void fifo_offload_destroy(struct Qdisc *sch)
70{
71 struct net_device *dev = qdisc_dev(sch);
72 struct tc_fifo_qopt_offload qopt;
73
74 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
75 return;
76
77 qopt.command = TC_FIFO_DESTROY;
78 qopt.handle = sch->handle;
79 qopt.parent = sch->parent;
80 dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_FIFO, &qopt);
81}
82
/* Ask the offloading driver to fold its hardware counters into this
 * qdisc's bstats/qstats before a dump.  Return value comes from
 * qdisc_offload_dump_helper().
 */
static int fifo_offload_dump(struct Qdisc *sch)
{
	struct tc_fifo_qopt_offload qopt;

	qopt.command = TC_FIFO_STATS;
	qopt.handle = sch->handle;
	qopt.parent = sch->parent;
	/* driver accumulates directly into the software stat blocks */
	qopt.stats.bstats = &sch->bstats;
	qopt.stats.qstats = &sch->qstats;

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_FIFO, &qopt);
}
95
96static int __fifo_init(struct Qdisc *sch, struct nlattr *opt,
97 struct netlink_ext_ack *extack)
98{
99 bool bypass;
100 bool is_bfifo = sch->ops == &bfifo_qdisc_ops;
101
102 if (opt == NULL) {
103 u32 limit = qdisc_dev(sch)->tx_queue_len;
104
105 if (is_bfifo)
106 limit *= psched_mtu(qdisc_dev(sch));
107
108 sch->limit = limit;
109 } else {
110 struct tc_fifo_qopt *ctl = nla_data(opt);
111
112 if (nla_len(opt) < sizeof(*ctl))
113 return -EINVAL;
114
115 sch->limit = ctl->limit;
116 }
117
118 if (is_bfifo)
119 bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
120 else
121 bypass = sch->limit >= 1;
122
123 if (bypass)
124 sch->flags |= TCQ_F_CAN_BYPASS;
125 else
126 sch->flags &= ~TCQ_F_CAN_BYPASS;
127
128 return 0;
129}
130
/* Init/change for pfifo and bfifo: generic setup plus best-effort
 * hardware offload of the resulting configuration.
 */
static int fifo_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	int err = __fifo_init(sch, opt, extack);

	/* only mirror to hardware once software setup has succeeded */
	if (!err)
		fifo_offload_init(sch);

	return err;
}
143
/* Init/change for pfifo_head_drop: no hardware offload hook, so only
 * the generic limit/bypass setup runs.
 */
static int fifo_hd_init(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	return __fifo_init(sch, opt, extack);
}
149
/* Teardown for pfifo/bfifo: nothing to free in software, just tell the
 * offloading driver (if any) to remove the hardware mirror.
 */
static void fifo_destroy(struct Qdisc *sch)
{
	fifo_offload_destroy(sch);
}
154
155static int __fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
156{
157 struct tc_fifo_qopt opt = { .limit = sch->limit };
158
159 if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
160 goto nla_put_failure;
161 return skb->len;
162
163nla_put_failure:
164 return -1;
165}
166
/* Dump for pfifo and bfifo: refresh hardware stats first, then emit
 * the software configuration; abort the dump on offload failure.
 */
static int fifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	int err = fifo_offload_dump(sch);

	return err ? err : __fifo_dump(sch, skb);
}
177
/* Dump for pfifo_head_drop: no offload stats to merge, plain dump. */
static int fifo_hd_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return __fifo_dump(sch, skb);
}
182
/* Packet-count-limited FIFO: rejects new packets once sch->limit
 * packets are queued (see pfifo_enqueue above).
 */
struct Qdisc_ops pfifo_qdisc_ops __read_mostly = {
	.id		=	"pfifo",
	.priv_size	=	0,
	.enqueue	=	pfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change reuses init */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(pfifo_qdisc_ops);
197
/* Byte-count-limited FIFO: rejects new packets once the queued backlog
 * would exceed sch->limit bytes (see bfifo_enqueue above).
 */
struct Qdisc_ops bfifo_qdisc_ops __read_mostly = {
	.id		=	"bfifo",
	.priv_size	=	0,
	.enqueue	=	bfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_init,
	.destroy	=	fifo_destroy,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_init,	/* change reuses init */
	.dump		=	fifo_dump,
	.owner		=	THIS_MODULE,
};
EXPORT_SYMBOL(bfifo_qdisc_ops);
212
/* Head-drop FIFO: when full it evicts the oldest packet instead of the
 * new one (see pfifo_tail_enqueue).  Uses the non-offload init/dump
 * variants and has no .destroy since there is no hardware state.
 */
struct Qdisc_ops pfifo_head_drop_qdisc_ops __read_mostly = {
	.id		=	"pfifo_head_drop",
	.priv_size	=	0,
	.enqueue	=	pfifo_tail_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.peek		=	qdisc_peek_head,
	.init		=	fifo_hd_init,
	.reset		=	qdisc_reset_queue,
	.change		=	fifo_hd_init,	/* change reuses init */
	.dump		=	fifo_hd_dump,
	.owner		=	THIS_MODULE,
};
225
226/* Pass size change message down to embedded FIFO */
227int fifo_set_limit(struct Qdisc *q, unsigned int limit)
228{
229 struct nlattr *nla;
230 int ret = -ENOMEM;
231
232 /* Hack to avoid sending change message to non-FIFO */
233 if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
234 return 0;
235
236 nla = kmalloc(nla_attr_size(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
237 if (nla) {
238 nla->nla_type = RTM_NEWQDISC;
239 nla->nla_len = nla_attr_size(sizeof(struct tc_fifo_qopt));
240 ((struct tc_fifo_qopt *)nla_data(nla))->limit = limit;
241
242 ret = q->ops->change(q, nla, NULL);
243 kfree(nla);
244 }
245 return ret;
246}
247EXPORT_SYMBOL(fifo_set_limit);
248
249struct Qdisc *fifo_create_dflt(struct Qdisc *sch, struct Qdisc_ops *ops,
250 unsigned int limit,
251 struct netlink_ext_ack *extack)
252{
253 struct Qdisc *q;
254 int err = -ENOMEM;
255
256 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1),
257 extack);
258 if (q) {
259 err = fifo_set_limit(q, limit);
260 if (err < 0) {
261 qdisc_put(q);
262 q = NULL;
263 }
264 }
265
266 return q ? : ERR_PTR(err);
267}
268EXPORT_SYMBOL(fifo_create_dflt);