// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */


#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>

/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 *   x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 * 0xffff	0x10000		use entry [indices-1]
 */


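/*
 * Worked example of the table above: with "indices 4", classid x:5 is
 * class number 6 and marks with entry [5 & 3] = entry [1].
 *
 * Illustrative setup (iproute2 syntax quoted from memory; details may
 * vary by version):
 *
 *	tc qdisc add dev eth0 handle 1:0 root dsmark indices 4 \
 *		default_index 0 set_tc_index
 *	tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * Entry [1] then keeps only the ECN bits (mask 0x3) and ORs in 0xb8,
 * i.e. DSCP 46 (EF PHB) shifted into the upper six bits of the field.
 */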
#define NO_DEFAULT_INDEX	(1 << 16)

struct mask_value {
	u8			mask;
	u8			value;
};

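/*
 * Per-qdisc state: one {mask, value} pair per entry.  Tables of up to
 * DSMARK_EMBEDDED_SZ entries live in 'embedded' so that small
 * configurations need no separate allocation (see dsmark_init()).
 */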
struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;
	struct mask_value	*mv;
	u16			indices;
	u8			set_tc_index;
	u32			default_index;	/* index range is 0...0xffff */
#define DSMARK_EMBEDDED_SZ	16
	struct mask_value	embedded[DSMARK_EMBEDDED_SZ];
};

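/*
 * Class numbers are 1-based (entry [i] is class i + 1); 0 means
 * "no class", so a valid index lies in 1..indices.
 */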
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return index <= p->indices && index > 0;
}

/* ------------------------- Class/flow operations ------------------------- */

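/*
 * Replace the inner queue.  Grafting NULL attaches a default pfifo,
 * falling back to noop_qdisc if even that cannot be allocated.
 */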
static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p],new %p,old %p)\n",
		 __func__, sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle, NULL);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &p->q);
	return 0;
}

static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	return p->q;
}

static unsigned long dsmark_find(struct Qdisc *sch, u32 classid)
{
	return TC_H_MIN(classid) + 1;
}

static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n",
		 __func__, sch, qdisc_priv(sch), classid);

	return dsmark_find(sch, classid);
}

static void dsmark_unbind_filter(struct Qdisc *sch, unsigned long cl)
{
}

static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};

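/*
 * Classes are preallocated with the table, so "change" only updates
 * the mask/value pair of an existing entry; nothing is created here.
 */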
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;

	pr_debug("%s(sch %p,[qdisc %p],classid %x,parent %x), arg 0x%lx\n",
		 __func__, sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_VALUE])
		p->mv[*arg - 1].value = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mv[*arg - 1].mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	err = 0;

errout:
	return err;
}

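/*
 * "Deleting" a class just restores the entry's defaults: mask 0xff,
 * value 0, i.e. leave the DS field untouched.
 */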
static int dsmark_delete(struct Qdisc *sch, unsigned long arg,
			 struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	p->mv[arg - 1].mask = 0xff;
	p->mv[arg - 1].value = 0;

	return 0;
}

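/*
 * Walk only entries that differ from the do-nothing default
 * (mask 0xff, value 0); untouched entries are counted but skipped.
 */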
static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("%s(sch %p,[qdisc %p],walker %p)\n",
		 __func__, sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		if (p->mv[i].mask == 0xff && !p->mv[i].value) {
			walker->count++;
			continue;
		}
		if (!tc_qdisc_stats_dump(sch, i + 1, walker))
			break;
	}
}

static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return p->block;
}

/* --------------------------- Qdisc operations ---------------------------- */

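/*
 * Enqueue path: optionally latch the packet's DS field (minus ECN
 * bits) into skb->tc_index, classify to pick the table entry, then
 * hand the packet to the inner qdisc.  Remarking happens at dequeue.
 */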
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	unsigned int len = qdisc_pkt_len(skb);
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("%s(skb %p,sch %p,[qdisc %p])\n", __func__, skb, sch, p);

	if (p->set_tc_index) {
		int wlen = skb_network_offset(skb);

		switch (skb_protocol(skb, true)) {
		case htons(ETH_P_IP):
			wlen += sizeof(struct iphdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			wlen += sizeof(struct ipv6hdr);
			if (!pskb_may_pull(skb, wlen) ||
			    skb_try_make_writable(skb, wlen))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		struct tcf_proto *fl = rcu_dereference_bh(p->filter_list);
		int result = tcf_classify(skb, NULL, fl, &res, false);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q, to_free);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			qdisc_qstats_drop(sch);
		return err;
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

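/*
 * Dequeue path: pull a packet from the inner qdisc and rewrite its
 * DS field as (old & mask) | value, using the entry selected by
 * skb->tc_index masked to the table size.
 */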
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	skb = qdisc_dequeue_peeked(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);
	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mv[index].mask,
				    p->mv[index].value);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mv[index].mask != 0xff || p->mv[index].value)
			pr_warn("%s: unsupported protocol %d\n",
				__func__, ntohs(skb_protocol(skb, true)));
		break;
	}

	return skb;
}

static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	return p->q->ops->peek(p->q);
}

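/*
 * "indices" must be a power of two (hweight32() == 1) so that entry
 * selection can use tc_index & (indices - 1) on the dequeue path.
 */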
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	int i;

	pr_debug("%s(sch %p,[qdisc %p],opt %p)\n", __func__, sch, p, opt);

	if (!opt)
		goto errout;

	err = tcf_block_get(&p->block, &p->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_DSMARK_MAX, opt,
					  dsmark_policy, NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (!tb[TCA_DSMARK_INDICES])
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	if (hweight32(indices) != 1)
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	if (indices <= DSMARK_EMBEDDED_SZ)
		p->mv = p->embedded;
	else
		p->mv = kmalloc_array(indices, sizeof(*p->mv), GFP_KERNEL);
	if (!p->mv) {
		err = -ENOMEM;
		goto errout;
	}
	for (i = 0; i < indices; i++) {
		p->mv[i].mask = 0xff;
		p->mv[i].value = 0;
	}
	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle,
				 NULL);
	if (p->q == NULL)
		p->q = &noop_qdisc;
	else
		qdisc_hash_add(p->q, true);

	pr_debug("%s: qdisc %p\n", __func__, p->q);

	err = 0;
errout:
	return err;
}

static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
	if (p->q)
		qdisc_reset(p->q);
}

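/*
 * Tear down filters and the inner qdisc; the mask/value table is
 * only freed when it was allocated separately from the qdisc.
 */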
static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);

	tcf_block_put(p->block);
	qdisc_put(p->q);
	if (p->mv != p->embedded)
		kfree(p->mv);
}

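/*
 * Dump one class: the minor of the reported handle is the table
 * index (class number - 1), alongside the entry's mask and value.
 */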
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("%s(sch %p,[qdisc %p],class %ld)\n", __func__, sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mv[cl - 1].mask) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->mv[cl - 1].value))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		= dsmark_graft,
	.leaf		= dsmark_leaf,
	.find		= dsmark_find,
	.change		= dsmark_change,
	.delete		= dsmark_delete,
	.walk		= dsmark_walk,
	.tcf_block	= dsmark_tcf_block,
	.bind_tcf	= dsmark_bind_filter,
	.unbind_tcf	= dsmark_unbind_filter,
	.dump		= dsmark_dump_class,
};

static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &dsmark_class_ops,
	.id		= "dsmark",
	.priv_size	= sizeof(struct dsmark_qdisc_data),
	.enqueue	= dsmark_enqueue,
	.dequeue	= dsmark_dequeue,
	.peek		= dsmark_peek,
	.init		= dsmark_init,
	.reset		= dsmark_reset,
	.destroy	= dsmark_destroy,
	.change		= NULL,
	.dump		= dsmark_dump,
	.owner		= THIS_MODULE,
};

static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");