// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/em_meta.c	Metadata ematch
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 *
 * ==========================================================================
 *
 *	The metadata ematch compares two meta objects where each object
 *	represents either a meta value stored in the kernel or a static
 *	value provided by userspace. The objects are not provided by
 *	userspace itself but rather as definitions carrying the information
 *	needed to build them. Every object is of a certain type, which must
 *	equal the type of the object it is compared against.
 *
 *	The definition of an object consists of a type (meta type), an
 *	identifier (meta id) and additional type specific information.
 *	The meta id is either TCF_META_ID_VALUE for values provided by
 *	userspace or an index into the meta operations table consisting of
 *	function pointers to type specific meta data collectors returning
 *	the requested meta value.
 *
 *	          lvalue                                   rvalue
 *	       +-----------+                           +-----------+
 *	       | type: INT |                           | type: INT |
 *	  def  | id: DEV   |                           | id: VALUE |
 *	       | data:     |                           | data: 3   |
 *	       +-----------+                           +-----------+
 *	             |                                       |
 *	             ---> meta_ops[INT][DEV](...)            |
 *	                       |                             |
 *	             -----------                             |
 *	             V                                       V
 *	       +-----------+                           +-----------+
 *	       | type: INT |                           | type: INT |
 *	  obj  | id: DEV   |                           | id: VALUE |
 *	       | data: 2   |<-- data filled in         | data: 3   |
 *	       +-----------+                           +-----------+
 *	             |                                       |
 *	             ------------> 2  equals  3 <------------
 *
 *	This is a simplified schema; the complexity varies depending
 *	on the meta type. Obviously, the length of the data must also
 *	be provided for non-numeric types.
 *
 *	Additionally, type dependent modifiers such as shift operators
 *	or masks may be applied to extend the functionality. As of now,
 *	the variable length type supports shifting the byte string to
 *	the right, eating up any number of octets and thus supporting
 *	wildcard interface name comparisons such as "ppp%" matching
 *	ppp0..9.
 *
 *	NOTE: Certain meta values depend on other subsystems and are
 *	      only available if that subsystem is enabled in the kernel.
 */
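
/*
 * Illustrative userspace usage (an assumed example: it relies on
 * iproute2's "basic" classifier with ematch support, see tc-ematch(8)
 * for the authoritative syntax):
 *
 *	tc filter add dev eth0 parent 1: basic \
 *		match 'meta(priority eq 3)' classid 1:3
 *
 * Here "priority" selects the int_priority collector below as the
 * lvalue and the constant 3 becomes a TCF_META_ID_VALUE rvalue.
 */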

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/loadavg.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/if_vlan.h>
#include <linux/tc_ematch/tc_em_meta.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

struct meta_obj {
	unsigned long		value;
	unsigned int		len;
};

struct meta_value {
	struct tcf_meta_val	hdr;
	unsigned long		val;
	unsigned int		len;
};

struct meta_match {
	struct meta_value	lvalue;
	struct meta_value	rvalue;
};

static inline int meta_id(struct meta_value *v)
{
	return TCF_META_ID(v->hdr.kind);
}

static inline int meta_type(struct meta_value *v)
{
	return TCF_META_TYPE(v->hdr.kind);
}

#define META_COLLECTOR(FUNC) static void meta_##FUNC(struct sk_buff *skb, \
	struct tcf_pkt_info *info, struct meta_value *v, \
	struct meta_obj *dst, int *err)
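
/*
 * For reference, META_COLLECTOR(int_priority) below expands to
 *
 *	static void meta_int_priority(struct sk_buff *skb,
 *				      struct tcf_pkt_info *info,
 *				      struct meta_value *v,
 *				      struct meta_obj *dst, int *err)
 *
 * i.e. every collector shares one signature so it can be stored in the
 * __meta_ops table further down. Collectors report failure by setting
 * *err and otherwise fill dst->value (and dst->len for variable length
 * types).
 */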

/**************************************************************************
 * System status & misc
 **************************************************************************/

META_COLLECTOR(int_random)
{
	get_random_bytes(&dst->value, sizeof(dst->value));
}

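/*
 * avenrun[] holds the 1, 5 and 15 minute load averages as fixed point
 * numbers with FSHIFT fractional bits; fixed_loadavg() rounds and scales
 * them to an integer in units of 1/100, e.g. a load of 1.23 is reported
 * as 123.
 */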
static inline unsigned long fixed_loadavg(int load)
{
	int rnd_load = load + (FIXED_1/200);
	int rnd_frac = ((rnd_load & (FIXED_1-1)) * 100) >> FSHIFT;

	return ((rnd_load >> FSHIFT) * 100) + rnd_frac;
}

META_COLLECTOR(int_loadavg_0)
{
	dst->value = fixed_loadavg(avenrun[0]);
}

META_COLLECTOR(int_loadavg_1)
{
	dst->value = fixed_loadavg(avenrun[1]);
}

META_COLLECTOR(int_loadavg_2)
{
	dst->value = fixed_loadavg(avenrun[2]);
}

/**************************************************************************
 * Device names & indices
 **************************************************************************/

static inline int int_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = dev->ifindex;
	return 0;
}

static inline int var_dev(struct net_device *dev, struct meta_obj *dst)
{
	if (unlikely(dev == NULL))
		return -1;

	dst->value = (unsigned long) dev->name;
	dst->len = strlen(dev->name);
	return 0;
}

META_COLLECTOR(int_dev)
{
	*err = int_dev(skb->dev, dst);
}

META_COLLECTOR(var_dev)
{
	*err = var_dev(skb->dev, dst);
}

/**************************************************************************
 * vlan tag
 **************************************************************************/

META_COLLECTOR(int_vlan_tag)
{
	unsigned short tag;

	if (skb_vlan_tag_present(skb))
		dst->value = skb_vlan_tag_get(skb);
	else if (!__vlan_get_tag(skb, &tag))
		dst->value = tag;
	else
		*err = -1;
}

/**************************************************************************
 * skb attributes
 **************************************************************************/

META_COLLECTOR(int_priority)
{
	dst->value = skb->priority;
}

META_COLLECTOR(int_protocol)
{
	/* Let userspace take care of the byte ordering */
	dst->value = skb_protocol(skb, false);
}

META_COLLECTOR(int_pkttype)
{
	dst->value = skb->pkt_type;
}

META_COLLECTOR(int_pktlen)
{
	dst->value = skb->len;
}

META_COLLECTOR(int_datalen)
{
	dst->value = skb->data_len;
}

META_COLLECTOR(int_maclen)
{
	dst->value = skb->mac_len;
}

META_COLLECTOR(int_rxhash)
{
	dst->value = skb_get_hash(skb);
}

/**************************************************************************
 * Netfilter
 **************************************************************************/

META_COLLECTOR(int_mark)
{
	dst->value = skb->mark;
}

/**************************************************************************
 * Traffic Control
 **************************************************************************/

META_COLLECTOR(int_tcindex)
{
	dst->value = skb->tc_index;
}

/**************************************************************************
 * Routing
 **************************************************************************/

META_COLLECTOR(int_rtclassid)
{
	if (unlikely(skb_dst(skb) == NULL))
		*err = -1;
	else
#ifdef CONFIG_IP_ROUTE_CLASSID
		dst->value = skb_dst(skb)->tclassid;
#else
		dst->value = 0;
#endif
}

META_COLLECTOR(int_rtiif)
{
	if (unlikely(skb_rtable(skb) == NULL))
		*err = -1;
	else
		dst->value = inet_iif(skb);
}

/**************************************************************************
 * Socket Attributes
 **************************************************************************/

#define skip_nonlocal(skb) \
	(unlikely(skb->sk == NULL))
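
/*
 * The socket collectors only make sense when the skb is associated with
 * a local socket; skip_nonlocal() flags an error otherwise so the match
 * simply fails. Collectors that read fields which only exist in full
 * sockets use skb_to_full_sk() and bail out when no full socket is
 * available.
 */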

META_COLLECTOR(int_sk_family)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_family;
}

META_COLLECTOR(int_sk_state)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_state;
}

META_COLLECTOR(int_sk_reuse)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_reuse;
}

META_COLLECTOR(int_sk_bound_if)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	/* No error if bound_dev_if is 0, legal userspace check */
	dst->value = skb->sk->sk_bound_dev_if;
}

META_COLLECTOR(var_sk_bound_if)
{
	int bound_dev_if;

	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}

	bound_dev_if = READ_ONCE(skb->sk->sk_bound_dev_if);
	if (bound_dev_if == 0) {
		dst->value = (unsigned long) "any";
		dst->len = 3;
	} else {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(skb->sk),
					   bound_dev_if);
		*err = var_dev(dev, dst);
		rcu_read_unlock();
	}
}

META_COLLECTOR(int_sk_refcnt)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = refcount_read(&skb->sk->sk_refcnt);
}

META_COLLECTOR(int_sk_rcvbuf)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_rcvbuf;
}

META_COLLECTOR(int_sk_shutdown)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_shutdown;
}

META_COLLECTOR(int_sk_proto)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_protocol;
}

META_COLLECTOR(int_sk_type)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_type;
}

META_COLLECTOR(int_sk_rmem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk_rmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_wmem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk_wmem_alloc_get(sk);
}

META_COLLECTOR(int_sk_omem_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = atomic_read(&sk->sk_omem_alloc);
}

META_COLLECTOR(int_sk_rcv_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_receive_queue.qlen;
}

META_COLLECTOR(int_sk_snd_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_write_queue.qlen;
}

META_COLLECTOR(int_sk_wmem_queued)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_wmem_queued);
}

META_COLLECTOR(int_sk_fwd_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk_forward_alloc_get(sk);
}

META_COLLECTOR(int_sk_sndbuf)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_sndbuf;
}

META_COLLECTOR(int_sk_alloc)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = (__force int) sk->sk_allocation;
}

META_COLLECTOR(int_sk_hash)
{
	if (skip_nonlocal(skb)) {
		*err = -1;
		return;
	}
	dst->value = skb->sk->sk_hash;
}

META_COLLECTOR(int_sk_lingertime)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_lingertime) / HZ;
}

META_COLLECTOR(int_sk_err_qlen)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_error_queue.qlen;
}

META_COLLECTOR(int_sk_ack_bl)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_ack_backlog);
}

META_COLLECTOR(int_sk_max_ack_bl)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_max_ack_backlog);
}

META_COLLECTOR(int_sk_prio)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_priority);
}

META_COLLECTOR(int_sk_rcvlowat)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_rcvlowat);
}

META_COLLECTOR(int_sk_rcvtimeo)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_rcvtimeo) / HZ;
}

META_COLLECTOR(int_sk_sndtimeo)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = READ_ONCE(sk->sk_sndtimeo) / HZ;
}

META_COLLECTOR(int_sk_sendmsg_off)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_frag.offset;
}

META_COLLECTOR(int_sk_write_pend)
{
	const struct sock *sk = skb_to_full_sk(skb);

	if (!sk) {
		*err = -1;
		return;
	}
	dst->value = sk->sk_write_pending;
}

/**************************************************************************
 * Meta value collectors assignment table
 **************************************************************************/

struct meta_ops {
	void		(*get)(struct sk_buff *, struct tcf_pkt_info *,
			       struct meta_value *, struct meta_obj *,
			       int *);
};

#define META_ID(name) TCF_META_ID_##name
#define META_FUNC(name) { .get = meta_##name }

/* Meta value operations table listing all meta value collectors and
 * assigning them to a type and meta id. */
static struct meta_ops __meta_ops[TCF_META_TYPE_MAX + 1][TCF_META_ID_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		[META_ID(DEV)]			= META_FUNC(var_dev),
		[META_ID(SK_BOUND_IF)]		= META_FUNC(var_sk_bound_if),
	},
	[TCF_META_TYPE_INT] = {
		[META_ID(RANDOM)]		= META_FUNC(int_random),
		[META_ID(LOADAVG_0)]		= META_FUNC(int_loadavg_0),
		[META_ID(LOADAVG_1)]		= META_FUNC(int_loadavg_1),
		[META_ID(LOADAVG_2)]		= META_FUNC(int_loadavg_2),
		[META_ID(DEV)]			= META_FUNC(int_dev),
		[META_ID(PRIORITY)]		= META_FUNC(int_priority),
		[META_ID(PROTOCOL)]		= META_FUNC(int_protocol),
		[META_ID(PKTTYPE)]		= META_FUNC(int_pkttype),
		[META_ID(PKTLEN)]		= META_FUNC(int_pktlen),
		[META_ID(DATALEN)]		= META_FUNC(int_datalen),
		[META_ID(MACLEN)]		= META_FUNC(int_maclen),
		[META_ID(NFMARK)]		= META_FUNC(int_mark),
		[META_ID(TCINDEX)]		= META_FUNC(int_tcindex),
		[META_ID(RTCLASSID)]		= META_FUNC(int_rtclassid),
		[META_ID(RTIIF)]		= META_FUNC(int_rtiif),
		[META_ID(SK_FAMILY)]		= META_FUNC(int_sk_family),
		[META_ID(SK_STATE)]		= META_FUNC(int_sk_state),
		[META_ID(SK_REUSE)]		= META_FUNC(int_sk_reuse),
		[META_ID(SK_BOUND_IF)]		= META_FUNC(int_sk_bound_if),
		[META_ID(SK_REFCNT)]		= META_FUNC(int_sk_refcnt),
		[META_ID(SK_RCVBUF)]		= META_FUNC(int_sk_rcvbuf),
		[META_ID(SK_SNDBUF)]		= META_FUNC(int_sk_sndbuf),
		[META_ID(SK_SHUTDOWN)]		= META_FUNC(int_sk_shutdown),
		[META_ID(SK_PROTO)]		= META_FUNC(int_sk_proto),
		[META_ID(SK_TYPE)]		= META_FUNC(int_sk_type),
		[META_ID(SK_RMEM_ALLOC)]	= META_FUNC(int_sk_rmem_alloc),
		[META_ID(SK_WMEM_ALLOC)]	= META_FUNC(int_sk_wmem_alloc),
		[META_ID(SK_OMEM_ALLOC)]	= META_FUNC(int_sk_omem_alloc),
		[META_ID(SK_WMEM_QUEUED)]	= META_FUNC(int_sk_wmem_queued),
		[META_ID(SK_RCV_QLEN)]		= META_FUNC(int_sk_rcv_qlen),
		[META_ID(SK_SND_QLEN)]		= META_FUNC(int_sk_snd_qlen),
		[META_ID(SK_ERR_QLEN)]		= META_FUNC(int_sk_err_qlen),
		[META_ID(SK_FORWARD_ALLOCS)]	= META_FUNC(int_sk_fwd_alloc),
		[META_ID(SK_ALLOCS)]		= META_FUNC(int_sk_alloc),
		[META_ID(SK_HASH)]		= META_FUNC(int_sk_hash),
		[META_ID(SK_LINGERTIME)]	= META_FUNC(int_sk_lingertime),
		[META_ID(SK_ACK_BACKLOG)]	= META_FUNC(int_sk_ack_bl),
		[META_ID(SK_MAX_ACK_BACKLOG)]	= META_FUNC(int_sk_max_ack_bl),
		[META_ID(SK_PRIO)]		= META_FUNC(int_sk_prio),
		[META_ID(SK_RCVLOWAT)]		= META_FUNC(int_sk_rcvlowat),
		[META_ID(SK_RCVTIMEO)]		= META_FUNC(int_sk_rcvtimeo),
		[META_ID(SK_SNDTIMEO)]		= META_FUNC(int_sk_sndtimeo),
		[META_ID(SK_SENDMSG_OFF)]	= META_FUNC(int_sk_sendmsg_off),
		[META_ID(SK_WRITE_PENDING)]	= META_FUNC(int_sk_write_pend),
		[META_ID(VLAN_TAG)]		= META_FUNC(int_vlan_tag),
		[META_ID(RXHASH)]		= META_FUNC(int_rxhash),
	}
};

static inline struct meta_ops *meta_ops(struct meta_value *val)
{
	return &__meta_ops[meta_type(val)][meta_id(val)];
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_VAR
 **************************************************************************/

static int meta_var_compare(struct meta_obj *a, struct meta_obj *b)
{
	int r = a->len - b->len;

	if (r == 0)
		r = memcmp((void *) a->value, (void *) b->value, a->len);

	return r;
}

static int meta_var_change(struct meta_value *dst, struct nlattr *nla)
{
	int len = nla_len(nla);

	dst->val = (unsigned long)kmemdup(nla_data(nla), len, GFP_KERNEL);
	if (dst->val == 0UL)
		return -ENOMEM;
	dst->len = len;
	return 0;
}

static void meta_var_destroy(struct meta_value *v)
{
	kfree((void *) v->val);
}

static void meta_var_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	int shift = v->hdr.shift;

	if (shift && shift < dst->len)
		dst->len -= shift;
}

static int meta_var_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->val && v->len &&
	    nla_put(skb, tlv, v->len, (void *) v->val))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations for TCF_META_TYPE_INT
 **************************************************************************/

static int meta_int_compare(struct meta_obj *a, struct meta_obj *b)
{
	/* Let gcc optimize it, the unlikely is not really based on
	 * some numbers but jump free code for mismatches seems
	 * more logical. */
	if (unlikely(a->value == b->value))
		return 0;
	else if (a->value < b->value)
		return -1;
	else
		return 1;
}

static int meta_int_change(struct meta_value *dst, struct nlattr *nla)
{
	if (nla_len(nla) >= sizeof(unsigned long)) {
		dst->val = *(unsigned long *) nla_data(nla);
		dst->len = sizeof(unsigned long);
	} else if (nla_len(nla) == sizeof(u32)) {
		dst->val = nla_get_u32(nla);
		dst->len = sizeof(u32);
	} else
		return -EINVAL;

	return 0;
}

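/*
 * Example: with hdr.shift == 8 and a mask value of 0xff the comparison
 * only sees the second least significant byte of the collected value
 * (dst->value >>= 8; dst->value &= 0xff).
 */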
static void meta_int_apply_extras(struct meta_value *v,
				  struct meta_obj *dst)
{
	if (v->hdr.shift)
		dst->value >>= v->hdr.shift;

	if (v->val)
		dst->value &= v->val;
}

static int meta_int_dump(struct sk_buff *skb, struct meta_value *v, int tlv)
{
	if (v->len == sizeof(unsigned long)) {
		if (nla_put(skb, tlv, sizeof(unsigned long), &v->val))
			goto nla_put_failure;
	} else if (v->len == sizeof(u32)) {
		if (nla_put_u32(skb, tlv, v->val))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -1;
}

/**************************************************************************
 * Type specific operations table
 **************************************************************************/

struct meta_type_ops {
	void	(*destroy)(struct meta_value *);
	int	(*compare)(struct meta_obj *, struct meta_obj *);
	int	(*change)(struct meta_value *, struct nlattr *);
	void	(*apply_extras)(struct meta_value *, struct meta_obj *);
	int	(*dump)(struct sk_buff *, struct meta_value *, int);
};

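/*
 * TCF_META_TYPE_INT needs no destroy callback because its value is kept
 * inline in struct meta_value, whereas TCF_META_TYPE_VAR owns a buffer
 * allocated by meta_var_change() and released by meta_var_destroy().
 */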
static const struct meta_type_ops __meta_type_ops[TCF_META_TYPE_MAX + 1] = {
	[TCF_META_TYPE_VAR] = {
		.destroy = meta_var_destroy,
		.compare = meta_var_compare,
		.change = meta_var_change,
		.apply_extras = meta_var_apply_extras,
		.dump = meta_var_dump
	},
	[TCF_META_TYPE_INT] = {
		.compare = meta_int_compare,
		.change = meta_int_change,
		.apply_extras = meta_int_apply_extras,
		.dump = meta_int_dump
	}
};

static inline const struct meta_type_ops *meta_type_ops(struct meta_value *v)
{
	return &__meta_type_ops[meta_type(v)];
}

/**************************************************************************
 * Core
 **************************************************************************/

static int meta_get(struct sk_buff *skb, struct tcf_pkt_info *info,
		    struct meta_value *v, struct meta_obj *dst)
{
	int err = 0;

	if (meta_id(v) == TCF_META_ID_VALUE) {
		dst->value = v->val;
		dst->len = v->len;
		return 0;
	}

	meta_ops(v)->get(skb, info, v, dst, &err);
	if (err < 0)
		return err;

	if (meta_type_ops(v)->apply_extras)
		meta_type_ops(v)->apply_extras(v, dst);

	return 0;
}

static int em_meta_match(struct sk_buff *skb, struct tcf_ematch *m,
			 struct tcf_pkt_info *info)
{
	int r;
	struct meta_match *meta = (struct meta_match *) m->data;
	struct meta_obj l_value, r_value;

	if (meta_get(skb, info, &meta->lvalue, &l_value) < 0 ||
	    meta_get(skb, info, &meta->rvalue, &r_value) < 0)
		return 0;

	r = meta_type_ops(&meta->lvalue)->compare(&l_value, &r_value);

	switch (meta->lvalue.hdr.op) {
	case TCF_EM_OPND_EQ:
		return !r;
	case TCF_EM_OPND_LT:
		return r < 0;
	case TCF_EM_OPND_GT:
		return r > 0;
	}

	return 0;
}

static void meta_delete(struct meta_match *meta)
{
	if (meta) {
		const struct meta_type_ops *ops = meta_type_ops(&meta->lvalue);

		if (ops && ops->destroy) {
			ops->destroy(&meta->lvalue);
			ops->destroy(&meta->rvalue);
		}
	}

	kfree(meta);
}

static inline int meta_change_data(struct meta_value *dst, struct nlattr *nla)
{
	if (nla) {
		if (nla_len(nla) == 0)
			return -EINVAL;

		return meta_type_ops(dst)->change(dst, nla);
	}

	return 0;
}

static inline int meta_is_supported(struct meta_value *val)
{
	return !meta_id(val) || meta_ops(val)->get;
}

static const struct nla_policy meta_policy[TCA_EM_META_MAX + 1] = {
	[TCA_EM_META_HDR]	= { .len = sizeof(struct tcf_meta_hdr) },
};

static int em_meta_change(struct net *net, void *data, int len,
			  struct tcf_ematch *m)
{
	int err;
	struct nlattr *tb[TCA_EM_META_MAX + 1];
	struct tcf_meta_hdr *hdr;
	struct meta_match *meta = NULL;

	err = nla_parse_deprecated(tb, TCA_EM_META_MAX, data, len,
				   meta_policy, NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_EM_META_HDR] == NULL)
		goto errout;
	hdr = nla_data(tb[TCA_EM_META_HDR]);

	if (TCF_META_TYPE(hdr->left.kind) != TCF_META_TYPE(hdr->right.kind) ||
	    TCF_META_TYPE(hdr->left.kind) > TCF_META_TYPE_MAX ||
	    TCF_META_ID(hdr->left.kind) > TCF_META_ID_MAX ||
	    TCF_META_ID(hdr->right.kind) > TCF_META_ID_MAX)
		goto errout;

	meta = kzalloc(sizeof(*meta), GFP_KERNEL);
	if (meta == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	memcpy(&meta->lvalue.hdr, &hdr->left, sizeof(hdr->left));
	memcpy(&meta->rvalue.hdr, &hdr->right, sizeof(hdr->right));

	if (!meta_is_supported(&meta->lvalue) ||
	    !meta_is_supported(&meta->rvalue)) {
		err = -EOPNOTSUPP;
		goto errout;
	}

	if (meta_change_data(&meta->lvalue, tb[TCA_EM_META_LVALUE]) < 0 ||
	    meta_change_data(&meta->rvalue, tb[TCA_EM_META_RVALUE]) < 0)
		goto errout;

	m->datalen = sizeof(*meta);
	m->data = (unsigned long) meta;

	err = 0;
errout:
	if (err && meta)
		meta_delete(meta);
	return err;
}

static void em_meta_destroy(struct tcf_ematch *m)
{
	if (m)
		meta_delete((struct meta_match *) m->data);
}

static int em_meta_dump(struct sk_buff *skb, struct tcf_ematch *em)
{
	struct meta_match *meta = (struct meta_match *) em->data;
	struct tcf_meta_hdr hdr;
	const struct meta_type_ops *ops;

	memset(&hdr, 0, sizeof(hdr));
	memcpy(&hdr.left, &meta->lvalue.hdr, sizeof(hdr.left));
	memcpy(&hdr.right, &meta->rvalue.hdr, sizeof(hdr.right));

	if (nla_put(skb, TCA_EM_META_HDR, sizeof(hdr), &hdr))
		goto nla_put_failure;

	ops = meta_type_ops(&meta->lvalue);
	if (ops->dump(skb, &meta->lvalue, TCA_EM_META_LVALUE) < 0 ||
	    ops->dump(skb, &meta->rvalue, TCA_EM_META_RVALUE) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -1;
}

static struct tcf_ematch_ops em_meta_ops = {
	.kind	  = TCF_EM_META,
	.change	  = em_meta_change,
	.match	  = em_meta_match,
	.destroy  = em_meta_destroy,
	.dump	  = em_meta_dump,
	.owner	  = THIS_MODULE,
	.link	  = LIST_HEAD_INIT(em_meta_ops.link)
};

static int __init init_em_meta(void)
{
	return tcf_em_register(&em_meta_ops);
}

static void __exit exit_em_meta(void)
{
	tcf_em_unregister(&em_meta_ops);
}

MODULE_DESCRIPTION("ematch classifier for various internal kernel metadata, skb metadata and sk metadata");
MODULE_LICENSE("GPL");

module_init(init_em_meta);
module_exit(exit_em_meta);

MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);