// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/if_arp.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_tables.h>

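/*
 * Private state of the generic cmp expression: the constant data to compare
 * against, the source register holding the runtime value, the comparison
 * length in bytes and the comparison operator.
 */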
struct nft_cmp_expr {
	struct nft_data		data;
	u8			sreg;
	u8			len;
	enum nft_cmp_ops	op:8;
};

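/*
 * Compare the runtime value in the source register against the constant.
 * memcmp() yields a lexicographic byte-wise ordering; for data held in
 * network byte order this is equivalent to a numeric comparison. On a
 * mismatch the verdict is set to NFT_BREAK, so the current rule stops
 * matching and evaluation continues with the next rule.
 */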
void nft_cmp_eval(const struct nft_expr *expr,
		  struct nft_regs *regs,
		  const struct nft_pktinfo *pkt)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);
	int d;

	d = memcmp(&regs->data[priv->sreg], &priv->data, priv->len);
	switch (priv->op) {
	case NFT_CMP_EQ:
		if (d != 0)
			goto mismatch;
		break;
	case NFT_CMP_NEQ:
		if (d == 0)
			goto mismatch;
		break;
	case NFT_CMP_LT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_LTE:
		if (d > 0)
			goto mismatch;
		break;
	case NFT_CMP_GT:
		if (d == 0)
			goto mismatch;
		fallthrough;
	case NFT_CMP_GTE:
		if (d < 0)
			goto mismatch;
		break;
	}
	return;

mismatch:
	regs->verdict.code = NFT_BREAK;
}

static const struct nla_policy nft_cmp_policy[NFTA_CMP_MAX + 1] = {
	[NFTA_CMP_SREG]	= { .type = NLA_U32 },
	[NFTA_CMP_OP]	= { .type = NLA_U32 },
	[NFTA_CMP_DATA]	= { .type = NLA_NESTED },
};

static int nft_cmp_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_cmp_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	int err;

	err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	if (desc.type != NFT_DATA_VALUE) {
		err = -EINVAL;
		nft_data_release(&priv->data, desc.type);
		return err;
	}

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	priv->op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	priv->len = desc.len;
	return 0;
}

static int nft_cmp_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(priv->op)))
		goto nla_put_failure;

	if (nft_data_dump(skb, NFTA_CMP_DATA, &priv->data,
			  NFT_DATA_VALUE, priv->len) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

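/*
 * Scratch storage used to convert a constant from network byte order to
 * host byte order when the offload register is flagged
 * NFT_OFFLOAD_F_NETWORK2HOST.
 */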
union nft_cmp_offload_data {
	u16	val16;
	u32	val32;
	u64	val64;
};

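/* Convert a 2, 4 or 8 byte big-endian value to host byte order. */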
static void nft_payload_n2h(union nft_cmp_offload_data *data,
			    const u8 *val, u32 len)
{
	switch (len) {
	case 2:
		data->val16 = ntohs(*((u16 *)val));
		break;
	case 4:
		data->val32 = ntohl(*((u32 *)val));
		break;
	case 8:
		data->val64 = be64_to_cpu(*((u64 *)val));
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

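/*
 * Translate the comparison into a flow dissector key/mask pair for hardware
 * offload. Only equality tests can be expressed this way; any other operator
 * is rejected with -EOPNOTSUPP.
 */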
static int __nft_cmp_offload(struct nft_offload_ctx *ctx,
			     struct nft_flow_rule *flow,
			     const struct nft_cmp_expr *priv)
{
	struct nft_offload_reg *reg = &ctx->regs[priv->sreg];
	union nft_cmp_offload_data _data, _datamask;
	u8 *mask = (u8 *)&flow->match.mask;
	u8 *key = (u8 *)&flow->match.key;
	u8 *data, *datamask;

	if (priv->op != NFT_CMP_EQ || priv->len > reg->len)
		return -EOPNOTSUPP;

	if (reg->flags & NFT_OFFLOAD_F_NETWORK2HOST) {
		nft_payload_n2h(&_data, (u8 *)&priv->data, reg->len);
		nft_payload_n2h(&_datamask, (u8 *)&reg->mask, reg->len);
		data = (u8 *)&_data;
		datamask = (u8 *)&_datamask;
	} else {
		data = (u8 *)&priv->data;
		datamask = (u8 *)&reg->mask;
	}

	memcpy(key + reg->offset, data, reg->len);
	memcpy(mask + reg->offset, datamask, reg->len);

	flow->match.dissector.used_keys |= BIT(reg->key);
	flow->match.dissector.offset[reg->key] = reg->base_offset;

	if (reg->key == FLOW_DISSECTOR_KEY_META &&
	    reg->offset == offsetof(struct nft_flow_key, meta.ingress_iftype) &&
	    nft_reg_load16(priv->data.data) != ARPHRD_ETHER)
		return -EOPNOTSUPP;

	nft_offload_update_dependency(ctx, &priv->data, reg->len);

	return 0;
}

static int nft_cmp_offload(struct nft_offload_ctx *ctx,
			   struct nft_flow_rule *flow,
			   const struct nft_expr *expr)
{
	const struct nft_cmp_expr *priv = nft_expr_priv(expr);

	return __nft_cmp_offload(ctx, flow, priv);
}

static const struct nft_expr_ops nft_cmp_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_expr)),
	.eval		= nft_cmp_eval,
	.init		= nft_cmp_init,
	.dump		= nft_cmp_dump,
	.offload	= nft_cmp_offload,
};

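/*
 * Fast path: equality/inequality tests on values of up to 32 bits are stored
 * in struct nft_cmp_fast_expr and evaluated inline by the nf_tables core
 * rather than through nft_cmp_eval().
 */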
static int nft_cmp_fast_init(const struct nft_ctx *ctx,
			     const struct nft_expr *expr,
			     const struct nlattr * const tb[])
{
	struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_data_desc desc;
	struct nft_data data;
	int err;

	err = nft_data_init(NULL, &data, sizeof(data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return err;

	err = nft_parse_register_load(tb[NFTA_CMP_SREG], &priv->sreg, desc.len);
	if (err < 0)
		return err;

	desc.len *= BITS_PER_BYTE;

	priv->mask = nft_cmp_fast_mask(desc.len);
	priv->data = data.data[0] & priv->mask;
	priv->len  = desc.len;
	priv->inv  = ntohl(nla_get_be32(tb[NFTA_CMP_OP])) != NFT_CMP_EQ;
	return 0;
}

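/* Rebuild a struct nft_cmp_expr on the stack so __nft_cmp_offload() can be reused. */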
static int nft_cmp_fast_offload(struct nft_offload_ctx *ctx,
				struct nft_flow_rule *flow,
				const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	struct nft_cmp_expr cmp = {
		.data	= {
			.data	= {
				[0] = priv->data,
			},
		},
		.sreg	= priv->sreg,
		.len	= priv->len / BITS_PER_BYTE,
		.op	= priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ,
	};

	return __nft_cmp_offload(ctx, flow, &cmp);
}

static int nft_cmp_fast_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
	const struct nft_cmp_fast_expr *priv = nft_expr_priv(expr);
	enum nft_cmp_ops op = priv->inv ? NFT_CMP_NEQ : NFT_CMP_EQ;
	struct nft_data data;

	if (nft_dump_register(skb, NFTA_CMP_SREG, priv->sreg))
		goto nla_put_failure;
	if (nla_put_be32(skb, NFTA_CMP_OP, htonl(op)))
		goto nla_put_failure;

	data.data[0] = priv->data;
	if (nft_data_dump(skb, NFTA_CMP_DATA, &data,
			  NFT_DATA_VALUE, priv->len / BITS_PER_BYTE) < 0)
		goto nla_put_failure;
	return 0;

nla_put_failure:
	return -1;
}

const struct nft_expr_ops nft_cmp_fast_ops = {
	.type		= &nft_cmp_type,
	.size		= NFT_EXPR_SIZE(sizeof(struct nft_cmp_fast_expr)),
	.eval		= NULL,	/* inlined */
	.init		= nft_cmp_fast_init,
	.dump		= nft_cmp_fast_dump,
	.offload	= nft_cmp_fast_offload,
};

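/*
 * Choose the implementation at rule creation time: use the fast ops for
 * EQ/NEQ comparisons that fit into a single 32-bit register word, otherwise
 * fall back to the generic cmp expression.
 */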
static const struct nft_expr_ops *
nft_cmp_select_ops(const struct nft_ctx *ctx, const struct nlattr * const tb[])
{
	struct nft_data_desc desc;
	struct nft_data data;
	enum nft_cmp_ops op;
	int err;

	if (tb[NFTA_CMP_SREG] == NULL ||
	    tb[NFTA_CMP_OP] == NULL ||
	    tb[NFTA_CMP_DATA] == NULL)
		return ERR_PTR(-EINVAL);

	op = ntohl(nla_get_be32(tb[NFTA_CMP_OP]));
	switch (op) {
	case NFT_CMP_EQ:
	case NFT_CMP_NEQ:
	case NFT_CMP_LT:
	case NFT_CMP_LTE:
	case NFT_CMP_GT:
	case NFT_CMP_GTE:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	err = nft_data_init(NULL, &data, sizeof(data), &desc,
			    tb[NFTA_CMP_DATA]);
	if (err < 0)
		return ERR_PTR(err);

	if (desc.type != NFT_DATA_VALUE)
		goto err1;

	if (desc.len <= sizeof(u32) && (op == NFT_CMP_EQ || op == NFT_CMP_NEQ))
		return &nft_cmp_fast_ops;

	return &nft_cmp_ops;
err1:
	nft_data_release(&data, desc.type);
	return ERR_PTR(-EINVAL);
}

struct nft_expr_type nft_cmp_type __read_mostly = {
	.name		= "cmp",
	.select_ops	= nft_cmp_select_ops,
	.policy		= nft_cmp_policy,
	.maxattr	= NFTA_CMP_MAX,
	.owner		= THIS_MODULE,
};