Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2021. Huawei Technologies Co., Ltd
4 */
5#include <linux/kernel.h>
6#include <linux/bpf_verifier.h>
7#include <linux/bpf.h>
8#include <linux/btf.h>
9
/* Defined in bpf_struct_ops.c; descriptor for the dummy ops under test. */
extern struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);

/* Kernel-side staging area for one test invocation: the raw u64 argument
 * values copied from user space, plus a kernel copy of the user-supplied
 * bpf_dummy_ops_state (args[0], when non-zero, is its user pointer).
 */
struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};
19
20static struct bpf_dummy_ops_test_args *
21dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
22{
23 __u32 size_in;
24 struct bpf_dummy_ops_test_args *args;
25 void __user *ctx_in;
26 void __user *u_state;
27
28 size_in = kattr->test.ctx_size_in;
29 if (size_in != sizeof(u64) * nr)
30 return ERR_PTR(-EINVAL);
31
32 args = kzalloc(sizeof(*args), GFP_KERNEL);
33 if (!args)
34 return ERR_PTR(-ENOMEM);
35
36 ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
37 if (copy_from_user(args->args, ctx_in, size_in))
38 goto out;
39
40 /* args[0] is 0 means state argument of test_N will be NULL */
41 u_state = u64_to_user_ptr(args->args[0]);
42 if (u_state && copy_from_user(&args->state, u_state,
43 sizeof(args->state)))
44 goto out;
45
46 return args;
47out:
48 kfree(args);
49 return ERR_PTR(-EFAULT);
50}
51
52static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
53{
54 void __user *u_state;
55
56 u_state = u64_to_user_ptr(args->args[0]);
57 if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
58 return -EFAULT;
59
60 return 0;
61}
62
63static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
64{
65 dummy_ops_test_ret_fn test = (void *)image;
66 struct bpf_dummy_ops_state *state = NULL;
67
68 /* state needs to be NULL if args[0] is 0 */
69 if (args->args[0])
70 state = &args->state;
71 return test(state, args->args[1], args->args[2],
72 args->args[3], args->args[4]);
73}
74
extern const struct bpf_link_ops bpf_struct_ops_link_lops;

/* BPF_PROG_TEST_RUN handler for bpf_dummy_ops programs.
 *
 * Builds a one-off trampoline for the single op the program attaches to,
 * calls it with the user-provided u64 arguments, then writes the
 * (possibly updated) state back to user space and the op's return value
 * to uattr->test.retval.  Returns 0 on success or a negative errno.
 */
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_links *tlinks;
	struct bpf_tramp_link *link = NULL;
	void *image = NULL;
	unsigned int op_idx;
	int prog_ret;
	int err;

	/* Only programs attached to bpf_dummy_ops itself are testable here. */
	if (prog->aux->attach_btf_id != st_ops->type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks) {
		err = -ENOMEM;
		goto out;
	}

	image = bpf_jit_alloc_exec(PAGE_SIZE);
	if (!image) {
		err = -ENOMEM;
		goto out;
	}
	/* Restore W^X page permissions automatically when the exec pages
	 * are freed below.
	 */
	set_vm_flush_reset_perms(image);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out;
	}
	/* prog doesn't take the ownership of the reference from caller */
	bpf_prog_inc(prog);
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);

	/* expected_attach_type holds the member index of the attached op. */
	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[op_idx],
						image, image + PAGE_SIZE);
	if (err < 0)
		goto out;

	/* Make the trampoline page read-only + executable before calling it. */
	set_memory_rox((long)image, 1);
	prog_ret = dummy_ops_call_op(image, args);

	/* Copy the state back even on success so user space sees updates. */
	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	bpf_jit_free_exec(image);
	if (link)
		bpf_link_put(&link->link);
	kfree(tlinks);
	return err;
}
143
/* struct_ops ->init callback; nothing to set up for the dummy ops. */
static int bpf_dummy_init(struct btf *btf)
{
	return 0;
}
148
/* Delegate ctx-access validation to the generic BTF-based tracing check. */
static bool bpf_dummy_ops_is_valid_access(int off, int size,
					  enum bpf_access_type type,
					  const struct bpf_prog *prog,
					  struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
156
/* Verifier callback for accesses through a PTR_TO_BTF_ID pointer: only
 * struct bpf_dummy_ops_state may be accessed this way.
 */
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct bpf_reg_state *reg,
					   int off, int size, enum bpf_access_type atype,
					   u32 *next_btf_id,
					   enum bpf_type_flag *flag)
{
	const struct btf_type *state;
	const struct btf_type *t;
	s32 type_id;
	int err;

	/* Resolve bpf_dummy_ops_state in the same BTF the register points
	 * into, then require the register's type to be exactly that struct.
	 */
	type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	state = btf_type_by_id(reg->btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	/* Let the generic walker validate offset/size against the struct. */
	err = btf_struct_access(log, reg, off, size, atype, next_btf_id, flag);
	if (err < 0)
		return err;

	/* Reads propagate the resolved type; writes return NOT_INIT so the
	 * destination register carries no trusted pointer type.
	 */
	return atype == BPF_READ ? err : NOT_INIT;
}
186
/* Verifier hooks for programs attached to bpf_dummy_ops. */
static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};
191
/* Members cannot be initialized from a user-space map image; dummy ops
 * are exercised only through BPF_PROG_TEST_RUN.
 */
static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}
198
/* Registering a dummy-ops map is unsupported, so ->reg always fails and
 * ->unreg consequently has nothing to undo.
 */
static int bpf_dummy_reg(void *kdata)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata)
{
}
207
/* The dummy struct_ops descriptor; referenced by bpf_struct_ops_test_run()
 * above via the extern declaration.
 */
struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
};
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2021. Huawei Technologies Co., Ltd
4 */
5#include <linux/kernel.h>
6#include <linux/bpf_verifier.h>
7#include <linux/bpf.h>
8#include <linux/btf.h>
9
/* Forward declaration; the full initializer is at the bottom of the file. */
static struct bpf_struct_ops bpf_bpf_dummy_ops;

/* A common type for test_N with return value in bpf_dummy_ops */
typedef int (*dummy_ops_test_ret_fn)(struct bpf_dummy_ops_state *state, ...);
14
/* Stub function handed to bpf_struct_ops_prepare_trampoline() below as the
 * trampoline's typed entry; the generated trampoline, not this body, is
 * what actually runs (NOTE(review): presumably a CFI anchor — see the
 * cfi_get_offset() use in dummy_ops_call_op()).
 */
static int dummy_ops_test_ret_function(struct bpf_dummy_ops_state *state, ...)
{
	return 0;
}
19
/* Kernel-side staging area for one test invocation: the raw u64 argument
 * values copied from user space, plus a kernel copy of the user-supplied
 * bpf_dummy_ops_state (args[0], when non-zero, is its user pointer).
 */
struct bpf_dummy_ops_test_args {
	u64 args[MAX_BPF_FUNC_ARGS];
	struct bpf_dummy_ops_state state;
};

/* BTF handle cached by bpf_dummy_init() for the name/type lookups in
 * check_test_run_args() and bpf_struct_ops_test_run().
 */
static struct btf *bpf_dummy_ops_btf;
26
/* Copy the test_run arguments in from user space.
 *
 * @kattr: test_run attributes; ctx_in must hold exactly @nr u64 values.
 * @nr:    number of arguments the attached test_N op takes.
 *
 * Returns a kmalloc'ed args struct (the caller frees it) or an ERR_PTR.
 */
static struct bpf_dummy_ops_test_args *
dummy_ops_init_args(const union bpf_attr *kattr, unsigned int nr)
{
	__u32 size_in;
	struct bpf_dummy_ops_test_args *args;
	void __user *ctx_in;
	void __user *u_state;

	size_in = kattr->test.ctx_size_in;
	if (size_in != sizeof(u64) * nr)
		return ERR_PTR(-EINVAL);

	args = kzalloc(sizeof(*args), GFP_KERNEL);
	if (!args)
		return ERR_PTR(-ENOMEM);

	ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	if (copy_from_user(args->args, ctx_in, size_in))
		goto out;

	/* args[0] is 0 means state argument of test_N will be NULL */
	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_from_user(&args->state, u_state,
				      sizeof(args->state)))
		goto out;

	return args;
out:
	kfree(args);
	return ERR_PTR(-EFAULT);
}
58
/* Write the kernel copy of the state back to the user buffer named by
 * args[0]; a zero args[0] means the op ran with NULL state, nothing to
 * copy.  Returns 0 or -EFAULT.
 */
static int dummy_ops_copy_args(struct bpf_dummy_ops_test_args *args)
{
	void __user *u_state;

	u_state = u64_to_user_ptr(args->args[0]);
	if (u_state && copy_to_user(u_state, &args->state, sizeof(args->state)))
		return -EFAULT;

	return 0;
}
69
/* Invoke the generated trampoline @image.  cfi_get_offset() skips the CFI
 * preamble in front of the image so the indirect call lands on the real
 * entry point (NOTE(review): presumably zero when CFI is disabled —
 * confirm against the arch's cfi implementation).
 */
static int dummy_ops_call_op(void *image, struct bpf_dummy_ops_test_args *args)
{
	dummy_ops_test_ret_fn test = (void *)image + cfi_get_offset();
	struct bpf_dummy_ops_state *state = NULL;

	/* state needs to be NULL if args[0] is 0 */
	if (args->args[0])
		state = &args->state;
	return test(state, args->args[1], args->args[2],
		    args->args[3], args->args[4]);
}
81
82static const struct bpf_ctx_arg_aux *find_ctx_arg_info(struct bpf_prog_aux *aux, int offset)
83{
84 int i;
85
86 for (i = 0; i < aux->ctx_arg_info_size; i++)
87 if (aux->ctx_arg_info[i].offset == offset)
88 return &aux->ctx_arg_info[i];
89
90 return NULL;
91}
92
/* There is only one check at the moment:
 * - zero should not be passed for pointer parameters not marked as nullable.
 */
static int check_test_run_args(struct bpf_prog *prog, struct bpf_dummy_ops_test_args *args)
{
	const struct btf_type *func_proto = prog->aux->attach_func_proto;

	for (u32 arg_no = 0; arg_no < btf_type_vlen(func_proto) ; ++arg_no) {
		const struct btf_param *param = &btf_params(func_proto)[arg_no];
		const struct bpf_ctx_arg_aux *info;
		const struct btf_type *t;
		int offset;

		/* Only zero-valued arguments can violate the rule. */
		if (args->args[arg_no] != 0)
			continue;

		/* Program is validated already, so there is no need
		 * to check if t is NULL.
		 */
		t = btf_type_skip_modifiers(bpf_dummy_ops_btf, param->type, NULL);
		if (!btf_type_is_ptr(t))
			continue;

		/* A zero pointer is acceptable only when the verifier marked
		 * this ctx argument's register type as may-be-NULL.
		 */
		offset = btf_ctx_arg_offset(bpf_dummy_ops_btf, func_proto, arg_no);
		info = find_ctx_arg_info(prog->aux, offset);
		if (info && type_may_be_null(info->reg_type))
			continue;

		return -EINVAL;
	}

	return 0;
}
126
extern const struct bpf_link_ops bpf_struct_ops_link_lops;

/* BPF_PROG_TEST_RUN handler for bpf_dummy_ops programs.
 *
 * Builds a trampoline for the single op the program attaches to, invokes
 * it with the user-supplied u64 arguments, then copies the (possibly
 * modified) state back to user space and stores the op's return value in
 * uattr->test.retval.  Returns 0 on success or a negative errno.
 */
int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr,
			    union bpf_attr __user *uattr)
{
	const struct bpf_struct_ops *st_ops = &bpf_bpf_dummy_ops;
	const struct btf_type *func_proto;
	struct bpf_dummy_ops_test_args *args;
	struct bpf_tramp_links *tlinks = NULL;
	struct bpf_tramp_link *link = NULL;
	void *image = NULL;
	unsigned int op_idx;
	u32 image_off = 0;
	int prog_ret;
	s32 type_id;
	int err;

	/* Resolve bpf_dummy_ops' BTF type id and reject programs attached
	 * to any other struct_ops type.
	 */
	type_id = btf_find_by_name_kind(bpf_dummy_ops_btf,
					bpf_bpf_dummy_ops.name,
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	if (prog->aux->attach_btf_id != type_id)
		return -EOPNOTSUPP;

	func_proto = prog->aux->attach_func_proto;
	args = dummy_ops_init_args(kattr, btf_type_vlen(func_proto));
	if (IS_ERR(args))
		return PTR_ERR(args);

	/* Reject a zero value for any pointer arg not marked nullable. */
	err = check_test_run_args(prog, args);
	if (err)
		goto out;

	tlinks = kcalloc(BPF_TRAMP_MAX, sizeof(*tlinks), GFP_KERNEL);
	if (!tlinks) {
		err = -ENOMEM;
		goto out;
	}

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out;
	}
	/* prog doesn't take the ownership of the reference from caller */
	bpf_prog_inc(prog);
	bpf_link_init(&link->link, BPF_LINK_TYPE_STRUCT_OPS, &bpf_struct_ops_link_lops, prog);

	/* expected_attach_type holds the member index of the attached op. */
	op_idx = prog->expected_attach_type;
	err = bpf_struct_ops_prepare_trampoline(tlinks, link,
						&st_ops->func_models[op_idx],
						&dummy_ops_test_ret_function,
						&image, &image_off,
						true);
	if (err < 0)
		goto out;

	/* Seal the trampoline pages (read-only + executable) before use. */
	err = arch_protect_bpf_trampoline(image, PAGE_SIZE);
	if (err)
		goto out;
	prog_ret = dummy_ops_call_op(image, args);

	/* Copy the state back even on success so user space sees updates. */
	err = dummy_ops_copy_args(args);
	if (err)
		goto out;
	if (put_user(prog_ret, &uattr->test.retval))
		err = -EFAULT;
out:
	kfree(args);
	bpf_struct_ops_image_free(image);
	if (link)
		bpf_link_put(&link->link);
	kfree(tlinks);
	return err;
}
203
/* struct_ops ->init callback: cache the BTF handle for the later lookups
 * in check_test_run_args() and bpf_struct_ops_test_run().
 */
static int bpf_dummy_init(struct btf *btf)
{
	bpf_dummy_ops_btf = btf;
	return 0;
}
209
/* Delegate ctx-access validation to the generic BTF-based tracing check. */
static bool bpf_dummy_ops_is_valid_access(int off, int size,
					  enum bpf_access_type type,
					  const struct bpf_prog *prog,
					  struct bpf_insn_access_aux *info)
{
	return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
}
217
218static int bpf_dummy_ops_check_member(const struct btf_type *t,
219 const struct btf_member *member,
220 const struct bpf_prog *prog)
221{
222 u32 moff = __btf_member_bit_offset(t, member) / 8;
223
224 switch (moff) {
225 case offsetof(struct bpf_dummy_ops, test_sleepable):
226 break;
227 default:
228 if (prog->sleepable)
229 return -EINVAL;
230 }
231
232 return 0;
233}
234
/* Verifier callback for accesses through a PTR_TO_BTF_ID pointer: only
 * struct bpf_dummy_ops_state may be touched, and the access must stay
 * inside the struct's bounds.
 */
static int bpf_dummy_ops_btf_struct_access(struct bpf_verifier_log *log,
					   const struct bpf_reg_state *reg,
					   int off, int size)
{
	const struct btf_type *state;
	const struct btf_type *t;
	s32 type_id;

	/* Resolve bpf_dummy_ops_state in the same BTF the register points
	 * into, then require the register's type to be exactly that struct.
	 */
	type_id = btf_find_by_name_kind(reg->btf, "bpf_dummy_ops_state",
					BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	state = btf_type_by_id(reg->btf, type_id);
	if (t != state) {
		bpf_log(log, "only access to bpf_dummy_ops_state is supported\n");
		return -EACCES;
	}

	/* Reject accesses running past the end of the struct. */
	if (off + size > sizeof(struct bpf_dummy_ops_state)) {
		bpf_log(log, "write access at off %d with size %d\n", off, size);
		return -EACCES;
	}

	return NOT_INIT;
}
262
/* Verifier hooks for programs attached to bpf_dummy_ops. */
static const struct bpf_verifier_ops bpf_dummy_verifier_ops = {
	.is_valid_access = bpf_dummy_ops_is_valid_access,
	.btf_struct_access = bpf_dummy_ops_btf_struct_access,
};
267
/* Members cannot be initialized from a user-space map image; dummy ops
 * are exercised only through BPF_PROG_TEST_RUN.
 */
static int bpf_dummy_init_member(const struct btf_type *t,
				 const struct btf_member *member,
				 void *kdata, const void *udata)
{
	return -EOPNOTSUPP;
}
274
/* Registering a dummy-ops map is unsupported, so ->reg always fails and
 * ->unreg consequently has nothing to undo.
 */
static int bpf_dummy_reg(void *kdata, struct bpf_link *link)
{
	return -EOPNOTSUPP;
}

static void bpf_dummy_unreg(void *kdata, struct bpf_link *link)
{
}
283
/* Kernel-side stubs with the exact member prototypes, collected in
 * .cfi_stubs below.  NOTE(review): the "ops__" prefix and "__nullable"
 * argument suffix appear to mark test_1's state argument as nullable to
 * the verifier (check_test_run_args() relies on that marking) — confirm
 * against the struct_ops stub-naming convention.
 */
static int bpf_dummy_ops__test_1(struct bpf_dummy_ops_state *cb__nullable)
{
	return 0;
}

static int bpf_dummy_test_2(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2,
			    char a3, unsigned long a4)
{
	return 0;
}

static int bpf_dummy_test_sleepable(struct bpf_dummy_ops_state *cb)
{
	return 0;
}

/* Stub table wired into bpf_bpf_dummy_ops.cfi_stubs below. */
static struct bpf_dummy_ops __bpf_bpf_dummy_ops = {
	.test_1 = bpf_dummy_ops__test_1,
	.test_2 = bpf_dummy_test_2,
	.test_sleepable = bpf_dummy_test_sleepable,
};
305
/* The dummy struct_ops type itself; registered at boot by
 * bpf_dummy_struct_ops_init() below.
 */
static struct bpf_struct_ops bpf_bpf_dummy_ops = {
	.verifier_ops = &bpf_dummy_verifier_ops,
	.init = bpf_dummy_init,
	.check_member = bpf_dummy_ops_check_member,
	.init_member = bpf_dummy_init_member,
	.reg = bpf_dummy_reg,
	.unreg = bpf_dummy_unreg,
	.name = "bpf_dummy_ops",
	.cfi_stubs = &__bpf_bpf_dummy_ops,
	.owner = THIS_MODULE,
};
317
/* Register bpf_dummy_ops with the struct_ops infrastructure at late boot. */
static int __init bpf_dummy_struct_ops_init(void)
{
	return register_bpf_struct_ops(&bpf_bpf_dummy_ops, bpf_dummy_ops);
}
late_initcall(bpf_dummy_struct_ops_init);