1// SPDX-License-Identifier: GPL-2.0
2#include <unistd.h>
3#include <pthread.h>
4#include <sys/mman.h>
5#include <stdatomic.h>
6#include <test_progs.h>
7#include <sys/syscall.h>
8#include <linux/module.h>
9#include <linux/userfaultfd.h>
10
11#include "ksym_race.skel.h"
12#include "bpf_mod_race.skel.h"
13#include "kfunc_call_race.skel.h"
14#include "testing_helpers.h"
15
16/* This test crafts a race between btf_try_get_module and do_init_module, and
17 * checks whether btf_try_get_module handles the invocation for a well-formed
18 * but uninitialized module correctly. Unless the module has completed its
19 * initcalls, the verifier should fail the program load and return ENXIO.
20 *
21 * userfaultfd is used to trigger a fault in an fmod_ret program, and make it
22 * sleep, then the BPF program is loaded and the return value from verifier is
23 * inspected. After this, the userfaultfd is closed so that the module loading
24 * thread makes forward progress, and fmod_ret injects an error so that the
25 * module load fails and it is freed.
26 *
27 * If the verifier succeeded in loading the supplied program, it will end up
28 * taking reference to freed module, and trigger a crash when the program fd
29 * is closed later. This is true for both kfuncs and ksyms. In both cases,
30 * the crash is triggered inside bpf_prog_free_deferred, when module reference
31 * is finally released.
32 */
33
/* Per-subtest callbacks describing the racing BPF object: how to open and
 * load it, how to destroy it, and a label used in assertion messages.
 */
struct test_config {
	const char *str_open;		/* label for the open_and_load assertion */
	void *(*bpf_open_and_load)();	/* returns skeleton ptr, NULL on failure */
	void (*bpf_destroy)(void *);	/* frees a skeleton returned above */
};
39
/* Progress reported by load_module_thread to the main test thread. */
enum bpf_test_state {
	_TS_INVALID,		/* initial: module load still in flight */
	TS_MODULE_LOAD,		/* module load returned 0 (unexpected success) */
	TS_MODULE_LOAD_FAIL,	/* module load failed, as the test expects */
};
45
/* Shared between the module-loading thread and the main test thread. */
static _Atomic enum bpf_test_state state = _TS_INVALID;
47
48static void *load_module_thread(void *p)
49{
50
51 if (!ASSERT_NEQ(load_bpf_testmod(false), 0, "load_module_thread must fail"))
52 atomic_store(&state, TS_MODULE_LOAD);
53 else
54 atomic_store(&state, TS_MODULE_LOAD_FAIL);
55 return p;
56}
57
/* Raw userfaultfd(2) wrapper; libc ships no stub for this syscall. */
static int sys_userfaultfd(int flags)
{
	long ret = syscall(__NR_userfaultfd, flags);

	return (int)ret;
}
62
63static int test_setup_uffd(void *fault_addr)
64{
65 struct uffdio_register uffd_register = {};
66 struct uffdio_api uffd_api = {};
67 int uffd;
68
69 uffd = sys_userfaultfd(O_CLOEXEC);
70 if (uffd < 0)
71 return -errno;
72
73 uffd_api.api = UFFD_API;
74 uffd_api.features = 0;
75 if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
76 close(uffd);
77 return -1;
78 }
79
80 uffd_register.range.start = (unsigned long)fault_addr;
81 uffd_register.range.len = 4096;
82 uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
83 if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
84 close(uffd);
85 return -1;
86 }
87 return uffd;
88}
89
/* Drive one race iteration: reload bpf_testmod in a side thread while its
 * init is blocked (the fmod_ret prog faults on a userfaultfd-armed page),
 * and meanwhile verify that the verifier refuses to load a program that
 * references the still-uninitialized module (config->bpf_open_and_load).
 */
static void test_bpf_mod_race_config(const struct test_config *config)
{
	void *fault_addr, *skel_fail;
	struct bpf_mod_race *skel;
	struct uffd_msg uffd_msg;
	pthread_t load_mod_thrd;
	_Atomic int *blockingp;	/* points at skel->bss->bpf_blocking */
	int uffd, ret;

	/* Page the fmod_ret program will fault on; armed with uffd below. */
	fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
		return;

	/* Module must be absent so the side thread's load runs its init. */
	if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod"))
		goto end_mmap;

	skel = bpf_mod_race__open();
	if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open"))
		goto end_module;

	/* Configure the fmod_ret prog: this process's tgid, the error to
	 * inject into module init, and the address to fault on.
	 */
	skel->rodata->bpf_mod_race_config.tgid = getpid();
	skel->rodata->bpf_mod_race_config.inject_error = -4242;
	skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
	if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load"))
		goto end_destroy;
	blockingp = (_Atomic int *)&skel->bss->bpf_blocking;

	if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach"))
		goto end_destroy;

	uffd = test_setup_uffd(fault_addr);
	if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
		goto end_destroy;

	if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL),
		       "load module thread"))
		goto end_uffd;

	/* Now, we either fail loading module, or block in bpf prog, spin to find out */
	while (!atomic_load(&state) && !atomic_load(blockingp))
		;
	if (!ASSERT_EQ(state, _TS_INVALID, "module load should block"))
		goto end_join;
	if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
		pthread_kill(load_mod_thrd, SIGKILL);
		goto end_uffd;
	}

	/* We might have set bpf_blocking to 1, but may have not blocked in
	 * bpf_copy_from_user. Read userfaultfd descriptor to verify that.
	 */
	if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg),
		       "read uffd block event"))
		goto end_join;
	if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault"))
		goto end_join;

	/* We know that load_mod_thrd is blocked in the fmod_ret program, the
	 * module state is still MODULE_STATE_COMING because mod->init hasn't
	 * returned. This is the time we try to load a program calling kfunc and
	 * check if we get ENXIO from verifier.
	 */
	skel_fail = config->bpf_open_and_load();
	ret = errno;	/* capture errno before any later call clobbers it */
	if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) {
		/* Close uffd to unblock load_mod_thrd */
		close(uffd);
		uffd = -1;	/* marks "module thread unblocked", see end_join */
		while (atomic_load(blockingp) != 2)
			;
		ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
		config->bpf_destroy(skel_fail);
		goto end_join;

	}
	ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO");
	ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false");

	close(uffd);
	uffd = -1;
end_join:
	pthread_join(load_mod_thrd, NULL);
	/* uffd == -1 means the module thread was unblocked: its load must have
	 * failed because of the injected init error.
	 */
	if (uffd < 0)
		ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success");
end_uffd:
	if (uffd >= 0)
		close(uffd);
end_destroy:
	bpf_mod_race__destroy(skel);
	ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
end_module:
	/* Restore bpf_testmod for subsequent tests. */
	unload_bpf_testmod(false);
	ASSERT_OK(load_bpf_testmod(false), "restore bpf_testmod");
end_mmap:
	munmap(fault_addr, 4096);
	atomic_store(&state, _TS_INVALID);
}
187
/* Subtest racing a program that references a module ksym (used_btfs UAF). */
static const struct test_config ksym_config = {
	.str_open = "ksym_race__open_and_load",
	.bpf_open_and_load = (void *)ksym_race__open_and_load,
	.bpf_destroy = (void *)ksym_race__destroy,
};
193
/* Subtest racing a program that calls a module kfunc (kfunc_btf_tab UAF). */
static const struct test_config kfunc_config = {
	.str_open = "kfunc_call_race__open_and_load",
	.bpf_open_and_load = (void *)kfunc_call_race__open_and_load,
	.bpf_destroy = (void *)kfunc_call_race__destroy,
};
199
200void serial_test_bpf_mod_race(void)
201{
202 if (test__start_subtest("ksym (used_btfs UAF)"))
203 test_bpf_mod_race_config(&ksym_config);
204 if (test__start_subtest("kfunc (kfunc_btf_tab UAF)"))
205 test_bpf_mod_race_config(&kfunc_config);
206}
1// SPDX-License-Identifier: GPL-2.0
2#include <unistd.h>
3#include <pthread.h>
4#include <sys/mman.h>
5#include <stdatomic.h>
6#include <test_progs.h>
7#include <sys/syscall.h>
8#include <linux/module.h>
9#include <linux/userfaultfd.h>
10
11#include "ksym_race.skel.h"
12#include "bpf_mod_race.skel.h"
13#include "kfunc_call_race.skel.h"
14
15/* This test crafts a race between btf_try_get_module and do_init_module, and
16 * checks whether btf_try_get_module handles the invocation for a well-formed
17 * but uninitialized module correctly. Unless the module has completed its
18 * initcalls, the verifier should fail the program load and return ENXIO.
19 *
20 * userfaultfd is used to trigger a fault in an fmod_ret program, and make it
21 * sleep, then the BPF program is loaded and the return value from verifier is
22 * inspected. After this, the userfaultfd is closed so that the module loading
23 * thread makes forward progress, and fmod_ret injects an error so that the
24 * module load fails and it is freed.
25 *
26 * If the verifier succeeded in loading the supplied program, it will end up
27 * taking reference to freed module, and trigger a crash when the program fd
28 * is closed later. This is true for both kfuncs and ksyms. In both cases,
29 * the crash is triggered inside bpf_prog_free_deferred, when module reference
30 * is finally released.
31 */
32
/* Per-subtest callbacks describing the racing BPF object: how to open and
 * load it, how to destroy it, and a label used in assertion messages.
 */
struct test_config {
	const char *str_open;		/* label for the open_and_load assertion */
	void *(*bpf_open_and_load)();	/* returns skeleton ptr, NULL on failure */
	void (*bpf_destroy)(void *);	/* frees a skeleton returned above */
};
38
/* Progress reported by load_module_thread to the main test thread. */
enum bpf_test_state {
	_TS_INVALID,		/* initial: module load still in flight */
	TS_MODULE_LOAD,		/* module load returned 0 (unexpected success) */
	TS_MODULE_LOAD_FAIL,	/* module load failed, as the test expects */
};
44
/* Shared between the module-loading thread and the main test thread. */
static _Atomic enum bpf_test_state state = _TS_INVALID;
46
/* Raw finit_module(2) wrapper; libc ships no stub for this syscall. */
static int sys_finit_module(int fd, const char *param_values, int flags)
{
	long ret = syscall(__NR_finit_module, fd, param_values, flags);

	return (int)ret;
}
51
/* Raw delete_module(2) wrapper; libc ships no stub for this syscall. */
static int sys_delete_module(const char *name, unsigned int flags)
{
	long ret = syscall(__NR_delete_module, name, flags);

	return (int)ret;
}
56
/* Load the module at path @mod via finit_module(2) with no parameters.
 * Returns 0 on success, a negative value on failure.
 */
static int load_module(const char *mod)
{
	int ret, fd;

	/* Open the caller-supplied path; the previous code hard-coded
	 * "bpf_testmod.ko" and silently ignored @mod.
	 */
	fd = open(mod, O_RDONLY);
	if (fd < 0)
		return fd;

	ret = sys_finit_module(fd, "", 0);
	close(fd);
	if (ret < 0)
		return ret;
	return 0;
}
71
72static void *load_module_thread(void *p)
73{
74
75 if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail"))
76 atomic_store(&state, TS_MODULE_LOAD);
77 else
78 atomic_store(&state, TS_MODULE_LOAD_FAIL);
79 return p;
80}
81
/* Raw userfaultfd(2) wrapper; libc ships no stub for this syscall. */
static int sys_userfaultfd(int flags)
{
	long ret = syscall(__NR_userfaultfd, flags);

	return (int)ret;
}
86
87static int test_setup_uffd(void *fault_addr)
88{
89 struct uffdio_register uffd_register = {};
90 struct uffdio_api uffd_api = {};
91 int uffd;
92
93 uffd = sys_userfaultfd(O_CLOEXEC);
94 if (uffd < 0)
95 return -errno;
96
97 uffd_api.api = UFFD_API;
98 uffd_api.features = 0;
99 if (ioctl(uffd, UFFDIO_API, &uffd_api)) {
100 close(uffd);
101 return -1;
102 }
103
104 uffd_register.range.start = (unsigned long)fault_addr;
105 uffd_register.range.len = 4096;
106 uffd_register.mode = UFFDIO_REGISTER_MODE_MISSING;
107 if (ioctl(uffd, UFFDIO_REGISTER, &uffd_register)) {
108 close(uffd);
109 return -1;
110 }
111 return uffd;
112}
113
/* Drive one race iteration: reload bpf_testmod in a side thread while its
 * init is blocked (the fmod_ret prog faults on a userfaultfd-armed page),
 * and meanwhile verify that the verifier refuses to load a program that
 * references the still-uninitialized module (config->bpf_open_and_load).
 */
static void test_bpf_mod_race_config(const struct test_config *config)
{
	void *fault_addr, *skel_fail;
	struct bpf_mod_race *skel;
	struct uffd_msg uffd_msg;
	pthread_t load_mod_thrd;
	_Atomic int *blockingp;	/* points at skel->bss->bpf_blocking */
	int uffd, ret;

	/* Page the fmod_ret program will fault on; armed with uffd below. */
	fault_addr = mmap(0, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration"))
		return;

	/* Module must be absent so the side thread's load runs its init. */
	if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod"))
		goto end_mmap;

	skel = bpf_mod_race__open();
	if (!ASSERT_OK_PTR(skel, "bpf_mod_kfunc_race__open"))
		goto end_module;

	/* Configure the fmod_ret prog: this process's tgid, the error to
	 * inject into module init, and the address to fault on.
	 */
	skel->rodata->bpf_mod_race_config.tgid = getpid();
	skel->rodata->bpf_mod_race_config.inject_error = -4242;
	skel->rodata->bpf_mod_race_config.fault_addr = fault_addr;
	if (!ASSERT_OK(bpf_mod_race__load(skel), "bpf_mod___load"))
		goto end_destroy;
	blockingp = (_Atomic int *)&skel->bss->bpf_blocking;

	if (!ASSERT_OK(bpf_mod_race__attach(skel), "bpf_mod_kfunc_race__attach"))
		goto end_destroy;

	uffd = test_setup_uffd(fault_addr);
	if (!ASSERT_GE(uffd, 0, "userfaultfd open + register address"))
		goto end_destroy;

	if (!ASSERT_OK(pthread_create(&load_mod_thrd, NULL, load_module_thread, NULL),
		       "load module thread"))
		goto end_uffd;

	/* Now, we either fail loading module, or block in bpf prog, spin to find out */
	while (!atomic_load(&state) && !atomic_load(blockingp))
		;
	if (!ASSERT_EQ(state, _TS_INVALID, "module load should block"))
		goto end_join;
	if (!ASSERT_EQ(*blockingp, 1, "module load blocked")) {
		pthread_kill(load_mod_thrd, SIGKILL);
		goto end_uffd;
	}

	/* We might have set bpf_blocking to 1, but may have not blocked in
	 * bpf_copy_from_user. Read userfaultfd descriptor to verify that.
	 */
	if (!ASSERT_EQ(read(uffd, &uffd_msg, sizeof(uffd_msg)), sizeof(uffd_msg),
		       "read uffd block event"))
		goto end_join;
	if (!ASSERT_EQ(uffd_msg.event, UFFD_EVENT_PAGEFAULT, "read uffd event is pagefault"))
		goto end_join;

	/* We know that load_mod_thrd is blocked in the fmod_ret program, the
	 * module state is still MODULE_STATE_COMING because mod->init hasn't
	 * returned. This is the time we try to load a program calling kfunc and
	 * check if we get ENXIO from verifier.
	 */
	skel_fail = config->bpf_open_and_load();
	ret = errno;	/* capture errno before any later call clobbers it */
	if (!ASSERT_EQ(skel_fail, NULL, config->str_open)) {
		/* Close uffd to unblock load_mod_thrd */
		close(uffd);
		uffd = -1;	/* marks "module thread unblocked", see end_join */
		while (atomic_load(blockingp) != 2)
			;
		ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
		config->bpf_destroy(skel_fail);
		goto end_join;

	}
	ASSERT_EQ(ret, ENXIO, "verifier returns ENXIO");
	ASSERT_EQ(skel->data->res_try_get_module, false, "btf_try_get_module == false");

	close(uffd);
	uffd = -1;
end_join:
	pthread_join(load_mod_thrd, NULL);
	/* uffd == -1 means the module thread was unblocked: its load must have
	 * failed because of the injected init error.
	 */
	if (uffd < 0)
		ASSERT_EQ(atomic_load(&state), TS_MODULE_LOAD_FAIL, "load_mod_thrd success");
end_uffd:
	if (uffd >= 0)
		close(uffd);
end_destroy:
	bpf_mod_race__destroy(skel);
	ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu");
end_module:
	/* Restore bpf_testmod for subsequent tests. */
	sys_delete_module("bpf_testmod", 0);
	ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod");
end_mmap:
	munmap(fault_addr, 4096);
	atomic_store(&state, _TS_INVALID);
}
211
/* Subtest racing a program that references a module ksym (used_btfs UAF). */
static const struct test_config ksym_config = {
	.str_open = "ksym_race__open_and_load",
	.bpf_open_and_load = (void *)ksym_race__open_and_load,
	.bpf_destroy = (void *)ksym_race__destroy,
};
217
/* Subtest racing a program that calls a module kfunc (kfunc_btf_tab UAF). */
static const struct test_config kfunc_config = {
	.str_open = "kfunc_call_race__open_and_load",
	.bpf_open_and_load = (void *)kfunc_call_race__open_and_load,
	.bpf_destroy = (void *)kfunc_call_race__destroy,
};
223
224void serial_test_bpf_mod_race(void)
225{
226 if (test__start_subtest("ksym (used_btfs UAF)"))
227 test_bpf_mod_race_config(&ksym_config);
228 if (test__start_subtest("kfunc (kfunc_btf_tab UAF)"))
229 test_bpf_mod_race_config(&kfunc_config);
230}