1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3
4#include <vmlinux.h>
5#include <bpf/bpf_tracing.h>
6#include <bpf/bpf_helpers.h>
7#include <bpf/bpf_core_read.h>
8#include "bpf_experimental.h"
9
/* Tree node payload: ordered by 'key' (see less()); 'data' is an opaque
 * value checked by userspace; 'node' links the object into a bpf_rb_root
 * (see the __contains annotations on the roots below).
 */
struct node_data {
	long key;
	long data;
	struct bpf_rb_node node;
};
15
/* Lock and the rb_root it protects, embedded together so the root sits at
 * a non-zero offset when wrapped in struct root_nested.
 */
struct root_nested_inner {
	struct bpf_spin_lock glock;
	struct bpf_rb_root root __contains(node_data, node);
};
20
/* Outer wrapper: exercises a bpf_rb_root nested inside another struct. */
struct root_nested {
	struct root_nested_inner inner;
};
24
/* Results reported back to the userspace test harness; -1 means "unset". */
long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};

/* Place each group of locks/roots in its own named .data section. */
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node);
private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node);
private(B) struct root_nested groot_nested;
35
36static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
37{
38 struct node_data *node_a;
39 struct node_data *node_b;
40
41 node_a = container_of(a, struct node_data, node);
42 node_b = container_of(b, struct node_data, node);
43 less_callback_ran = 1;
44
45 return node_a->key < node_b->key;
46}
47
48static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
49{
50 struct node_data *n, *m;
51
52 n = bpf_obj_new(typeof(*n));
53 if (!n)
54 return 1;
55 n->key = 5;
56
57 m = bpf_obj_new(typeof(*m));
58 if (!m) {
59 bpf_obj_drop(n);
60 return 2;
61 }
62 m->key = 1;
63
64 bpf_spin_lock(&glock);
65 bpf_rbtree_add(&groot, &n->node, less);
66 bpf_rbtree_add(&groot, &m->node, less);
67 bpf_spin_unlock(&glock);
68
69 n = bpf_obj_new(typeof(*n));
70 if (!n)
71 return 3;
72 n->key = 3;
73
74 bpf_spin_lock(&glock);
75 bpf_rbtree_add(&groot, &n->node, less);
76 bpf_spin_unlock(&glock);
77 return 0;
78}
79
SEC("tc")
long rbtree_add_nodes(void *ctx)
{
	/* Populate the global tree; 0 on success, non-zero on alloc failure */
	return __add_three(&groot, &glock);
}
85
SEC("tc")
long rbtree_add_nodes_nested(void *ctx)
{
	/* Same as rbtree_add_nodes, but passes the root/lock that live at a
	 * nested, non-zero offset inside groot_nested.
	 */
	return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock);
}
91
/* Insert two nodes (keys 5 and 3), remove the key-5 node in the same
 * critical section, and report its key via removed_key.
 * Returns 0 on success, 1 on failure.
 */
SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
	struct bpf_rb_node *res = NULL;
	struct node_data *n, *m = NULL;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		goto err_out;
	n->key = 5;

	m = bpf_obj_new(typeof(*m));
	if (!m)
		goto err_out;
	m->key = 3;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_rbtree_add(&groot, &m->node, less);
	/* remove returns an owning reference usable after unlock */
	res = bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);

	if (!res)
		return 1;

	/* Report the removed key (expected: 5), then free the node */
	n = container_of(res, struct node_data, node);
	removed_key = n->key;
	bpf_obj_drop(n);

	return 0;
err_out:
	/* Allocation failed: drop whatever was allocated so far */
	if (n)
		bpf_obj_drop(n);
	if (m)
		bpf_obj_drop(m);
	return 1;
}
129
/* Exercise arrays of rb_roots: insert two nodes into each of three roots
 * (groot_array[0], groot_array[1], groot_array_one[0]), remove the
 * lower-keyed node from each, and check the removed keys are 0, 2 and 4.
 * Returns 0 on success, 1 on alloc failure, 2 on key mismatch.
 */
SEC("tc")
long rbtree_add_and_remove_array(void *ctx)
{
	struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL;
	struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}};
	struct node_data *n;
	long k1 = -1, k2 = -1, k3 = -1;
	int i, j;

	/* nodes[i][j] gets key i*2+j, i.e. keys 0..5 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 2; j++) {
			nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j]));
			if (!nodes[i][j])
				goto err_out;
			nodes[i][j]->key = i * 2 + j;
		}
	}

	bpf_spin_lock(&glock);
	for (i = 0; i < 2; i++)
		for (j = 0; j < 2; j++)
			bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less);
	for (j = 0; j < 2; j++)
		bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less);
	/* Remove the smaller-keyed node ([i][0]) from each root */
	res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node);
	res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node);
	res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node);
	bpf_spin_unlock(&glock);

	if (res1) {
		n = container_of(res1, struct node_data, node);
		k1 = n->key;
		bpf_obj_drop(n);
	}
	if (res2) {
		n = container_of(res2, struct node_data, node);
		k2 = n->key;
		bpf_obj_drop(n);
	}
	if (res3) {
		n = container_of(res3, struct node_data, node);
		k3 = n->key;
		bpf_obj_drop(n);
	}
	if (k1 != 0 || k2 != 2 || k3 != 4)
		return 2;

	return 0;

err_out:
	/* Allocation failed partway: free every node allocated so far */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < 2; j++) {
			if (nodes[i][j])
				bpf_obj_drop(nodes[i][j]);
		}
	}
	return 1;
}
188
/* Insert keys 3, 5, 1; record the tree minimum's data (expected: 2) in
 * first_data[0], remove that node (key reported via removed_key), then
 * record the new minimum's data (expected: 4) in first_data[1].
 * Returns 0 on success, non-zero error codes otherwise.
 */
SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
	struct bpf_rb_node *res = NULL;
	struct node_data *n, *m, *o;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 3;
	n->data = 4;

	m = bpf_obj_new(typeof(*m));
	if (!m)
		goto err_out;
	m->key = 5;
	m->data = 6;

	o = bpf_obj_new(typeof(*o));
	if (!o)
		goto err_out;
	o->key = 1;
	o->data = 2;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_rbtree_add(&groot, &m->node, less);
	bpf_rbtree_add(&groot, &o->node, less);

	/* first() yields a non-owning ref, only valid while the lock is held */
	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 2;
	}

	o = container_of(res, struct node_data, node);
	first_data[0] = o->data;

	/* remove() converts it to an owning ref usable after unlock */
	res = bpf_rbtree_remove(&groot, &o->node);
	bpf_spin_unlock(&glock);

	if (!res)
		return 5;

	o = container_of(res, struct node_data, node);
	removed_key = o->key;
	bpf_obj_drop(o);

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 3;
	}

	o = container_of(res, struct node_data, node);
	first_data[1] = o->data;
	bpf_spin_unlock(&glock);

	return 0;
err_out:
	/* Allocation failed: drop the nodes allocated before the failure */
	if (n)
		bpf_obj_drop(n);
	if (m)
		bpf_obj_drop(m);
	return 1;
}
256
/* Take two non-owning refs (m and o) to the same node via rbtree_first(),
 * remove it through one ref, then attempt removal again through the other.
 * The second remove must fail at runtime and return NULL; only
 * first_data[0] (expected: 42) gets written.
 */
SEC("tc")
long rbtree_api_release_aliasing(void *ctx)
{
	struct node_data *n, *m, *o;
	struct bpf_rb_node *res, *res2;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 41;
	n->data = 42;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);

	/* m and o point to the same node,
	 * but verifier doesn't know this
	 */
	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	o = container_of(res, struct node_data, node);

	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	m = container_of(res, struct node_data, node);

	res = bpf_rbtree_remove(&groot, &m->node);
	/* Retval of previous remove returns an owning reference to m,
	 * which is the same node non-owning ref o is pointing at.
	 * We can safely try to remove o as the second rbtree_remove will
	 * return NULL since the node isn't in a tree.
	 *
	 * Previously we relied on the verifier type system + rbtree_remove
	 * invalidating non-owning refs to ensure that rbtree_remove couldn't
	 * fail, but now rbtree_remove does runtime checking so we no longer
	 * invalidate non-owning refs after remove.
	 */
	res2 = bpf_rbtree_remove(&groot, &o->node);

	bpf_spin_unlock(&glock);

	if (res) {
		/* First remove succeeded: report the node's data (42) */
		o = container_of(res, struct node_data, node);
		first_data[0] = o->data;
		bpf_obj_drop(o);
	}
	if (res2) {
		/* The second remove fails, so res2 is null and this doesn't
		 * execute
		 */
		m = container_of(res2, struct node_data, node);
		first_data[1] = m->data;
		bpf_obj_drop(m);
	}
	return 0;

err_out:
	bpf_spin_unlock(&glock);
	return 1;
}
322
323char _license[] SEC("license") = "GPL";
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
3
4#include <vmlinux.h>
5#include <bpf/bpf_tracing.h>
6#include <bpf/bpf_helpers.h>
7#include <bpf/bpf_core_read.h>
8#include "bpf_experimental.h"
9
/* Tree node payload: ordered by 'key' (see less()); 'data' is an opaque
 * value checked by userspace; 'node' links the object into a bpf_rb_root.
 */
struct node_data {
	long key;
	long data;
	struct bpf_rb_node node;
};
15
/* Results reported back to the userspace test harness; -1 means "unset". */
long less_callback_ran = -1;
long removed_key = -1;
long first_data[2] = {-1, -1};

/* Place the global lock and root in their own named .data section. */
#define private(name) SEC(".data." #name) __hidden __attribute__((aligned(8)))
private(A) struct bpf_spin_lock glock;
private(A) struct bpf_rb_root groot __contains(node_data, node);
23
24static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b)
25{
26 struct node_data *node_a;
27 struct node_data *node_b;
28
29 node_a = container_of(a, struct node_data, node);
30 node_b = container_of(b, struct node_data, node);
31 less_callback_ran = 1;
32
33 return node_a->key < node_b->key;
34}
35
36static long __add_three(struct bpf_rb_root *root, struct bpf_spin_lock *lock)
37{
38 struct node_data *n, *m;
39
40 n = bpf_obj_new(typeof(*n));
41 if (!n)
42 return 1;
43 n->key = 5;
44
45 m = bpf_obj_new(typeof(*m));
46 if (!m) {
47 bpf_obj_drop(n);
48 return 2;
49 }
50 m->key = 1;
51
52 bpf_spin_lock(&glock);
53 bpf_rbtree_add(&groot, &n->node, less);
54 bpf_rbtree_add(&groot, &m->node, less);
55 bpf_spin_unlock(&glock);
56
57 n = bpf_obj_new(typeof(*n));
58 if (!n)
59 return 3;
60 n->key = 3;
61
62 bpf_spin_lock(&glock);
63 bpf_rbtree_add(&groot, &n->node, less);
64 bpf_spin_unlock(&glock);
65 return 0;
66}
67
SEC("tc")
long rbtree_add_nodes(void *ctx)
{
	/* Populate the global tree; 0 on success, non-zero on alloc failure */
	return __add_three(&groot, &glock);
}
73
/* Insert two nodes (keys 5 and 3), remove the key-5 node in the same
 * critical section, and report its key via removed_key.
 * Returns 0 on success, 1 on failure.
 */
SEC("tc")
long rbtree_add_and_remove(void *ctx)
{
	struct bpf_rb_node *res = NULL;
	struct node_data *n, *m = NULL;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		goto err_out;
	n->key = 5;

	m = bpf_obj_new(typeof(*m));
	if (!m)
		goto err_out;
	m->key = 3;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_rbtree_add(&groot, &m->node, less);
	/* remove returns an owning reference usable after unlock */
	res = bpf_rbtree_remove(&groot, &n->node);
	bpf_spin_unlock(&glock);

	if (!res)
		return 1;

	/* Report the removed key (expected: 5), then free the node */
	n = container_of(res, struct node_data, node);
	removed_key = n->key;
	bpf_obj_drop(n);

	return 0;
err_out:
	/* Allocation failed: drop whatever was allocated so far */
	if (n)
		bpf_obj_drop(n);
	if (m)
		bpf_obj_drop(m);
	return 1;
}
111
/* Insert keys 3, 5, 1; record the tree minimum's data (expected: 2) in
 * first_data[0], remove that node (key reported via removed_key), then
 * record the new minimum's data (expected: 4) in first_data[1].
 * Returns 0 on success, non-zero error codes otherwise.
 */
SEC("tc")
long rbtree_first_and_remove(void *ctx)
{
	struct bpf_rb_node *res = NULL;
	struct node_data *n, *m, *o;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 3;
	n->data = 4;

	m = bpf_obj_new(typeof(*m));
	if (!m)
		goto err_out;
	m->key = 5;
	m->data = 6;

	o = bpf_obj_new(typeof(*o));
	if (!o)
		goto err_out;
	o->key = 1;
	o->data = 2;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_rbtree_add(&groot, &m->node, less);
	bpf_rbtree_add(&groot, &o->node, less);

	/* first() yields a non-owning ref, only valid while the lock is held */
	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 2;
	}

	o = container_of(res, struct node_data, node);
	first_data[0] = o->data;

	/* remove() converts it to an owning ref usable after unlock */
	res = bpf_rbtree_remove(&groot, &o->node);
	bpf_spin_unlock(&glock);

	if (!res)
		return 5;

	o = container_of(res, struct node_data, node);
	removed_key = o->key;
	bpf_obj_drop(o);

	bpf_spin_lock(&glock);
	res = bpf_rbtree_first(&groot);
	if (!res) {
		bpf_spin_unlock(&glock);
		return 3;
	}

	o = container_of(res, struct node_data, node);
	first_data[1] = o->data;
	bpf_spin_unlock(&glock);

	return 0;
err_out:
	/* Allocation failed: drop the nodes allocated before the failure */
	if (n)
		bpf_obj_drop(n);
	if (m)
		bpf_obj_drop(m);
	return 1;
}
179
/* Take two non-owning refs (m and o) to the same node via rbtree_first(),
 * remove it through one ref, then attempt removal again through the other.
 * The second remove must fail at runtime and return NULL; only
 * first_data[0] (expected: 42) gets written.
 */
SEC("tc")
long rbtree_api_release_aliasing(void *ctx)
{
	struct node_data *n, *m, *o;
	struct bpf_rb_node *res, *res2;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 1;
	n->key = 41;
	n->data = 42;

	bpf_spin_lock(&glock);
	bpf_rbtree_add(&groot, &n->node, less);
	bpf_spin_unlock(&glock);

	bpf_spin_lock(&glock);

	/* m and o point to the same node,
	 * but verifier doesn't know this
	 */
	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	o = container_of(res, struct node_data, node);

	res = bpf_rbtree_first(&groot);
	if (!res)
		goto err_out;
	m = container_of(res, struct node_data, node);

	res = bpf_rbtree_remove(&groot, &m->node);
	/* Retval of previous remove returns an owning reference to m,
	 * which is the same node non-owning ref o is pointing at.
	 * We can safely try to remove o as the second rbtree_remove will
	 * return NULL since the node isn't in a tree.
	 *
	 * Previously we relied on the verifier type system + rbtree_remove
	 * invalidating non-owning refs to ensure that rbtree_remove couldn't
	 * fail, but now rbtree_remove does runtime checking so we no longer
	 * invalidate non-owning refs after remove.
	 */
	res2 = bpf_rbtree_remove(&groot, &o->node);

	bpf_spin_unlock(&glock);

	if (res) {
		/* First remove succeeded: report the node's data (42) */
		o = container_of(res, struct node_data, node);
		first_data[0] = o->data;
		bpf_obj_drop(o);
	}
	if (res2) {
		/* The second remove fails, so res2 is null and this doesn't
		 * execute
		 */
		m = container_of(res2, struct node_data, node);
		first_data[1] = m->data;
		bpf_obj_drop(m);
	}
	return 0;

err_out:
	bpf_spin_unlock(&glock);
	return 1;
}
245
246char _license[] SEC("license") = "GPL";