// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
#include <linux/build_bug.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <test_progs.h>
#include <uapi/linux/bpf.h>
#include <unistd.h>

#include "user_ringbuf_fail.skel.h"
#include "user_ringbuf_success.skel.h"

#include "../progs/test_user_ringbuf.h"

static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;

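/*
 * Kick the kernel into draining any published user-ringbuf samples. The BPF
 * side of this test (user_ringbuf_success.bpf.c, not shown here) is assumed
 * to hook this syscall, filter on the test's PID, and invoke
 * bpf_user_ringbuf_drain() from the handler.
 */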
static void drain_current_samples(void)
{
	syscall(__NR_getpgid);
}

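/*
 * Producer-side protocol: each sample is published with a two-step
 * reserve/submit (or reserve/discard) sequence. user_ring_buffer__reserve()
 * returns NULL and sets errno on failure (e.g. ENOSPC once the ring fills
 * up), which is propagated back to the caller as -errno.
 */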
static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
{
	int i, err = 0;

	/* Write some number of samples to the ring buffer. */
	for (i = 0; i < num_samples; i++) {
		struct sample *entry;
		int read;

		entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
		if (!entry) {
			err = -errno;
			goto done;
		}

		entry->pid = getpid();
		entry->seq = i;
		entry->value = i * i;

		read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
		if (read <= 0) {
			/* Assert on the error path to avoid spamming logs with
			 * mostly success messages.
			 */
			ASSERT_GT(read, 0, "snprintf_comm");
			err = read;
			user_ring_buffer__discard(ringbuf, entry);
			goto done;
		}

		user_ring_buffer__submit(ringbuf, entry);
	}

done:
	drain_current_samples();

	return err;
}

static struct user_ringbuf_success *open_load_ringbuf_skel(void)
{
	struct user_ringbuf_success *skel;
	int err;

	skel = user_ringbuf_success__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return NULL;

	err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries"))
		goto cleanup;

	err = user_ringbuf_success__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	return skel;

cleanup:
	user_ringbuf_success__destroy(skel);
	return NULL;
}

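/*
 * A BPF_MAP_TYPE_USER_RINGBUF map exposes three mmap'able regions at fixed
 * page offsets: the consumer position page at offset 0, the producer position
 * page at page_size, and the data pages starting at 2 * page_size. Because
 * user space is the producer, the producer page and data pages are writable
 * from user space; none of the regions may ever be mapped executable.
 */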
static void test_user_ringbuf_mappings(void)
{
	int err, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr;
	struct user_ringbuf_success *skel;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return;

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
	/* cons_pos can be mapped R/O, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "wr_prod_pos");
	err = -errno;
	ASSERT_ERR(err, "wr_prod_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");

	/* prod_pos can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			rb_fd, page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
	err = -errno;
	ASSERT_ERR(err, "wr_prod_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");

	/* data pages can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
			2 * page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_data");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_data_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");

	user_ringbuf_success__destroy(skel);
}

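/*
 * Common setup helper: open and load the skeleton, optionally create the
 * kernel-ringbuf consumer and/or the user-ringbuf producer, then attach the
 * programs. On failure, any partially created objects are freed and the out
 * parameters are cleared so callers can't use stale pointers.
 */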
static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
				     struct ring_buffer **kern_ringbuf_out,
				     ring_buffer_sample_fn callback,
				     struct user_ring_buffer **user_ringbuf_out)
{
	struct user_ringbuf_success *skel;
	struct ring_buffer *kern_ringbuf = NULL;
	struct user_ring_buffer *user_ringbuf = NULL;
	int err = -ENOMEM, rb_fd;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return err;

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	if (kern_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
		kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
		if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
			goto cleanup;

		*kern_ringbuf_out = kern_ringbuf;
	}

	if (user_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
		user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
		if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
			goto cleanup;

		*user_ringbuf_out = user_ringbuf;
		ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
	}

	err = user_ringbuf_success__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	*skel_out = skel;
	return 0;

cleanup:
	if (kern_ringbuf_out)
		*kern_ringbuf_out = NULL;
	if (user_ringbuf_out)
		*user_ringbuf_out = NULL;
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
	return err;
}

static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
					 struct user_ring_buffer **ringbuf_out)
{
	return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
}

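/*
 * Bypass libbpf and write a raw record header directly into the mapped data
 * pages, so the validation done by the kernel's drain path can be exercised
 * with sizes and producer positions the libbpf API would never produce. The
 * expected error is observed through the BPF program's err variable, which
 * is assumed to record bpf_user_ringbuf_drain()'s return value.
 */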
static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
					       __u32 size, __u64 producer_pos, int err)
{
	void *data_ptr;
	__u64 *producer_pos_ptr;
	int rb_fd, page_size = getpagesize();

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");

	/* Map the producer_pos as RW. */
	producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				MAP_SHARED, rb_fd, page_size);
	ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");

	/* Map the data pages as RW. */
	data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_OK_PTR(data_ptr, "rw_data");

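	/* Craft a raw record header: the leading __u32 holds the sample length. */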
	memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
	*(__u32 *)data_ptr = size;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
	smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);

	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
	ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");

	ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
	ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
}

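/*
 * Invalid-sample cases: each posts a single hand-crafted bad header and
 * checks both that no sample is read and that the drain reports the expected
 * errno.
 */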
static void test_user_ringbuf_post_misaligned(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5) + 7;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "misaligned_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_producer_wrong_offset(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5);

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "wrong_offset_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = c_ringbuf_size;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "huge_sample_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -E2BIG);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_basic(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_basic_skel"))
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	err = write_samples(ringbuf, 2);
	if (!ASSERT_OK(err, "write_samples"))
		goto cleanup;

	ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

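/*
 * One sample of c_ringbuf_size - BPF_RINGBUF_HDR_SZ bytes exactly fills the
 * ring (payload plus header equals the ring size), exercising the size
 * boundary without overflowing it.
 */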
static void test_user_ringbuf_sample_full_ring_buffer(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	void *sample;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
		return;

	sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
	if (!ASSERT_OK_PTR(sample, "full_sample"))
		goto cleanup;

	user_ring_buffer__submit(ringbuf, sample);
	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_alignment_autoadjust(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	struct sample *sample;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
		return;

	/* libbpf should automatically round any sample up to an 8-byte alignment. */
	sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
	ASSERT_OK_PTR(sample, "reserve_autoaligned");
	user_ring_buffer__submit(ringbuf, sample);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

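/*
 * Ask for five rings' worth of samples in one call. write_samples() only
 * kicks the drain after its reserve loop exits, so reservations must start
 * failing once c_max_entries samples are in flight, and the kernel should
 * then read exactly c_max_entries of them.
 */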
static void test_user_ringbuf_overfill(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	err = write_samples(ringbuf, c_max_entries * 5);
	ASSERT_ERR(err, "write_samples");
	ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_discards_properly_ignored(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_discarded = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		user_ring_buffer__discard(ringbuf, token);
		num_discarded++;
	}

	if (!ASSERT_GE(num_discarded, 0, "num_discarded"))
		goto cleanup;

	/* Should not read any samples, as they are all discarded. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_post_kick");

	/* Now that the ring buffer has been drained, we should be able to
	 * reserve another token.
	 */
	token = user_ring_buffer__reserve(ringbuf, sizeof(*token));

	if (!ASSERT_OK_PTR(token, "new_token"))
		goto cleanup;

	user_ring_buffer__discard(ringbuf, token);
cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

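/*
 * Stream far more samples than the ring can hold by writing them in
 * ring-sized batches, with write_samples() kicking a drain after each batch.
 */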
static void test_user_ringbuf_loop(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	uint32_t total_samples = 8192;
	uint32_t remaining_samples = total_samples;
	int err;

	BUILD_BUG_ON(total_samples <= c_max_entries);
	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	do {
		uint32_t curr_samples;

		curr_samples = remaining_samples > c_max_entries
			? c_max_entries : remaining_samples;
		err = write_samples(ringbuf, curr_samples);
		if (err != 0) {
			/* Assert inside of if statement to avoid flooding logs
			 * on the success path.
			 */
			ASSERT_OK(err, "write_samples");
			goto cleanup;
		}

		remaining_samples -= curr_samples;
		ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
			  "current_batched_entries");
	} while (remaining_samples > 0);
	ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static int send_test_message(struct user_ring_buffer *ringbuf,
			     enum test_msg_op op, s64 operand_64,
			     s32 operand_32)
{
	struct test_msg *msg;

	msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
	if (!msg) {
		/* Assert on the error path to avoid spamming logs with mostly
		 * success messages.
		 */
		ASSERT_OK_PTR(msg, "reserve_msg");
		return -ENOMEM;
	}

	msg->msg_op = op;

	switch (op) {
	case TEST_MSG_OP_INC64:
	case TEST_MSG_OP_MUL64:
		msg->operand_64 = operand_64;
		break;
	case TEST_MSG_OP_INC32:
	case TEST_MSG_OP_MUL32:
		msg->operand_32 = operand_32;
		break;
	default:
		PRINT_FAIL("Invalid operand %d\n", op);
		user_ring_buffer__discard(ringbuf, msg);
		return -EINVAL;
	}

	user_ring_buffer__submit(ringbuf, msg);

	return 0;
}

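/*
 * A second no-op syscall kick, assumed to be hooked by a separate BPF
 * program that drains test_msg messages (as opposed to the sample-draining
 * hook triggered by drain_current_samples()).
 */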
static void kick_kernel_read_messages(void)
{
	syscall(__NR_prctl);
}

static int handle_kernel_msg(void *ctx, void *data, size_t len)
{
	struct user_ringbuf_success *skel = ctx;
	struct test_msg *msg = data;

	switch (msg->msg_op) {
	case TEST_MSG_OP_INC64:
		skel->bss->user_mutated += msg->operand_64;
		return 0;
	case TEST_MSG_OP_INC32:
		skel->bss->user_mutated += msg->operand_32;
		return 0;
	case TEST_MSG_OP_MUL64:
		skel->bss->user_mutated *= msg->operand_64;
		return 0;
	case TEST_MSG_OP_MUL32:
		skel->bss->user_mutated *= msg->operand_32;
		return 0;
	default:
		fprintf(stderr, "Invalid operand %d\n", msg->msg_op);
		return -EINVAL;
	}
}

static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
					 struct user_ringbuf_success *skel)
{
	int cnt;

	cnt = ring_buffer__consume(kern_ringbuf);
	ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
	ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
}

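/*
 * Full bidirectional flow: user space posts test_msg operations through the
 * user ringbuf and the BPF program applies them to kern_mutated; the BPF
 * program is also assumed to publish a batch of messages through the regular
 * kernel ringbuf (eight per kick, per the cnt == 8 expectation above), which
 * handle_kernel_msg() applies to user_mutated. Every eighth iteration both
 * directions are drained and kern_mutated is checked against a locally
 * computed expected value.
 */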
static void test_user_ringbuf_msg_protocol(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *user_ringbuf;
	struct ring_buffer *kern_ringbuf;
	int err, i;
	__u64 expected_kern = 0;

	err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
	if (!ASSERT_OK(err, "create_ringbufs"))
		return;

	for (i = 0; i < 64; i++) {
		enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
		__u64 operand_64 = TEST_OP_64;
		__u32 operand_32 = TEST_OP_32;

		err = send_test_message(user_ringbuf, op, operand_64, operand_32);
		if (err) {
			/* Only assert on a failure to avoid spamming success logs. */
			ASSERT_OK(err, "send_test_message");
			goto cleanup;
		}

		switch (op) {
		case TEST_MSG_OP_INC64:
			expected_kern += operand_64;
			break;
		case TEST_MSG_OP_INC32:
			expected_kern += operand_32;
			break;
		case TEST_MSG_OP_MUL64:
			expected_kern *= operand_64;
			break;
		case TEST_MSG_OP_MUL32:
			expected_kern *= operand_32;
			break;
		default:
			PRINT_FAIL("Unexpected op %d\n", op);
			goto cleanup;
		}

		if (i % 8 == 0) {
			kick_kernel_read_messages();
			ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
			ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
			drain_kernel_messages_buffer(kern_ringbuf, skel);
		}
	}

cleanup:
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void *kick_kernel_cb(void *arg)
{
	/* Kick the kernel, causing it to drain the ring buffer and then wake
	 * up the test thread waiting on epoll.
	 */
	syscall(__NR_prlimit64);

	return NULL;
}

static int spawn_kick_thread_for_poll(void)
{
	pthread_t thread;

	return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
}

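/*
 * user_ring_buffer__reserve_blocking() epoll-waits on the ringbuf map fd
 * until the kernel consumer frees up space or the timeout (in milliseconds)
 * expires. The spawned thread kicks the kernel asynchronously so that this
 * thread, blocked in the second reserve call, is woken by the drain.
 */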
static void test_user_ringbuf_blocking_reserve(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_written = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		*token = 0xdeadbeef;

		user_ring_buffer__submit(ringbuf, token);
		num_written++;
	}

	if (!ASSERT_GE(num_written, 0, "num_written"))
		goto cleanup;

	/* Should not have read any samples until the kernel is kicked. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");

	/* We correctly time out after 1 second, without a sample. */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
	if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
		goto cleanup;

	err = spawn_kick_thread_for_poll();
	if (!ASSERT_EQ(err, 0, "deferred_kick_thread"))
		goto cleanup;

	/* After spawning another thread that asynchronously kicks the kernel to
	 * drain the messages, we're able to block and successfully get a
	 * sample once we receive an event notification.
	 */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);

	if (!ASSERT_OK_PTR(token, "block_token"))
		goto cleanup;

	ASSERT_GT(skel->bss->read, 0, "num_post_kill");
	ASSERT_LE(skel->bss->read, num_written, "num_post_kill");
	ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
	user_ring_buffer__discard(ringbuf, token);

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

#define SUCCESS_TEST(_func) { _func, #_func }

static struct {
	void (*test_callback)(void);
	const char *test_name;
} success_tests[] = {
	SUCCESS_TEST(test_user_ringbuf_mappings),
	SUCCESS_TEST(test_user_ringbuf_post_misaligned),
	SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
	SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
	SUCCESS_TEST(test_user_ringbuf_basic),
	SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
	SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
	SUCCESS_TEST(test_user_ringbuf_overfill),
	SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
	SUCCESS_TEST(test_user_ringbuf_loop),
	SUCCESS_TEST(test_user_ringbuf_msg_protocol),
	SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
};

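/*
 * Entry point: run each success subtest by name, then let RUN_TESTS() load
 * every program in user_ringbuf_fail.bpf.c and verify that the verifier
 * rejects it with the failure message annotated on that program.
 */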
void test_user_ringbuf(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
		if (!test__start_subtest(success_tests[i].test_name))
			continue;

		success_tests[i].test_callback();
	}

	RUN_TESTS(user_ringbuf_fail);
}