// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <test_progs.h>
#include <uapi/linux/bpf.h>
#include <unistd.h>

#include "user_ringbuf_fail.skel.h"
#include "user_ringbuf_success.skel.h"

#include "../progs/test_user_ringbuf.h"
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;

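/*
 * The BPF programs in the companion object (user_ringbuf_success.c) are
 * attached to several syscall paths; calling getpgid() from this process
 * runs the program that drains the user ring buffer. The exact attach
 * points are defined in the companion BPF object, not here.
 */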
static void drain_current_samples(void)
{
	syscall(__NR_getpgid);
}

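/*
 * Reserve, fill, and submit @num_samples samples through the libbpf user
 * ring buffer API, then kick the kernel so the attached BPF program drains
 * them. Returns 0 on success, or a negative error from the first failed
 * reserve or formatting step.
 */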
static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
{
	int i, err = 0;

	/* Write some number of samples to the ring buffer. */
	for (i = 0; i < num_samples; i++) {
		struct sample *entry;
		int read;

		entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
		if (!entry) {
			err = -errno;
			goto done;
		}

		entry->pid = getpid();
		entry->seq = i;
		entry->value = i * i;

		read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
		if (read <= 0) {
			/* Assert on the error path to avoid spamming logs with
			 * mostly success messages.
			 */
			ASSERT_GT(read, 0, "snprintf_comm");
			err = read;
			user_ring_buffer__discard(ringbuf, entry);
			goto done;
		}

		user_ring_buffer__submit(ringbuf, entry);
	}

done:
	drain_current_samples();

	return err;
}

static struct user_ringbuf_success *open_load_ringbuf_skel(void)
{
	struct user_ringbuf_success *skel;
	int err;

	skel = user_ringbuf_success__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return NULL;

	err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries"))
		goto cleanup;

	err = user_ringbuf_success__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	return skel;

cleanup:
	user_ringbuf_success__destroy(skel);
	return NULL;
}

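/*
 * A user ring buffer map fd exposes three mmap()-able regions: the consumer
 * position page at offset 0, the producer position page at offset
 * page_size, and the data pages starting at offset 2 * page_size. This test
 * verifies the protection bits the kernel enforces on each region.
 */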
static void test_user_ringbuf_mappings(void)
{
	int err, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr;
	struct user_ringbuf_success *skel;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return;

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
	/* cons_pos can be mapped R/O, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "wr_prod_pos");
	err = -errno;
	ASSERT_ERR(err, "wr_prod_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");

	/* prod_pos can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			rb_fd, page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
	err = -errno;
	ASSERT_ERR(err, "wr_prod_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");

	/* data pages can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
			2 * page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_data");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_data_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");

	user_ringbuf_success__destroy(skel);
}

static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
				     struct ring_buffer **kern_ringbuf_out,
				     ring_buffer_sample_fn callback,
				     struct user_ring_buffer **user_ringbuf_out)
{
	struct user_ringbuf_success *skel;
	struct ring_buffer *kern_ringbuf = NULL;
	struct user_ring_buffer *user_ringbuf = NULL;
	int err = -ENOMEM, rb_fd;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return err;

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	if (kern_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
		kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
		if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
			goto cleanup;

		*kern_ringbuf_out = kern_ringbuf;
	}

	if (user_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
		user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
		if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
			goto cleanup;

		*user_ringbuf_out = user_ringbuf;
		ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
	}

	err = user_ringbuf_success__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	*skel_out = skel;
	return 0;

cleanup:
	if (kern_ringbuf_out)
		*kern_ringbuf_out = NULL;
	if (user_ringbuf_out)
		*user_ringbuf_out = NULL;
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
	return err;
}

static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
					 struct user_ring_buffer **ringbuf_out)
{
	return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
}

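/*
 * Forge an invalid sample by writing a record header directly into the
 * mapped data pages and publishing @producer_pos with smp_store_release(),
 * bypassing libbpf entirely. The kernel must refuse to consume the sample
 * and report @err back through the BPF program.
 */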
static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
					       __u32 size, __u64 producer_pos, int err)
{
	void *data_ptr;
	__u64 *producer_pos_ptr;
	int rb_fd, page_size = getpagesize();

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");

	/* Map the producer_pos as RW. */
	producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				MAP_SHARED, rb_fd, page_size);
	ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");

	/* Map the data pages as RW. */
	data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_OK_PTR(data_ptr, "rw_data");

	memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
	*(__u32 *)data_ptr = size;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
	smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);

	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
	ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");

	ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
	ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
}

static void test_user_ringbuf_post_misaligned(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5) + 7;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "misaligned_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_producer_wrong_offset(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5);

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "wrong_offset_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = c_ringbuf_size;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "huge_sample_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -E2BIG);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_basic(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_basic_skel"))
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	err = write_samples(ringbuf, 2);
	if (!ASSERT_OK(err, "write_samples"))
		goto cleanup;

	ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_sample_full_ring_buffer(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	void *sample;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
		return;

	sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
	if (!ASSERT_OK_PTR(sample, "full_sample"))
		goto cleanup;

	user_ring_buffer__submit(ringbuf, sample);
	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_alignment_autoadjust(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	struct sample *sample;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
		return;

	/* libbpf should automatically round any sample up to an 8-byte alignment. */
	sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
	ASSERT_OK_PTR(sample, "reserve_autoaligned");
	user_ring_buffer__submit(ringbuf, sample);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

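/*
 * Attempting to write several times more samples than the buffer can hold
 * must fail: once the buffer is full, user_ring_buffer__reserve() returns
 * NULL with errno set (ENOSPC in current libbpf), and the kernel should
 * have consumed exactly c_max_entries samples by the time we drain.
 */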
static void test_user_ringbuf_overfill(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	err = write_samples(ringbuf, c_max_entries * 5);
	ASSERT_ERR(err, "write_samples");
	ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_discards_properly_ignored(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_discarded = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Reserve tokens until the buffer is full, discarding each one. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		user_ring_buffer__discard(ringbuf, token);
		num_discarded++;
	}

	if (!ASSERT_GE(num_discarded, 0, "num_discarded"))
		goto cleanup;

	/* Should not read any samples, as they are all discarded. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_post_kick");

	/* Now that the ring buffer has been drained, we should be able to
	 * reserve another token.
	 */
	token = user_ring_buffer__reserve(ringbuf, sizeof(*token));

	if (!ASSERT_OK_PTR(token, "new_token"))
		goto cleanup;

	user_ring_buffer__discard(ringbuf, token);
cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

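/*
 * Stream 8192 samples through the single-page buffer in batches of at most
 * c_max_entries, checking after each drained batch that the kernel's read
 * count matches the number of samples submitted so far.
 */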
static void test_user_ringbuf_loop(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	uint32_t total_samples = 8192;
	uint32_t remaining_samples = total_samples;
	int err;

	BUILD_BUG_ON(total_samples <= c_max_entries);
	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	do {
		uint32_t curr_samples;

		curr_samples = remaining_samples > c_max_entries
			? c_max_entries : remaining_samples;
		err = write_samples(ringbuf, curr_samples);
		if (err != 0) {
			/* Assert inside of if statement to avoid flooding logs
			 * on the success path.
			 */
			ASSERT_OK(err, "write_samples");
			goto cleanup;
		}

		remaining_samples -= curr_samples;
		ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
			  "current_batched_entries");
	} while (remaining_samples > 0);
	ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static int send_test_message(struct user_ring_buffer *ringbuf,
			     enum test_msg_op op, s64 operand_64,
			     s32 operand_32)
{
	struct test_msg *msg;

	msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
	if (!msg) {
		/* Assert on the error path to avoid spamming logs with mostly
		 * success messages.
		 */
		ASSERT_OK_PTR(msg, "reserve_msg");
		return -ENOMEM;
	}

	msg->msg_op = op;

	switch (op) {
	case TEST_MSG_OP_INC64:
	case TEST_MSG_OP_MUL64:
		msg->operand_64 = operand_64;
		break;
	case TEST_MSG_OP_INC32:
	case TEST_MSG_OP_MUL32:
		msg->operand_32 = operand_32;
		break;
	default:
		PRINT_FAIL("Invalid operand %d\n", op);
		user_ring_buffer__discard(ringbuf, msg);
		return -EINVAL;
	}

	user_ring_buffer__submit(ringbuf, msg);

	return 0;
}

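/*
 * prctl() is the kick for the message-protocol tests: it triggers the BPF
 * program that reads pending user ring buffer messages and posts its
 * replies to the kernel ring buffer (the attach point, as with the other
 * kicks, lives in the companion BPF object).
 */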
static void kick_kernel_read_messages(void)
{
	syscall(__NR_prctl);
}

static int handle_kernel_msg(void *ctx, void *data, size_t len)
{
	struct user_ringbuf_success *skel = ctx;
	struct test_msg *msg = data;

	switch (msg->msg_op) {
	case TEST_MSG_OP_INC64:
		skel->bss->user_mutated += msg->operand_64;
		return 0;
	case TEST_MSG_OP_INC32:
		skel->bss->user_mutated += msg->operand_32;
		return 0;
	case TEST_MSG_OP_MUL64:
		skel->bss->user_mutated *= msg->operand_64;
		return 0;
	case TEST_MSG_OP_MUL32:
		skel->bss->user_mutated *= msg->operand_32;
		return 0;
	default:
		fprintf(stderr, "Invalid operand %d\n", msg->msg_op);
		return -EINVAL;
	}
}

static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
					 struct user_ringbuf_success *skel)
{
	int cnt;

	cnt = ring_buffer__consume(kern_ringbuf);
	ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
	ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
}

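/*
 * End-to-end message protocol: user space posts INC/MUL operations into the
 * user ring buffer, the BPF program applies them to a kernel-side counter
 * that is checked against expected_kern every eighth message, and replies
 * over the kernel ring buffer, which handle_kernel_msg() consumes.
 */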
static void test_user_ringbuf_msg_protocol(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *user_ringbuf;
	struct ring_buffer *kern_ringbuf;
	int err, i;
	__u64 expected_kern = 0;

	err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
	if (!ASSERT_OK(err, "create_ringbufs"))
		return;

	for (i = 0; i < 64; i++) {
		enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
		__u64 operand_64 = TEST_OP_64;
		__u32 operand_32 = TEST_OP_32;

		err = send_test_message(user_ringbuf, op, operand_64, operand_32);
		if (err) {
			/* Only assert on a failure to avoid spamming success logs. */
			ASSERT_OK(err, "send_test_message");
			goto cleanup;
		}

		switch (op) {
		case TEST_MSG_OP_INC64:
			expected_kern += operand_64;
			break;
		case TEST_MSG_OP_INC32:
			expected_kern += operand_32;
			break;
		case TEST_MSG_OP_MUL64:
			expected_kern *= operand_64;
			break;
		case TEST_MSG_OP_MUL32:
			expected_kern *= operand_32;
			break;
		default:
			PRINT_FAIL("Unexpected op %d\n", op);
			goto cleanup;
		}

		if (i % 8 == 0) {
			kick_kernel_read_messages();
			ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
			ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
			drain_kernel_messages_buffer(kern_ringbuf, skel);
		}
	}

cleanup:
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
}

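/*
 * user_ring_buffer__reserve_blocking() waits on epoll for the kernel to
 * free up space. A helper thread issues a syscall (prlimit64 here) that
 * runs the draining BPF program, which in turn wakes the blocked reserve.
 */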
static void *kick_kernel_cb(void *arg)
{
	/* Kick the kernel, causing it to drain the ring buffer and then wake
	 * up the test thread waiting on epoll.
	 */
	syscall(__NR_prlimit64);

	return NULL;
}

static int spawn_kick_thread_for_poll(void)
{
	pthread_t thread;

	return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
}

static void test_user_ringbuf_blocking_reserve(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_written = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		*token = 0xdeadbeef;

		user_ring_buffer__submit(ringbuf, token);
		num_written++;
	}

	if (!ASSERT_GE(num_written, 0, "num_written"))
		goto cleanup;

	/* Should not have read any samples until the kernel is kicked. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");

	/* We correctly time out after 1 second, without a sample. */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
	if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
		goto cleanup;

	err = spawn_kick_thread_for_poll();
	if (!ASSERT_EQ(err, 0, "deferred_kick_thread\n"))
		goto cleanup;

	/* After spawning another thread that asynchronously kicks the kernel to
	 * drain the messages, we're able to block and successfully get a
	 * sample once we receive an event notification.
	 */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);

	if (!ASSERT_OK_PTR(token, "block_token"))
		goto cleanup;

	ASSERT_GT(skel->bss->read, 0, "num_post_kill");
	ASSERT_LE(skel->bss->read, num_written, "num_post_kill");
	ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
	user_ring_buffer__discard(ringbuf, token);

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

#define SUCCESS_TEST(_func) { _func, #_func }

static struct {
	void (*test_callback)(void);
	const char *test_name;
} success_tests[] = {
	SUCCESS_TEST(test_user_ringbuf_mappings),
	SUCCESS_TEST(test_user_ringbuf_post_misaligned),
	SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
	SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
	SUCCESS_TEST(test_user_ringbuf_basic),
	SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
	SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
	SUCCESS_TEST(test_user_ringbuf_overfill),
	SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
	SUCCESS_TEST(test_user_ringbuf_loop),
	SUCCESS_TEST(test_user_ringbuf_msg_protocol),
	SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
};

void test_user_ringbuf(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
		if (!test__start_subtest(success_tests[i].test_name))
			continue;

		success_tests[i].test_callback();
	}

	RUN_TESTS(user_ringbuf_fail);
}