// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
#include <linux/build_bug.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <test_progs.h>
#include <uapi/linux/bpf.h>
#include <unistd.h>

#include "user_ringbuf_fail.skel.h"
#include "user_ringbuf_success.skel.h"

#include "../progs/test_user_ringbuf.h"

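/* Ring geometry used throughout: each posted sample is preceded by a
 * BPF_RINGBUF_HDR_SZ header, so a single 4KB ring holds c_max_entries
 * samples before reservations start failing.
 */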
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;

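/* Kick the kernel into draining any pending user ring buffer samples. The
 * BPF programs under test are attached to benign syscalls such as this
 * one, so merely invoking the syscall runs the consumer.
 */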
static void drain_current_samples(void)
{
	syscall(__NR_getpgid);
}

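/* Reserve, fill, and submit num_samples samples, then kick the kernel once
 * to drain them. Returns 0 on success, or an error if a reservation or
 * snprintf() fails partway through.
 */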
static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
{
	int i, err = 0;

	/* Write some number of samples to the ring buffer. */
	for (i = 0; i < num_samples; i++) {
		struct sample *entry;
		int read;

		entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
		if (!entry) {
			err = -errno;
			goto done;
		}

		entry->pid = getpid();
		entry->seq = i;
		entry->value = i * i;

		read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
		if (read <= 0) {
			/* Assert on the error path to avoid spamming logs with
			 * mostly success messages.
			 */
			ASSERT_GT(read, 0, "snprintf_comm");
			err = read;
			user_ring_buffer__discard(ringbuf, entry);
			goto done;
		}

		user_ring_buffer__submit(ringbuf, entry);
	}

done:
	drain_current_samples();

	return err;
}

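/* Open the success skeleton, shrink both ring buffer maps to a single
 * page, and load it. Returns NULL (after destroying the skeleton) on any
 * failure.
 */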
static struct user_ringbuf_success *open_load_ringbuf_skel(void)
{
	struct user_ringbuf_success *skel;
	int err;

	skel = user_ringbuf_success__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return NULL;

	err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
	if (!ASSERT_OK(err, "set_max_entries"))
		goto cleanup;

	err = user_ringbuf_success__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	return skel;

cleanup:
	user_ringbuf_success__destroy(skel);
	return NULL;
}

static void test_user_ringbuf_mappings(void)
{
	int err, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr;
	struct user_ringbuf_success *skel;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return;

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
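	/* The user ring buffer map exposes three mappable regions: the
	 * consumer position page at offset 0, the producer position page at
	 * offset page_size, and the data pages starting at 2 * page_size.
	 * Verify the protection rules for each region below.
	 */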
	/* cons_pos can be mapped R/O, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "remap_cons_pos");
	err = -errno;
	ASSERT_ERR(err, "remap_cons_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");

	/* prod_pos can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
			rb_fd, page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_prod_pos_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");

	/* data pages can be mapped RW, can't add +X with mprotect. */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
			2 * page_size);
	ASSERT_OK_PTR(mmap_ptr, "rw_data");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
	err = -errno;
	ASSERT_ERR(err, "exec_data_err");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");

	user_ringbuf_success__destroy(skel);
}

static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
				     struct ring_buffer **kern_ringbuf_out,
				     ring_buffer_sample_fn callback,
				     struct user_ring_buffer **user_ringbuf_out)
{
	struct user_ringbuf_success *skel;
	struct ring_buffer *kern_ringbuf = NULL;
	struct user_ring_buffer *user_ringbuf = NULL;
	int err = -ENOMEM, rb_fd;

	skel = open_load_ringbuf_skel();
	if (!skel)
		return err;

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	if (kern_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
		kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
		if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
			goto cleanup;

		*kern_ringbuf_out = kern_ringbuf;
	}

	if (user_ringbuf_out) {
		rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
		user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
		if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
			goto cleanup;

		*user_ringbuf_out = user_ringbuf;
		ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
	}

	err = user_ringbuf_success__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup;

	*skel_out = skel;
	return 0;

cleanup:
	if (kern_ringbuf_out)
		*kern_ringbuf_out = NULL;
	if (user_ringbuf_out)
		*user_ringbuf_out = NULL;
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
	return err;
}

static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
					 struct user_ring_buffer **ringbuf_out)
{
	return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
}

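/* Bypass libbpf entirely: map the producer position and data pages
 * directly, forge a sample header with the given size, and advance the
 * producer position by hand. The kernel is then expected to reject the
 * sample with the given err without reading it.
 */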
static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
					       __u32 size, __u64 producer_pos, int err)
{
	void *data_ptr;
	__u64 *producer_pos_ptr;
	int rb_fd, page_size = getpagesize();

	rb_fd = bpf_map__fd(skel->maps.user_ringbuf);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");

	/* Map the producer_pos as RW. */
	producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
				MAP_SHARED, rb_fd, page_size);
	ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");

	/* Map the data pages as RW. */
	data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_OK_PTR(data_ptr, "rw_data");

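	/* Forge a raw sample header by hand: zero the header bytes, then
	 * store the (intentionally invalid) sample size in its length field.
	 */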
	memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
	*(__u32 *)data_ptr = size;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
	smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);

	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
	ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");

	ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
	ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
}

static void test_user_ringbuf_post_misaligned(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5) + 7;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "misaligned_skel"))
		return;

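	/* A misaligned sample size (and thus a misaligned producer position)
	 * should be rejected by the kernel with -EINVAL.
	 */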
	manually_write_test_invalid_sample(skel, size, size, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_producer_wrong_offset(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = (1 << 5);

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "wrong_offset_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	__u32 size = c_ringbuf_size;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "huge_sample_skel"))
		return;

	manually_write_test_invalid_sample(skel, size, size, -E2BIG);
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_basic(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_basic_skel"))
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	err = write_samples(ringbuf, 2);
	if (!ASSERT_OK(err, "write_samples"))
		goto cleanup;

	ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_sample_full_ring_buffer(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;
	void *sample;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
		return;

	sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
	if (!ASSERT_OK_PTR(sample, "full_sample"))
		goto cleanup;

	user_ring_buffer__submit(ringbuf, sample);
	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_alignment_autoadjust(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	struct sample *sample;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
		return;

	/* libbpf should automatically round any sample up to an 8-byte alignment. */
	sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
	ASSERT_OK_PTR(sample, "reserve_autoaligned");
	user_ring_buffer__submit(ringbuf, sample);

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_overfill(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

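	/* write_samples() only kicks the kernel after its final write, so
	 * reservations must start failing once the ring holds c_max_entries
	 * samples; the trailing kick then drains exactly that many.
	 */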
	err = write_samples(ringbuf, c_max_entries * 5);
	ASSERT_ERR(err, "write_samples");
	ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");

	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_discards_properly_ignored(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_discarded = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		user_ring_buffer__discard(ringbuf, token);
		num_discarded++;
	}

	if (!ASSERT_GE(num_discarded, 0, "num_discarded"))
		goto cleanup;

	/* Should not read any samples, as they are all discarded. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
	drain_current_samples();
	ASSERT_EQ(skel->bss->read, 0, "num_post_kick");

	/* Now that the ring buffer has been drained, we should be able to
	 * reserve another token.
	 */
	token = user_ring_buffer__reserve(ringbuf, sizeof(*token));

	if (!ASSERT_OK_PTR(token, "new_token"))
		goto cleanup;

	user_ring_buffer__discard(ringbuf, token);
cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_loop(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	uint32_t total_samples = 8192;
	uint32_t remaining_samples = total_samples;
	int err;

	BUILD_BUG_ON(total_samples <= c_max_entries);
	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	do {
		uint32_t curr_samples;

		curr_samples = remaining_samples > c_max_entries
			? c_max_entries : remaining_samples;
		err = write_samples(ringbuf, curr_samples);
		if (err != 0) {
			/* Assert inside of if statement to avoid flooding logs
			 * on the success path.
			 */
			ASSERT_OK(err, "write_samples");
			goto cleanup;
		}

		remaining_samples -= curr_samples;
		ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
			  "current_batched_entries");
	} while (remaining_samples > 0);
	ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

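/* Post a single message to the user ring buffer for the BPF program to
 * consume. The 64-bit or 32-bit operand field is chosen based on the op.
 */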
static int send_test_message(struct user_ring_buffer *ringbuf,
			     enum test_msg_op op, s64 operand_64,
			     s32 operand_32)
{
	struct test_msg *msg;

	msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
	if (!msg) {
		/* Assert on the error path to avoid spamming logs with mostly
		 * success messages.
		 */
		ASSERT_OK_PTR(msg, "reserve_msg");
		return -ENOMEM;
	}

	msg->msg_op = op;

	switch (op) {
	case TEST_MSG_OP_INC64:
	case TEST_MSG_OP_MUL64:
		msg->operand_64 = operand_64;
		break;
	case TEST_MSG_OP_INC32:
	case TEST_MSG_OP_MUL32:
		msg->operand_32 = operand_32;
		break;
	default:
		PRINT_FAIL("Invalid operation %d\n", op);
		user_ring_buffer__discard(ringbuf, msg);
		return -EINVAL;
	}

	user_ring_buffer__submit(ringbuf, msg);

	return 0;
}

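/* Kick the kernel into reading any posted messages via another syscall
 * that the BPF program under test is attached to.
 */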
static void kick_kernel_read_messages(void)
{
	syscall(__NR_prctl);
}

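/* ring_buffer__consume() callback for messages the BPF program posts back
 * on the kernel ring buffer; applies each op to the shared user_mutated
 * counter.
 */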
static int handle_kernel_msg(void *ctx, void *data, size_t len)
{
	struct user_ringbuf_success *skel = ctx;
	struct test_msg *msg = data;

	switch (msg->msg_op) {
	case TEST_MSG_OP_INC64:
		skel->bss->user_mutated += msg->operand_64;
		return 0;
	case TEST_MSG_OP_INC32:
		skel->bss->user_mutated += msg->operand_32;
		return 0;
	case TEST_MSG_OP_MUL64:
		skel->bss->user_mutated *= msg->operand_64;
		return 0;
	case TEST_MSG_OP_MUL32:
		skel->bss->user_mutated *= msg->operand_32;
		return 0;
	default:
		fprintf(stderr, "Invalid operation %d\n", msg->msg_op);
		return -EINVAL;
	}
}

static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
					 struct user_ringbuf_success *skel)
{
	int cnt;

	cnt = ring_buffer__consume(kern_ringbuf);
	ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
	ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
}

static void test_user_ringbuf_msg_protocol(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *user_ringbuf;
	struct ring_buffer *kern_ringbuf;
	int err, i;
	__u64 expected_kern = 0;

	err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
	if (!ASSERT_OK(err, "create_ringbufs"))
		return;

	for (i = 0; i < 64; i++) {
		enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
		__u64 operand_64 = TEST_OP_64;
		__u32 operand_32 = TEST_OP_32;

		err = send_test_message(user_ringbuf, op, operand_64, operand_32);
		if (err) {
			/* Only assert on a failure to avoid spamming success logs. */
			ASSERT_OK(err, "send_test_message");
			goto cleanup;
		}

		switch (op) {
		case TEST_MSG_OP_INC64:
			expected_kern += operand_64;
			break;
		case TEST_MSG_OP_INC32:
			expected_kern += operand_32;
			break;
		case TEST_MSG_OP_MUL64:
			expected_kern *= operand_64;
			break;
		case TEST_MSG_OP_MUL32:
			expected_kern *= operand_32;
			break;
		default:
			PRINT_FAIL("Unexpected op %d\n", op);
			goto cleanup;
		}

		if (i % 8 == 0) {
			kick_kernel_read_messages();
			ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
			ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
			drain_kernel_messages_buffer(kern_ringbuf, skel);
		}
	}

cleanup:
	ring_buffer__free(kern_ringbuf);
	user_ring_buffer__free(user_ringbuf);
	user_ringbuf_success__destroy(skel);
}

static void *kick_kernel_cb(void *arg)
{
	/* Kick the kernel, causing it to drain the ring buffer and then wake
	 * up the test thread waiting on epoll.
	 */
	syscall(__NR_prlimit64);

	return NULL;
}

static int spawn_kick_thread_for_poll(void)
{
	pthread_t thread;

	return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
}

static void test_user_ringbuf_blocking_reserve(void)
{
	struct user_ringbuf_success *skel;
	struct user_ring_buffer *ringbuf;
	int err, num_written = 0;
	__u64 *token;

	err = load_skel_create_user_ringbuf(&skel, &ringbuf);
	if (err)
		return;

	ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

	while (1) {
		/* Write samples until the buffer is full. */
		token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
		if (!token)
			break;

		*token = 0xdeadbeef;

		user_ring_buffer__submit(ringbuf, token);
		num_written++;
	}

	if (!ASSERT_GE(num_written, 0, "num_written"))
		goto cleanup;

	/* Should not have read any samples until the kernel is kicked. */
	ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");

	/* We correctly time out after 1 second, without a sample. */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
	if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
		goto cleanup;

	err = spawn_kick_thread_for_poll();
	if (!ASSERT_EQ(err, 0, "deferred_kick_thread"))
		goto cleanup;

	/* After spawning another thread that asynchronously kicks the kernel to
	 * drain the messages, we're able to block and successfully get a
	 * sample once we receive an event notification.
	 */
	token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);

	if (!ASSERT_OK_PTR(token, "block_token"))
		goto cleanup;

	ASSERT_GT(skel->bss->read, 0, "num_post_kick");
	ASSERT_LE(skel->bss->read, num_written, "num_post_kick");
	ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
	user_ring_buffer__discard(ringbuf, token);

cleanup:
	user_ring_buffer__free(ringbuf);
	user_ringbuf_success__destroy(skel);
}

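/* Pair each subtest function with its stringified name for
 * test__start_subtest() registration.
 */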
#define SUCCESS_TEST(_func) { _func, #_func }

static struct {
	void (*test_callback)(void);
	const char *test_name;
} success_tests[] = {
	SUCCESS_TEST(test_user_ringbuf_mappings),
	SUCCESS_TEST(test_user_ringbuf_post_misaligned),
	SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
	SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
	SUCCESS_TEST(test_user_ringbuf_basic),
	SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
	SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
	SUCCESS_TEST(test_user_ringbuf_overfill),
	SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
	SUCCESS_TEST(test_user_ringbuf_loop),
	SUCCESS_TEST(test_user_ringbuf_msg_protocol),
	SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
};

void test_user_ringbuf(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
		if (!test__start_subtest(success_tests[i].test_name))
			continue;

		success_tests[i].test_callback();
	}

	RUN_TESTS(user_ringbuf_fail);
}