// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>
#include <sched.h>
#include <signal.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>

#include "test_ringbuf.lskel.h"
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
#include "test_ringbuf_write.lskel.h"

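/* Sentinel returned from sample callbacks: any negative return value aborts
 * ring_buffer__poll()/ring_buffer__consume() and is propagated back to the
 * caller, so -EDONE signals "saw everything we expected"
 */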
#define EDONE 7777

static int duration = 0;

struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

static int sample_cnt;

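/* sample_cnt is updated from the consumer callback and read/reset by both
 * the main thread and the background poll thread, hence the atomic helpers
 */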
static void atomic_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}

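/* Consumer callback: validates the two expected samples and returns -EDONE
 * after the second one to stop the surrounding poll/consume pass
 */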
static int process_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	atomic_inc(&sample_cnt);

	switch (s->seq) {
	case 0:
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		return 0;
	case 1:
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		return -EDONE;
	default:
		/* we don't care about the rest */
		return 0;
	}
}

static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;

static void trigger_samples(void)
{
	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	/* trigger exactly two samples */
	skel->bss->value = 333;
	syscall(__NR_getpgid);
	skel->bss->value = 777;
	syscall(__NR_getpgid);
}

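/* Background poller: ring_buffer__poll()'s result is returned as the
 * thread's exit value so the main thread can assert on it after joining
 */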
static void *poll_thread(void *input)
{
	long timeout = (long)input;

	return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}

static void ringbuf_write_subtest(void)
{
	struct test_ringbuf_write_lskel *skel;
	int page_size = getpagesize();
	size_t *mmap_ptr;
	int err, rb_fd;

	skel = test_ringbuf_write_lskel__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->maps.ringbuf.max_entries = 0x4000;

	err = test_ringbuf_write_lskel__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;

	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
		goto cleanup;
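	/* advance the consumer position to a bogus value through the
	 * writable cons_pos page; the kernel must not trust it when
	 * granting reservations, so the BPF program is expected to end
	 * up discarding its records (checked below)
	 */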
	*mmap_ptr = 0x3000;
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(rb_fd, process_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ringbuf_new"))
		goto cleanup;

	err = test_ringbuf_write_lskel__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup_ringbuf;

	skel->bss->discarded = 0;
	skel->bss->passed = 0;

	/* trigger exactly two samples */
	syscall(__NR_getpgid);
	syscall(__NR_getpgid);

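	/* both records must have been discarded, none committed */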
	ASSERT_EQ(skel->bss->discarded, 2, "discarded");
	ASSERT_EQ(skel->bss->passed, 0, "passed");

	test_ringbuf_write_lskel__detach(skel);
cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_write_lskel__destroy(skel);
}

static void ringbuf_subtest(void)
{
	const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
	pthread_t thread;
	long bg_ret = -1;
	int err, cnt, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr, *tmp_ptr;
	struct ring *ring;
	int map_fd;
	unsigned long avail_data, ring_size, cons_pos, prod_pos;

	skel = test_ringbuf_lskel__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	skel->maps.ringbuf.max_entries = page_size;

	err = test_ringbuf_lskel__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;
	/* good read/write cons_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
	tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
	if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
		goto cleanup;
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	/* bad writeable prod_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
	ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

	/* bad writeable data pages */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
	ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
	mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

	/* good read-only pages */
	mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

	/* good read-only pages with initial offset */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
				   process_sample, NULL, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = test_ringbuf_lskel__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	trigger_samples();

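	/* grab ring #0 to exercise the per-ring ring__* accessors against
	 * the values reported by the BPF side
	 */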
	ring = ring_buffer__ring(ringbuf, 0);
	if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
		goto cleanup;

	map_fd = ring__map_fd(ring);
	ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");

	/* 2 submitted + 1 discarded records */
	CHECK(skel->bss->avail_data != 3 * rec_sz,
	      "err_avail_size", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->avail_data);
	CHECK(skel->bss->ring_size != page_size,
	      "err_ring_size", "exp %ld, got %ld\n",
	      (long)page_size, skel->bss->ring_size);
	CHECK(skel->bss->cons_pos != 0,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      0L, skel->bss->cons_pos);
	CHECK(skel->bss->prod_pos != 3 * rec_sz,
	      "err_prod_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->prod_pos);

	/* verify getting this data directly via the ring object yields the same
	 * results
	 */
	avail_data = ring__avail_data_size(ring);
	ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
	ring_size = ring__size(ring);
	ASSERT_EQ(ring_size, page_size, "ring_ring_size");
	cons_pos = ring__consumer_pos(ring);
	ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
	prod_pos = ring__producer_pos(ring);
	ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");

	/* poll for samples */
	err = ring_buffer__poll(ringbuf, -1);

	/* -EDONE is used as an indicator that we are done */
	if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* we expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	/* now validate consumer position is updated and returned */
	trigger_samples();
	CHECK(skel->bss->cons_pos != 3 * rec_sz,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->cons_pos);
	err = ring_buffer__poll(ringbuf, -1);
	CHECK(err <= 0, "poll_err", "err %d\n", err);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* start poll in background w/ long timeout */
	err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
	if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
		goto cleanup;

	/* turn off notifications now */
	skel->bss->flags = BPF_RB_NO_WAKEUP;

	/* give background thread a bit of a time */
	usleep(50000);
	trigger_samples();
	/* sleeping arbitrarily is bad, but no better way to know that
	 * epoll_wait() **DID NOT** unblock in background thread
	 */
	usleep(50000);
	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	/* clear flags to return to "adaptive" notification mode */
	skel->bss->flags = 0;

	/* produce new samples, no notification should be triggered, because
	 * consumer is now behind
	 */
	trigger_samples();

	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* still no samples, because consumer is behind */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	skel->bss->value = 333;
	syscall(__NR_getpgid);
	/* now force notifications */
	skel->bss->flags = BPF_RB_FORCE_WAKEUP;
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* now we should get a pending notification */
	usleep(50000);
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err, "join_bg", "err %d\n", err))
		goto cleanup;

	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld\n", bg_ret))
		goto cleanup;

	/* due to timing variations, there could still be non-notified
	 * samples, so consume them here to collect all the samples
	 */
	err = ring_buffer__consume(ringbuf);
	CHECK(err < 0, "rb_consume", "failed: %d\n", err);

	/* also consume using ring__consume to make sure it works the same */
	err = ring__consume(ring);
	ASSERT_GE(err, 0, "ring_consume");

	/* 3 rounds, 2 samples each */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	test_ringbuf_lskel__detach(skel);
cleanup:
	ring_buffer__free(ringbuf);
	test_ringbuf_lskel__destroy(skel);
}

/*
 * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the ring
 * buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
 */
#define N_TOT_SAMPLES	32
#define N_SAMPLES	4

/* Sample value to verify the callback validity */
#define SAMPLE_VALUE	42L

static int process_n_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	ASSERT_EQ(s->value, SAMPLE_VALUE, "sample_value");

	return 0;
}

static void ringbuf_n_subtest(void)
{
	struct test_ringbuf_n_lskel *skel_n;
	int err, i;

	skel_n = test_ringbuf_n_lskel__open();
	if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
		return;

	skel_n->maps.ringbuf.max_entries = getpagesize();
	skel_n->bss->pid = getpid();

	err = test_ringbuf_n_lskel__load(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
				   process_n_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_n_lskel__attach(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
		goto cleanup_ringbuf;

	/* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
	skel_n->bss->value = SAMPLE_VALUE;
	for (i = 0; i < N_TOT_SAMPLES; i++)
		syscall(__NR_getpgid);

	/* Consume all samples from the ring buffer in batches of N_SAMPLES */
	for (i = 0; i < N_TOT_SAMPLES; i += err) {
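		/* ring_buffer__consume_n() returns the number of records
		 * consumed, which also advances the loop counter
		 */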
		err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
		if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
			goto cleanup_ringbuf;
	}

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_n_lskel__destroy(skel_n);
}

static int process_map_key_sample(void *ctx, void *data, size_t len)
{
	struct sample *s;
	int err, val;

	s = data;
	switch (s->seq) {
	case 1:
		ASSERT_EQ(s->value, 42, "sample_value");
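		/* the sample itself is the key under which the BPF program
		 * updated hash_map, so look it up with the same memory
		 */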
		err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
					  s, &val);
		ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
		ASSERT_EQ(val, 1, "hash_map val");
		return -EDONE;
	default:
		return 0;
	}
}

static void ringbuf_map_key_subtest(void)
{
	int err;

	skel_map_key = test_ringbuf_map_key_lskel__open();
	if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
		return;

	skel_map_key->maps.ringbuf.max_entries = getpagesize();
	skel_map_key->bss->pid = getpid();

	err = test_ringbuf_map_key_lskel__load(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
				   process_map_key_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_map_key_lskel__attach(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
		goto cleanup_ringbuf;

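	/* trigger one sample and poll until the callback returns -EDONE */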
	syscall(__NR_getpgid);
	ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
	err = ring_buffer__poll(ringbuf, -1);
	ASSERT_EQ(err, -EDONE, "ring_buffer__poll");

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_map_key_lskel__destroy(skel_map_key);
}

void test_ringbuf(void)
{
	if (test__start_subtest("ringbuf"))
		ringbuf_subtest();
	if (test__start_subtest("ringbuf_n"))
		ringbuf_n_subtest();
	if (test__start_subtest("ringbuf_map_key"))
		ringbuf_map_key_subtest();
	if (test__start_subtest("ringbuf_write"))
		ringbuf_write_subtest();
}