// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/*
 * Ring buffer operations.
 *
 * Copyright (C) 2020 Facebook, Inc.
 */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <linux/err.h>
#include <linux/bpf.h>
#include <asm/barrier.h>
#include <sys/mman.h>
#include <sys/epoll.h>
#include <time.h>

#include "libbpf.h"
#include "libbpf_internal.h"
#include "bpf.h"

struct ring {
	ring_buffer_sample_fn sample_cb;
	void *ctx;
	void *data;
	unsigned long *consumer_pos;
	unsigned long *producer_pos;
	unsigned long mask;
	int map_fd;
};

struct ring_buffer {
	struct epoll_event *events;
	struct ring *rings;
	size_t page_size;
	int epoll_fd;
	int ring_cnt;
};

struct user_ring_buffer {
	struct epoll_event event;
	unsigned long *consumer_pos;
	unsigned long *producer_pos;
	void *data;
	unsigned long mask;
	size_t page_size;
	int map_fd;
	int epoll_fd;
};

/* 8-byte ring buffer header structure */
struct ringbuf_hdr {
	__u32 len;
	__u32 pad;
};

static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)
{
	if (r->consumer_pos) {
		munmap(r->consumer_pos, rb->page_size);
		r->consumer_pos = NULL;
	}
	if (r->producer_pos) {
		munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));
		r->producer_pos = NULL;
	}
}

/* Add extra RINGBUF maps to this ring buffer manager */
int ring_buffer__add(struct ring_buffer *rb, int map_fd,
		     ring_buffer_sample_fn sample_cb, void *ctx)
{
	struct bpf_map_info info;
	__u32 len = sizeof(info);
	struct epoll_event *e;
	struct ring *r;
	__u64 mmap_sz;
	void *tmp;
	int err;

	memset(&info, 0, sizeof(info));

	err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
	if (err) {
		err = -errno;
		pr_warn("ringbuf: failed to get map info for fd=%d: %d\n",
			map_fd, err);
		return libbpf_err(err);
	}

	if (info.type != BPF_MAP_TYPE_RINGBUF) {
		pr_warn("ringbuf: map fd=%d is not BPF_MAP_TYPE_RINGBUF\n",
			map_fd);
		return libbpf_err(-EINVAL);
	}

	tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));
	if (!tmp)
		return libbpf_err(-ENOMEM);
	rb->rings = tmp;

	tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));
	if (!tmp)
		return libbpf_err(-ENOMEM);
	rb->events = tmp;

	r = &rb->rings[rb->ring_cnt];
	memset(r, 0, sizeof(*r));

	r->map_fd = map_fd;
	r->sample_cb = sample_cb;
	r->ctx = ctx;
	r->mask = info.max_entries - 1;

	/* Map writable consumer page */
	tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
	if (tmp == MAP_FAILED) {
		err = -errno;
		pr_warn("ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
			map_fd, err);
		return libbpf_err(err);
	}
	r->consumer_pos = tmp;

	/* Map read-only producer page and data pages. We map twice as big
	 * data size to allow simple reading of samples that wrap around the
	 * end of a ring buffer. See kernel implementation for details.
	 */
	mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
	if (mmap_sz != (__u64)(size_t)mmap_sz) {
		pr_warn("ringbuf: ring buffer size (%u) is too big\n", info.max_entries);
		return libbpf_err(-E2BIG);
	}
	tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size);
	if (tmp == MAP_FAILED) {
		err = -errno;
		ringbuf_unmap_ring(rb, r);
		pr_warn("ringbuf: failed to mmap data pages for map fd=%d: %d\n",
			map_fd, err);
		return libbpf_err(err);
	}
	r->producer_pos = tmp;
	r->data = tmp + rb->page_size;

	e = &rb->events[rb->ring_cnt];
	memset(e, 0, sizeof(*e));

	e->events = EPOLLIN;
	e->data.fd = rb->ring_cnt;
	if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, e) < 0) {
		err = -errno;
		ringbuf_unmap_ring(rb, r);
		pr_warn("ringbuf: failed to epoll add map fd=%d: %d\n",
			map_fd, err);
		return libbpf_err(err);
	}

	rb->ring_cnt++;
	return 0;
}

void ring_buffer__free(struct ring_buffer *rb)
{
	int i;

	if (!rb)
		return;

	for (i = 0; i < rb->ring_cnt; ++i)
		ringbuf_unmap_ring(rb, &rb->rings[i]);
	if (rb->epoll_fd >= 0)
		close(rb->epoll_fd);

	free(rb->events);
	free(rb->rings);
	free(rb);
}

struct ring_buffer *
ring_buffer__new(int map_fd, ring_buffer_sample_fn sample_cb, void *ctx,
		 const struct ring_buffer_opts *opts)
{
	struct ring_buffer *rb;
	int err;

	if (!OPTS_VALID(opts, ring_buffer_opts))
		return errno = EINVAL, NULL;

	rb = calloc(1, sizeof(*rb));
	if (!rb)
		return errno = ENOMEM, NULL;

	rb->page_size = getpagesize();

	rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (rb->epoll_fd < 0) {
		err = -errno;
		pr_warn("ringbuf: failed to create epoll instance: %d\n", err);
		goto err_out;
	}

	err = ring_buffer__add(rb, map_fd, sample_cb, ctx);
	if (err)
		goto err_out;

	return rb;

err_out:
	ring_buffer__free(rb);
	return errno = -err, NULL;
}
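
/* Illustrative consumer-side usage sketch, not part of libbpf itself: one
 * callback per BPF_MAP_TYPE_RINGBUF map, polled in a loop. The map fd,
 * callback, and stop flag are hypothetical, and the block is guarded by a
 * hypothetical RINGBUF_USAGE_EXAMPLE macro so it is not normally compiled.
 */
#ifdef RINGBUF_USAGE_EXAMPLE
static volatile int example_stop;

static int example_handle_sample(void *ctx, void *data, size_t size)
{
	/* Process one committed sample of 'size' bytes; returning a negative
	 * value aborts consumption and is propagated out of
	 * ring_buffer__poll()/ring_buffer__consume().
	 */
	return 0;
}

static int example_consume(int map_fd)
{
	struct ring_buffer *rb;
	int err = 0;

	rb = ring_buffer__new(map_fd, example_handle_sample, NULL, NULL);
	if (!rb)
		return -errno;

	/* Additional RINGBUF maps could share this manager via ring_buffer__add(). */
	while (!example_stop) {
		/* Returns the number of records consumed, 0 on timeout
		 * (100 ms here), or a negative error such as -EINTR.
		 */
		err = ring_buffer__poll(rb, 100);
		if (err < 0 && err != -EINTR)
			break;
	}

	ring_buffer__free(rb);
	return err < 0 ? err : 0;
}
#endif /* RINGBUF_USAGE_EXAMPLE */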

static inline int roundup_len(__u32 len)
{
	/* clear out top 2 bits (discard and busy, if set) */
	len <<= 2;
	len >>= 2;
	/* add length prefix */
	len += BPF_RINGBUF_HDR_SZ;
	/* round up to 8 byte alignment */
	return (len + 7) / 8 * 8;
}
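
/* Worked example (illustrative): an 11-byte sample is stored with an 8-byte
 * header (11 + 8 = 19) and rounded up to the next multiple of 8, so
 * roundup_len(11) == 24 bytes consumed in the ring.
 */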

static int64_t ringbuf_process_ring(struct ring *r)
{
	int *len_ptr, len, err;
	/* 64-bit to avoid overflow in case of extreme application behavior */
	int64_t cnt = 0;
	unsigned long cons_pos, prod_pos;
	bool got_new_data;
	void *sample;

	cons_pos = smp_load_acquire(r->consumer_pos);
	do {
		got_new_data = false;
		prod_pos = smp_load_acquire(r->producer_pos);
		while (cons_pos < prod_pos) {
			len_ptr = r->data + (cons_pos & r->mask);
			len = smp_load_acquire(len_ptr);

			/* sample not committed yet, bail out for now */
			if (len & BPF_RINGBUF_BUSY_BIT)
				goto done;

			got_new_data = true;
			cons_pos += roundup_len(len);

			if ((len & BPF_RINGBUF_DISCARD_BIT) == 0) {
				sample = (void *)len_ptr + BPF_RINGBUF_HDR_SZ;
				err = r->sample_cb(r->ctx, sample, len);
				if (err < 0) {
					/* update consumer pos and bail out */
					smp_store_release(r->consumer_pos,
							  cons_pos);
					return err;
				}
				cnt++;
			}

			smp_store_release(r->consumer_pos, cons_pos);
		}
	} while (got_new_data);
done:
	return cnt;
}

/* Consume available ring buffer(s) data without event polling.
 * Returns number of records consumed across all registered ring buffers (or
 * INT_MAX, whichever is less), or negative number if any of the callbacks
 * return error.
 */
int ring_buffer__consume(struct ring_buffer *rb)
{
	int64_t err, res = 0;
	int i;

	for (i = 0; i < rb->ring_cnt; i++) {
		struct ring *ring = &rb->rings[i];

		err = ringbuf_process_ring(ring);
		if (err < 0)
			return libbpf_err(err);
		res += err;
	}
	if (res > INT_MAX)
		return INT_MAX;
	return res;
}

/* Poll for available data and consume records, if any are available.
 * Returns number of records consumed (or INT_MAX, whichever is less), or
 * negative number, if any of the registered callbacks returned error.
 */
int ring_buffer__poll(struct ring_buffer *rb, int timeout_ms)
{
	int i, cnt;
	int64_t err, res = 0;

	cnt = epoll_wait(rb->epoll_fd, rb->events, rb->ring_cnt, timeout_ms);
	if (cnt < 0)
		return libbpf_err(-errno);

	for (i = 0; i < cnt; i++) {
		__u32 ring_id = rb->events[i].data.fd;
		struct ring *ring = &rb->rings[ring_id];

		err = ringbuf_process_ring(ring);
		if (err < 0)
			return libbpf_err(err);
		res += err;
	}
	if (res > INT_MAX)
		return INT_MAX;
	return res;
}

/* Get an fd that can be used to sleep until data is available in the ring(s) */
int ring_buffer__epoll_fd(const struct ring_buffer *rb)
{
	return rb->epoll_fd;
}
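
/* Illustrative sketch, not part of libbpf: driving consumption from an
 * application-owned epoll loop via ring_buffer__epoll_fd() instead of
 * ring_buffer__poll(). The outer epoll fd is hypothetical; guarded by the
 * same hypothetical RINGBUF_USAGE_EXAMPLE macro as the sketches above.
 */
#ifdef RINGBUF_USAGE_EXAMPLE
static int example_external_loop(struct ring_buffer *rb, int outer_epoll_fd)
{
	struct epoll_event ev = { .events = EPOLLIN };
	struct epoll_event got;
	int n;

	/* Register libbpf's epoll fd with the application's own epoll set. */
	ev.data.fd = ring_buffer__epoll_fd(rb);
	if (epoll_ctl(outer_epoll_fd, EPOLL_CTL_ADD, ev.data.fd, &ev) < 0)
		return -errno;

	for (;;) {
		n = epoll_wait(outer_epoll_fd, &got, 1, -1);
		if (n < 0)
			return errno == EINTR ? 0 : -errno;

		if (got.data.fd == ring_buffer__epoll_fd(rb)) {
			/* Drain whatever is ready without blocking further. */
			n = ring_buffer__consume(rb);
			if (n < 0)
				return n;
		}
		/* ... dispatch any other application fds here ... */
	}
}
#endif /* RINGBUF_USAGE_EXAMPLE */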

static void user_ringbuf_unmap_ring(struct user_ring_buffer *rb)
{
	if (rb->consumer_pos) {
		munmap(rb->consumer_pos, rb->page_size);
		rb->consumer_pos = NULL;
	}
	if (rb->producer_pos) {
		munmap(rb->producer_pos, rb->page_size + 2 * (rb->mask + 1));
		rb->producer_pos = NULL;
	}
}

void user_ring_buffer__free(struct user_ring_buffer *rb)
{
	if (!rb)
		return;

	user_ringbuf_unmap_ring(rb);

	if (rb->epoll_fd >= 0)
		close(rb->epoll_fd);

	free(rb);
}

static int user_ringbuf_map(struct user_ring_buffer *rb, int map_fd)
{
	struct bpf_map_info info;
	__u32 len = sizeof(info);
	__u64 mmap_sz;
	void *tmp;
	struct epoll_event *rb_epoll;
	int err;

	memset(&info, 0, sizeof(info));

	err = bpf_obj_get_info_by_fd(map_fd, &info, &len);
	if (err) {
		err = -errno;
		pr_warn("user ringbuf: failed to get map info for fd=%d: %d\n", map_fd, err);
		return err;
	}

	if (info.type != BPF_MAP_TYPE_USER_RINGBUF) {
		pr_warn("user ringbuf: map fd=%d is not BPF_MAP_TYPE_USER_RINGBUF\n", map_fd);
		return -EINVAL;
	}

	rb->map_fd = map_fd;
	rb->mask = info.max_entries - 1;

	/* Map read-only consumer page */
	tmp = mmap(NULL, rb->page_size, PROT_READ, MAP_SHARED, map_fd, 0);
	if (tmp == MAP_FAILED) {
		err = -errno;
		pr_warn("user ringbuf: failed to mmap consumer page for map fd=%d: %d\n",
			map_fd, err);
		return err;
	}
	rb->consumer_pos = tmp;

	/* Map read-write the producer page and data pages. We map the data
	 * region as twice the total size of the ring buffer to allow the
	 * simple reading and writing of samples that wrap around the end of
	 * the buffer.  See the kernel implementation for details.
	 */
	mmap_sz = rb->page_size + 2 * (__u64)info.max_entries;
	if (mmap_sz != (__u64)(size_t)mmap_sz) {
		pr_warn("user ringbuf: ring buf size (%u) is too big\n", info.max_entries);
		return -E2BIG;
	}
	tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ | PROT_WRITE, MAP_SHARED,
		   map_fd, rb->page_size);
	if (tmp == MAP_FAILED) {
		err = -errno;
		pr_warn("user ringbuf: failed to mmap data pages for map fd=%d: %d\n",
			map_fd, err);
		return err;
	}

	rb->producer_pos = tmp;
	rb->data = tmp + rb->page_size;

	rb_epoll = &rb->event;
	rb_epoll->events = EPOLLOUT;
	if (epoll_ctl(rb->epoll_fd, EPOLL_CTL_ADD, map_fd, rb_epoll) < 0) {
		err = -errno;
		pr_warn("user ringbuf: failed to epoll add map fd=%d: %d\n", map_fd, err);
		return err;
	}

	return 0;
}

struct user_ring_buffer *
user_ring_buffer__new(int map_fd, const struct user_ring_buffer_opts *opts)
{
	struct user_ring_buffer *rb;
	int err;

	if (!OPTS_VALID(opts, user_ring_buffer_opts))
		return errno = EINVAL, NULL;

	rb = calloc(1, sizeof(*rb));
	if (!rb)
		return errno = ENOMEM, NULL;

	rb->page_size = getpagesize();

	rb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (rb->epoll_fd < 0) {
		err = -errno;
		pr_warn("user ringbuf: failed to create epoll instance: %d\n", err);
		goto err_out;
	}

	err = user_ringbuf_map(rb, map_fd);
	if (err)
		goto err_out;

	return rb;

err_out:
	user_ring_buffer__free(rb);
	return errno = -err, NULL;
}

static void user_ringbuf_commit(struct user_ring_buffer *rb, void *sample, bool discard)
{
	__u32 new_len;
	struct ringbuf_hdr *hdr;
	uintptr_t hdr_offset;

	hdr_offset = rb->mask + 1 + (sample - rb->data) - BPF_RINGBUF_HDR_SZ;
	hdr = rb->data + (hdr_offset & rb->mask);

	new_len = hdr->len & ~BPF_RINGBUF_BUSY_BIT;
	if (discard)
		new_len |= BPF_RINGBUF_DISCARD_BIT;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
	 * the kernel.
	 */
	__atomic_exchange_n(&hdr->len, new_len, __ATOMIC_ACQ_REL);
}

void user_ring_buffer__discard(struct user_ring_buffer *rb, void *sample)
{
	user_ringbuf_commit(rb, sample, true);
}

void user_ring_buffer__submit(struct user_ring_buffer *rb, void *sample)
{
	user_ringbuf_commit(rb, sample, false);
}

void *user_ring_buffer__reserve(struct user_ring_buffer *rb, __u32 size)
{
	__u32 avail_size, total_size, max_size;
	/* 64-bit to avoid overflow in case of extreme application behavior */
	__u64 cons_pos, prod_pos;
	struct ringbuf_hdr *hdr;

	/* The top two bits are used as special flags */
	if (size & (BPF_RINGBUF_BUSY_BIT | BPF_RINGBUF_DISCARD_BIT))
		return errno = E2BIG, NULL;

	/* Synchronizes with smp_store_release() in __bpf_user_ringbuf_sample_release()
	 * in the kernel, which advances the consumer position.
	 */
	cons_pos = smp_load_acquire(rb->consumer_pos);
	/* Synchronizes with smp_store_release() in user_ringbuf_commit() */
	prod_pos = smp_load_acquire(rb->producer_pos);

	max_size = rb->mask + 1;
	avail_size = max_size - (prod_pos - cons_pos);
	/* Round up total size to a multiple of 8. */
	total_size = (size + BPF_RINGBUF_HDR_SZ + 7) / 8 * 8;

	if (total_size > max_size)
		return errno = E2BIG, NULL;

	if (avail_size < total_size)
		return errno = ENOSPC, NULL;

	hdr = rb->data + (prod_pos & rb->mask);
	hdr->len = size | BPF_RINGBUF_BUSY_BIT;
	hdr->pad = 0;

	/* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in
	 * the kernel.
	 */
	smp_store_release(rb->producer_pos, prod_pos + total_size);

	return (void *)rb->data + ((prod_pos + BPF_RINGBUF_HDR_SZ) & rb->mask);
}
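
/* Illustrative producer-side sketch, not part of libbpf: reserving, filling,
 * and committing a sample in a BPF_MAP_TYPE_USER_RINGBUF map, which a BPF
 * program then drains with the bpf_user_ringbuf_drain() helper. The message
 * layout is hypothetical; guarded by the hypothetical RINGBUF_USAGE_EXAMPLE
 * macro.
 */
#ifdef RINGBUF_USAGE_EXAMPLE
struct example_msg {
	__u32 op;
	__u32 arg;
};

static int example_produce(struct user_ring_buffer *rb, __u32 op, __u32 arg)
{
	struct example_msg *m;

	/* Returns NULL and sets errno (e.g. E2BIG, ENOSPC) on failure. */
	m = user_ring_buffer__reserve(rb, sizeof(*m));
	if (!m)
		return -errno;

	m->op = op;
	m->arg = arg;

	if (!op) {
		/* Back out: mark the sample discarded so the kernel skips it. */
		user_ring_buffer__discard(rb, m);
		return 0;
	}

	/* Make the sample visible to the kernel consumer. */
	user_ring_buffer__submit(rb, m);
	return 0;
}
#endif /* RINGBUF_USAGE_EXAMPLE */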

static __u64 ns_elapsed_timespec(const struct timespec *start, const struct timespec *end)
{
	__u64 start_ns, end_ns, ns_per_s = 1000000000;

	start_ns = (__u64)start->tv_sec * ns_per_s + start->tv_nsec;
	end_ns = (__u64)end->tv_sec * ns_per_s + end->tv_nsec;

	return end_ns - start_ns;
}

void *user_ring_buffer__reserve_blocking(struct user_ring_buffer *rb, __u32 size, int timeout_ms)
{
	void *sample;
	int err, ms_remaining = timeout_ms;
	struct timespec start;

	if (timeout_ms < 0 && timeout_ms != -1)
		return errno = EINVAL, NULL;

	if (timeout_ms != -1) {
		err = clock_gettime(CLOCK_MONOTONIC, &start);
		if (err)
			return NULL;
	}

	do {
		int cnt, ms_elapsed;
		struct timespec curr;
		__u64 ns_per_ms = 1000000;

		sample = user_ring_buffer__reserve(rb, size);
		if (sample)
			return sample;
		else if (errno != ENOSPC)
			return NULL;

		/* The kernel guarantees at least one event notification
		 * delivery whenever at least one sample is drained from the
		 * ring buffer in an invocation to bpf_user_ringbuf_drain().
		 * Other additional events may be delivered at any time, but
		 * only one event is guaranteed per bpf_user_ringbuf_drain()
		 * invocation, provided that a sample is drained, and the BPF
		 * program did not pass BPF_RB_NO_WAKEUP to
		 * bpf_user_ringbuf_drain(). If BPF_RB_FORCE_WAKEUP is passed
		 * to bpf_user_ringbuf_drain(), a wakeup event will be
		 * delivered even if no samples are drained.
		 */
		cnt = epoll_wait(rb->epoll_fd, &rb->event, 1, ms_remaining);
		if (cnt < 0)
			return NULL;

		if (timeout_ms == -1)
			continue;

		err = clock_gettime(CLOCK_MONOTONIC, &curr);
		if (err)
			return NULL;

		ms_elapsed = ns_elapsed_timespec(&start, &curr) / ns_per_ms;
		ms_remaining = timeout_ms - ms_elapsed;
	} while (ms_remaining > 0);

	/* Try one more time to reserve a sample after the specified timeout has elapsed. */
	return user_ring_buffer__reserve(rb, size);
}
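
/* Illustrative blocking-producer sketch, not part of libbpf: waiting up to a
 * timeout for the kernel consumer to free space before submitting. The map fd
 * and payload are hypothetical; guarded by the hypothetical
 * RINGBUF_USAGE_EXAMPLE macro.
 */
#ifdef RINGBUF_USAGE_EXAMPLE
static int example_produce_blocking(int map_fd, const void *payload, __u32 len)
{
	struct user_ring_buffer *rb;
	void *sample;
	int err = 0;

	rb = user_ring_buffer__new(map_fd, NULL);
	if (!rb)
		return -errno;

	/* Wait up to 500 ms for space; -1 would mean wait indefinitely. As the
	 * final reserve attempt above shows, errno is ENOSPC if the timeout
	 * expires without space becoming available.
	 */
	sample = user_ring_buffer__reserve_blocking(rb, len, 500);
	if (!sample) {
		err = -errno;
		goto out;
	}

	memcpy(sample, payload, len);
	user_ring_buffer__submit(rb, sample);
out:
	user_ring_buffer__free(rb);
	return err;
}
#endif /* RINGBUF_USAGE_EXAMPLE */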