// SPDX-License-Identifier: GPL-2.0

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
struct stack_trace_t {
	int pid;
	int kern_stack_size;
	int user_stack_size;
	int user_stack_buildid_size;
	__u64 kern_stack[MAX_STACK_RAWTP];
	__u64 user_stack[MAX_STACK_RAWTP];
	struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, 2);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(__u32));
} perfmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, struct stack_trace_t);
} stackdata_map SEC(".maps");

/* Allocate twice the needed per-cpu space. For the code below
 *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
 *   if (usize < 0)
 *     return 0;
 *   ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
 *
 * If we had value_size = MAX_STACK_RAWTP * sizeof(__u64),
 * the verifier would complain that the access "raw_data + usize"
 * with size "max_len - usize" may be out of bounds.
 * The maximum "raw_data + usize" is "raw_data + max_len"
 * and the maximum "max_len - usize" is "max_len", so the verifier
 * concludes that the maximum buffer access range is
 * "raw_data[0...max_len * 2 - 1]" and hence rejects the program.
 *
 * Doubling the to-be-used max buffer size fixes this verifier
 * issue and avoids complicated massaging of the C code.
 * This is an acceptable workaround since there is only one entry here.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u64[2 * MAX_STACK_RAWTP]);
} rawdata_map SEC(".maps");

SEC("raw_tracepoint/sys_enter")
int bpf_prog1(void *ctx)
{
	int max_len, max_buildid_len, total_size;
	struct stack_trace_t *data;
	long usize, ksize;
	void *raw_data;
	__u32 key = 0;

	data = bpf_map_lookup_elem(&stackdata_map, &key);
	if (!data)
		return 0;

	max_len = MAX_STACK_RAWTP * sizeof(__u64);
	max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
	data->pid = bpf_get_current_pid_tgid();
	data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
					      max_len, 0);
	data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
					      BPF_F_USER_STACK);
	data->user_stack_buildid_size = bpf_get_stack(
		ctx, data->user_stack_buildid, max_buildid_len,
		BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));

	/* write both kernel and user stacks to the same buffer */
	raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
	if (!raw_data)
		return 0;

	usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
	if (usize < 0)
		return 0;

	ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
	if (ksize < 0)
		return 0;

	total_size = usize + ksize;
	if (total_size > 0 && total_size <= max_len)
		bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);

	return 0;
}

char _license[] SEC("license") = "GPL";
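
As a usage illustration (not the kernel's own selftest harness), the sketch below shows one way a user-space program could load this object, attach bpf_prog1 to the sys_enter raw tracepoint, and poll perfmap for the samples the program emits. It assumes a recent libbpf with the six-argument perf_buffer__new(); the object file name test_get_stack_rawtp.bpf.o is a placeholder, and the callback only reports the sample size rather than decoding struct stack_trace_t or the raw stack words.

/* Hypothetical user-space loader/consumer sketch; not part of the kernel tree. */
#include <stdio.h>
#include <bpf/libbpf.h>

static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* The BPF program emits either a full struct stack_trace_t or a
	 * concatenated user+kernel raw stack; only the size is reported here.
	 */
	printf("cpu %d: received %u bytes of stack data\n", cpu, size);
}

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;
	struct bpf_link *link;
	struct perf_buffer *pb;
	int map_fd;

	obj = bpf_object__open_file("test_get_stack_rawtp.bpf.o", NULL);
	if (!obj)
		return 1;
	if (bpf_object__load(obj))
		return 1;

	prog = bpf_object__find_program_by_name(obj, "bpf_prog1");
	if (!prog)
		return 1;

	link = bpf_program__attach_raw_tracepoint(prog, "sys_enter");
	if (!link)
		return 1;

	map_fd = bpf_object__find_map_fd_by_name(obj, "perfmap");
	if (map_fd < 0)
		return 1;

	/* 8 pages per CPU ring; samples are delivered to handle_sample(). */
	pb = perf_buffer__new(map_fd, 8, handle_sample, NULL, NULL, NULL);
	if (!pb)
		return 1;

	while (perf_buffer__poll(pb, 100 /* ms */) >= 0)
		;

	perf_buffer__free(pb);
	bpf_link__destroy(link);
	bpf_object__close(obj);
	return 0;
}

Since the BPF program outputs two different payloads to the same perf event array (sizeof(struct stack_trace_t) bytes versus total_size bytes of raw stack), a real consumer would distinguish them by sample size before decoding.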