v4.6 (kernel/trace/bpf_trace.c)
  1/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
  2 *
  3 * This program is free software; you can redistribute it and/or
  4 * modify it under the terms of version 2 of the GNU General Public
  5 * License as published by the Free Software Foundation.
  6 */
  7#include <linux/kernel.h>
  8#include <linux/types.h>
  9#include <linux/slab.h>
 10#include <linux/bpf.h>
 11#include <linux/filter.h>
 12#include <linux/uaccess.h>
 13#include <linux/ctype.h>
 14#include "trace.h"
 15
 16/**
 17 * trace_call_bpf - invoke BPF program
 18 * @prog: BPF program
 19 * @ctx: opaque context pointer
 20 *
 21 * kprobe handlers execute BPF programs via this helper.
 22 * Can be used from static tracepoints in the future.
 23 *
 24 * Return: BPF programs always return an integer which is interpreted by
 25 * kprobe handler as:
 26 * 0 - return from kprobe (event is filtered out)
 27 * 1 - store kprobe event into ring buffer
 28 * Other values are reserved and currently alias to 1
 29 */
 30unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
 31{
 32	unsigned int ret;
 33
 34	if (in_nmi()) /* not supported yet */
 35		return 1;
 36
 37	preempt_disable();
 38
 39	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 40		/*
 41		 * since some bpf program is already running on this cpu,
 42		 * don't call into another bpf program (same or different)
 43		 * and don't send kprobe event into ring-buffer,
 44		 * so return zero here
 45		 */
 46		ret = 0;
 47		goto out;
 48	}
 49
 50	rcu_read_lock();
 51	ret = BPF_PROG_RUN(prog, ctx);
 52	rcu_read_unlock();
 53
 54 out:
 55	__this_cpu_dec(bpf_prog_active);
 56	preempt_enable();
 57
 58	return ret;
 59}
 60EXPORT_SYMBOL_GPL(trace_call_bpf);
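
As a rough illustration of the return-value contract documented above, a kprobe-attached program might look like the sketch below. It follows the samples/bpf style of this kernel era and assumes clang's BPF target plus the SEC() and helper declarations from samples/bpf/bpf_helpers.h; the probed symbol and the pid check are purely illustrative.

#include <linux/ptrace.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_write")
int filter_by_pid(struct pt_regs *ctx)
{
	/* upper 32 bits of the helper's return value are the tgid */
	__u32 pid = bpf_get_current_pid_tgid() >> 32;

	/* 1: store the kprobe event into the ring buffer, 0: filter it out,
	 * exactly as the trace_call_bpf() comment above describes */
	return pid == 1 ? 1 : 0;
}

/* the tracing helpers are gpl_only, so a GPL-compatible license is required;
 * kprobe program loads also carry a kernel-version section */
char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = LINUX_VERSION_CODE;
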
 61
 62static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 63{
 64	void *dst = (void *) (long) r1;
 65	int size = (int) r2;
 66	void *unsafe_ptr = (void *) (long) r3;
 67
 68	return probe_kernel_read(dst, unsafe_ptr, size);
 69}
 70
 71static const struct bpf_func_proto bpf_probe_read_proto = {
 72	.func		= bpf_probe_read,
 73	.gpl_only	= true,
 74	.ret_type	= RET_INTEGER,
 75	.arg1_type	= ARG_PTR_TO_STACK,
 76	.arg2_type	= ARG_CONST_STACK_SIZE,
 77	.arg3_type	= ARG_ANYTHING,
 78};
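
For context, this is the helper a program uses to dereference arbitrary kernel pointers; the argument order mirrors r1/r2/r3 above (destination on the program stack, size, unsafe kernel address). A minimal sketch in the style of samples/bpf/tracex1, assuming the PT_REGS_PARM1() accessor and helper declarations from samples/bpf/bpf_helpers.h; the probed function and fields are only examples.

#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/__netif_receive_skb")
int read_dev_name(struct pt_regs *ctx)
{
	struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM1(ctx);
	struct net_device *dev;
	char devname[16];	/* IFNAMSIZ */

	/* every hop through a kernel pointer is an explicit probe read */
	bpf_probe_read(&dev, sizeof(dev), &skb->dev);
	bpf_probe_read(devname, sizeof(devname), dev->name);
	return 0;
}

char _license[] SEC("license") = "GPL";
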
 79
 80/*
 81 * limited trace_printk()
 82 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
 83 */
 84static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5)
 85{
 86	char *fmt = (char *) (long) r1;
 87	bool str_seen = false;
 88	int mod[3] = {};
 89	int fmt_cnt = 0;
 90	u64 unsafe_addr;
 91	char buf[64];
 92	int i;
 93
 94	/*
 95	 * bpf_check()->check_func_arg()->check_stack_boundary()
 96	 * guarantees that fmt points to bpf program stack,
 97	 * fmt_size bytes of it were initialized and fmt_size > 0
 98	 */
 99	if (fmt[--fmt_size] != 0)
100		return -EINVAL;
101
102	/* check format string for allowed specifiers */
103	for (i = 0; i < fmt_size; i++) {
104		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
105			return -EINVAL;
106
107		if (fmt[i] != '%')
108			continue;
109
110		if (fmt_cnt >= 3)
111			return -EINVAL;
112
113		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
114		i++;
115		if (fmt[i] == 'l') {
116			mod[fmt_cnt]++;
117			i++;
118		} else if (fmt[i] == 'p' || fmt[i] == 's') {
119			mod[fmt_cnt]++;
120			i++;
121			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
122				return -EINVAL;
123			fmt_cnt++;
124			if (fmt[i - 1] == 's') {
125				if (str_seen)
126					/* allow only one '%s' per fmt string */
127					return -EINVAL;
128				str_seen = true;
129
130				switch (fmt_cnt) {
131				case 1:
132					unsafe_addr = r3;
133					r3 = (long) buf;
134					break;
135				case 2:
136					unsafe_addr = r4;
137					r4 = (long) buf;
138					break;
139				case 3:
140					unsafe_addr = r5;
141					r5 = (long) buf;
142					break;
143				}
144				buf[0] = 0;
145				strncpy_from_unsafe(buf,
146						    (void *) (long) unsafe_addr,
147						    sizeof(buf));
148			}
149			continue;
150		}
151
152		if (fmt[i] == 'l') {
153			mod[fmt_cnt]++;
154			i++;
155		}
156
157		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
158			return -EINVAL;
159		fmt_cnt++;
160	}
161
162	return __trace_printk(1/* fake ip will not be printed */, fmt,
163			      mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3,
164			      mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4,
165			      mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5);
166}
167
168static const struct bpf_func_proto bpf_trace_printk_proto = {
169	.func		= bpf_trace_printk,
170	.gpl_only	= true,
171	.ret_type	= RET_INTEGER,
172	.arg1_type	= ARG_PTR_TO_STACK,
173	.arg2_type	= ARG_CONST_STACK_SIZE,
174};
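
The ARG_PTR_TO_STACK / ARG_CONST_STACK_SIZE pairing above is why callers keep the format string in a stack array and pass sizeof() of it explicitly. A sketch of typical usage, under the same samples/bpf assumptions as the earlier sketches (probed symbol illustrative; at most three values and only the specifiers listed in the comment are accepted):

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_write")
int trace_write(struct pt_regs *ctx)
{
	/* must live on the BPF stack; checked against fmt_size by the verifier */
	char fmt[] = "write: fd %d, count %lu\n";
	long fd = PT_REGS_PARM1(ctx);
	unsigned long count = PT_REGS_PARM3(ctx);

	bpf_trace_printk(fmt, sizeof(fmt), (int)fd, count);
	return 1;
}

char _license[] SEC("license") = "GPL";

The output lands in the ftrace buffer (trace_pipe), with the fake instruction pointer noted in the __trace_printk() call above.
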
175
176const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
177{
178	/*
179	 * this program might be calling bpf_trace_printk,
180	 * so allocate per-cpu printk buffers
181	 */
182	trace_printk_init_buffers();
183
184	return &bpf_trace_printk_proto;
185}
186
187static u64 bpf_perf_event_read(u64 r1, u64 index, u64 r3, u64 r4, u64 r5)
188{
189	struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
190	struct bpf_array *array = container_of(map, struct bpf_array, map);
191	struct perf_event *event;
192	struct file *file;
193
194	if (unlikely(index >= array->map.max_entries))
195		return -E2BIG;
196
197	file = (struct file *)array->ptrs[index];
198	if (unlikely(!file))
199		return -ENOENT;
200
201	event = file->private_data;
202
203	/* make sure event is local and doesn't have pmu::count */
204	if (event->oncpu != smp_processor_id() ||
205	    event->pmu->count)
206		return -EINVAL;
207
208	/*
209	 * we don't know if the function is run successfully by the
210	 * return value. It can be judged in other places, such as
211	 * eBPF programs.
212	 */
213	return perf_event_read_local(event);
214}
215
216static const struct bpf_func_proto bpf_perf_event_read_proto = {
217	.func		= bpf_perf_event_read,
218	.gpl_only	= true,
219	.ret_type	= RET_INTEGER,
220	.arg1_type	= ARG_CONST_MAP_PTR,
221	.arg2_type	= ARG_ANYTHING,
222};
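
In this version the second argument is a plain index into a BPF_MAP_TYPE_PERF_EVENT_ARRAY whose slots user space has populated with perf event file descriptors (hence the array->ptrs[index] lookup above). A sketch along the lines of samples/bpf/tracex6, with map and section names being illustrative only:

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 32,	/* one slot per possible CPU */
};

SEC("kprobe/sys_write")
int read_counter(struct pt_regs *ctx)
{
	/* user space opened one hardware counter per CPU and stored its fd in
	 * the slot for that CPU, so the "event is local" check above passes */
	char fmt[] = "cpu %d count %llu\n";
	__u32 key = bpf_get_smp_processor_id();
	__u64 count = bpf_perf_event_read(&counters, key);

	bpf_trace_printk(fmt, sizeof(fmt), key, count);
	return 0;
}

char _license[] SEC("license") = "GPL";
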
223
224static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
225{
226	struct pt_regs *regs = (struct pt_regs *) (long) r1;
227	struct bpf_map *map = (struct bpf_map *) (long) r2;
228	struct bpf_array *array = container_of(map, struct bpf_array, map);
229	void *data = (void *) (long) r4;
230	struct perf_sample_data sample_data;
231	struct perf_event *event;
232	struct file *file;
233	struct perf_raw_record raw = {
234		.size = size,
235		.data = data,
236	};
237
238	if (unlikely(index >= array->map.max_entries))
239		return -E2BIG;
240
241	file = (struct file *)array->ptrs[index];
242	if (unlikely(!file))
243		return -ENOENT;
244
245	event = file->private_data;
246
247	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
248		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
249		return -EINVAL;
250
251	if (unlikely(event->oncpu != smp_processor_id()))
252		return -EOPNOTSUPP;
253
254	perf_sample_data_init(&sample_data, 0, 0);
255	sample_data.raw = &raw;
256	perf_event_output(event, &sample_data, regs);
257	return 0;
258}
259
260static const struct bpf_func_proto bpf_perf_event_output_proto = {
261	.func		= bpf_perf_event_output,
262	.gpl_only	= true,
263	.ret_type	= RET_INTEGER,
264	.arg1_type	= ARG_PTR_TO_CTX,
265	.arg2_type	= ARG_CONST_MAP_PTR,
266	.arg3_type	= ARG_ANYTHING,
267	.arg4_type	= ARG_PTR_TO_STACK,
268	.arg5_type	= ARG_CONST_STACK_SIZE,
269};
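
The counterpart on the program side pushes a raw sample through a BPF_MAP_TYPE_PERF_EVENT_ARRAY slot that user space reads via a perf ring buffer. A sketch under the same samples/bpf assumptions (map layout and names illustrative; in this version the third argument is the array index, typically the current CPU):

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") events = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 64,
};

SEC("kprobe/sys_write")
int emit_sample(struct pt_regs *ctx)
{
	struct {
		__u64 pid;
		__u64 count;
	} data = {
		.pid   = bpf_get_current_pid_tgid() >> 32,
		.count = PT_REGS_PARM3(ctx),
	};

	/* data sits on the BPF stack (ARG_PTR_TO_STACK + ARG_CONST_STACK_SIZE);
	 * the slot for this CPU must hold a PERF_COUNT_SW_BPF_OUTPUT event */
	bpf_perf_event_output(ctx, &events, bpf_get_smp_processor_id(),
			      &data, sizeof(data));
	return 1;
}

char _license[] SEC("license") = "GPL";

User space mmap()s the corresponding per-CPU perf fd and consumes the raw samples from that ring buffer.
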
270
271static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
272{
273	switch (func_id) {
274	case BPF_FUNC_map_lookup_elem:
275		return &bpf_map_lookup_elem_proto;
276	case BPF_FUNC_map_update_elem:
277		return &bpf_map_update_elem_proto;
278	case BPF_FUNC_map_delete_elem:
279		return &bpf_map_delete_elem_proto;
280	case BPF_FUNC_probe_read:
281		return &bpf_probe_read_proto;
282	case BPF_FUNC_ktime_get_ns:
283		return &bpf_ktime_get_ns_proto;
284	case BPF_FUNC_tail_call:
285		return &bpf_tail_call_proto;
286	case BPF_FUNC_get_current_pid_tgid:
287		return &bpf_get_current_pid_tgid_proto;
288	case BPF_FUNC_get_current_uid_gid:
289		return &bpf_get_current_uid_gid_proto;
290	case BPF_FUNC_get_current_comm:
291		return &bpf_get_current_comm_proto;
292	case BPF_FUNC_trace_printk:
293		return bpf_get_trace_printk_proto();
294	case BPF_FUNC_get_smp_processor_id:
295		return &bpf_get_smp_processor_id_proto;
296	case BPF_FUNC_perf_event_read:
297		return &bpf_perf_event_read_proto;
298	case BPF_FUNC_perf_event_output:
299		return &bpf_perf_event_output_proto;
300	case BPF_FUNC_get_stackid:
301		return &bpf_get_stackid_proto;
302	default:
303		return NULL;
304	}
305}
306
307/* bpf+kprobe programs can access fields of 'struct pt_regs' */
308static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
309{
310	/* check bounds */
311	if (off < 0 || off >= sizeof(struct pt_regs))
312		return false;
313
314	/* only read is allowed */
315	if (type != BPF_READ)
316		return false;
317
318	/* disallow misaligned access */
319	if (off % size != 0)
320		return false;
321
322	return true;
323}
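
In practice this means a kprobe program may freely do aligned reads anywhere inside its struct pt_regs context, which is what the PT_REGS_* macros from samples/bpf/bpf_helpers.h expand to; writes or misaligned loads are rejected at verification time. A tiny sketch (probed symbol illustrative):

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/do_sys_open")
int trace_open(struct pt_regs *ctx)
{
	/* an aligned BPF_READ within sizeof(struct pt_regs): allowed */
	long flags = PT_REGS_PARM3(ctx);

	/* a store into the context would fail kprobe_prog_is_valid_access()
	 * because only BPF_READ is permitted */
	return flags != 0;
}

char _license[] SEC("license") = "GPL";
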
324
325static const struct bpf_verifier_ops kprobe_prog_ops = {
326	.get_func_proto  = kprobe_prog_func_proto,
327	.is_valid_access = kprobe_prog_is_valid_access,
328};
329
330static struct bpf_prog_type_list kprobe_tl = {
331	.ops	= &kprobe_prog_ops,
332	.type	= BPF_PROG_TYPE_KPROBE,
333};
334
335static int __init register_kprobe_prog_ops(void)
336{
337	bpf_register_prog_type(&kprobe_tl);
338	return 0;
339}
340late_initcall(register_kprobe_prog_ops);
v4.10.11 (kernel/trace/bpf_trace.c)
  1/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
  2 * Copyright (c) 2016 Facebook
  3 *
  4 * This program is free software; you can redistribute it and/or
  5 * modify it under the terms of version 2 of the GNU General Public
  6 * License as published by the Free Software Foundation.
  7 */
  8#include <linux/kernel.h>
  9#include <linux/types.h>
 10#include <linux/slab.h>
 11#include <linux/bpf.h>
 12#include <linux/bpf_perf_event.h>
 13#include <linux/filter.h>
 14#include <linux/uaccess.h>
 15#include <linux/ctype.h>
 16#include "trace.h"
 17
 18/**
 19 * trace_call_bpf - invoke BPF program
 20 * @prog: BPF program
 21 * @ctx: opaque context pointer
 22 *
 23 * kprobe handlers execute BPF programs via this helper.
 24 * Can be used from static tracepoints in the future.
 25 *
 26 * Return: BPF programs always return an integer which is interpreted by
 27 * kprobe handler as:
 28 * 0 - return from kprobe (event is filtered out)
 29 * 1 - store kprobe event into ring buffer
 30 * Other values are reserved and currently alias to 1
 31 */
 32unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
 33{
 34	unsigned int ret;
 35
 36	if (in_nmi()) /* not supported yet */
 37		return 1;
 38
 39	preempt_disable();
 40
 41	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 42		/*
 43		 * since some bpf program is already running on this cpu,
 44		 * don't call into another bpf program (same or different)
 45		 * and don't send kprobe event into ring-buffer,
 46		 * so return zero here
 47		 */
 48		ret = 0;
 49		goto out;
 50	}
 51
 52	rcu_read_lock();
 53	ret = BPF_PROG_RUN(prog, ctx);
 54	rcu_read_unlock();
 55
 56 out:
 57	__this_cpu_dec(bpf_prog_active);
 58	preempt_enable();
 59
 60	return ret;
 61}
 62EXPORT_SYMBOL_GPL(trace_call_bpf);
 63
 64BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
 65{
 66	int ret;
 67
 68	ret = probe_kernel_read(dst, unsafe_ptr, size);
 69	if (unlikely(ret < 0))
 70		memset(dst, 0, size);
 71
 72	return ret;
 73}
 74
 75static const struct bpf_func_proto bpf_probe_read_proto = {
 76	.func		= bpf_probe_read,
 77	.gpl_only	= true,
 78	.ret_type	= RET_INTEGER,
 79	.arg1_type	= ARG_PTR_TO_RAW_STACK,
 80	.arg2_type	= ARG_CONST_STACK_SIZE,
 81	.arg3_type	= ARG_ANYTHING,
 82};
 83
 84BPF_CALL_3(bpf_probe_write_user, void *, unsafe_ptr, const void *, src,
 85	   u32, size)
 86{
 87	/*
 88	 * Ensure we're in user context which is safe for the helper to
 89	 * run. This helper has no business in a kthread.
 90	 *
 91	 * access_ok() should prevent writing to non-user memory, but in
 92	 * some situations (nommu, temporary switch, etc) access_ok() does
 93	 * not provide enough validation, hence the check on KERNEL_DS.
 94	 */
 95
 96	if (unlikely(in_interrupt() ||
 97		     current->flags & (PF_KTHREAD | PF_EXITING)))
 98		return -EPERM;
 99	if (unlikely(segment_eq(get_fs(), KERNEL_DS)))
100		return -EPERM;
101	if (!access_ok(VERIFY_WRITE, unsafe_ptr, size))
102		return -EPERM;
103
104	return probe_kernel_write(unsafe_ptr, src, size);
105}
106
107static const struct bpf_func_proto bpf_probe_write_user_proto = {
108	.func		= bpf_probe_write_user,
109	.gpl_only	= true,
110	.ret_type	= RET_INTEGER,
111	.arg1_type	= ARG_ANYTHING,
112	.arg2_type	= ARG_PTR_TO_STACK,
113	.arg3_type	= ARG_CONST_STACK_SIZE,
114};
115
116static const struct bpf_func_proto *bpf_get_probe_write_proto(void)
117{
118	pr_warn_ratelimited("%s[%d] is installing a program with bpf_probe_write_user helper that may corrupt user memory!",
119			    current->comm, task_pid_nr(current));
120
121	return &bpf_probe_write_user_proto;
122}
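
For reference, the program side of the (deliberately noisy) helper above: the destination must be a user-space address belonging to current, typically taken straight from a syscall argument. A heavily simplified sketch loosely modelled on samples/bpf/test_probe_write_user; the probed symbol and the value written are purely illustrative, and misuse can corrupt the traced process.

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_connect")
int rewrite_user_arg(struct pt_regs *ctx)
{
	void *uaddr = (void *)PT_REGS_PARM2(ctx);	/* user pointer from the syscall */
	__u32 zero = 0;

	/* returns -EPERM from interrupt context, kthreads, or for non-user
	 * destinations, as checked above */
	bpf_probe_write_user(uaddr, &zero, sizeof(zero));
	return 0;
}

char _license[] SEC("license") = "GPL";
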
123
124/*
125 * limited trace_printk()
126 * only %d %u %x %ld %lu %lx %lld %llu %llx %p %s conversion specifiers allowed
127 */
128BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
129	   u64, arg2, u64, arg3)
130{
131	bool str_seen = false;
132	int mod[3] = {};
133	int fmt_cnt = 0;
134	u64 unsafe_addr;
135	char buf[64];
136	int i;
137
138	/*
139	 * bpf_check()->check_func_arg()->check_stack_boundary()
140	 * guarantees that fmt points to bpf program stack,
141	 * fmt_size bytes of it were initialized and fmt_size > 0
142	 */
143	if (fmt[--fmt_size] != 0)
144		return -EINVAL;
145
146	/* check format string for allowed specifiers */
147	for (i = 0; i < fmt_size; i++) {
148		if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i]))
149			return -EINVAL;
150
151		if (fmt[i] != '%')
152			continue;
153
154		if (fmt_cnt >= 3)
155			return -EINVAL;
156
157		/* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */
158		i++;
159		if (fmt[i] == 'l') {
160			mod[fmt_cnt]++;
161			i++;
162		} else if (fmt[i] == 'p' || fmt[i] == 's') {
163			mod[fmt_cnt]++;
164			i++;
165			if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0)
166				return -EINVAL;
167			fmt_cnt++;
168			if (fmt[i - 1] == 's') {
169				if (str_seen)
170					/* allow only one '%s' per fmt string */
171					return -EINVAL;
172				str_seen = true;
173
174				switch (fmt_cnt) {
175				case 1:
176					unsafe_addr = arg1;
177					arg1 = (long) buf;
178					break;
179				case 2:
180					unsafe_addr = arg2;
181					arg2 = (long) buf;
182					break;
183				case 3:
184					unsafe_addr = arg3;
185					arg3 = (long) buf;
186					break;
187				}
188				buf[0] = 0;
189				strncpy_from_unsafe(buf,
190						    (void *) (long) unsafe_addr,
191						    sizeof(buf));
192			}
193			continue;
194		}
195
196		if (fmt[i] == 'l') {
197			mod[fmt_cnt]++;
198			i++;
199		}
200
201		if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x')
202			return -EINVAL;
203		fmt_cnt++;
204	}
205
206	return __trace_printk(1/* fake ip will not be printed */, fmt,
207			      mod[0] == 2 ? arg1 : mod[0] == 1 ? (long) arg1 : (u32) arg1,
208			      mod[1] == 2 ? arg2 : mod[1] == 1 ? (long) arg2 : (u32) arg2,
209			      mod[2] == 2 ? arg3 : mod[2] == 1 ? (long) arg3 : (u32) arg3);
210}
211
212static const struct bpf_func_proto bpf_trace_printk_proto = {
213	.func		= bpf_trace_printk,
214	.gpl_only	= true,
215	.ret_type	= RET_INTEGER,
216	.arg1_type	= ARG_PTR_TO_STACK,
217	.arg2_type	= ARG_CONST_STACK_SIZE,
218};
219
220const struct bpf_func_proto *bpf_get_trace_printk_proto(void)
221{
222	/*
223	 * this program might be calling bpf_trace_printk,
224	 * so allocate per-cpu printk buffers
225	 */
226	trace_printk_init_buffers();
227
228	return &bpf_trace_printk_proto;
229}
230
231BPF_CALL_2(bpf_perf_event_read, struct bpf_map *, map, u64, flags)
232{
233	struct bpf_array *array = container_of(map, struct bpf_array, map);
234	unsigned int cpu = smp_processor_id();
235	u64 index = flags & BPF_F_INDEX_MASK;
236	struct bpf_event_entry *ee;
237	struct perf_event *event;
238
239	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
240		return -EINVAL;
241	if (index == BPF_F_CURRENT_CPU)
242		index = cpu;
243	if (unlikely(index >= array->map.max_entries))
244		return -E2BIG;
245
246	ee = READ_ONCE(array->ptrs[index]);
247	if (!ee)
248		return -ENOENT;
249
250	event = ee->event;
251	if (unlikely(event->attr.type != PERF_TYPE_HARDWARE &&
252		     event->attr.type != PERF_TYPE_RAW))
253		return -EINVAL;
254
255	/* make sure event is local and doesn't have pmu::count */
256	if (unlikely(event->oncpu != cpu || event->pmu->count))
257		return -EINVAL;
258
259	/*
260	 * we don't know if the function is run successfully by the
261	 * return value. It can be judged in other places, such as
262	 * eBPF programs.
263	 */
264	return perf_event_read_local(event);
265}
266
267static const struct bpf_func_proto bpf_perf_event_read_proto = {
268	.func		= bpf_perf_event_read,
269	.gpl_only	= true,
270	.ret_type	= RET_INTEGER,
271	.arg1_type	= ARG_CONST_MAP_PTR,
272	.arg2_type	= ARG_ANYTHING,
273};
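
Compared with the v4.6 version earlier on this page, the second argument is now a flags word: the low 32 bits select the array slot and BPF_F_CURRENT_CPU (from uapi/linux/bpf.h) substitutes the current CPU. A sketch under the same samples/bpf assumptions:

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") counters = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(__u32),
	.max_entries = 64,	/* sized for the number of possible CPUs */
};

SEC("kprobe/sys_write")
int read_counter(struct pt_regs *ctx)
{
	/* read the counter user space installed for the current CPU */
	__u64 count = bpf_perf_event_read(&counters, BPF_F_CURRENT_CPU);

	return (long long)count < 0 ? 0 : 1;	/* negative: helper error code */
}

char _license[] SEC("license") = "GPL";
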
274
275static __always_inline u64
276__bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
277			u64 flags, struct perf_raw_record *raw)
278{
279	struct bpf_array *array = container_of(map, struct bpf_array, map);
280	unsigned int cpu = smp_processor_id();
281	u64 index = flags & BPF_F_INDEX_MASK;
282	struct perf_sample_data sample_data;
283	struct bpf_event_entry *ee;
284	struct perf_event *event;
285
286	if (index == BPF_F_CURRENT_CPU)
287		index = cpu;
288	if (unlikely(index >= array->map.max_entries))
289		return -E2BIG;
290
291	ee = READ_ONCE(array->ptrs[index]);
292	if (!ee)
293		return -ENOENT;
294
295	event = ee->event;
296	if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
297		     event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
298		return -EINVAL;
299
300	if (unlikely(event->oncpu != cpu))
301		return -EOPNOTSUPP;
302
303	perf_sample_data_init(&sample_data, 0, 0);
304	sample_data.raw = raw;
305	perf_event_output(event, &sample_data, regs);
306	return 0;
307}
308
309BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
310	   u64, flags, void *, data, u64, size)
311{
312	struct perf_raw_record raw = {
313		.frag = {
314			.size = size,
315			.data = data,
316		},
317	};
318
319	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
320		return -EINVAL;
321
322	return __bpf_perf_event_output(regs, map, flags, &raw);
323}
324
325static const struct bpf_func_proto bpf_perf_event_output_proto = {
326	.func		= bpf_perf_event_output,
327	.gpl_only	= true,
328	.ret_type	= RET_INTEGER,
329	.arg1_type	= ARG_PTR_TO_CTX,
330	.arg2_type	= ARG_CONST_MAP_PTR,
331	.arg3_type	= ARG_ANYTHING,
332	.arg4_type	= ARG_PTR_TO_STACK,
333	.arg5_type	= ARG_CONST_STACK_SIZE,
334};
335
336static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
337
338u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
339		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
340{
341	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
342	struct perf_raw_frag frag = {
343		.copy		= ctx_copy,
344		.size		= ctx_size,
345		.data		= ctx,
346	};
347	struct perf_raw_record raw = {
348		.frag = {
349			{
350				.next	= ctx_size ? &frag : NULL,
351			},
352			.size	= meta_size,
353			.data	= meta,
354		},
355	};
356
357	perf_fetch_caller_regs(regs);
358
359	return __bpf_perf_event_output(regs, map, flags, &raw);
360}
361
362BPF_CALL_0(bpf_get_current_task)
363{
364	return (long) current;
365}
366
367static const struct bpf_func_proto bpf_get_current_task_proto = {
368	.func		= bpf_get_current_task,
369	.gpl_only	= true,
370	.ret_type	= RET_INTEGER,
371};
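
bpf_get_current_task() simply hands the program the current task_struct pointer as an integer; combined with bpf_probe_read() this lets tracing programs walk task fields. A hedged sketch (field choice illustrative; programs of this era are compiled against the running kernel's headers, so the struct layout must match):

#include <linux/sched.h>
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

SEC("kprobe/sys_write")
int trace_parent(struct pt_regs *ctx)
{
	struct task_struct *task = (struct task_struct *)bpf_get_current_task();
	struct task_struct *parent;
	int ppid = 0;

	/* each pointer hop is an explicit read of kernel memory */
	bpf_probe_read(&parent, sizeof(parent), &task->real_parent);
	bpf_probe_read(&ppid, sizeof(ppid), &parent->tgid);

	return ppid == 1;	/* e.g. keep only events from direct children of init */
}

char _license[] SEC("license") = "GPL";
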
372
373BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
374{
375	struct bpf_array *array = container_of(map, struct bpf_array, map);
376	struct cgroup *cgrp;
377
378	if (unlikely(in_interrupt()))
379		return -EINVAL;
380	if (unlikely(idx >= array->map.max_entries))
381		return -E2BIG;
382
383	cgrp = READ_ONCE(array->ptrs[idx]);
384	if (unlikely(!cgrp))
385		return -EAGAIN;
386
387	return task_under_cgroup_hierarchy(current, cgrp);
388}
389
390static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
391	.func           = bpf_current_task_under_cgroup,
392	.gpl_only       = false,
393	.ret_type       = RET_INTEGER,
394	.arg1_type      = ARG_CONST_MAP_PTR,
395	.arg2_type      = ARG_ANYTHING,
396};
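
The helper above tests whether current sits in the hierarchy of the cgroup stored at a given index of a BPF_MAP_TYPE_CGROUP_ARRAY, which user space populates with a cgroup directory fd. A sketch modelled loosely on samples/bpf/test_current_task_under_cgroup (names illustrative):

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") cgroup_map = {
	.type = BPF_MAP_TYPE_CGROUP_ARRAY,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = 1,
};

SEC("kprobe/sys_write")
int only_my_cgroup(struct pt_regs *ctx)
{
	/* 1: current is under the cgroup stored at slot 0, 0: it is not,
	 * negative: error (for example -EAGAIN while the slot is empty) */
	if (bpf_current_task_under_cgroup(&cgroup_map, 0) != 1)
		return 0;

	return 1;
}

char _license[] SEC("license") = "GPL";
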
397
398static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
399{
400	switch (func_id) {
401	case BPF_FUNC_map_lookup_elem:
402		return &bpf_map_lookup_elem_proto;
403	case BPF_FUNC_map_update_elem:
404		return &bpf_map_update_elem_proto;
405	case BPF_FUNC_map_delete_elem:
406		return &bpf_map_delete_elem_proto;
407	case BPF_FUNC_probe_read:
408		return &bpf_probe_read_proto;
409	case BPF_FUNC_ktime_get_ns:
410		return &bpf_ktime_get_ns_proto;
411	case BPF_FUNC_tail_call:
412		return &bpf_tail_call_proto;
413	case BPF_FUNC_get_current_pid_tgid:
414		return &bpf_get_current_pid_tgid_proto;
415	case BPF_FUNC_get_current_task:
416		return &bpf_get_current_task_proto;
417	case BPF_FUNC_get_current_uid_gid:
418		return &bpf_get_current_uid_gid_proto;
419	case BPF_FUNC_get_current_comm:
420		return &bpf_get_current_comm_proto;
421	case BPF_FUNC_trace_printk:
422		return bpf_get_trace_printk_proto();
423	case BPF_FUNC_get_smp_processor_id:
424		return &bpf_get_smp_processor_id_proto;
425	case BPF_FUNC_get_numa_node_id:
426		return &bpf_get_numa_node_id_proto;
427	case BPF_FUNC_perf_event_read:
428		return &bpf_perf_event_read_proto;
429	case BPF_FUNC_probe_write_user:
430		return bpf_get_probe_write_proto();
431	case BPF_FUNC_current_task_under_cgroup:
432		return &bpf_current_task_under_cgroup_proto;
433	case BPF_FUNC_get_prandom_u32:
434		return &bpf_get_prandom_u32_proto;
435	default:
436		return NULL;
437	}
438}
439
440static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
441{
442	switch (func_id) {
443	case BPF_FUNC_perf_event_output:
444		return &bpf_perf_event_output_proto;
445	case BPF_FUNC_get_stackid:
446		return &bpf_get_stackid_proto;
447	default:
448		return tracing_func_proto(func_id);
449	}
450}
451
452/* bpf+kprobe programs can access fields of 'struct pt_regs' */
453static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
454					enum bpf_reg_type *reg_type)
455{
456	if (off < 0 || off >= sizeof(struct pt_regs))
457		return false;
458	if (type != BPF_READ)
459		return false;
460	if (off % size != 0)
461		return false;
462	return true;
463}
464
465static const struct bpf_verifier_ops kprobe_prog_ops = {
466	.get_func_proto  = kprobe_prog_func_proto,
467	.is_valid_access = kprobe_prog_is_valid_access,
468};
469
470static struct bpf_prog_type_list kprobe_tl = {
471	.ops	= &kprobe_prog_ops,
472	.type	= BPF_PROG_TYPE_KPROBE,
473};
474
475BPF_CALL_5(bpf_perf_event_output_tp, void *, tp_buff, struct bpf_map *, map,
476	   u64, flags, void *, data, u64, size)
477{
478	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
479
480	/*
481	 * r1 points to perf tracepoint buffer where first 8 bytes are hidden
482	 * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
483	 * from there and call the same bpf_perf_event_output() helper inline.
484	 */
485	return ____bpf_perf_event_output(regs, map, flags, data, size);
486}
487
488static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
489	.func		= bpf_perf_event_output_tp,
490	.gpl_only	= true,
491	.ret_type	= RET_INTEGER,
492	.arg1_type	= ARG_PTR_TO_CTX,
493	.arg2_type	= ARG_CONST_MAP_PTR,
494	.arg3_type	= ARG_ANYTHING,
495	.arg4_type	= ARG_PTR_TO_STACK,
496	.arg5_type	= ARG_CONST_STACK_SIZE,
497};
498
499BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
500	   u64, flags)
501{
502	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
503
504	/*
505	 * Same comment as in bpf_perf_event_output_tp(), only that this time
506	 * the other helper's function body cannot be inlined due to being
507	 * external, thus we need to call raw helper function.
508	 */
509	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
510			       flags, 0, 0);
511}
512
513static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
514	.func		= bpf_get_stackid_tp,
515	.gpl_only	= true,
516	.ret_type	= RET_INTEGER,
517	.arg1_type	= ARG_PTR_TO_CTX,
518	.arg2_type	= ARG_CONST_MAP_PTR,
519	.arg3_type	= ARG_ANYTHING,
520};
521
522static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
523{
524	switch (func_id) {
525	case BPF_FUNC_perf_event_output:
526		return &bpf_perf_event_output_proto_tp;
527	case BPF_FUNC_get_stackid:
528		return &bpf_get_stackid_proto_tp;
529	default:
530		return tracing_func_proto(func_id);
531	}
532}
533
534static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
535				    enum bpf_reg_type *reg_type)
536{
537	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
538		return false;
539	if (type != BPF_READ)
540		return false;
541	if (off % size != 0)
542		return false;
543	return true;
544}
545
546static const struct bpf_verifier_ops tracepoint_prog_ops = {
547	.get_func_proto  = tp_prog_func_proto,
548	.is_valid_access = tp_prog_is_valid_access,
549};
550
551static struct bpf_prog_type_list tracepoint_tl = {
552	.ops	= &tracepoint_prog_ops,
553	.type	= BPF_PROG_TYPE_TRACEPOINT,
554};
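
For orientation, a tracepoint program of this era sees the raw perf tracepoint buffer as its context; its first 8 bytes hold the hidden pt_regs pointer mentioned above, which is why tp_prog_is_valid_access() rejects offsets below sizeof(void *). A sketch with a hand-written, hypothetical argument layout (a real program derives the offsets from the tracepoint's format file under /sys/kernel/debug/tracing/events/):

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* hypothetical layout for syscalls:sys_enter_write */
struct sys_enter_write_args {
	unsigned long long common;	/* first 8 bytes: hidden, not accessible */
	long syscall_nr;
	long fd;
	const char *buf;
	unsigned long count;
};

SEC("tracepoint/syscalls/sys_enter_write")
int tp_write(struct sys_enter_write_args *ctx)
{
	/* keep only large writes: 1 stores the event, 0 filters it,
	 * mirroring the trace_call_bpf() contract */
	return ctx->count > 4096;
}

char _license[] SEC("license") = "GPL";
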
555
556static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
557				    enum bpf_reg_type *reg_type)
558{
559	if (off < 0 || off >= sizeof(struct bpf_perf_event_data))
560		return false;
561	if (type != BPF_READ)
562		return false;
563	if (off % size != 0)
564		return false;
565	if (off == offsetof(struct bpf_perf_event_data, sample_period)) {
566		if (size != sizeof(u64))
567			return false;
568	} else {
569		if (size != sizeof(long))
570			return false;
571	}
572	return true;
573}
574
575static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
576				      int src_reg, int ctx_off,
577				      struct bpf_insn *insn_buf,
578				      struct bpf_prog *prog)
579{
580	struct bpf_insn *insn = insn_buf;
581
582	switch (ctx_off) {
583	case offsetof(struct bpf_perf_event_data, sample_period):
584		BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
585
586		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
587						       data), dst_reg, src_reg,
588				      offsetof(struct bpf_perf_event_data_kern, data));
589		*insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
590				      offsetof(struct perf_sample_data, period));
591		break;
592	default:
593		*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
594						       regs), dst_reg, src_reg,
595				      offsetof(struct bpf_perf_event_data_kern, regs));
596		*insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off);
597		break;
598	}
599
600	return insn - insn_buf;
601}
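
The access conversion above is what lets a PERF_EVENT program treat struct bpf_perf_event_data as if it were flat, while the actual loads are rewritten to go through the kernel-side bpf_perf_event_data_kern pointers. A sketch in the style of samples/bpf/trace_event (section name and register accessor are illustrative):

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <uapi/linux/bpf_perf_event.h>
#include "bpf_helpers.h"

SEC("perf_event")
int on_sample(struct bpf_perf_event_data *ctx)
{
	/* both loads are rewritten by pe_prog_convert_ctx_access(): the first
	 * through the kernel's perf_sample_data pointer, the second through
	 * the saved pt_regs pointer */
	__u64 period = ctx->sample_period;
	__u64 ip = PT_REGS_IP(&ctx->regs);

	return period && ip;
}

char _license[] SEC("license") = "GPL";
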
602
603static const struct bpf_verifier_ops perf_event_prog_ops = {
604	.get_func_proto		= tp_prog_func_proto,
605	.is_valid_access	= pe_prog_is_valid_access,
606	.convert_ctx_access	= pe_prog_convert_ctx_access,
607};
608
609static struct bpf_prog_type_list perf_event_tl = {
610	.ops	= &perf_event_prog_ops,
611	.type	= BPF_PROG_TYPE_PERF_EVENT,
612};
613
614static int __init register_kprobe_prog_ops(void)
615{
616	bpf_register_prog_type(&kprobe_tl);
617	bpf_register_prog_type(&tracepoint_tl);
618	bpf_register_prog_type(&perf_event_tl);
619	return 0;
620}
621late_initcall(register_kprobe_prog_ops);