v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Facebook
  3 */
  4#include <linux/bpf.h>
  5#include <linux/jhash.h>
  6#include <linux/filter.h>
  7#include <linux/kernel.h>
  8#include <linux/stacktrace.h>
  9#include <linux/perf_event.h>
 10#include <linux/irq_work.h>
 11#include <linux/btf_ids.h>
 12#include <linux/buildid.h>
 13#include "percpu_freelist.h"
 14
 15#define STACK_CREATE_FLAG_MASK					\
 16	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
 17	 BPF_F_STACK_BUILD_ID)
 18
 19struct stack_map_bucket {
 20	struct pcpu_freelist_node fnode;
 21	u32 hash;
 22	u32 nr;
 23	u64 data[];
 24};
 25
 26struct bpf_stack_map {
 27	struct bpf_map map;
 28	void *elems;
 29	struct pcpu_freelist freelist;
 30	u32 n_buckets;
 31	struct stack_map_bucket *buckets[];
 32};
 33
 34/* irq_work to run up_read() for build_id lookup in nmi context */
 35struct stack_map_irq_work {
 36	struct irq_work irq_work;
 37	struct mm_struct *mm;
 38};
 39
 40static void do_up_read(struct irq_work *entry)
 41{
 42	struct stack_map_irq_work *work;
 43
 44	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
 45		return;
 46
 47	work = container_of(entry, struct stack_map_irq_work, irq_work);
 48	mmap_read_unlock_non_owner(work->mm);
 49}
 50
 51static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
 52
 53static inline bool stack_map_use_build_id(struct bpf_map *map)
 54{
 55	return (map->map_flags & BPF_F_STACK_BUILD_ID);
 56}
 57
 58static inline int stack_map_data_size(struct bpf_map *map)
 59{
 60	return stack_map_use_build_id(map) ?
 61		sizeof(struct bpf_stack_build_id) : sizeof(u64);
 62}
 63
 64static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 65{
 66	u64 elem_size = sizeof(struct stack_map_bucket) +
 67			(u64)smap->map.value_size;
 68	int err;
 69
 70	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
 71					 smap->map.numa_node);
 72	if (!smap->elems)
 73		return -ENOMEM;
 74
 75	err = pcpu_freelist_init(&smap->freelist);
 76	if (err)
 77		goto free_elems;
 78
 79	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
 80			       smap->map.max_entries);
 81	return 0;
 82
 83free_elems:
 84	bpf_map_area_free(smap->elems);
 85	return err;
 86}
 87
 88/* Called from syscall */
 89static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 90{
 91	u32 value_size = attr->value_size;
 92	struct bpf_stack_map *smap;
 93	u64 cost, n_buckets;
 94	int err;
 95
 96	if (!bpf_capable())
 97		return ERR_PTR(-EPERM);
 98
 99	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
100		return ERR_PTR(-EINVAL);
101
102	/* check sanity of attributes */
103	if (attr->max_entries == 0 || attr->key_size != 4 ||
104	    value_size < 8 || value_size % 8)
105		return ERR_PTR(-EINVAL);
106
107	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
108	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
109		if (value_size % sizeof(struct bpf_stack_build_id) ||
110		    value_size / sizeof(struct bpf_stack_build_id)
111		    > sysctl_perf_event_max_stack)
112			return ERR_PTR(-EINVAL);
113	} else if (value_size / 8 > sysctl_perf_event_max_stack)
114		return ERR_PTR(-EINVAL);
115
116	/* hash table size must be power of 2 */
117	n_buckets = roundup_pow_of_two(attr->max_entries);
118	if (!n_buckets)
119		return ERR_PTR(-E2BIG);
120
121	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
122	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
123	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
124	if (!smap)
125		return ERR_PTR(-ENOMEM);
126
127	bpf_map_init_from_attr(&smap->map, attr);
128	smap->map.value_size = value_size;
129	smap->n_buckets = n_buckets;
130
131	err = get_callchain_buffers(sysctl_perf_event_max_stack);
132	if (err)
133		goto free_smap;
134
135	err = prealloc_elems_and_freelist(smap);
136	if (err)
137		goto put_buffers;
138
139	return &smap->map;
140
141put_buffers:
142	put_callchain_buffers();
143free_smap:
144	bpf_map_area_free(smap);
145	return ERR_PTR(err);
146}
147
148static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
149					  u64 *ips, u32 trace_nr, bool user)
150{
151	int i;
152	struct vm_area_struct *vma;
153	bool irq_work_busy = false;
154	struct stack_map_irq_work *work = NULL;
155
156	if (irqs_disabled()) {
157		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
158			work = this_cpu_ptr(&up_read_work);
159			if (irq_work_is_busy(&work->irq_work)) {
160				/* cannot queue more up_read, fallback */
161				irq_work_busy = true;
162			}
163		} else {
164			/*
 165			 * PREEMPT_RT does not allow trylocking the mmap sem in
 166			 * interrupt-disabled context. Force the fallback code.
167			 */
168			irq_work_busy = true;
169		}
170	}
171
172	/*
 173	 * We cannot do up_read() when the irq is disabled, because of the
 174	 * risk of deadlocking with rq_lock. To do the build_id lookup when
 175	 * irqs are disabled, we need to run up_read() in irq_work. We use
 176	 * a percpu variable to do the irq_work. If the irq_work is
 177	 * already used by another lookup, we fall back to reporting ips.
178	 *
179	 * Same fallback is used for kernel stack (!user) on a stackmap
180	 * with build_id.
181	 */
182	if (!user || !current || !current->mm || irq_work_busy ||
183	    !mmap_read_trylock_non_owner(current->mm)) {
184		/* cannot access current->mm, fall back to ips */
185		for (i = 0; i < trace_nr; i++) {
186			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
187			id_offs[i].ip = ips[i];
188			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
189		}
190		return;
191	}
192
193	for (i = 0; i < trace_nr; i++) {
194		vma = find_vma(current->mm, ips[i]);
195		if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
196			/* per entry fall back to ips */
197			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
198			id_offs[i].ip = ips[i];
199			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
200			continue;
201		}
202		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
203			- vma->vm_start;
204		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
205	}
206
207	if (!work) {
208		mmap_read_unlock_non_owner(current->mm);
209	} else {
210		work->mm = current->mm;
211		irq_work_queue(&work->irq_work);
212	}
213}
214
215static struct perf_callchain_entry *
216get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
217{
218#ifdef CONFIG_STACKTRACE
219	struct perf_callchain_entry *entry;
220	int rctx;
221
222	entry = get_callchain_entry(&rctx);
223
224	if (!entry)
225		return NULL;
226
227	entry->nr = init_nr +
228		stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
229				     sysctl_perf_event_max_stack - init_nr, 0);
230
231	/* stack_trace_save_tsk() works on unsigned long array, while
232	 * perf_callchain_entry uses u64 array. For 32-bit systems, it is
233	 * necessary to fix this mismatch.
234	 */
235	if (__BITS_PER_LONG != 64) {
236		unsigned long *from = (unsigned long *) entry->ip;
237		u64 *to = entry->ip;
238		int i;
239
240		/* copy data from the end to avoid using extra buffer */
241		for (i = entry->nr - 1; i >= (int)init_nr; i--)
242			to[i] = (u64)(from[i]);
243	}
244
245	put_callchain_entry(rctx);
246
247	return entry;
248#else /* CONFIG_STACKTRACE */
249	return NULL;
250#endif
251}
252
253static long __bpf_get_stackid(struct bpf_map *map,
254			      struct perf_callchain_entry *trace, u64 flags)
255{
256	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
257	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
258	u32 max_depth = map->value_size / stack_map_data_size(map);
259	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
260	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
261	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
262	u32 hash, id, trace_nr, trace_len;
263	bool user = flags & BPF_F_USER_STACK;
264	u64 *ips;
265	bool hash_matches;
266
267	/* get_perf_callchain() guarantees that trace->nr >= init_nr
 268	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
269	 */
270	trace_nr = trace->nr - init_nr;
271
272	if (trace_nr <= skip)
273		/* skipping more than usable stack trace */
274		return -EFAULT;
275
276	trace_nr -= skip;
277	trace_len = trace_nr * sizeof(u64);
278	ips = trace->ip + skip + init_nr;
279	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
280	id = hash & (smap->n_buckets - 1);
281	bucket = READ_ONCE(smap->buckets[id]);
282
283	hash_matches = bucket && bucket->hash == hash;
284	/* fast cmp */
285	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
286		return id;
287
288	if (stack_map_use_build_id(map)) {
289		/* for build_id+offset, pop a bucket before slow cmp */
290		new_bucket = (struct stack_map_bucket *)
291			pcpu_freelist_pop(&smap->freelist);
292		if (unlikely(!new_bucket))
293			return -ENOMEM;
294		new_bucket->nr = trace_nr;
295		stack_map_get_build_id_offset(
296			(struct bpf_stack_build_id *)new_bucket->data,
297			ips, trace_nr, user);
298		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
299		if (hash_matches && bucket->nr == trace_nr &&
300		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
301			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
302			return id;
303		}
304		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
305			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
306			return -EEXIST;
307		}
308	} else {
309		if (hash_matches && bucket->nr == trace_nr &&
310		    memcmp(bucket->data, ips, trace_len) == 0)
311			return id;
312		if (bucket && !(flags & BPF_F_REUSE_STACKID))
313			return -EEXIST;
314
315		new_bucket = (struct stack_map_bucket *)
316			pcpu_freelist_pop(&smap->freelist);
317		if (unlikely(!new_bucket))
318			return -ENOMEM;
319		memcpy(new_bucket->data, ips, trace_len);
320	}
321
322	new_bucket->hash = hash;
323	new_bucket->nr = trace_nr;
324
325	old_bucket = xchg(&smap->buckets[id], new_bucket);
326	if (old_bucket)
327		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
328	return id;
329}
330
331BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
332	   u64, flags)
333{
334	u32 max_depth = map->value_size / stack_map_data_size(map);
335	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
336	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
337	bool user = flags & BPF_F_USER_STACK;
338	struct perf_callchain_entry *trace;
339	bool kernel = !user;
340
341	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
342			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
343		return -EINVAL;
344
345	trace = get_perf_callchain(regs, init_nr, kernel, user,
346				   sysctl_perf_event_max_stack, false, false);
347
348	if (unlikely(!trace))
349		/* couldn't fetch the stack trace */
350		return -EFAULT;
351
352	return __bpf_get_stackid(map, trace, flags);
353}
354
355const struct bpf_func_proto bpf_get_stackid_proto = {
356	.func		= bpf_get_stackid,
357	.gpl_only	= true,
358	.ret_type	= RET_INTEGER,
359	.arg1_type	= ARG_PTR_TO_CTX,
360	.arg2_type	= ARG_CONST_MAP_PTR,
361	.arg3_type	= ARG_ANYTHING,
362};
363
364static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
365{
366	__u64 nr_kernel = 0;
367
368	while (nr_kernel < trace->nr) {
369		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
370			break;
371		nr_kernel++;
372	}
373	return nr_kernel;
374}
375
376BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
377	   struct bpf_map *, map, u64, flags)
378{
379	struct perf_event *event = ctx->event;
380	struct perf_callchain_entry *trace;
381	bool kernel, user;
382	__u64 nr_kernel;
383	int ret;
384
385	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
386	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
387		return bpf_get_stackid((unsigned long)(ctx->regs),
388				       (unsigned long) map, flags, 0, 0);
389
390	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
391			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
392		return -EINVAL;
393
394	user = flags & BPF_F_USER_STACK;
395	kernel = !user;
396
397	trace = ctx->data->callchain;
398	if (unlikely(!trace))
399		return -EFAULT;
400
401	nr_kernel = count_kernel_ip(trace);
402
403	if (kernel) {
404		__u64 nr = trace->nr;
405
406		trace->nr = nr_kernel;
407		ret = __bpf_get_stackid(map, trace, flags);
408
409		/* restore nr */
410		trace->nr = nr;
411	} else { /* user */
412		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
413
414		skip += nr_kernel;
415		if (skip > BPF_F_SKIP_FIELD_MASK)
416			return -EFAULT;
417
418		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
419		ret = __bpf_get_stackid(map, trace, flags);
420	}
421	return ret;
422}
423
424const struct bpf_func_proto bpf_get_stackid_proto_pe = {
425	.func		= bpf_get_stackid_pe,
426	.gpl_only	= false,
427	.ret_type	= RET_INTEGER,
428	.arg1_type	= ARG_PTR_TO_CTX,
429	.arg2_type	= ARG_CONST_MAP_PTR,
430	.arg3_type	= ARG_ANYTHING,
431};
432
433static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
434			    struct perf_callchain_entry *trace_in,
435			    void *buf, u32 size, u64 flags)
436{
437	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
438	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
439	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
440	bool user = flags & BPF_F_USER_STACK;
441	struct perf_callchain_entry *trace;
442	bool kernel = !user;
443	int err = -EINVAL;
444	u64 *ips;
445
446	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
447			       BPF_F_USER_BUILD_ID)))
448		goto clear;
449	if (kernel && user_build_id)
450		goto clear;
451
452	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
453					    : sizeof(u64);
454	if (unlikely(size % elem_size))
455		goto clear;
456
457	/* cannot get valid user stack for task without user_mode regs */
458	if (task && user && !user_mode(regs))
459		goto err_fault;
460
461	num_elem = size / elem_size;
462	if (sysctl_perf_event_max_stack < num_elem)
463		init_nr = 0;
464	else
465		init_nr = sysctl_perf_event_max_stack - num_elem;
466
467	if (trace_in)
468		trace = trace_in;
469	else if (kernel && task)
470		trace = get_callchain_entry_for_task(task, init_nr);
471	else
472		trace = get_perf_callchain(regs, init_nr, kernel, user,
473					   sysctl_perf_event_max_stack,
474					   false, false);
475	if (unlikely(!trace))
476		goto err_fault;
477
478	trace_nr = trace->nr - init_nr;
479	if (trace_nr < skip)
480		goto err_fault;
481
482	trace_nr -= skip;
483	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
484	copy_len = trace_nr * elem_size;
485	ips = trace->ip + skip + init_nr;
486	if (user && user_build_id)
487		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
488	else
489		memcpy(buf, ips, copy_len);
490
491	if (size > copy_len)
492		memset(buf + copy_len, 0, size - copy_len);
493	return copy_len;
494
495err_fault:
496	err = -EFAULT;
497clear:
498	memset(buf, 0, size);
499	return err;
500}
501
502BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
503	   u64, flags)
504{
505	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
506}
507
508const struct bpf_func_proto bpf_get_stack_proto = {
509	.func		= bpf_get_stack,
510	.gpl_only	= true,
511	.ret_type	= RET_INTEGER,
512	.arg1_type	= ARG_PTR_TO_CTX,
513	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
514	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
515	.arg4_type	= ARG_ANYTHING,
516};
517
518BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
519	   u32, size, u64, flags)
520{
521	struct pt_regs *regs;
522	long res;
523
524	if (!try_get_task_stack(task))
525		return -EFAULT;
526
527	regs = task_pt_regs(task);
528	res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
529	put_task_stack(task);
530
531	return res;
532}
533
534BTF_ID_LIST_SINGLE(bpf_get_task_stack_btf_ids, struct, task_struct)
535
536const struct bpf_func_proto bpf_get_task_stack_proto = {
537	.func		= bpf_get_task_stack,
538	.gpl_only	= false,
539	.ret_type	= RET_INTEGER,
540	.arg1_type	= ARG_PTR_TO_BTF_ID,
541	.arg1_btf_id	= &bpf_get_task_stack_btf_ids[0],
542	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
543	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
544	.arg4_type	= ARG_ANYTHING,
545};
546
547BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
548	   void *, buf, u32, size, u64, flags)
549{
550	struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
551	struct perf_event *event = ctx->event;
552	struct perf_callchain_entry *trace;
553	bool kernel, user;
554	int err = -EINVAL;
555	__u64 nr_kernel;
556
557	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
558		return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
559
560	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
561			       BPF_F_USER_BUILD_ID)))
562		goto clear;
563
564	user = flags & BPF_F_USER_STACK;
565	kernel = !user;
566
567	err = -EFAULT;
568	trace = ctx->data->callchain;
569	if (unlikely(!trace))
570		goto clear;
571
572	nr_kernel = count_kernel_ip(trace);
573
574	if (kernel) {
575		__u64 nr = trace->nr;
576
577		trace->nr = nr_kernel;
578		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
579
580		/* restore nr */
581		trace->nr = nr;
582	} else { /* user */
583		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
584
585		skip += nr_kernel;
586		if (skip > BPF_F_SKIP_FIELD_MASK)
587			goto clear;
588
589		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
590		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
591	}
592	return err;
593
594clear:
595	memset(buf, 0, size);
596	return err;
597
598}
599
600const struct bpf_func_proto bpf_get_stack_proto_pe = {
601	.func		= bpf_get_stack_pe,
602	.gpl_only	= true,
603	.ret_type	= RET_INTEGER,
604	.arg1_type	= ARG_PTR_TO_CTX,
605	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
606	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
607	.arg4_type	= ARG_ANYTHING,
608};
609
610/* Called from eBPF program */
611static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
612{
613	return ERR_PTR(-EOPNOTSUPP);
614}
615
616/* Called from syscall */
617int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
618{
619	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
620	struct stack_map_bucket *bucket, *old_bucket;
621	u32 id = *(u32 *)key, trace_len;
622
623	if (unlikely(id >= smap->n_buckets))
624		return -ENOENT;
625
626	bucket = xchg(&smap->buckets[id], NULL);
627	if (!bucket)
628		return -ENOENT;
629
630	trace_len = bucket->nr * stack_map_data_size(map);
631	memcpy(value, bucket->data, trace_len);
632	memset(value + trace_len, 0, map->value_size - trace_len);
633
634	old_bucket = xchg(&smap->buckets[id], bucket);
635	if (old_bucket)
636		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
637	return 0;
638}
639
640static int stack_map_get_next_key(struct bpf_map *map, void *key,
641				  void *next_key)
642{
643	struct bpf_stack_map *smap = container_of(map,
644						  struct bpf_stack_map, map);
645	u32 id;
646
647	WARN_ON_ONCE(!rcu_read_lock_held());
648
649	if (!key) {
650		id = 0;
651	} else {
652		id = *(u32 *)key;
653		if (id >= smap->n_buckets || !smap->buckets[id])
654			id = 0;
655		else
656			id++;
657	}
658
659	while (id < smap->n_buckets && !smap->buckets[id])
660		id++;
661
662	if (id >= smap->n_buckets)
663		return -ENOENT;
664
665	*(u32 *)next_key = id;
666	return 0;
667}
668
669static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
670				 u64 map_flags)
671{
672	return -EINVAL;
673}
674
675/* Called from syscall or from eBPF program */
676static int stack_map_delete_elem(struct bpf_map *map, void *key)
677{
678	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
679	struct stack_map_bucket *old_bucket;
680	u32 id = *(u32 *)key;
681
682	if (unlikely(id >= smap->n_buckets))
683		return -E2BIG;
684
685	old_bucket = xchg(&smap->buckets[id], NULL);
686	if (old_bucket) {
687		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
688		return 0;
689	} else {
690		return -ENOENT;
691	}
692}
693
694/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
695static void stack_map_free(struct bpf_map *map)
696{
697	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
698
699	bpf_map_area_free(smap->elems);
700	pcpu_freelist_destroy(&smap->freelist);
701	bpf_map_area_free(smap);
702	put_callchain_buffers();
703}
704
705static int stack_trace_map_btf_id;
706const struct bpf_map_ops stack_trace_map_ops = {
707	.map_meta_equal = bpf_map_meta_equal,
708	.map_alloc = stack_map_alloc,
709	.map_free = stack_map_free,
710	.map_get_next_key = stack_map_get_next_key,
711	.map_lookup_elem = stack_map_lookup_elem,
712	.map_update_elem = stack_map_update_elem,
713	.map_delete_elem = stack_map_delete_elem,
714	.map_check_btf = map_check_no_btf,
715	.map_btf_name = "bpf_stack_map",
716	.map_btf_id = &stack_trace_map_btf_id,
717};
718
719static int __init stack_map_init(void)
720{
721	int cpu;
722	struct stack_map_irq_work *work;
723
724	for_each_possible_cpu(cpu) {
725		work = per_cpu_ptr(&up_read_work, cpu);
726		init_irq_work(&work->irq_work, do_up_read);
727	}
728	return 0;
729}
730subsys_initcall(stack_map_init);
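
Usage sketch (not part of stackmap.c): a minimal BPF-side program that records stack traces into a BPF_MAP_TYPE_STACK_TRACE map with bpf_get_stackid(). The map name, sizes and kprobe attach point are illustrative assumptions; the constraints they satisfy (key_size == 4, value_size a multiple of 8 and at most sysctl_perf_event_max_stack * 8) come from stack_map_alloc() above, and the GPL license is needed because bpf_get_stackid_proto is gpl_only.

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

/* one bucket per unique stack; each value holds up to 127 u64 instruction pointers */
struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, 127 * sizeof(__u64));	/* PERF_MAX_STACK_DEPTH frames */
} stack_traces SEC(".maps");

SEC("kprobe/do_sys_openat2")	/* illustrative attach point */
int record_stack(struct pt_regs *ctx)
{
	/* hash-only compare of the kernel stack; a negative return is
	 * -EFAULT/-EEXIST/-ENOMEM from __bpf_get_stackid() */
	long id = bpf_get_stackid(ctx, &stack_traces, BPF_F_FAST_STACK_CMP);

	if (id < 0)
		return 0;
	/* id is the bucket index; user space reads the trace with
	 * bpf_map_lookup_elem() on the map fd, served by bpf_stackmap_copy() */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";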
v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Facebook
  3 */
  4#include <linux/bpf.h>
  5#include <linux/jhash.h>
  6#include <linux/filter.h>
  7#include <linux/kernel.h>
  8#include <linux/stacktrace.h>
  9#include <linux/perf_event.h>
 10#include <linux/elf.h>
 11#include <linux/pagemap.h>
 12#include <linux/irq_work.h>
 13#include <linux/btf_ids.h>
 14#include "percpu_freelist.h"
 15
 16#define STACK_CREATE_FLAG_MASK					\
 17	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
 18	 BPF_F_STACK_BUILD_ID)
 19
 20struct stack_map_bucket {
 21	struct pcpu_freelist_node fnode;
 22	u32 hash;
 23	u32 nr;
 24	u64 data[];
 25};
 26
 27struct bpf_stack_map {
 28	struct bpf_map map;
 29	void *elems;
 30	struct pcpu_freelist freelist;
 31	u32 n_buckets;
 32	struct stack_map_bucket *buckets[];
 33};
 34
 35/* irq_work to run up_read() for build_id lookup in nmi context */
 36struct stack_map_irq_work {
 37	struct irq_work irq_work;
 38	struct mm_struct *mm;
 39};
 40
 41static void do_up_read(struct irq_work *entry)
 42{
 43	struct stack_map_irq_work *work;
 44
 45	if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
 46		return;
 47
 48	work = container_of(entry, struct stack_map_irq_work, irq_work);
 49	mmap_read_unlock_non_owner(work->mm);
 50}
 51
 52static DEFINE_PER_CPU(struct stack_map_irq_work, up_read_work);
 53
 54static inline bool stack_map_use_build_id(struct bpf_map *map)
 55{
 56	return (map->map_flags & BPF_F_STACK_BUILD_ID);
 57}
 58
 59static inline int stack_map_data_size(struct bpf_map *map)
 60{
 61	return stack_map_use_build_id(map) ?
 62		sizeof(struct bpf_stack_build_id) : sizeof(u64);
 63}
 64
 65static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 66{
 67	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
 68	int err;
 69
 70	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
 71					 smap->map.numa_node);
 72	if (!smap->elems)
 73		return -ENOMEM;
 74
 75	err = pcpu_freelist_init(&smap->freelist);
 76	if (err)
 77		goto free_elems;
 78
 79	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
 80			       smap->map.max_entries);
 81	return 0;
 82
 83free_elems:
 84	bpf_map_area_free(smap->elems);
 85	return err;
 86}
 87
 88/* Called from syscall */
 89static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 90{
 91	u32 value_size = attr->value_size;
 92	struct bpf_stack_map *smap;
 93	struct bpf_map_memory mem;
 94	u64 cost, n_buckets;
 95	int err;
 96
 97	if (!bpf_capable())
 98		return ERR_PTR(-EPERM);
 99
100	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
101		return ERR_PTR(-EINVAL);
102
103	/* check sanity of attributes */
104	if (attr->max_entries == 0 || attr->key_size != 4 ||
105	    value_size < 8 || value_size % 8)
106		return ERR_PTR(-EINVAL);
107
108	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
109	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
110		if (value_size % sizeof(struct bpf_stack_build_id) ||
111		    value_size / sizeof(struct bpf_stack_build_id)
112		    > sysctl_perf_event_max_stack)
113			return ERR_PTR(-EINVAL);
114	} else if (value_size / 8 > sysctl_perf_event_max_stack)
115		return ERR_PTR(-EINVAL);
116
117	/* hash table size must be power of 2 */
118	n_buckets = roundup_pow_of_two(attr->max_entries);
119
120	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
121	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
122	err = bpf_map_charge_init(&mem, cost);
123	if (err)
124		return ERR_PTR(err);
125
126	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
127	if (!smap) {
128		bpf_map_charge_finish(&mem);
129		return ERR_PTR(-ENOMEM);
130	}
131
132	bpf_map_init_from_attr(&smap->map, attr);
133	smap->map.value_size = value_size;
134	smap->n_buckets = n_buckets;
135
136	err = get_callchain_buffers(sysctl_perf_event_max_stack);
137	if (err)
138		goto free_charge;
139
140	err = prealloc_elems_and_freelist(smap);
141	if (err)
142		goto put_buffers;
143
144	bpf_map_charge_move(&smap->map.memory, &mem);
145
146	return &smap->map;
147
148put_buffers:
149	put_callchain_buffers();
150free_charge:
151	bpf_map_charge_finish(&mem);
152	bpf_map_area_free(smap);
153	return ERR_PTR(err);
154}
155
156#define BPF_BUILD_ID 3
157/*
158 * Parse build id from the note segment. This logic can be shared between
 159 * 32-bit and 64-bit systems, because Elf32_Nhdr and Elf64_Nhdr are
160 * identical.
161 */
162static inline int stack_map_parse_build_id(void *page_addr,
163					   unsigned char *build_id,
164					   void *note_start,
165					   Elf32_Word note_size)
166{
167	Elf32_Word note_offs = 0, new_offs;
168
169	/* check for overflow */
170	if (note_start < page_addr || note_start + note_size < note_start)
171		return -EINVAL;
172
173	/* only supports note that fits in the first page */
174	if (note_start + note_size > page_addr + PAGE_SIZE)
175		return -EINVAL;
176
177	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
178		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
179
180		if (nhdr->n_type == BPF_BUILD_ID &&
181		    nhdr->n_namesz == sizeof("GNU") &&
182		    nhdr->n_descsz > 0 &&
183		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
184			memcpy(build_id,
185			       note_start + note_offs +
186			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
187			       nhdr->n_descsz);
188			memset(build_id + nhdr->n_descsz, 0,
189			       BPF_BUILD_ID_SIZE - nhdr->n_descsz);
190			return 0;
191		}
192		new_offs = note_offs + sizeof(Elf32_Nhdr) +
193			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
194		if (new_offs <= note_offs)  /* overflow */
195			break;
196		note_offs = new_offs;
197	}
198	return -EINVAL;
199}
200
201/* Parse build ID from 32-bit ELF */
202static int stack_map_get_build_id_32(void *page_addr,
203				     unsigned char *build_id)
204{
205	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
206	Elf32_Phdr *phdr;
207	int i;
208
209	/* only supports phdr that fits in one page */
210	if (ehdr->e_phnum >
211	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
212		return -EINVAL;
213
214	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
215
216	for (i = 0; i < ehdr->e_phnum; ++i) {
217		if (phdr[i].p_type == PT_NOTE &&
218		    !stack_map_parse_build_id(page_addr, build_id,
219					      page_addr + phdr[i].p_offset,
220					      phdr[i].p_filesz))
221			return 0;
222	}
223	return -EINVAL;
224}
225
226/* Parse build ID from 64-bit ELF */
227static int stack_map_get_build_id_64(void *page_addr,
228				     unsigned char *build_id)
229{
230	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
231	Elf64_Phdr *phdr;
232	int i;
233
234	/* only supports phdr that fits in one page */
235	if (ehdr->e_phnum >
236	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
237		return -EINVAL;
238
239	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
240
241	for (i = 0; i < ehdr->e_phnum; ++i) {
242		if (phdr[i].p_type == PT_NOTE &&
243		    !stack_map_parse_build_id(page_addr, build_id,
244					      page_addr + phdr[i].p_offset,
245					      phdr[i].p_filesz))
246			return 0;
247	}
248	return -EINVAL;
249}
250
251/* Parse build ID of ELF file mapped to vma */
252static int stack_map_get_build_id(struct vm_area_struct *vma,
253				  unsigned char *build_id)
254{
255	Elf32_Ehdr *ehdr;
256	struct page *page;
257	void *page_addr;
258	int ret;
259
260	/* only works for page backed storage  */
261	if (!vma->vm_file)
262		return -EINVAL;
263
264	page = find_get_page(vma->vm_file->f_mapping, 0);
265	if (!page)
266		return -EFAULT;	/* page not mapped */
267
268	ret = -EINVAL;
269	page_addr = kmap_atomic(page);
270	ehdr = (Elf32_Ehdr *)page_addr;
271
272	/* compare magic x7f "ELF" */
273	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0)
274		goto out;
275
276	/* only support executable file and shared object file */
277	if (ehdr->e_type != ET_EXEC && ehdr->e_type != ET_DYN)
278		goto out;
279
280	if (ehdr->e_ident[EI_CLASS] == ELFCLASS32)
281		ret = stack_map_get_build_id_32(page_addr, build_id);
282	else if (ehdr->e_ident[EI_CLASS] == ELFCLASS64)
283		ret = stack_map_get_build_id_64(page_addr, build_id);
284out:
285	kunmap_atomic(page_addr);
286	put_page(page);
287	return ret;
288}
289
290static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
291					  u64 *ips, u32 trace_nr, bool user)
292{
293	int i;
294	struct vm_area_struct *vma;
295	bool irq_work_busy = false;
296	struct stack_map_irq_work *work = NULL;
297
298	if (irqs_disabled()) {
299		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
300			work = this_cpu_ptr(&up_read_work);
301			if (atomic_read(&work->irq_work.flags) & IRQ_WORK_BUSY) {
302				/* cannot queue more up_read, fallback */
303				irq_work_busy = true;
304			}
305		} else {
306			/*
 307			 * PREEMPT_RT does not allow trylocking the mmap sem in
 308			 * interrupt-disabled context. Force the fallback code.
309			 */
310			irq_work_busy = true;
311		}
312	}
313
314	/*
 315	 * We cannot do up_read() when the irq is disabled, because of the
 316	 * risk of deadlocking with rq_lock. To do the build_id lookup when
 317	 * irqs are disabled, we need to run up_read() in irq_work. We use
 318	 * a percpu variable to do the irq_work. If the irq_work is
 319	 * already used by another lookup, we fall back to reporting ips.
320	 *
321	 * Same fallback is used for kernel stack (!user) on a stackmap
322	 * with build_id.
323	 */
324	if (!user || !current || !current->mm || irq_work_busy ||
325	    !mmap_read_trylock_non_owner(current->mm)) {
326		/* cannot access current->mm, fall back to ips */
327		for (i = 0; i < trace_nr; i++) {
328			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
329			id_offs[i].ip = ips[i];
330			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
331		}
332		return;
333	}
334
335	for (i = 0; i < trace_nr; i++) {
336		vma = find_vma(current->mm, ips[i]);
337		if (!vma || stack_map_get_build_id(vma, id_offs[i].build_id)) {
338			/* per entry fall back to ips */
339			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
340			id_offs[i].ip = ips[i];
341			memset(id_offs[i].build_id, 0, BPF_BUILD_ID_SIZE);
342			continue;
343		}
344		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
345			- vma->vm_start;
346		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
347	}
348
349	if (!work) {
350		mmap_read_unlock_non_owner(current->mm);
351	} else {
352		work->mm = current->mm;
353		irq_work_queue(&work->irq_work);
354	}
355}
356
357static struct perf_callchain_entry *
358get_callchain_entry_for_task(struct task_struct *task, u32 init_nr)
359{
360#ifdef CONFIG_STACKTRACE
361	struct perf_callchain_entry *entry;
362	int rctx;
363
364	entry = get_callchain_entry(&rctx);
365
366	if (!entry)
367		return NULL;
368
369	entry->nr = init_nr +
370		stack_trace_save_tsk(task, (unsigned long *)(entry->ip + init_nr),
371				     sysctl_perf_event_max_stack - init_nr, 0);
372
373	/* stack_trace_save_tsk() works on unsigned long array, while
374	 * perf_callchain_entry uses u64 array. For 32-bit systems, it is
375	 * necessary to fix this mismatch.
376	 */
377	if (__BITS_PER_LONG != 64) {
378		unsigned long *from = (unsigned long *) entry->ip;
379		u64 *to = entry->ip;
380		int i;
381
382		/* copy data from the end to avoid using extra buffer */
383		for (i = entry->nr - 1; i >= (int)init_nr; i--)
384			to[i] = (u64)(from[i]);
385	}
386
387	put_callchain_entry(rctx);
388
389	return entry;
390#else /* CONFIG_STACKTRACE */
391	return NULL;
392#endif
393}
394
395static long __bpf_get_stackid(struct bpf_map *map,
396			      struct perf_callchain_entry *trace, u64 flags)
397{
398	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
399	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
400	u32 max_depth = map->value_size / stack_map_data_size(map);
401	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
402	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
403	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
404	u32 hash, id, trace_nr, trace_len;
405	bool user = flags & BPF_F_USER_STACK;
406	u64 *ips;
407	bool hash_matches;
408
409	/* get_perf_callchain() guarantees that trace->nr >= init_nr
 410	 * and trace->nr <= sysctl_perf_event_max_stack, so trace_nr <= max_depth
411	 */
412	trace_nr = trace->nr - init_nr;
413
414	if (trace_nr <= skip)
415		/* skipping more than usable stack trace */
416		return -EFAULT;
417
418	trace_nr -= skip;
419	trace_len = trace_nr * sizeof(u64);
420	ips = trace->ip + skip + init_nr;
421	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
422	id = hash & (smap->n_buckets - 1);
423	bucket = READ_ONCE(smap->buckets[id]);
424
425	hash_matches = bucket && bucket->hash == hash;
426	/* fast cmp */
427	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
428		return id;
429
430	if (stack_map_use_build_id(map)) {
431		/* for build_id+offset, pop a bucket before slow cmp */
432		new_bucket = (struct stack_map_bucket *)
433			pcpu_freelist_pop(&smap->freelist);
434		if (unlikely(!new_bucket))
435			return -ENOMEM;
436		new_bucket->nr = trace_nr;
437		stack_map_get_build_id_offset(
438			(struct bpf_stack_build_id *)new_bucket->data,
439			ips, trace_nr, user);
440		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
441		if (hash_matches && bucket->nr == trace_nr &&
442		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
443			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
444			return id;
445		}
446		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
447			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
448			return -EEXIST;
449		}
450	} else {
451		if (hash_matches && bucket->nr == trace_nr &&
452		    memcmp(bucket->data, ips, trace_len) == 0)
453			return id;
454		if (bucket && !(flags & BPF_F_REUSE_STACKID))
455			return -EEXIST;
456
457		new_bucket = (struct stack_map_bucket *)
458			pcpu_freelist_pop(&smap->freelist);
459		if (unlikely(!new_bucket))
460			return -ENOMEM;
461		memcpy(new_bucket->data, ips, trace_len);
462	}
463
464	new_bucket->hash = hash;
465	new_bucket->nr = trace_nr;
466
467	old_bucket = xchg(&smap->buckets[id], new_bucket);
468	if (old_bucket)
469		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
470	return id;
471}
472
473BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
474	   u64, flags)
475{
476	u32 max_depth = map->value_size / stack_map_data_size(map);
477	/* stack_map_alloc() checks that max_depth <= sysctl_perf_event_max_stack */
478	u32 init_nr = sysctl_perf_event_max_stack - max_depth;
479	bool user = flags & BPF_F_USER_STACK;
480	struct perf_callchain_entry *trace;
481	bool kernel = !user;
482
483	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
484			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
485		return -EINVAL;
486
487	trace = get_perf_callchain(regs, init_nr, kernel, user,
488				   sysctl_perf_event_max_stack, false, false);
489
490	if (unlikely(!trace))
491		/* couldn't fetch the stack trace */
492		return -EFAULT;
493
494	return __bpf_get_stackid(map, trace, flags);
495}
496
497const struct bpf_func_proto bpf_get_stackid_proto = {
498	.func		= bpf_get_stackid,
499	.gpl_only	= true,
500	.ret_type	= RET_INTEGER,
501	.arg1_type	= ARG_PTR_TO_CTX,
502	.arg2_type	= ARG_CONST_MAP_PTR,
503	.arg3_type	= ARG_ANYTHING,
504};
505
506static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
507{
508	__u64 nr_kernel = 0;
509
510	while (nr_kernel < trace->nr) {
511		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
512			break;
513		nr_kernel++;
514	}
515	return nr_kernel;
516}
517
518BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
519	   struct bpf_map *, map, u64, flags)
520{
521	struct perf_event *event = ctx->event;
522	struct perf_callchain_entry *trace;
523	bool kernel, user;
524	__u64 nr_kernel;
525	int ret;
526
527	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
528	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
529		return bpf_get_stackid((unsigned long)(ctx->regs),
530				       (unsigned long) map, flags, 0, 0);
531
532	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
533			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
534		return -EINVAL;
535
536	user = flags & BPF_F_USER_STACK;
537	kernel = !user;
538
539	trace = ctx->data->callchain;
540	if (unlikely(!trace))
541		return -EFAULT;
542
543	nr_kernel = count_kernel_ip(trace);
544
545	if (kernel) {
546		__u64 nr = trace->nr;
547
548		trace->nr = nr_kernel;
549		ret = __bpf_get_stackid(map, trace, flags);
550
551		/* restore nr */
552		trace->nr = nr;
553	} else { /* user */
554		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
555
556		skip += nr_kernel;
557		if (skip > BPF_F_SKIP_FIELD_MASK)
558			return -EFAULT;
559
560		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
561		ret = __bpf_get_stackid(map, trace, flags);
562	}
563	return ret;
564}
565
566const struct bpf_func_proto bpf_get_stackid_proto_pe = {
567	.func		= bpf_get_stackid_pe,
568	.gpl_only	= false,
569	.ret_type	= RET_INTEGER,
570	.arg1_type	= ARG_PTR_TO_CTX,
571	.arg2_type	= ARG_CONST_MAP_PTR,
572	.arg3_type	= ARG_ANYTHING,
573};
574
575static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
576			    struct perf_callchain_entry *trace_in,
577			    void *buf, u32 size, u64 flags)
578{
579	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
580	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
581	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
582	bool user = flags & BPF_F_USER_STACK;
583	struct perf_callchain_entry *trace;
584	bool kernel = !user;
585	int err = -EINVAL;
586	u64 *ips;
587
588	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
589			       BPF_F_USER_BUILD_ID)))
590		goto clear;
591	if (kernel && user_build_id)
592		goto clear;
593
594	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
595					    : sizeof(u64);
596	if (unlikely(size % elem_size))
597		goto clear;
598
599	/* cannot get valid user stack for task without user_mode regs */
600	if (task && user && !user_mode(regs))
601		goto err_fault;
602
603	num_elem = size / elem_size;
604	if (sysctl_perf_event_max_stack < num_elem)
605		init_nr = 0;
606	else
607		init_nr = sysctl_perf_event_max_stack - num_elem;
608
609	if (trace_in)
610		trace = trace_in;
611	else if (kernel && task)
612		trace = get_callchain_entry_for_task(task, init_nr);
613	else
614		trace = get_perf_callchain(regs, init_nr, kernel, user,
615					   sysctl_perf_event_max_stack,
616					   false, false);
617	if (unlikely(!trace))
618		goto err_fault;
619
620	trace_nr = trace->nr - init_nr;
621	if (trace_nr < skip)
622		goto err_fault;
623
624	trace_nr -= skip;
625	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
626	copy_len = trace_nr * elem_size;
627	ips = trace->ip + skip + init_nr;
628	if (user && user_build_id)
629		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
630	else
631		memcpy(buf, ips, copy_len);
632
633	if (size > copy_len)
634		memset(buf + copy_len, 0, size - copy_len);
635	return copy_len;
636
637err_fault:
638	err = -EFAULT;
639clear:
640	memset(buf, 0, size);
641	return err;
642}
643
644BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
645	   u64, flags)
646{
647	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
648}
649
650const struct bpf_func_proto bpf_get_stack_proto = {
651	.func		= bpf_get_stack,
652	.gpl_only	= true,
653	.ret_type	= RET_INTEGER,
654	.arg1_type	= ARG_PTR_TO_CTX,
655	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
656	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
657	.arg4_type	= ARG_ANYTHING,
658};
659
660BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
661	   u32, size, u64, flags)
662{
663	struct pt_regs *regs = task_pt_regs(task);
664
665	return __bpf_get_stack(regs, task, NULL, buf, size, flags);
666}
667
668BTF_ID_LIST(bpf_get_task_stack_btf_ids)
669BTF_ID(struct, task_struct)
670
671const struct bpf_func_proto bpf_get_task_stack_proto = {
672	.func		= bpf_get_task_stack,
673	.gpl_only	= false,
674	.ret_type	= RET_INTEGER,
675	.arg1_type	= ARG_PTR_TO_BTF_ID,
676	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
677	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
678	.arg4_type	= ARG_ANYTHING,
679	.btf_id		= bpf_get_task_stack_btf_ids,
680};
681
682BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
683	   void *, buf, u32, size, u64, flags)
684{
685	struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
686	struct perf_event *event = ctx->event;
687	struct perf_callchain_entry *trace;
688	bool kernel, user;
689	int err = -EINVAL;
690	__u64 nr_kernel;
691
692	if (!(event->attr.sample_type & __PERF_SAMPLE_CALLCHAIN_EARLY))
693		return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
694
695	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
696			       BPF_F_USER_BUILD_ID)))
697		goto clear;
698
699	user = flags & BPF_F_USER_STACK;
700	kernel = !user;
701
702	err = -EFAULT;
703	trace = ctx->data->callchain;
704	if (unlikely(!trace))
705		goto clear;
706
707	nr_kernel = count_kernel_ip(trace);
708
709	if (kernel) {
710		__u64 nr = trace->nr;
711
712		trace->nr = nr_kernel;
713		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
714
715		/* restore nr */
716		trace->nr = nr;
717	} else { /* user */
718		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
719
720		skip += nr_kernel;
721		if (skip > BPF_F_SKIP_FIELD_MASK)
722			goto clear;
723
724		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
725		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
726	}
727	return err;
728
729clear:
730	memset(buf, 0, size);
731	return err;
732
733}
734
735const struct bpf_func_proto bpf_get_stack_proto_pe = {
736	.func		= bpf_get_stack_pe,
737	.gpl_only	= true,
738	.ret_type	= RET_INTEGER,
739	.arg1_type	= ARG_PTR_TO_CTX,
740	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
741	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
742	.arg4_type	= ARG_ANYTHING,
743};
744
745/* Called from eBPF program */
746static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
747{
748	return ERR_PTR(-EOPNOTSUPP);
749}
750
751/* Called from syscall */
752int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
753{
754	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
755	struct stack_map_bucket *bucket, *old_bucket;
756	u32 id = *(u32 *)key, trace_len;
757
758	if (unlikely(id >= smap->n_buckets))
759		return -ENOENT;
760
761	bucket = xchg(&smap->buckets[id], NULL);
762	if (!bucket)
763		return -ENOENT;
764
765	trace_len = bucket->nr * stack_map_data_size(map);
766	memcpy(value, bucket->data, trace_len);
767	memset(value + trace_len, 0, map->value_size - trace_len);
768
769	old_bucket = xchg(&smap->buckets[id], bucket);
770	if (old_bucket)
771		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
772	return 0;
773}
774
775static int stack_map_get_next_key(struct bpf_map *map, void *key,
776				  void *next_key)
777{
778	struct bpf_stack_map *smap = container_of(map,
779						  struct bpf_stack_map, map);
780	u32 id;
781
782	WARN_ON_ONCE(!rcu_read_lock_held());
783
784	if (!key) {
785		id = 0;
786	} else {
787		id = *(u32 *)key;
788		if (id >= smap->n_buckets || !smap->buckets[id])
789			id = 0;
790		else
791			id++;
792	}
793
794	while (id < smap->n_buckets && !smap->buckets[id])
795		id++;
796
797	if (id >= smap->n_buckets)
798		return -ENOENT;
799
800	*(u32 *)next_key = id;
801	return 0;
802}
803
804static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
805				 u64 map_flags)
806{
807	return -EINVAL;
808}
809
810/* Called from syscall or from eBPF program */
811static int stack_map_delete_elem(struct bpf_map *map, void *key)
812{
813	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
814	struct stack_map_bucket *old_bucket;
815	u32 id = *(u32 *)key;
816
817	if (unlikely(id >= smap->n_buckets))
818		return -E2BIG;
819
820	old_bucket = xchg(&smap->buckets[id], NULL);
821	if (old_bucket) {
822		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
823		return 0;
824	} else {
825		return -ENOENT;
826	}
827}
828
829/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
830static void stack_map_free(struct bpf_map *map)
831{
832	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
833
834	bpf_map_area_free(smap->elems);
835	pcpu_freelist_destroy(&smap->freelist);
836	bpf_map_area_free(smap);
837	put_callchain_buffers();
838}
839
840static int stack_trace_map_btf_id;
841const struct bpf_map_ops stack_trace_map_ops = {
842	.map_alloc = stack_map_alloc,
843	.map_free = stack_map_free,
844	.map_get_next_key = stack_map_get_next_key,
845	.map_lookup_elem = stack_map_lookup_elem,
846	.map_update_elem = stack_map_update_elem,
847	.map_delete_elem = stack_map_delete_elem,
848	.map_check_btf = map_check_no_btf,
849	.map_btf_name = "bpf_stack_map",
850	.map_btf_id = &stack_trace_map_btf_id,
851};
852
853static int __init stack_map_init(void)
854{
855	int cpu;
856	struct stack_map_irq_work *work;
857
858	for_each_possible_cpu(cpu) {
859		work = per_cpu_ptr(&up_read_work, cpu);
860		init_irq_work(&work->irq_work, do_up_read);
861	}
862	return 0;
863}
864subsys_initcall(stack_map_init);
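
Complementary sketch (again not part of this file, attach point illustrative): bpf_get_stack() copies the trace into a program-supplied buffer instead of a map bucket. Per __bpf_get_stack() above, the buffer size must be a multiple of the element size (8 bytes here), the unused tail is zero-filled, and the positive return value is the number of bytes copied.

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

SEC("kprobe/do_sys_openat2")	/* illustrative attach point */
int dump_user_stack(struct pt_regs *ctx)
{
	__u64 ips[32];	/* 256 bytes, a multiple of sizeof(u64) */
	long len;

	/* user-space stack of the current task; see bpf_get_stack_proto */
	len = bpf_get_stack(ctx, ips, sizeof(ips), BPF_F_USER_STACK);
	if (len < 0)
		return 0;	/* -EINVAL or -EFAULT from __bpf_get_stack() */
	/* the first len / sizeof(__u64) entries of ips[] are valid frames */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";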