kernel/bpf/stackmap.c (v4.6)
 
  1/* Copyright (c) 2016 Facebook
  2 *
  3 * This program is free software; you can redistribute it and/or
  4 * modify it under the terms of version 2 of the GNU General Public
  5 * License as published by the Free Software Foundation.
  6 */
  7#include <linux/bpf.h>
  8#include <linux/jhash.h>
  9#include <linux/filter.h>
 10#include <linux/vmalloc.h>
 11#include <linux/stacktrace.h>
 12#include <linux/perf_event.h>
 13#include "percpu_freelist.h"
 14
 15struct stack_map_bucket {
 16	struct pcpu_freelist_node fnode;
 17	u32 hash;
 18	u32 nr;
 19	u64 ip[];
 20};
 21
 22struct bpf_stack_map {
 23	struct bpf_map map;
 24	void *elems;
 25	struct pcpu_freelist freelist;
 26	u32 n_buckets;
 27	struct stack_map_bucket *buckets[];
 28};
 29
 30static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 31{
 32	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
 33	int err;
 34
 35	smap->elems = vzalloc(elem_size * smap->map.max_entries);
 36	if (!smap->elems)
 37		return -ENOMEM;
 38
 39	err = pcpu_freelist_init(&smap->freelist);
 40	if (err)
 41		goto free_elems;
 42
 43	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
 44			       smap->map.max_entries);
 45	return 0;
 46
 47free_elems:
 48	vfree(smap->elems);
 49	return err;
 50}
 51
 52/* Called from syscall */
 53static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 54{
 55	u32 value_size = attr->value_size;
 56	struct bpf_stack_map *smap;
 57	u64 cost, n_buckets;
 58	int err;
 59
 60	if (!capable(CAP_SYS_ADMIN))
 61		return ERR_PTR(-EPERM);
 62
 63	if (attr->map_flags)
 64		return ERR_PTR(-EINVAL);
 65
 66	/* check sanity of attributes */
 67	if (attr->max_entries == 0 || attr->key_size != 4 ||
 68	    value_size < 8 || value_size % 8 ||
 69	    value_size / 8 > PERF_MAX_STACK_DEPTH)
 70		return ERR_PTR(-EINVAL);
 71
 72	/* hash table size must be power of 2 */
 73	n_buckets = roundup_pow_of_two(attr->max_entries);
 74
 75	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
 76	if (cost >= U32_MAX - PAGE_SIZE)
 77		return ERR_PTR(-E2BIG);
 78
 79	smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
 80	if (!smap) {
 81		smap = vzalloc(cost);
 82		if (!smap)
 83			return ERR_PTR(-ENOMEM);
 84	}
 85
 86	err = -E2BIG;
 87	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
 88	if (cost >= U32_MAX - PAGE_SIZE)
 89		goto free_smap;
 90
 91	smap->map.map_type = attr->map_type;
 92	smap->map.key_size = attr->key_size;
 93	smap->map.value_size = value_size;
 94	smap->map.max_entries = attr->max_entries;
 95	smap->n_buckets = n_buckets;
 96	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 97
 98	err = bpf_map_precharge_memlock(smap->map.pages);
 99	if (err)
100		goto free_smap;
101
102	err = get_callchain_buffers();
103	if (err)
104		goto free_smap;
105
106	err = prealloc_elems_and_freelist(smap);
107	if (err)
108		goto put_buffers;
109
110	return &smap->map;
111
112put_buffers:
113	put_callchain_buffers();
114free_smap:
115	kvfree(smap);
116	return ERR_PTR(err);
117}
118
119static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
120{
121	struct pt_regs *regs = (struct pt_regs *) (long) r1;
122	struct bpf_map *map = (struct bpf_map *) (long) r2;
123	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
124	struct perf_callchain_entry *trace;
125	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
126	u32 max_depth = map->value_size / 8;
127	/* stack_map_alloc() checks that max_depth <= PERF_MAX_STACK_DEPTH */
128	u32 init_nr = PERF_MAX_STACK_DEPTH - max_depth;
129	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
130	u32 hash, id, trace_nr, trace_len;
131	bool user = flags & BPF_F_USER_STACK;
132	bool kernel = !user;
133	u64 *ips;
134
135	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
136			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
137		return -EINVAL;
138
139	trace = get_perf_callchain(regs, init_nr, kernel, user, false, false);
140
141	if (unlikely(!trace))
142		/* couldn't fetch the stack trace */
143		return -EFAULT;
144
145	/* get_perf_callchain() guarantees that trace->nr >= init_nr
 146	 * and trace->nr <= PERF_MAX_STACK_DEPTH, so trace_nr <= max_depth
147	 */
148	trace_nr = trace->nr - init_nr;
149
150	if (trace_nr <= skip)
151		/* skipping more than usable stack trace */
152		return -EFAULT;
153
154	trace_nr -= skip;
155	trace_len = trace_nr * sizeof(u64);
156	ips = trace->ip + skip + init_nr;
157	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
158	id = hash & (smap->n_buckets - 1);
159	bucket = READ_ONCE(smap->buckets[id]);
160
161	if (bucket && bucket->hash == hash) {
162		if (flags & BPF_F_FAST_STACK_CMP)
163			return id;
164		if (bucket->nr == trace_nr &&
165		    memcmp(bucket->ip, ips, trace_len) == 0)
166			return id;
167	}
168
169	/* this call stack is not in the map, try to add it */
170	if (bucket && !(flags & BPF_F_REUSE_STACKID))
171		return -EEXIST;
172
173	new_bucket = (struct stack_map_bucket *)
174		pcpu_freelist_pop(&smap->freelist);
175	if (unlikely(!new_bucket))
176		return -ENOMEM;
177
178	memcpy(new_bucket->ip, ips, trace_len);
179	new_bucket->hash = hash;
180	new_bucket->nr = trace_nr;
181
182	old_bucket = xchg(&smap->buckets[id], new_bucket);
183	if (old_bucket)
184		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
185	return id;
186}
187
188const struct bpf_func_proto bpf_get_stackid_proto = {
189	.func		= bpf_get_stackid,
190	.gpl_only	= true,
191	.ret_type	= RET_INTEGER,
192	.arg1_type	= ARG_PTR_TO_CTX,
193	.arg2_type	= ARG_CONST_MAP_PTR,
194	.arg3_type	= ARG_ANYTHING,
195};
196
197/* Called from eBPF program */
198static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
199{
200	return NULL;
201}
202
203/* Called from syscall */
204int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
205{
206	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
207	struct stack_map_bucket *bucket, *old_bucket;
208	u32 id = *(u32 *)key, trace_len;
209
210	if (unlikely(id >= smap->n_buckets))
211		return -ENOENT;
212
213	bucket = xchg(&smap->buckets[id], NULL);
214	if (!bucket)
215		return -ENOENT;
216
217	trace_len = bucket->nr * sizeof(u64);
218	memcpy(value, bucket->ip, trace_len);
219	memset(value + trace_len, 0, map->value_size - trace_len);
220
221	old_bucket = xchg(&smap->buckets[id], bucket);
222	if (old_bucket)
223		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
224	return 0;
225}
226
227static int stack_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
228{
229	return -EINVAL;
230}
231
232static int stack_map_update_elem(struct bpf_map *map, void *key, void *value,
233				 u64 map_flags)
234{
235	return -EINVAL;
236}
237
238/* Called from syscall or from eBPF program */
239static int stack_map_delete_elem(struct bpf_map *map, void *key)
240{
241	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
242	struct stack_map_bucket *old_bucket;
243	u32 id = *(u32 *)key;
244
245	if (unlikely(id >= smap->n_buckets))
246		return -E2BIG;
247
248	old_bucket = xchg(&smap->buckets[id], NULL);
249	if (old_bucket) {
250		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
251		return 0;
252	} else {
253		return -ENOENT;
254	}
255}
256
257/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
258static void stack_map_free(struct bpf_map *map)
259{
260	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
261
262	/* wait for bpf programs to complete before freeing stack map */
263	synchronize_rcu();
264
265	vfree(smap->elems);
266	pcpu_freelist_destroy(&smap->freelist);
267	kvfree(smap);
268	put_callchain_buffers();
269}
270
271static const struct bpf_map_ops stack_map_ops = {
272	.map_alloc = stack_map_alloc,
273	.map_free = stack_map_free,
274	.map_get_next_key = stack_map_get_next_key,
275	.map_lookup_elem = stack_map_lookup_elem,
276	.map_update_elem = stack_map_update_elem,
277	.map_delete_elem = stack_map_delete_elem,
278};
279
280static struct bpf_map_type_list stack_map_type __read_mostly = {
281	.ops = &stack_map_ops,
282	.type = BPF_MAP_TYPE_STACK_TRACE,
283};
284
285static int __init register_stack_map(void)
286{
287	bpf_register_map_type(&stack_map_type);
288	return 0;
289}
290late_initcall(register_stack_map);
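
For context, here is a minimal sketch of the BPF-program side of this map type, tying together the BPF_MAP_TYPE_STACK_TRACE registration and the bpf_get_stackid() helper above. It is written with today's libbpf BTF map-definition macros rather than the v4.6-era struct bpf_map_def; the map name, sizes, and kprobe attach point are illustrative assumptions, not part of stackmap.c.

/* stack_count.bpf.c: hypothetical BPF-side example (not part of stackmap.c) */
#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

#define MAX_DEPTH 127	/* value_size / 8 must stay within the perf stack-depth limit */

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, 1024);			/* rounded up to a power of two for n_buckets */
	__uint(key_size, sizeof(__u32));		/* stack_map_alloc() requires key_size == 4 */
	__uint(value_size, MAX_DEPTH * sizeof(__u64));	/* multiple of 8, as checked above */
} stack_traces SEC(".maps");

SEC("kprobe/blk_mq_start_request")	/* illustrative attach point */
int on_request(struct pt_regs *ctx)
{
	/* Hash and store the current kernel stack; the returned id indexes smap->buckets[]. */
	long id = bpf_get_stackid(ctx, &stack_traces, 0);

	if (id < 0)	/* e.g. -EEXIST on a hash collision without BPF_F_REUSE_STACKID */
		return 0;

	/* ... aggregate by stack id here ... */
	return 0;
}

char LICENSE[] SEC("license") = "GPL";	/* bpf_get_stackid() is a GPL-only helper */
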
kernel/bpf/stackmap.c (v6.9.4)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/* Copyright (c) 2016 Facebook
  3 */
  4#include <linux/bpf.h>
  5#include <linux/jhash.h>
  6#include <linux/filter.h>
  7#include <linux/kernel.h>
  8#include <linux/stacktrace.h>
  9#include <linux/perf_event.h>
 10#include <linux/btf_ids.h>
 11#include <linux/buildid.h>
 12#include "percpu_freelist.h"
 13#include "mmap_unlock_work.h"
 14
 15#define STACK_CREATE_FLAG_MASK					\
 16	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
 17	 BPF_F_STACK_BUILD_ID)
 18
 19struct stack_map_bucket {
 20	struct pcpu_freelist_node fnode;
 21	u32 hash;
 22	u32 nr;
 23	u64 data[];
 24};
 25
 26struct bpf_stack_map {
 27	struct bpf_map map;
 28	void *elems;
 29	struct pcpu_freelist freelist;
 30	u32 n_buckets;
 31	struct stack_map_bucket *buckets[] __counted_by(n_buckets);
 32};
 33
 34static inline bool stack_map_use_build_id(struct bpf_map *map)
 35{
 36	return (map->map_flags & BPF_F_STACK_BUILD_ID);
 37}
 38
 39static inline int stack_map_data_size(struct bpf_map *map)
 40{
 41	return stack_map_use_build_id(map) ?
 42		sizeof(struct bpf_stack_build_id) : sizeof(u64);
 43}
 44
 45static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 46{
 47	u64 elem_size = sizeof(struct stack_map_bucket) +
 48			(u64)smap->map.value_size;
 49	int err;
 50
 51	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
 52					 smap->map.numa_node);
 53	if (!smap->elems)
 54		return -ENOMEM;
 55
 56	err = pcpu_freelist_init(&smap->freelist);
 57	if (err)
 58		goto free_elems;
 59
 60	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
 61			       smap->map.max_entries);
 62	return 0;
 63
 64free_elems:
 65	bpf_map_area_free(smap->elems);
 66	return err;
 67}
 68
 69/* Called from syscall */
 70static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 71{
 72	u32 value_size = attr->value_size;
 73	struct bpf_stack_map *smap;
 74	u64 cost, n_buckets;
 75	int err;
 76
 77	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
 78		return ERR_PTR(-EINVAL);
 79
 80	/* check sanity of attributes */
 81	if (attr->max_entries == 0 || attr->key_size != 4 ||
 82	    value_size < 8 || value_size % 8)
 83		return ERR_PTR(-EINVAL);
 84
 85	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
 86	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
 87		if (value_size % sizeof(struct bpf_stack_build_id) ||
 88		    value_size / sizeof(struct bpf_stack_build_id)
 89		    > sysctl_perf_event_max_stack)
 90			return ERR_PTR(-EINVAL);
 91	} else if (value_size / 8 > sysctl_perf_event_max_stack)
 92		return ERR_PTR(-EINVAL);
 93
 94	/* hash table size must be power of 2; roundup_pow_of_two() can overflow
 95	 * into UB on 32-bit arches, so check that first
 96	 */
 97	if (attr->max_entries > 1UL << 31)
 98		return ERR_PTR(-E2BIG);
 99
100	n_buckets = roundup_pow_of_two(attr->max_entries);
101
102	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
103	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
104	if (!smap)
105		return ERR_PTR(-ENOMEM);
106
107	bpf_map_init_from_attr(&smap->map, attr);
108	smap->n_buckets = n_buckets;
109
110	err = get_callchain_buffers(sysctl_perf_event_max_stack);
111	if (err)
112		goto free_smap;
113
114	err = prealloc_elems_and_freelist(smap);
115	if (err)
116		goto put_buffers;
117
118	return &smap->map;
119
120put_buffers:
121	put_callchain_buffers();
122free_smap:
123	bpf_map_area_free(smap);
124	return ERR_PTR(err);
125}
126
127static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
128					  u64 *ips, u32 trace_nr, bool user)
129{
130	int i;
131	struct mmap_unlock_irq_work *work = NULL;
132	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
133	struct vm_area_struct *vma, *prev_vma = NULL;
134	const char *prev_build_id;
135
136	/* If the irq_work is in use, fall back to report ips. Same
137	 * fallback is used for kernel stack (!user) on a stackmap with
138	 * build_id.
139	 */
140	if (!user || !current || !current->mm || irq_work_busy ||
141	    !mmap_read_trylock(current->mm)) {
142		/* cannot access current->mm, fall back to ips */
143		for (i = 0; i < trace_nr; i++) {
144			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
145			id_offs[i].ip = ips[i];
146			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
147		}
148		return;
149	}
150
151	for (i = 0; i < trace_nr; i++) {
152		if (range_in_vma(prev_vma, ips[i], ips[i])) {
153			vma = prev_vma;
154			memcpy(id_offs[i].build_id, prev_build_id,
155			       BUILD_ID_SIZE_MAX);
156			goto build_id_valid;
157		}
158		vma = find_vma(current->mm, ips[i]);
159		if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
160			/* per entry fall back to ips */
161			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
162			id_offs[i].ip = ips[i];
163			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
164			continue;
165		}
166build_id_valid:
167		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
168			- vma->vm_start;
169		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
170		prev_vma = vma;
171		prev_build_id = id_offs[i].build_id;
172	}
173	bpf_mmap_unlock_mm(work, current->mm);
174}
175
176static struct perf_callchain_entry *
177get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
178{
179#ifdef CONFIG_STACKTRACE
180	struct perf_callchain_entry *entry;
181	int rctx;
182
183	entry = get_callchain_entry(&rctx);
184
185	if (!entry)
186		return NULL;
187
188	entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
189					 max_depth, 0);
190
191	/* stack_trace_save_tsk() works on unsigned long array, while
192	 * perf_callchain_entry uses u64 array. For 32-bit systems, it is
193	 * necessary to fix this mismatch.
194	 */
195	if (__BITS_PER_LONG != 64) {
196		unsigned long *from = (unsigned long *) entry->ip;
197		u64 *to = entry->ip;
198		int i;
199
200		/* copy data from the end to avoid using extra buffer */
201		for (i = entry->nr - 1; i >= 0; i--)
202			to[i] = (u64)(from[i]);
203	}
204
205	put_callchain_entry(rctx);
206
207	return entry;
208#else /* CONFIG_STACKTRACE */
209	return NULL;
210#endif
211}
212
213static long __bpf_get_stackid(struct bpf_map *map,
214			      struct perf_callchain_entry *trace, u64 flags)
215{
216	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
217	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
218	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
219	u32 hash, id, trace_nr, trace_len;
220	bool user = flags & BPF_F_USER_STACK;
221	u64 *ips;
222	bool hash_matches;
223
224	if (trace->nr <= skip)
225		/* skipping more than usable stack trace */
226		return -EFAULT;
227
228	trace_nr = trace->nr - skip;
229	trace_len = trace_nr * sizeof(u64);
230	ips = trace->ip + skip;
231	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
232	id = hash & (smap->n_buckets - 1);
233	bucket = READ_ONCE(smap->buckets[id]);
234
235	hash_matches = bucket && bucket->hash == hash;
236	/* fast cmp */
237	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
238		return id;
239
240	if (stack_map_use_build_id(map)) {
241		/* for build_id+offset, pop a bucket before slow cmp */
242		new_bucket = (struct stack_map_bucket *)
243			pcpu_freelist_pop(&smap->freelist);
244		if (unlikely(!new_bucket))
245			return -ENOMEM;
246		new_bucket->nr = trace_nr;
247		stack_map_get_build_id_offset(
248			(struct bpf_stack_build_id *)new_bucket->data,
249			ips, trace_nr, user);
250		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
251		if (hash_matches && bucket->nr == trace_nr &&
252		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
253			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
254			return id;
255		}
256		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
257			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
258			return -EEXIST;
259		}
260	} else {
261		if (hash_matches && bucket->nr == trace_nr &&
262		    memcmp(bucket->data, ips, trace_len) == 0)
263			return id;
264		if (bucket && !(flags & BPF_F_REUSE_STACKID))
265			return -EEXIST;
266
267		new_bucket = (struct stack_map_bucket *)
268			pcpu_freelist_pop(&smap->freelist);
269		if (unlikely(!new_bucket))
270			return -ENOMEM;
271		memcpy(new_bucket->data, ips, trace_len);
272	}
273
274	new_bucket->hash = hash;
275	new_bucket->nr = trace_nr;
276
277	old_bucket = xchg(&smap->buckets[id], new_bucket);
278	if (old_bucket)
279		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
280	return id;
281}
282
283BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
284	   u64, flags)
285{
286	u32 max_depth = map->value_size / stack_map_data_size(map);
287	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
288	bool user = flags & BPF_F_USER_STACK;
289	struct perf_callchain_entry *trace;
290	bool kernel = !user;
291
292	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
293			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
294		return -EINVAL;
295
296	max_depth += skip;
297	if (max_depth > sysctl_perf_event_max_stack)
298		max_depth = sysctl_perf_event_max_stack;
299
300	trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
301				   false, false);
302
303	if (unlikely(!trace))
304		/* couldn't fetch the stack trace */
305		return -EFAULT;
306
307	return __bpf_get_stackid(map, trace, flags);
308}
309
310const struct bpf_func_proto bpf_get_stackid_proto = {
311	.func		= bpf_get_stackid,
312	.gpl_only	= true,
313	.ret_type	= RET_INTEGER,
314	.arg1_type	= ARG_PTR_TO_CTX,
315	.arg2_type	= ARG_CONST_MAP_PTR,
316	.arg3_type	= ARG_ANYTHING,
317};
318
319static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
320{
321	__u64 nr_kernel = 0;
322
323	while (nr_kernel < trace->nr) {
324		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
325			break;
326		nr_kernel++;
327	}
328	return nr_kernel;
329}
330
331BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
332	   struct bpf_map *, map, u64, flags)
333{
334	struct perf_event *event = ctx->event;
335	struct perf_callchain_entry *trace;
336	bool kernel, user;
337	__u64 nr_kernel;
338	int ret;
339
340	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
341	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
342		return bpf_get_stackid((unsigned long)(ctx->regs),
343				       (unsigned long) map, flags, 0, 0);
344
345	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
346			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
347		return -EINVAL;
348
349	user = flags & BPF_F_USER_STACK;
350	kernel = !user;
351
352	trace = ctx->data->callchain;
353	if (unlikely(!trace))
354		return -EFAULT;
355
356	nr_kernel = count_kernel_ip(trace);
357
358	if (kernel) {
359		__u64 nr = trace->nr;
360
361		trace->nr = nr_kernel;
362		ret = __bpf_get_stackid(map, trace, flags);
363
364		/* restore nr */
365		trace->nr = nr;
366	} else { /* user */
367		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
368
369		skip += nr_kernel;
370		if (skip > BPF_F_SKIP_FIELD_MASK)
371			return -EFAULT;
372
373		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
374		ret = __bpf_get_stackid(map, trace, flags);
375	}
376	return ret;
377}
378
379const struct bpf_func_proto bpf_get_stackid_proto_pe = {
380	.func		= bpf_get_stackid_pe,
381	.gpl_only	= false,
382	.ret_type	= RET_INTEGER,
383	.arg1_type	= ARG_PTR_TO_CTX,
384	.arg2_type	= ARG_CONST_MAP_PTR,
385	.arg3_type	= ARG_ANYTHING,
386};
387
388static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
389			    struct perf_callchain_entry *trace_in,
390			    void *buf, u32 size, u64 flags)
391{
392	u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
393	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
394	bool crosstask = task && task != current;
395	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
396	bool user = flags & BPF_F_USER_STACK;
397	struct perf_callchain_entry *trace;
398	bool kernel = !user;
399	int err = -EINVAL;
400	u64 *ips;
401
402	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
403			       BPF_F_USER_BUILD_ID)))
404		goto clear;
405	if (kernel && user_build_id)
406		goto clear;
407
408	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
409					    : sizeof(u64);
410	if (unlikely(size % elem_size))
411		goto clear;
412
413	/* cannot get valid user stack for task without user_mode regs */
414	if (task && user && !user_mode(regs))
415		goto err_fault;
416
417	/* get_perf_callchain does not support crosstask user stack walking
418	 * but returns an empty stack instead of NULL.
419	 */
420	if (crosstask && user) {
421		err = -EOPNOTSUPP;
422		goto clear;
423	}
424
425	num_elem = size / elem_size;
426	max_depth = num_elem + skip;
427	if (sysctl_perf_event_max_stack < max_depth)
428		max_depth = sysctl_perf_event_max_stack;
429
430	if (trace_in)
431		trace = trace_in;
432	else if (kernel && task)
433		trace = get_callchain_entry_for_task(task, max_depth);
434	else
435		trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
436					   crosstask, false);
437	if (unlikely(!trace))
438		goto err_fault;
439
440	if (trace->nr < skip)
441		goto err_fault;
442
443	trace_nr = trace->nr - skip;
444	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
445	copy_len = trace_nr * elem_size;
446
447	ips = trace->ip + skip;
448	if (user && user_build_id)
449		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
450	else
451		memcpy(buf, ips, copy_len);
452
453	if (size > copy_len)
454		memset(buf + copy_len, 0, size - copy_len);
455	return copy_len;
456
457err_fault:
458	err = -EFAULT;
459clear:
460	memset(buf, 0, size);
461	return err;
462}
463
464BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
465	   u64, flags)
466{
467	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
468}
469
470const struct bpf_func_proto bpf_get_stack_proto = {
471	.func		= bpf_get_stack,
472	.gpl_only	= true,
473	.ret_type	= RET_INTEGER,
474	.arg1_type	= ARG_PTR_TO_CTX,
475	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
476	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
477	.arg4_type	= ARG_ANYTHING,
478};
479
480BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
481	   u32, size, u64, flags)
482{
483	struct pt_regs *regs;
484	long res = -EINVAL;
485
486	if (!try_get_task_stack(task))
487		return -EFAULT;
488
489	regs = task_pt_regs(task);
490	if (regs)
491		res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
492	put_task_stack(task);
493
494	return res;
495}
496
497const struct bpf_func_proto bpf_get_task_stack_proto = {
498	.func		= bpf_get_task_stack,
499	.gpl_only	= false,
500	.ret_type	= RET_INTEGER,
501	.arg1_type	= ARG_PTR_TO_BTF_ID,
502	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
503	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
504	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
505	.arg4_type	= ARG_ANYTHING,
506};
507
508BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
509	   void *, buf, u32, size, u64, flags)
510{
511	struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
512	struct perf_event *event = ctx->event;
513	struct perf_callchain_entry *trace;
514	bool kernel, user;
515	int err = -EINVAL;
516	__u64 nr_kernel;
517
518	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
519		return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
520
521	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
522			       BPF_F_USER_BUILD_ID)))
523		goto clear;
524
525	user = flags & BPF_F_USER_STACK;
526	kernel = !user;
527
528	err = -EFAULT;
529	trace = ctx->data->callchain;
530	if (unlikely(!trace))
531		goto clear;
532
533	nr_kernel = count_kernel_ip(trace);
534
535	if (kernel) {
536		__u64 nr = trace->nr;
537
538		trace->nr = nr_kernel;
539		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
540
541		/* restore nr */
542		trace->nr = nr;
543	} else { /* user */
544		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;
545
546		skip += nr_kernel;
547		if (skip > BPF_F_SKIP_FIELD_MASK)
548			goto clear;
549
550		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
551		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
552	}
553	return err;
554
555clear:
556	memset(buf, 0, size);
557	return err;
558
559}
560
561const struct bpf_func_proto bpf_get_stack_proto_pe = {
562	.func		= bpf_get_stack_pe,
563	.gpl_only	= true,
564	.ret_type	= RET_INTEGER,
565	.arg1_type	= ARG_PTR_TO_CTX,
566	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
567	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
568	.arg4_type	= ARG_ANYTHING,
569};
570
571/* Called from eBPF program */
572static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
573{
574	return ERR_PTR(-EOPNOTSUPP);
575}
576
577/* Called from syscall */
578int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
579{
580	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
581	struct stack_map_bucket *bucket, *old_bucket;
582	u32 id = *(u32 *)key, trace_len;
583
584	if (unlikely(id >= smap->n_buckets))
585		return -ENOENT;
586
587	bucket = xchg(&smap->buckets[id], NULL);
588	if (!bucket)
589		return -ENOENT;
590
591	trace_len = bucket->nr * stack_map_data_size(map);
592	memcpy(value, bucket->data, trace_len);
593	memset(value + trace_len, 0, map->value_size - trace_len);
594
595	old_bucket = xchg(&smap->buckets[id], bucket);
596	if (old_bucket)
597		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
598	return 0;
599}
600
601static int stack_map_get_next_key(struct bpf_map *map, void *key,
602				  void *next_key)
603{
604	struct bpf_stack_map *smap = container_of(map,
605						  struct bpf_stack_map, map);
606	u32 id;
607
608	WARN_ON_ONCE(!rcu_read_lock_held());
609
610	if (!key) {
611		id = 0;
612	} else {
613		id = *(u32 *)key;
614		if (id >= smap->n_buckets || !smap->buckets[id])
615			id = 0;
616		else
617			id++;
618	}
619
620	while (id < smap->n_buckets && !smap->buckets[id])
621		id++;
622
623	if (id >= smap->n_buckets)
624		return -ENOENT;
625
626	*(u32 *)next_key = id;
627	return 0;
628}
629
630static long stack_map_update_elem(struct bpf_map *map, void *key, void *value,
631				  u64 map_flags)
632{
633	return -EINVAL;
634}
635
636/* Called from syscall or from eBPF program */
637static long stack_map_delete_elem(struct bpf_map *map, void *key)
638{
639	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
640	struct stack_map_bucket *old_bucket;
641	u32 id = *(u32 *)key;
642
643	if (unlikely(id >= smap->n_buckets))
644		return -E2BIG;
645
646	old_bucket = xchg(&smap->buckets[id], NULL);
647	if (old_bucket) {
648		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
649		return 0;
650	} else {
651		return -ENOENT;
652	}
653}
654
655/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
656static void stack_map_free(struct bpf_map *map)
657{
658	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
659
660	bpf_map_area_free(smap->elems);
661	pcpu_freelist_destroy(&smap->freelist);
662	bpf_map_area_free(smap);
663	put_callchain_buffers();
664}
665
666static u64 stack_map_mem_usage(const struct bpf_map *map)
667{
668	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
669	u64 value_size = map->value_size;
670	u64 n_buckets = smap->n_buckets;
671	u64 entries = map->max_entries;
672	u64 usage = sizeof(*smap);
673
674	usage += n_buckets * sizeof(struct stack_map_bucket *);
675	usage += entries * (sizeof(struct stack_map_bucket) + value_size);
676	return usage;
677}
678
679BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
680const struct bpf_map_ops stack_trace_map_ops = {
681	.map_meta_equal = bpf_map_meta_equal,
682	.map_alloc = stack_map_alloc,
683	.map_free = stack_map_free,
684	.map_get_next_key = stack_map_get_next_key,
685	.map_lookup_elem = stack_map_lookup_elem,
686	.map_update_elem = stack_map_update_elem,
687	.map_delete_elem = stack_map_delete_elem,
688	.map_check_btf = map_check_no_btf,
689	.map_mem_usage = stack_map_mem_usage,
690	.map_btf_id = &stack_trace_map_btf_ids[0],
691};
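
To close, a hypothetical user-space sketch of reading the map back. It assumes map_fd refers to a BPF_MAP_TYPE_STACK_TRACE map created without BPF_F_STACK_BUILD_ID (so each value is a plain array of u64 instruction pointers) and with the value_size implied by MAX_DEPTH below; the two libbpf calls reach stack_map_get_next_key() and bpf_stackmap_copy() above through the bpf() syscall.

/* dump_stacks.c: hypothetical user-space example (not part of stackmap.c) */
#include <stdio.h>
#include <stdint.h>
#include <bpf/bpf.h>

#define MAX_DEPTH 127	/* must match the value_size the map was created with */

static void dump_stacks(int map_fd)
{
	uint64_t ips[MAX_DEPTH];
	uint32_t key, next_key;
	int err;

	/* BPF_MAP_GET_NEXT_KEY -> stack_map_get_next_key(); a NULL key asks for
	 * the first populated bucket. */
	err = bpf_map_get_next_key(map_fd, NULL, &next_key);
	while (!err) {
		/* BPF_MAP_LOOKUP_ELEM -> bpf_stackmap_copy(); the unused tail of the
		 * value is zero-filled by the kernel, so a zero entry ends the walk. */
		if (bpf_map_lookup_elem(map_fd, &next_key, ips) == 0) {
			printf("stack id %u:\n", next_key);
			for (int i = 0; i < MAX_DEPTH && ips[i]; i++)
				printf("  %#llx\n", (unsigned long long)ips[i]);
		}
		key = next_key;
		err = bpf_map_get_next_key(map_fd, &key, &next_key);
	}
}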