// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/kernel.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include <linux/btf_ids.h>
#include <linux/buildid.h>
#include "percpu_freelist.h"
#include "mmap_unlock_work.h"

#define STACK_CREATE_FLAG_MASK					\
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY |	\
	 BPF_F_STACK_BUILD_ID)

struct stack_map_bucket {
	struct pcpu_freelist_node fnode;
	u32 hash;
	u32 nr;
	u64 data[];
};

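/*
 * A stack map is a hash table keyed by the stack trace itself: buckets[]
 * holds one pointer per slot, indexed by the jhash of the trace masked with
 * (n_buckets - 1). The bucket storage (elems) is preallocated and handed
 * out through a per-cpu freelist, so inserts never allocate at runtime.
 */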
struct bpf_stack_map {
	struct bpf_map map;
	void *elems;
	struct pcpu_freelist freelist;
	u32 n_buckets;
	struct stack_map_bucket *buckets[] __counted_by(n_buckets);
};

static inline bool stack_map_use_build_id(struct bpf_map *map)
{
	return (map->map_flags & BPF_F_STACK_BUILD_ID);
}

static inline int stack_map_data_size(struct bpf_map *map)
{
	return stack_map_use_build_id(map) ?
		sizeof(struct bpf_stack_build_id) : sizeof(u64);
}

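/*
 * Preallocate one bucket per map entry (each large enough to hold a full
 * value) and seed the per-cpu freelist with them.
 */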
static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
{
	u64 elem_size = sizeof(struct stack_map_bucket) +
			(u64)smap->map.value_size;
	int err;

	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
					 smap->map.numa_node);
	if (!smap->elems)
		return -ENOMEM;

	err = pcpu_freelist_init(&smap->freelist);
	if (err)
		goto free_elems;

	pcpu_freelist_populate(&smap->freelist, smap->elems, elem_size,
			       smap->map.max_entries);
	return 0;

free_elems:
	bpf_map_area_free(smap->elems);
	return err;
}

/* Called from syscall */
static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_stack_map *smap;
	u64 cost, n_buckets;
	int err;

	if (attr->map_flags & ~STACK_CREATE_FLAG_MASK)
		return ERR_PTR(-EINVAL);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    value_size < 8 || value_size % 8)
		return ERR_PTR(-EINVAL);

	BUILD_BUG_ON(sizeof(struct bpf_stack_build_id) % sizeof(u64));
	if (attr->map_flags & BPF_F_STACK_BUILD_ID) {
		if (value_size % sizeof(struct bpf_stack_build_id) ||
		    value_size / sizeof(struct bpf_stack_build_id)
		    > sysctl_perf_event_max_stack)
			return ERR_PTR(-EINVAL);
	} else if (value_size / 8 > sysctl_perf_event_max_stack)
		return ERR_PTR(-EINVAL);

	/* hash table size must be power of 2 */
	n_buckets = roundup_pow_of_two(attr->max_entries);
	if (!n_buckets)
		return ERR_PTR(-E2BIG);

	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
	if (!smap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&smap->map, attr);
	smap->n_buckets = n_buckets;

	err = get_callchain_buffers(sysctl_perf_event_max_stack);
	if (err)
		goto free_smap;

	err = prealloc_elems_and_freelist(smap);
	if (err)
		goto put_buffers;

	return &smap->map;

put_buffers:
	put_callchain_buffers();
free_smap:
	bpf_map_area_free(smap);
	return ERR_PTR(err);
}

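/*
 * Translate raw instruction pointers into (build_id, file offset) pairs by
 * walking current->mm's VMAs. When the mm cannot be used (no mm, kernel
 * stack, busy irq_work, or the mmap lock cannot be taken), each entry falls
 * back to BPF_STACK_BUILD_ID_IP and keeps the raw ip instead.
 */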
static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
					  u64 *ips, u32 trace_nr, bool user)
{
	int i;
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = bpf_mmap_unlock_get_irq_work(&work);
	struct vm_area_struct *vma, *prev_vma = NULL;
	const char *prev_build_id;

	/* If the irq_work is in use, fall back to report ips. Same
	 * fallback is used for kernel stack (!user) on a stackmap with
	 * build_id.
	 */
	if (!user || !current || !current->mm || irq_work_busy ||
	    !mmap_read_trylock(current->mm)) {
		/* cannot access current->mm, fall back to ips */
		for (i = 0; i < trace_nr; i++) {
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
		}
		return;
	}

	for (i = 0; i < trace_nr; i++) {
		if (range_in_vma(prev_vma, ips[i], ips[i])) {
			vma = prev_vma;
			memcpy(id_offs[i].build_id, prev_build_id,
			       BUILD_ID_SIZE_MAX);
			goto build_id_valid;
		}
		vma = find_vma(current->mm, ips[i]);
		if (!vma || build_id_parse(vma, id_offs[i].build_id, NULL)) {
			/* per entry fall back to ips */
			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
			id_offs[i].ip = ips[i];
			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
			continue;
		}
build_id_valid:
		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
			- vma->vm_start;
		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
		prev_vma = vma;
		prev_build_id = id_offs[i].build_id;
	}
	bpf_mmap_unlock_mm(work, current->mm);
}

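/*
 * Capture a kernel stack for an arbitrary (possibly sleeping) task via
 * stack_trace_save_tsk(), reusing a perf callchain buffer as scratch space.
 * Only available when CONFIG_STACKTRACE is enabled.
 */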
static struct perf_callchain_entry *
get_callchain_entry_for_task(struct task_struct *task, u32 max_depth)
{
#ifdef CONFIG_STACKTRACE
	struct perf_callchain_entry *entry;
	int rctx;

	entry = get_callchain_entry(&rctx);

	if (!entry)
		return NULL;

	entry->nr = stack_trace_save_tsk(task, (unsigned long *)entry->ip,
					 max_depth, 0);

	/* stack_trace_save_tsk() works on unsigned long array, while
	 * perf_callchain_entry uses u64 array. For 32-bit systems, it is
	 * necessary to fix this mismatch.
	 */
	if (__BITS_PER_LONG != 64) {
		unsigned long *from = (unsigned long *) entry->ip;
		u64 *to = entry->ip;
		int i;

		/* copy data from the end to avoid using extra buffer */
		for (i = entry->nr - 1; i >= 0; i--)
			to[i] = (u64)(from[i]);
	}

	put_callchain_entry(rctx);

	return entry;
#else /* CONFIG_STACKTRACE */
	return NULL;
#endif
}

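/*
 * Core of bpf_get_stackid(): hash the (post-skip) trace with jhash2, index
 * buckets[] with hash & (n_buckets - 1), and either return the existing id
 * on a match or install a new bucket taken from the freelist. With
 * BPF_F_FAST_STACK_CMP only the hash is compared; without
 * BPF_F_REUSE_STACKID a hash collision with a different trace returns
 * -EEXIST instead of replacing the old bucket.
 */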
static long __bpf_get_stackid(struct bpf_map *map,
			      struct perf_callchain_entry *trace, u64 flags)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	u32 hash, id, trace_nr, trace_len;
	bool user = flags & BPF_F_USER_STACK;
	u64 *ips;
	bool hash_matches;

	if (trace->nr <= skip)
		/* skipping more than usable stack trace */
		return -EFAULT;

	trace_nr = trace->nr - skip;
	trace_len = trace_nr * sizeof(u64);
	ips = trace->ip + skip;
	hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
	id = hash & (smap->n_buckets - 1);
	bucket = READ_ONCE(smap->buckets[id]);

	hash_matches = bucket && bucket->hash == hash;
	/* fast cmp */
	if (hash_matches && flags & BPF_F_FAST_STACK_CMP)
		return id;

	if (stack_map_use_build_id(map)) {
		/* for build_id+offset, pop a bucket before slow cmp */
		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		new_bucket->nr = trace_nr;
		stack_map_get_build_id_offset(
			(struct bpf_stack_build_id *)new_bucket->data,
			ips, trace_nr, user);
		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return id;
		}
		if (bucket && !(flags & BPF_F_REUSE_STACKID)) {
			pcpu_freelist_push(&smap->freelist, &new_bucket->fnode);
			return -EEXIST;
		}
	} else {
		if (hash_matches && bucket->nr == trace_nr &&
		    memcmp(bucket->data, ips, trace_len) == 0)
			return id;
		if (bucket && !(flags & BPF_F_REUSE_STACKID))
			return -EEXIST;

		new_bucket = (struct stack_map_bucket *)
			pcpu_freelist_pop(&smap->freelist);
		if (unlikely(!new_bucket))
			return -ENOMEM;
		memcpy(new_bucket->data, ips, trace_len);
	}

	new_bucket->hash = hash;
	new_bucket->nr = trace_nr;

	old_bucket = xchg(&smap->buckets[id], new_bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return id;
}

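/*
 * bpf_get_stackid(ctx, map, flags) helper: walk the current stack via
 * get_perf_callchain() and store it in the map. The low bits of flags hold
 * the number of frames to skip; BPF_F_USER_STACK selects the user stack.
 */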
BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
	   u64, flags)
{
	u32 max_depth = map->value_size / stack_map_data_size(map);
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	max_depth += skip;
	if (max_depth > sysctl_perf_event_max_stack)
		max_depth = sysctl_perf_event_max_stack;

	trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
				   false, false);

	if (unlikely(!trace))
		/* couldn't fetch the stack trace */
		return -EFAULT;

	return __bpf_get_stackid(map, trace, flags);
}

const struct bpf_func_proto bpf_get_stackid_proto = {
	.func		= bpf_get_stackid,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

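/*
 * In a perf callchain the kernel frames come first; count them by scanning
 * up to the PERF_CONTEXT_USER marker that separates kernel from user ips.
 */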
static __u64 count_kernel_ip(struct perf_callchain_entry *trace)
{
	__u64 nr_kernel = 0;

	while (nr_kernel < trace->nr) {
		if (trace->ip[nr_kernel] == PERF_CONTEXT_USER)
			break;
		nr_kernel++;
	}
	return nr_kernel;
}

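/*
 * bpf_get_stackid() flavour for perf_event programs. When the sample
 * already carries a callchain, reuse it: for a kernel stack, temporarily
 * trim trace->nr to the kernel portion; for a user stack, fold the kernel
 * frame count into the skip bits of flags.
 */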
BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
	   struct bpf_map *, map, u64, flags)
{
	struct perf_event *event = ctx->event;
	struct perf_callchain_entry *trace;
	bool kernel, user;
	__u64 nr_kernel;
	int ret;

	/* perf_sample_data doesn't have callchain, use bpf_get_stackid */
	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
		return bpf_get_stackid((unsigned long)(ctx->regs),
				       (unsigned long) map, flags, 0, 0);

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_FAST_STACK_CMP | BPF_F_REUSE_STACKID)))
		return -EINVAL;

	user = flags & BPF_F_USER_STACK;
	kernel = !user;

	trace = ctx->data->callchain;
	if (unlikely(!trace))
		return -EFAULT;

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;
		ret = __bpf_get_stackid(map, trace, flags);

		/* restore nr */
		trace->nr = nr;
	} else { /* user */
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;
		if (skip > BPF_F_SKIP_FIELD_MASK)
			return -EFAULT;

		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		ret = __bpf_get_stackid(map, trace, flags);
	}
	return ret;
}

const struct bpf_func_proto bpf_get_stackid_proto_pe = {
	.func		= bpf_get_stackid_pe,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_CONST_MAP_PTR,
	.arg3_type	= ARG_ANYTHING,
};

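/*
 * Common worker for the bpf_get_stack*() helpers: fill buf with up to size
 * bytes of stack data (raw ips, or bpf_stack_build_id entries when
 * BPF_F_USER_BUILD_ID is set), zero the unused tail and return the number
 * of bytes copied, or a negative error with buf cleared.
 */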
static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
			    struct perf_callchain_entry *trace_in,
			    void *buf, u32 size, u64 flags)
{
	u32 trace_nr, copy_len, elem_size, num_elem, max_depth;
	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
	bool crosstask = task && task != current;
	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
	bool user = flags & BPF_F_USER_STACK;
	struct perf_callchain_entry *trace;
	bool kernel = !user;
	int err = -EINVAL;
	u64 *ips;

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;
	if (kernel && user_build_id)
		goto clear;

	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
					    : sizeof(u64);
	if (unlikely(size % elem_size))
		goto clear;

	/* cannot get valid user stack for task without user_mode regs */
	if (task && user && !user_mode(regs))
		goto err_fault;

	/* get_perf_callchain does not support crosstask user stack walking
	 * but returns an empty stack instead of NULL.
	 */
	if (crosstask && user) {
		err = -EOPNOTSUPP;
		goto clear;
	}

	num_elem = size / elem_size;
	max_depth = num_elem + skip;
	if (sysctl_perf_event_max_stack < max_depth)
		max_depth = sysctl_perf_event_max_stack;

	if (trace_in)
		trace = trace_in;
	else if (kernel && task)
		trace = get_callchain_entry_for_task(task, max_depth);
	else
		trace = get_perf_callchain(regs, 0, kernel, user, max_depth,
					   crosstask, false);
	if (unlikely(!trace))
		goto err_fault;

	if (trace->nr < skip)
		goto err_fault;

	trace_nr = trace->nr - skip;
	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
	copy_len = trace_nr * elem_size;

	ips = trace->ip + skip;
	if (user && user_build_id)
		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
	else
		memcpy(buf, ips, copy_len);

	if (size > copy_len)
		memset(buf + copy_len, 0, size - copy_len);
	return copy_len;

err_fault:
	err = -EFAULT;
clear:
	memset(buf, 0, size);
	return err;
}

BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
	   u64, flags)
{
	return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);
}

const struct bpf_func_proto bpf_get_stack_proto = {
	.func		= bpf_get_stack,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

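/*
 * bpf_get_task_stack(): like bpf_get_stack() but for an arbitrary task.
 * The task's stack is pinned with try_get_task_stack() while its pt_regs
 * are read, and released again before returning.
 */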
BPF_CALL_4(bpf_get_task_stack, struct task_struct *, task, void *, buf,
	   u32, size, u64, flags)
{
	struct pt_regs *regs;
	long res = -EINVAL;

	if (!try_get_task_stack(task))
		return -EFAULT;

	regs = task_pt_regs(task);
	if (regs)
		res = __bpf_get_stack(regs, task, NULL, buf, size, flags);
	put_task_stack(task);

	return res;
}

const struct bpf_func_proto bpf_get_task_stack_proto = {
	.func		= bpf_get_task_stack,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

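/*
 * bpf_get_stack() flavour for perf_event programs: mirror
 * bpf_get_stackid_pe() and reuse the callchain captured in the sample,
 * splitting it into kernel and user portions with count_kernel_ip().
 */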
BPF_CALL_4(bpf_get_stack_pe, struct bpf_perf_event_data_kern *, ctx,
	   void *, buf, u32, size, u64, flags)
{
	struct pt_regs *regs = (struct pt_regs *)(ctx->regs);
	struct perf_event *event = ctx->event;
	struct perf_callchain_entry *trace;
	bool kernel, user;
	int err = -EINVAL;
	__u64 nr_kernel;

	if (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN))
		return __bpf_get_stack(regs, NULL, NULL, buf, size, flags);

	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
			       BPF_F_USER_BUILD_ID)))
		goto clear;

	user = flags & BPF_F_USER_STACK;
	kernel = !user;

	err = -EFAULT;
	trace = ctx->data->callchain;
	if (unlikely(!trace))
		goto clear;

	nr_kernel = count_kernel_ip(trace);

	if (kernel) {
		__u64 nr = trace->nr;

		trace->nr = nr_kernel;
		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);

		/* restore nr */
		trace->nr = nr;
	} else { /* user */
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

		skip += nr_kernel;
		if (skip > BPF_F_SKIP_FIELD_MASK)
			goto clear;

		flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
		err = __bpf_get_stack(regs, NULL, trace, buf, size, flags);
	}
	return err;

clear:
	memset(buf, 0, size);
	return err;
}

const struct bpf_func_proto bpf_get_stack_proto_pe = {
	.func		= bpf_get_stack_pe,
	.gpl_only	= true,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
	.arg4_type	= ARG_ANYTHING,
};

/* Called from eBPF program */
static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* Called from syscall */
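/*
 * Copy one stack out to user space. The bucket is temporarily swapped out
 * with xchg() so a concurrent bpf_get_stackid() cannot recycle it into the
 * freelist while its data is being copied, then swapped back in.
 */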
int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *bucket, *old_bucket;
	u32 id = *(u32 *)key, trace_len;

	if (unlikely(id >= smap->n_buckets))
		return -ENOENT;

	bucket = xchg(&smap->buckets[id], NULL);
	if (!bucket)
		return -ENOENT;

	trace_len = bucket->nr * stack_map_data_size(map);
	memcpy(value, bucket->data, trace_len);
	memset(value + trace_len, 0, map->value_size - trace_len);

	old_bucket = xchg(&smap->buckets[id], bucket);
	if (old_bucket)
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
	return 0;
}

static int stack_map_get_next_key(struct bpf_map *map, void *key,
				  void *next_key)
{
	struct bpf_stack_map *smap = container_of(map,
						  struct bpf_stack_map, map);
	u32 id;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!key) {
		id = 0;
	} else {
		id = *(u32 *)key;
		if (id >= smap->n_buckets || !smap->buckets[id])
			id = 0;
		else
			id++;
	}

	while (id < smap->n_buckets && !smap->buckets[id])
		id++;

	if (id >= smap->n_buckets)
		return -ENOENT;

	*(u32 *)next_key = id;
	return 0;
}

static long stack_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static long stack_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	struct stack_map_bucket *old_bucket;
	u32 id = *(u32 *)key;

	if (unlikely(id >= smap->n_buckets))
		return -E2BIG;

	old_bucket = xchg(&smap->buckets[id], NULL);
	if (old_bucket) {
		pcpu_freelist_push(&smap->freelist, &old_bucket->fnode);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void stack_map_free(struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);

	bpf_map_area_free(smap->elems);
	pcpu_freelist_destroy(&smap->freelist);
	bpf_map_area_free(smap);
	put_callchain_buffers();
}

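/*
 * Report the memory charged to the map: the map struct itself, the bucket
 * pointer array and the preallocated elements.
 */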
static u64 stack_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
	u64 value_size = map->value_size;
	u64 n_buckets = smap->n_buckets;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*smap);

	usage += n_buckets * sizeof(struct stack_map_bucket *);
	usage += entries * (sizeof(struct stack_map_bucket) + value_size);
	return usage;
}

BTF_ID_LIST_SINGLE(stack_trace_map_btf_ids, struct, bpf_stack_map)
const struct bpf_map_ops stack_trace_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc = stack_map_alloc,
	.map_free = stack_map_free,
	.map_get_next_key = stack_map_get_next_key,
	.map_lookup_elem = stack_map_lookup_elem,
	.map_update_elem = stack_map_update_elem,
	.map_delete_elem = stack_map_delete_elem,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = stack_map_mem_usage,
	.map_btf_id = &stack_trace_map_btf_ids[0],
};