1// SPDX-License-Identifier: GPL-2.0
2#include <trace/syscall.h>
3#include <trace/events/syscalls.h>
4#include <linux/syscalls.h>
5#include <linux/slab.h>
6#include <linux/kernel.h>
7#include <linux/module.h> /* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
8#include <linux/ftrace.h>
9#include <linux/perf_event.h>
10#include <linux/xarray.h>
11#include <asm/syscall.h>
12
13#include "trace_output.h"
14#include "trace.h"
15
16static DEFINE_MUTEX(syscall_trace_lock);
17
18static int syscall_enter_register(struct trace_event_call *event,
19 enum trace_reg type, void *data);
20static int syscall_exit_register(struct trace_event_call *event,
21 enum trace_reg type, void *data);
22
23static struct list_head *
24syscall_get_enter_fields(struct trace_event_call *call)
25{
26 struct syscall_metadata *entry = call->data;
27
28 return &entry->enter_fields;
29}
30
31extern struct syscall_metadata *__start_syscalls_metadata[];
32extern struct syscall_metadata *__stop_syscalls_metadata[];
33
34static DEFINE_XARRAY(syscalls_metadata_sparse);
35static struct syscall_metadata **syscalls_metadata;
36
37#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
38static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
39{
40 /*
41 * Only compare after the "sys" prefix. Archs that use
42 * syscall wrappers may have syscall symbol aliases prefixed
43 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
44 * mismatch.
45 */
46 return !strcmp(sym + 3, name + 3);
47}
48#endif
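/*
 * For illustration with the default matcher above: a symbol alias such
 * as "SyS_read" and the metadata name "sys_read" compare equal, since
 * both strings are compared starting at offset 3 ("_read" vs "_read").
 */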
49
50#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
51/*
52 * Some architectures that allow for 32bit applications
53 * to run on a 64bit kernel do not map the syscalls for
54 * the 32bit tasks the same as they do for 64bit tasks.
55 *
56 * *cough*x86*cough*
57 *
58 * In such a case, instead of reporting the wrong syscalls,
59 * simply ignore them.
60 *
61 * For an arch to ignore the compat syscalls it needs to
62 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
63 * define the function arch_trace_is_compat_syscall() to let
64 * the tracing system know that it should ignore it.
65 */
66static int
67trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
68{
69 if (unlikely(arch_trace_is_compat_syscall(regs)))
70 return -1;
71
72 return syscall_get_nr(task, regs);
73}
74#else
75static inline int
76trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
77{
78 return syscall_get_nr(task, regs);
79}
80#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
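/*
 * Example of the mismatch described above: on x86, syscall nr 5 is
 * open(2) in the 32-bit table but fstat(2) in the 64-bit table, so
 * decoding a compat task's number against the native table would name
 * the wrong syscall. Returning -1 here makes every caller bail out.
 */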
81
82static __init struct syscall_metadata *
83find_syscall_meta(unsigned long syscall)
84{
85 struct syscall_metadata **start;
86 struct syscall_metadata **stop;
87 char str[KSYM_SYMBOL_LEN];
88
89
90 start = __start_syscalls_metadata;
91 stop = __stop_syscalls_metadata;
92 kallsyms_lookup(syscall, NULL, NULL, NULL, str);
93
94 if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
95 return NULL;
96
97 for ( ; start < stop; start++) {
98 if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
99 return *start;
100 }
101 return NULL;
102}
103
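/*
 * Map a syscall number to its metadata. Architectures with densely
 * numbered syscalls use the flat syscalls_metadata array; architectures
 * selecting CONFIG_HAVE_SPARSE_SYSCALL_NR keep the mapping in an xarray
 * instead, avoiding a huge, mostly empty table.
 */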
104static struct syscall_metadata *syscall_nr_to_meta(int nr)
105{
106 if (IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR))
107 return xa_load(&syscalls_metadata_sparse, (unsigned long)nr);
108
109 if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
110 return NULL;
111
112 return syscalls_metadata[nr];
113}
114
115const char *get_syscall_name(int syscall)
116{
117 struct syscall_metadata *entry;
118
119 entry = syscall_nr_to_meta(syscall);
120 if (!entry)
121 return NULL;
122
123 return entry->name;
124}
125
126static enum print_line_t
127print_syscall_enter(struct trace_iterator *iter, int flags,
128 struct trace_event *event)
129{
130 struct trace_array *tr = iter->tr;
131 struct trace_seq *s = &iter->seq;
132 struct trace_entry *ent = iter->ent;
133 struct syscall_trace_enter *trace;
134 struct syscall_metadata *entry;
135 int i, syscall;
136
137 trace = (typeof(trace))ent;
138 syscall = trace->nr;
139 entry = syscall_nr_to_meta(syscall);
140
141 if (!entry)
142 goto end;
143
144 if (entry->enter_event->event.type != ent->type) {
145 WARN_ON_ONCE(1);
146 goto end;
147 }
148
149 trace_seq_printf(s, "%s(", entry->name);
150
151 for (i = 0; i < entry->nb_args; i++) {
152
153 if (trace_seq_has_overflowed(s))
154 goto end;
155
156 /* parameter types */
157 if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
158 trace_seq_printf(s, "%s ", entry->types[i]);
159
160 /* parameter values */
161 trace_seq_printf(s, "%s: %lx%s", entry->args[i],
162 trace->args[i],
163 i == entry->nb_args - 1 ? "" : ", ");
164 }
165
166 trace_seq_putc(s, ')');
167end:
168 trace_seq_putc(s, '\n');
169
170 return trace_handle_return(s);
171}
172
173static enum print_line_t
174print_syscall_exit(struct trace_iterator *iter, int flags,
175 struct trace_event *event)
176{
177 struct trace_seq *s = &iter->seq;
178 struct trace_entry *ent = iter->ent;
179 struct syscall_trace_exit *trace;
180 int syscall;
181 struct syscall_metadata *entry;
182
183 trace = (typeof(trace))ent;
184 syscall = trace->nr;
185 entry = syscall_nr_to_meta(syscall);
186
187 if (!entry) {
188 trace_seq_putc(s, '\n');
189 goto out;
190 }
191
192 if (entry->exit_event->event.type != ent->type) {
193 WARN_ON_ONCE(1);
194 return TRACE_TYPE_UNHANDLED;
195 }
196
197 trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
198 trace->ret);
199
200 out:
201 return trace_handle_return(s);
202}
203
204#define SYSCALL_FIELD(_type, _name) { \
205 .type = #_type, .name = #_name, \
206 .size = sizeof(_type), .align = __alignof__(_type), \
207 .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }
208
209static int __init
210__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
211{
212 int i;
213 int pos = 0;
214
215 /* When len=0, we just calculate the needed length */
216#define LEN_OR_ZERO (len ? len - pos : 0)
217
218 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
219 for (i = 0; i < entry->nb_args; i++) {
220 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
221 entry->args[i], sizeof(unsigned long),
222 i == entry->nb_args - 1 ? "" : ", ");
223 }
224 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
225
226 for (i = 0; i < entry->nb_args; i++) {
227 pos += snprintf(buf + pos, LEN_OR_ZERO,
228 ", ((unsigned long)(REC->%s))", entry->args[i]);
229 }
230
231#undef LEN_OR_ZERO
232
233 /* return the length of print_fmt */
234 return pos;
235}
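/*
 * For illustration, a hypothetical two-argument syscall with args "fd"
 * and "count" would, on a 64-bit kernel, end up with a print_fmt of
 * roughly:
 *
 *   "fd: 0x%08lx, count: 0x%08lx", ((unsigned long)(REC->fd)),
 *   ((unsigned long)(REC->count))
 *
 * i.e. a quoted printf format followed by one REC-> expression per
 * argument.
 */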
236
237static int __init set_syscall_print_fmt(struct trace_event_call *call)
238{
239 char *print_fmt;
240 int len;
241 struct syscall_metadata *entry = call->data;
242
243 if (entry->enter_event != call) {
244 call->print_fmt = "\"0x%lx\", REC->ret";
245 return 0;
246 }
247
248 /* First: called with 0 length to calculate the needed length */
249 len = __set_enter_print_fmt(entry, NULL, 0);
250
251 print_fmt = kmalloc(len + 1, GFP_KERNEL);
252 if (!print_fmt)
253 return -ENOMEM;
254
255 /* Second: actually write the @print_fmt */
256 __set_enter_print_fmt(entry, print_fmt, len + 1);
257 call->print_fmt = print_fmt;
258
259 return 0;
260}
261
262static void __init free_syscall_print_fmt(struct trace_event_call *call)
263{
264 struct syscall_metadata *entry = call->data;
265
266 if (entry->enter_event == call)
267 kfree(call->print_fmt);
268}
269
270static int __init syscall_enter_define_fields(struct trace_event_call *call)
271{
272 struct syscall_trace_enter trace;
273 struct syscall_metadata *meta = call->data;
274 int offset = offsetof(typeof(trace), args);
275 int ret = 0;
276 int i;
277
278 for (i = 0; i < meta->nb_args; i++) {
279 ret = trace_define_field(call, meta->types[i],
280 meta->args[i], offset,
281 sizeof(unsigned long), 0,
282 FILTER_OTHER);
283 if (ret)
284 break;
285 offset += sizeof(unsigned long);
286 }
287
288 return ret;
289}
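/*
 * Note that each argument is defined to the filter machinery as an
 * unsigned long at consecutive offsets after 'args', independent of the
 * C type string recorded in meta->types[].
 */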
290
291static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
292{
293 struct trace_array *tr = data;
294 struct trace_event_file *trace_file;
295 struct syscall_trace_enter *entry;
296 struct syscall_metadata *sys_data;
297 struct trace_event_buffer fbuffer;
298 unsigned long args[6];
299 int syscall_nr;
300 int size;
301
302 syscall_nr = trace_get_syscall_nr(current, regs);
303 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
304 return;
305
306 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
307 trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
308 if (!trace_file)
309 return;
310
311 if (trace_trigger_soft_disabled(trace_file))
312 return;
313
314 sys_data = syscall_nr_to_meta(syscall_nr);
315 if (!sys_data)
316 return;
317
318 size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
319
320 entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
321 if (!entry)
322 return;
323
324 entry = ring_buffer_event_data(fbuffer.event);
325 entry->nr = syscall_nr;
326 syscall_get_arguments(current, regs, args);
327 memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
328
329 trace_event_buffer_commit(&fbuffer);
330}
331
332static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
333{
334 struct trace_array *tr = data;
335 struct trace_event_file *trace_file;
336 struct syscall_trace_exit *entry;
337 struct syscall_metadata *sys_data;
338 struct trace_event_buffer fbuffer;
339 int syscall_nr;
340
341 syscall_nr = trace_get_syscall_nr(current, regs);
342 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
343 return;
344
345 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
346 trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
347 if (!trace_file)
348 return;
349
350 if (trace_trigger_soft_disabled(trace_file))
351 return;
352
353 sys_data = syscall_nr_to_meta(syscall_nr);
354 if (!sys_data)
355 return;
356
357 entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
358 if (!entry)
359 return;
360
361 entry = ring_buffer_event_data(fbuffer.event);
362 entry->nr = syscall_nr;
363 entry->ret = syscall_get_return_value(current, regs);
364
365 trace_event_buffer_commit(&fbuffer);
366}
367
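/*
 * The sys_enter/sys_exit tracepoints are registered only once per
 * trace_array: sys_refcount_enter/exit count the enabled syscall events,
 * while the per-syscall trace_event_file pointers are what the handlers
 * above dereference to decide whether this instance wants the event.
 */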
368static int reg_event_syscall_enter(struct trace_event_file *file,
369 struct trace_event_call *call)
370{
371 struct trace_array *tr = file->tr;
372 int ret = 0;
373 int num;
374
375 num = ((struct syscall_metadata *)call->data)->syscall_nr;
376 if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
377 return -ENOSYS;
378 mutex_lock(&syscall_trace_lock);
379 if (!tr->sys_refcount_enter)
380 ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
381 if (!ret) {
382 rcu_assign_pointer(tr->enter_syscall_files[num], file);
383 tr->sys_refcount_enter++;
384 }
385 mutex_unlock(&syscall_trace_lock);
386 return ret;
387}
388
389static void unreg_event_syscall_enter(struct trace_event_file *file,
390 struct trace_event_call *call)
391{
392 struct trace_array *tr = file->tr;
393 int num;
394
395 num = ((struct syscall_metadata *)call->data)->syscall_nr;
396 if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
397 return;
398 mutex_lock(&syscall_trace_lock);
399 tr->sys_refcount_enter--;
400 RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
401 if (!tr->sys_refcount_enter)
402 unregister_trace_sys_enter(ftrace_syscall_enter, tr);
403 mutex_unlock(&syscall_trace_lock);
404}
405
406static int reg_event_syscall_exit(struct trace_event_file *file,
407 struct trace_event_call *call)
408{
409 struct trace_array *tr = file->tr;
410 int ret = 0;
411 int num;
412
413 num = ((struct syscall_metadata *)call->data)->syscall_nr;
414 if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
415 return -ENOSYS;
416 mutex_lock(&syscall_trace_lock);
417 if (!tr->sys_refcount_exit)
418 ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
419 if (!ret) {
420 rcu_assign_pointer(tr->exit_syscall_files[num], file);
421 tr->sys_refcount_exit++;
422 }
423 mutex_unlock(&syscall_trace_lock);
424 return ret;
425}
426
427static void unreg_event_syscall_exit(struct trace_event_file *file,
428 struct trace_event_call *call)
429{
430 struct trace_array *tr = file->tr;
431 int num;
432
433 num = ((struct syscall_metadata *)call->data)->syscall_nr;
434 if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
435 return;
436 mutex_lock(&syscall_trace_lock);
437 tr->sys_refcount_exit--;
438 RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
439 if (!tr->sys_refcount_exit)
440 unregister_trace_sys_exit(ftrace_syscall_exit, tr);
441 mutex_unlock(&syscall_trace_lock);
442}
443
444static int __init init_syscall_trace(struct trace_event_call *call)
445{
446 int id;
447 int num;
448
449 num = ((struct syscall_metadata *)call->data)->syscall_nr;
450 if (num < 0 || num >= NR_syscalls) {
451 pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
452 ((struct syscall_metadata *)call->data)->name);
453 return -ENOSYS;
454 }
455
456 if (set_syscall_print_fmt(call) < 0)
457 return -ENOMEM;
458
459 id = trace_event_raw_init(call);
460
461 if (id < 0) {
462 free_syscall_print_fmt(call);
463 return id;
464 }
465
466 return id;
467}
468
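/*
 * The enter event has one static field (__syscall_nr); the per-syscall
 * argument fields are only known at runtime and are added by
 * syscall_enter_define_fields() through the TRACE_FUNCTION_TYPE entry.
 */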
469static struct trace_event_fields __refdata syscall_enter_fields_array[] = {
470 SYSCALL_FIELD(int, __syscall_nr),
471 { .type = TRACE_FUNCTION_TYPE,
472 .define_fields = syscall_enter_define_fields },
473 {}
474};
475
476struct trace_event_functions enter_syscall_print_funcs = {
477 .trace = print_syscall_enter,
478};
479
480struct trace_event_functions exit_syscall_print_funcs = {
481 .trace = print_syscall_exit,
482};
483
484struct trace_event_class __refdata event_class_syscall_enter = {
485 .system = "syscalls",
486 .reg = syscall_enter_register,
487 .fields_array = syscall_enter_fields_array,
488 .get_fields = syscall_get_enter_fields,
489 .raw_init = init_syscall_trace,
490};
491
492struct trace_event_class __refdata event_class_syscall_exit = {
493 .system = "syscalls",
494 .reg = syscall_exit_register,
495 .fields_array = (struct trace_event_fields[]){
496 SYSCALL_FIELD(int, __syscall_nr),
497 SYSCALL_FIELD(long, ret),
498 {}
499 },
500 .fields = LIST_HEAD_INIT(event_class_syscall_exit.fields),
501 .raw_init = init_syscall_trace,
502};
503
504unsigned long __init __weak arch_syscall_addr(int nr)
505{
506 return (unsigned long)sys_call_table[nr];
507}
508
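/*
 * Build the syscall-number -> metadata mapping at boot: resolve each
 * sys_call_table entry to a symbol name via kallsyms and match it
 * against the metadata records collected in the __syscalls_metadata
 * section.
 */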
509void __init init_ftrace_syscalls(void)
510{
511 struct syscall_metadata *meta;
512 unsigned long addr;
513 int i;
514 void *ret;
515
516 if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
517 syscalls_metadata = kcalloc(NR_syscalls,
518 sizeof(*syscalls_metadata),
519 GFP_KERNEL);
520 if (!syscalls_metadata) {
521 WARN_ON(1);
522 return;
523 }
524 }
525
526 for (i = 0; i < NR_syscalls; i++) {
527 addr = arch_syscall_addr(i);
528 meta = find_syscall_meta(addr);
529 if (!meta)
530 continue;
531
532 meta->syscall_nr = i;
533
534 if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
535 syscalls_metadata[i] = meta;
536 } else {
537 ret = xa_store(&syscalls_metadata_sparse, i, meta,
538 GFP_KERNEL);
539 WARN(xa_is_err(ret),
540 "Syscall memory allocation failed\n");
541 }
542
543 }
544}
545
546#ifdef CONFIG_PERF_EVENTS
547
548static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
549static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
550static int sys_perf_refcount_enter;
551static int sys_perf_refcount_exit;
552
553static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
554 struct syscall_metadata *sys_data,
555 struct syscall_trace_enter *rec)
556{
557 struct syscall_tp_t {
558 unsigned long long regs;
559 unsigned long syscall_nr;
560 unsigned long args[SYSCALL_DEFINE_MAXARGS];
561 } param;
562 int i;
563
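	/*
	 * Stash the pt_regs pointer in the leading 64-bit 'regs' slot so
	 * that BPF programs attached to the event can read the register
	 * state back out of their context.
	 */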
564 *(struct pt_regs **)&param = regs;
565 param.syscall_nr = rec->nr;
566 for (i = 0; i < sys_data->nb_args; i++)
567 param.args[i] = rec->args[i];
568 return trace_call_bpf(call, &param);
569}
570
571static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
572{
573 struct syscall_metadata *sys_data;
574 struct syscall_trace_enter *rec;
575 struct hlist_head *head;
576 unsigned long args[6];
577 bool valid_prog_array;
578 int syscall_nr;
579 int rctx;
580 int size;
581
582 syscall_nr = trace_get_syscall_nr(current, regs);
583 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
584 return;
585 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
586 return;
587
588 sys_data = syscall_nr_to_meta(syscall_nr);
589 if (!sys_data)
590 return;
591
592 head = this_cpu_ptr(sys_data->enter_event->perf_events);
593 valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
594 if (!valid_prog_array && hlist_empty(head))
595 return;
596
597 /* get the size after alignment with the u32 buffer size field */
598 size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
599 size = ALIGN(size + sizeof(u32), sizeof(u64));
600 size -= sizeof(u32);
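	/*
	 * Worked example (64-bit, 6 args, assuming sizeof(*rec) == 16):
	 * 16 + 6*8 = 64, ALIGN(64 + 4, 8) = 72, 72 - 4 = 68, so the record
	 * plus perf's u32 size header together end on a u64 boundary.
	 */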
601
602 rec = perf_trace_buf_alloc(size, NULL, &rctx);
603 if (!rec)
604 return;
605
606 rec->nr = syscall_nr;
607 syscall_get_arguments(current, regs, args);
608 memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
609
610 if ((valid_prog_array &&
611 !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
612 hlist_empty(head)) {
613 perf_swevent_put_recursion_context(rctx);
614 return;
615 }
616
617 perf_trace_buf_submit(rec, size, rctx,
618 sys_data->enter_event->event.type, 1, regs,
619 head, NULL);
620}
621
622static int perf_sysenter_enable(struct trace_event_call *call)
623{
624 int ret = 0;
625 int num;
626
627 num = ((struct syscall_metadata *)call->data)->syscall_nr;
628
629 mutex_lock(&syscall_trace_lock);
630 if (!sys_perf_refcount_enter)
631 ret = register_trace_sys_enter(perf_syscall_enter, NULL);
632 if (ret) {
633 pr_info("event trace: Could not activate syscall entry trace point\n");
634 } else {
635 set_bit(num, enabled_perf_enter_syscalls);
636 sys_perf_refcount_enter++;
637 }
638 mutex_unlock(&syscall_trace_lock);
639 return ret;
640}
641
642static void perf_sysenter_disable(struct trace_event_call *call)
643{
644 int num;
645
646 num = ((struct syscall_metadata *)call->data)->syscall_nr;
647
648 mutex_lock(&syscall_trace_lock);
649 sys_perf_refcount_enter--;
650 clear_bit(num, enabled_perf_enter_syscalls);
651 if (!sys_perf_refcount_enter)
652 unregister_trace_sys_enter(perf_syscall_enter, NULL);
653 mutex_unlock(&syscall_trace_lock);
654}
655
656static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
657 struct syscall_trace_exit *rec)
658{
659 struct syscall_tp_t {
660 unsigned long long regs;
661 unsigned long syscall_nr;
662 unsigned long ret;
663 } param;
664
665 *(struct pt_regs **)&param = regs;
666 param.syscall_nr = rec->nr;
667 param.ret = rec->ret;
668 return trace_call_bpf(call, &param);
669}
670
671static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
672{
673 struct syscall_metadata *sys_data;
674 struct syscall_trace_exit *rec;
675 struct hlist_head *head;
676 bool valid_prog_array;
677 int syscall_nr;
678 int rctx;
679 int size;
680
681 syscall_nr = trace_get_syscall_nr(current, regs);
682 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
683 return;
684 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
685 return;
686
687 sys_data = syscall_nr_to_meta(syscall_nr);
688 if (!sys_data)
689 return;
690
691 head = this_cpu_ptr(sys_data->exit_event->perf_events);
692 valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
693 if (!valid_prog_array && hlist_empty(head))
694 return;
695
696 /* We can probably do that at build time */
697 size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
698 size -= sizeof(u32);
699
700 rec = perf_trace_buf_alloc(size, NULL, &rctx);
701 if (!rec)
702 return;
703
704 rec->nr = syscall_nr;
705 rec->ret = syscall_get_return_value(current, regs);
706
707 if ((valid_prog_array &&
708 !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
709 hlist_empty(head)) {
710 perf_swevent_put_recursion_context(rctx);
711 return;
712 }
713
714 perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
715 1, regs, head, NULL);
716}
717
718static int perf_sysexit_enable(struct trace_event_call *call)
719{
720 int ret = 0;
721 int num;
722
723 num = ((struct syscall_metadata *)call->data)->syscall_nr;
724
725 mutex_lock(&syscall_trace_lock);
726 if (!sys_perf_refcount_exit)
727 ret = register_trace_sys_exit(perf_syscall_exit, NULL);
728 if (ret) {
729 pr_info("event trace: Could not activate syscall exit trace point\n");
730 } else {
731 set_bit(num, enabled_perf_exit_syscalls);
732 sys_perf_refcount_exit++;
733 }
734 mutex_unlock(&syscall_trace_lock);
735 return ret;
736}
737
738static void perf_sysexit_disable(struct trace_event_call *call)
739{
740 int num;
741
742 num = ((struct syscall_metadata *)call->data)->syscall_nr;
743
744 mutex_lock(&syscall_trace_lock);
745 sys_perf_refcount_exit--;
746 clear_bit(num, enabled_perf_exit_syscalls);
747 if (!sys_perf_refcount_exit)
748 unregister_trace_sys_exit(perf_syscall_exit, NULL);
749 mutex_unlock(&syscall_trace_lock);
750}
751
752#endif /* CONFIG_PERF_EVENTS */
753
754static int syscall_enter_register(struct trace_event_call *event,
755 enum trace_reg type, void *data)
756{
757 struct trace_event_file *file = data;
758
759 switch (type) {
760 case TRACE_REG_REGISTER:
761 return reg_event_syscall_enter(file, event);
762 case TRACE_REG_UNREGISTER:
763 unreg_event_syscall_enter(file, event);
764 return 0;
765
766#ifdef CONFIG_PERF_EVENTS
767 case TRACE_REG_PERF_REGISTER:
768 return perf_sysenter_enable(event);
769 case TRACE_REG_PERF_UNREGISTER:
770 perf_sysenter_disable(event);
771 return 0;
772 case TRACE_REG_PERF_OPEN:
773 case TRACE_REG_PERF_CLOSE:
774 case TRACE_REG_PERF_ADD:
775 case TRACE_REG_PERF_DEL:
776 return 0;
777#endif
778 }
779 return 0;
780}
781
782static int syscall_exit_register(struct trace_event_call *event,
783 enum trace_reg type, void *data)
784{
785 struct trace_event_file *file = data;
786
787 switch (type) {
788 case TRACE_REG_REGISTER:
789 return reg_event_syscall_exit(file, event);
790 case TRACE_REG_UNREGISTER:
791 unreg_event_syscall_exit(file, event);
792 return 0;
793
794#ifdef CONFIG_PERF_EVENTS
795 case TRACE_REG_PERF_REGISTER:
796 return perf_sysexit_enable(event);
797 case TRACE_REG_PERF_UNREGISTER:
798 perf_sysexit_disable(event);
799 return 0;
800 case TRACE_REG_PERF_OPEN:
801 case TRACE_REG_PERF_CLOSE:
802 case TRACE_REG_PERF_ADD:
803 case TRACE_REG_PERF_DEL:
804 return 0;
805#endif
806 }
807 return 0;
808}