v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2#include <trace/syscall.h>
  3#include <trace/events/syscalls.h>
  4#include <linux/syscalls.h>
  5#include <linux/slab.h>
  6#include <linux/kernel.h>
  7#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
  8#include <linux/ftrace.h>
  9#include <linux/perf_event.h>
 10#include <asm/syscall.h>
 11
 12#include "trace_output.h"
 13#include "trace.h"
 14
 15static DEFINE_MUTEX(syscall_trace_lock);
 16
 17static int syscall_enter_register(struct trace_event_call *event,
 18				 enum trace_reg type, void *data);
 19static int syscall_exit_register(struct trace_event_call *event,
 20				 enum trace_reg type, void *data);
 21
 22static struct list_head *
 23syscall_get_enter_fields(struct trace_event_call *call)
 24{
 25	struct syscall_metadata *entry = call->data;
 26
 27	return &entry->enter_fields;
 28}
 29
 30extern struct syscall_metadata *__start_syscalls_metadata[];
 31extern struct syscall_metadata *__stop_syscalls_metadata[];
 32
 33static struct syscall_metadata **syscalls_metadata;
 34
 35#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
 36static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 37{
 38	/*
 39	 * Only compare after the "sys" prefix. Archs that use
 40	 * syscall wrappers may have syscalls symbols aliases prefixed
 41	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
 42	 * mismatch.
 43	 */
 44	return !strcmp(sym + 3, name + 3);
 45}
 46#endif
 47
 48#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
 49/*
 50 * Some architectures that allow for 32bit applications
 51 * to run on a 64bit kernel, do not map the syscalls for
 52 * the 32bit tasks the same as they do for 64bit tasks.
 53 *
 54 *     *cough*x86*cough*
 55 *
 56 * In such a case, instead of reporting the wrong syscalls,
 57 * simply ignore them.
 58 *
 59 * For an arch to ignore the compat syscalls it needs to
 60 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 61 * define the function arch_trace_is_compat_syscall() to let
 62 * the tracing system know that it should ignore it.
 63 */
 64static int
 65trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
 66{
 67	if (unlikely(arch_trace_is_compat_syscall(regs)))
 68		return -1;
 69
 70	return syscall_get_nr(task, regs);
 71}
 72#else
 73static inline int
 74trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
 75{
 76	return syscall_get_nr(task, regs);
 77}
 78#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
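/*
 * Illustration (not part of this file): an architecture opts in to
 * ignoring compat syscalls from its own headers (e.g. asm/ftrace.h),
 * roughly as below. The helper used to detect a 32-bit task is arch
 * specific; the name shown here is only an assumption:
 *
 *	#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
 *
 *	static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
 *	{
 *		return in_compat_syscall();
 *	}
 */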
 79
 80static __init struct syscall_metadata *
 81find_syscall_meta(unsigned long syscall)
 82{
 83	struct syscall_metadata **start;
 84	struct syscall_metadata **stop;
 85	char str[KSYM_SYMBOL_LEN];
 86
 87
 88	start = __start_syscalls_metadata;
 89	stop = __stop_syscalls_metadata;
 90	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 91
 92	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
 93		return NULL;
 94
 95	for ( ; start < stop; start++) {
 96		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
 97			return *start;
 98	}
 99	return NULL;
100}
101
102static struct syscall_metadata *syscall_nr_to_meta(int nr)
103{
104	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
105		return NULL;
106
107	return syscalls_metadata[nr];
108}
109
110const char *get_syscall_name(int syscall)
111{
112	struct syscall_metadata *entry;
113
114	entry = syscall_nr_to_meta(syscall);
115	if (!entry)
116		return NULL;
117
118	return entry->name;
119}
120
121static enum print_line_t
122print_syscall_enter(struct trace_iterator *iter, int flags,
123		    struct trace_event *event)
124{
125	struct trace_array *tr = iter->tr;
126	struct trace_seq *s = &iter->seq;
127	struct trace_entry *ent = iter->ent;
128	struct syscall_trace_enter *trace;
129	struct syscall_metadata *entry;
130	int i, syscall;
131
132	trace = (typeof(trace))ent;
133	syscall = trace->nr;
134	entry = syscall_nr_to_meta(syscall);
135
136	if (!entry)
137		goto end;
138
139	if (entry->enter_event->event.type != ent->type) {
140		WARN_ON_ONCE(1);
141		goto end;
142	}
143
144	trace_seq_printf(s, "%s(", entry->name);
145
146	for (i = 0; i < entry->nb_args; i++) {
147
148		if (trace_seq_has_overflowed(s))
149			goto end;
150
151		/* parameter types */
152		if (tr->trace_flags & TRACE_ITER_VERBOSE)
153			trace_seq_printf(s, "%s ", entry->types[i]);
154
155		/* parameter values */
156		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
157				 trace->args[i],
158				 i == entry->nb_args - 1 ? "" : ", ");
159	}
160
161	trace_seq_putc(s, ')');
162end:
163	trace_seq_putc(s, '\n');
164
165	return trace_handle_return(s);
166}
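/*
 * Example output (illustrative values, not from this file): with the
 * default options the function above renders an entry event as
 *
 *	sys_read(fd: 3, buf: 7ffd2c4f1000, count: 400)
 *
 * and, when the "verbose" trace option is set, each value is preceded
 * by its C type from entry->types[].
 */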
167
168static enum print_line_t
169print_syscall_exit(struct trace_iterator *iter, int flags,
170		   struct trace_event *event)
171{
172	struct trace_seq *s = &iter->seq;
173	struct trace_entry *ent = iter->ent;
174	struct syscall_trace_exit *trace;
175	int syscall;
176	struct syscall_metadata *entry;
177
178	trace = (typeof(trace))ent;
179	syscall = trace->nr;
180	entry = syscall_nr_to_meta(syscall);
181
182	if (!entry) {
183		trace_seq_putc(s, '\n');
184		goto out;
185	}
186
187	if (entry->exit_event->event.type != ent->type) {
188		WARN_ON_ONCE(1);
189		return TRACE_TYPE_UNHANDLED;
190	}
191
192	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
193				trace->ret);
194
195 out:
196	return trace_handle_return(s);
197}
198
199extern char *__bad_type_size(void);
200
201#define SYSCALL_FIELD(type, field, name)				\
202	sizeof(type) != sizeof(trace.field) ?				\
203		__bad_type_size() :					\
204		#type, #name, offsetof(typeof(trace), field),		\
205		sizeof(trace.field), is_signed_type(type)
206
207static int __init
208__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
209{
210	int i;
211	int pos = 0;
212
213	/* When len=0, we just calculate the needed length */
214#define LEN_OR_ZERO (len ? len - pos : 0)
215
216	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
217	for (i = 0; i < entry->nb_args; i++) {
218		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
219				entry->args[i], sizeof(unsigned long),
220				i == entry->nb_args - 1 ? "" : ", ");
221	}
222	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
223
224	for (i = 0; i < entry->nb_args; i++) {
225		pos += snprintf(buf + pos, LEN_OR_ZERO,
226				", ((unsigned long)(REC->%s))", entry->args[i]);
227	}
228
229#undef LEN_OR_ZERO
230
231	/* return the length of print_fmt */
232	return pos;
233}
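/*
 * Example (hypothetical syscall with args "fd", "buf", "count" on a
 * 64-bit build, where sizeof(unsigned long) == 8): the two passes above
 * produce the print_fmt string
 *
 *	"fd: 0x%08lx, buf: 0x%08lx, count: 0x%08lx",
 *	((unsigned long)(REC->fd)), ((unsigned long)(REC->buf)),
 *	((unsigned long)(REC->count))
 *
 * i.e. one zero-padded hex field per argument, followed by the matching
 * REC-> accessors used when the event format is parsed.
 */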
234
235static int __init set_syscall_print_fmt(struct trace_event_call *call)
236{
237	char *print_fmt;
238	int len;
239	struct syscall_metadata *entry = call->data;
240
241	if (entry->enter_event != call) {
242		call->print_fmt = "\"0x%lx\", REC->ret";
243		return 0;
244	}
245
246	/* First: called with 0 length to calculate the needed length */
247	len = __set_enter_print_fmt(entry, NULL, 0);
248
249	print_fmt = kmalloc(len + 1, GFP_KERNEL);
250	if (!print_fmt)
251		return -ENOMEM;
252
253	/* Second: actually write the @print_fmt */
254	__set_enter_print_fmt(entry, print_fmt, len + 1);
255	call->print_fmt = print_fmt;
256
257	return 0;
258}
259
260static void __init free_syscall_print_fmt(struct trace_event_call *call)
261{
262	struct syscall_metadata *entry = call->data;
263
264	if (entry->enter_event == call)
265		kfree(call->print_fmt);
266}
267
268static int __init syscall_enter_define_fields(struct trace_event_call *call)
269{
270	struct syscall_trace_enter trace;
271	struct syscall_metadata *meta = call->data;
272	int ret;
273	int i;
274	int offset = offsetof(typeof(trace), args);
275
276	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
277				 FILTER_OTHER);
278	if (ret)
279		return ret;
280
281	for (i = 0; i < meta->nb_args; i++) {
282		ret = trace_define_field(call, meta->types[i],
283					 meta->args[i], offset,
284					 sizeof(unsigned long), 0,
285					 FILTER_OTHER);
286		offset += sizeof(unsigned long);
287	}
288
289	return ret;
290}
291
292static int __init syscall_exit_define_fields(struct trace_event_call *call)
293{
294	struct syscall_trace_exit trace;
295	int ret;
296
297	ret = trace_define_field(call, SYSCALL_FIELD(int, nr, __syscall_nr),
298				 FILTER_OTHER);
299	if (ret)
300		return ret;
301
302	ret = trace_define_field(call, SYSCALL_FIELD(long, ret, ret),
303				 FILTER_OTHER);
304
305	return ret;
306}
307
308static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
309{
310	struct trace_array *tr = data;
311	struct trace_event_file *trace_file;
312	struct syscall_trace_enter *entry;
313	struct syscall_metadata *sys_data;
314	struct ring_buffer_event *event;
315	struct ring_buffer *buffer;
316	unsigned long irq_flags;
317	unsigned long args[6];
318	int pc;
319	int syscall_nr;
320	int size;
321
322	syscall_nr = trace_get_syscall_nr(current, regs);
323	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
324		return;
325
326	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
327	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
328	if (!trace_file)
329		return;
330
331	if (trace_trigger_soft_disabled(trace_file))
332		return;
333
334	sys_data = syscall_nr_to_meta(syscall_nr);
335	if (!sys_data)
336		return;
337
338	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
339
340	local_save_flags(irq_flags);
341	pc = preempt_count();
342
343	buffer = tr->trace_buffer.buffer;
344	event = trace_buffer_lock_reserve(buffer,
345			sys_data->enter_event->event.type, size, irq_flags, pc);
346	if (!event)
347		return;
348
349	entry = ring_buffer_event_data(event);
350	entry->nr = syscall_nr;
351	syscall_get_arguments(current, regs, args);
352	memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
353
354	event_trigger_unlock_commit(trace_file, buffer, event, entry,
355				    irq_flags, pc);
356}
357
358static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
359{
360	struct trace_array *tr = data;
361	struct trace_event_file *trace_file;
362	struct syscall_trace_exit *entry;
363	struct syscall_metadata *sys_data;
364	struct ring_buffer_event *event;
365	struct ring_buffer *buffer;
366	unsigned long irq_flags;
367	int pc;
368	int syscall_nr;
369
370	syscall_nr = trace_get_syscall_nr(current, regs);
371	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
372		return;
373
374	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
375	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
376	if (!trace_file)
377		return;
378
379	if (trace_trigger_soft_disabled(trace_file))
380		return;
381
382	sys_data = syscall_nr_to_meta(syscall_nr);
383	if (!sys_data)
384		return;
385
386	local_save_flags(irq_flags);
387	pc = preempt_count();
388
389	buffer = tr->trace_buffer.buffer;
390	event = trace_buffer_lock_reserve(buffer,
391			sys_data->exit_event->event.type, sizeof(*entry),
392			irq_flags, pc);
393	if (!event)
394		return;
395
396	entry = ring_buffer_event_data(event);
397	entry->nr = syscall_nr;
398	entry->ret = syscall_get_return_value(current, regs);
399
400	event_trigger_unlock_commit(trace_file, buffer, event, entry,
401				    irq_flags, pc);
402}
403
404static int reg_event_syscall_enter(struct trace_event_file *file,
405				   struct trace_event_call *call)
406{
407	struct trace_array *tr = file->tr;
408	int ret = 0;
409	int num;
410
411	num = ((struct syscall_metadata *)call->data)->syscall_nr;
412	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
413		return -ENOSYS;
414	mutex_lock(&syscall_trace_lock);
415	if (!tr->sys_refcount_enter)
416		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
417	if (!ret) {
418		rcu_assign_pointer(tr->enter_syscall_files[num], file);
419		tr->sys_refcount_enter++;
420	}
421	mutex_unlock(&syscall_trace_lock);
422	return ret;
423}
424
425static void unreg_event_syscall_enter(struct trace_event_file *file,
426				      struct trace_event_call *call)
427{
428	struct trace_array *tr = file->tr;
429	int num;
430
431	num = ((struct syscall_metadata *)call->data)->syscall_nr;
432	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
433		return;
434	mutex_lock(&syscall_trace_lock);
435	tr->sys_refcount_enter--;
436	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
437	if (!tr->sys_refcount_enter)
438		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
439	mutex_unlock(&syscall_trace_lock);
440}
441
442static int reg_event_syscall_exit(struct trace_event_file *file,
443				  struct trace_event_call *call)
444{
445	struct trace_array *tr = file->tr;
446	int ret = 0;
447	int num;
448
449	num = ((struct syscall_metadata *)call->data)->syscall_nr;
450	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
451		return -ENOSYS;
452	mutex_lock(&syscall_trace_lock);
453	if (!tr->sys_refcount_exit)
454		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
455	if (!ret) {
456		rcu_assign_pointer(tr->exit_syscall_files[num], file);
457		tr->sys_refcount_exit++;
458	}
459	mutex_unlock(&syscall_trace_lock);
460	return ret;
461}
462
463static void unreg_event_syscall_exit(struct trace_event_file *file,
464				     struct trace_event_call *call)
465{
466	struct trace_array *tr = file->tr;
467	int num;
468
469	num = ((struct syscall_metadata *)call->data)->syscall_nr;
470	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
471		return;
472	mutex_lock(&syscall_trace_lock);
473	tr->sys_refcount_exit--;
474	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
475	if (!tr->sys_refcount_exit)
476		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
477	mutex_unlock(&syscall_trace_lock);
478}
479
480static int __init init_syscall_trace(struct trace_event_call *call)
481{
482	int id;
483	int num;
484
485	num = ((struct syscall_metadata *)call->data)->syscall_nr;
486	if (num < 0 || num >= NR_syscalls) {
487		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
488				((struct syscall_metadata *)call->data)->name);
489		return -ENOSYS;
490	}
491
492	if (set_syscall_print_fmt(call) < 0)
493		return -ENOMEM;
494
495	id = trace_event_raw_init(call);
496
497	if (id < 0) {
498		free_syscall_print_fmt(call);
499		return id;
500	}
501
502	return id;
503}
504
505struct trace_event_functions enter_syscall_print_funcs = {
506	.trace		= print_syscall_enter,
507};
508
509struct trace_event_functions exit_syscall_print_funcs = {
510	.trace		= print_syscall_exit,
511};
512
513struct trace_event_class __refdata event_class_syscall_enter = {
514	.system		= "syscalls",
515	.reg		= syscall_enter_register,
516	.define_fields	= syscall_enter_define_fields,
517	.get_fields	= syscall_get_enter_fields,
518	.raw_init	= init_syscall_trace,
519};
520
521struct trace_event_class __refdata event_class_syscall_exit = {
522	.system		= "syscalls",
523	.reg		= syscall_exit_register,
524	.define_fields	= syscall_exit_define_fields,
525	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
526	.raw_init	= init_syscall_trace,
527};
528
529unsigned long __init __weak arch_syscall_addr(int nr)
530{
531	return (unsigned long)sys_call_table[nr];
532}
533
534void __init init_ftrace_syscalls(void)
535{
536	struct syscall_metadata *meta;
537	unsigned long addr;
538	int i;
539
540	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
541				    GFP_KERNEL);
542	if (!syscalls_metadata) {
543		WARN_ON(1);
544		return;
545	}
546
547	for (i = 0; i < NR_syscalls; i++) {
548		addr = arch_syscall_addr(i);
549		meta = find_syscall_meta(addr);
550		if (!meta)
551			continue;
552
553		meta->syscall_nr = i;
554		syscalls_metadata[i] = meta;
555	}
556}
557
558#ifdef CONFIG_PERF_EVENTS
559
560static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
561static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
562static int sys_perf_refcount_enter;
563static int sys_perf_refcount_exit;
564
565static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
566			       struct syscall_metadata *sys_data,
567			       struct syscall_trace_enter *rec)
568{
569	struct syscall_tp_t {
570		unsigned long long regs;
571		unsigned long syscall_nr;
572		unsigned long args[SYSCALL_DEFINE_MAXARGS];
573	} param;
574	int i;
575
576	*(struct pt_regs **)&param = regs;
577	param.syscall_nr = rec->nr;
578	for (i = 0; i < sys_data->nb_args; i++)
579		param.args[i] = rec->args[i];
580	return trace_call_bpf(call, &param);
581}
582
583static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
584{
585	struct syscall_metadata *sys_data;
586	struct syscall_trace_enter *rec;
587	struct hlist_head *head;
588	unsigned long args[6];
589	bool valid_prog_array;
590	int syscall_nr;
591	int rctx;
592	int size;
593
594	syscall_nr = trace_get_syscall_nr(current, regs);
595	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
596		return;
597	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
598		return;
599
600	sys_data = syscall_nr_to_meta(syscall_nr);
601	if (!sys_data)
602		return;
603
604	head = this_cpu_ptr(sys_data->enter_event->perf_events);
605	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
606	if (!valid_prog_array && hlist_empty(head))
607		return;
608
609	/* get the size after alignment with the u32 buffer size field */
610	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
611	size = ALIGN(size + sizeof(u32), sizeof(u64));
612	size -= sizeof(u32);
613
614	rec = perf_trace_buf_alloc(size, NULL, &rctx);
615	if (!rec)
616		return;
617
618	rec->nr = syscall_nr;
619	syscall_get_arguments(current, regs, args);
620	memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
621
622	if ((valid_prog_array &&
623	     !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
624	    hlist_empty(head)) {
625		perf_swevent_put_recursion_context(rctx);
626		return;
627	}
628
629	perf_trace_buf_submit(rec, size, rctx,
630			      sys_data->enter_event->event.type, 1, regs,
631			      head, NULL);
632}
633
634static int perf_sysenter_enable(struct trace_event_call *call)
635{
636	int ret = 0;
637	int num;
638
639	num = ((struct syscall_metadata *)call->data)->syscall_nr;
640
641	mutex_lock(&syscall_trace_lock);
642	if (!sys_perf_refcount_enter)
643		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
644	if (ret) {
645		pr_info("event trace: Could not activate syscall entry trace point");
646	} else {
647		set_bit(num, enabled_perf_enter_syscalls);
648		sys_perf_refcount_enter++;
649	}
650	mutex_unlock(&syscall_trace_lock);
651	return ret;
652}
653
654static void perf_sysenter_disable(struct trace_event_call *call)
655{
656	int num;
657
658	num = ((struct syscall_metadata *)call->data)->syscall_nr;
659
660	mutex_lock(&syscall_trace_lock);
661	sys_perf_refcount_enter--;
662	clear_bit(num, enabled_perf_enter_syscalls);
663	if (!sys_perf_refcount_enter)
664		unregister_trace_sys_enter(perf_syscall_enter, NULL);
665	mutex_unlock(&syscall_trace_lock);
666}
667
668static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
669			      struct syscall_trace_exit *rec)
670{
671	struct syscall_tp_t {
672		unsigned long long regs;
673		unsigned long syscall_nr;
674		unsigned long ret;
675	} param;
676
677	*(struct pt_regs **)&param = regs;
678	param.syscall_nr = rec->nr;
679	param.ret = rec->ret;
680	return trace_call_bpf(call, &param);
681}
682
683static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
684{
685	struct syscall_metadata *sys_data;
686	struct syscall_trace_exit *rec;
687	struct hlist_head *head;
688	bool valid_prog_array;
689	int syscall_nr;
690	int rctx;
691	int size;
692
693	syscall_nr = trace_get_syscall_nr(current, regs);
694	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
695		return;
696	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
697		return;
698
699	sys_data = syscall_nr_to_meta(syscall_nr);
700	if (!sys_data)
701		return;
702
703	head = this_cpu_ptr(sys_data->exit_event->perf_events);
704	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
705	if (!valid_prog_array && hlist_empty(head))
706		return;
707
708	/* We can probably do that at build time */
709	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
710	size -= sizeof(u32);
711
712	rec = perf_trace_buf_alloc(size, NULL, &rctx);
713	if (!rec)
714		return;
715
716	rec->nr = syscall_nr;
717	rec->ret = syscall_get_return_value(current, regs);
718
719	if ((valid_prog_array &&
720	     !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
721	    hlist_empty(head)) {
722		perf_swevent_put_recursion_context(rctx);
723		return;
724	}
725
726	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
727			      1, regs, head, NULL);
728}
729
730static int perf_sysexit_enable(struct trace_event_call *call)
731{
732	int ret = 0;
733	int num;
734
735	num = ((struct syscall_metadata *)call->data)->syscall_nr;
736
737	mutex_lock(&syscall_trace_lock);
738	if (!sys_perf_refcount_exit)
739		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
740	if (ret) {
741		pr_info("event trace: Could not activate syscall exit trace point");
742	} else {
743		set_bit(num, enabled_perf_exit_syscalls);
744		sys_perf_refcount_exit++;
745	}
746	mutex_unlock(&syscall_trace_lock);
747	return ret;
748}
749
750static void perf_sysexit_disable(struct trace_event_call *call)
751{
752	int num;
753
754	num = ((struct syscall_metadata *)call->data)->syscall_nr;
755
756	mutex_lock(&syscall_trace_lock);
757	sys_perf_refcount_exit--;
758	clear_bit(num, enabled_perf_exit_syscalls);
759	if (!sys_perf_refcount_exit)
760		unregister_trace_sys_exit(perf_syscall_exit, NULL);
761	mutex_unlock(&syscall_trace_lock);
762}
763
764#endif /* CONFIG_PERF_EVENTS */
765
766static int syscall_enter_register(struct trace_event_call *event,
767				 enum trace_reg type, void *data)
768{
769	struct trace_event_file *file = data;
770
771	switch (type) {
772	case TRACE_REG_REGISTER:
773		return reg_event_syscall_enter(file, event);
774	case TRACE_REG_UNREGISTER:
775		unreg_event_syscall_enter(file, event);
776		return 0;
777
778#ifdef CONFIG_PERF_EVENTS
779	case TRACE_REG_PERF_REGISTER:
780		return perf_sysenter_enable(event);
781	case TRACE_REG_PERF_UNREGISTER:
782		perf_sysenter_disable(event);
783		return 0;
784	case TRACE_REG_PERF_OPEN:
785	case TRACE_REG_PERF_CLOSE:
786	case TRACE_REG_PERF_ADD:
787	case TRACE_REG_PERF_DEL:
788		return 0;
789#endif
790	}
791	return 0;
792}
793
794static int syscall_exit_register(struct trace_event_call *event,
795				 enum trace_reg type, void *data)
796{
797	struct trace_event_file *file = data;
798
799	switch (type) {
800	case TRACE_REG_REGISTER:
801		return reg_event_syscall_exit(file, event);
802	case TRACE_REG_UNREGISTER:
803		unreg_event_syscall_exit(file, event);
804		return 0;
805
806#ifdef CONFIG_PERF_EVENTS
807	case TRACE_REG_PERF_REGISTER:
808		return perf_sysexit_enable(event);
809	case TRACE_REG_PERF_UNREGISTER:
810		perf_sysexit_disable(event);
811		return 0;
812	case TRACE_REG_PERF_OPEN:
813	case TRACE_REG_PERF_CLOSE:
814	case TRACE_REG_PERF_ADD:
815	case TRACE_REG_PERF_DEL:
816		return 0;
817#endif
818	}
819	return 0;
820}
v3.1
  1#include <trace/syscall.h>
  2#include <trace/events/syscalls.h>
  3#include <linux/slab.h>
  4#include <linux/kernel.h>
  5#include <linux/ftrace.h>
  6#include <linux/perf_event.h>
  7#include <asm/syscall.h>
  8
  9#include "trace_output.h"
 10#include "trace.h"
 11
 12static DEFINE_MUTEX(syscall_trace_lock);
 13static int sys_refcount_enter;
 14static int sys_refcount_exit;
 15static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
 16static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
 17
 18static int syscall_enter_register(struct ftrace_event_call *event,
 19				 enum trace_reg type);
 20static int syscall_exit_register(struct ftrace_event_call *event,
 21				 enum trace_reg type);
 22
 23static int syscall_enter_define_fields(struct ftrace_event_call *call);
 24static int syscall_exit_define_fields(struct ftrace_event_call *call);
 25
 26static struct list_head *
 27syscall_get_enter_fields(struct ftrace_event_call *call)
 28{
 29	struct syscall_metadata *entry = call->data;
 30
 31	return &entry->enter_fields;
 32}
 33
 34struct trace_event_functions enter_syscall_print_funcs = {
 35	.trace		= print_syscall_enter,
 36};
 37
 38struct trace_event_functions exit_syscall_print_funcs = {
 39	.trace		= print_syscall_exit,
 40};
 41
 42struct ftrace_event_class event_class_syscall_enter = {
 43	.system		= "syscalls",
 44	.reg		= syscall_enter_register,
 45	.define_fields	= syscall_enter_define_fields,
 46	.get_fields	= syscall_get_enter_fields,
 47	.raw_init	= init_syscall_trace,
 48};
 49
 50struct ftrace_event_class event_class_syscall_exit = {
 51	.system		= "syscalls",
 52	.reg		= syscall_exit_register,
 53	.define_fields	= syscall_exit_define_fields,
 54	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
 55	.raw_init	= init_syscall_trace,
 56};
 57
 58extern struct syscall_metadata *__start_syscalls_metadata[];
 59extern struct syscall_metadata *__stop_syscalls_metadata[];
 60
 61static struct syscall_metadata **syscalls_metadata;
 62
 63#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
 64static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 65{
 66	/*
 67	 * Only compare after the "sys" prefix. Archs that use
 68	 * syscall wrappers may have syscalls symbols aliases prefixed
 69	 * with "SyS" instead of "sys", leading to an unwanted
 70	 * mismatch.
 71	 */
 72	return !strcmp(sym + 3, name + 3);
 73}
 74#endif
 75
 76static __init struct syscall_metadata *
 77find_syscall_meta(unsigned long syscall)
 78{
 79	struct syscall_metadata **start;
 80	struct syscall_metadata **stop;
 81	char str[KSYM_SYMBOL_LEN];
 82
 83
 84	start = __start_syscalls_metadata;
 85	stop = __stop_syscalls_metadata;
 86	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 87
 88	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
 89		return NULL;
 90
 91	for ( ; start < stop; start++) {
 92		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
 93			return *start;
 94	}
 95	return NULL;
 96}
 97
 98static struct syscall_metadata *syscall_nr_to_meta(int nr)
 99{
100	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
101		return NULL;
102
103	return syscalls_metadata[nr];
104}
105
106enum print_line_t
107print_syscall_enter(struct trace_iterator *iter, int flags,
108		    struct trace_event *event)
109{
110	struct trace_seq *s = &iter->seq;
111	struct trace_entry *ent = iter->ent;
112	struct syscall_trace_enter *trace;
113	struct syscall_metadata *entry;
114	int i, ret, syscall;
115
116	trace = (typeof(trace))ent;
117	syscall = trace->nr;
118	entry = syscall_nr_to_meta(syscall);
119
120	if (!entry)
121		goto end;
122
123	if (entry->enter_event->event.type != ent->type) {
124		WARN_ON_ONCE(1);
125		goto end;
126	}
127
128	ret = trace_seq_printf(s, "%s(", entry->name);
129	if (!ret)
130		return TRACE_TYPE_PARTIAL_LINE;
131
132	for (i = 0; i < entry->nb_args; i++) {
133		/* parameter types */
134		if (trace_flags & TRACE_ITER_VERBOSE) {
135			ret = trace_seq_printf(s, "%s ", entry->types[i]);
136			if (!ret)
137				return TRACE_TYPE_PARTIAL_LINE;
138		}
139		/* parameter values */
140		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
141				       trace->args[i],
142				       i == entry->nb_args - 1 ? "" : ", ");
143		if (!ret)
144			return TRACE_TYPE_PARTIAL_LINE;
145	}
146
147	ret = trace_seq_putc(s, ')');
148	if (!ret)
149		return TRACE_TYPE_PARTIAL_LINE;
150
151end:
152	ret =  trace_seq_putc(s, '\n');
153	if (!ret)
154		return TRACE_TYPE_PARTIAL_LINE;
155
156	return TRACE_TYPE_HANDLED;
157}
158
159enum print_line_t
160print_syscall_exit(struct trace_iterator *iter, int flags,
161		   struct trace_event *event)
162{
163	struct trace_seq *s = &iter->seq;
164	struct trace_entry *ent = iter->ent;
165	struct syscall_trace_exit *trace;
166	int syscall;
167	struct syscall_metadata *entry;
168	int ret;
169
170	trace = (typeof(trace))ent;
171	syscall = trace->nr;
172	entry = syscall_nr_to_meta(syscall);
173
174	if (!entry) {
175		trace_seq_printf(s, "\n");
176		return TRACE_TYPE_HANDLED;
177	}
178
179	if (entry->exit_event->event.type != ent->type) {
180		WARN_ON_ONCE(1);
181		return TRACE_TYPE_UNHANDLED;
182	}
183
184	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
185				trace->ret);
186	if (!ret)
187		return TRACE_TYPE_PARTIAL_LINE;
188
189	return TRACE_TYPE_HANDLED;
190}
191
192extern char *__bad_type_size(void);
193
194#define SYSCALL_FIELD(type, name)					\
195	sizeof(type) != sizeof(trace.name) ?				\
196		__bad_type_size() :					\
197		#type, #name, offsetof(typeof(trace), name),		\
198		sizeof(trace.name), is_signed_type(type)
199
200static
201int  __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
202{
203	int i;
204	int pos = 0;
205
206	/* When len=0, we just calculate the needed length */
207#define LEN_OR_ZERO (len ? len - pos : 0)
208
209	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
210	for (i = 0; i < entry->nb_args; i++) {
211		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
212				entry->args[i], sizeof(unsigned long),
213				i == entry->nb_args - 1 ? "" : ", ");
214	}
215	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
216
217	for (i = 0; i < entry->nb_args; i++) {
218		pos += snprintf(buf + pos, LEN_OR_ZERO,
219				", ((unsigned long)(REC->%s))", entry->args[i]);
220	}
221
222#undef LEN_OR_ZERO
223
224	/* return the length of print_fmt */
225	return pos;
226}
227
228static int set_syscall_print_fmt(struct ftrace_event_call *call)
229{
230	char *print_fmt;
231	int len;
232	struct syscall_metadata *entry = call->data;
233
234	if (entry->enter_event != call) {
235		call->print_fmt = "\"0x%lx\", REC->ret";
236		return 0;
237	}
238
239	/* First: called with 0 length to calculate the needed length */
240	len = __set_enter_print_fmt(entry, NULL, 0);
241
242	print_fmt = kmalloc(len + 1, GFP_KERNEL);
243	if (!print_fmt)
244		return -ENOMEM;
245
246	/* Second: actually write the @print_fmt */
247	__set_enter_print_fmt(entry, print_fmt, len + 1);
248	call->print_fmt = print_fmt;
249
250	return 0;
251}
252
253static void free_syscall_print_fmt(struct ftrace_event_call *call)
254{
255	struct syscall_metadata *entry = call->data;
256
257	if (entry->enter_event == call)
258		kfree(call->print_fmt);
259}
260
261static int syscall_enter_define_fields(struct ftrace_event_call *call)
262{
263	struct syscall_trace_enter trace;
264	struct syscall_metadata *meta = call->data;
265	int ret;
266	int i;
267	int offset = offsetof(typeof(trace), args);
268
269	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
270	if (ret)
271		return ret;
272
273	for (i = 0; i < meta->nb_args; i++) {
274		ret = trace_define_field(call, meta->types[i],
275					 meta->args[i], offset,
276					 sizeof(unsigned long), 0,
277					 FILTER_OTHER);
278		offset += sizeof(unsigned long);
279	}
280
281	return ret;
282}
283
284static int syscall_exit_define_fields(struct ftrace_event_call *call)
285{
286	struct syscall_trace_exit trace;
287	int ret;
288
289	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
290	if (ret)
291		return ret;
292
293	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
294				 FILTER_OTHER);
295
296	return ret;
297}
298
299void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
300{
301	struct syscall_trace_enter *entry;
302	struct syscall_metadata *sys_data;
303	struct ring_buffer_event *event;
304	struct ring_buffer *buffer;
305	int size;
306	int syscall_nr;
307
308	syscall_nr = syscall_get_nr(current, regs);
309	if (syscall_nr < 0)
310		return;
311	if (!test_bit(syscall_nr, enabled_enter_syscalls))
312		return;
313
314	sys_data = syscall_nr_to_meta(syscall_nr);
315	if (!sys_data)
316		return;
317
318	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
319
320	event = trace_current_buffer_lock_reserve(&buffer,
321			sys_data->enter_event->event.type, size, 0, 0);
322	if (!event)
323		return;
324
325	entry = ring_buffer_event_data(event);
326	entry->nr = syscall_nr;
327	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
328
329	if (!filter_current_check_discard(buffer, sys_data->enter_event,
330					  entry, event))
331		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
332}
333
334void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
335{
336	struct syscall_trace_exit *entry;
337	struct syscall_metadata *sys_data;
338	struct ring_buffer_event *event;
339	struct ring_buffer *buffer;
340	int syscall_nr;
341
342	syscall_nr = syscall_get_nr(current, regs);
343	if (syscall_nr < 0)
344		return;
345	if (!test_bit(syscall_nr, enabled_exit_syscalls))
346		return;
347
348	sys_data = syscall_nr_to_meta(syscall_nr);
349	if (!sys_data)
350		return;
351
352	event = trace_current_buffer_lock_reserve(&buffer,
353			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
354	if (!event)
355		return;
356
357	entry = ring_buffer_event_data(event);
358	entry->nr = syscall_nr;
359	entry->ret = syscall_get_return_value(current, regs);
360
361	if (!filter_current_check_discard(buffer, sys_data->exit_event,
362					  entry, event))
363		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
364}
365
366int reg_event_syscall_enter(struct ftrace_event_call *call)
367{
368	int ret = 0;
369	int num;
370
371	num = ((struct syscall_metadata *)call->data)->syscall_nr;
372	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
373		return -ENOSYS;
374	mutex_lock(&syscall_trace_lock);
375	if (!sys_refcount_enter)
376		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
377	if (!ret) {
378		set_bit(num, enabled_enter_syscalls);
379		sys_refcount_enter++;
380	}
381	mutex_unlock(&syscall_trace_lock);
382	return ret;
383}
384
385void unreg_event_syscall_enter(struct ftrace_event_call *call)
386{
387	int num;
388
389	num = ((struct syscall_metadata *)call->data)->syscall_nr;
390	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
391		return;
392	mutex_lock(&syscall_trace_lock);
393	sys_refcount_enter--;
394	clear_bit(num, enabled_enter_syscalls);
395	if (!sys_refcount_enter)
396		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
397	mutex_unlock(&syscall_trace_lock);
398}
399
400int reg_event_syscall_exit(struct ftrace_event_call *call)
401{
402	int ret = 0;
403	int num;
404
405	num = ((struct syscall_metadata *)call->data)->syscall_nr;
406	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
407		return -ENOSYS;
408	mutex_lock(&syscall_trace_lock);
409	if (!sys_refcount_exit)
410		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
411	if (!ret) {
412		set_bit(num, enabled_exit_syscalls);
413		sys_refcount_exit++;
414	}
415	mutex_unlock(&syscall_trace_lock);
416	return ret;
417}
418
419void unreg_event_syscall_exit(struct ftrace_event_call *call)
420{
421	int num;
422
423	num = ((struct syscall_metadata *)call->data)->syscall_nr;
424	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
425		return;
426	mutex_lock(&syscall_trace_lock);
427	sys_refcount_exit--;
428	clear_bit(num, enabled_exit_syscalls);
429	if (!sys_refcount_exit)
430		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
431	mutex_unlock(&syscall_trace_lock);
432}
433
434int init_syscall_trace(struct ftrace_event_call *call)
435{
436	int id;
437	int num;
438
439	num = ((struct syscall_metadata *)call->data)->syscall_nr;
440	if (num < 0 || num >= NR_syscalls) {
441		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
442				((struct syscall_metadata *)call->data)->name);
443		return -ENOSYS;
444	}
445
446	if (set_syscall_print_fmt(call) < 0)
447		return -ENOMEM;
448
449	id = trace_event_raw_init(call);
450
451	if (id < 0) {
452		free_syscall_print_fmt(call);
453		return id;
454	}
455
456	return id;
457}
458
459unsigned long __init __weak arch_syscall_addr(int nr)
460{
461	return (unsigned long)sys_call_table[nr];
462}
463
464int __init init_ftrace_syscalls(void)
465{
466	struct syscall_metadata *meta;
467	unsigned long addr;
468	int i;
469
470	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
471					NR_syscalls, GFP_KERNEL);
472	if (!syscalls_metadata) {
473		WARN_ON(1);
474		return -ENOMEM;
475	}
476
477	for (i = 0; i < NR_syscalls; i++) {
478		addr = arch_syscall_addr(i);
479		meta = find_syscall_meta(addr);
480		if (!meta)
481			continue;
482
483		meta->syscall_nr = i;
484		syscalls_metadata[i] = meta;
485	}
486
487	return 0;
488}
489core_initcall(init_ftrace_syscalls);
490
491#ifdef CONFIG_PERF_EVENTS
492
493static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
494static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
495static int sys_perf_refcount_enter;
496static int sys_perf_refcount_exit;
497
498static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
499{
500	struct syscall_metadata *sys_data;
501	struct syscall_trace_enter *rec;
502	struct hlist_head *head;
503	int syscall_nr;
504	int rctx;
505	int size;
506
507	syscall_nr = syscall_get_nr(current, regs);
508	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
509		return;
510
511	sys_data = syscall_nr_to_meta(syscall_nr);
512	if (!sys_data)
513		return;
514
515	/* get the size after alignment with the u32 buffer size field */
516	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
517	size = ALIGN(size + sizeof(u32), sizeof(u64));
518	size -= sizeof(u32);
519
520	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
521		      "perf buffer not large enough"))
522		return;
523
524	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
525				sys_data->enter_event->event.type, regs, &rctx);
526	if (!rec)
527		return;
528
529	rec->nr = syscall_nr;
530	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
531			       (unsigned long *)&rec->args);
532
533	head = this_cpu_ptr(sys_data->enter_event->perf_events);
534	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
535}
536
537int perf_sysenter_enable(struct ftrace_event_call *call)
538{
539	int ret = 0;
540	int num;
541
542	num = ((struct syscall_metadata *)call->data)->syscall_nr;
543
544	mutex_lock(&syscall_trace_lock);
545	if (!sys_perf_refcount_enter)
546		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
547	if (ret) {
548		pr_info("event trace: Could not activate"
549				"syscall entry trace point");
550	} else {
551		set_bit(num, enabled_perf_enter_syscalls);
552		sys_perf_refcount_enter++;
553	}
554	mutex_unlock(&syscall_trace_lock);
555	return ret;
556}
557
558void perf_sysenter_disable(struct ftrace_event_call *call)
559{
560	int num;
561
562	num = ((struct syscall_metadata *)call->data)->syscall_nr;
563
564	mutex_lock(&syscall_trace_lock);
565	sys_perf_refcount_enter--;
566	clear_bit(num, enabled_perf_enter_syscalls);
567	if (!sys_perf_refcount_enter)
568		unregister_trace_sys_enter(perf_syscall_enter, NULL);
569	mutex_unlock(&syscall_trace_lock);
570}
571
572static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
573{
574	struct syscall_metadata *sys_data;
575	struct syscall_trace_exit *rec;
576	struct hlist_head *head;
577	int syscall_nr;
578	int rctx;
579	int size;
580
581	syscall_nr = syscall_get_nr(current, regs);
582	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
583		return;
584
585	sys_data = syscall_nr_to_meta(syscall_nr);
586	if (!sys_data)
587		return;
588
589	/* We can probably do that at build time */
590	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
591	size -= sizeof(u32);
592
593	/*
594	 * Impossible, but be paranoid with the future
595	 * How to put this check outside runtime?
596	 */
597	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
598		"exit event has grown above perf buffer size"))
599		return;
600
601	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
602				sys_data->exit_event->event.type, regs, &rctx);
603	if (!rec)
604		return;
605
606	rec->nr = syscall_nr;
607	rec->ret = syscall_get_return_value(current, regs);
608
609	head = this_cpu_ptr(sys_data->exit_event->perf_events);
610	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
611}
612
613int perf_sysexit_enable(struct ftrace_event_call *call)
614{
615	int ret = 0;
616	int num;
617
618	num = ((struct syscall_metadata *)call->data)->syscall_nr;
619
620	mutex_lock(&syscall_trace_lock);
621	if (!sys_perf_refcount_exit)
622		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
623	if (ret) {
624		pr_info("event trace: Could not activate"
625				"syscall exit trace point");
626	} else {
627		set_bit(num, enabled_perf_exit_syscalls);
628		sys_perf_refcount_exit++;
629	}
630	mutex_unlock(&syscall_trace_lock);
631	return ret;
632}
633
634void perf_sysexit_disable(struct ftrace_event_call *call)
635{
636	int num;
637
638	num = ((struct syscall_metadata *)call->data)->syscall_nr;
639
640	mutex_lock(&syscall_trace_lock);
641	sys_perf_refcount_exit--;
642	clear_bit(num, enabled_perf_exit_syscalls);
643	if (!sys_perf_refcount_exit)
644		unregister_trace_sys_exit(perf_syscall_exit, NULL);
645	mutex_unlock(&syscall_trace_lock);
646}
647
648#endif /* CONFIG_PERF_EVENTS */
649
650static int syscall_enter_register(struct ftrace_event_call *event,
651				 enum trace_reg type)
652{
653	switch (type) {
654	case TRACE_REG_REGISTER:
655		return reg_event_syscall_enter(event);
656	case TRACE_REG_UNREGISTER:
657		unreg_event_syscall_enter(event);
658		return 0;
659
660#ifdef CONFIG_PERF_EVENTS
661	case TRACE_REG_PERF_REGISTER:
662		return perf_sysenter_enable(event);
663	case TRACE_REG_PERF_UNREGISTER:
664		perf_sysenter_disable(event);
665		return 0;
666#endif
667	}
668	return 0;
669}
670
671static int syscall_exit_register(struct ftrace_event_call *event,
672				 enum trace_reg type)
673{
674	switch (type) {
675	case TRACE_REG_REGISTER:
676		return reg_event_syscall_exit(event);
677	case TRACE_REG_UNREGISTER:
678		unreg_event_syscall_exit(event);
679		return 0;
680
681#ifdef CONFIG_PERF_EVENTS
682	case TRACE_REG_PERF_REGISTER:
683		return perf_sysexit_enable(event);
684	case TRACE_REG_PERF_UNREGISTER:
685		perf_sysexit_disable(event);
686		return 0;
687#endif
688	}
689	return 0;
690}
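
A usage sketch, not from the kernel tree: with CONFIG_FTRACE_SYSCALLS built in, the events registered by either version of this file appear under tracefs and can be switched on from userspace. The mount point /sys/kernel/tracing and the group-level "enable" file are assumptions about the running system's configuration:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* The group-level "enable" file turns on every sys_enter_... and
	 * sys_exit_... event that init_syscall_trace() registered. */
	int fd = open("/sys/kernel/tracing/events/syscalls/enable", O_WRONLY);

	if (fd < 0) {
		perror("open syscalls/enable");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);

	/* Lines formatted by print_syscall_enter()/print_syscall_exit()
	 * can then be read from /sys/kernel/tracing/trace. */
	return 0;
}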