Linux Audio

Check our new training course

Loading...
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2#include <trace/syscall.h>
  3#include <trace/events/syscalls.h>
  4#include <linux/syscalls.h>
  5#include <linux/slab.h>
  6#include <linux/kernel.h>
  7#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
  8#include <linux/ftrace.h>
  9#include <linux/perf_event.h>
 10#include <linux/xarray.h>
 11#include <asm/syscall.h>
 12
 13#include "trace_output.h"
 14#include "trace.h"
 15
 16static DEFINE_MUTEX(syscall_trace_lock);
 
 
 
 
 17
 18static int syscall_enter_register(struct trace_event_call *event,
 19				 enum trace_reg type, void *data);
 20static int syscall_exit_register(struct trace_event_call *event,
 21				 enum trace_reg type, void *data);
 22
 
 
 
 23static struct list_head *
 24syscall_get_enter_fields(struct trace_event_call *call)
 25{
 26	struct syscall_metadata *entry = call->data;
 27
 28	return &entry->enter_fields;
 29}
 30
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 31extern struct syscall_metadata *__start_syscalls_metadata[];
 32extern struct syscall_metadata *__stop_syscalls_metadata[];
 33
 34static DEFINE_XARRAY(syscalls_metadata_sparse);
 35static struct syscall_metadata **syscalls_metadata;
 36
 37#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
 38static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 39{
 40	/*
 41	 * Only compare after the "sys" prefix. Archs that use
 42	 * syscall wrappers may have syscalls symbols aliases prefixed
 43	 * with ".SyS" or ".sys" instead of "sys", leading to an unwanted
 44	 * mismatch.
 45	 */
 46	return !strcmp(sym + 3, name + 3);
 47}
 48#endif
 49
 50#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
 51/*
 52 * Some architectures that allow for 32bit applications
 53 * to run on a 64bit kernel, do not map the syscalls for
 54 * the 32bit tasks the same as they do for 64bit tasks.
 55 *
 56 *     *cough*x86*cough*
 57 *
 58 * In such a case, instead of reporting the wrong syscalls,
 59 * simply ignore them.
 60 *
 61 * For an arch to ignore the compat syscalls it needs to
 62 * define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as
 63 * define the function arch_trace_is_compat_syscall() to let
 64 * the tracing system know that it should ignore it.
 65 */
 66static int
 67trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
 68{
 69	if (unlikely(arch_trace_is_compat_syscall(regs)))
 70		return -1;
 71
 72	return syscall_get_nr(task, regs);
 73}
 74#else
 75static inline int
 76trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
 77{
 78	return syscall_get_nr(task, regs);
 79}
 80#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
 81
 82static __init struct syscall_metadata *
 83find_syscall_meta(unsigned long syscall)
 84{
 85	struct syscall_metadata **start;
 86	struct syscall_metadata **stop;
 87	char str[KSYM_SYMBOL_LEN];
 88
 89
 90	start = __start_syscalls_metadata;
 91	stop = __stop_syscalls_metadata;
 92	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 93
 94	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
 95		return NULL;
 96
 97	for ( ; start < stop; start++) {
 98		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
 99			return *start;
100	}
101	return NULL;
102}
103
104static struct syscall_metadata *syscall_nr_to_meta(int nr)
105{
106	if (IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR))
107		return xa_load(&syscalls_metadata_sparse, (unsigned long)nr);
108
109	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
110		return NULL;
111
112	return syscalls_metadata[nr];
113}
114
115const char *get_syscall_name(int syscall)
116{
117	struct syscall_metadata *entry;
118
119	entry = syscall_nr_to_meta(syscall);
120	if (!entry)
121		return NULL;
122
123	return entry->name;
124}
125
126static enum print_line_t
127print_syscall_enter(struct trace_iterator *iter, int flags,
128		    struct trace_event *event)
129{
130	struct trace_array *tr = iter->tr;
131	struct trace_seq *s = &iter->seq;
132	struct trace_entry *ent = iter->ent;
133	struct syscall_trace_enter *trace;
134	struct syscall_metadata *entry;
135	int i, syscall;
136
137	trace = (typeof(trace))ent;
138	syscall = trace->nr;
139	entry = syscall_nr_to_meta(syscall);
140
141	if (!entry)
142		goto end;
143
144	if (entry->enter_event->event.type != ent->type) {
145		WARN_ON_ONCE(1);
146		goto end;
147	}
148
149	trace_seq_printf(s, "%s(", entry->name);
 
 
150
151	for (i = 0; i < entry->nb_args; i++) {
152
153		if (trace_seq_has_overflowed(s))
154			goto end;
155
156		/* parameter types */
157		if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
158			trace_seq_printf(s, "%s ", entry->types[i]);
159
 
 
160		/* parameter values */
161		trace_seq_printf(s, "%s: %lx%s", entry->args[i],
162				 trace->args[i],
163				 i == entry->nb_args - 1 ? "" : ", ");
 
 
164	}
165
166	trace_seq_putc(s, ')');
 
 
 
167end:
168	trace_seq_putc(s, '\n');
 
 
169
170	return trace_handle_return(s);
171}
172
173static enum print_line_t
174print_syscall_exit(struct trace_iterator *iter, int flags,
175		   struct trace_event *event)
176{
177	struct trace_seq *s = &iter->seq;
178	struct trace_entry *ent = iter->ent;
179	struct syscall_trace_exit *trace;
180	int syscall;
181	struct syscall_metadata *entry;
 
182
183	trace = (typeof(trace))ent;
184	syscall = trace->nr;
185	entry = syscall_nr_to_meta(syscall);
186
187	if (!entry) {
188		trace_seq_putc(s, '\n');
189		goto out;
190	}
191
192	if (entry->exit_event->event.type != ent->type) {
193		WARN_ON_ONCE(1);
194		return TRACE_TYPE_UNHANDLED;
195	}
196
197	trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
198				trace->ret);
 
 
199
200 out:
201	return trace_handle_return(s);
202}
203
204#define SYSCALL_FIELD(_type, _name) {					\
205	.type = #_type, .name = #_name,					\
206	.size = sizeof(_type), .align = __alignof__(_type),		\
207	.is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER }
208
209static int __init
210__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
 
 
 
 
 
 
211{
212	int i;
213	int pos = 0;
214
215	/* When len=0, we just calculate the needed length */
216#define LEN_OR_ZERO (len ? len - pos : 0)
217
218	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
219	for (i = 0; i < entry->nb_args; i++) {
220		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
221				entry->args[i], sizeof(unsigned long),
222				i == entry->nb_args - 1 ? "" : ", ");
223	}
224	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
225
226	for (i = 0; i < entry->nb_args; i++) {
227		pos += snprintf(buf + pos, LEN_OR_ZERO,
228				", ((unsigned long)(REC->%s))", entry->args[i]);
229	}
230
231#undef LEN_OR_ZERO
232
233	/* return the length of print_fmt */
234	return pos;
235}
236
237static int __init set_syscall_print_fmt(struct trace_event_call *call)
238{
239	char *print_fmt;
240	int len;
241	struct syscall_metadata *entry = call->data;
242
243	if (entry->enter_event != call) {
244		call->print_fmt = "\"0x%lx\", REC->ret";
245		return 0;
246	}
247
248	/* First: called with 0 length to calculate the needed length */
249	len = __set_enter_print_fmt(entry, NULL, 0);
250
251	print_fmt = kmalloc(len + 1, GFP_KERNEL);
252	if (!print_fmt)
253		return -ENOMEM;
254
255	/* Second: actually write the @print_fmt */
256	__set_enter_print_fmt(entry, print_fmt, len + 1);
257	call->print_fmt = print_fmt;
258
259	return 0;
260}
261
262static void __init free_syscall_print_fmt(struct trace_event_call *call)
263{
264	struct syscall_metadata *entry = call->data;
265
266	if (entry->enter_event == call)
267		kfree(call->print_fmt);
268}
269
270static int __init syscall_enter_define_fields(struct trace_event_call *call)
271{
272	struct syscall_trace_enter trace;
273	struct syscall_metadata *meta = call->data;
274	int offset = offsetof(typeof(trace), args);
275	int ret = 0;
276	int i;
 
 
 
 
 
277
278	for (i = 0; i < meta->nb_args; i++) {
279		ret = trace_define_field(call, meta->types[i],
280					 meta->args[i], offset,
281					 sizeof(unsigned long), 0,
282					 FILTER_OTHER);
283		if (ret)
284			break;
285		offset += sizeof(unsigned long);
286	}
287
288	return ret;
289}
290
291static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
292{
293	struct trace_array *tr = data;
294	struct trace_event_file *trace_file;
295	struct syscall_trace_enter *entry;
296	struct syscall_metadata *sys_data;
297	struct trace_event_buffer fbuffer;
298	unsigned long args[6];
299	int syscall_nr;
300	int size;
 
301
302	syscall_nr = trace_get_syscall_nr(current, regs);
303	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
304		return;
305
306	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
307	trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
308	if (!trace_file)
309		return;
310
311	if (trace_trigger_soft_disabled(trace_file))
312		return;
313
314	sys_data = syscall_nr_to_meta(syscall_nr);
315	if (!sys_data)
316		return;
317
318	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
319
320	entry = trace_event_buffer_reserve(&fbuffer, trace_file, size);
321	if (!entry)
 
322		return;
323
324	entry = ring_buffer_event_data(fbuffer.event);
325	entry->nr = syscall_nr;
326	syscall_get_arguments(current, regs, args);
327	memcpy(entry->args, args, sizeof(unsigned long) * sys_data->nb_args);
328
329	trace_event_buffer_commit(&fbuffer);
 
 
330}
331
332static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
333{
334	struct trace_array *tr = data;
335	struct trace_event_file *trace_file;
336	struct syscall_trace_exit *entry;
337	struct syscall_metadata *sys_data;
338	struct trace_event_buffer fbuffer;
 
339	int syscall_nr;
340
341	syscall_nr = trace_get_syscall_nr(current, regs);
342	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
343		return;
344
345	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
346	trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
347	if (!trace_file)
348		return;
349
350	if (trace_trigger_soft_disabled(trace_file))
351		return;
352
353	sys_data = syscall_nr_to_meta(syscall_nr);
354	if (!sys_data)
355		return;
356
357	entry = trace_event_buffer_reserve(&fbuffer, trace_file, sizeof(*entry));
358	if (!entry)
 
359		return;
360
361	entry = ring_buffer_event_data(fbuffer.event);
362	entry->nr = syscall_nr;
363	entry->ret = syscall_get_return_value(current, regs);
364
365	trace_event_buffer_commit(&fbuffer);
 
 
366}
367
368static int reg_event_syscall_enter(struct trace_event_file *file,
369				   struct trace_event_call *call)
370{
371	struct trace_array *tr = file->tr;
372	int ret = 0;
373	int num;
374
375	num = ((struct syscall_metadata *)call->data)->syscall_nr;
376	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
377		return -ENOSYS;
378	mutex_lock(&syscall_trace_lock);
379	if (!tr->sys_refcount_enter)
380		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
381	if (!ret) {
382		rcu_assign_pointer(tr->enter_syscall_files[num], file);
383		tr->sys_refcount_enter++;
384	}
385	mutex_unlock(&syscall_trace_lock);
386	return ret;
387}
388
389static void unreg_event_syscall_enter(struct trace_event_file *file,
390				      struct trace_event_call *call)
391{
392	struct trace_array *tr = file->tr;
393	int num;
394
395	num = ((struct syscall_metadata *)call->data)->syscall_nr;
396	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
397		return;
398	mutex_lock(&syscall_trace_lock);
399	tr->sys_refcount_enter--;
400	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
401	if (!tr->sys_refcount_enter)
402		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
403	mutex_unlock(&syscall_trace_lock);
404}
405
406static int reg_event_syscall_exit(struct trace_event_file *file,
407				  struct trace_event_call *call)
408{
409	struct trace_array *tr = file->tr;
410	int ret = 0;
411	int num;
412
413	num = ((struct syscall_metadata *)call->data)->syscall_nr;
414	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
415		return -ENOSYS;
416	mutex_lock(&syscall_trace_lock);
417	if (!tr->sys_refcount_exit)
418		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
419	if (!ret) {
420		rcu_assign_pointer(tr->exit_syscall_files[num], file);
421		tr->sys_refcount_exit++;
422	}
423	mutex_unlock(&syscall_trace_lock);
424	return ret;
425}
426
427static void unreg_event_syscall_exit(struct trace_event_file *file,
428				     struct trace_event_call *call)
429{
430	struct trace_array *tr = file->tr;
431	int num;
432
433	num = ((struct syscall_metadata *)call->data)->syscall_nr;
434	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
435		return;
436	mutex_lock(&syscall_trace_lock);
437	tr->sys_refcount_exit--;
438	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
439	if (!tr->sys_refcount_exit)
440		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
441	mutex_unlock(&syscall_trace_lock);
442}
443
444static int __init init_syscall_trace(struct trace_event_call *call)
445{
446	int id;
447	int num;
448
449	num = ((struct syscall_metadata *)call->data)->syscall_nr;
450	if (num < 0 || num >= NR_syscalls) {
451		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
452				((struct syscall_metadata *)call->data)->name);
453		return -ENOSYS;
454	}
455
456	if (set_syscall_print_fmt(call) < 0)
457		return -ENOMEM;
458
459	id = trace_event_raw_init(call);
460
461	if (id < 0) {
462		free_syscall_print_fmt(call);
463		return id;
464	}
465
466	return id;
467}
468
469static struct trace_event_fields __refdata syscall_enter_fields_array[] = {
470	SYSCALL_FIELD(int, __syscall_nr),
471	{ .type = TRACE_FUNCTION_TYPE,
472	  .define_fields = syscall_enter_define_fields },
473	{}
474};
475
476struct trace_event_functions enter_syscall_print_funcs = {
477	.trace		= print_syscall_enter,
478};
479
480struct trace_event_functions exit_syscall_print_funcs = {
481	.trace		= print_syscall_exit,
482};
483
484struct trace_event_class __refdata event_class_syscall_enter = {
485	.system		= "syscalls",
486	.reg		= syscall_enter_register,
487	.fields_array	= syscall_enter_fields_array,
488	.get_fields	= syscall_get_enter_fields,
489	.raw_init	= init_syscall_trace,
490};
491
492struct trace_event_class __refdata event_class_syscall_exit = {
493	.system		= "syscalls",
494	.reg		= syscall_exit_register,
495	.fields_array	= (struct trace_event_fields[]){
496		SYSCALL_FIELD(int, __syscall_nr),
497		SYSCALL_FIELD(long, ret),
498		{}
499	},
500	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
501	.raw_init	= init_syscall_trace,
502};
503
504unsigned long __init __weak arch_syscall_addr(int nr)
505{
506	return (unsigned long)sys_call_table[nr];
507}
508
509void __init init_ftrace_syscalls(void)
510{
511	struct syscall_metadata *meta;
512	unsigned long addr;
513	int i;
514	void *ret;
515
516	if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
517		syscalls_metadata = kcalloc(NR_syscalls,
518					sizeof(*syscalls_metadata),
519					GFP_KERNEL);
520		if (!syscalls_metadata) {
521			WARN_ON(1);
522			return;
523		}
524	}
525
526	for (i = 0; i < NR_syscalls; i++) {
527		addr = arch_syscall_addr(i);
528		meta = find_syscall_meta(addr);
529		if (!meta)
530			continue;
531
532		meta->syscall_nr = i;
533
534		if (!IS_ENABLED(CONFIG_HAVE_SPARSE_SYSCALL_NR)) {
535			syscalls_metadata[i] = meta;
536		} else {
537			ret = xa_store(&syscalls_metadata_sparse, i, meta,
538					GFP_KERNEL);
539			WARN(xa_is_err(ret),
540				"Syscall memory allocation failed\n");
541		}
542
543	}
 
 
544}
 
545
546#ifdef CONFIG_PERF_EVENTS
547
548static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
549static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
550static int sys_perf_refcount_enter;
551static int sys_perf_refcount_exit;
552
553static int perf_call_bpf_enter(struct trace_event_call *call, struct pt_regs *regs,
554			       struct syscall_metadata *sys_data,
555			       struct syscall_trace_enter *rec)
556{
557	struct syscall_tp_t {
558		unsigned long long regs;
559		unsigned long syscall_nr;
560		unsigned long args[SYSCALL_DEFINE_MAXARGS];
561	} param;
562	int i;
563
564	*(struct pt_regs **)&param = regs;
565	param.syscall_nr = rec->nr;
566	for (i = 0; i < sys_data->nb_args; i++)
567		param.args[i] = rec->args[i];
568	return trace_call_bpf(call, &param);
569}
570
571static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
572{
573	struct syscall_metadata *sys_data;
574	struct syscall_trace_enter *rec;
575	struct hlist_head *head;
576	unsigned long args[6];
577	bool valid_prog_array;
578	int syscall_nr;
579	int rctx;
580	int size;
581
582	syscall_nr = trace_get_syscall_nr(current, regs);
583	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
584		return;
585	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
586		return;
587
588	sys_data = syscall_nr_to_meta(syscall_nr);
589	if (!sys_data)
590		return;
591
592	head = this_cpu_ptr(sys_data->enter_event->perf_events);
593	valid_prog_array = bpf_prog_array_valid(sys_data->enter_event);
594	if (!valid_prog_array && hlist_empty(head))
595		return;
596
597	/* get the size after alignment with the u32 buffer size field */
598	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
599	size = ALIGN(size + sizeof(u32), sizeof(u64));
600	size -= sizeof(u32);
601
602	rec = perf_trace_buf_alloc(size, NULL, &rctx);
 
 
 
 
 
603	if (!rec)
604		return;
605
606	rec->nr = syscall_nr;
607	syscall_get_arguments(current, regs, args);
608	memcpy(&rec->args, args, sizeof(unsigned long) * sys_data->nb_args);
609
610	if ((valid_prog_array &&
611	     !perf_call_bpf_enter(sys_data->enter_event, regs, sys_data, rec)) ||
612	    hlist_empty(head)) {
613		perf_swevent_put_recursion_context(rctx);
614		return;
615	}
616
617	perf_trace_buf_submit(rec, size, rctx,
618			      sys_data->enter_event->event.type, 1, regs,
619			      head, NULL);
620}
621
622static int perf_sysenter_enable(struct trace_event_call *call)
623{
624	int ret = 0;
625	int num;
626
627	num = ((struct syscall_metadata *)call->data)->syscall_nr;
628
629	mutex_lock(&syscall_trace_lock);
630	if (!sys_perf_refcount_enter)
631		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
632	if (ret) {
633		pr_info("event trace: Could not activate syscall entry trace point");
 
634	} else {
635		set_bit(num, enabled_perf_enter_syscalls);
636		sys_perf_refcount_enter++;
637	}
638	mutex_unlock(&syscall_trace_lock);
639	return ret;
640}
641
642static void perf_sysenter_disable(struct trace_event_call *call)
643{
644	int num;
645
646	num = ((struct syscall_metadata *)call->data)->syscall_nr;
647
648	mutex_lock(&syscall_trace_lock);
649	sys_perf_refcount_enter--;
650	clear_bit(num, enabled_perf_enter_syscalls);
651	if (!sys_perf_refcount_enter)
652		unregister_trace_sys_enter(perf_syscall_enter, NULL);
653	mutex_unlock(&syscall_trace_lock);
654}
655
656static int perf_call_bpf_exit(struct trace_event_call *call, struct pt_regs *regs,
657			      struct syscall_trace_exit *rec)
658{
659	struct syscall_tp_t {
660		unsigned long long regs;
661		unsigned long syscall_nr;
662		unsigned long ret;
663	} param;
664
665	*(struct pt_regs **)&param = regs;
666	param.syscall_nr = rec->nr;
667	param.ret = rec->ret;
668	return trace_call_bpf(call, &param);
669}
670
671static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
672{
673	struct syscall_metadata *sys_data;
674	struct syscall_trace_exit *rec;
675	struct hlist_head *head;
676	bool valid_prog_array;
677	int syscall_nr;
678	int rctx;
679	int size;
680
681	syscall_nr = trace_get_syscall_nr(current, regs);
682	if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
683		return;
684	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
685		return;
686
687	sys_data = syscall_nr_to_meta(syscall_nr);
688	if (!sys_data)
689		return;
690
691	head = this_cpu_ptr(sys_data->exit_event->perf_events);
692	valid_prog_array = bpf_prog_array_valid(sys_data->exit_event);
693	if (!valid_prog_array && hlist_empty(head))
694		return;
695
696	/* We can probably do that at build time */
697	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
698	size -= sizeof(u32);
699
700	rec = perf_trace_buf_alloc(size, NULL, &rctx);
 
 
 
 
 
 
 
 
 
701	if (!rec)
702		return;
703
704	rec->nr = syscall_nr;
705	rec->ret = syscall_get_return_value(current, regs);
706
707	if ((valid_prog_array &&
708	     !perf_call_bpf_exit(sys_data->exit_event, regs, rec)) ||
709	    hlist_empty(head)) {
710		perf_swevent_put_recursion_context(rctx);
711		return;
712	}
713
714	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
715			      1, regs, head, NULL);
716}
717
718static int perf_sysexit_enable(struct trace_event_call *call)
719{
720	int ret = 0;
721	int num;
722
723	num = ((struct syscall_metadata *)call->data)->syscall_nr;
724
725	mutex_lock(&syscall_trace_lock);
726	if (!sys_perf_refcount_exit)
727		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
728	if (ret) {
729		pr_info("event trace: Could not activate syscall exit trace point");
 
730	} else {
731		set_bit(num, enabled_perf_exit_syscalls);
732		sys_perf_refcount_exit++;
733	}
734	mutex_unlock(&syscall_trace_lock);
735	return ret;
736}
737
738static void perf_sysexit_disable(struct trace_event_call *call)
739{
740	int num;
741
742	num = ((struct syscall_metadata *)call->data)->syscall_nr;
743
744	mutex_lock(&syscall_trace_lock);
745	sys_perf_refcount_exit--;
746	clear_bit(num, enabled_perf_exit_syscalls);
747	if (!sys_perf_refcount_exit)
748		unregister_trace_sys_exit(perf_syscall_exit, NULL);
749	mutex_unlock(&syscall_trace_lock);
750}
751
752#endif /* CONFIG_PERF_EVENTS */
753
754static int syscall_enter_register(struct trace_event_call *event,
755				 enum trace_reg type, void *data)
756{
757	struct trace_event_file *file = data;
758
759	switch (type) {
760	case TRACE_REG_REGISTER:
761		return reg_event_syscall_enter(file, event);
762	case TRACE_REG_UNREGISTER:
763		unreg_event_syscall_enter(file, event);
764		return 0;
765
766#ifdef CONFIG_PERF_EVENTS
767	case TRACE_REG_PERF_REGISTER:
768		return perf_sysenter_enable(event);
769	case TRACE_REG_PERF_UNREGISTER:
770		perf_sysenter_disable(event);
771		return 0;
772	case TRACE_REG_PERF_OPEN:
773	case TRACE_REG_PERF_CLOSE:
774	case TRACE_REG_PERF_ADD:
775	case TRACE_REG_PERF_DEL:
776		return 0;
777#endif
778	}
779	return 0;
780}
781
782static int syscall_exit_register(struct trace_event_call *event,
783				 enum trace_reg type, void *data)
784{
785	struct trace_event_file *file = data;
786
787	switch (type) {
788	case TRACE_REG_REGISTER:
789		return reg_event_syscall_exit(file, event);
790	case TRACE_REG_UNREGISTER:
791		unreg_event_syscall_exit(file, event);
792		return 0;
793
794#ifdef CONFIG_PERF_EVENTS
795	case TRACE_REG_PERF_REGISTER:
796		return perf_sysexit_enable(event);
797	case TRACE_REG_PERF_UNREGISTER:
798		perf_sysexit_disable(event);
799		return 0;
800	case TRACE_REG_PERF_OPEN:
801	case TRACE_REG_PERF_CLOSE:
802	case TRACE_REG_PERF_ADD:
803	case TRACE_REG_PERF_DEL:
804		return 0;
805#endif
806	}
807	return 0;
808}
v3.5.6
 
  1#include <trace/syscall.h>
  2#include <trace/events/syscalls.h>
 
  3#include <linux/slab.h>
  4#include <linux/kernel.h>
  5#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
  6#include <linux/ftrace.h>
  7#include <linux/perf_event.h>
 
  8#include <asm/syscall.h>
  9
 10#include "trace_output.h"
 11#include "trace.h"
 12
 13static DEFINE_MUTEX(syscall_trace_lock);
 14static int sys_refcount_enter;
 15static int sys_refcount_exit;
 16static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
 17static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
 18
 19static int syscall_enter_register(struct ftrace_event_call *event,
 20				 enum trace_reg type, void *data);
 21static int syscall_exit_register(struct ftrace_event_call *event,
 22				 enum trace_reg type, void *data);
 23
 24static int syscall_enter_define_fields(struct ftrace_event_call *call);
 25static int syscall_exit_define_fields(struct ftrace_event_call *call);
 26
 27static struct list_head *
 28syscall_get_enter_fields(struct ftrace_event_call *call)
 29{
 30	struct syscall_metadata *entry = call->data;
 31
 32	return &entry->enter_fields;
 33}
 34
 35struct trace_event_functions enter_syscall_print_funcs = {
 36	.trace		= print_syscall_enter,
 37};
 38
 39struct trace_event_functions exit_syscall_print_funcs = {
 40	.trace		= print_syscall_exit,
 41};
 42
 43struct ftrace_event_class event_class_syscall_enter = {
 44	.system		= "syscalls",
 45	.reg		= syscall_enter_register,
 46	.define_fields	= syscall_enter_define_fields,
 47	.get_fields	= syscall_get_enter_fields,
 48	.raw_init	= init_syscall_trace,
 49};
 50
 51struct ftrace_event_class event_class_syscall_exit = {
 52	.system		= "syscalls",
 53	.reg		= syscall_exit_register,
 54	.define_fields	= syscall_exit_define_fields,
 55	.fields		= LIST_HEAD_INIT(event_class_syscall_exit.fields),
 56	.raw_init	= init_syscall_trace,
 57};
 58
 59extern struct syscall_metadata *__start_syscalls_metadata[];
 60extern struct syscall_metadata *__stop_syscalls_metadata[];
 61
 
 62static struct syscall_metadata **syscalls_metadata;
 63
 64#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
 65static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
 66{
 67	/*
 68	 * Only compare after the "sys" prefix. Archs that use
 69	 * syscall wrappers may have syscalls symbols aliases prefixed
 70	 * with "SyS" instead of "sys", leading to an unwanted
 71	 * mismatch.
 72	 */
 73	return !strcmp(sym + 3, name + 3);
 74}
 75#endif
 76
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 77static __init struct syscall_metadata *
 78find_syscall_meta(unsigned long syscall)
 79{
 80	struct syscall_metadata **start;
 81	struct syscall_metadata **stop;
 82	char str[KSYM_SYMBOL_LEN];
 83
 84
 85	start = __start_syscalls_metadata;
 86	stop = __stop_syscalls_metadata;
 87	kallsyms_lookup(syscall, NULL, NULL, NULL, str);
 88
 89	if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
 90		return NULL;
 91
 92	for ( ; start < stop; start++) {
 93		if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
 94			return *start;
 95	}
 96	return NULL;
 97}
 98
 99static struct syscall_metadata *syscall_nr_to_meta(int nr)
100{
 
 
 
101	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
102		return NULL;
103
104	return syscalls_metadata[nr];
105}
106
107enum print_line_t
 
 
 
 
 
 
 
 
 
 
 
108print_syscall_enter(struct trace_iterator *iter, int flags,
109		    struct trace_event *event)
110{
 
111	struct trace_seq *s = &iter->seq;
112	struct trace_entry *ent = iter->ent;
113	struct syscall_trace_enter *trace;
114	struct syscall_metadata *entry;
115	int i, ret, syscall;
116
117	trace = (typeof(trace))ent;
118	syscall = trace->nr;
119	entry = syscall_nr_to_meta(syscall);
120
121	if (!entry)
122		goto end;
123
124	if (entry->enter_event->event.type != ent->type) {
125		WARN_ON_ONCE(1);
126		goto end;
127	}
128
129	ret = trace_seq_printf(s, "%s(", entry->name);
130	if (!ret)
131		return TRACE_TYPE_PARTIAL_LINE;
132
133	for (i = 0; i < entry->nb_args; i++) {
 
 
 
 
134		/* parameter types */
135		if (trace_flags & TRACE_ITER_VERBOSE) {
136			ret = trace_seq_printf(s, "%s ", entry->types[i]);
137			if (!ret)
138				return TRACE_TYPE_PARTIAL_LINE;
139		}
140		/* parameter values */
141		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
142				       trace->args[i],
143				       i == entry->nb_args - 1 ? "" : ", ");
144		if (!ret)
145			return TRACE_TYPE_PARTIAL_LINE;
146	}
147
148	ret = trace_seq_putc(s, ')');
149	if (!ret)
150		return TRACE_TYPE_PARTIAL_LINE;
151
152end:
153	ret =  trace_seq_putc(s, '\n');
154	if (!ret)
155		return TRACE_TYPE_PARTIAL_LINE;
156
157	return TRACE_TYPE_HANDLED;
158}
159
160enum print_line_t
161print_syscall_exit(struct trace_iterator *iter, int flags,
162		   struct trace_event *event)
163{
164	struct trace_seq *s = &iter->seq;
165	struct trace_entry *ent = iter->ent;
166	struct syscall_trace_exit *trace;
167	int syscall;
168	struct syscall_metadata *entry;
169	int ret;
170
171	trace = (typeof(trace))ent;
172	syscall = trace->nr;
173	entry = syscall_nr_to_meta(syscall);
174
175	if (!entry) {
176		trace_seq_printf(s, "\n");
177		return TRACE_TYPE_HANDLED;
178	}
179
180	if (entry->exit_event->event.type != ent->type) {
181		WARN_ON_ONCE(1);
182		return TRACE_TYPE_UNHANDLED;
183	}
184
185	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
186				trace->ret);
187	if (!ret)
188		return TRACE_TYPE_PARTIAL_LINE;
189
190	return TRACE_TYPE_HANDLED;
 
191}
192
193extern char *__bad_type_size(void);
 
 
 
194
195#define SYSCALL_FIELD(type, name)					\
196	sizeof(type) != sizeof(trace.name) ?				\
197		__bad_type_size() :					\
198		#type, #name, offsetof(typeof(trace), name),		\
199		sizeof(trace.name), is_signed_type(type)
200
201static
202int  __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
203{
204	int i;
205	int pos = 0;
206
207	/* When len=0, we just calculate the needed length */
208#define LEN_OR_ZERO (len ? len - pos : 0)
209
210	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
211	for (i = 0; i < entry->nb_args; i++) {
212		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
213				entry->args[i], sizeof(unsigned long),
214				i == entry->nb_args - 1 ? "" : ", ");
215	}
216	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
217
218	for (i = 0; i < entry->nb_args; i++) {
219		pos += snprintf(buf + pos, LEN_OR_ZERO,
220				", ((unsigned long)(REC->%s))", entry->args[i]);
221	}
222
223#undef LEN_OR_ZERO
224
225	/* return the length of print_fmt */
226	return pos;
227}
228
229static int set_syscall_print_fmt(struct ftrace_event_call *call)
230{
231	char *print_fmt;
232	int len;
233	struct syscall_metadata *entry = call->data;
234
235	if (entry->enter_event != call) {
236		call->print_fmt = "\"0x%lx\", REC->ret";
237		return 0;
238	}
239
240	/* First: called with 0 length to calculate the needed length */
241	len = __set_enter_print_fmt(entry, NULL, 0);
242
243	print_fmt = kmalloc(len + 1, GFP_KERNEL);
244	if (!print_fmt)
245		return -ENOMEM;
246
247	/* Second: actually write the @print_fmt */
248	__set_enter_print_fmt(entry, print_fmt, len + 1);
249	call->print_fmt = print_fmt;
250
251	return 0;
252}
253
254static void free_syscall_print_fmt(struct ftrace_event_call *call)
255{
256	struct syscall_metadata *entry = call->data;
257
258	if (entry->enter_event == call)
259		kfree(call->print_fmt);
260}
261
262static int syscall_enter_define_fields(struct ftrace_event_call *call)
263{
264	struct syscall_trace_enter trace;
265	struct syscall_metadata *meta = call->data;
266	int ret;
 
267	int i;
268	int offset = offsetof(typeof(trace), args);
269
270	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
271	if (ret)
272		return ret;
273
274	for (i = 0; i < meta->nb_args; i++) {
275		ret = trace_define_field(call, meta->types[i],
276					 meta->args[i], offset,
277					 sizeof(unsigned long), 0,
278					 FILTER_OTHER);
 
 
279		offset += sizeof(unsigned long);
280	}
281
282	return ret;
283}
284
285static int syscall_exit_define_fields(struct ftrace_event_call *call)
286{
287	struct syscall_trace_exit trace;
288	int ret;
289
290	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
291	if (ret)
292		return ret;
293
294	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
295				 FILTER_OTHER);
296
297	return ret;
298}
299
300void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
301{
 
 
302	struct syscall_trace_enter *entry;
303	struct syscall_metadata *sys_data;
304	struct ring_buffer_event *event;
305	struct ring_buffer *buffer;
 
306	int size;
307	int syscall_nr;
308
309	syscall_nr = syscall_get_nr(current, regs);
310	if (syscall_nr < 0)
 
 
 
 
 
311		return;
312	if (!test_bit(syscall_nr, enabled_enter_syscalls))
 
313		return;
314
315	sys_data = syscall_nr_to_meta(syscall_nr);
316	if (!sys_data)
317		return;
318
319	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
320
321	event = trace_current_buffer_lock_reserve(&buffer,
322			sys_data->enter_event->event.type, size, 0, 0);
323	if (!event)
324		return;
325
326	entry = ring_buffer_event_data(event);
327	entry->nr = syscall_nr;
328	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
329
330	if (!filter_current_check_discard(buffer, sys_data->enter_event,
331					  entry, event))
332		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
333}
334
335void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
336{
 
 
337	struct syscall_trace_exit *entry;
338	struct syscall_metadata *sys_data;
339	struct ring_buffer_event *event;
340	struct ring_buffer *buffer;
341	int syscall_nr;
342
343	syscall_nr = syscall_get_nr(current, regs);
344	if (syscall_nr < 0)
345		return;
346	if (!test_bit(syscall_nr, enabled_exit_syscalls))
 
 
 
 
 
 
347		return;
348
349	sys_data = syscall_nr_to_meta(syscall_nr);
350	if (!sys_data)
351		return;
352
353	event = trace_current_buffer_lock_reserve(&buffer,
354			sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
355	if (!event)
356		return;
357
358	entry = ring_buffer_event_data(event);
359	entry->nr = syscall_nr;
360	entry->ret = syscall_get_return_value(current, regs);
361
362	if (!filter_current_check_discard(buffer, sys_data->exit_event,
363					  entry, event))
364		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
365}
366
367int reg_event_syscall_enter(struct ftrace_event_call *call)
 
368{
 
369	int ret = 0;
370	int num;
371
372	num = ((struct syscall_metadata *)call->data)->syscall_nr;
373	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
374		return -ENOSYS;
375	mutex_lock(&syscall_trace_lock);
376	if (!sys_refcount_enter)
377		ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
378	if (!ret) {
379		set_bit(num, enabled_enter_syscalls);
380		sys_refcount_enter++;
381	}
382	mutex_unlock(&syscall_trace_lock);
383	return ret;
384}
385
386void unreg_event_syscall_enter(struct ftrace_event_call *call)
 
387{
 
388	int num;
389
390	num = ((struct syscall_metadata *)call->data)->syscall_nr;
391	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
392		return;
393	mutex_lock(&syscall_trace_lock);
394	sys_refcount_enter--;
395	clear_bit(num, enabled_enter_syscalls);
396	if (!sys_refcount_enter)
397		unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
398	mutex_unlock(&syscall_trace_lock);
399}
400
401int reg_event_syscall_exit(struct ftrace_event_call *call)
 
402{
 
403	int ret = 0;
404	int num;
405
406	num = ((struct syscall_metadata *)call->data)->syscall_nr;
407	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
408		return -ENOSYS;
409	mutex_lock(&syscall_trace_lock);
410	if (!sys_refcount_exit)
411		ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
412	if (!ret) {
413		set_bit(num, enabled_exit_syscalls);
414		sys_refcount_exit++;
415	}
416	mutex_unlock(&syscall_trace_lock);
417	return ret;
418}
419
420void unreg_event_syscall_exit(struct ftrace_event_call *call)
 
421{
 
422	int num;
423
424	num = ((struct syscall_metadata *)call->data)->syscall_nr;
425	if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
426		return;
427	mutex_lock(&syscall_trace_lock);
428	sys_refcount_exit--;
429	clear_bit(num, enabled_exit_syscalls);
430	if (!sys_refcount_exit)
431		unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
432	mutex_unlock(&syscall_trace_lock);
433}
434
435int init_syscall_trace(struct ftrace_event_call *call)
436{
437	int id;
438	int num;
439
440	num = ((struct syscall_metadata *)call->data)->syscall_nr;
441	if (num < 0 || num >= NR_syscalls) {
442		pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
443				((struct syscall_metadata *)call->data)->name);
444		return -ENOSYS;
445	}
446
447	if (set_syscall_print_fmt(call) < 0)
448		return -ENOMEM;
449
450	id = trace_event_raw_init(call);
451
452	if (id < 0) {
453		free_syscall_print_fmt(call);
454		return id;
455	}
456
457	return id;
458}
459
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
460unsigned long __init __weak arch_syscall_addr(int nr)
461{
462	return (unsigned long)sys_call_table[nr];
463}
464
465int __init init_ftrace_syscalls(void)
466{
467	struct syscall_metadata *meta;
468	unsigned long addr;
469	int i;
 
470
471	syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
472				    GFP_KERNEL);
473	if (!syscalls_metadata) {
474		WARN_ON(1);
475		return -ENOMEM;
 
 
 
476	}
477
478	for (i = 0; i < NR_syscalls; i++) {
479		addr = arch_syscall_addr(i);
480		meta = find_syscall_meta(addr);
481		if (!meta)
482			continue;
483
484		meta->syscall_nr = i;
485		syscalls_metadata[i] = meta;
 
 
 
 
 
 
 
 
 
486	}
487
488	return 0;
489}
490core_initcall(init_ftrace_syscalls);
491
492#ifdef CONFIG_PERF_EVENTS
493
494static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
495static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
496static int sys_perf_refcount_enter;
497static int sys_perf_refcount_exit;
498
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
499static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
500{
501	struct syscall_metadata *sys_data;
502	struct syscall_trace_enter *rec;
503	struct hlist_head *head;
 
 
504	int syscall_nr;
505	int rctx;
506	int size;
507
508	syscall_nr = syscall_get_nr(current, regs);
 
 
509	if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
510		return;
511
512	sys_data = syscall_nr_to_meta(syscall_nr);
513	if (!sys_data)
514		return;
515
 
 
 
 
 
516	/* get the size after alignment with the u32 buffer size field */
517	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
518	size = ALIGN(size + sizeof(u32), sizeof(u64));
519	size -= sizeof(u32);
520
521	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
522		      "perf buffer not large enough"))
523		return;
524
525	rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
526				sys_data->enter_event->event.type, regs, &rctx);
527	if (!rec)
528		return;
529
530	rec->nr = syscall_nr;
531	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
532			       (unsigned long *)&rec->args);
 
 
 
 
 
 
 
533
534	head = this_cpu_ptr(sys_data->enter_event->perf_events);
535	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
 
536}
537
538int perf_sysenter_enable(struct ftrace_event_call *call)
539{
540	int ret = 0;
541	int num;
542
543	num = ((struct syscall_metadata *)call->data)->syscall_nr;
544
545	mutex_lock(&syscall_trace_lock);
546	if (!sys_perf_refcount_enter)
547		ret = register_trace_sys_enter(perf_syscall_enter, NULL);
548	if (ret) {
549		pr_info("event trace: Could not activate"
550				"syscall entry trace point");
551	} else {
552		set_bit(num, enabled_perf_enter_syscalls);
553		sys_perf_refcount_enter++;
554	}
555	mutex_unlock(&syscall_trace_lock);
556	return ret;
557}
558
559void perf_sysenter_disable(struct ftrace_event_call *call)
560{
561	int num;
562
563	num = ((struct syscall_metadata *)call->data)->syscall_nr;
564
565	mutex_lock(&syscall_trace_lock);
566	sys_perf_refcount_enter--;
567	clear_bit(num, enabled_perf_enter_syscalls);
568	if (!sys_perf_refcount_enter)
569		unregister_trace_sys_enter(perf_syscall_enter, NULL);
570	mutex_unlock(&syscall_trace_lock);
571}
572
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
573static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
574{
575	struct syscall_metadata *sys_data;
576	struct syscall_trace_exit *rec;
577	struct hlist_head *head;
 
578	int syscall_nr;
579	int rctx;
580	int size;
581
582	syscall_nr = syscall_get_nr(current, regs);
 
 
583	if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
584		return;
585
586	sys_data = syscall_nr_to_meta(syscall_nr);
587	if (!sys_data)
588		return;
589
 
 
 
 
 
590	/* We can probably do that at build time */
591	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
592	size -= sizeof(u32);
593
594	/*
595	 * Impossible, but be paranoid with the future
596	 * How to put this check outside runtime?
597	 */
598	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
599		"exit event has grown above perf buffer size"))
600		return;
601
602	rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
603				sys_data->exit_event->event.type, regs, &rctx);
604	if (!rec)
605		return;
606
607	rec->nr = syscall_nr;
608	rec->ret = syscall_get_return_value(current, regs);
609
610	head = this_cpu_ptr(sys_data->exit_event->perf_events);
611	perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head);
 
 
 
 
 
 
 
612}
613
614int perf_sysexit_enable(struct ftrace_event_call *call)
615{
616	int ret = 0;
617	int num;
618
619	num = ((struct syscall_metadata *)call->data)->syscall_nr;
620
621	mutex_lock(&syscall_trace_lock);
622	if (!sys_perf_refcount_exit)
623		ret = register_trace_sys_exit(perf_syscall_exit, NULL);
624	if (ret) {
625		pr_info("event trace: Could not activate"
626				"syscall exit trace point");
627	} else {
628		set_bit(num, enabled_perf_exit_syscalls);
629		sys_perf_refcount_exit++;
630	}
631	mutex_unlock(&syscall_trace_lock);
632	return ret;
633}
634
635void perf_sysexit_disable(struct ftrace_event_call *call)
636{
637	int num;
638
639	num = ((struct syscall_metadata *)call->data)->syscall_nr;
640
641	mutex_lock(&syscall_trace_lock);
642	sys_perf_refcount_exit--;
643	clear_bit(num, enabled_perf_exit_syscalls);
644	if (!sys_perf_refcount_exit)
645		unregister_trace_sys_exit(perf_syscall_exit, NULL);
646	mutex_unlock(&syscall_trace_lock);
647}
648
649#endif /* CONFIG_PERF_EVENTS */
650
651static int syscall_enter_register(struct ftrace_event_call *event,
652				 enum trace_reg type, void *data)
653{
 
 
654	switch (type) {
655	case TRACE_REG_REGISTER:
656		return reg_event_syscall_enter(event);
657	case TRACE_REG_UNREGISTER:
658		unreg_event_syscall_enter(event);
659		return 0;
660
661#ifdef CONFIG_PERF_EVENTS
662	case TRACE_REG_PERF_REGISTER:
663		return perf_sysenter_enable(event);
664	case TRACE_REG_PERF_UNREGISTER:
665		perf_sysenter_disable(event);
666		return 0;
667	case TRACE_REG_PERF_OPEN:
668	case TRACE_REG_PERF_CLOSE:
669	case TRACE_REG_PERF_ADD:
670	case TRACE_REG_PERF_DEL:
671		return 0;
672#endif
673	}
674	return 0;
675}
676
677static int syscall_exit_register(struct ftrace_event_call *event,
678				 enum trace_reg type, void *data)
679{
 
 
680	switch (type) {
681	case TRACE_REG_REGISTER:
682		return reg_event_syscall_exit(event);
683	case TRACE_REG_UNREGISTER:
684		unreg_event_syscall_exit(event);
685		return 0;
686
687#ifdef CONFIG_PERF_EVENTS
688	case TRACE_REG_PERF_REGISTER:
689		return perf_sysexit_enable(event);
690	case TRACE_REG_PERF_UNREGISTER:
691		perf_sysexit_disable(event);
692		return 0;
693	case TRACE_REG_PERF_OPEN:
694	case TRACE_REG_PERF_CLOSE:
695	case TRACE_REG_PERF_ADD:
696	case TRACE_REG_PERF_DEL:
697		return 0;
698#endif
699	}
700	return 0;
701}