// SPDX-License-Identifier: GPL-2.0
/*
 * Infrastructure to hook into function calls and returns.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 * Highly modified by Steven Rostedt (VMware).
 */
#include <linux/suspend.h>
#include <linux/ftrace.h>
#include <linux/slab.h>

#include <trace/events/sched.h>

#include "ftrace_internal.h"

#ifdef CONFIG_DYNAMIC_FTRACE
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static bool kill_ftrace_graph;
int ftrace_graph_active;

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
	return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
	kill_ftrace_graph = true;
}
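
/*
 * Illustrative sketch, not part of the upstream file: critical paths are
 * expected to consult ftrace_graph_is_dead() before touching the shadow
 * stack. The helper below is hypothetical and only shows the intended
 * calling convention; the real callers are the arch entry/return paths
 * and ftrace_push_return_trace() further down.
 */
static inline bool example_fgraph_path_allowed(void)
{
	/* Once ftrace_graph_stop() has run, stay out of the way for good. */
	if (unlikely(ftrace_graph_is_dead()))
		return false;

	/* A task without a shadow stack cannot be traced either. */
	return current->ret_stack != NULL;
}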

/* Add a function return address to the trace stack on thread info. */
static int
ftrace_push_return_trace(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	unsigned long long calltime;
	int index;

	if (unlikely(ftrace_graph_is_dead()))
		return -EBUSY;

	if (!current->ret_stack)
		return -EBUSY;

	/*
	 * We must make sure the ret_stack is tested before we read
	 * anything else.
	 */
	smp_rmb();

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	calltime = trace_clock_local();

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = calltime;
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	current->ret_stack[index].fp = frame_pointer;
#endif
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
	current->ret_stack[index].retp = retp;
#endif
	return 0;
}

/*
 * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
 * functions. But those archs currently don't support direct functions
 * anyway, and ftrace_find_rec_direct() is just a stub for them.
 * Define MCOUNT_INSN_SIZE to keep those archs compiling.
 */
#ifndef MCOUNT_INSN_SIZE
/* Make sure this only works without direct calls */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

int function_graph_enter(unsigned long ret, unsigned long func,
			 unsigned long frame_pointer, unsigned long *retp)
{
	struct ftrace_graph_ent trace;

	/*
	 * Skip graph tracing if the return location is served by a direct
	 * trampoline, since the call sequence and return addresses are
	 * unpredictable anyway. For example, a BPF trampoline may call the
	 * original function and may skip frames depending on the type of
	 * BPF programs attached.
	 */
	if (ftrace_direct_func_count &&
	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
		return -EBUSY;
	trace.func = func;
	trace.depth = ++current->curr_ret_depth;

	if (ftrace_push_return_trace(ret, func, frame_pointer, retp))
		goto out;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace))
		goto out_ret;

	return 0;
 out_ret:
	current->curr_ret_stack--;
 out:
	current->curr_ret_depth--;
	return -EBUSY;
}
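
/*
 * Illustrative sketch, not part of the upstream file: an architecture's
 * mcount/fentry hook drives function_graph_enter() and, only on success,
 * rewrites the saved return address so the traced function returns into
 * return_to_handler. The name and calling convention below are a loose,
 * arch-neutral approximation rather than a real arch implementation.
 */
static void example_prepare_ftrace_return(unsigned long ip, unsigned long *parent,
					  unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long)&return_to_handler;

	if (!function_graph_enter(*parent, ip, frame_pointer, parent))
		*parent = return_hooker;	/* hook the return path */
}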

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
			unsigned long frame_pointer)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)panic;
		return;
	}

#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
	/*
	 * The arch may choose to record the frame pointer used
	 * and check it here to make sure that it is what we expect it
	 * to be. If gcc does not set the placeholder of the return
	 * address in the frame pointer, and does a copy instead, then
	 * the function graph trace will fail. This test detects this
	 * case.
	 *
	 * Currently, x86_32 with optimize for size (-Os) makes the latest
	 * gcc do the above.
	 *
	 * Note, -mfentry does not use frame pointers, and this test
	 * is not needed if CC_USING_FENTRY is set.
	 */
	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
		ftrace_graph_stop();
		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
		     "  from func %ps return to %lx\n",
		     current->ret_stack[index].fp,
		     frame_pointer,
		     (void *)current->ret_stack[index].func,
		     current->ret_stack[index].ret);
		*ret = (unsigned long)panic;
		return;
	}
#endif

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = current->curr_ret_depth--;
	/*
	 * We still want to trace interrupts coming in if
	 * max_depth is set to 1. Make sure the decrement is
	 * seen before ftrace_graph_return.
	 */
	barrier();
}

/*
 * Hibernation protection.
 * The state of the current task is too unstable during
 * suspend/restore to disk. We want to protect against that.
 */
static int
ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
							void *unused)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
		pause_graph_tracing();
		break;

	case PM_POST_HIBERNATION:
		unpause_graph_tracing();
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block ftrace_suspend_notifier = {
	.notifier_call = ftrace_suspend_notifier_call,
};

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
	trace.rettime = trace_clock_local();
	ftrace_graph_return(&trace);
	/*
	 * The ftrace_graph_return() may still access the current
	 * ret_stack structure; we need to make sure the update of
	 * curr_ret_stack is after that.
	 */
	barrier();
	current->curr_ret_stack--;

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)panic;
	}

	return ret;
}

/**
 * ftrace_graph_get_ret_stack - return the entry of the shadow stack
 * @task: The task to read the shadow stack from
 * @idx: Index down the shadow stack
 *
 * Return the ret_stack entry on the shadow stack of @task at the
 * call graph depth @idx, counting from zero. If @idx is zero, it
 * will return the last saved ret_stack entry. If it is greater than
 * zero, it will return the corresponding ret_stack for the depth
 * of saved return addresses.
 */
struct ftrace_ret_stack *
ftrace_graph_get_ret_stack(struct task_struct *task, int idx)
{
	idx = task->curr_ret_stack - idx;

	if (idx >= 0 && idx <= task->curr_ret_stack)
		return &task->ret_stack[idx];

	return NULL;
}
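
/*
 * Illustrative sketch, not part of the upstream file: callers such as arch
 * stack dumpers walk the shadow stack by increasing @idx until the helper
 * above returns NULL. The function below is hypothetical and merely prints
 * the saved call chain of a task.
 */
static void example_dump_shadow_stack(struct task_struct *task)
{
	struct ftrace_ret_stack *ret_stack;
	int idx = 0;

	while ((ret_stack = ftrace_graph_get_ret_stack(task, idx++)))
		pr_info("depth %d: %ps returns to %pS\n", idx - 1,
			(void *)ret_stack->func, (void *)ret_stack->ret);
}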

/**
 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 *			   to its original value
 *
 * This function can be called by stack unwinding code to convert a found stack
 * return address ('ret') to its original value, in case the function graph
 * tracer has modified it to be 'return_to_handler'. If the address hasn't
 * been modified, the unchanged value of 'ret' is returned.
 *
 * 'idx' is a state variable which should be initialized by the caller to zero
 * before the first call.
 *
 * 'retp' is a pointer to the return address on the stack. It's ignored if
 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 */
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int index = task->curr_ret_stack;
	int i;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	if (index < 0)
		return ret;

	for (i = 0; i <= index; i++)
		if (task->ret_stack[i].retp == retp)
			return task->ret_stack[i].ret;

	return ret;
}
#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
				    unsigned long ret, unsigned long *retp)
{
	int task_idx;

	if (ret != (unsigned long)dereference_kernel_function_descriptor(return_to_handler))
		return ret;

	task_idx = task->curr_ret_stack;

	if (!task->ret_stack || task_idx < *idx)
		return ret;

	task_idx -= *idx;
	(*idx)++;

	return task->ret_stack[task_idx].ret;
}
#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
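
/*
 * Illustrative sketch, not part of the upstream file: a stack unwinder keeps
 * one state variable per unwind, starts it at zero, and filters every return
 * address it finds through ftrace_graph_ret_addr(). The helper below is a
 * hypothetical walker over an array of saved return addresses, not a real
 * unwinder.
 */
static void example_walk_return_addresses(struct task_struct *task,
					  unsigned long *frames, int nr_frames)
{
	int graph_idx = 0;	/* must be zero before the first call */
	int i;

	for (i = 0; i < nr_frames; i++) {
		unsigned long real = ftrace_graph_ret_addr(task, &graph_idx,
							   frames[i], &frames[i]);

		pr_info("frame %d: %pS\n", i, (void *)real);
	}
}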

static struct ftrace_ops graph_ops = {
	.func			= ftrace_stub,
	.flags			= FTRACE_OPS_FL_INITIALIZED |
				   FTRACE_OPS_FL_PID |
				   FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline		= FTRACE_GRAPH_TRAMP_ADDR,
	/* trampoline_size is only needed for dynamically allocated tramps */
#endif
	ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
};

void ftrace_graph_sleep_time_control(bool enable)
{
	fgraph_sleep_time = enable;
}

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
	return 0;
}

/*
 * Simply points to ftrace_stub, but with the proper protocol.
 * Defined by the linker script in linux/vmlinux.lds.h
 */
extern void ftrace_stub_graph(struct ftrace_graph_ret *);

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return = ftrace_stub_graph;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array to FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
	int i;
	int ret = 0;
	int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
	struct task_struct *g, *t;

	for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
		ret_stack_list[i] =
			kmalloc_array(FTRACE_RETFUNC_DEPTH,
				      sizeof(struct ftrace_ret_stack),
				      GFP_KERNEL);
		if (!ret_stack_list[i]) {
			start = 0;
			end = i;
			ret = -ENOMEM;
			goto free;
		}
	}

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (start == end) {
			ret = -EAGAIN;
			goto unlock;
		}

		if (t->ret_stack == NULL) {
			atomic_set(&t->trace_overrun, 0);
			t->curr_ret_stack = -1;
			t->curr_ret_depth = -1;
			/* Make sure the tasks see the -1 first: */
			smp_wmb();
			t->ret_stack = ret_stack_list[start++];
		}
	}

unlock:
	rcu_read_unlock();
free:
	for (i = start; i < end; i++)
		kfree(ret_stack_list[i]);
	return ret;
}

static void
ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
			struct task_struct *prev, struct task_struct *next)
{
	unsigned long long timestamp;
	int index;

	/*
	 * Does the user want to count the time a function was asleep?
	 * If so, do not update the time stamps.
	 */
	if (fgraph_sleep_time)
		return;

	timestamp = trace_clock_local();

	prev->ftrace_timestamp = timestamp;

	/* only process tasks that we timestamped */
	if (!next->ftrace_timestamp)
		return;

	/*
	 * Update all the counters in next to make up for the
	 * time next was sleeping.
	 */
	timestamp -= next->ftrace_timestamp;

	for (index = next->curr_ret_stack; index >= 0; index--)
		next->ret_stack[index].calltime += timestamp;
}

static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
{
	if (!ftrace_ops_test(&global_ops, trace->func, NULL))
		return 0;
	return __ftrace_graph_entry(trace);
}

/*
 * The function graph tracer should only trace the functions defined
 * by set_ftrace_filter and set_ftrace_notrace. If another function
 * tracer ops is registered, the graph tracer needs to test each
 * function against the global ops, and not just trace any function
 * that any registered ftrace_ops traces.
 */
void update_function_graph_func(void)
{
	struct ftrace_ops *op;
	bool do_test = false;

	/*
	 * The graph and global ops share the same set of functions
	 * to test. If any other ops is on the list, then
	 * the graph tracing needs to test if it's the function
	 * it should call.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op != &global_ops && op != &graph_ops &&
		    op != &ftrace_list_end) {
			do_test = true;
			/* in double loop, break out with goto */
			goto out;
		}
	} while_for_each_ftrace_op(op);
 out:
	if (do_test)
		ftrace_graph_entry = ftrace_graph_entry_test;
	else
		ftrace_graph_entry = __ftrace_graph_entry;
}

static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);

static void
graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
{
	atomic_set(&t->trace_overrun, 0);
	t->ftrace_timestamp = 0;
	/* make curr_ret_stack visible before we add the ret_stack */
	smp_wmb();
	t->ret_stack = ret_stack;
}

/*
 * Allocate a return stack for the idle task. May be the first
 * time through, or it may be done by CPU hotplug online.
 */
void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
{
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;
	/*
	 * The idle task has no parent; it either has its own
	 * stack or no stack at all.
	 */
	if (t->ret_stack)
		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = per_cpu(idle_ret_stack, cpu);
		if (!ret_stack) {
			ret_stack =
				kmalloc_array(FTRACE_RETFUNC_DEPTH,
					      sizeof(struct ftrace_ret_stack),
					      GFP_KERNEL);
			if (!ret_stack)
				return;
			per_cpu(idle_ret_stack, cpu) = ret_stack;
		}
		graph_init_task(t, ret_stack);
	}
}

/* Allocate a return stack for a newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
	/* Make sure we do not use the parent ret_stack */
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	t->curr_ret_depth = -1;

	if (ftrace_graph_active) {
		struct ftrace_ret_stack *ret_stack;

		ret_stack = kmalloc_array(FTRACE_RETFUNC_DEPTH,
					  sizeof(struct ftrace_ret_stack),
					  GFP_KERNEL);
		if (!ret_stack)
			return;
		graph_init_task(t, ret_stack);
	}
}

void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack	*ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	/* NULL must become visible to IRQs before we free it: */
	barrier();

	kfree(ret_stack);
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
	struct ftrace_ret_stack **ret_stack_list;
	int ret, cpu;

	ret_stack_list = kmalloc_array(FTRACE_RETSTACK_ALLOC_SIZE,
				       sizeof(struct ftrace_ret_stack *),
				       GFP_KERNEL);

	if (!ret_stack_list)
		return -ENOMEM;

	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}

	do {
		ret = alloc_retstack_tasklist(ret_stack_list);
	} while (ret == -EAGAIN);

	if (!ret) {
		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
		if (ret)
			pr_info("ftrace_graph: Couldn't activate tracepoint"
				" probe to kernel_sched_switch\n");
	}

	kfree(ret_stack_list);
	return ret;
}

int register_ftrace_graph(struct fgraph_ops *gops)
{
	int ret = 0;

	mutex_lock(&ftrace_lock);

	/* we currently allow only one tracer registered at a time */
	if (ftrace_graph_active) {
		ret = -EBUSY;
		goto out;
	}

	register_pm_notifier(&ftrace_suspend_notifier);

	ftrace_graph_active++;
	ret = start_graph_tracing();
	if (ret) {
		ftrace_graph_active--;
		goto out;
	}

	ftrace_graph_return = gops->retfunc;

	/*
	 * Update the indirect function to the entryfunc, and the
	 * function that gets called to the entry_test first. Then
	 * call the update fgraph entry function to determine if
	 * the entryfunc should be called directly or not.
	 */
	__ftrace_graph_entry = gops->entryfunc;
	ftrace_graph_entry = ftrace_graph_entry_test;
	update_function_graph_func();

	ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
out:
	mutex_unlock(&ftrace_lock);
	return ret;
}
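
/*
 * Illustrative sketch, not part of the upstream file: a graph tracer supplies
 * one entry/return callback pair through struct fgraph_ops and registers it
 * with register_ftrace_graph(). The callbacks and init function below are
 * hypothetical; the function_graph tracer in trace_functions_graph.c is the
 * real in-tree user.
 */
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* non-zero: record this function's entry and return */
}

static void example_graph_return(struct ftrace_graph_ret *trace)
{
	/* consume the entry/return pair, e.g. write it to a ring buffer */
}

static struct fgraph_ops example_fgraph_ops = {
	.entryfunc	= example_graph_entry,
	.retfunc	= example_graph_return,
};

static int __init example_fgraph_init(void)
{
	/* Fails with -EBUSY if another graph tracer is already registered. */
	return register_ftrace_graph(&example_fgraph_ops);
}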

void unregister_ftrace_graph(struct fgraph_ops *gops)
{
	mutex_lock(&ftrace_lock);

	if (unlikely(!ftrace_graph_active))
		goto out;

	ftrace_graph_active--;
	ftrace_graph_return = ftrace_stub_graph;
	ftrace_graph_entry = ftrace_graph_entry_stub;
	__ftrace_graph_entry = ftrace_graph_entry_stub;
	ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
	unregister_pm_notifier(&ftrace_suspend_notifier);
	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);

 out:
	mutex_unlock(&ftrace_lock);
}