v4.6
   1/*
   2 *
   3 * Function graph tracer.
   4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   5 * Mostly borrowed from function tracer which
   6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   7 *
   8 */
   9#include <linux/uaccess.h>
  10#include <linux/ftrace.h>
  11#include <linux/interrupt.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14
  15#include "trace.h"
  16#include "trace_output.h"
  17
  18static bool kill_ftrace_graph;
  19
  20/**
  21 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
  22 *
  23 * ftrace_graph_stop() is called when a severe error is detected in
  24 * the function graph tracing. This function is called by the critical
  25 * paths of function graph to keep those paths from doing any more harm.
  26 */
  27bool ftrace_graph_is_dead(void)
  28{
  29	return kill_ftrace_graph;
  30}
  31
  32/**
   33 * ftrace_graph_stop - set to permanently disable function graph tracing
  34 *
   35 * In case of an error in function graph tracing, this is called
   36 * to try to keep function graph tracing from causing any more harm.
   37 * Usually the error is severe, and this is called to at least
   38 * get a warning out to the user.
  39 */
  40void ftrace_graph_stop(void)
  41{
  42	kill_ftrace_graph = true;
  43}
  44
  45/* When set, irq functions will be ignored */
  46static int ftrace_graph_skip_irqs;
  47
  48struct fgraph_cpu_data {
  49	pid_t		last_pid;
  50	int		depth;
  51	int		depth_irq;
  52	int		ignore;
  53	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
  54};
  55
  56struct fgraph_data {
  57	struct fgraph_cpu_data __percpu *cpu_data;
  58
  59	/* Place to preserve last processed entry. */
  60	struct ftrace_graph_ent_entry	ent;
  61	struct ftrace_graph_ret_entry	ret;
  62	int				failed;
  63	int				cpu;
  64};
  65
  66#define TRACE_GRAPH_INDENT	2
  67
  68static unsigned int max_depth;
  69
  70static struct tracer_opt trace_opts[] = {
  71	/* Display overruns? (for self-debug purpose) */
  72	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
  73	/* Display CPU ? */
  74	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
  75	/* Display Overhead ? */
  76	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  77	/* Display proc name/pid */
  78	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  79	/* Display duration of execution */
  80	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  81	/* Display absolute time of an entry */
  82	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  83	/* Display interrupts */
  84	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  85	/* Display function name after trailing } */
  86	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  87	/* Include sleep time (scheduled out) between entry and return */
  88	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
  89	/* Include time within nested functions */
  90	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
  91	{ } /* Empty entry */
  92};
  93
  94static struct tracer_flags tracer_flags = {
  95	/* Don't display overruns, proc, or tail by default */
  96	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  97	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  98	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
  99	.opts = trace_opts
 100};
 101
 102static struct trace_array *graph_array;
 103
  104/*
  105 * The DURATION column is also used to display IRQ signs;
  106 * the following values are used by print_graph_irq() and
  107 * others to fill in space in the DURATION column.
  108 */
 109enum {
 110	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 111	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 112	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 113};
 114
 115static void
 116print_graph_duration(struct trace_array *tr, unsigned long long duration,
 117		     struct trace_seq *s, u32 flags);
 118
 119/* Add a function return address to the trace stack on thread info.*/
 120int
 121ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 122			 unsigned long frame_pointer)
 123{
 124	unsigned long long calltime;
 125	int index;
 126
 127	if (unlikely(ftrace_graph_is_dead()))
 128		return -EBUSY;
 129
 130	if (!current->ret_stack)
 131		return -EBUSY;
 132
 133	/*
 134	 * We must make sure the ret_stack is tested before we read
 135	 * anything else.
 136	 */
 137	smp_rmb();
 138
 139	/* The return trace stack is full */
 140	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 141		atomic_inc(&current->trace_overrun);
 142		return -EBUSY;
 143	}
 144
  145	/*
  146	 * The curr_ret_stack is an index into the ftrace return stack
  147	 * of the current task.  Its value should be in the range
  148	 * [0, FTRACE_RETFUNC_DEPTH) when the function graph tracer is
  149	 * used.  To support filtering out specific functions, the index
  150	 * is made negative by subtracting a huge value
  151	 * (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a negative
  152	 * index it will ignore the record.  The index is recovered on
  153	 * return from the filtered function by adding the offset back,
  154	 * after which recording continues normally.
  155	 *
  156	 * The curr_ret_stack is initialized to -1 and gets increased in
  157	 * this function, so it can be less than -1 only if the function
  158	 * was filtered out via ftrace_graph_notrace_addr(), which can
  159	 * be set from the set_graph_notrace file in tracefs by the user.
  160	 */
 161	if (current->curr_ret_stack < -1)
 162		return -EBUSY;
 163
 164	calltime = trace_clock_local();
 165
 166	index = ++current->curr_ret_stack;
 167	if (ftrace_graph_notrace_addr(func))
 168		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 169	barrier();
 170	current->ret_stack[index].ret = ret;
 171	current->ret_stack[index].func = func;
 172	current->ret_stack[index].calltime = calltime;
 173	current->ret_stack[index].subtime = 0;
 174	current->ret_stack[index].fp = frame_pointer;
 175	*depth = current->curr_ret_stack;
 176
 177	return 0;
 178}
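
The offset trick described in the comment above is easiest to see with concrete numbers. A minimal user-space sketch (the constant is an invented stand-in for FTRACE_NOTRACE_DEPTH; plain ints stand in for the task's fields):

#include <stdio.h>

#define NOTRACE_DEPTH 10000		/* stand-in for FTRACE_NOTRACE_DEPTH */

int main(void)
{
	int curr = -1;			/* curr_ret_stack starts at -1 */

	/* Entry into a function filtered by set_graph_notrace: */
	int index = ++curr;		/* index = 0: the real stack slot */
	curr -= NOTRACE_DEPTH;		/* curr = -10000: "ignore me" */

	/* Return from that function (see ftrace_pop_return_trace): */
	int pop = curr;			/* -10000 */
	if (pop < 0)
		pop += NOTRACE_DEPTH;	/* recover the real slot: 0 */
	curr--;				/* -10001 */
	if (curr < -1)
		curr += NOTRACE_DEPTH;	/* back to -1: tracing continues */

	printf("slot=%d recovered=%d final=%d\n", index, pop, curr);
	return 0;
}
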
 179
  180/* Retrieve a function return address from the trace stack on thread info. */
 181static void
 182ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 183			unsigned long frame_pointer)
 184{
 185	int index;
 186
 187	index = current->curr_ret_stack;
 188
 189	/*
 190	 * A negative index here means that it's just returned from a
 191	 * notrace'd function.  Recover index to get an original
 192	 * return address.  See ftrace_push_return_trace().
 193	 *
 194	 * TODO: Need to check whether the stack gets corrupted.
 195	 */
 196	if (index < 0)
 197		index += FTRACE_NOTRACE_DEPTH;
 198
 199	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 200		ftrace_graph_stop();
 201		WARN_ON(1);
  202		/* Might as well panic, otherwise we have nowhere to go */
 203		*ret = (unsigned long)panic;
 204		return;
 205	}
 206
 207#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
 208	/*
 209	 * The arch may choose to record the frame pointer used
 210	 * and check it here to make sure that it is what we expect it
 211	 * to be. If gcc does not set the place holder of the return
 212	 * address in the frame pointer, and does a copy instead, then
 213	 * the function graph trace will fail. This test detects this
 214	 * case.
 215	 *
 216	 * Currently, x86_32 with optimize for size (-Os) makes the latest
 217	 * gcc do the above.
 218	 *
 219	 * Note, -mfentry does not use frame pointers, and this test
 220	 *  is not needed if CC_USING_FENTRY is set.
 221	 */
 222	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
 223		ftrace_graph_stop();
 224		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
 225		     "  from func %ps return to %lx\n",
 226		     current->ret_stack[index].fp,
 227		     frame_pointer,
 228		     (void *)current->ret_stack[index].func,
 229		     current->ret_stack[index].ret);
 230		*ret = (unsigned long)panic;
 231		return;
 232	}
 233#endif
 234
 235	*ret = current->ret_stack[index].ret;
 236	trace->func = current->ret_stack[index].func;
 237	trace->calltime = current->ret_stack[index].calltime;
 238	trace->overrun = atomic_read(&current->trace_overrun);
 239	trace->depth = index;
 240}
 241
 242/*
 243 * Send the trace to the ring-buffer.
 244 * @return the original return address.
 245 */
 246unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 247{
 248	struct ftrace_graph_ret trace;
 249	unsigned long ret;
 250
 251	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 252	trace.rettime = trace_clock_local();
 253	barrier();
 254	current->curr_ret_stack--;
 255	/*
 256	 * The curr_ret_stack can be less than -1 only if it was
 257	 * filtered out and it's about to return from the function.
 258	 * Recover the index and continue to trace normal functions.
 259	 */
 260	if (current->curr_ret_stack < -1) {
 261		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
 262		return ret;
 263	}
 264
 265	/*
 266	 * The trace should run after decrementing the ret counter
 267	 * in case an interrupt were to come in. We don't want to
 268	 * lose the interrupt if max_depth is set.
 269	 */
 270	ftrace_graph_return(&trace);
 271
 272	if (unlikely(!ret)) {
 273		ftrace_graph_stop();
 274		WARN_ON(1);
 275		/* Might as well panic. What else to do? */
 276		ret = (unsigned long)panic;
 277	}
 278
 279	return ret;
 280}
 281
 282int __trace_graph_entry(struct trace_array *tr,
 283				struct ftrace_graph_ent *trace,
 284				unsigned long flags,
 285				int pc)
 286{
 287	struct trace_event_call *call = &event_funcgraph_entry;
 288	struct ring_buffer_event *event;
 289	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 290	struct ftrace_graph_ent_entry *entry;
 291
 292	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 293					  sizeof(*entry), flags, pc);
 294	if (!event)
 295		return 0;
 296	entry	= ring_buffer_event_data(event);
 297	entry->graph_ent			= *trace;
 298	if (!call_filter_check_discard(call, entry, buffer, event))
 299		__buffer_unlock_commit(buffer, event);
 300
 301	return 1;
 302}
 303
 304static inline int ftrace_graph_ignore_irqs(void)
 305{
 306	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 307		return 0;
 308
 309	return in_irq();
 310}
 311
 312int trace_graph_entry(struct ftrace_graph_ent *trace)
 313{
 314	struct trace_array *tr = graph_array;
 315	struct trace_array_cpu *data;
 316	unsigned long flags;
 317	long disabled;
 318	int ret;
 319	int cpu;
 320	int pc;
 321
 322	if (!ftrace_trace_task(current))
 323		return 0;
 324
  325	/* Trace it when it is nested in, or is, an enabled function. */
 326	if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
 327	     ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
 328	    (max_depth && trace->depth >= max_depth))
 329		return 0;
 330
 331	/*
 332	 * Do not trace a function if it's filtered by set_graph_notrace.
 333	 * Make the index of ret stack negative to indicate that it should
 334	 * ignore further functions.  But it needs its own ret stack entry
 335	 * to recover the original index in order to continue tracing after
 336	 * returning from the function.
 337	 */
 338	if (ftrace_graph_notrace_addr(trace->func))
 339		return 1;
 340
 341	local_irq_save(flags);
 342	cpu = raw_smp_processor_id();
 343	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 344	disabled = atomic_inc_return(&data->disabled);
 345	if (likely(disabled == 1)) {
 346		pc = preempt_count();
 347		ret = __trace_graph_entry(tr, trace, flags, pc);
 348	} else {
 349		ret = 0;
 350	}
 351
 352	atomic_dec(&data->disabled);
 353	local_irq_restore(flags);
 354
 355	return ret;
 356}
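
The atomic_inc_return() == 1 test above is a common per-CPU reentrancy guard: only the outermost caller on a CPU, the one that saw the counter go from 0 to 1, writes an event. A rough user-space sketch of the pattern (a plain int stands in for the per-CPU atomic, so this is illustrative only, not interrupt-safe):

#include <stdio.h>

static int disabled;			/* stand-in for data->disabled */

static void maybe_record(const char *who)
{
	int d = ++disabled;		/* atomic_inc_return() analogue */

	if (d == 1)
		printf("%s: recorded\n", who);
	else
		printf("%s: nested, skipped\n", who);
	--disabled;			/* atomic_dec() analogue */
}

int main(void)
{
	maybe_record("outer");		/* counter 0 -> 1: recorded */
	++disabled;			/* simulate entering while busy */
	maybe_record("inner");		/* counter 1 -> 2: skipped */
	--disabled;
	return 0;
}
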
 357
 358static int trace_graph_thresh_entry(struct ftrace_graph_ent *trace)
 359{
 360	if (tracing_thresh)
 361		return 1;
 362	else
 363		return trace_graph_entry(trace);
 364}
 365
 366static void
 367__trace_graph_function(struct trace_array *tr,
 368		unsigned long ip, unsigned long flags, int pc)
 369{
 370	u64 time = trace_clock_local();
 371	struct ftrace_graph_ent ent = {
 372		.func  = ip,
 373		.depth = 0,
 374	};
 375	struct ftrace_graph_ret ret = {
 376		.func     = ip,
 377		.depth    = 0,
 378		.calltime = time,
 379		.rettime  = time,
 380	};
 381
 382	__trace_graph_entry(tr, &ent, flags, pc);
 383	__trace_graph_return(tr, &ret, flags, pc);
 384}
 385
 386void
 387trace_graph_function(struct trace_array *tr,
 388		unsigned long ip, unsigned long parent_ip,
 389		unsigned long flags, int pc)
 390{
 391	__trace_graph_function(tr, ip, flags, pc);
 392}
 393
 394void __trace_graph_return(struct trace_array *tr,
 395				struct ftrace_graph_ret *trace,
 396				unsigned long flags,
 397				int pc)
 398{
 399	struct trace_event_call *call = &event_funcgraph_exit;
 400	struct ring_buffer_event *event;
 401	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 402	struct ftrace_graph_ret_entry *entry;
 403
 404	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 405					  sizeof(*entry), flags, pc);
 406	if (!event)
 407		return;
 408	entry	= ring_buffer_event_data(event);
 409	entry->ret				= *trace;
 410	if (!call_filter_check_discard(call, entry, buffer, event))
 411		__buffer_unlock_commit(buffer, event);
 412}
 413
 414void trace_graph_return(struct ftrace_graph_ret *trace)
 415{
 416	struct trace_array *tr = graph_array;
 417	struct trace_array_cpu *data;
 418	unsigned long flags;
 419	long disabled;
 420	int cpu;
 421	int pc;
 422
 423	local_irq_save(flags);
 424	cpu = raw_smp_processor_id();
 425	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 426	disabled = atomic_inc_return(&data->disabled);
 427	if (likely(disabled == 1)) {
 428		pc = preempt_count();
 429		__trace_graph_return(tr, trace, flags, pc);
 430	}
 431	atomic_dec(&data->disabled);
 432	local_irq_restore(flags);
 433}
 434
 435void set_graph_array(struct trace_array *tr)
 436{
 437	graph_array = tr;
 438
 439	/* Make graph_array visible before we start tracing */
 440
 441	smp_mb();
 442}
 443
 444static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 445{
 446	if (tracing_thresh &&
 447	    (trace->rettime - trace->calltime < tracing_thresh))
 448		return;
 449	else
 450		trace_graph_return(trace);
 451}
 452
 453static int graph_trace_init(struct trace_array *tr)
 454{
 455	int ret;
 456
 457	set_graph_array(tr);
 458	if (tracing_thresh)
 459		ret = register_ftrace_graph(&trace_graph_thresh_return,
 460					    &trace_graph_thresh_entry);
 461	else
 462		ret = register_ftrace_graph(&trace_graph_return,
 463					    &trace_graph_entry);
 464	if (ret)
 465		return ret;
 466	tracing_start_cmdline_record();
 467
 468	return 0;
 469}
 470
 471static void graph_trace_reset(struct trace_array *tr)
 472{
 473	tracing_stop_cmdline_record();
 474	unregister_ftrace_graph();
 475}
 476
 477static int graph_trace_update_thresh(struct trace_array *tr)
 478{
 479	graph_trace_reset(tr);
 480	return graph_trace_init(tr);
 481}
 482
 483static int max_bytes_for_cpu;
 484
 485static void print_graph_cpu(struct trace_seq *s, int cpu)
 486{
 487	/*
 488	 * Start with a space character - to make it stand out
 489	 * to the right a bit when trace output is pasted into
 490	 * email:
 491	 */
 492	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 493}
 494
 495#define TRACE_GRAPH_PROCINFO_LENGTH	14
 496
 497static void print_graph_proc(struct trace_seq *s, pid_t pid)
 498{
 499	char comm[TASK_COMM_LEN];
 500	/* sign + log10(MAX_INT) + '\0' */
 501	char pid_str[11];
 502	int spaces = 0;
 503	int len;
 504	int i;
 505
 506	trace_find_cmdline(pid, comm);
 507	comm[7] = '\0';
 508	sprintf(pid_str, "%d", pid);
 509
 510	/* 1 stands for the "-" character */
 511	len = strlen(comm) + strlen(pid_str) + 1;
 512
 513	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 514		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
 515
 516	/* First spaces to align center */
 517	for (i = 0; i < spaces / 2; i++)
 518		trace_seq_putc(s, ' ');
 519
 520	trace_seq_printf(s, "%s-%s", comm, pid_str);
 521
 522	/* Last spaces to align center */
 523	for (i = 0; i < spaces - (spaces / 2); i++)
 524		trace_seq_putc(s, ' ');
 525}
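
The centering arithmetic above is easier to follow with numbers: for comm "sshd" and pid 1755, len = 4 + 4 + 1 = 9, so spaces = 5, split as 2 before and 3 after. A standalone sketch of the same computation:

#include <stdio.h>
#include <string.h>

#define PROCINFO_LEN 14			/* TRACE_GRAPH_PROCINFO_LENGTH */

int main(void)
{
	const char *comm = "sshd", *pid_str = "1755";
	int len = strlen(comm) + strlen(pid_str) + 1;	/* 1 for '-' */
	int spaces = len < PROCINFO_LEN ? PROCINFO_LEN - len : 0;

	/* Prints "[  sshd-1755   ]": 2 pad spaces, then 3. */
	printf("[%*s%s-%s%*s]\n", spaces / 2, "", comm, pid_str,
	       spaces - spaces / 2, "");
	return 0;
}
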
 526
 527
 528static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 529{
 530	trace_seq_putc(s, ' ');
 531	trace_print_lat_fmt(s, entry);
 532}
 533
 534/* If the pid changed since the last trace, output this event */
 535static void
 536verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 537{
 538	pid_t prev_pid;
 539	pid_t *last_pid;
 540
 541	if (!data)
 542		return;
 543
 544	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 545
 546	if (*last_pid == pid)
 547		return;
 548
 549	prev_pid = *last_pid;
 550	*last_pid = pid;
 551
 552	if (prev_pid == -1)
 553		return;
 554/*
 555 * Context-switch trace line:
 556
 557 ------------------------------------------
 558 | 1)  migration/0--1  =>  sshd-1755
 559 ------------------------------------------
 560
 561 */
 562	trace_seq_puts(s, " ------------------------------------------\n");
 563	print_graph_cpu(s, cpu);
 564	print_graph_proc(s, prev_pid);
 565	trace_seq_puts(s, " => ");
 566	print_graph_proc(s, pid);
 567	trace_seq_puts(s, "\n ------------------------------------------\n\n");
 568}
 569
 570static struct ftrace_graph_ret_entry *
 571get_return_for_leaf(struct trace_iterator *iter,
 572		struct ftrace_graph_ent_entry *curr)
 573{
 574	struct fgraph_data *data = iter->private;
 575	struct ring_buffer_iter *ring_iter = NULL;
 576	struct ring_buffer_event *event;
 577	struct ftrace_graph_ret_entry *next;
 578
 579	/*
 580	 * If the previous output failed to write to the seq buffer,
 581	 * then we just reuse the data from before.
 582	 */
 583	if (data && data->failed) {
 584		curr = &data->ent;
 585		next = &data->ret;
 586	} else {
 587
 588		ring_iter = trace_buffer_iter(iter, iter->cpu);
 589
 590		/* First peek to compare current entry and the next one */
 591		if (ring_iter)
 592			event = ring_buffer_iter_peek(ring_iter, NULL);
 593		else {
 594			/*
 595			 * We need to consume the current entry to see
 596			 * the next one.
 597			 */
 598			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 599					    NULL, NULL);
 600			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 601						 NULL, NULL);
 602		}
 603
 604		if (!event)
 605			return NULL;
 606
 607		next = ring_buffer_event_data(event);
 608
 609		if (data) {
 610			/*
 611			 * Save current and next entries for later reference
 612			 * if the output fails.
 613			 */
 614			data->ent = *curr;
 615			/*
 616			 * If the next event is not a return type, then
 617			 * we only care about what type it is. Otherwise we can
 618			 * safely copy the entire event.
 619			 */
 620			if (next->ent.type == TRACE_GRAPH_RET)
 621				data->ret = *next;
 622			else
 623				data->ret.ent.type = next->ent.type;
 624		}
 625	}
 626
 627	if (next->ent.type != TRACE_GRAPH_RET)
 628		return NULL;
 629
 630	if (curr->ent.pid != next->ent.pid ||
 631			curr->graph_ent.func != next->ret.func)
 632		return NULL;
 633
 634	/* this is a leaf, now advance the iterator */
 635	if (ring_iter)
 636		ring_buffer_read(ring_iter, NULL);
 637
 638	return next;
 639}
 640
 641static void print_graph_abs_time(u64 t, struct trace_seq *s)
 642{
 643	unsigned long usecs_rem;
 644
 645	usecs_rem = do_div(t, NSEC_PER_SEC);
 646	usecs_rem /= 1000;
 647
 648	trace_seq_printf(s, "%5lu.%06lu |  ",
 649			 (unsigned long)t, usecs_rem);
 650}
 651
 652static void
 653print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 654		enum trace_type type, int cpu, pid_t pid, u32 flags)
 655{
 656	struct trace_array *tr = iter->tr;
 657	struct trace_seq *s = &iter->seq;
 658	struct trace_entry *ent = iter->ent;
 659
 660	if (addr < (unsigned long)__irqentry_text_start ||
 661		addr >= (unsigned long)__irqentry_text_end)
 662		return;
 663
 664	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 665		/* Absolute time */
 666		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 667			print_graph_abs_time(iter->ts, s);
 668
 669		/* Cpu */
 670		if (flags & TRACE_GRAPH_PRINT_CPU)
 671			print_graph_cpu(s, cpu);
 672
 673		/* Proc */
 674		if (flags & TRACE_GRAPH_PRINT_PROC) {
 675			print_graph_proc(s, pid);
 676			trace_seq_puts(s, " | ");
 677		}
 678
 679		/* Latency format */
 680		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 681			print_graph_lat_fmt(s, ent);
 682	}
 683
 684	/* No overhead */
 685	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 686
 687	if (type == TRACE_GRAPH_ENT)
 688		trace_seq_puts(s, "==========>");
 689	else
 690		trace_seq_puts(s, "<==========");
 691
 692	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 693	trace_seq_putc(s, '\n');
 694}
 695
 696void
 697trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 698{
 699	unsigned long nsecs_rem = do_div(duration, 1000);
 700	/* log10(ULONG_MAX) + '\0' */
 701	char usecs_str[21];
 702	char nsecs_str[5];
 703	int len;
 704	int i;
 705
 706	sprintf(usecs_str, "%lu", (unsigned long) duration);
 707
  708	/* Print usecs */
 709	trace_seq_printf(s, "%s", usecs_str);
 710
 711	len = strlen(usecs_str);
 712
  713	/* Print nsecs (we don't want to exceed 7 digits) */
 714	if (len < 7) {
 715		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
 716
 717		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 718		trace_seq_printf(s, ".%s", nsecs_str);
 719		len += strlen(nsecs_str) + 1;
 720	}
 721
 722	trace_seq_puts(s, " us ");
 723
 724	/* Print remaining spaces to fit the row's width */
 725	for (i = len; i < 8; i++)
 726		trace_seq_putc(s, ' ');
 727}
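
As a worked example, a duration of 12345 ns leaves nsecs_rem = 345 and duration = 12, printing "12.345 us". A simplified standalone sketch (the kernel version above additionally caps the digit count and pads the column to eight characters):

#include <stdio.h>

int main(void)
{
	unsigned long long duration = 12345;		/* nanoseconds */
	unsigned long nsecs_rem = duration % 1000;	/* do_div() analogue */

	duration /= 1000;				/* now microseconds */
	printf("%llu.%03lu us\n", duration, nsecs_rem);	/* "12.345 us" */
	return 0;
}
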
 728
 729static void
 730print_graph_duration(struct trace_array *tr, unsigned long long duration,
 731		     struct trace_seq *s, u32 flags)
 732{
 733	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
 734	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 735		return;
 736
  737	/* No real data, just filling the column with spaces */
 738	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
 739	case FLAGS_FILL_FULL:
 740		trace_seq_puts(s, "              |  ");
 741		return;
 742	case FLAGS_FILL_START:
 743		trace_seq_puts(s, "  ");
 744		return;
 745	case FLAGS_FILL_END:
 746		trace_seq_puts(s, " |");
 747		return;
 748	}
 749
  750	/* Signal an overhead of time execution to the output */
 751	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
 752		trace_seq_printf(s, "%c ", trace_find_mark(duration));
 753	else
 754		trace_seq_puts(s, "  ");
 755
 756	trace_print_graph_duration(duration, s);
 757	trace_seq_puts(s, "|  ");
 758}
 759
 760/* Case of a leaf function on its call entry */
 761static enum print_line_t
 762print_graph_entry_leaf(struct trace_iterator *iter,
 763		struct ftrace_graph_ent_entry *entry,
 764		struct ftrace_graph_ret_entry *ret_entry,
 765		struct trace_seq *s, u32 flags)
 766{
 767	struct fgraph_data *data = iter->private;
 768	struct trace_array *tr = iter->tr;
 769	struct ftrace_graph_ret *graph_ret;
 770	struct ftrace_graph_ent *call;
 771	unsigned long long duration;
 772	int i;
 773
 774	graph_ret = &ret_entry->ret;
 775	call = &entry->graph_ent;
 776	duration = graph_ret->rettime - graph_ret->calltime;
 777
 778	if (data) {
 779		struct fgraph_cpu_data *cpu_data;
 780		int cpu = iter->cpu;
 781
 782		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 783
 784		/*
 785		 * Comments display at + 1 to depth. Since
 786		 * this is a leaf function, keep the comments
 787		 * equal to this depth.
 788		 */
 789		cpu_data->depth = call->depth - 1;
 790
 791		/* No need to keep this function around for this depth */
 792		if (call->depth < FTRACE_RETFUNC_DEPTH)
 793			cpu_data->enter_funcs[call->depth] = 0;
 794	}
 795
 796	/* Overhead and duration */
 797	print_graph_duration(tr, duration, s, flags);
 798
 799	/* Function */
 800	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 801		trace_seq_putc(s, ' ');
 802
 803	trace_seq_printf(s, "%ps();\n", (void *)call->func);
 804
 805	return trace_handle_return(s);
 806}
 807
 808static enum print_line_t
 809print_graph_entry_nested(struct trace_iterator *iter,
 810			 struct ftrace_graph_ent_entry *entry,
 811			 struct trace_seq *s, int cpu, u32 flags)
 812{
 813	struct ftrace_graph_ent *call = &entry->graph_ent;
 814	struct fgraph_data *data = iter->private;
 815	struct trace_array *tr = iter->tr;
 816	int i;
 817
 818	if (data) {
 819		struct fgraph_cpu_data *cpu_data;
 820		int cpu = iter->cpu;
 821
 822		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 823		cpu_data->depth = call->depth;
 824
 825		/* Save this function pointer to see if the exit matches */
 826		if (call->depth < FTRACE_RETFUNC_DEPTH)
 827			cpu_data->enter_funcs[call->depth] = call->func;
 828	}
 829
 830	/* No time */
 831	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 832
 833	/* Function */
 834	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 835		trace_seq_putc(s, ' ');
 836
 837	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
 838
 839	if (trace_seq_has_overflowed(s))
 840		return TRACE_TYPE_PARTIAL_LINE;
 841
 842	/*
 843	 * we already consumed the current entry to check the next one
 844	 * and see if this is a leaf.
 845	 */
 846	return TRACE_TYPE_NO_CONSUME;
 847}
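
Taken together with print_graph_entry_leaf() above: a leaf call collapses to a single "func();" line carrying its duration, while a non-leaf opens a brace and gets its duration on the closing "}". Illustrative output (function names invented, spacing approximate):

 1)   0.123 us    |        leaf_func();
 1)               |        parent_func() {
 1)   0.456 us    |          child_func();
 1)   1.234 us    |        }
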
 848
 849static void
 850print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 851		     int type, unsigned long addr, u32 flags)
 852{
 853	struct fgraph_data *data = iter->private;
 854	struct trace_entry *ent = iter->ent;
 855	struct trace_array *tr = iter->tr;
 856	int cpu = iter->cpu;
 857
 858	/* Pid */
 859	verif_pid(s, ent->pid, cpu, data);
 860
 861	if (type)
 862		/* Interrupt */
 863		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 864
 865	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 866		return;
 867
 868	/* Absolute time */
 869	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 870		print_graph_abs_time(iter->ts, s);
 871
 872	/* Cpu */
 873	if (flags & TRACE_GRAPH_PRINT_CPU)
 874		print_graph_cpu(s, cpu);
 875
 876	/* Proc */
 877	if (flags & TRACE_GRAPH_PRINT_PROC) {
 878		print_graph_proc(s, ent->pid);
 879		trace_seq_puts(s, " | ");
 880	}
 881
 882	/* Latency format */
 883	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 884		print_graph_lat_fmt(s, ent);
 885
 886	return;
 887}
 888
 889/*
 890 * Entry check for irq code
 891 *
 892 * returns 1 if
 893 *  - we are inside irq code
 894 *  - we just entered irq code
 895 *
  896 * returns 0 if
  897 *  - funcgraph-irqs option is set
 898 *  - we are not inside irq code
 899 */
 900static int
 901check_irq_entry(struct trace_iterator *iter, u32 flags,
 902		unsigned long addr, int depth)
 903{
 904	int cpu = iter->cpu;
 905	int *depth_irq;
 906	struct fgraph_data *data = iter->private;
 907
 908	/*
 909	 * If we are either displaying irqs, or we got called as
 910	 * a graph event and private data does not exist,
 911	 * then we bypass the irq check.
 912	 */
 913	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 914	    (!data))
 915		return 0;
 916
 917	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 918
 919	/*
 920	 * We are inside the irq code
 921	 */
 922	if (*depth_irq >= 0)
 923		return 1;
 924
 925	if ((addr < (unsigned long)__irqentry_text_start) ||
 926	    (addr >= (unsigned long)__irqentry_text_end))
 927		return 0;
 928
 929	/*
 930	 * We are entering irq code.
 931	 */
 932	*depth_irq = depth;
 933	return 1;
 934}
 935
 936/*
 937 * Return check for irq code
 938 *
 939 * returns 1 if
 940 *  - we are inside irq code
 941 *  - we just left irq code
 942 *
 943 * returns 0 if
  944 *  - funcgraph-irqs option is set
 945 *  - we are not inside irq code
 946 */
 947static int
 948check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
 949{
 950	int cpu = iter->cpu;
 951	int *depth_irq;
 952	struct fgraph_data *data = iter->private;
 953
 954	/*
 955	 * If we are either displaying irqs, or we got called as
 956	 * a graph event and private data does not exist,
 957	 * then we bypass the irq check.
 958	 */
 959	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 960	    (!data))
 961		return 0;
 962
 963	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 964
 965	/*
 966	 * We are not inside the irq code.
 967	 */
 968	if (*depth_irq == -1)
 969		return 0;
 970
 971	/*
  972	 * We are inside the irq code, and this is a return entry.
 973	 * Let's not trace it and clear the entry depth, since
 974	 * we are out of irq code.
 975	 *
 976	 * This condition ensures that we 'leave the irq code' once
 977	 * we are out of the entry depth. Thus protecting us from
 978	 * the RETURN entry loss.
 979	 */
 980	if (*depth_irq >= depth) {
 981		*depth_irq = -1;
 982		return 1;
 983	}
 984
 985	/*
 986	 * We are inside the irq code, and this is not the entry.
 987	 */
 988	return 1;
 989}
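
The two checks above share a small per-CPU state machine around depth_irq: -1 means "not in irq code"; entering __irqentry_text latches the current depth, and everything is hidden until a return at or above that depth unlatches it. A minimal sketch of the lifecycle:

#include <stdio.h>

int main(void)
{
	int depth_irq = -1;	/* -1 == not inside irq code */

	/* check_irq_entry(): an entry at depth 2 lands inside
	 * __irqentry_text, so that depth is latched. */
	depth_irq = 2;
	printf("hide nested events: %s\n", depth_irq >= 0 ? "yes" : "no");

	/* check_irq_return(): a return at depth <= 2 unlatches. */
	int ret_depth = 2;
	if (depth_irq >= ret_depth)
		depth_irq = -1;
	printf("tracing visible again: %s\n",
	       depth_irq == -1 ? "yes" : "no");
	return 0;
}
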
 990
 991static enum print_line_t
 992print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 993			struct trace_iterator *iter, u32 flags)
 994{
 995	struct fgraph_data *data = iter->private;
 996	struct ftrace_graph_ent *call = &field->graph_ent;
 997	struct ftrace_graph_ret_entry *leaf_ret;
 998	static enum print_line_t ret;
 999	int cpu = iter->cpu;
1000
1001	if (check_irq_entry(iter, flags, call->func, call->depth))
1002		return TRACE_TYPE_HANDLED;
1003
1004	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
1005
1006	leaf_ret = get_return_for_leaf(iter, field);
1007	if (leaf_ret)
1008		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1009	else
1010		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1011
1012	if (data) {
1013		/*
1014		 * If we failed to write our output, then we need to make
 1015		 * note of it, because we already consumed our entry.
1016		 */
1017		if (s->full) {
1018			data->failed = 1;
1019			data->cpu = cpu;
1020		} else
1021			data->failed = 0;
1022	}
1023
1024	return ret;
1025}
1026
1027static enum print_line_t
1028print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1029		   struct trace_entry *ent, struct trace_iterator *iter,
1030		   u32 flags)
1031{
1032	unsigned long long duration = trace->rettime - trace->calltime;
1033	struct fgraph_data *data = iter->private;
1034	struct trace_array *tr = iter->tr;
1035	pid_t pid = ent->pid;
1036	int cpu = iter->cpu;
1037	int func_match = 1;
1038	int i;
1039
1040	if (check_irq_return(iter, flags, trace->depth))
1041		return TRACE_TYPE_HANDLED;
1042
1043	if (data) {
1044		struct fgraph_cpu_data *cpu_data;
1045		int cpu = iter->cpu;
1046
1047		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1048
1049		/*
1050		 * Comments display at + 1 to depth. This is the
1051		 * return from a function, we now want the comments
1052		 * to display at the same level of the bracket.
1053		 */
1054		cpu_data->depth = trace->depth - 1;
1055
1056		if (trace->depth < FTRACE_RETFUNC_DEPTH) {
1057			if (cpu_data->enter_funcs[trace->depth] != trace->func)
1058				func_match = 0;
1059			cpu_data->enter_funcs[trace->depth] = 0;
1060		}
1061	}
1062
1063	print_graph_prologue(iter, s, 0, 0, flags);
1064
1065	/* Overhead and duration */
1066	print_graph_duration(tr, duration, s, flags);
1067
1068	/* Closing brace */
1069	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1070		trace_seq_putc(s, ' ');
1071
1072	/*
1073	 * If the return function does not have a matching entry,
1074	 * then the entry was lost. Instead of just printing
1075	 * the '}' and letting the user guess what function this
1076	 * belongs to, write out the function name. Always do
1077	 * that if the funcgraph-tail option is enabled.
1078	 */
1079	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1080		trace_seq_puts(s, "}\n");
1081	else
1082		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1083
1084	/* Overrun */
1085	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1086		trace_seq_printf(s, " (Overruns: %lu)\n",
1087				 trace->overrun);
1088
1089	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1090			cpu, pid, flags);
1091
1092	return trace_handle_return(s);
1093}
1094
1095static enum print_line_t
1096print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1097		    struct trace_iterator *iter, u32 flags)
1098{
1099	struct trace_array *tr = iter->tr;
1100	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1101	struct fgraph_data *data = iter->private;
1102	struct trace_event *event;
1103	int depth = 0;
1104	int ret;
1105	int i;
1106
1107	if (data)
1108		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1109
1110	print_graph_prologue(iter, s, 0, 0, flags);
1111
1112	/* No time */
1113	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1114
1115	/* Indentation */
1116	if (depth > 0)
1117		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1118			trace_seq_putc(s, ' ');
1119
1120	/* The comment */
1121	trace_seq_puts(s, "/* ");
1122
1123	switch (iter->ent->type) {
1124	case TRACE_BPRINT:
1125		ret = trace_print_bprintk_msg_only(iter);
1126		if (ret != TRACE_TYPE_HANDLED)
1127			return ret;
1128		break;
1129	case TRACE_PRINT:
1130		ret = trace_print_printk_msg_only(iter);
1131		if (ret != TRACE_TYPE_HANDLED)
1132			return ret;
1133		break;
1134	default:
1135		event = ftrace_find_event(ent->type);
1136		if (!event)
1137			return TRACE_TYPE_UNHANDLED;
1138
1139		ret = event->funcs->trace(iter, sym_flags, event);
1140		if (ret != TRACE_TYPE_HANDLED)
1141			return ret;
1142	}
1143
1144	if (trace_seq_has_overflowed(s))
1145		goto out;
1146
1147	/* Strip ending newline */
1148	if (s->buffer[s->seq.len - 1] == '\n') {
1149		s->buffer[s->seq.len - 1] = '\0';
1150		s->seq.len--;
1151	}
1152
1153	trace_seq_puts(s, " */\n");
1154 out:
1155	return trace_handle_return(s);
1156}
1157
1158
1159enum print_line_t
1160print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1161{
1162	struct ftrace_graph_ent_entry *field;
1163	struct fgraph_data *data = iter->private;
1164	struct trace_entry *entry = iter->ent;
1165	struct trace_seq *s = &iter->seq;
1166	int cpu = iter->cpu;
1167	int ret;
1168
1169	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1170		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1171		return TRACE_TYPE_HANDLED;
1172	}
1173
1174	/*
1175	 * If the last output failed, there's a possibility we need
1176	 * to print out the missing entry which would never go out.
1177	 */
1178	if (data && data->failed) {
1179		field = &data->ent;
1180		iter->cpu = data->cpu;
1181		ret = print_graph_entry(field, s, iter, flags);
1182		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1183			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1184			ret = TRACE_TYPE_NO_CONSUME;
1185		}
1186		iter->cpu = cpu;
1187		return ret;
1188	}
1189
1190	switch (entry->type) {
1191	case TRACE_GRAPH_ENT: {
1192		/*
1193		 * print_graph_entry() may consume the current event,
1194		 * thus @field may become invalid, so we need to save it.
1195		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1196		 * it can be safely saved at the stack.
1197		 */
1198		struct ftrace_graph_ent_entry saved;
1199		trace_assign_type(field, entry);
1200		saved = *field;
1201		return print_graph_entry(&saved, s, iter, flags);
1202	}
1203	case TRACE_GRAPH_RET: {
1204		struct ftrace_graph_ret_entry *field;
1205		trace_assign_type(field, entry);
1206		return print_graph_return(&field->ret, s, entry, iter, flags);
1207	}
1208	case TRACE_STACK:
1209	case TRACE_FN:
 1210		/* don't trace stack and functions as comments */
1211		return TRACE_TYPE_UNHANDLED;
1212
1213	default:
1214		return print_graph_comment(s, entry, iter, flags);
1215	}
1216
1217	return TRACE_TYPE_HANDLED;
1218}
1219
1220static enum print_line_t
1221print_graph_function(struct trace_iterator *iter)
1222{
1223	return print_graph_function_flags(iter, tracer_flags.val);
1224}
1225
1226static enum print_line_t
1227print_graph_function_event(struct trace_iterator *iter, int flags,
1228			   struct trace_event *event)
1229{
1230	return print_graph_function(iter);
1231}
1232
1233static void print_lat_header(struct seq_file *s, u32 flags)
1234{
1235	static const char spaces[] = "                "	/* 16 spaces */
1236		"    "					/* 4 spaces */
1237		"                 ";			/* 17 spaces */
1238	int size = 0;
1239
1240	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1241		size += 16;
1242	if (flags & TRACE_GRAPH_PRINT_CPU)
1243		size += 4;
1244	if (flags & TRACE_GRAPH_PRINT_PROC)
1245		size += 17;
1246
1247	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1248	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1249	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1250	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1251	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1252}
1253
1254static void __print_graph_headers_flags(struct trace_array *tr,
1255					struct seq_file *s, u32 flags)
1256{
1257	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1258
1259	if (lat)
1260		print_lat_header(s, flags);
1261
1262	/* 1st line */
1263	seq_putc(s, '#');
1264	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1265		seq_puts(s, "     TIME       ");
1266	if (flags & TRACE_GRAPH_PRINT_CPU)
1267		seq_puts(s, " CPU");
1268	if (flags & TRACE_GRAPH_PRINT_PROC)
1269		seq_puts(s, "  TASK/PID       ");
1270	if (lat)
1271		seq_puts(s, "||||");
1272	if (flags & TRACE_GRAPH_PRINT_DURATION)
1273		seq_puts(s, "  DURATION   ");
1274	seq_puts(s, "               FUNCTION CALLS\n");
1275
1276	/* 2nd line */
1277	seq_putc(s, '#');
1278	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1279		seq_puts(s, "      |         ");
1280	if (flags & TRACE_GRAPH_PRINT_CPU)
1281		seq_puts(s, " |  ");
1282	if (flags & TRACE_GRAPH_PRINT_PROC)
1283		seq_puts(s, "   |    |        ");
1284	if (lat)
1285		seq_puts(s, "||||");
1286	if (flags & TRACE_GRAPH_PRINT_DURATION)
1287		seq_puts(s, "   |   |      ");
1288	seq_puts(s, "               |   |   |   |\n");
1289}
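
With the default flag set (CPU, overhead, duration, irqs), the two header lines built above come out along these lines (spacing approximate):

# CPU  DURATION                  FUNCTION CALLS
# |     |   |                    |   |   |   |
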
1290
1291static void print_graph_headers(struct seq_file *s)
1292{
1293	print_graph_headers_flags(s, tracer_flags.val);
1294}
1295
1296void print_graph_headers_flags(struct seq_file *s, u32 flags)
1297{
1298	struct trace_iterator *iter = s->private;
1299	struct trace_array *tr = iter->tr;
1300
1301	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1302		return;
1303
1304	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1305		/* print nothing if the buffers are empty */
1306		if (trace_empty(iter))
1307			return;
1308
1309		print_trace_header(s, iter);
1310	}
1311
1312	__print_graph_headers_flags(tr, s, flags);
1313}
1314
1315void graph_trace_open(struct trace_iterator *iter)
1316{
1317	/* pid and depth on the last trace processed */
1318	struct fgraph_data *data;
1319	gfp_t gfpflags;
1320	int cpu;
1321
1322	iter->private = NULL;
1323
1324	/* We can be called in atomic context via ftrace_dump() */
1325	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1326
1327	data = kzalloc(sizeof(*data), gfpflags);
1328	if (!data)
1329		goto out_err;
1330
1331	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1332	if (!data->cpu_data)
1333		goto out_err_free;
1334
1335	for_each_possible_cpu(cpu) {
1336		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1337		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1338		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1339		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1340
1341		*pid = -1;
1342		*depth = 0;
1343		*ignore = 0;
1344		*depth_irq = -1;
1345	}
1346
1347	iter->private = data;
1348
1349	return;
1350
1351 out_err_free:
1352	kfree(data);
1353 out_err:
1354	pr_warn("function graph tracer: not enough memory\n");
1355}
1356
1357void graph_trace_close(struct trace_iterator *iter)
1358{
1359	struct fgraph_data *data = iter->private;
1360
1361	if (data) {
1362		free_percpu(data->cpu_data);
1363		kfree(data);
1364	}
1365}
1366
1367static int
1368func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1369{
1370	if (bit == TRACE_GRAPH_PRINT_IRQS)
1371		ftrace_graph_skip_irqs = !set;
1372
1373	if (bit == TRACE_GRAPH_SLEEP_TIME)
1374		ftrace_graph_sleep_time_control(set);
1375
1376	if (bit == TRACE_GRAPH_GRAPH_TIME)
1377		ftrace_graph_graph_time_control(set);
1378
1379	return 0;
1380}
1381
1382static struct trace_event_functions graph_functions = {
1383	.trace		= print_graph_function_event,
1384};
1385
1386static struct trace_event graph_trace_entry_event = {
1387	.type		= TRACE_GRAPH_ENT,
1388	.funcs		= &graph_functions,
1389};
1390
1391static struct trace_event graph_trace_ret_event = {
1392	.type		= TRACE_GRAPH_RET,
1393	.funcs		= &graph_functions
1394};
1395
1396static struct tracer graph_trace __tracer_data = {
1397	.name		= "function_graph",
1398	.update_thresh	= graph_trace_update_thresh,
1399	.open		= graph_trace_open,
1400	.pipe_open	= graph_trace_open,
1401	.close		= graph_trace_close,
1402	.pipe_close	= graph_trace_close,
1403	.init		= graph_trace_init,
1404	.reset		= graph_trace_reset,
1405	.print_line	= print_graph_function,
1406	.print_header	= print_graph_headers,
1407	.flags		= &tracer_flags,
1408	.set_flag	= func_graph_set_flag,
1409#ifdef CONFIG_FTRACE_SELFTEST
1410	.selftest	= trace_selftest_startup_function_graph,
1411#endif
1412};
1413
1414
1415static ssize_t
1416graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1417		  loff_t *ppos)
1418{
1419	unsigned long val;
1420	int ret;
1421
1422	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1423	if (ret)
1424		return ret;
1425
1426	max_depth = val;
1427
1428	*ppos += cnt;
1429
1430	return cnt;
1431}
1432
1433static ssize_t
1434graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1435		 loff_t *ppos)
1436{
1437	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
1438	int n;
1439
1440	n = sprintf(buf, "%d\n", max_depth);
1441
1442	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1443}
1444
1445static const struct file_operations graph_depth_fops = {
1446	.open		= tracing_open_generic,
1447	.write		= graph_depth_write,
1448	.read		= graph_depth_read,
1449	.llseek		= generic_file_llseek,
1450};
1451
1452static __init int init_graph_tracefs(void)
1453{
1454	struct dentry *d_tracer;
1455
1456	d_tracer = tracing_init_dentry();
1457	if (IS_ERR(d_tracer))
1458		return 0;
1459
1460	trace_create_file("max_graph_depth", 0644, d_tracer,
1461			  NULL, &graph_depth_fops);
1462
1463	return 0;
1464}
1465fs_initcall(init_graph_tracefs);
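
A user-space sketch of driving the knob this creates. The path assumes tracefs is mounted at /sys/kernel/tracing (it may also appear under /sys/kernel/debug/tracing):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/max_graph_depth", "w");

	if (!f)
		return 1;
	fprintf(f, "%d\n", 5);	/* trace at most 5 levels of nesting */
	return fclose(f);
}
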
1466
1467static __init int init_graph_trace(void)
1468{
1469	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1470
1471	if (!register_trace_event(&graph_trace_entry_event)) {
1472		pr_warn("Warning: could not register graph trace events\n");
1473		return 1;
1474	}
1475
1476	if (!register_trace_event(&graph_trace_ret_event)) {
1477		pr_warn("Warning: could not register graph trace events\n");
1478		return 1;
1479	}
1480
1481	return register_tracer(&graph_trace);
1482}
1483
1484core_initcall(init_graph_trace);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Function graph tracer.
   5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   6 * Mostly borrowed from function tracer which
   7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   8 *
   9 */
  10#include <linux/uaccess.h>
  11#include <linux/ftrace.h>
  12#include <linux/interrupt.h>
  13#include <linux/slab.h>
  14#include <linux/fs.h>
  15
  16#include "trace.h"
  17#include "trace_output.h"
  18
  19/* When set, irq functions will be ignored */
  20static int ftrace_graph_skip_irqs;
  21
  22struct fgraph_cpu_data {
  23	pid_t		last_pid;
  24	int		depth;
  25	int		depth_irq;
  26	int		ignore;
  27	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
  28};
  29
  30struct fgraph_data {
  31	struct fgraph_cpu_data __percpu *cpu_data;
  32
  33	/* Place to preserve last processed entry. */
  34	union {
  35		struct ftrace_graph_ent_entry	ent;
  36		struct fgraph_retaddr_ent_entry	rent;
  37	} ent;
  38	struct ftrace_graph_ret_entry	ret;
  39	int				failed;
  40	int				cpu;
  41};
  42
  43#define TRACE_GRAPH_INDENT	2
  44
  45unsigned int fgraph_max_depth;
  46
  47static struct tracer_opt trace_opts[] = {
  48	/* Display overruns? (for self-debug purpose) */
  49	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
  50	/* Display CPU ? */
  51	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
  52	/* Display Overhead ? */
  53	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  54	/* Display proc name/pid */
  55	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  56	/* Display duration of execution */
  57	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  58	/* Display absolute time of an entry */
  59	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  60	/* Display interrupts */
  61	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  62	/* Display function name after trailing } */
  63	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  64#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
  65	/* Display function return value ? */
  66	{ TRACER_OPT(funcgraph-retval, TRACE_GRAPH_PRINT_RETVAL) },
  67	/* Display function return value in hexadecimal format ? */
  68	{ TRACER_OPT(funcgraph-retval-hex, TRACE_GRAPH_PRINT_RETVAL_HEX) },
  69#endif
  70#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
  71	/* Display function return address ? */
  72	{ TRACER_OPT(funcgraph-retaddr, TRACE_GRAPH_PRINT_RETADDR) },
  73#endif
  74	/* Include sleep time (scheduled out) between entry and return */
  75	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
  76
  77#ifdef CONFIG_FUNCTION_PROFILER
  78	/* Include time within nested functions */
  79	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
  80#endif
  81
  82	{ } /* Empty entry */
  83};
  84
  85static struct tracer_flags tracer_flags = {
  86	/* Don't display overruns, proc, or tail by default */
  87	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  88	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  89	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
  90	.opts = trace_opts
  91};
  92
  93static bool tracer_flags_is_set(u32 flags)
  94{
  95	return (tracer_flags.val & flags) == flags;
  96}
  97
   98/*
   99 * The DURATION column is also used to display IRQ signs;
  100 * the following values are used by print_graph_irq() and
  101 * others to fill in space in the DURATION column.
  102 */
 103enum {
 104	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 105	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 106	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 107};
 108
 109static void
 110print_graph_duration(struct trace_array *tr, unsigned long long duration,
 111		     struct trace_seq *s, u32 flags);
 112
 113int __trace_graph_entry(struct trace_array *tr,
 114				struct ftrace_graph_ent *trace,
 115				unsigned int trace_ctx)
 116{
 117	struct ring_buffer_event *event;
 118	struct trace_buffer *buffer = tr->array_buffer.buffer;
 119	struct ftrace_graph_ent_entry *entry;
 120
 121	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 122					  sizeof(*entry), trace_ctx);
 123	if (!event)
 124		return 0;
 125	entry	= ring_buffer_event_data(event);
 126	entry->graph_ent			= *trace;
 127	trace_buffer_unlock_commit_nostack(buffer, event);
 128
 129	return 1;
 130}
 131
 132#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
 133int __trace_graph_retaddr_entry(struct trace_array *tr,
 134				struct ftrace_graph_ent *trace,
 135				unsigned int trace_ctx,
 136				unsigned long retaddr)
 137{
 138	struct ring_buffer_event *event;
 139	struct trace_buffer *buffer = tr->array_buffer.buffer;
 140	struct fgraph_retaddr_ent_entry *entry;
 141
 142	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RETADDR_ENT,
 143					  sizeof(*entry), trace_ctx);
 144	if (!event)
 145		return 0;
 146	entry	= ring_buffer_event_data(event);
 147	entry->graph_ent.func = trace->func;
 148	entry->graph_ent.depth = trace->depth;
 149	entry->graph_ent.retaddr = retaddr;
 150	trace_buffer_unlock_commit_nostack(buffer, event);
 151
 152	return 1;
 153}
 154#else
 155int __trace_graph_retaddr_entry(struct trace_array *tr,
 156				struct ftrace_graph_ent *trace,
 157				unsigned int trace_ctx,
 158				unsigned long retaddr)
 159{
 160	return 1;
 161}
 162#endif
 163
 164static inline int ftrace_graph_ignore_irqs(void)
 165{
 166	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 167		return 0;
 168
 169	return in_hardirq();
 170}
 171
 172struct fgraph_times {
 173	unsigned long long		calltime;
 174	unsigned long long		sleeptime; /* may be optional! */
 175};
 176
 177int trace_graph_entry(struct ftrace_graph_ent *trace,
 178		      struct fgraph_ops *gops)
 179{
 180	unsigned long *task_var = fgraph_get_task_var(gops);
 181	struct trace_array *tr = gops->private;
 182	struct trace_array_cpu *data;
 183	struct fgraph_times *ftimes;
 184	unsigned long flags;
 185	unsigned int trace_ctx;
 186	long disabled;
 187	int ret;
 188	int cpu;
 189
 190	if (*task_var & TRACE_GRAPH_NOTRACE)
 191		return 0;
 192
 193	/*
 194	 * Do not trace a function if it's filtered by set_graph_notrace.
 195	 * Make the index of ret stack negative to indicate that it should
 196	 * ignore further functions.  But it needs its own ret stack entry
 197	 * to recover the original index in order to continue tracing after
 198	 * returning from the function.
 199	 */
 200	if (ftrace_graph_notrace_addr(trace->func)) {
 201		*task_var |= TRACE_GRAPH_NOTRACE;
 202		/*
 203		 * Need to return 1 to have the return called
 204		 * that will clear the NOTRACE bit.
 205		 */
 206		return 1;
 207	}
 208
 209	if (!ftrace_trace_task(tr))
 210		return 0;
 211
 212	if (ftrace_graph_ignore_func(gops, trace))
 213		return 0;
 214
 215	if (ftrace_graph_ignore_irqs())
 216		return 0;
 217
 218	if (fgraph_sleep_time) {
 219		/* Only need to record the calltime */
 220		ftimes = fgraph_reserve_data(gops->idx, sizeof(ftimes->calltime));
 221	} else {
 222		ftimes = fgraph_reserve_data(gops->idx, sizeof(*ftimes));
 223		if (ftimes)
 224			ftimes->sleeptime = current->ftrace_sleeptime;
 225	}
 226	if (!ftimes)
 227		return 0;
 228
 229	ftimes->calltime = trace_clock_local();
 230
 231	/*
 232	 * Stop here if tracing_threshold is set. We only write function return
 233	 * events to the ring buffer.
 234	 */
 235	if (tracing_thresh)
 236		return 1;
 237
 238	local_irq_save(flags);
 239	cpu = raw_smp_processor_id();
 240	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 241	disabled = atomic_inc_return(&data->disabled);
 242	if (likely(disabled == 1)) {
 243		trace_ctx = tracing_gen_ctx_flags(flags);
 244		if (unlikely(IS_ENABLED(CONFIG_FUNCTION_GRAPH_RETADDR) &&
 245			tracer_flags_is_set(TRACE_GRAPH_PRINT_RETADDR))) {
 246			unsigned long retaddr = ftrace_graph_top_ret_addr(current);
 247
 248			ret = __trace_graph_retaddr_entry(tr, trace, trace_ctx, retaddr);
 249		} else
 250			ret = __trace_graph_entry(tr, trace, trace_ctx);
 251	} else {
 252		ret = 0;
 253	}
 254
 255	atomic_dec(&data->disabled);
 256	local_irq_restore(flags);
 257
 258	return ret;
 259}
 260
 261static void
 262__trace_graph_function(struct trace_array *tr,
 263		unsigned long ip, unsigned int trace_ctx)
 264{
 265	u64 time = trace_clock_local();
 266	struct ftrace_graph_ent ent = {
 267		.func  = ip,
 268		.depth = 0,
 269	};
 270	struct ftrace_graph_ret ret = {
 271		.func     = ip,
 272		.depth    = 0,
 273		.calltime = time,
 274		.rettime  = time,
 275	};
 276
 277	__trace_graph_entry(tr, &ent, trace_ctx);
 278	__trace_graph_return(tr, &ret, trace_ctx);
 279}
 280
 281void
 282trace_graph_function(struct trace_array *tr,
 283		unsigned long ip, unsigned long parent_ip,
 284		unsigned int trace_ctx)
 285{
 286	__trace_graph_function(tr, ip, trace_ctx);
 287}
 288
 289void __trace_graph_return(struct trace_array *tr,
 290				struct ftrace_graph_ret *trace,
 291				unsigned int trace_ctx)
 292{
 293	struct ring_buffer_event *event;
 294	struct trace_buffer *buffer = tr->array_buffer.buffer;
 295	struct ftrace_graph_ret_entry *entry;
 296
 297	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 298					  sizeof(*entry), trace_ctx);
 299	if (!event)
 300		return;
 301	entry	= ring_buffer_event_data(event);
 302	entry->ret				= *trace;
 303	trace_buffer_unlock_commit_nostack(buffer, event);
 304}
 305
 306static void handle_nosleeptime(struct ftrace_graph_ret *trace,
 307			       struct fgraph_times *ftimes,
 308			       int size)
 309{
 310	if (fgraph_sleep_time || size < sizeof(*ftimes))
 311		return;
 312
 313	ftimes->calltime += current->ftrace_sleeptime - ftimes->sleeptime;
 314}
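
The adjustment above amounts to calltime += (sleeptime now - sleeptime at entry), shifting the measured window forward by however long the task was scheduled out inside it. A standalone sketch with invented numbers:

#include <stdio.h>

int main(void)
{
	/* Invented values, all in ns. */
	unsigned long long calltime  = 1000;	/* recorded at entry */
	unsigned long long sleep_in  = 200;	/* ftrace_sleeptime at entry */
	unsigned long long sleep_out = 700;	/* ftrace_sleeptime at return */
	unsigned long long rettime   = 3000;

	calltime += sleep_out - sleep_in;	/* as handle_nosleeptime() */
	/* 3000 - 1500 = 1500 ns of on-CPU time, sleep excluded. */
	printf("duration: %llu ns\n", rettime - calltime);
	return 0;
}
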
 315
 316void trace_graph_return(struct ftrace_graph_ret *trace,
 317			struct fgraph_ops *gops)
 318{
 319	unsigned long *task_var = fgraph_get_task_var(gops);
 320	struct trace_array *tr = gops->private;
 321	struct trace_array_cpu *data;
 322	struct fgraph_times *ftimes;
 323	unsigned long flags;
 324	unsigned int trace_ctx;
 325	long disabled;
 326	int size;
 327	int cpu;
 328
 329	ftrace_graph_addr_finish(gops, trace);
 330
 331	if (*task_var & TRACE_GRAPH_NOTRACE) {
 332		*task_var &= ~TRACE_GRAPH_NOTRACE;
 333		return;
 334	}
 335
 336	ftimes = fgraph_retrieve_data(gops->idx, &size);
 337	if (!ftimes)
 338		return;
 339
 340	handle_nosleeptime(trace, ftimes, size);
 341
 342	trace->calltime = ftimes->calltime;
 343
 344	local_irq_save(flags);
 345	cpu = raw_smp_processor_id();
 346	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 347	disabled = atomic_inc_return(&data->disabled);
 348	if (likely(disabled == 1)) {
 349		trace_ctx = tracing_gen_ctx_flags(flags);
 350		__trace_graph_return(tr, trace, trace_ctx);
 351	}
 352	atomic_dec(&data->disabled);
 353	local_irq_restore(flags);
 354}
 355
 356static void trace_graph_thresh_return(struct ftrace_graph_ret *trace,
 357				      struct fgraph_ops *gops)
 358{
 359	struct fgraph_times *ftimes;
 360	int size;
 361
 362	ftrace_graph_addr_finish(gops, trace);
 363
 364	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
 365		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
 366		return;
 367	}
 368
 369	ftimes = fgraph_retrieve_data(gops->idx, &size);
 370	if (!ftimes)
 371		return;
 372
 373	handle_nosleeptime(trace, ftimes, size);
 374
 375	trace->calltime = ftimes->calltime;
 376
 377	if (tracing_thresh &&
 378	    (trace->rettime - ftimes->calltime < tracing_thresh))
 379		return;
 380	else
 381		trace_graph_return(trace, gops);
 382}
 383
 384static struct fgraph_ops funcgraph_ops = {
 385	.entryfunc = &trace_graph_entry,
 386	.retfunc = &trace_graph_return,
 387};
 388
 389int allocate_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
 390{
 391	struct fgraph_ops *gops;
 392
 393	gops = kzalloc(sizeof(*gops), GFP_KERNEL);
 394	if (!gops)
 395		return -ENOMEM;
 396
 397	gops->entryfunc = &trace_graph_entry;
 398	gops->retfunc = &trace_graph_return;
 399
 400	tr->gops = gops;
 401	gops->private = tr;
 402
 403	fgraph_init_ops(&gops->ops, ops);
 404
 405	return 0;
 406}
 407
 408void free_fgraph_ops(struct trace_array *tr)
 409{
 410	kfree(tr->gops);
 411}
 412
 413__init void init_array_fgraph_ops(struct trace_array *tr, struct ftrace_ops *ops)
 414{
 415	tr->gops = &funcgraph_ops;
 416	funcgraph_ops.private = tr;
 417	fgraph_init_ops(&tr->gops->ops, ops);
 418}
 419
 420static int graph_trace_init(struct trace_array *tr)
 421{
 422	int ret;
 423
 424	tr->gops->entryfunc = trace_graph_entry;
 425
 426	if (tracing_thresh)
 427		tr->gops->retfunc = trace_graph_thresh_return;
 428	else
 429		tr->gops->retfunc = trace_graph_return;
 430
 431	/* Make sure gops functions are visible before we start tracing */
 432	smp_mb();
 433
 434	ret = register_ftrace_graph(tr->gops);
 435	if (ret)
 436		return ret;
 437	tracing_start_cmdline_record();
 438
 439	return 0;
 440}
 441
 442static void graph_trace_reset(struct trace_array *tr)
 443{
 444	tracing_stop_cmdline_record();
 445	unregister_ftrace_graph(tr->gops);
 446}
 447
 448static int graph_trace_update_thresh(struct trace_array *tr)
 449{
 450	graph_trace_reset(tr);
 451	return graph_trace_init(tr);
 452}
 453
 454static int max_bytes_for_cpu;
 455
 456static void print_graph_cpu(struct trace_seq *s, int cpu)
 457{
 458	/*
 459	 * Start with a space character - to make it stand out
 460	 * to the right a bit when trace output is pasted into
 461	 * email:
 462	 */
 463	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 464}
 465
 466#define TRACE_GRAPH_PROCINFO_LENGTH	14
 467
 468static void print_graph_proc(struct trace_seq *s, pid_t pid)
 469{
 470	char comm[TASK_COMM_LEN];
 471	/* sign + log10(MAX_INT) + '\0' */
 472	char pid_str[11];
 473	int spaces = 0;
 474	int len;
 475	int i;
 476
 477	trace_find_cmdline(pid, comm);
 478	comm[7] = '\0';
 479	sprintf(pid_str, "%d", pid);
 480
 481	/* 1 stands for the "-" character */
 482	len = strlen(comm) + strlen(pid_str) + 1;
 483
 484	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 485		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
 486
 487	/* First spaces to align center */
 488	for (i = 0; i < spaces / 2; i++)
 489		trace_seq_putc(s, ' ');
 490
 491	trace_seq_printf(s, "%s-%s", comm, pid_str);
 492
 493	/* Last spaces to align center */
 494	for (i = 0; i < spaces - (spaces / 2); i++)
 495		trace_seq_putc(s, ' ');
 496}
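/*
 * Example with hypothetical values: pid 1755 with comm "sshd" formats as
 * "sshd-1755" (9 chars), so the 5 padding spaces are split as 2 on the
 * left and 3 on the right to fill the 14-column
 * TRACE_GRAPH_PROCINFO_LENGTH field.
 */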
 497
 498
 499static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 500{
 501	trace_seq_putc(s, ' ');
 502	trace_print_lat_fmt(s, entry);
 503	trace_seq_puts(s, " | ");
 504}
 505
 506/* If the pid changed since the last trace, output this event */
 507static void
 508verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 509{
 510	pid_t prev_pid;
 511	pid_t *last_pid;
 512
 513	if (!data)
 514		return;
 515
 516	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 517
 518	if (*last_pid == pid)
 519		return;
 520
 521	prev_pid = *last_pid;
 522	*last_pid = pid;
 523
 524	if (prev_pid == -1)
 525		return;
 526/*
 527 * Context-switch trace line:
 528
 529 ------------------------------------------
 530 | 1)  migration/0--1  =>  sshd-1755
 531 ------------------------------------------
 532
 533 */
 534	trace_seq_puts(s, " ------------------------------------------\n");
 535	print_graph_cpu(s, cpu);
 536	print_graph_proc(s, prev_pid);
 537	trace_seq_puts(s, " => ");
 538	print_graph_proc(s, pid);
 539	trace_seq_puts(s, "\n ------------------------------------------\n\n");
 540}
 541
 542static struct ftrace_graph_ret_entry *
 543get_return_for_leaf(struct trace_iterator *iter,
 544		struct ftrace_graph_ent_entry *curr)
 545{
 546	struct fgraph_data *data = iter->private;
 547	struct ring_buffer_iter *ring_iter = NULL;
 548	struct ring_buffer_event *event;
 549	struct ftrace_graph_ret_entry *next;
 550
 551	/*
 552	 * If the previous output failed to write to the seq buffer,
 553	 * then we just reuse the data from before.
 554	 */
 555	if (data && data->failed) {
 556		curr = &data->ent.ent;
 557		next = &data->ret;
 558	} else {
 559
 560		ring_iter = trace_buffer_iter(iter, iter->cpu);
 561
 562		/* First peek to compare current entry and the next one */
 563		if (ring_iter)
 564			event = ring_buffer_iter_peek(ring_iter, NULL);
 565		else {
 566			/*
 567			 * We need to consume the current entry to see
 568			 * the next one.
 569			 */
 570			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
 571					    NULL, NULL);
 572			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
 573						 NULL, NULL);
 574		}
 575
 576		if (!event)
 577			return NULL;
 578
 579		next = ring_buffer_event_data(event);
 580
 581		if (data) {
 582			/*
 583			 * Save current and next entries for later reference
 584			 * if the output fails.
 585			 */
 586			if (unlikely(curr->ent.type == TRACE_GRAPH_RETADDR_ENT))
 587				data->ent.rent = *(struct fgraph_retaddr_ent_entry *)curr;
 588			else
 589				data->ent.ent = *curr;
 590			/*
 591			 * If the next event is not a return type, then
 592			 * we only care about what type it is. Otherwise we can
 593			 * safely copy the entire event.
 594			 */
 595			if (next->ent.type == TRACE_GRAPH_RET)
 596				data->ret = *next;
 597			else
 598				data->ret.ent.type = next->ent.type;
 599		}
 600	}
 601
 602	if (next->ent.type != TRACE_GRAPH_RET)
 603		return NULL;
 604
 605	if (curr->ent.pid != next->ent.pid ||
 606			curr->graph_ent.func != next->ret.func)
 607		return NULL;
 608
 609	/* this is a leaf, now advance the iterator */
 610	if (ring_iter)
 611		ring_buffer_iter_advance(ring_iter);
 612
 613	return next;
 614}
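/*
 * Illustrative effect of leaf detection on the output: an entry event
 * immediately followed by its matching return is collapsed from
 *
 *	 1)               |  kfree() {
 *	 1)   1.234 us    |  }
 *
 * into the single line
 *
 *	 1)   1.234 us    |  kfree();
 */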
 615
 616static void print_graph_abs_time(u64 t, struct trace_seq *s)
 617{
 618	unsigned long usecs_rem;
 619
 620	usecs_rem = do_div(t, NSEC_PER_SEC);
 621	usecs_rem /= 1000;
 622
 623	trace_seq_printf(s, "%5lu.%06lu |  ",
 624			 (unsigned long)t, usecs_rem);
 625}
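/*
 * Example (illustrative): t = 5123456789 ns prints as "    5.123456 |  ",
 * i.e. whole seconds, then the nanosecond remainder converted to usecs.
 */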
 626
 627static void
 628print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
 629{
 630	unsigned long long usecs;
 631
 632	usecs = iter->ts - iter->array_buffer->time_start;
 633	do_div(usecs, NSEC_PER_USEC);
 634
 635	trace_seq_printf(s, "%9llu us |  ", usecs);
 636}
 637
 638static void
 639print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 640		enum trace_type type, int cpu, pid_t pid, u32 flags)
 641{
 642	struct trace_array *tr = iter->tr;
 643	struct trace_seq *s = &iter->seq;
 644	struct trace_entry *ent = iter->ent;
 645
 646	addr += iter->tr->text_delta;
 647
 648	if (addr < (unsigned long)__irqentry_text_start ||
 649		addr >= (unsigned long)__irqentry_text_end)
 650		return;
 651
 652	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 653		/* Absolute time */
 654		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 655			print_graph_abs_time(iter->ts, s);
 656
 657		/* Relative time */
 658		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
 659			print_graph_rel_time(iter, s);
 660
 661		/* Cpu */
 662		if (flags & TRACE_GRAPH_PRINT_CPU)
 663			print_graph_cpu(s, cpu);
 664
 665		/* Proc */
 666		if (flags & TRACE_GRAPH_PRINT_PROC) {
 667			print_graph_proc(s, pid);
 668			trace_seq_puts(s, " | ");
 669		}
 670
 671		/* Latency format */
 672		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 673			print_graph_lat_fmt(s, ent);
 674	}
 675
 676	/* No overhead */
 677	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 678
 679	if (type == TRACE_GRAPH_ENT)
 680		trace_seq_puts(s, "==========>");
 681	else
 682		trace_seq_puts(s, "<==========");
 683
 684	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 685	trace_seq_putc(s, '\n');
 686}
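/*
 * Sample (illustrative) markers emitted around a hardirq when irq
 * display is enabled:
 *
 *	 1)   ==========>
 *	 1)               |  smp_apic_timer_interrupt() {
 *	...
 *	 1)   <==========
 */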
 687
 688void
 689trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 690{
 691	unsigned long nsecs_rem = do_div(duration, 1000);
 692	/* log10(ULONG_MAX) + '\0' */
 693	char usecs_str[21];
 694	char nsecs_str[5];
 695	int len;
 696	int i;
 697
 698	sprintf(usecs_str, "%lu", (unsigned long) duration);
 699
 700	/* Print msecs */
 701	trace_seq_printf(s, "%s", usecs_str);
 702
 703	len = strlen(usecs_str);
 704
 705	/* Print nsecs (we don't want to exceed 7 digits) */
 706	if (len < 7) {
 707		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
 708
 709		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 710		trace_seq_printf(s, ".%s", nsecs_str);
 711		len += strlen(nsecs_str) + 1;
 712	}
 713
 714	trace_seq_puts(s, " us ");
 715
 716	/* Print remaining spaces to fit the row's width */
 717	for (i = len; i < 8; i++)
 718		trace_seq_putc(s, ' ');
 719}
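/*
 * Worked example (illustrative): duration = 12345 ns. do_div() leaves
 * duration = 12 (us) with nsecs_rem = 345, so this prints "12.345 us "
 * and then pads the field out to eight characters.
 */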
 720
 721static void
 722print_graph_duration(struct trace_array *tr, unsigned long long duration,
 723		     struct trace_seq *s, u32 flags)
 724{
 725	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
 726	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 727		return;
 728
 729	/* No real data, just filling the column with spaces */
 730	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
 731	case FLAGS_FILL_FULL:
 732		trace_seq_puts(s, "              |  ");
 733		return;
 734	case FLAGS_FILL_START:
 735		trace_seq_puts(s, "  ");
 736		return;
 737	case FLAGS_FILL_END:
 738		trace_seq_puts(s, " |");
 739		return;
 740	}
 741
 742	/* Signal an overhead of execution time to the output */
 743	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
 744		trace_seq_printf(s, "%c ", trace_find_mark(duration));
 745	else
 746		trace_seq_puts(s, "  ");
 747
 748	trace_print_graph_duration(duration, s);
 749	trace_seq_puts(s, "|  ");
 750}
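/*
 * With funcgraph-overhead enabled, trace_find_mark() prefixes the
 * duration with a delay marker (per Documentation/trace/ftrace.rst,
 * e.g. '+' for over 10 usecs, '!' for over 100 usecs), producing
 * columns such as "+ 12.345 us   |  ".
 */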
 751
 752#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
 753#define __TRACE_GRAPH_PRINT_RETVAL TRACE_GRAPH_PRINT_RETVAL
 754#else
 755#define __TRACE_GRAPH_PRINT_RETVAL 0
 756#endif
 757
 758#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
 759#define __TRACE_GRAPH_PRINT_RETADDR TRACE_GRAPH_PRINT_RETADDR
 760static void print_graph_retaddr(struct trace_seq *s, struct fgraph_retaddr_ent_entry *entry,
 761				u32 trace_flags, bool comment)
 762{
 763	if (comment)
 764		trace_seq_puts(s, " /*");
 765
 766	trace_seq_puts(s, " <-");
 767	seq_print_ip_sym(s, entry->graph_ent.retaddr, trace_flags | TRACE_ITER_SYM_OFFSET);
 768
 769	if (comment)
 770		trace_seq_puts(s, " */");
 771}
 772#else
 773#define __TRACE_GRAPH_PRINT_RETADDR 0
 774#define print_graph_retaddr(_seq, _entry, _tflags, _comment)		do { } while (0)
 775#endif
 776
 777#if defined(CONFIG_FUNCTION_GRAPH_RETVAL) || defined(CONFIG_FUNCTION_GRAPH_RETADDR)
 778
 779static void print_graph_retval(struct trace_seq *s, struct ftrace_graph_ent_entry *entry,
 780				struct ftrace_graph_ret *graph_ret, void *func,
 781				u32 opt_flags, u32 trace_flags)
 782{
 783	unsigned long err_code = 0;
 784	unsigned long retval = 0;
 785	bool print_retaddr = false;
 786	bool print_retval = false;
 787	bool hex_format = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL_HEX);
 788
 789#ifdef CONFIG_FUNCTION_GRAPH_RETVAL
 790	retval = graph_ret->retval;
 791	print_retval = !!(opt_flags & TRACE_GRAPH_PRINT_RETVAL);
 792#endif
 793
 794#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
 795	print_retaddr = !!(opt_flags & TRACE_GRAPH_PRINT_RETADDR);
 796#endif
 797
 798	if (print_retval && retval && !hex_format) {
 799		/* Check if the return value matches the negative format */
 800		if (IS_ENABLED(CONFIG_64BIT) && (retval & BIT(31)) &&
 801			(((u64)retval) >> 32) == 0) {
 802			err_code = sign_extend64(retval, 31);
 803		} else {
 804			err_code = retval;
 805		}
 806
 807		if (!IS_ERR_VALUE(err_code))
 808			err_code = 0;
 809	}
 810
 811	if (entry) {
 812		if (entry->ent.type != TRACE_GRAPH_RETADDR_ENT)
 813			print_retaddr = false;
 814
 815		trace_seq_printf(s, "%ps();", func);
 816		if (print_retval || print_retaddr)
 817			trace_seq_puts(s, " /*");
 818		else
 819			trace_seq_putc(s, '\n');
 820	} else {
 821		print_retaddr = false;
 822		trace_seq_printf(s, "} /* %ps", func);
 823	}
 824
 825	if (print_retaddr)
 826		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
 827				    trace_flags, false);
 828
 829	if (print_retval) {
 830		if (hex_format || (err_code == 0))
 831			trace_seq_printf(s, " ret=0x%lx", retval);
 832		else
 833			trace_seq_printf(s, " ret=%ld", err_code);
 834	}
 835
 836	if (!entry || print_retval || print_retaddr)
 837		trace_seq_puts(s, " */\n");
 838}
 839
 840#else
 841
 842#define print_graph_retval(_seq, _ent, _ret, _func, _opt_flags, _trace_flags) do {} while (0)
 843
 844#endif
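/*
 * Illustrative output with funcgraph-retval enabled (hypothetical
 * values): a leaf call prints as "kfree();" followed by a C-style
 * comment carrying "ret=0x0", while a closing brace prints as "}"
 * followed by a comment with the function name and, for a recognized
 * negative error code, a decimal value such as "ret=-2".
 */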
 845
 846/* Case of a leaf function on its call entry */
 847static enum print_line_t
 848print_graph_entry_leaf(struct trace_iterator *iter,
 849		struct ftrace_graph_ent_entry *entry,
 850		struct ftrace_graph_ret_entry *ret_entry,
 851		struct trace_seq *s, u32 flags)
 852{
 853	struct fgraph_data *data = iter->private;
 854	struct trace_array *tr = iter->tr;
 855	struct ftrace_graph_ret *graph_ret;
 856	struct ftrace_graph_ent *call;
 857	unsigned long long duration;
 858	unsigned long func;
 859	int cpu = iter->cpu;
 860	int i;
 861
 862	graph_ret = &ret_entry->ret;
 863	call = &entry->graph_ent;
 864	duration = graph_ret->rettime - graph_ret->calltime;
 865
 866	func = call->func + iter->tr->text_delta;
 867
 868	if (data) {
 869		struct fgraph_cpu_data *cpu_data;
 870
 871		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 872
 873		/*
 874		 * Comments display at depth + 1. Since this is
 875		 * a leaf function, keep the comments equal to
 876		 * this depth.
 877		 */
 878		cpu_data->depth = call->depth - 1;
 879
 880		/* No need to keep this function around for this depth */
 881		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 882		    !WARN_ON_ONCE(call->depth < 0))
 883			cpu_data->enter_funcs[call->depth] = 0;
 884	}
 885
 886	/* Overhead and duration */
 887	print_graph_duration(tr, duration, s, flags);
 888
 889	/* Function */
 890	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 891		trace_seq_putc(s, ' ');
 892
 893	/*
 894	 * Write out the function return value or return address
 895	 */
 896	if (flags & (__TRACE_GRAPH_PRINT_RETVAL | __TRACE_GRAPH_PRINT_RETADDR)) {
 897		print_graph_retval(s, entry, graph_ret,
 898				   (void *)graph_ret->func + iter->tr->text_delta,
 899				   flags, tr->trace_flags);
 900	} else {
 901		trace_seq_printf(s, "%ps();\n", (void *)func);
 902	}
 903
 904	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
 905			cpu, iter->ent->pid, flags);
 906
 907	return trace_handle_return(s);
 908}
 909
 910static enum print_line_t
 911print_graph_entry_nested(struct trace_iterator *iter,
 912			 struct ftrace_graph_ent_entry *entry,
 913			 struct trace_seq *s, int cpu, u32 flags)
 914{
 915	struct ftrace_graph_ent *call = &entry->graph_ent;
 916	struct fgraph_data *data = iter->private;
 917	struct trace_array *tr = iter->tr;
 918	unsigned long func;
 919	int i;
 920
 921	if (data) {
 922		struct fgraph_cpu_data *cpu_data;
 923		int cpu = iter->cpu;
 924
 925		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 926		cpu_data->depth = call->depth;
 927
 928		/* Save this function pointer to see if the exit matches */
 929		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 930		    !WARN_ON_ONCE(call->depth < 0))
 931			cpu_data->enter_funcs[call->depth] = call->func;
 932	}
 933
 934	/* No time */
 935	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 936
 937	/* Function */
 938	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 939		trace_seq_putc(s, ' ');
 940
 941	func = call->func + iter->tr->text_delta;
 942
 943	trace_seq_printf(s, "%ps() {", (void *)func);
 944	if (flags & __TRACE_GRAPH_PRINT_RETADDR  &&
 945		entry->ent.type == TRACE_GRAPH_RETADDR_ENT)
 946		print_graph_retaddr(s, (struct fgraph_retaddr_ent_entry *)entry,
 947			tr->trace_flags, true);
 948	trace_seq_putc(s, '\n');
 949
 950	if (trace_seq_has_overflowed(s))
 951		return TRACE_TYPE_PARTIAL_LINE;
 952
 953	/*
 954	 * we already consumed the current entry to check the next one
 955	 * and see if this is a leaf.
 956	 */
 957	return TRACE_TYPE_NO_CONSUME;
 958}
 959
 960static void
 961print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 962		     int type, unsigned long addr, u32 flags)
 963{
 964	struct fgraph_data *data = iter->private;
 965	struct trace_entry *ent = iter->ent;
 966	struct trace_array *tr = iter->tr;
 967	int cpu = iter->cpu;
 968
 969	/* Pid */
 970	verif_pid(s, ent->pid, cpu, data);
 971
 972	if (type)
 973		/* Interrupt */
 974		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 975
 976	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 977		return;
 978
 979	/* Absolute time */
 980	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 981		print_graph_abs_time(iter->ts, s);
 982
 983	/* Relative time */
 984	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
 985		print_graph_rel_time(iter, s);
 986
 987	/* Cpu */
 988	if (flags & TRACE_GRAPH_PRINT_CPU)
 989		print_graph_cpu(s, cpu);
 990
 991	/* Proc */
 992	if (flags & TRACE_GRAPH_PRINT_PROC) {
 993		print_graph_proc(s, ent->pid);
 994		trace_seq_puts(s, " | ");
 995	}
 996
 997	/* Latency format */
 998	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 999		print_graph_lat_fmt(s, ent);
1000
1001	return;
1002}
1003
1004/*
1005 * Entry check for irq code
1006 *
1007 * returns 1 if
1008 *  - we are inside irq code
1009 *  - we just entered irq code
1010 *
1011 * returns 0 if
 1012 *  - funcgraph-irqs option is set
1013 *  - we are not inside irq code
1014 */
1015static int
1016check_irq_entry(struct trace_iterator *iter, u32 flags,
1017		unsigned long addr, int depth)
1018{
1019	int cpu = iter->cpu;
1020	int *depth_irq;
1021	struct fgraph_data *data = iter->private;
1022
1023	addr += iter->tr->text_delta;
1024
1025	/*
1026	 * If we are either displaying irqs, or we got called as
1027	 * a graph event and private data does not exist,
1028	 * then we bypass the irq check.
1029	 */
1030	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1031	    (!data))
1032		return 0;
1033
1034	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1035
1036	/*
1037	 * We are inside the irq code
1038	 */
1039	if (*depth_irq >= 0)
1040		return 1;
1041
1042	if ((addr < (unsigned long)__irqentry_text_start) ||
1043	    (addr >= (unsigned long)__irqentry_text_end))
1044		return 0;
1045
1046	/*
1047	 * We are entering irq code.
1048	 */
1049	*depth_irq = depth;
1050	return 1;
1051}
1052
1053/*
1054 * Return check for irq code
1055 *
1056 * returns 1 if
1057 *  - we are inside irq code
1058 *  - we just left irq code
1059 *
1060 * returns 0 if
 1061 *  - funcgraph-irqs option is set
1062 *  - we are not inside irq code
1063 */
1064static int
1065check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
1066{
1067	int cpu = iter->cpu;
1068	int *depth_irq;
1069	struct fgraph_data *data = iter->private;
1070
1071	/*
1072	 * If we are either displaying irqs, or we got called as
1073	 * a graph event and private data does not exist,
1074	 * then we bypass the irq check.
1075	 */
1076	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1077	    (!data))
1078		return 0;
1079
1080	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1081
1082	/*
1083	 * We are not inside the irq code.
1084	 */
1085	if (*depth_irq == -1)
1086		return 0;
1087
1088	/*
 1089	 * We are inside the irq code, and this is the return of the entry.
1090	 * Let's not trace it and clear the entry depth, since
1091	 * we are out of irq code.
1092	 *
1093	 * This condition ensures that we 'leave the irq code' once
1094	 * we are out of the entry depth. Thus protecting us from
1095	 * the RETURN entry loss.
1096	 */
1097	if (*depth_irq >= depth) {
1098		*depth_irq = -1;
1099		return 1;
1100	}
1101
1102	/*
1103	 * We are inside the irq code, and this is not the entry.
1104	 */
1105	return 1;
1106}
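/*
 * Example (hypothetical): an irq entry seen at depth 2 makes
 * check_irq_entry() store depth_irq = 2. Every event while inside the
 * irq then reports 1 (suppressed), and the first return seen at a
 * depth <= 2 resets depth_irq to -1, resuming normal output.
 */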
1107
1108static enum print_line_t
1109print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1110			struct trace_iterator *iter, u32 flags)
1111{
1112	struct fgraph_data *data = iter->private;
1113	struct ftrace_graph_ent *call = &field->graph_ent;
1114	struct ftrace_graph_ret_entry *leaf_ret;
1115	static enum print_line_t ret;
1116	int cpu = iter->cpu;
1117
1118	if (check_irq_entry(iter, flags, call->func, call->depth))
1119		return TRACE_TYPE_HANDLED;
1120
1121	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
1122
1123	leaf_ret = get_return_for_leaf(iter, field);
1124	if (leaf_ret)
1125		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1126	else
1127		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1128
1129	if (data) {
1130		/*
1131		 * If we failed to write our output, then we need to make
1132		 * note of it. Because we already consumed our entry.
1133		 */
1134		if (s->full) {
1135			data->failed = 1;
1136			data->cpu = cpu;
1137		} else
1138			data->failed = 0;
1139	}
1140
1141	return ret;
1142}
1143
1144static enum print_line_t
1145print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1146		   struct trace_entry *ent, struct trace_iterator *iter,
1147		   u32 flags)
1148{
1149	unsigned long long duration = trace->rettime - trace->calltime;
1150	struct fgraph_data *data = iter->private;
1151	struct trace_array *tr = iter->tr;
1152	unsigned long func;
1153	pid_t pid = ent->pid;
1154	int cpu = iter->cpu;
1155	int func_match = 1;
1156	int i;
1157
1158	func = trace->func + iter->tr->text_delta;
1159
1160	if (check_irq_return(iter, flags, trace->depth))
1161		return TRACE_TYPE_HANDLED;
1162
1163	if (data) {
1164		struct fgraph_cpu_data *cpu_data;
1165		int cpu = iter->cpu;
1166
1167		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1168
1169		/*
 1170		 * Comments display at depth + 1. This is the
 1171		 * return from a function, so we now want the comments
 1172		 * to display at the same level as the bracket.
1173		 */
1174		cpu_data->depth = trace->depth - 1;
1175
1176		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1177		    !WARN_ON_ONCE(trace->depth < 0)) {
1178			if (cpu_data->enter_funcs[trace->depth] != trace->func)
1179				func_match = 0;
1180			cpu_data->enter_funcs[trace->depth] = 0;
1181		}
1182	}
1183
1184	print_graph_prologue(iter, s, 0, 0, flags);
1185
1186	/* Overhead and duration */
1187	print_graph_duration(tr, duration, s, flags);
1188
1189	/* Closing brace */
1190	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1191		trace_seq_putc(s, ' ');
1192
1193	/*
1194	 * Always write out the function name and its return value if the
1195	 * funcgraph-retval option is enabled.
1196	 */
1197	if (flags & __TRACE_GRAPH_PRINT_RETVAL) {
1198		print_graph_retval(s, NULL, trace, (void *)func, flags, tr->trace_flags);
1199	} else {
1200		/*
1201		 * If the return function does not have a matching entry,
1202		 * then the entry was lost. Instead of just printing
1203		 * the '}' and letting the user guess what function this
1204		 * belongs to, write out the function name. Always do
1205		 * that if the funcgraph-tail option is enabled.
1206		 */
1207		if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1208			trace_seq_puts(s, "}\n");
1209		else
1210			trace_seq_printf(s, "} /* %ps */\n", (void *)func);
1211	}
1212
1213	/* Overrun */
1214	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1215		trace_seq_printf(s, " (Overruns: %u)\n",
1216				 trace->overrun);
1217
1218	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1219			cpu, pid, flags);
1220
1221	return trace_handle_return(s);
1222}
1223
1224static enum print_line_t
1225print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1226		    struct trace_iterator *iter, u32 flags)
1227{
1228	struct trace_array *tr = iter->tr;
1229	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1230	struct fgraph_data *data = iter->private;
1231	struct trace_event *event;
1232	int depth = 0;
1233	int ret;
1234	int i;
1235
1236	if (data)
1237		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1238
1239	print_graph_prologue(iter, s, 0, 0, flags);
1240
1241	/* No time */
1242	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1243
1244	/* Indentation */
1245	if (depth > 0)
1246		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1247			trace_seq_putc(s, ' ');
1248
1249	/* The comment */
1250	trace_seq_puts(s, "/* ");
1251
1252	switch (iter->ent->type) {
1253	case TRACE_BPUTS:
1254		ret = trace_print_bputs_msg_only(iter);
1255		if (ret != TRACE_TYPE_HANDLED)
1256			return ret;
1257		break;
1258	case TRACE_BPRINT:
1259		ret = trace_print_bprintk_msg_only(iter);
1260		if (ret != TRACE_TYPE_HANDLED)
1261			return ret;
1262		break;
1263	case TRACE_PRINT:
1264		ret = trace_print_printk_msg_only(iter);
1265		if (ret != TRACE_TYPE_HANDLED)
1266			return ret;
1267		break;
1268	default:
1269		event = ftrace_find_event(ent->type);
1270		if (!event)
1271			return TRACE_TYPE_UNHANDLED;
1272
1273		ret = event->funcs->trace(iter, sym_flags, event);
1274		if (ret != TRACE_TYPE_HANDLED)
1275			return ret;
1276	}
1277
1278	if (trace_seq_has_overflowed(s))
1279		goto out;
1280
1281	/* Strip ending newline */
1282	if (s->buffer[s->seq.len - 1] == '\n') {
1283		s->buffer[s->seq.len - 1] = '\0';
1284		s->seq.len--;
1285	}
1286
1287	trace_seq_puts(s, " */\n");
1288 out:
1289	return trace_handle_return(s);
1290}
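/*
 * Example (illustrative): a trace_printk("hello") issued inside a traced
 * function is rendered one indent level deeper than the surrounding
 * braces, wrapped in C-comment markers, with its trailing newline
 * stripped above so the closing marker lands on the same line.
 */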
1291
1292
1293enum print_line_t
1294print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1295{
1296	struct ftrace_graph_ent_entry *field;
1297	struct fgraph_data *data = iter->private;
1298	struct trace_entry *entry = iter->ent;
1299	struct trace_seq *s = &iter->seq;
1300	int cpu = iter->cpu;
1301	int ret;
1302
1303	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1304		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1305		return TRACE_TYPE_HANDLED;
1306	}
1307
1308	/*
1309	 * If the last output failed, there's a possibility we need
1310	 * to print out the missing entry which would never go out.
1311	 */
1312	if (data && data->failed) {
1313		field = &data->ent.ent;
1314		iter->cpu = data->cpu;
1315		ret = print_graph_entry(field, s, iter, flags);
1316		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1317			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1318			ret = TRACE_TYPE_NO_CONSUME;
1319		}
1320		iter->cpu = cpu;
1321		return ret;
1322	}
1323
1324	switch (entry->type) {
1325	case TRACE_GRAPH_ENT: {
1326		/*
1327		 * print_graph_entry() may consume the current event,
1328		 * thus @field may become invalid, so we need to save it.
1329		 * sizeof(struct ftrace_graph_ent_entry) is very small,
 1330		 * so it can be safely saved on the stack.
1331		 */
1332		struct ftrace_graph_ent_entry saved;
1333		trace_assign_type(field, entry);
1334		saved = *field;
1335		return print_graph_entry(&saved, s, iter, flags);
1336	}
1337#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
1338	case TRACE_GRAPH_RETADDR_ENT: {
1339		struct fgraph_retaddr_ent_entry saved;
1340		struct fgraph_retaddr_ent_entry *rfield;
1341
1342		trace_assign_type(rfield, entry);
1343		saved = *rfield;
1344		return print_graph_entry((struct ftrace_graph_ent_entry *)&saved, s, iter, flags);
1345	}
1346#endif
1347	case TRACE_GRAPH_RET: {
1348		struct ftrace_graph_ret_entry *field;
1349		trace_assign_type(field, entry);
1350		return print_graph_return(&field->ret, s, entry, iter, flags);
1351	}
1352	case TRACE_STACK:
1353	case TRACE_FN:
 1354		/* don't trace stack and functions as comments */
1355		return TRACE_TYPE_UNHANDLED;
1356
1357	default:
1358		return print_graph_comment(s, entry, iter, flags);
1359	}
1360
1361	return TRACE_TYPE_HANDLED;
1362}
1363
1364static enum print_line_t
1365print_graph_function(struct trace_iterator *iter)
1366{
1367	return print_graph_function_flags(iter, tracer_flags.val);
1368}
1369
1370static enum print_line_t
1371print_graph_function_event(struct trace_iterator *iter, int flags,
1372			   struct trace_event *event)
1373{
1374	return print_graph_function(iter);
1375}
1376
1377static void print_lat_header(struct seq_file *s, u32 flags)
1378{
1379	static const char spaces[] = "                "	/* 16 spaces */
1380		"    "					/* 4 spaces */
1381		"                 ";			/* 17 spaces */
1382	int size = 0;
1383
1384	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1385		size += 16;
1386	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1387		size += 16;
1388	if (flags & TRACE_GRAPH_PRINT_CPU)
1389		size += 4;
1390	if (flags & TRACE_GRAPH_PRINT_PROC)
1391		size += 17;
1392
1393	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1394	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1395	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1396	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1397	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1398}
1399
1400static void __print_graph_headers_flags(struct trace_array *tr,
1401					struct seq_file *s, u32 flags)
1402{
1403	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1404
1405	if (lat)
1406		print_lat_header(s, flags);
1407
1408	/* 1st line */
1409	seq_putc(s, '#');
1410	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1411		seq_puts(s, "     TIME       ");
1412	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1413		seq_puts(s, "   REL TIME     ");
1414	if (flags & TRACE_GRAPH_PRINT_CPU)
1415		seq_puts(s, " CPU");
1416	if (flags & TRACE_GRAPH_PRINT_PROC)
1417		seq_puts(s, "  TASK/PID       ");
1418	if (lat)
1419		seq_puts(s, "||||   ");
1420	if (flags & TRACE_GRAPH_PRINT_DURATION)
1421		seq_puts(s, "  DURATION   ");
1422	seq_puts(s, "               FUNCTION CALLS\n");
1423
1424	/* 2nd line */
1425	seq_putc(s, '#');
1426	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1427		seq_puts(s, "      |         ");
1428	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1429		seq_puts(s, "      |         ");
1430	if (flags & TRACE_GRAPH_PRINT_CPU)
1431		seq_puts(s, " |  ");
1432	if (flags & TRACE_GRAPH_PRINT_PROC)
1433		seq_puts(s, "   |    |        ");
1434	if (lat)
1435		seq_puts(s, "||||   ");
1436	if (flags & TRACE_GRAPH_PRINT_DURATION)
1437		seq_puts(s, "   |   |      ");
1438	seq_puts(s, "               |   |   |   |\n");
1439}
1440
1441static void print_graph_headers(struct seq_file *s)
1442{
1443	print_graph_headers_flags(s, tracer_flags.val);
1444}
1445
1446void print_graph_headers_flags(struct seq_file *s, u32 flags)
1447{
1448	struct trace_iterator *iter = s->private;
1449	struct trace_array *tr = iter->tr;
1450
1451	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1452		return;
1453
1454	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1455		/* print nothing if the buffers are empty */
1456		if (trace_empty(iter))
1457			return;
1458
1459		print_trace_header(s, iter);
1460	}
1461
1462	__print_graph_headers_flags(tr, s, flags);
1463}
1464
1465void graph_trace_open(struct trace_iterator *iter)
1466{
1467	/* pid and depth on the last trace processed */
1468	struct fgraph_data *data;
1469	gfp_t gfpflags;
1470	int cpu;
1471
1472	iter->private = NULL;
1473
1474	/* We can be called in atomic context via ftrace_dump() */
1475	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1476
1477	data = kzalloc(sizeof(*data), gfpflags);
1478	if (!data)
1479		goto out_err;
1480
1481	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1482	if (!data->cpu_data)
1483		goto out_err_free;
1484
1485	for_each_possible_cpu(cpu) {
1486		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1487		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1488		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1489		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1490
1491		*pid = -1;
1492		*depth = 0;
1493		*ignore = 0;
1494		*depth_irq = -1;
1495	}
1496
1497	iter->private = data;
1498
1499	return;
1500
1501 out_err_free:
1502	kfree(data);
1503 out_err:
1504	pr_warn("function graph tracer: not enough memory\n");
1505}
1506
1507void graph_trace_close(struct trace_iterator *iter)
1508{
1509	struct fgraph_data *data = iter->private;
1510
1511	if (data) {
1512		free_percpu(data->cpu_data);
1513		kfree(data);
1514	}
1515}
1516
1517static int
1518func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1519{
1520	if (bit == TRACE_GRAPH_PRINT_IRQS)
1521		ftrace_graph_skip_irqs = !set;
1522
1523	if (bit == TRACE_GRAPH_SLEEP_TIME)
1524		ftrace_graph_sleep_time_control(set);
1525
1526	if (bit == TRACE_GRAPH_GRAPH_TIME)
1527		ftrace_graph_graph_time_control(set);
1528
1529	return 0;
1530}
1531
1532static struct trace_event_functions graph_functions = {
1533	.trace		= print_graph_function_event,
1534};
1535
1536static struct trace_event graph_trace_entry_event = {
1537	.type		= TRACE_GRAPH_ENT,
1538	.funcs		= &graph_functions,
1539};
1540
1541#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
1542static struct trace_event graph_trace_retaddr_entry_event = {
1543	.type		= TRACE_GRAPH_RETADDR_ENT,
1544	.funcs		= &graph_functions,
1545};
1546#endif
1547
1548static struct trace_event graph_trace_ret_event = {
1549	.type		= TRACE_GRAPH_RET,
1550	.funcs		= &graph_functions
1551};
1552
1553static struct tracer graph_trace __tracer_data = {
1554	.name		= "function_graph",
1555	.update_thresh	= graph_trace_update_thresh,
1556	.open		= graph_trace_open,
1557	.pipe_open	= graph_trace_open,
1558	.close		= graph_trace_close,
1559	.pipe_close	= graph_trace_close,
1560	.init		= graph_trace_init,
1561	.reset		= graph_trace_reset,
1562	.print_line	= print_graph_function,
1563	.print_header	= print_graph_headers,
1564	.flags		= &tracer_flags,
1565	.set_flag	= func_graph_set_flag,
1566	.allow_instances = true,
1567#ifdef CONFIG_FTRACE_SELFTEST
1568	.selftest	= trace_selftest_startup_function_graph,
1569#endif
1570};
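/*
 * Typical usage from tracefs (mount point may vary):
 *
 *	# echo function_graph > /sys/kernel/tracing/current_tracer
 *	# cat /sys/kernel/tracing/trace
 */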
1571
1572
1573static ssize_t
1574graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1575		  loff_t *ppos)
1576{
1577	unsigned long val;
1578	int ret;
1579
1580	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1581	if (ret)
1582		return ret;
1583
1584	fgraph_max_depth = val;
1585
1586	*ppos += cnt;
1587
1588	return cnt;
1589}
1590
1591static ssize_t
1592graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1593		 loff_t *ppos)
1594{
 1595	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1596	int n;
1597
1598	n = sprintf(buf, "%d\n", fgraph_max_depth);
1599
1600	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1601}
1602
1603static const struct file_operations graph_depth_fops = {
1604	.open		= tracing_open_generic,
1605	.write		= graph_depth_write,
1606	.read		= graph_depth_read,
1607	.llseek		= generic_file_llseek,
1608};
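/*
 * These fops back the "max_graph_depth" file created below; e.g.
 * (assuming tracefs is mounted at /sys/kernel/tracing):
 *
 *	# echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * limits tracing to three levels of nesting; 0 means no limit.
 */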
1609
1610static __init int init_graph_tracefs(void)
1611{
1612	int ret;
1613
1614	ret = tracing_init_dentry();
1615	if (ret)
1616		return 0;
1617
1618	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
1619			  NULL, &graph_depth_fops);
1620
1621	return 0;
1622}
1623fs_initcall(init_graph_tracefs);
1624
1625static __init int init_graph_trace(void)
1626{
1627	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1628
1629	if (!register_trace_event(&graph_trace_entry_event)) {
1630		pr_warn("Warning: could not register graph trace events\n");
1631		return 1;
1632	}
1633
1634#ifdef CONFIG_FUNCTION_GRAPH_RETADDR
1635	if (!register_trace_event(&graph_trace_retaddr_entry_event)) {
1636		pr_warn("Warning: could not register graph trace retaddr events\n");
1637		return 1;
1638	}
1639#endif
1640
1641	if (!register_trace_event(&graph_trace_ret_event)) {
1642		pr_warn("Warning: could not register graph trace events\n");
1643		return 1;
1644	}
1645
1646	return register_tracer(&graph_trace);
1647}
1648
1649core_initcall(init_graph_trace);