v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Function graph tracer.
   5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   6 * Mostly borrowed from function tracer which
   7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   8 *
   9 */
  10#include <linux/uaccess.h>
  11#include <linux/ftrace.h>
  12#include <linux/interrupt.h>
  13#include <linux/slab.h>
  14#include <linux/fs.h>
  15
  16#include "trace.h"
  17#include "trace_output.h"
  18
  19static bool kill_ftrace_graph;
  20
  21/**
  22 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
  23 *
  24 * ftrace_graph_stop() is called when a severe error is detected in
  25 * the function graph tracing. This function is called by the critical
  26 * paths of function graph to keep those paths from doing any more harm.
  27 */
  28bool ftrace_graph_is_dead(void)
  29{
  30	return kill_ftrace_graph;
  31}
  32
  33/**
  34 * ftrace_graph_stop - set to permanently disable function graph tracing
  35 *
  36 * In case of an error in function graph tracing, this is called
  37 * to try to keep function graph tracing from causing any more harm.
  38 * Usually the error is pretty severe, and this is called to try to
  39 * at least get a warning out to the user.
  40 */
  41void ftrace_graph_stop(void)
  42{
  43	kill_ftrace_graph = true;
  44}
  45
  46/* When set, irq functions will be ignored */
  47static int ftrace_graph_skip_irqs;
  48
  49struct fgraph_cpu_data {
  50	pid_t		last_pid;
  51	int		depth;
  52	int		depth_irq;
  53	int		ignore;
  54	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
  55};
  56
  57struct fgraph_data {
  58	struct fgraph_cpu_data __percpu *cpu_data;
  59
  60	/* Place to preserve last processed entry. */
  61	struct ftrace_graph_ent_entry	ent;
  62	struct ftrace_graph_ret_entry	ret;
  63	int				failed;
  64	int				cpu;
  65};
  66
  67#define TRACE_GRAPH_INDENT	2
  68
  69unsigned int fgraph_max_depth;
  70
  71static struct tracer_opt trace_opts[] = {
  72	/* Display overruns? (for self-debugging purposes) */
  73	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
  74	/* Display CPU ? */
  75	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
  76	/* Display Overhead ? */
  77	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  78	/* Display proc name/pid */
  79	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  80	/* Display duration of execution */
  81	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  82	/* Display absolute time of an entry */
  83	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  84	/* Display interrupts */
  85	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  86	/* Display function name after trailing } */
  87	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  88	/* Include sleep time (scheduled out) between entry and return */
  89	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
  90	/* Include time within nested functions */
  91	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
  92	{ } /* Empty entry */
  93};
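/*
 * The options above show up in tracefs once the tracer is active and
 * can be flipped at runtime, e.g. (tracefs is typically mounted at
 * /sys/kernel/tracing or /sys/kernel/debug/tracing):
 *
 *   echo function_graph > current_tracer
 *   echo funcgraph-proc > trace_options    # enable an option
 *   echo nofuncgraph-irqs > trace_options  # disable via the "no" prefix
 */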
  94
  95static struct tracer_flags tracer_flags = {
  96	/* Don't display overruns, proc, or tail by default */
  97	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  98	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  99	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
 100	.opts = trace_opts
 101};
 102
 103static struct trace_array *graph_array;
 104
 105/*
 106 * The DURATION column is also used to display IRQ signs; the
 107 * following values are used by print_graph_irq and others
 108 * to fill in space in the DURATION column.
 109 */
 110enum {
 111	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 112	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 113	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 114};
 115
 116static void
 117print_graph_duration(struct trace_array *tr, unsigned long long duration,
 118		     struct trace_seq *s, u32 flags);
 119
 120/* Add a function return address to the trace stack on thread info.*/
 121int
 122ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 123			 unsigned long frame_pointer, unsigned long *retp)
 124{
 125	unsigned long long calltime;
 126	int index;
 127
 128	if (unlikely(ftrace_graph_is_dead()))
 129		return -EBUSY;
 130
 131	if (!current->ret_stack)
 132		return -EBUSY;
 133
 134	/*
 135	 * We must make sure the ret_stack is tested before we read
 136	 * anything else.
 137	 */
 138	smp_rmb();
 139
 140	/* The return trace stack is full */
 141	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 142		atomic_inc(&current->trace_overrun);
 143		return -EBUSY;
 144	}
 145
 146	/*
 147	 * The curr_ret_stack is an index to ftrace return stack of
 148	 * current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
 149	 * when the function graph tracer is used.  To support filtering
 150	 * out specific functions, the index is made negative by
 151	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH), so when ftrace
 152	 * sees a negative index it will ignore the record.  The index is
 153	 * recovered when returning from the filtered function by adding
 154	 * FTRACE_NOTRACE_DEPTH back, and then it'll continue to record
 155	 * functions normally.
 156	 *
 157	 * The curr_ret_stack is initialized to -1 and gets increased
 158	 * in this function.  So it can be less than -1 only if it was
 159	 * filtered out via ftrace_graph_notrace_addr(), which can be
 160	 * set from the set_graph_notrace file in tracefs by the user.
 161	 */
 162	if (current->curr_ret_stack < -1)
 163		return -EBUSY;
 164
 165	calltime = trace_clock_local();
 166
 167	index = ++current->curr_ret_stack;
 168	if (ftrace_graph_notrace_addr(func))
 169		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 170	barrier();
 171	current->ret_stack[index].ret = ret;
 172	current->ret_stack[index].func = func;
 173	current->ret_stack[index].calltime = calltime;
 174#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 175	current->ret_stack[index].fp = frame_pointer;
 176#endif
 177#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 178	current->ret_stack[index].retp = retp;
 179#endif
 180	*depth = current->curr_ret_stack;
 181
 182	return 0;
 183}
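/*
 * Worked example of the notrace index trick (values illustrative):
 * with curr_ret_stack == 2, entering a filtered function stores its
 * state at index 3 but leaves curr_ret_stack at 3 - FTRACE_NOTRACE_DEPTH,
 * a large negative value. Pushes for anything called inside the
 * filtered function then bail out early on the "curr_ret_stack < -1"
 * test above, so nothing nested is recorded. On return,
 * ftrace_pop_return_trace() adds FTRACE_NOTRACE_DEPTH back to find the
 * saved state at index 3, and ftrace_return_to_handler() restores
 * curr_ret_stack to 2 so normal recording resumes.
 */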
 184
 185/* Retrieve a function return address from the trace stack on thread info. */
 186static void
 187ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 188			unsigned long frame_pointer)
 189{
 190	int index;
 191
 192	index = current->curr_ret_stack;
 193
 194	/*
 195	 * A negative index here means that it's just returned from a
 196	 * notrace'd function.  Recover index to get an original
 197	 * return address.  See ftrace_push_return_trace().
 198	 *
 199	 * TODO: Need to check whether the stack gets corrupted.
 200	 */
 201	if (index < 0)
 202		index += FTRACE_NOTRACE_DEPTH;
 203
 204	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 205		ftrace_graph_stop();
 206		WARN_ON(1);
 207		/* Might as well panic, otherwise we have nowhere to go */
 208		*ret = (unsigned long)panic;
 209		return;
 210	}
 211
 212#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 213	/*
 214	 * The arch may choose to record the frame pointer used
 215	 * and check it here to make sure that it is what we expect it
 216	 * to be. If gcc does not set the place holder of the return
 217	 * address in the frame pointer, and does a copy instead, then
 218	 * the function graph trace will fail. This test detects this
 219	 * case.
 220	 *
 221	 * Currently, x86_32 with optimize for size (-Os) makes the latest
 222	 * gcc do the above.
 223	 *
 224	 * Note, -mfentry does not use frame pointers, and this test
 225	 *  is not needed if CC_USING_FENTRY is set.
 226	 */
 227	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
 228		ftrace_graph_stop();
 229		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
 230		     "  from func %ps return to %lx\n",
 231		     current->ret_stack[index].fp,
 232		     frame_pointer,
 233		     (void *)current->ret_stack[index].func,
 234		     current->ret_stack[index].ret);
 235		*ret = (unsigned long)panic;
 236		return;
 237	}
 238#endif
 239
 240	*ret = current->ret_stack[index].ret;
 241	trace->func = current->ret_stack[index].func;
 242	trace->calltime = current->ret_stack[index].calltime;
 243	trace->overrun = atomic_read(&current->trace_overrun);
 244	trace->depth = index;
 245}
 246
 247/*
 248 * Send the trace to the ring-buffer.
 249 * @return the original return address.
 250 */
 251unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 252{
 253	struct ftrace_graph_ret trace;
 254	unsigned long ret;
 255
 256	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 257	trace.rettime = trace_clock_local();
 258	barrier();
 259	current->curr_ret_stack--;
 260	/*
 261	 * The curr_ret_stack can be less than -1 only if it was
 262	 * filtered out and it's about to return from the function.
 263	 * Recover the index and continue to trace normal functions.
 264	 */
 265	if (current->curr_ret_stack < -1) {
 266		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
 267		return ret;
 268	}
 269
 270	/*
 271	 * The trace should run after decrementing the ret counter
 272	 * in case an interrupt were to come in. We don't want to
 273	 * lose the interrupt if max_depth is set.
 274	 */
 275	ftrace_graph_return(&trace);
 276
 277	if (unlikely(!ret)) {
 278		ftrace_graph_stop();
 279		WARN_ON(1);
 280		/* Might as well panic. What else to do? */
 281		ret = (unsigned long)panic;
 282	}
 283
 284	return ret;
 285}
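/*
 * Putting the pieces together (sketch of the arch-independent flow):
 * on function entry the arch hook calls ftrace_push_return_trace() to
 * save the real return address, then overwrites the return slot with
 * the special return_to_handler trampoline. When the traced function
 * returns, it lands in that trampoline, which calls
 * ftrace_return_to_handler() above to emit the exit event and hand
 * back the original address to resume at.
 */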
 286
 287/**
 288 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 289 *			   to its original value
 290 *
 291 * This function can be called by stack unwinding code to convert a found stack
 292 * return address ('ret') to its original value, in case the function graph
 293 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 294 * been modified, the unchanged value of 'ret' is returned.
 295 *
 296 * 'idx' is a state variable which should be initialized by the caller to zero
 297 * before the first call.
 298 *
 299 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 300 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 301 */
 302#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 303unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 304				    unsigned long ret, unsigned long *retp)
 305{
 306	int index = task->curr_ret_stack;
 307	int i;
 308
 309	if (ret != (unsigned long)return_to_handler)
 310		return ret;
 311
 312	if (index < -1)
 313		index += FTRACE_NOTRACE_DEPTH;
 314
 315	if (index < 0)
 316		return ret;
 317
 318	for (i = 0; i <= index; i++)
 319		if (task->ret_stack[i].retp == retp)
 320			return task->ret_stack[i].ret;
 321
 322	return ret;
 323}
 324#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
 325unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 326				    unsigned long ret, unsigned long *retp)
 327{
 328	int task_idx;
 329
 330	if (ret != (unsigned long)return_to_handler)
 331		return ret;
 332
 333	task_idx = task->curr_ret_stack;
 334
 335	if (!task->ret_stack || task_idx < *idx)
 336		return ret;
 337
 338	task_idx -= *idx;
 339	(*idx)++;
 340
 341	return task->ret_stack[task_idx].ret;
 342}
 343#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
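/*
 * Sketch of how a stack unwinder might use this (the loop and names
 * below are hypothetical, not from this file):
 *
 *	int graph_idx = 0;
 *
 *	while (unwind_next_frame(&state)) {
 *		unsigned long pc;
 *
 *		pc = ftrace_graph_ret_addr(task, &graph_idx,
 *					   state.ret, state.retp);
 *		// pc is the real caller even where the stack slot was
 *		// rewritten to return_to_handler by the graph tracer.
 *	}
 */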
 344
 345int __trace_graph_entry(struct trace_array *tr,
 346				struct ftrace_graph_ent *trace,
 347				unsigned long flags,
 348				int pc)
 349{
 350	struct trace_event_call *call = &event_funcgraph_entry;
 351	struct ring_buffer_event *event;
 352	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 353	struct ftrace_graph_ent_entry *entry;
 354
 355	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 356					  sizeof(*entry), flags, pc);
 357	if (!event)
 358		return 0;
 359	entry	= ring_buffer_event_data(event);
 360	entry->graph_ent			= *trace;
 361	if (!call_filter_check_discard(call, entry, buffer, event))
 362		trace_buffer_unlock_commit_nostack(buffer, event);
 363
 364	return 1;
 365}
 366
 367static inline int ftrace_graph_ignore_irqs(void)
 368{
 369	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 370		return 0;
 371
 372	return in_irq();
 373}
 374
 375int trace_graph_entry(struct ftrace_graph_ent *trace)
 376{
 377	struct trace_array *tr = graph_array;
 378	struct trace_array_cpu *data;
 379	unsigned long flags;
 380	long disabled;
 381	int ret;
 382	int cpu;
 383	int pc;
 384
 385	if (!ftrace_trace_task(tr))
 386		return 0;
 387
 388	if (ftrace_graph_ignore_func(trace))
 389		return 0;
 390
 391	if (ftrace_graph_ignore_irqs())
 392		return 0;
 393
 394	/*
 395	 * Do not trace a function if it's filtered by set_graph_notrace.
 396	 * Make the index of ret stack negative to indicate that it should
 397	 * ignore further functions.  But it needs its own ret stack entry
 398	 * to recover the original index in order to continue tracing after
 399	 * returning from the function.
 400	 */
 401	if (ftrace_graph_notrace_addr(trace->func))
 402		return 1;
 403
 404	/*
 405	 * Stop here if tracing_thresh is set. We only write function return
 406	 * events to the ring buffer.
 407	 */
 408	if (tracing_thresh)
 409		return 1;
 410
 411	local_irq_save(flags);
 412	cpu = raw_smp_processor_id();
 413	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 414	disabled = atomic_inc_return(&data->disabled);
 415	if (likely(disabled == 1)) {
 416		pc = preempt_count();
 417		ret = __trace_graph_entry(tr, trace, flags, pc);
 418	} else {
 419		ret = 0;
 420	}
 421
 422	atomic_dec(&data->disabled);
 423	local_irq_restore(flags);
 424
 425	return ret;
 426}
 427
 428static void
 429__trace_graph_function(struct trace_array *tr,
 430		unsigned long ip, unsigned long flags, int pc)
 431{
 432	u64 time = trace_clock_local();
 433	struct ftrace_graph_ent ent = {
 434		.func  = ip,
 435		.depth = 0,
 436	};
 437	struct ftrace_graph_ret ret = {
 438		.func     = ip,
 439		.depth    = 0,
 440		.calltime = time,
 441		.rettime  = time,
 442	};
 443
 444	__trace_graph_entry(tr, &ent, flags, pc);
 445	__trace_graph_return(tr, &ret, flags, pc);
 446}
 447
 448void
 449trace_graph_function(struct trace_array *tr,
 450		unsigned long ip, unsigned long parent_ip,
 451		unsigned long flags, int pc)
 452{
 453	__trace_graph_function(tr, ip, flags, pc);
 454}
 455
 456void __trace_graph_return(struct trace_array *tr,
 457				struct ftrace_graph_ret *trace,
 458				unsigned long flags,
 459				int pc)
 460{
 461	struct trace_event_call *call = &event_funcgraph_exit;
 462	struct ring_buffer_event *event;
 463	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 464	struct ftrace_graph_ret_entry *entry;
 465
 466	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 467					  sizeof(*entry), flags, pc);
 468	if (!event)
 469		return;
 470	entry	= ring_buffer_event_data(event);
 471	entry->ret				= *trace;
 472	if (!call_filter_check_discard(call, entry, buffer, event))
 473		trace_buffer_unlock_commit_nostack(buffer, event);
 474}
 475
 476void trace_graph_return(struct ftrace_graph_ret *trace)
 477{
 478	struct trace_array *tr = graph_array;
 479	struct trace_array_cpu *data;
 480	unsigned long flags;
 481	long disabled;
 482	int cpu;
 483	int pc;
 484
 485	local_irq_save(flags);
 486	cpu = raw_smp_processor_id();
 487	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 488	disabled = atomic_inc_return(&data->disabled);
 489	if (likely(disabled == 1)) {
 490		pc = preempt_count();
 491		__trace_graph_return(tr, trace, flags, pc);
 492	}
 493	atomic_dec(&data->disabled);
 494	local_irq_restore(flags);
 495}
 496
 497void set_graph_array(struct trace_array *tr)
 498{
 499	graph_array = tr;
 500
 501	/* Make graph_array visible before we start tracing */
 502
 503	smp_mb();
 504}
 505
 506static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 507{
 508	if (tracing_thresh &&
 509	    (trace->rettime - trace->calltime < tracing_thresh))
 510		return;
 511	else
 512		trace_graph_return(trace);
 513}
 514
 515static int graph_trace_init(struct trace_array *tr)
 516{
 517	int ret;
 518
 519	set_graph_array(tr);
 520	if (tracing_thresh)
 521		ret = register_ftrace_graph(&trace_graph_thresh_return,
 522					    &trace_graph_entry);
 523	else
 524		ret = register_ftrace_graph(&trace_graph_return,
 525					    &trace_graph_entry);
 526	if (ret)
 527		return ret;
 528	tracing_start_cmdline_record();
 529
 530	return 0;
 531}
 532
 533static void graph_trace_reset(struct trace_array *tr)
 534{
 535	tracing_stop_cmdline_record();
 536	unregister_ftrace_graph();
 537}
 538
 539static int graph_trace_update_thresh(struct trace_array *tr)
 540{
 541	graph_trace_reset(tr);
 542	return graph_trace_init(tr);
 543}
 544
 545static int max_bytes_for_cpu;
 546
 547static void print_graph_cpu(struct trace_seq *s, int cpu)
 548{
 549	/*
 550	 * Start with a space character - to make it stand out
 551	 * to the right a bit when trace output is pasted into
 552	 * email:
 553	 */
 554	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 555}
 556
 557#define TRACE_GRAPH_PROCINFO_LENGTH	14
 558
 559static void print_graph_proc(struct trace_seq *s, pid_t pid)
 560{
 561	char comm[TASK_COMM_LEN];
 562	/* sign + log10(MAX_INT) + '\0' */
 563	char pid_str[11];
 564	int spaces = 0;
 565	int len;
 566	int i;
 567
 568	trace_find_cmdline(pid, comm);
 569	comm[7] = '\0';
 570	sprintf(pid_str, "%d", pid);
 571
 572	/* 1 stands for the "-" character */
 573	len = strlen(comm) + strlen(pid_str) + 1;
 574
 575	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 576		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
 577
 578	/* First spaces to align center */
 579	for (i = 0; i < spaces / 2; i++)
 580		trace_seq_putc(s, ' ');
 581
 582	trace_seq_printf(s, "%s-%s", comm, pid_str);
 583
 584	/* Last spaces to align center */
 585	for (i = 0; i < spaces - (spaces / 2); i++)
 586		trace_seq_putc(s, ' ');
 587}
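/*
 * Worked example of the centering above: comm "bash", pid 1755 gives
 * len = 4 + 4 + 1 = 9, so spaces = 14 - 9 = 5: two spaces are printed
 * before and three after, yielding "  bash-1755   " (14 chars wide).
 */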
 588
 589
 590static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 591{
 592	trace_seq_putc(s, ' ');
 593	trace_print_lat_fmt(s, entry);
 594}
 595
 596/* If the pid changed since the last trace, output this event */
 597static void
 598verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 599{
 600	pid_t prev_pid;
 601	pid_t *last_pid;
 602
 603	if (!data)
 604		return;
 605
 606	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 607
 608	if (*last_pid == pid)
 609		return;
 610
 611	prev_pid = *last_pid;
 612	*last_pid = pid;
 613
 614	if (prev_pid == -1)
 615		return;
 616/*
 617 * Context-switch trace line:
 618
 619 ------------------------------------------
 620 | 1)  migration/0--1  =>  sshd-1755
 621 ------------------------------------------
 622
 623 */
 624	trace_seq_puts(s, " ------------------------------------------\n");
 625	print_graph_cpu(s, cpu);
 626	print_graph_proc(s, prev_pid);
 627	trace_seq_puts(s, " => ");
 628	print_graph_proc(s, pid);
 629	trace_seq_puts(s, "\n ------------------------------------------\n\n");
 630}
 631
 632static struct ftrace_graph_ret_entry *
 633get_return_for_leaf(struct trace_iterator *iter,
 634		struct ftrace_graph_ent_entry *curr)
 635{
 636	struct fgraph_data *data = iter->private;
 637	struct ring_buffer_iter *ring_iter = NULL;
 638	struct ring_buffer_event *event;
 639	struct ftrace_graph_ret_entry *next;
 640
 641	/*
 642	 * If the previous output failed to write to the seq buffer,
 643	 * then we just reuse the data from before.
 644	 */
 645	if (data && data->failed) {
 646		curr = &data->ent;
 647		next = &data->ret;
 648	} else {
 649
 650		ring_iter = trace_buffer_iter(iter, iter->cpu);
 651
 652		/* First peek to compare current entry and the next one */
 653		if (ring_iter)
 654			event = ring_buffer_iter_peek(ring_iter, NULL);
 655		else {
 656			/*
 657			 * We need to consume the current entry to see
 658			 * the next one.
 659			 */
 660			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 661					    NULL, NULL);
 662			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 663						 NULL, NULL);
 664		}
 665
 666		if (!event)
 667			return NULL;
 668
 669		next = ring_buffer_event_data(event);
 670
 671		if (data) {
 672			/*
 673			 * Save current and next entries for later reference
 674			 * if the output fails.
 675			 */
 676			data->ent = *curr;
 677			/*
 678			 * If the next event is not a return type, then
 679			 * we only care about what type it is. Otherwise we can
 680			 * safely copy the entire event.
 681			 */
 682			if (next->ent.type == TRACE_GRAPH_RET)
 683				data->ret = *next;
 684			else
 685				data->ret.ent.type = next->ent.type;
 686		}
 687	}
 688
 689	if (next->ent.type != TRACE_GRAPH_RET)
 690		return NULL;
 691
 692	if (curr->ent.pid != next->ent.pid ||
 693			curr->graph_ent.func != next->ret.func)
 694		return NULL;
 695
 696	/* this is a leaf, now advance the iterator */
 697	if (ring_iter)
 698		ring_buffer_read(ring_iter, NULL);
 699
 700	return next;
 701}
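/*
 * This leaf test is what selects between the two output styles: an
 * entry event immediately followed by its own return event (same pid
 * and same func) collapses into a single "func();" line in
 * print_graph_entry_leaf(), while anything else opens a "func() {"
 * block in print_graph_entry_nested() that a later return closes
 * with "}".
 */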
 702
 703static void print_graph_abs_time(u64 t, struct trace_seq *s)
 704{
 705	unsigned long usecs_rem;
 706
 707	usecs_rem = do_div(t, NSEC_PER_SEC);
 708	usecs_rem /= 1000;
 709
 710	trace_seq_printf(s, "%5lu.%06lu |  ",
 711			 (unsigned long)t, usecs_rem);
 712}
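/*
 * For example, t = 1234567890123 ns: do_div() leaves t = 1234 s with a
 * 567890123 ns remainder, which becomes 567890 us, printed as
 * " 1234.567890 |  ".
 */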
 713
 714static void
 715print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 716		enum trace_type type, int cpu, pid_t pid, u32 flags)
 717{
 718	struct trace_array *tr = iter->tr;
 719	struct trace_seq *s = &iter->seq;
 720	struct trace_entry *ent = iter->ent;
 721
 722	if (addr < (unsigned long)__irqentry_text_start ||
 723		addr >= (unsigned long)__irqentry_text_end)
 724		return;
 725
 726	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 727		/* Absolute time */
 728		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 729			print_graph_abs_time(iter->ts, s);
 730
 731		/* Cpu */
 732		if (flags & TRACE_GRAPH_PRINT_CPU)
 733			print_graph_cpu(s, cpu);
 734
 735		/* Proc */
 736		if (flags & TRACE_GRAPH_PRINT_PROC) {
 737			print_graph_proc(s, pid);
 738			trace_seq_puts(s, " | ");
 739		}
 740
 741		/* Latency format */
 742		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 743			print_graph_lat_fmt(s, ent);
 744	}
 745
 746	/* No overhead */
 747	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 748
 749	if (type == TRACE_GRAPH_ENT)
 750		trace_seq_puts(s, "==========>");
 751	else
 752		trace_seq_puts(s, "<==========");
 753
 754	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 755	trace_seq_putc(s, '\n');
 756}
 757
 758void
 759trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 760{
 761	unsigned long nsecs_rem = do_div(duration, 1000);
 762	/* log10(ULONG_MAX) + '\0' */
 763	char usecs_str[21];
 764	char nsecs_str[5];
 765	int len;
 766	int i;
 767
 768	sprintf(usecs_str, "%lu", (unsigned long) duration);
 769
 770	/* Print usecs */
 771	trace_seq_printf(s, "%s", usecs_str);
 772
 773	len = strlen(usecs_str);
 774
 775	/* Print nsecs (we don't want to exceed 7 digits) */
 776	if (len < 7) {
 777		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
 778
 779		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 780		trace_seq_printf(s, ".%s", nsecs_str);
 781		len += strlen(nsecs_str) + 1;
 782	}
 783
 784	trace_seq_puts(s, " us ");
 785
 786	/* Print remaining spaces to fit the row's width */
 787	for (i = len; i < 8; i++)
 788		trace_seq_putc(s, ' ');
 789}
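/*
 * For example, duration = 12345 ns: do_div() leaves 12 us with a
 * 345 ns remainder; since len = 2 < 7 the fraction is appended, giving
 * "12.345 us " plus two padding spaces to fill the 8-character column.
 */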
 790
 791static void
 792print_graph_duration(struct trace_array *tr, unsigned long long duration,
 793		     struct trace_seq *s, u32 flags)
 794{
 795	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
 796	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 797		return;
 798
 799	/* No real data, just filling the column with spaces */
 800	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
 801	case FLAGS_FILL_FULL:
 802		trace_seq_puts(s, "              |  ");
 803		return;
 804	case FLAGS_FILL_START:
 805		trace_seq_puts(s, "  ");
 806		return;
 807	case FLAGS_FILL_END:
 808		trace_seq_puts(s, " |");
 809		return;
 810	}
 811
 812	/* Signal an overhead of execution time to the output */
 813	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
 814		trace_seq_printf(s, "%c ", trace_find_mark(duration));
 815	else
 816		trace_seq_puts(s, "  ");
 817
 818	trace_print_graph_duration(duration, s);
 819	trace_seq_puts(s, "|  ");
 820}
 821
 822/* Case of a leaf function on its call entry */
 823static enum print_line_t
 824print_graph_entry_leaf(struct trace_iterator *iter,
 825		struct ftrace_graph_ent_entry *entry,
 826		struct ftrace_graph_ret_entry *ret_entry,
 827		struct trace_seq *s, u32 flags)
 828{
 829	struct fgraph_data *data = iter->private;
 830	struct trace_array *tr = iter->tr;
 831	struct ftrace_graph_ret *graph_ret;
 832	struct ftrace_graph_ent *call;
 833	unsigned long long duration;
 834	int i;
 835
 836	graph_ret = &ret_entry->ret;
 837	call = &entry->graph_ent;
 838	duration = graph_ret->rettime - graph_ret->calltime;
 839
 840	if (data) {
 841		struct fgraph_cpu_data *cpu_data;
 842		int cpu = iter->cpu;
 843
 844		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 845
 846		/* If a graph tracer ignored set_graph_notrace */
 847		if (call->depth < -1)
 848			call->depth += FTRACE_NOTRACE_DEPTH;
 849
 850		/*
 851		 * Comments display at + 1 to depth. Since
 852		 * this is a leaf function, keep the comments
 853		 * equal to this depth.
 854		 */
 855		cpu_data->depth = call->depth - 1;
 856
 857		/* No need to keep this function around for this depth */
 858		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 859		    !WARN_ON_ONCE(call->depth < 0))
 860			cpu_data->enter_funcs[call->depth] = 0;
 861	}
 862
 863	/* Overhead and duration */
 864	print_graph_duration(tr, duration, s, flags);
 865
 866	/* Function */
 867	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 868		trace_seq_putc(s, ' ');
 869
 870	trace_seq_printf(s, "%ps();\n", (void *)call->func);
 871
 872	return trace_handle_return(s);
 873}
 874
 875static enum print_line_t
 876print_graph_entry_nested(struct trace_iterator *iter,
 877			 struct ftrace_graph_ent_entry *entry,
 878			 struct trace_seq *s, int cpu, u32 flags)
 879{
 880	struct ftrace_graph_ent *call = &entry->graph_ent;
 881	struct fgraph_data *data = iter->private;
 882	struct trace_array *tr = iter->tr;
 883	int i;
 884
 885	if (data) {
 886		struct fgraph_cpu_data *cpu_data;
 887		int cpu = iter->cpu;
 888
 889		/* If a graph tracer ignored set_graph_notrace */
 890		if (call->depth < -1)
 891			call->depth += FTRACE_NOTRACE_DEPTH;
 892
 893		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 894		cpu_data->depth = call->depth;
 895
 896		/* Save this function pointer to see if the exit matches */
 897		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 898		    !WARN_ON_ONCE(call->depth < 0))
 899			cpu_data->enter_funcs[call->depth] = call->func;
 900	}
 901
 902	/* No time */
 903	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 904
 905	/* Function */
 906	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 907		trace_seq_putc(s, ' ');
 908
 909	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
 910
 911	if (trace_seq_has_overflowed(s))
 912		return TRACE_TYPE_PARTIAL_LINE;
 913
 914	/*
 915	 * we already consumed the current entry to check the next one
 916	 * and see if this is a leaf.
 917	 */
 918	return TRACE_TYPE_NO_CONSUME;
 919}
 920
 921static void
 922print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 923		     int type, unsigned long addr, u32 flags)
 924{
 925	struct fgraph_data *data = iter->private;
 926	struct trace_entry *ent = iter->ent;
 927	struct trace_array *tr = iter->tr;
 928	int cpu = iter->cpu;
 929
 930	/* Pid */
 931	verif_pid(s, ent->pid, cpu, data);
 932
 933	if (type)
 934		/* Interrupt */
 935		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 936
 937	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 938		return;
 939
 940	/* Absolute time */
 941	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 942		print_graph_abs_time(iter->ts, s);
 943
 944	/* Cpu */
 945	if (flags & TRACE_GRAPH_PRINT_CPU)
 946		print_graph_cpu(s, cpu);
 947
 948	/* Proc */
 949	if (flags & TRACE_GRAPH_PRINT_PROC) {
 950		print_graph_proc(s, ent->pid);
 951		trace_seq_puts(s, " | ");
 952	}
 953
 954	/* Latency format */
 955	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 956		print_graph_lat_fmt(s, ent);
 957
 958	return;
 959}
 960
 961/*
 962 * Entry check for irq code
 963 *
 964 * returns 1 if
 965 *  - we are inside irq code
 966 *  - we just entered irq code
 967 *
 968 * returns 0 if
 969 *  - funcgraph-interrupts option is set
 970 *  - we are not inside irq code
 971 */
 972static int
 973check_irq_entry(struct trace_iterator *iter, u32 flags,
 974		unsigned long addr, int depth)
 975{
 976	int cpu = iter->cpu;
 977	int *depth_irq;
 978	struct fgraph_data *data = iter->private;
 979
 980	/*
 981	 * If we are either displaying irqs, or we got called as
 982	 * a graph event and private data does not exist,
 983	 * then we bypass the irq check.
 984	 */
 985	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 986	    (!data))
 987		return 0;
 988
 989	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 990
 991	/*
 992	 * We are inside the irq code
 993	 */
 994	if (*depth_irq >= 0)
 995		return 1;
 996
 997	if ((addr < (unsigned long)__irqentry_text_start) ||
 998	    (addr >= (unsigned long)__irqentry_text_end))
 999		return 0;
1000
1001	/*
1002	 * We are entering irq code.
1003	 */
1004	*depth_irq = depth;
1005	return 1;
1006}
1007
1008/*
1009 * Return check for irq code
1010 *
1011 * returns 1 if
1012 *  - we are inside irq code
1013 *  - we just left irq code
1014 *
1015 * returns 0 if
1016 *  - funcgraph-interrupts option is set
1017 *  - we are not inside irq code
1018 */
1019static int
1020check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
1021{
1022	int cpu = iter->cpu;
1023	int *depth_irq;
1024	struct fgraph_data *data = iter->private;
1025
1026	/*
1027	 * If we are either displaying irqs, or we got called as
1028	 * a graph event and private data does not exist,
1029	 * then we bypass the irq check.
1030	 */
1031	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1032	    (!data))
1033		return 0;
1034
1035	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1036
1037	/*
1038	 * We are not inside the irq code.
1039	 */
1040	if (*depth_irq == -1)
1041		return 0;
1042
1043	/*
1044	 * We are inside the irq code, and this is the returning entry.
1045	 * Let's not trace it and clear the entry depth, since
1046	 * we are out of irq code.
1047	 *
1048	 * This condition ensures that we 'leave the irq code' once
1049	 * we are out of the entry depth. Thus protecting us from
1050	 * the RETURN entry loss.
1051	 */
1052	if (*depth_irq >= depth) {
1053		*depth_irq = -1;
1054		return 1;
1055	}
1056
1057	/*
1058	 * We are inside the irq code, and this is not the entry.
1059	 */
1060	return 1;
1061}
1062
1063static enum print_line_t
1064print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1065			struct trace_iterator *iter, u32 flags)
1066{
1067	struct fgraph_data *data = iter->private;
1068	struct ftrace_graph_ent *call = &field->graph_ent;
1069	struct ftrace_graph_ret_entry *leaf_ret;
1070	static enum print_line_t ret;
1071	int cpu = iter->cpu;
1072
1073	if (check_irq_entry(iter, flags, call->func, call->depth))
1074		return TRACE_TYPE_HANDLED;
1075
1076	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
1077
1078	leaf_ret = get_return_for_leaf(iter, field);
1079	if (leaf_ret)
1080		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1081	else
1082		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1083
1084	if (data) {
1085		/*
1086		 * If we failed to write our output, then we need to make
1087		 * note of it, because we already consumed our entry.
1088		 */
1089		if (s->full) {
1090			data->failed = 1;
1091			data->cpu = cpu;
1092		} else
1093			data->failed = 0;
1094	}
1095
1096	return ret;
1097}
1098
1099static enum print_line_t
1100print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1101		   struct trace_entry *ent, struct trace_iterator *iter,
1102		   u32 flags)
1103{
1104	unsigned long long duration = trace->rettime - trace->calltime;
1105	struct fgraph_data *data = iter->private;
1106	struct trace_array *tr = iter->tr;
1107	pid_t pid = ent->pid;
1108	int cpu = iter->cpu;
1109	int func_match = 1;
1110	int i;
1111
1112	if (check_irq_return(iter, flags, trace->depth))
1113		return TRACE_TYPE_HANDLED;
1114
1115	if (data) {
1116		struct fgraph_cpu_data *cpu_data;
1117		int cpu = iter->cpu;
1118
1119		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1120
1121		/*
1122		 * Comments display at + 1 to depth. This is the
1123		 * return from a function, we now want the comments
1124		 * to display at the same level of the bracket.
1125		 */
1126		cpu_data->depth = trace->depth - 1;
1127
1128		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1129		    !WARN_ON_ONCE(trace->depth < 0)) {
1130			if (cpu_data->enter_funcs[trace->depth] != trace->func)
1131				func_match = 0;
1132			cpu_data->enter_funcs[trace->depth] = 0;
1133		}
1134	}
1135
1136	print_graph_prologue(iter, s, 0, 0, flags);
1137
1138	/* Overhead and duration */
1139	print_graph_duration(tr, duration, s, flags);
1140
1141	/* Closing brace */
1142	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1143		trace_seq_putc(s, ' ');
1144
1145	/*
1146	 * If the return function does not have a matching entry,
1147	 * then the entry was lost. Instead of just printing
1148	 * the '}' and letting the user guess what function this
1149	 * belongs to, write out the function name. Always do
1150	 * that if the funcgraph-tail option is enabled.
1151	 */
1152	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1153		trace_seq_puts(s, "}\n");
1154	else
1155		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1156
1157	/* Overrun */
1158	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1159		trace_seq_printf(s, " (Overruns: %lu)\n",
1160				 trace->overrun);
1161
1162	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1163			cpu, pid, flags);
1164
1165	return trace_handle_return(s);
1166}
1167
1168static enum print_line_t
1169print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1170		    struct trace_iterator *iter, u32 flags)
1171{
1172	struct trace_array *tr = iter->tr;
1173	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1174	struct fgraph_data *data = iter->private;
1175	struct trace_event *event;
1176	int depth = 0;
1177	int ret;
1178	int i;
1179
1180	if (data)
1181		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1182
1183	print_graph_prologue(iter, s, 0, 0, flags);
1184
1185	/* No time */
1186	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1187
1188	/* Indentation */
1189	if (depth > 0)
1190		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1191			trace_seq_putc(s, ' ');
1192
1193	/* The comment */
1194	trace_seq_puts(s, "/* ");
1195
1196	switch (iter->ent->type) {
1197	case TRACE_BPUTS:
1198		ret = trace_print_bputs_msg_only(iter);
1199		if (ret != TRACE_TYPE_HANDLED)
1200			return ret;
1201		break;
1202	case TRACE_BPRINT:
1203		ret = trace_print_bprintk_msg_only(iter);
1204		if (ret != TRACE_TYPE_HANDLED)
1205			return ret;
1206		break;
1207	case TRACE_PRINT:
1208		ret = trace_print_printk_msg_only(iter);
1209		if (ret != TRACE_TYPE_HANDLED)
1210			return ret;
1211		break;
1212	default:
1213		event = ftrace_find_event(ent->type);
1214		if (!event)
1215			return TRACE_TYPE_UNHANDLED;
1216
1217		ret = event->funcs->trace(iter, sym_flags, event);
1218		if (ret != TRACE_TYPE_HANDLED)
1219			return ret;
1220	}
1221
1222	if (trace_seq_has_overflowed(s))
1223		goto out;
1224
1225	/* Strip ending newline */
1226	if (s->buffer[s->seq.len - 1] == '\n') {
1227		s->buffer[s->seq.len - 1] = '\0';
1228		s->seq.len--;
1229	}
1230
1231	trace_seq_puts(s, " */\n");
1232 out:
1233	return trace_handle_return(s);
1234}
1235
1236
1237enum print_line_t
1238print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1239{
1240	struct ftrace_graph_ent_entry *field;
1241	struct fgraph_data *data = iter->private;
1242	struct trace_entry *entry = iter->ent;
1243	struct trace_seq *s = &iter->seq;
1244	int cpu = iter->cpu;
1245	int ret;
1246
1247	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1248		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1249		return TRACE_TYPE_HANDLED;
1250	}
1251
1252	/*
1253	 * If the last output failed, there's a possibility we need
1254	 * to print out the missing entry which would never go out.
1255	 */
1256	if (data && data->failed) {
1257		field = &data->ent;
1258		iter->cpu = data->cpu;
1259		ret = print_graph_entry(field, s, iter, flags);
1260		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1261			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1262			ret = TRACE_TYPE_NO_CONSUME;
1263		}
1264		iter->cpu = cpu;
1265		return ret;
1266	}
1267
1268	switch (entry->type) {
1269	case TRACE_GRAPH_ENT: {
1270		/*
1271		 * print_graph_entry() may consume the current event,
1272		 * thus @field may become invalid, so we need to save it.
1273		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1274		 * it can be safely saved on the stack.
1275		 */
1276		struct ftrace_graph_ent_entry saved;
1277		trace_assign_type(field, entry);
1278		saved = *field;
1279		return print_graph_entry(&saved, s, iter, flags);
1280	}
1281	case TRACE_GRAPH_RET: {
1282		struct ftrace_graph_ret_entry *field;
1283		trace_assign_type(field, entry);
1284		return print_graph_return(&field->ret, s, entry, iter, flags);
1285	}
1286	case TRACE_STACK:
1287	case TRACE_FN:
1288		/* don't trace stack and functions as comments */
1289		return TRACE_TYPE_UNHANDLED;
1290
1291	default:
1292		return print_graph_comment(s, entry, iter, flags);
1293	}
1294
1295	return TRACE_TYPE_HANDLED;
1296}
1297
1298static enum print_line_t
1299print_graph_function(struct trace_iterator *iter)
1300{
1301	return print_graph_function_flags(iter, tracer_flags.val);
1302}
1303
1304static enum print_line_t
1305print_graph_function_event(struct trace_iterator *iter, int flags,
1306			   struct trace_event *event)
1307{
1308	return print_graph_function(iter);
1309}
1310
1311static void print_lat_header(struct seq_file *s, u32 flags)
1312{
1313	static const char spaces[] = "                "	/* 16 spaces */
1314		"    "					/* 4 spaces */
1315		"                 ";			/* 17 spaces */
1316	int size = 0;
1317
1318	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1319		size += 16;
1320	if (flags & TRACE_GRAPH_PRINT_CPU)
1321		size += 4;
1322	if (flags & TRACE_GRAPH_PRINT_PROC)
1323		size += 17;
1324
1325	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1326	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1327	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1328	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1329	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1330}
1331
1332static void __print_graph_headers_flags(struct trace_array *tr,
1333					struct seq_file *s, u32 flags)
1334{
1335	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1336
1337	if (lat)
1338		print_lat_header(s, flags);
1339
1340	/* 1st line */
1341	seq_putc(s, '#');
1342	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1343		seq_puts(s, "     TIME       ");
1344	if (flags & TRACE_GRAPH_PRINT_CPU)
1345		seq_puts(s, " CPU");
1346	if (flags & TRACE_GRAPH_PRINT_PROC)
1347		seq_puts(s, "  TASK/PID       ");
1348	if (lat)
1349		seq_puts(s, "||||");
1350	if (flags & TRACE_GRAPH_PRINT_DURATION)
1351		seq_puts(s, "  DURATION   ");
1352	seq_puts(s, "               FUNCTION CALLS\n");
1353
1354	/* 2nd line */
1355	seq_putc(s, '#');
1356	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1357		seq_puts(s, "      |         ");
1358	if (flags & TRACE_GRAPH_PRINT_CPU)
1359		seq_puts(s, " |  ");
1360	if (flags & TRACE_GRAPH_PRINT_PROC)
1361		seq_puts(s, "   |    |        ");
1362	if (lat)
1363		seq_puts(s, "||||");
1364	if (flags & TRACE_GRAPH_PRINT_DURATION)
1365		seq_puts(s, "   |   |      ");
1366	seq_puts(s, "               |   |   |   |\n");
1367}
1368
1369static void print_graph_headers(struct seq_file *s)
1370{
1371	print_graph_headers_flags(s, tracer_flags.val);
1372}
1373
1374void print_graph_headers_flags(struct seq_file *s, u32 flags)
1375{
1376	struct trace_iterator *iter = s->private;
1377	struct trace_array *tr = iter->tr;
1378
1379	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1380		return;
1381
1382	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1383		/* print nothing if the buffers are empty */
1384		if (trace_empty(iter))
1385			return;
1386
1387		print_trace_header(s, iter);
1388	}
1389
1390	__print_graph_headers_flags(tr, s, flags);
1391}
1392
1393void graph_trace_open(struct trace_iterator *iter)
1394{
1395	/* pid and depth on the last trace processed */
1396	struct fgraph_data *data;
1397	gfp_t gfpflags;
1398	int cpu;
1399
1400	iter->private = NULL;
1401
1402	/* We can be called in atomic context via ftrace_dump() */
1403	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1404
1405	data = kzalloc(sizeof(*data), gfpflags);
1406	if (!data)
1407		goto out_err;
1408
1409	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1410	if (!data->cpu_data)
1411		goto out_err_free;
1412
1413	for_each_possible_cpu(cpu) {
1414		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1415		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1416		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1417		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1418
1419		*pid = -1;
1420		*depth = 0;
1421		*ignore = 0;
1422		*depth_irq = -1;
1423	}
1424
1425	iter->private = data;
1426
1427	return;
1428
1429 out_err_free:
1430	kfree(data);
1431 out_err:
1432	pr_warn("function graph tracer: not enough memory\n");
1433}
1434
1435void graph_trace_close(struct trace_iterator *iter)
1436{
1437	struct fgraph_data *data = iter->private;
1438
1439	if (data) {
1440		free_percpu(data->cpu_data);
1441		kfree(data);
1442	}
1443}
1444
1445static int
1446func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1447{
1448	if (bit == TRACE_GRAPH_PRINT_IRQS)
1449		ftrace_graph_skip_irqs = !set;
1450
1451	if (bit == TRACE_GRAPH_SLEEP_TIME)
1452		ftrace_graph_sleep_time_control(set);
1453
1454	if (bit == TRACE_GRAPH_GRAPH_TIME)
1455		ftrace_graph_graph_time_control(set);
1456
1457	return 0;
1458}
1459
1460static struct trace_event_functions graph_functions = {
1461	.trace		= print_graph_function_event,
1462};
1463
1464static struct trace_event graph_trace_entry_event = {
1465	.type		= TRACE_GRAPH_ENT,
1466	.funcs		= &graph_functions,
1467};
1468
1469static struct trace_event graph_trace_ret_event = {
1470	.type		= TRACE_GRAPH_RET,
1471	.funcs		= &graph_functions
1472};
1473
1474static struct tracer graph_trace __tracer_data = {
1475	.name		= "function_graph",
1476	.update_thresh	= graph_trace_update_thresh,
1477	.open		= graph_trace_open,
1478	.pipe_open	= graph_trace_open,
1479	.close		= graph_trace_close,
1480	.pipe_close	= graph_trace_close,
1481	.init		= graph_trace_init,
1482	.reset		= graph_trace_reset,
1483	.print_line	= print_graph_function,
1484	.print_header	= print_graph_headers,
1485	.flags		= &tracer_flags,
1486	.set_flag	= func_graph_set_flag,
1487#ifdef CONFIG_FTRACE_SELFTEST
1488	.selftest	= trace_selftest_startup_function_graph,
1489#endif
1490};
1491
1492
1493static ssize_t
1494graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1495		  loff_t *ppos)
1496{
1497	unsigned long val;
1498	int ret;
1499
1500	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1501	if (ret)
1502		return ret;
1503
1504	fgraph_max_depth = val;
1505
1506	*ppos += cnt;
1507
1508	return cnt;
1509}
1510
1511static ssize_t
1512graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1513		 loff_t *ppos)
1514{
1515	char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
1516	int n;
1517
1518	n = sprintf(buf, "%d\n", fgraph_max_depth);
1519
1520	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1521}
1522
1523static const struct file_operations graph_depth_fops = {
1524	.open		= tracing_open_generic,
1525	.write		= graph_depth_write,
1526	.read		= graph_depth_read,
1527	.llseek		= generic_file_llseek,
1528};
1529
1530static __init int init_graph_tracefs(void)
1531{
1532	struct dentry *d_tracer;
1533
1534	d_tracer = tracing_init_dentry();
1535	if (IS_ERR(d_tracer))
1536		return 0;
1537
1538	trace_create_file("max_graph_depth", 0644, d_tracer,
1539			  NULL, &graph_depth_fops);
1540
1541	return 0;
1542}
1543fs_initcall(init_graph_tracefs);
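/*
 * Example use of the file created above (path assumes the usual
 * tracefs mount point):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth   # trace 3 levels deep
 *   echo 0 > /sys/kernel/tracing/max_graph_depth   # 0 removes the limit
 */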
1544
1545static __init int init_graph_trace(void)
1546{
1547	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1548
1549	if (!register_trace_event(&graph_trace_entry_event)) {
1550		pr_warn("Warning: could not register graph trace events\n");
1551		return 1;
1552	}
1553
1554	if (!register_trace_event(&graph_trace_ret_event)) {
1555		pr_warn("Warning: could not register graph trace events\n");
1556		return 1;
1557	}
1558
1559	return register_tracer(&graph_trace);
1560}
1561
1562core_initcall(init_graph_trace);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Function graph tracer.
   5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   6 * Mostly borrowed from function tracer which
   7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   8 *
   9 */
  10#include <linux/uaccess.h>
  11#include <linux/ftrace.h>
  12#include <linux/interrupt.h>
  13#include <linux/slab.h>
  14#include <linux/fs.h>
  15
  16#include "trace.h"
  17#include "trace_output.h"
  18
  19/* When set, irq functions will be ignored */
  20static int ftrace_graph_skip_irqs;
  21
  22struct fgraph_cpu_data {
  23	pid_t		last_pid;
  24	int		depth;
  25	int		depth_irq;
  26	int		ignore;
  27	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
  28};
  29
  30struct fgraph_data {
  31	struct fgraph_cpu_data __percpu *cpu_data;
  32
  33	/* Place to preserve last processed entry. */
  34	struct ftrace_graph_ent_entry	ent;
  35	struct ftrace_graph_ret_entry	ret;
  36	int				failed;
  37	int				cpu;
  38};
  39
  40#define TRACE_GRAPH_INDENT	2
  41
  42unsigned int fgraph_max_depth;
  43
  44static struct tracer_opt trace_opts[] = {
  45	/* Display overruns? (for self-debugging purposes) */
  46	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
  47	/* Display CPU ? */
  48	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
  49	/* Display Overhead ? */
  50	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  51	/* Display proc name/pid */
  52	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  53	/* Display duration of execution */
  54	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  55	/* Display absolute time of an entry */
  56	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  57	/* Display interrupts */
  58	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  59	/* Display function name after trailing } */
  60	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  61	/* Include sleep time (scheduled out) between entry and return */
  62	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
  63
  64#ifdef CONFIG_FUNCTION_PROFILER
  65	/* Include time within nested functions */
  66	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
  67#endif
  68
  69	{ } /* Empty entry */
  70};
  71
  72static struct tracer_flags tracer_flags = {
  73	/* Don't display overruns, proc, or tail by default */
  74	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  75	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  76	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
  77	.opts = trace_opts
  78};
  79
  80static struct trace_array *graph_array;
  81
  82/*
  83 * The DURATION column is also used to display IRQ signs; the
  84 * following values are used by print_graph_irq and others
  85 * to fill in space in the DURATION column.
  86 */
  87enum {
  88	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
  89	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
  90	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
  91};
  92
  93static void
  94print_graph_duration(struct trace_array *tr, unsigned long long duration,
  95		     struct trace_seq *s, u32 flags);
  96
  97int __trace_graph_entry(struct trace_array *tr,
  98				struct ftrace_graph_ent *trace,
  99				unsigned int trace_ctx)
 100{
 101	struct trace_event_call *call = &event_funcgraph_entry;
 102	struct ring_buffer_event *event;
 103	struct trace_buffer *buffer = tr->array_buffer.buffer;
 104	struct ftrace_graph_ent_entry *entry;
 105
 106	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 107					  sizeof(*entry), trace_ctx);
 108	if (!event)
 109		return 0;
 110	entry	= ring_buffer_event_data(event);
 111	entry->graph_ent			= *trace;
 112	if (!call_filter_check_discard(call, entry, buffer, event))
 113		trace_buffer_unlock_commit_nostack(buffer, event);
 114
 115	return 1;
 116}
 117
 118static inline int ftrace_graph_ignore_irqs(void)
 119{
 120	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 121		return 0;
 122
 123	return in_irq();
 124}
 125
 126int trace_graph_entry(struct ftrace_graph_ent *trace)
 127{
 128	struct trace_array *tr = graph_array;
 129	struct trace_array_cpu *data;
 130	unsigned long flags;
 131	unsigned int trace_ctx;
 132	long disabled;
 133	int ret;
 134	int cpu;
 135
 136	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
 137		return 0;
 138
 139	/*
 140	 * Do not trace a function if it's filtered by set_graph_notrace.
 141	 * Make the index of ret stack negative to indicate that it should
 142	 * ignore further functions.  But it needs its own ret stack entry
 143	 * to recover the original index in order to continue tracing after
 144	 * returning from the function.
 145	 */
 146	if (ftrace_graph_notrace_addr(trace->func)) {
 147		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
 148		/*
 149		 * Need to return 1 to have the return called
 150		 * that will clear the NOTRACE bit.
 151		 */
 152		return 1;
 153	}
 154
 155	if (!ftrace_trace_task(tr))
 156		return 0;
 157
 158	if (ftrace_graph_ignore_func(trace))
 159		return 0;
 160
 161	if (ftrace_graph_ignore_irqs())
 162		return 0;
 163
 164	/*
 165	 * Stop here if tracing_thresh is set. We only write function return
 166	 * events to the ring buffer.
 167	 */
 168	if (tracing_thresh)
 169		return 1;
 170
 171	local_irq_save(flags);
 172	cpu = raw_smp_processor_id();
 173	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 174	disabled = atomic_inc_return(&data->disabled);
 175	if (likely(disabled == 1)) {
 176		trace_ctx = tracing_gen_ctx_flags(flags);
 177		ret = __trace_graph_entry(tr, trace, trace_ctx);
 178	} else {
 179		ret = 0;
 180	}
 181
 182	atomic_dec(&data->disabled);
 183	local_irq_restore(flags);
 184
 185	return ret;
 186}
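/*
 * Note the change from the v4.17 code above: set_graph_notrace no
 * longer biases curr_ret_stack negative. Instead a recursion flag,
 * TRACE_GRAPH_NOTRACE_BIT, is set on entry to a filtered function and
 * cleared again in trace_graph_return(), suppressing every event
 * emitted in between.
 */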
 187
 188static void
 189__trace_graph_function(struct trace_array *tr,
 190		unsigned long ip, unsigned int trace_ctx)
 191{
 192	u64 time = trace_clock_local();
 193	struct ftrace_graph_ent ent = {
 194		.func  = ip,
 195		.depth = 0,
 196	};
 197	struct ftrace_graph_ret ret = {
 198		.func     = ip,
 199		.depth    = 0,
 200		.calltime = time,
 201		.rettime  = time,
 202	};
 203
 204	__trace_graph_entry(tr, &ent, trace_ctx);
 205	__trace_graph_return(tr, &ret, trace_ctx);
 206}
 207
 208void
 209trace_graph_function(struct trace_array *tr,
 210		unsigned long ip, unsigned long parent_ip,
 211		unsigned int trace_ctx)
 212{
 213	__trace_graph_function(tr, ip, trace_ctx);
 214}
 215
 216void __trace_graph_return(struct trace_array *tr,
 217				struct ftrace_graph_ret *trace,
 218				unsigned int trace_ctx)
 219{
 220	struct trace_event_call *call = &event_funcgraph_exit;
 221	struct ring_buffer_event *event;
 222	struct trace_buffer *buffer = tr->array_buffer.buffer;
 223	struct ftrace_graph_ret_entry *entry;
 224
 225	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 226					  sizeof(*entry), trace_ctx);
 227	if (!event)
 228		return;
 229	entry	= ring_buffer_event_data(event);
 230	entry->ret				= *trace;
 231	if (!call_filter_check_discard(call, entry, buffer, event))
 232		trace_buffer_unlock_commit_nostack(buffer, event);
 233}
 234
 235void trace_graph_return(struct ftrace_graph_ret *trace)
 236{
 237	struct trace_array *tr = graph_array;
 238	struct trace_array_cpu *data;
 239	unsigned long flags;
 240	unsigned int trace_ctx;
 241	long disabled;
 242	int cpu;
 243
 244	ftrace_graph_addr_finish(trace);
 245
 246	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
 247		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
 248		return;
 249	}
 250
 251	local_irq_save(flags);
 252	cpu = raw_smp_processor_id();
 253	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 254	disabled = atomic_inc_return(&data->disabled);
 255	if (likely(disabled == 1)) {
 256		trace_ctx = tracing_gen_ctx_flags(flags);
 257		__trace_graph_return(tr, trace, trace_ctx);
 258	}
 259	atomic_dec(&data->disabled);
 260	local_irq_restore(flags);
 261}
 262
 263void set_graph_array(struct trace_array *tr)
 264{
 265	graph_array = tr;
 266
 267	/* Make graph_array visible before we start tracing */
 268
 269	smp_mb();
 270}
 271
 272static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 273{
 274	ftrace_graph_addr_finish(trace);
 275
 276	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
 277		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
 278		return;
 279	}
 280
 281	if (tracing_thresh &&
 282	    (trace->rettime - trace->calltime < tracing_thresh))
 283		return;
 284	else
 285		trace_graph_return(trace);
 286}
 287
 288static struct fgraph_ops funcgraph_thresh_ops = {
 289	.entryfunc = &trace_graph_entry,
 290	.retfunc = &trace_graph_thresh_return,
 291};
 292
 293static struct fgraph_ops funcgraph_ops = {
 294	.entryfunc = &trace_graph_entry,
 295	.retfunc = &trace_graph_return,
 296};
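/*
 * Here the registration API bundles both callbacks in a struct
 * fgraph_ops, so register_ftrace_graph()/unregister_ftrace_graph()
 * take one of the ops structures above instead of the two bare
 * function pointers used in the v4.17 code.
 */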
 297
 298static int graph_trace_init(struct trace_array *tr)
 299{
 300	int ret;
 301
 302	set_graph_array(tr);
 303	if (tracing_thresh)
 304		ret = register_ftrace_graph(&funcgraph_thresh_ops);
 305	else
 306		ret = register_ftrace_graph(&funcgraph_ops);
 307	if (ret)
 308		return ret;
 309	tracing_start_cmdline_record();
 310
 311	return 0;
 312}
 313
 314static void graph_trace_reset(struct trace_array *tr)
 315{
 316	tracing_stop_cmdline_record();
 317	if (tracing_thresh)
 318		unregister_ftrace_graph(&funcgraph_thresh_ops);
 319	else
 320		unregister_ftrace_graph(&funcgraph_ops);
 321}
 322
 323static int graph_trace_update_thresh(struct trace_array *tr)
 324{
 325	graph_trace_reset(tr);
 326	return graph_trace_init(tr);
 327}
 328
 329static int max_bytes_for_cpu;
 330
 331static void print_graph_cpu(struct trace_seq *s, int cpu)
 332{
 333	/*
 334	 * Start with a space character - to make it stand out
 335	 * to the right a bit when trace output is pasted into
 336	 * email:
 337	 */
 338	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 339}
 340
 341#define TRACE_GRAPH_PROCINFO_LENGTH	14
 342
 343static void print_graph_proc(struct trace_seq *s, pid_t pid)
 344{
 345	char comm[TASK_COMM_LEN];
 346	/* sign + log10(MAX_INT) + '\0' */
 347	char pid_str[11];
 348	int spaces = 0;
 349	int len;
 350	int i;
 351
 352	trace_find_cmdline(pid, comm);
 353	comm[7] = '\0';
 354	sprintf(pid_str, "%d", pid);
 355
 356	/* 1 stands for the "-" character */
 357	len = strlen(comm) + strlen(pid_str) + 1;
 358
 359	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 360		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
 361
 362	/* First spaces to align center */
 363	for (i = 0; i < spaces / 2; i++)
 364		trace_seq_putc(s, ' ');
 365
 366	trace_seq_printf(s, "%s-%s", comm, pid_str);
 367
 368	/* Last spaces to align center */
 369	for (i = 0; i < spaces - (spaces / 2); i++)
 370		trace_seq_putc(s, ' ');
 371}
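/*
 * Example: pid 1755 with comm "sshd" gives "sshd-1755" (9 chars),
 * centered in the 14-character field as "  sshd-1755   ". Note the
 * truncation above: a task named "kworker/u8:1" shows up as
 * "kworker-<pid>".
 */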
 372
 373
 374static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 375{
 376	trace_seq_putc(s, ' ');
 377	trace_print_lat_fmt(s, entry);
 378	trace_seq_puts(s, " | ");
 379}
 380
 381/* If the pid changed since the last trace, output this event */
 382static void
 383verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 384{
 385	pid_t prev_pid;
 386	pid_t *last_pid;
 387
 388	if (!data)
 389		return;
 390
 391	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 392
 393	if (*last_pid == pid)
 394		return;
 395
 396	prev_pid = *last_pid;
 397	*last_pid = pid;
 398
 399	if (prev_pid == -1)
 400		return;
 401/*
 402 * Context-switch trace line:
 403 *
 404 * ------------------------------------------
 405 * | 1)  migration/0--1  =>  sshd-1755
 406 * ------------------------------------------
 407 *
 408 */
 409	trace_seq_puts(s, " ------------------------------------------\n");
 410	print_graph_cpu(s, cpu);
 411	print_graph_proc(s, prev_pid);
 412	trace_seq_puts(s, " => ");
 413	print_graph_proc(s, pid);
 414	trace_seq_puts(s, "\n ------------------------------------------\n\n");
 415}
 416
 417static struct ftrace_graph_ret_entry *
 418get_return_for_leaf(struct trace_iterator *iter,
 419		struct ftrace_graph_ent_entry *curr)
 420{
 421	struct fgraph_data *data = iter->private;
 422	struct ring_buffer_iter *ring_iter = NULL;
 423	struct ring_buffer_event *event;
 424	struct ftrace_graph_ret_entry *next;
 425
 426	/*
 427	 * If the previous output failed to write to the seq buffer,
 428	 * then we just reuse the data from before.
 429	 */
 430	if (data && data->failed) {
 431		curr = &data->ent;
 432		next = &data->ret;
 433	} else {
 434
 435		ring_iter = trace_buffer_iter(iter, iter->cpu);
 436
 437		/* First peek to compare current entry and the next one */
 438		if (ring_iter)
 439			event = ring_buffer_iter_peek(ring_iter, NULL);
 440		else {
 441			/*
 442			 * We need to consume the current entry to see
 443			 * the next one.
 444			 */
 445			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
 446					    NULL, NULL);
 447			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
 448						 NULL, NULL);
 449		}
 450
 451		if (!event)
 452			return NULL;
 453
 454		next = ring_buffer_event_data(event);
 455
 456		if (data) {
 457			/*
 458			 * Save current and next entries for later reference
 459			 * if the output fails.
 460			 */
 461			data->ent = *curr;
 462			/*
 463			 * If the next event is not a return, record only its
 464			 * type so the retry path can tell it was not a return.
 465			 * Otherwise we can safely copy the entire event.
 466			 */
 467			if (next->ent.type == TRACE_GRAPH_RET)
 468				data->ret = *next;
 469			else
 470				data->ret.ent.type = next->ent.type;
 471		}
 472	}
 473
 474	if (next->ent.type != TRACE_GRAPH_RET)
 475		return NULL;
 476
 477	if (curr->ent.pid != next->ent.pid ||
 478			curr->graph_ent.func != next->ret.func)
 479		return NULL;
 480
 481	/* this is a leaf, now advance the iterator */
 482	if (ring_iter)
 483		ring_buffer_iter_advance(ring_iter);
 484
 485	return next;
 486}
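/*
 * Worked example of the peek above: if the entry event for kfree()
 * is immediately followed in the buffer by its matching return
 * (same pid, same function), the pair is a leaf and is later folded
 * into a single "kfree();" line instead of a "kfree() {" ... "}"
 * pair.
 */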
 487
 488static void print_graph_abs_time(u64 t, struct trace_seq *s)
 489{
 490	unsigned long usecs_rem;
 491
 492	usecs_rem = do_div(t, NSEC_PER_SEC);
 493	usecs_rem /= 1000;
 494
 495	trace_seq_printf(s, "%5lu.%06lu |  ",
 496			 (unsigned long)t, usecs_rem);
 497}
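/*
 * Example: t = 1234567890 ns splits into 1 s plus 234567890 ns,
 * i.e. 234567 us, and is printed as "    1.234567 |  ".
 */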
 498
 499static void
 500print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
 501{
 502	unsigned long long usecs;
 503
 504	usecs = iter->ts - iter->array_buffer->time_start;
 505	do_div(usecs, NSEC_PER_USEC);
 506
 507	trace_seq_printf(s, "%9llu us |  ", usecs);
 508}
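/*
 * Example: an event 12345 us after the buffer's time_start prints
 * as "    12345 us |  " (right-aligned in nine columns).
 */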
 509
 510static void
 511print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 512		enum trace_type type, int cpu, pid_t pid, u32 flags)
 513{
 514	struct trace_array *tr = iter->tr;
 515	struct trace_seq *s = &iter->seq;
 516	struct trace_entry *ent = iter->ent;
 517
 518	if (addr < (unsigned long)__irqentry_text_start ||
 519		addr >= (unsigned long)__irqentry_text_end)
 520		return;
 521
 522	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 523		/* Absolute time */
 524		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 525			print_graph_abs_time(iter->ts, s);
 526
 527		/* Relative time */
 528		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
 529			print_graph_rel_time(iter, s);
 530
 531		/* Cpu */
 532		if (flags & TRACE_GRAPH_PRINT_CPU)
 533			print_graph_cpu(s, cpu);
 534
 535		/* Proc */
 536		if (flags & TRACE_GRAPH_PRINT_PROC) {
 537			print_graph_proc(s, pid);
 538			trace_seq_puts(s, " | ");
 539		}
 540
 541		/* Latency format */
 542		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 543			print_graph_lat_fmt(s, ent);
 544	}
 545
 546	/* No overhead */
 547	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 548
 549	if (type == TRACE_GRAPH_ENT)
 550		trace_seq_puts(s, "==========>");
 551	else
 552		trace_seq_puts(s, "<==========");
 553
 554	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 555	trace_seq_putc(s, '\n');
 556}
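/*
 * For illustration, an interrupt shows up as a pair of marker lines
 * around its handler, roughly (columns depend on the flags chosen):
 *
 *	 1)   ==========> |
 *	 1)               |  smp_apic_timer_interrupt() {
 *	...
 *	 1)   <========== |
 */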
 557
 558void
 559trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 560{
 561	unsigned long nsecs_rem = do_div(duration, 1000);
 562	/* log10(ULONG_MAX) + '\0' */
 563	char usecs_str[21];
 564	char nsecs_str[5];
 565	int len;
 566	int i;
 567
 568	sprintf(usecs_str, "%lu", (unsigned long) duration);
 569
 570	/* Print msecs */
 571	trace_seq_printf(s, "%s", usecs_str);
 572
 573	len = strlen(usecs_str);
 574
 575	/* Print nsecs (we don't want to exceed 7 digits) */
 576	if (len < 7) {
 577		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
 578
 579		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 580		trace_seq_printf(s, ".%s", nsecs_str);
 581		len += strlen(nsecs_str) + 1;
 582	}
 583
 584	trace_seq_puts(s, " us ");
 585
 586	/* Print remaining spaces to fit the row's width */
 587	for (i = len; i < 8; i++)
 588		trace_seq_putc(s, ' ');
 589}
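/*
 * Worked example: duration = 1234567 ns. do_div() leaves 1234 us
 * with a 567 ns remainder, so this emits "1234.567 us " (all eight
 * digit columns used, no trailing padding); 633 ns emits
 * "0.633 us    ", padded to the same row width.
 */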
 590
 591static void
 592print_graph_duration(struct trace_array *tr, unsigned long long duration,
 593		     struct trace_seq *s, u32 flags)
 594{
 595	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
 596	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 597		return;
 598
 599	/* No real data, just filling the column with spaces */
 600	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
 601	case FLAGS_FILL_FULL:
 602		trace_seq_puts(s, "              |  ");
 603		return;
 604	case FLAGS_FILL_START:
 605		trace_seq_puts(s, "  ");
 606		return;
 607	case FLAGS_FILL_END:
 608		trace_seq_puts(s, " |");
 609		return;
 610	}
 611
 612	/* Signal an overhead of execution time to the output */
 613	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
 614		trace_seq_printf(s, "%c ", trace_find_mark(duration));
 615	else
 616		trace_seq_puts(s, "  ");
 617
 618	trace_print_graph_duration(duration, s);
 619	trace_seq_puts(s, "|  ");
 620}
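/*
 * The overhead mark comes from trace_find_mark() (trace_output.c),
 * which - at least in current kernels - maps longer durations to
 * louder symbols: '+' above 10 us, '!' above 100 us, '#' above
 * 1 ms, '*' above 10 ms, '@' above 100 ms and '$' above 1 s.
 * A 15 us leaf thus renders as "+ 15.000 us   |".
 */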
 621
 622/* Case of a leaf function on its call entry */
 623static enum print_line_t
 624print_graph_entry_leaf(struct trace_iterator *iter,
 625		struct ftrace_graph_ent_entry *entry,
 626		struct ftrace_graph_ret_entry *ret_entry,
 627		struct trace_seq *s, u32 flags)
 628{
 629	struct fgraph_data *data = iter->private;
 630	struct trace_array *tr = iter->tr;
 631	struct ftrace_graph_ret *graph_ret;
 632	struct ftrace_graph_ent *call;
 633	unsigned long long duration;
 634	int cpu = iter->cpu;
 635	int i;
 636
 637	graph_ret = &ret_entry->ret;
 638	call = &entry->graph_ent;
 639	duration = graph_ret->rettime - graph_ret->calltime;
 640
 641	if (data) {
 642		struct fgraph_cpu_data *cpu_data;
 643
 644		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 645
 646		/*
 647		 * Comments are displayed at depth + 1. Since this
 648		 * is a leaf function, keep the comments at the same
 649		 * depth as this entry.
 650		 */
 651		cpu_data->depth = call->depth - 1;
 652
 653		/* No need to keep this function around for this depth */
 654		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 655		    !WARN_ON_ONCE(call->depth < 0))
 656			cpu_data->enter_funcs[call->depth] = 0;
 657	}
 658
 659	/* Overhead and duration */
 660	print_graph_duration(tr, duration, s, flags);
 661
 662	/* Function */
 663	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 664		trace_seq_putc(s, ' ');
 665
 666	trace_seq_printf(s, "%ps();\n", (void *)call->func);
 667
 668	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
 669			cpu, iter->ent->pid, flags);
 670
 671	return trace_handle_return(s);
 672}
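/*
 * A leaf entry therefore renders on a single line, e.g.:
 *
 *	 1)   0.633 us    |        fput();
 *
 * rather than as an open/close brace pair.
 */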
 673
 674static enum print_line_t
 675print_graph_entry_nested(struct trace_iterator *iter,
 676			 struct ftrace_graph_ent_entry *entry,
 677			 struct trace_seq *s, int cpu, u32 flags)
 678{
 679	struct ftrace_graph_ent *call = &entry->graph_ent;
 680	struct fgraph_data *data = iter->private;
 681	struct trace_array *tr = iter->tr;
 682	int i;
 683
 684	if (data) {
 685		struct fgraph_cpu_data *cpu_data;
 686		int cpu = iter->cpu;
 687
 688		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 689		cpu_data->depth = call->depth;
 690
 691		/* Save this function pointer to see if the exit matches */
 692		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 693		    !WARN_ON_ONCE(call->depth < 0))
 694			cpu_data->enter_funcs[call->depth] = call->func;
 695	}
 696
 697	/* No time */
 698	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 699
 700	/* Function */
 701	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 702		trace_seq_putc(s, ' ');
 703
 704	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
 705
 706	if (trace_seq_has_overflowed(s))
 707		return TRACE_TYPE_PARTIAL_LINE;
 708
 709	/*
 710	 * we already consumed the current entry to check the next one
 711	 * and see if this is a leaf.
 712	 */
 713	return TRACE_TYPE_NO_CONSUME;
 714}
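/*
 * A nested (non-leaf) entry opens a brace and leaves the duration
 * column blank; the matching return line fills it in later:
 *
 *	 1)               |  do_sys_open() {
 *	...
 *	 1) + 12.005 us   |  }
 */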
 715
 716static void
 717print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 718		     int type, unsigned long addr, u32 flags)
 719{
 720	struct fgraph_data *data = iter->private;
 721	struct trace_entry *ent = iter->ent;
 722	struct trace_array *tr = iter->tr;
 723	int cpu = iter->cpu;
 724
 725	/* Pid */
 726	verif_pid(s, ent->pid, cpu, data);
 727
 728	if (type)
 729		/* Interrupt */
 730		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 731
 732	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 733		return;
 734
 735	/* Absolute time */
 736	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 737		print_graph_abs_time(iter->ts, s);
 738
 739	/* Relative time */
 740	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
 741		print_graph_rel_time(iter, s);
 742
 743	/* Cpu */
 744	if (flags & TRACE_GRAPH_PRINT_CPU)
 745		print_graph_cpu(s, cpu);
 746
 747	/* Proc */
 748	if (flags & TRACE_GRAPH_PRINT_PROC) {
 749		print_graph_proc(s, ent->pid);
 750		trace_seq_puts(s, " | ");
 751	}
 752
 753	/* Latency format */
 754	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 755		print_graph_lat_fmt(s, ent);
 758}
 759
 760/*
 761 * Entry check for irq code
 762 *
 763 * returns 1 if
 764 *  - we are inside irq code
 765 *  - we just entered irq code
 766 *
 767 * returns 0 if
 768 *  - funcgraph-irqs option is set (irqs are displayed)
 769 *  - we are not inside irq code
 770 */
 771static int
 772check_irq_entry(struct trace_iterator *iter, u32 flags,
 773		unsigned long addr, int depth)
 774{
 775	int cpu = iter->cpu;
 776	int *depth_irq;
 777	struct fgraph_data *data = iter->private;
 778
 779	/*
 780	 * If we are either displaying irqs, or we got called as
 781	 * a graph event and private data does not exist,
 782	 * then we bypass the irq check.
 783	 */
 784	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 785	    (!data))
 786		return 0;
 787
 788	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 789
 790	/*
 791	 * We are inside the irq code
 792	 */
 793	if (*depth_irq >= 0)
 794		return 1;
 795
 796	if ((addr < (unsigned long)__irqentry_text_start) ||
 797	    (addr >= (unsigned long)__irqentry_text_end))
 798		return 0;
 799
 800	/*
 801	 * We are entering irq code.
 802	 */
 803	*depth_irq = depth;
 804	return 1;
 805}
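/*
 * Net effect, for illustration: with funcgraph-irqs disabled, once
 * an entry address lands in [__irqentry_text_start,
 * __irqentry_text_end) the per-cpu depth_irq latches that depth,
 * and everything at or below it is suppressed until
 * check_irq_return() sees a return at (or above) the same depth.
 */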
 806
 807/*
 808 * Return check for irq code
 809 *
 810 * returns 1 if
 811 *  - we are inside irq code
 812 *  - we just left irq code
 813 *
 814 * returns 0 if
 815 *  - funcgraph-irqs option is set (irqs are displayed)
 816 *  - we are not inside irq code
 817 */
 818static int
 819check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
 820{
 821	int cpu = iter->cpu;
 822	int *depth_irq;
 823	struct fgraph_data *data = iter->private;
 824
 825	/*
 826	 * If we are either displaying irqs, or we got called as
 827	 * a graph event and private data does not exist,
 828	 * then we bypass the irq check.
 829	 */
 830	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 831	    (!data))
 832		return 0;
 833
 834	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 835
 836	/*
 837	 * We are not inside the irq code.
 838	 */
 839	if (*depth_irq == -1)
 840		return 0;
 841
 842	/*
 843	 * We are inside the irq code, and this is the return of the
 844	 * function that entered it. Don't trace it, and clear the
 845	 * entry depth, since we are now out of the irq code.
 846	 *
 847	 * Using '>=' rather than '==' ensures that we 'leave the irq
 848	 * code' once we return past the entry depth, protecting us
 849	 * against the loss of a RETURN entry.
 850	 */
 851	if (*depth_irq >= depth) {
 852		*depth_irq = -1;
 853		return 1;
 854	}
 855
 856	/*
 857	 * We are inside the irq code, and this is not the entry.
 858	 */
 859	return 1;
 860}
 861
 862static enum print_line_t
 863print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 864			struct trace_iterator *iter, u32 flags)
 865{
 866	struct fgraph_data *data = iter->private;
 867	struct ftrace_graph_ent *call = &field->graph_ent;
 868	struct ftrace_graph_ret_entry *leaf_ret;
 869	enum print_line_t ret;
 870	int cpu = iter->cpu;
 871
 872	if (check_irq_entry(iter, flags, call->func, call->depth))
 873		return TRACE_TYPE_HANDLED;
 874
 875	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
 876
 877	leaf_ret = get_return_for_leaf(iter, field);
 878	if (leaf_ret)
 879		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
 880	else
 881		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
 882
 883	if (data) {
 884		/*
 885		 * If we failed to write our output, then we need to make
 886		 * note of it, because we have already consumed our entry.
 887		 */
 888		if (s->full) {
 889			data->failed = 1;
 890			data->cpu = cpu;
 891		} else
 892			data->failed = 0;
 893	}
 894
 895	return ret;
 896}
 897
 898static enum print_line_t
 899print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 900		   struct trace_entry *ent, struct trace_iterator *iter,
 901		   u32 flags)
 902{
 903	unsigned long long duration = trace->rettime - trace->calltime;
 904	struct fgraph_data *data = iter->private;
 905	struct trace_array *tr = iter->tr;
 906	pid_t pid = ent->pid;
 907	int cpu = iter->cpu;
 908	int func_match = 1;
 909	int i;
 910
 911	if (check_irq_return(iter, flags, trace->depth))
 912		return TRACE_TYPE_HANDLED;
 913
 914	if (data) {
 915		struct fgraph_cpu_data *cpu_data;
 916		int cpu = iter->cpu;
 917
 918		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 919
 920		/*
 921		 * Comments are displayed at depth + 1. This is the
 922		 * return from a function, so we now want the comments
 923		 * to display at the same level as the bracket.
 924		 */
 925		cpu_data->depth = trace->depth - 1;
 926
 927		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
 928		    !WARN_ON_ONCE(trace->depth < 0)) {
 929			if (cpu_data->enter_funcs[trace->depth] != trace->func)
 930				func_match = 0;
 931			cpu_data->enter_funcs[trace->depth] = 0;
 932		}
 933	}
 934
 935	print_graph_prologue(iter, s, 0, 0, flags);
 936
 937	/* Overhead and duration */
 938	print_graph_duration(tr, duration, s, flags);
 939
 940	/* Closing brace */
 941	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
 942		trace_seq_putc(s, ' ');
 943
 944	/*
 945	 * If the return function does not have a matching entry,
 946	 * then the entry was lost. Instead of just printing
 947	 * the '}' and letting the user guess what function this
 948	 * belongs to, write out the function name. Always do
 949	 * that if the funcgraph-tail option is enabled.
 950	 */
 951	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
 952		trace_seq_puts(s, "}\n");
 953	else
 954		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
 955
 956	/* Overrun */
 957	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
 958		trace_seq_printf(s, " (Overruns: %u)\n",
 959				 trace->overrun);
 960
 961	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
 962			cpu, pid, flags);
 963
 964	return trace_handle_return(s);
 965}
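/*
 * For illustration, a matched return with funcgraph-tail off prints:
 *
 *	 1) + 12.005 us   |  }
 *
 * With funcgraph-tail on, or when the matching entry was lost, the
 * function name is appended as a C comment:
 *
 *	 1) + 12.005 us   |  } \/* do_sys_open *\/
 *
 * (backslashes added here only to keep this note a valid comment).
 */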
 966
 967static enum print_line_t
 968print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 969		    struct trace_iterator *iter, u32 flags)
 970{
 971	struct trace_array *tr = iter->tr;
 972	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
 973	struct fgraph_data *data = iter->private;
 974	struct trace_event *event;
 975	int depth = 0;
 976	int ret;
 977	int i;
 978
 979	if (data)
 980		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
 981
 982	print_graph_prologue(iter, s, 0, 0, flags);
 983
 984	/* No time */
 985	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 986
 987	/* Indentation */
 988	if (depth > 0)
 989		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
 990			trace_seq_putc(s, ' ');
 991
 992	/* The comment */
 993	trace_seq_puts(s, "/* ");
 994
 995	switch (iter->ent->type) {
 996	case TRACE_BPUTS:
 997		ret = trace_print_bputs_msg_only(iter);
 998		if (ret != TRACE_TYPE_HANDLED)
 999			return ret;
1000		break;
1001	case TRACE_BPRINT:
1002		ret = trace_print_bprintk_msg_only(iter);
1003		if (ret != TRACE_TYPE_HANDLED)
1004			return ret;
1005		break;
1006	case TRACE_PRINT:
1007		ret = trace_print_printk_msg_only(iter);
1008		if (ret != TRACE_TYPE_HANDLED)
1009			return ret;
1010		break;
1011	default:
1012		event = ftrace_find_event(ent->type);
1013		if (!event)
1014			return TRACE_TYPE_UNHANDLED;
1015
1016		ret = event->funcs->trace(iter, sym_flags, event);
1017		if (ret != TRACE_TYPE_HANDLED)
1018			return ret;
1019	}
1020
1021	if (trace_seq_has_overflowed(s))
1022		goto out;
1023
1024	/* Strip ending newline */
1025	if (s->buffer[s->seq.len - 1] == '\n') {
1026		s->buffer[s->seq.len - 1] = '\0';
1027		s->seq.len--;
1028	}
1029
1030	trace_seq_puts(s, " */\n");
1031 out:
1032	return trace_handle_return(s);
1033}
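/*
 * Example: trace_printk("hello") recorded at depth 1 renders as a
 * comment one indent level deeper than the surrounding braces:
 *
 *	 1)               |      \/* hello *\/
 *
 * (backslashes again added only to keep this note a valid comment).
 */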
1034
1035
1036enum print_line_t
1037print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1038{
1039	struct ftrace_graph_ent_entry *field;
1040	struct fgraph_data *data = iter->private;
1041	struct trace_entry *entry = iter->ent;
1042	struct trace_seq *s = &iter->seq;
1043	int cpu = iter->cpu;
1044	int ret;
1045
1046	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1047		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1048		return TRACE_TYPE_HANDLED;
1049	}
1050
1051	/*
1052	 * If the last output failed, we may need to print out the
1053	 * missing entry, which would otherwise never be emitted.
1054	 */
1055	if (data && data->failed) {
1056		field = &data->ent;
1057		iter->cpu = data->cpu;
1058		ret = print_graph_entry(field, s, iter, flags);
1059		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1060			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1061			ret = TRACE_TYPE_NO_CONSUME;
1062		}
1063		iter->cpu = cpu;
1064		return ret;
1065	}
1066
1067	switch (entry->type) {
1068	case TRACE_GRAPH_ENT: {
1069		/*
1070		 * print_graph_entry() may consume the current event,
1071		 * thus @field may become invalid, so we need to save it.
1072		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1073		 * so it can safely be saved on the stack.
1074		 */
1075		struct ftrace_graph_ent_entry saved;
1076		trace_assign_type(field, entry);
1077		saved = *field;
1078		return print_graph_entry(&saved, s, iter, flags);
1079	}
1080	case TRACE_GRAPH_RET: {
1081		struct ftrace_graph_ret_entry *field;
1082		trace_assign_type(field, entry);
1083		return print_graph_return(&field->ret, s, entry, iter, flags);
1084	}
1085	case TRACE_STACK:
1086	case TRACE_FN:
1087		/* don't print stack and function entries as comments */
1088		return TRACE_TYPE_UNHANDLED;
1089
1090	default:
1091		return print_graph_comment(s, entry, iter, flags);
1092	}
1093
1094	return TRACE_TYPE_HANDLED;
1095}
1096
1097static enum print_line_t
1098print_graph_function(struct trace_iterator *iter)
1099{
1100	return print_graph_function_flags(iter, tracer_flags.val);
1101}
1102
1103static enum print_line_t
1104print_graph_function_event(struct trace_iterator *iter, int flags,
1105			   struct trace_event *event)
1106{
1107	return print_graph_function(iter);
1108}
1109
1110static void print_lat_header(struct seq_file *s, u32 flags)
1111{
1112	static const char spaces[] = "                "	/* 16 spaces */
1113		"    "					/* 4 spaces */
1114		"                 ";			/* 17 spaces */
1115	int size = 0;
1116
1117	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1118		size += 16;
1119	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1120		size += 16;
1121	if (flags & TRACE_GRAPH_PRINT_CPU)
1122		size += 4;
1123	if (flags & TRACE_GRAPH_PRINT_PROC)
1124		size += 17;
1125
1126	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1127	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1128	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1129	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1130	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1131}
1132
1133static void __print_graph_headers_flags(struct trace_array *tr,
1134					struct seq_file *s, u32 flags)
1135{
1136	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1137
1138	if (lat)
1139		print_lat_header(s, flags);
1140
1141	/* 1st line */
1142	seq_putc(s, '#');
1143	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1144		seq_puts(s, "     TIME       ");
1145	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1146		seq_puts(s, "   REL TIME     ");
1147	if (flags & TRACE_GRAPH_PRINT_CPU)
1148		seq_puts(s, " CPU");
1149	if (flags & TRACE_GRAPH_PRINT_PROC)
1150		seq_puts(s, "  TASK/PID       ");
1151	if (lat)
1152		seq_puts(s, "||||   ");
1153	if (flags & TRACE_GRAPH_PRINT_DURATION)
1154		seq_puts(s, "  DURATION   ");
1155	seq_puts(s, "               FUNCTION CALLS\n");
1156
1157	/* 2nd line */
1158	seq_putc(s, '#');
1159	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1160		seq_puts(s, "      |         ");
1161	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1162		seq_puts(s, "      |         ");
1163	if (flags & TRACE_GRAPH_PRINT_CPU)
1164		seq_puts(s, " |  ");
1165	if (flags & TRACE_GRAPH_PRINT_PROC)
1166		seq_puts(s, "   |    |        ");
1167	if (lat)
1168		seq_puts(s, "||||   ");
1169	if (flags & TRACE_GRAPH_PRINT_DURATION)
1170		seq_puts(s, "   |   |      ");
1171	seq_puts(s, "               |   |   |   |\n");
1172}
1173
1174static void print_graph_headers(struct seq_file *s)
1175{
1176	print_graph_headers_flags(s, tracer_flags.val);
1177}
1178
1179void print_graph_headers_flags(struct seq_file *s, u32 flags)
1180{
1181	struct trace_iterator *iter = s->private;
1182	struct trace_array *tr = iter->tr;
1183
1184	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1185		return;
1186
1187	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1188		/* print nothing if the buffers are empty */
1189		if (trace_empty(iter))
1190			return;
1191
1192		print_trace_header(s, iter);
1193	}
1194
1195	__print_graph_headers_flags(tr, s, flags);
1196}
1197
1198void graph_trace_open(struct trace_iterator *iter)
1199{
1200	/* pid and depth on the last trace processed */
1201	struct fgraph_data *data;
1202	gfp_t gfpflags;
1203	int cpu;
1204
1205	iter->private = NULL;
1206
1207	/* We can be called in atomic context via ftrace_dump() */
1208	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1209
1210	data = kzalloc(sizeof(*data), gfpflags);
1211	if (!data)
1212		goto out_err;
1213
1214	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1215	if (!data->cpu_data)
1216		goto out_err_free;
1217
1218	for_each_possible_cpu(cpu) {
1219		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1220		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1221		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1222		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1223
1224		*pid = -1;
1225		*depth = 0;
1226		*ignore = 0;
1227		*depth_irq = -1;
1228	}
1229
1230	iter->private = data;
1231
1232	return;
1233
1234 out_err_free:
1235	kfree(data);
1236 out_err:
1237	pr_warn("function graph tracer: not enough memory\n");
1238}
1239
1240void graph_trace_close(struct trace_iterator *iter)
1241{
1242	struct fgraph_data *data = iter->private;
1243
1244	if (data) {
1245		free_percpu(data->cpu_data);
1246		kfree(data);
1247	}
1248}
1249
1250static int
1251func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1252{
1253	if (bit == TRACE_GRAPH_PRINT_IRQS)
1254		ftrace_graph_skip_irqs = !set;
1255
1256	if (bit == TRACE_GRAPH_SLEEP_TIME)
1257		ftrace_graph_sleep_time_control(set);
1258
1259	if (bit == TRACE_GRAPH_GRAPH_TIME)
1260		ftrace_graph_graph_time_control(set);
1261
1262	return 0;
1263}
1264
1265static struct trace_event_functions graph_functions = {
1266	.trace		= print_graph_function_event,
1267};
1268
1269static struct trace_event graph_trace_entry_event = {
1270	.type		= TRACE_GRAPH_ENT,
1271	.funcs		= &graph_functions,
1272};
1273
1274static struct trace_event graph_trace_ret_event = {
1275	.type		= TRACE_GRAPH_RET,
1276	.funcs		= &graph_functions
1277};
1278
1279static struct tracer graph_trace __tracer_data = {
1280	.name		= "function_graph",
1281	.update_thresh	= graph_trace_update_thresh,
1282	.open		= graph_trace_open,
1283	.pipe_open	= graph_trace_open,
1284	.close		= graph_trace_close,
1285	.pipe_close	= graph_trace_close,
1286	.init		= graph_trace_init,
1287	.reset		= graph_trace_reset,
1288	.print_line	= print_graph_function,
1289	.print_header	= print_graph_headers,
1290	.flags		= &tracer_flags,
1291	.set_flag	= func_graph_set_flag,
1292#ifdef CONFIG_FTRACE_SELFTEST
1293	.selftest	= trace_selftest_startup_function_graph,
1294#endif
1295};
1296
1297
1298static ssize_t
1299graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1300		  loff_t *ppos)
1301{
1302	unsigned long val;
1303	int ret;
1304
1305	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1306	if (ret)
1307		return ret;
1308
1309	fgraph_max_depth = val;
1310
1311	*ppos += cnt;
1312
1313	return cnt;
1314}
1315
1316static ssize_t
1317graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1318		 loff_t *ppos)
1319{
1320	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1321	int n;
1322
1323	n = sprintf(buf, "%u\n", fgraph_max_depth);
1324
1325	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1326}
1327
1328static const struct file_operations graph_depth_fops = {
1329	.open		= tracing_open_generic,
1330	.write		= graph_depth_write,
1331	.read		= graph_depth_read,
1332	.llseek		= generic_file_llseek,
1333};
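/*
 * Usage sketch, assuming tracefs is mounted at /sys/kernel/tracing:
 *
 *	# echo 3 > /sys/kernel/tracing/max_graph_depth
 *	# cat /sys/kernel/tracing/max_graph_depth
 *	3
 *
 * A value of 0 (the default) means no depth limit.
 */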
1334
1335static __init int init_graph_tracefs(void)
1336{
1337	int ret;
1338
1339	ret = tracing_init_dentry();
1340	if (ret)
1341		return 0;
1342
1343	trace_create_file("max_graph_depth", 0644, NULL,
1344			  NULL, &graph_depth_fops);
1345
1346	return 0;
1347}
1348fs_initcall(init_graph_tracefs);
1349
1350static __init int init_graph_trace(void)
1351{
1352	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1353
1354	if (!register_trace_event(&graph_trace_entry_event)) {
1355		pr_warn("Warning: could not register graph trace events\n");
1356		return 1;
1357	}
1358
1359	if (!register_trace_event(&graph_trace_ret_event)) {
1360		pr_warn("Warning: could not register graph trace events\n");
1361		return 1;
1362	}
1363
1364	return register_tracer(&graph_trace);
1365}
1366
1367core_initcall(init_graph_trace);