v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *
   4 * Function graph tracer.
   5 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   6 * Mostly borrowed from function tracer which
   7 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   8 *
   9 */
  10#include <linux/uaccess.h>
  11#include <linux/ftrace.h>
  12#include <linux/interrupt.h>
  13#include <linux/slab.h>
  14#include <linux/fs.h>
  15
  16#include "trace.h"
  17#include "trace_output.h"
  18
  19/* When set, irq functions will be ignored */
  20static int ftrace_graph_skip_irqs;
  21
  22struct fgraph_cpu_data {
  23	pid_t		last_pid;
  24	int		depth;
  25	int		depth_irq;
  26	int		ignore;
  27	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
  28};
  29
  30struct fgraph_data {
  31	struct fgraph_cpu_data __percpu *cpu_data;
  32
  33	/* Place to preserve last processed entry. */
  34	struct ftrace_graph_ent_entry	ent;
  35	struct ftrace_graph_ret_entry	ret;
  36	int				failed;
  37	int				cpu;
  38};
  39
  40#define TRACE_GRAPH_INDENT	2
  41
  42unsigned int fgraph_max_depth;
  43
  44static struct tracer_opt trace_opts[] = {
  45	/* Display overruns? (for self-debug purpose) */
  46	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
   47	/* Display CPU? */
  48	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
   49	/* Display Overhead? */
  50	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  51	/* Display proc name/pid */
  52	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  53	/* Display duration of execution */
  54	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  55	/* Display absolute time of an entry */
  56	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  57	/* Display interrupts */
  58	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  59	/* Display function name after trailing } */
  60	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  61	/* Include sleep time (scheduled out) between entry and return */
  62	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
  63
  64#ifdef CONFIG_FUNCTION_PROFILER
  65	/* Include time within nested functions */
  66	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
  67#endif
  68
  69	{ } /* Empty entry */
  70};
  71
  72static struct tracer_flags tracer_flags = {
  73	/* Don't display overruns, proc, or tail by default */
  74	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  75	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  76	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
  77	.opts = trace_opts
  78};
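
Each TRACER_OPT above becomes a runtime toggle once function_graph is the
current tracer, with tracer_flags.val supplying the defaults. As an
illustrative sketch (not part of this file), userspace can flip an option by
writing its name, or the name prefixed with "no", into the tracefs
trace_options file; the conventional /sys/kernel/tracing mount point is
assumed here:

#include <stdio.h>

/* Sketch: enable funcgraph-proc; writing "nofuncgraph-proc" turns it off. */
int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/trace_options", "w");

	if (!f)
		return 1;
	fputs("funcgraph-proc\n", f);
	return fclose(f) ? 1 : 0;
}
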
  79
  80static struct trace_array *graph_array;
  81
  82/*
  83 * DURATION column is being also used to display IRQ signs,
  84 * following values are used by print_graph_irq and others
  85 * to fill in space into DURATION column.
  86 */
  87enum {
  88	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
  89	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
  90	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
  91};
  92
  93static void
  94print_graph_duration(struct trace_array *tr, unsigned long long duration,
  95		     struct trace_seq *s, u32 flags);
  96
  97int __trace_graph_entry(struct trace_array *tr,
  98				struct ftrace_graph_ent *trace,
  99				unsigned long flags,
 100				int pc)
 101{
 102	struct trace_event_call *call = &event_funcgraph_entry;
 103	struct ring_buffer_event *event;
 104	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 105	struct ftrace_graph_ent_entry *entry;
 106
 107	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 108					  sizeof(*entry), flags, pc);
 109	if (!event)
 110		return 0;
 111	entry	= ring_buffer_event_data(event);
 112	entry->graph_ent			= *trace;
 113	if (!call_filter_check_discard(call, entry, buffer, event))
 114		trace_buffer_unlock_commit_nostack(buffer, event);
 115
 116	return 1;
 117}
 118
 119static inline int ftrace_graph_ignore_irqs(void)
 120{
 121	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 122		return 0;
 123
 124	return in_irq();
 125}
 126
 127int trace_graph_entry(struct ftrace_graph_ent *trace)
 128{
 129	struct trace_array *tr = graph_array;
 130	struct trace_array_cpu *data;
 131	unsigned long flags;
 132	long disabled;
 133	int ret;
 134	int cpu;
 135	int pc;
 136
 137	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
 138		return 0;
 139
 140	/*
 141	 * Do not trace a function if it's filtered by set_graph_notrace.
  142	 * Make the index of the ret stack negative to indicate that it should
 143	 * ignore further functions.  But it needs its own ret stack entry
 144	 * to recover the original index in order to continue tracing after
 145	 * returning from the function.
 146	 */
 147	if (ftrace_graph_notrace_addr(trace->func)) {
 148		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
 149		/*
  150		 * Need to return 1 so that the return handler gets called,
  151		 * which will clear the NOTRACE bit.
 152		 */
 153		return 1;
 154	}
 155
 156	if (!ftrace_trace_task(tr))
 157		return 0;
 158
 159	if (ftrace_graph_ignore_func(trace))
 160		return 0;
 161
 162	if (ftrace_graph_ignore_irqs())
 163		return 0;
 164
 165	/*
  166	 * Stop here if tracing_thresh is set. We only write function return
 167	 * events to the ring buffer.
 168	 */
 169	if (tracing_thresh)
 170		return 1;
 171
 172	local_irq_save(flags);
 173	cpu = raw_smp_processor_id();
 174	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 175	disabled = atomic_inc_return(&data->disabled);
 176	if (likely(disabled == 1)) {
 177		pc = preempt_count();
 178		ret = __trace_graph_entry(tr, trace, flags, pc);
 179	} else {
 180		ret = 0;
 181	}
 182
 183	atomic_dec(&data->disabled);
 184	local_irq_restore(flags);
 185
 186	return ret;
 187}
 188
 189static void
 190__trace_graph_function(struct trace_array *tr,
 191		unsigned long ip, unsigned long flags, int pc)
 192{
 193	u64 time = trace_clock_local();
 194	struct ftrace_graph_ent ent = {
 195		.func  = ip,
 196		.depth = 0,
 197	};
 198	struct ftrace_graph_ret ret = {
 199		.func     = ip,
 200		.depth    = 0,
 201		.calltime = time,
 202		.rettime  = time,
 203	};
 204
 205	__trace_graph_entry(tr, &ent, flags, pc);
 206	__trace_graph_return(tr, &ret, flags, pc);
 207}
 208
 209void
 210trace_graph_function(struct trace_array *tr,
 211		unsigned long ip, unsigned long parent_ip,
 212		unsigned long flags, int pc)
 213{
 214	__trace_graph_function(tr, ip, flags, pc);
 215}
 216
 217void __trace_graph_return(struct trace_array *tr,
 218				struct ftrace_graph_ret *trace,
 219				unsigned long flags,
 220				int pc)
 221{
 222	struct trace_event_call *call = &event_funcgraph_exit;
 223	struct ring_buffer_event *event;
 224	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 225	struct ftrace_graph_ret_entry *entry;
 226
 227	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 228					  sizeof(*entry), flags, pc);
 229	if (!event)
 230		return;
 231	entry	= ring_buffer_event_data(event);
 232	entry->ret				= *trace;
 233	if (!call_filter_check_discard(call, entry, buffer, event))
 234		trace_buffer_unlock_commit_nostack(buffer, event);
 235}
 236
 237void trace_graph_return(struct ftrace_graph_ret *trace)
 238{
 239	struct trace_array *tr = graph_array;
 240	struct trace_array_cpu *data;
 241	unsigned long flags;
 242	long disabled;
 243	int cpu;
 244	int pc;
 245
 246	ftrace_graph_addr_finish(trace);
 247
 248	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
 249		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
 250		return;
 251	}
 252
 253	local_irq_save(flags);
 254	cpu = raw_smp_processor_id();
 255	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 256	disabled = atomic_inc_return(&data->disabled);
 257	if (likely(disabled == 1)) {
 258		pc = preempt_count();
 259		__trace_graph_return(tr, trace, flags, pc);
 260	}
 261	atomic_dec(&data->disabled);
 262	local_irq_restore(flags);
 263}
 264
 265void set_graph_array(struct trace_array *tr)
 266{
 267	graph_array = tr;
 268
 269	/* Make graph_array visible before we start tracing */
 270
 271	smp_mb();
 272}
 273
 274static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 275{
 276	ftrace_graph_addr_finish(trace);
 277
 278	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
 279		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
 280		return;
 281	}
 282
 283	if (tracing_thresh &&
 284	    (trace->rettime - trace->calltime < tracing_thresh))
 285		return;
 286	else
 287		trace_graph_return(trace);
 288}
 289
 290static struct fgraph_ops funcgraph_thresh_ops = {
 291	.entryfunc = &trace_graph_entry,
 292	.retfunc = &trace_graph_thresh_return,
 293};
 294
 295static struct fgraph_ops funcgraph_ops = {
 296	.entryfunc = &trace_graph_entry,
 297	.retfunc = &trace_graph_return,
 298};
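
graph_trace_init() below picks one of these fgraph_ops pairs and hands it to
register_ftrace_graph(). The same API is available to other in-kernel users;
the following is a minimal, hypothetical module sketch (the my_* names are
invented for illustration, and error handling is pared down):

#include <linux/module.h>
#include <linux/ftrace.h>

/* Return nonzero to trace this call and receive the matching return event. */
static int my_entry(struct ftrace_graph_ent *trace)
{
	return 1;
}

static void my_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the measured duration */
}

static struct fgraph_ops my_ops = {
	.entryfunc	= my_entry,
	.retfunc	= my_return,
};

static int __init my_init(void)
{
	return register_ftrace_graph(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_graph(&my_ops);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");
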
 299
 300static int graph_trace_init(struct trace_array *tr)
 301{
 302	int ret;
 303
 304	set_graph_array(tr);
 305	if (tracing_thresh)
 306		ret = register_ftrace_graph(&funcgraph_thresh_ops);
 307	else
 308		ret = register_ftrace_graph(&funcgraph_ops);
 309	if (ret)
 310		return ret;
 311	tracing_start_cmdline_record();
 312
 313	return 0;
 314}
 315
 316static void graph_trace_reset(struct trace_array *tr)
 317{
 318	tracing_stop_cmdline_record();
 319	if (tracing_thresh)
 320		unregister_ftrace_graph(&funcgraph_thresh_ops);
 321	else
 322		unregister_ftrace_graph(&funcgraph_ops);
 323}
 324
 325static int graph_trace_update_thresh(struct trace_array *tr)
 326{
 327	graph_trace_reset(tr);
 328	return graph_trace_init(tr);
 329}
 330
 331static int max_bytes_for_cpu;
 332
 333static void print_graph_cpu(struct trace_seq *s, int cpu)
 334{
 335	/*
 336	 * Start with a space character - to make it stand out
 337	 * to the right a bit when trace output is pasted into
 338	 * email:
 339	 */
 340	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 341}
 342
 343#define TRACE_GRAPH_PROCINFO_LENGTH	14
 344
 345static void print_graph_proc(struct trace_seq *s, pid_t pid)
 346{
 347	char comm[TASK_COMM_LEN];
 348	/* sign + log10(MAX_INT) + '\0' */
 349	char pid_str[11];
 350	int spaces = 0;
 351	int len;
 352	int i;
 353
 354	trace_find_cmdline(pid, comm);
 355	comm[7] = '\0';
 356	sprintf(pid_str, "%d", pid);
 357
 358	/* 1 stands for the "-" character */
 359	len = strlen(comm) + strlen(pid_str) + 1;
 360
 361	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 362		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
 363
 364	/* First spaces to align center */
 365	for (i = 0; i < spaces / 2; i++)
 366		trace_seq_putc(s, ' ');
 367
 368	trace_seq_printf(s, "%s-%s", comm, pid_str);
 369
 370	/* Last spaces to align center */
 371	for (i = 0; i < spaces - (spaces / 2); i++)
 372		trace_seq_putc(s, ' ');
 373}
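
Worked example: for comm "sshd" and pid 1755, len = 4 + 4 + 1 = 9, so
spaces = 5 and the function emits two leading spaces, "sshd-1755", then three
trailing spaces, centering the pair in the 14-character column. Note that
comm[7] = '\0' above truncates longer task names to seven characters before
this calculation.
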
 374
 375
 376static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 377{
 378	trace_seq_putc(s, ' ');
 379	trace_print_lat_fmt(s, entry);
 380	trace_seq_puts(s, " | ");
 381}
 382
 383/* If the pid changed since the last trace, output this event */
 384static void
 385verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 386{
 387	pid_t prev_pid;
 388	pid_t *last_pid;
 389
 390	if (!data)
 391		return;
 392
 393	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 394
 395	if (*last_pid == pid)
 396		return;
 397
 398	prev_pid = *last_pid;
 399	*last_pid = pid;
 400
 401	if (prev_pid == -1)
 402		return;
 403/*
 404 * Context-switch trace line:
 405
 406 ------------------------------------------
 407 | 1)  migration/0--1  =>  sshd-1755
 408 ------------------------------------------
 409
 410 */
 411	trace_seq_puts(s, " ------------------------------------------\n");
 412	print_graph_cpu(s, cpu);
 413	print_graph_proc(s, prev_pid);
 414	trace_seq_puts(s, " => ");
 415	print_graph_proc(s, pid);
 416	trace_seq_puts(s, "\n ------------------------------------------\n\n");
 417}
 418
 419static struct ftrace_graph_ret_entry *
 420get_return_for_leaf(struct trace_iterator *iter,
 421		struct ftrace_graph_ent_entry *curr)
 422{
 423	struct fgraph_data *data = iter->private;
 424	struct ring_buffer_iter *ring_iter = NULL;
 425	struct ring_buffer_event *event;
 426	struct ftrace_graph_ret_entry *next;
 427
 428	/*
 429	 * If the previous output failed to write to the seq buffer,
 430	 * then we just reuse the data from before.
 431	 */
 432	if (data && data->failed) {
 433		curr = &data->ent;
 434		next = &data->ret;
 435	} else {
 436
 437		ring_iter = trace_buffer_iter(iter, iter->cpu);
 438
 439		/* First peek to compare current entry and the next one */
 440		if (ring_iter)
 441			event = ring_buffer_iter_peek(ring_iter, NULL);
 442		else {
 443			/*
 444			 * We need to consume the current entry to see
 445			 * the next one.
 446			 */
 447			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 448					    NULL, NULL);
 449			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 450						 NULL, NULL);
 451		}
 452
 453		if (!event)
 454			return NULL;
 455
 456		next = ring_buffer_event_data(event);
 457
 458		if (data) {
 459			/*
 460			 * Save current and next entries for later reference
 461			 * if the output fails.
 462			 */
 463			data->ent = *curr;
 464			/*
 465			 * If the next event is not a return type, then
 466			 * we only care about what type it is. Otherwise we can
 467			 * safely copy the entire event.
 468			 */
 469			if (next->ent.type == TRACE_GRAPH_RET)
 470				data->ret = *next;
 471			else
 472				data->ret.ent.type = next->ent.type;
 473		}
 474	}
 475
 476	if (next->ent.type != TRACE_GRAPH_RET)
 477		return NULL;
 478
 479	if (curr->ent.pid != next->ent.pid ||
 480			curr->graph_ent.func != next->ret.func)
 481		return NULL;
 482
 483	/* this is a leaf, now advance the iterator */
 484	if (ring_iter)
 485		ring_buffer_read(ring_iter, NULL);
 486
 487	return next;
 488}
 489
 490static void print_graph_abs_time(u64 t, struct trace_seq *s)
 491{
 492	unsigned long usecs_rem;
 493
 494	usecs_rem = do_div(t, NSEC_PER_SEC);
 495	usecs_rem /= 1000;
 496
 497	trace_seq_printf(s, "%5lu.%06lu |  ",
 498			 (unsigned long)t, usecs_rem);
 499}
 500
 501static void
 502print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
 503{
 504	unsigned long long usecs;
 505
 506	usecs = iter->ts - iter->trace_buffer->time_start;
 507	do_div(usecs, NSEC_PER_USEC);
 508
 509	trace_seq_printf(s, "%9llu us |  ", usecs);
 510}
 511
 512static void
 513print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 514		enum trace_type type, int cpu, pid_t pid, u32 flags)
 515{
 516	struct trace_array *tr = iter->tr;
 517	struct trace_seq *s = &iter->seq;
 518	struct trace_entry *ent = iter->ent;
 519
 520	if (addr < (unsigned long)__irqentry_text_start ||
 521		addr >= (unsigned long)__irqentry_text_end)
 522		return;
 523
 524	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 525		/* Absolute time */
 526		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 527			print_graph_abs_time(iter->ts, s);
 528
 529		/* Relative time */
 530		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
 531			print_graph_rel_time(iter, s);
 532
 533		/* Cpu */
 534		if (flags & TRACE_GRAPH_PRINT_CPU)
 535			print_graph_cpu(s, cpu);
 536
 537		/* Proc */
 538		if (flags & TRACE_GRAPH_PRINT_PROC) {
 539			print_graph_proc(s, pid);
 540			trace_seq_puts(s, " | ");
 541		}
 542
 543		/* Latency format */
 544		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 545			print_graph_lat_fmt(s, ent);
 546	}
 547
 548	/* No overhead */
 549	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 550
 551	if (type == TRACE_GRAPH_ENT)
 552		trace_seq_puts(s, "==========>");
 553	else
 554		trace_seq_puts(s, "<==========");
 555
 556	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 557	trace_seq_putc(s, '\n');
 558}
 559
 560void
 561trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 562{
 563	unsigned long nsecs_rem = do_div(duration, 1000);
 564	/* log10(ULONG_MAX) + '\0' */
 565	char usecs_str[21];
 566	char nsecs_str[5];
 567	int len;
 568	int i;
 569
 570	sprintf(usecs_str, "%lu", (unsigned long) duration);
 571
 572	/* Print msecs */
 573	trace_seq_printf(s, "%s", usecs_str);
 574
 575	len = strlen(usecs_str);
 576
 577	/* Print nsecs (we don't want to exceed 7 numbers) */
 578	if (len < 7) {
 579		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
 580
 581		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 582		trace_seq_printf(s, ".%s", nsecs_str);
 583		len += strlen(nsecs_str) + 1;
 584	}
 585
 586	trace_seq_puts(s, " us ");
 587
 588	/* Print remaining spaces to fit the row's width */
 589	for (i = len; i < 8; i++)
 590		trace_seq_putc(s, ' ');
 591}
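
Worked example: a duration of 1234 ns leaves duration = 1 and nsecs_rem = 234
after the do_div(), so the function prints "1.234", then " us ", then three
padding spaces so the numeric part always spans eight columns. Durations with
seven or more microsecond digits skip the fractional part entirely, keeping
the DURATION column aligned.
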
 592
 593static void
 594print_graph_duration(struct trace_array *tr, unsigned long long duration,
 595		     struct trace_seq *s, u32 flags)
 596{
 597	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
 598	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 599		return;
 600
  601	/* No real data, just filling the column with spaces */
 602	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
 603	case FLAGS_FILL_FULL:
 604		trace_seq_puts(s, "              |  ");
 605		return;
 606	case FLAGS_FILL_START:
 607		trace_seq_puts(s, "  ");
 608		return;
 609	case FLAGS_FILL_END:
 610		trace_seq_puts(s, " |");
 611		return;
 612	}
 613
  614	/* Signal the execution-time overhead to the output */
 615	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
 616		trace_seq_printf(s, "%c ", trace_find_mark(duration));
 617	else
 618		trace_seq_puts(s, "  ");
 619
 620	trace_print_graph_duration(duration, s);
 621	trace_seq_puts(s, "|  ");
 622}
 623
 624/* Case of a leaf function on its call entry */
 625static enum print_line_t
 626print_graph_entry_leaf(struct trace_iterator *iter,
 627		struct ftrace_graph_ent_entry *entry,
 628		struct ftrace_graph_ret_entry *ret_entry,
 629		struct trace_seq *s, u32 flags)
 630{
 631	struct fgraph_data *data = iter->private;
 632	struct trace_array *tr = iter->tr;
 633	struct ftrace_graph_ret *graph_ret;
 634	struct ftrace_graph_ent *call;
 635	unsigned long long duration;
 636	int cpu = iter->cpu;
 637	int i;
 638
 639	graph_ret = &ret_entry->ret;
 640	call = &entry->graph_ent;
 641	duration = graph_ret->rettime - graph_ret->calltime;
 642
 643	if (data) {
 644		struct fgraph_cpu_data *cpu_data;
 645
 646		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 647
 648		/*
 649		 * Comments display at + 1 to depth. Since
 650		 * this is a leaf function, keep the comments
 651		 * equal to this depth.
 652		 */
 653		cpu_data->depth = call->depth - 1;
 654
 655		/* No need to keep this function around for this depth */
 656		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 657		    !WARN_ON_ONCE(call->depth < 0))
 658			cpu_data->enter_funcs[call->depth] = 0;
 659	}
 660
 661	/* Overhead and duration */
 662	print_graph_duration(tr, duration, s, flags);
 663
 664	/* Function */
 665	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 666		trace_seq_putc(s, ' ');
 667
 668	trace_seq_printf(s, "%ps();\n", (void *)call->func);
 669
 670	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
 671			cpu, iter->ent->pid, flags);
 672
 673	return trace_handle_return(s);
 674}
 675
 676static enum print_line_t
 677print_graph_entry_nested(struct trace_iterator *iter,
 678			 struct ftrace_graph_ent_entry *entry,
 679			 struct trace_seq *s, int cpu, u32 flags)
 680{
 681	struct ftrace_graph_ent *call = &entry->graph_ent;
 682	struct fgraph_data *data = iter->private;
 683	struct trace_array *tr = iter->tr;
 684	int i;
 685
 686	if (data) {
 687		struct fgraph_cpu_data *cpu_data;
 688		int cpu = iter->cpu;
 689
 690		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 691		cpu_data->depth = call->depth;
 692
 693		/* Save this function pointer to see if the exit matches */
 694		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 695		    !WARN_ON_ONCE(call->depth < 0))
 696			cpu_data->enter_funcs[call->depth] = call->func;
 697	}
 698
 699	/* No time */
 700	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 701
 702	/* Function */
 703	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 704		trace_seq_putc(s, ' ');
 705
 706	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
 707
 708	if (trace_seq_has_overflowed(s))
 709		return TRACE_TYPE_PARTIAL_LINE;
 710
 711	/*
 712	 * we already consumed the current entry to check the next one
 713	 * and see if this is a leaf.
 714	 */
 715	return TRACE_TYPE_NO_CONSUME;
 716}
 717
 718static void
 719print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 720		     int type, unsigned long addr, u32 flags)
 721{
 722	struct fgraph_data *data = iter->private;
 723	struct trace_entry *ent = iter->ent;
 724	struct trace_array *tr = iter->tr;
 725	int cpu = iter->cpu;
 726
 727	/* Pid */
 728	verif_pid(s, ent->pid, cpu, data);
 729
 730	if (type)
 731		/* Interrupt */
 732		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 733
 734	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 735		return;
 736
 737	/* Absolute time */
 738	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 739		print_graph_abs_time(iter->ts, s);
 740
 741	/* Relative time */
 742	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
 743		print_graph_rel_time(iter, s);
 744
 745	/* Cpu */
 746	if (flags & TRACE_GRAPH_PRINT_CPU)
 747		print_graph_cpu(s, cpu);
 748
 749	/* Proc */
 750	if (flags & TRACE_GRAPH_PRINT_PROC) {
 751		print_graph_proc(s, ent->pid);
 752		trace_seq_puts(s, " | ");
 753	}
 754
 755	/* Latency format */
 756	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 757		print_graph_lat_fmt(s, ent);
 758
 759	return;
 760}
 761
 762/*
 763 * Entry check for irq code
 764 *
 765 * returns 1 if
 766 *  - we are inside irq code
 767 *  - we just entered irq code
 768 *
  769 * returns 0 if
  770 *  - funcgraph-irqs option is set
 771 *  - we are not inside irq code
 772 */
 773static int
 774check_irq_entry(struct trace_iterator *iter, u32 flags,
 775		unsigned long addr, int depth)
 776{
 777	int cpu = iter->cpu;
 778	int *depth_irq;
 779	struct fgraph_data *data = iter->private;
 780
 781	/*
 782	 * If we are either displaying irqs, or we got called as
 783	 * a graph event and private data does not exist,
 784	 * then we bypass the irq check.
 785	 */
 786	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 787	    (!data))
 788		return 0;
 789
 790	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 791
 792	/*
 793	 * We are inside the irq code
 794	 */
 795	if (*depth_irq >= 0)
 796		return 1;
 797
 798	if ((addr < (unsigned long)__irqentry_text_start) ||
 799	    (addr >= (unsigned long)__irqentry_text_end))
 800		return 0;
 801
 802	/*
 803	 * We are entering irq code.
 804	 */
 805	*depth_irq = depth;
 806	return 1;
 807}
 808
 809/*
 810 * Return check for irq code
 811 *
 812 * returns 1 if
 813 *  - we are inside irq code
 814 *  - we just left irq code
 815 *
 816 * returns 0 if
  817 *  - funcgraph-irqs option is set
 818 *  - we are not inside irq code
 819 */
 820static int
 821check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
 822{
 823	int cpu = iter->cpu;
 824	int *depth_irq;
 825	struct fgraph_data *data = iter->private;
 826
 827	/*
 828	 * If we are either displaying irqs, or we got called as
 829	 * a graph event and private data does not exist,
 830	 * then we bypass the irq check.
 831	 */
 832	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 833	    (!data))
 834		return 0;
 835
 836	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 837
 838	/*
 839	 * We are not inside the irq code.
 840	 */
 841	if (*depth_irq == -1)
 842		return 0;
 843
 844	/*
 845	 * We are inside the irq code, and this is returning entry.
 846	 * Let's not trace it and clear the entry depth, since
 847	 * we are out of irq code.
 848	 *
 849	 * This condition ensures that we 'leave the irq code' once
 850	 * we are out of the entry depth. Thus protecting us from
 851	 * the RETURN entry loss.
 852	 */
 853	if (*depth_irq >= depth) {
 854		*depth_irq = -1;
 855		return 1;
 856	}
 857
 858	/*
 859	 * We are inside the irq code, and this is not the entry.
 860	 */
 861	return 1;
 862}
 863
 864static enum print_line_t
 865print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
 866			struct trace_iterator *iter, u32 flags)
 867{
 868	struct fgraph_data *data = iter->private;
 869	struct ftrace_graph_ent *call = &field->graph_ent;
 870	struct ftrace_graph_ret_entry *leaf_ret;
 871	static enum print_line_t ret;
 872	int cpu = iter->cpu;
 873
 874	if (check_irq_entry(iter, flags, call->func, call->depth))
 875		return TRACE_TYPE_HANDLED;
 876
 877	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
 878
 879	leaf_ret = get_return_for_leaf(iter, field);
 880	if (leaf_ret)
 881		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
 882	else
 883		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
 884
 885	if (data) {
 886		/*
 887		 * If we failed to write our output, then we need to make
  888		 * note of it, because we already consumed our entry.
 889		 */
 890		if (s->full) {
 891			data->failed = 1;
 892			data->cpu = cpu;
 893		} else
 894			data->failed = 0;
 895	}
 896
 897	return ret;
 898}
 899
 900static enum print_line_t
 901print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
 902		   struct trace_entry *ent, struct trace_iterator *iter,
 903		   u32 flags)
 904{
 905	unsigned long long duration = trace->rettime - trace->calltime;
 906	struct fgraph_data *data = iter->private;
 907	struct trace_array *tr = iter->tr;
 908	pid_t pid = ent->pid;
 909	int cpu = iter->cpu;
 910	int func_match = 1;
 911	int i;
 912
 913	if (check_irq_return(iter, flags, trace->depth))
 914		return TRACE_TYPE_HANDLED;
 915
 916	if (data) {
 917		struct fgraph_cpu_data *cpu_data;
 918		int cpu = iter->cpu;
 919
 920		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 921
 922		/*
 923		 * Comments display at + 1 to depth. This is the
 924		 * return from a function, we now want the comments
 925		 * to display at the same level of the bracket.
 926		 */
 927		cpu_data->depth = trace->depth - 1;
 928
 929		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
 930		    !WARN_ON_ONCE(trace->depth < 0)) {
 931			if (cpu_data->enter_funcs[trace->depth] != trace->func)
 932				func_match = 0;
 933			cpu_data->enter_funcs[trace->depth] = 0;
 934		}
 935	}
 936
 937	print_graph_prologue(iter, s, 0, 0, flags);
 938
 939	/* Overhead and duration */
 940	print_graph_duration(tr, duration, s, flags);
 941
 942	/* Closing brace */
 943	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
 944		trace_seq_putc(s, ' ');
 945
 946	/*
 947	 * If the return function does not have a matching entry,
 948	 * then the entry was lost. Instead of just printing
 949	 * the '}' and letting the user guess what function this
 950	 * belongs to, write out the function name. Always do
 951	 * that if the funcgraph-tail option is enabled.
 952	 */
 953	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
 954		trace_seq_puts(s, "}\n");
 955	else
 956		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
 957
 958	/* Overrun */
 959	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
 960		trace_seq_printf(s, " (Overruns: %lu)\n",
 961				 trace->overrun);
 962
 963	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
 964			cpu, pid, flags);
 965
 966	return trace_handle_return(s);
 967}
 968
 969static enum print_line_t
 970print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
 971		    struct trace_iterator *iter, u32 flags)
 972{
 973	struct trace_array *tr = iter->tr;
 974	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
 975	struct fgraph_data *data = iter->private;
 976	struct trace_event *event;
 977	int depth = 0;
 978	int ret;
 979	int i;
 980
 981	if (data)
 982		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
 983
 984	print_graph_prologue(iter, s, 0, 0, flags);
 985
 986	/* No time */
 987	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 988
 989	/* Indentation */
 990	if (depth > 0)
 991		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
 992			trace_seq_putc(s, ' ');
 993
 994	/* The comment */
 995	trace_seq_puts(s, "/* ");
 996
 997	switch (iter->ent->type) {
 998	case TRACE_BPUTS:
 999		ret = trace_print_bputs_msg_only(iter);
1000		if (ret != TRACE_TYPE_HANDLED)
1001			return ret;
1002		break;
1003	case TRACE_BPRINT:
1004		ret = trace_print_bprintk_msg_only(iter);
1005		if (ret != TRACE_TYPE_HANDLED)
1006			return ret;
1007		break;
1008	case TRACE_PRINT:
1009		ret = trace_print_printk_msg_only(iter);
1010		if (ret != TRACE_TYPE_HANDLED)
1011			return ret;
1012		break;
1013	default:
1014		event = ftrace_find_event(ent->type);
1015		if (!event)
1016			return TRACE_TYPE_UNHANDLED;
1017
1018		ret = event->funcs->trace(iter, sym_flags, event);
1019		if (ret != TRACE_TYPE_HANDLED)
1020			return ret;
1021	}
1022
1023	if (trace_seq_has_overflowed(s))
1024		goto out;
1025
1026	/* Strip ending newline */
1027	if (s->buffer[s->seq.len - 1] == '\n') {
1028		s->buffer[s->seq.len - 1] = '\0';
1029		s->seq.len--;
1030	}
1031
1032	trace_seq_puts(s, " */\n");
1033 out:
1034	return trace_handle_return(s);
1035}
1036
1037
1038enum print_line_t
1039print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1040{
1041	struct ftrace_graph_ent_entry *field;
1042	struct fgraph_data *data = iter->private;
1043	struct trace_entry *entry = iter->ent;
1044	struct trace_seq *s = &iter->seq;
1045	int cpu = iter->cpu;
1046	int ret;
1047
1048	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1049		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1050		return TRACE_TYPE_HANDLED;
1051	}
1052
1053	/*
1054	 * If the last output failed, there's a possibility we need
1055	 * to print out the missing entry which would never go out.
1056	 */
1057	if (data && data->failed) {
1058		field = &data->ent;
1059		iter->cpu = data->cpu;
1060		ret = print_graph_entry(field, s, iter, flags);
1061		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1062			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1063			ret = TRACE_TYPE_NO_CONSUME;
1064		}
1065		iter->cpu = cpu;
1066		return ret;
1067	}
1068
1069	switch (entry->type) {
1070	case TRACE_GRAPH_ENT: {
1071		/*
1072		 * print_graph_entry() may consume the current event,
1073		 * thus @field may become invalid, so we need to save it.
1074		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1075		 * it can be safely saved at the stack.
1076		 */
1077		struct ftrace_graph_ent_entry saved;
1078		trace_assign_type(field, entry);
1079		saved = *field;
1080		return print_graph_entry(&saved, s, iter, flags);
1081	}
1082	case TRACE_GRAPH_RET: {
1083		struct ftrace_graph_ret_entry *field;
1084		trace_assign_type(field, entry);
1085		return print_graph_return(&field->ret, s, entry, iter, flags);
1086	}
1087	case TRACE_STACK:
1088	case TRACE_FN:
 1089		/* don't trace stack and functions as comments */
1090		return TRACE_TYPE_UNHANDLED;
1091
1092	default:
1093		return print_graph_comment(s, entry, iter, flags);
1094	}
1095
1096	return TRACE_TYPE_HANDLED;
1097}
1098
1099static enum print_line_t
1100print_graph_function(struct trace_iterator *iter)
1101{
1102	return print_graph_function_flags(iter, tracer_flags.val);
1103}
1104
1105static enum print_line_t
1106print_graph_function_event(struct trace_iterator *iter, int flags,
1107			   struct trace_event *event)
1108{
1109	return print_graph_function(iter);
1110}
1111
1112static void print_lat_header(struct seq_file *s, u32 flags)
1113{
1114	static const char spaces[] = "                "	/* 16 spaces */
1115		"    "					/* 4 spaces */
1116		"                 ";			/* 17 spaces */
1117	int size = 0;
1118
1119	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1120		size += 16;
1121	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1122		size += 16;
1123	if (flags & TRACE_GRAPH_PRINT_CPU)
1124		size += 4;
1125	if (flags & TRACE_GRAPH_PRINT_PROC)
1126		size += 17;
1127
1128	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1129	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1130	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1131	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1132	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1133}
1134
1135static void __print_graph_headers_flags(struct trace_array *tr,
1136					struct seq_file *s, u32 flags)
1137{
1138	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1139
1140	if (lat)
1141		print_lat_header(s, flags);
1142
1143	/* 1st line */
1144	seq_putc(s, '#');
1145	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1146		seq_puts(s, "     TIME       ");
1147	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1148		seq_puts(s, "   REL TIME     ");
1149	if (flags & TRACE_GRAPH_PRINT_CPU)
1150		seq_puts(s, " CPU");
1151	if (flags & TRACE_GRAPH_PRINT_PROC)
1152		seq_puts(s, "  TASK/PID       ");
1153	if (lat)
1154		seq_puts(s, "||||   ");
1155	if (flags & TRACE_GRAPH_PRINT_DURATION)
1156		seq_puts(s, "  DURATION   ");
1157	seq_puts(s, "               FUNCTION CALLS\n");
1158
1159	/* 2nd line */
1160	seq_putc(s, '#');
1161	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1162		seq_puts(s, "      |         ");
1163	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
1164		seq_puts(s, "      |         ");
1165	if (flags & TRACE_GRAPH_PRINT_CPU)
1166		seq_puts(s, " |  ");
1167	if (flags & TRACE_GRAPH_PRINT_PROC)
1168		seq_puts(s, "   |    |        ");
1169	if (lat)
1170		seq_puts(s, "||||   ");
1171	if (flags & TRACE_GRAPH_PRINT_DURATION)
1172		seq_puts(s, "   |   |      ");
1173	seq_puts(s, "               |   |   |   |\n");
1174}
1175
1176static void print_graph_headers(struct seq_file *s)
1177{
1178	print_graph_headers_flags(s, tracer_flags.val);
1179}
1180
1181void print_graph_headers_flags(struct seq_file *s, u32 flags)
1182{
1183	struct trace_iterator *iter = s->private;
1184	struct trace_array *tr = iter->tr;
1185
1186	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1187		return;
1188
1189	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1190		/* print nothing if the buffers are empty */
1191		if (trace_empty(iter))
1192			return;
1193
1194		print_trace_header(s, iter);
1195	}
1196
1197	__print_graph_headers_flags(tr, s, flags);
1198}
1199
1200void graph_trace_open(struct trace_iterator *iter)
1201{
1202	/* pid and depth on the last trace processed */
1203	struct fgraph_data *data;
1204	gfp_t gfpflags;
1205	int cpu;
1206
1207	iter->private = NULL;
1208
1209	/* We can be called in atomic context via ftrace_dump() */
1210	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1211
1212	data = kzalloc(sizeof(*data), gfpflags);
1213	if (!data)
1214		goto out_err;
1215
1216	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1217	if (!data->cpu_data)
1218		goto out_err_free;
1219
1220	for_each_possible_cpu(cpu) {
1221		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1222		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1223		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1224		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1225
1226		*pid = -1;
1227		*depth = 0;
1228		*ignore = 0;
1229		*depth_irq = -1;
1230	}
1231
1232	iter->private = data;
1233
1234	return;
1235
1236 out_err_free:
1237	kfree(data);
1238 out_err:
1239	pr_warn("function graph tracer: not enough memory\n");
1240}
1241
1242void graph_trace_close(struct trace_iterator *iter)
1243{
1244	struct fgraph_data *data = iter->private;
1245
1246	if (data) {
1247		free_percpu(data->cpu_data);
1248		kfree(data);
1249	}
1250}
1251
1252static int
1253func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1254{
1255	if (bit == TRACE_GRAPH_PRINT_IRQS)
1256		ftrace_graph_skip_irqs = !set;
1257
1258	if (bit == TRACE_GRAPH_SLEEP_TIME)
1259		ftrace_graph_sleep_time_control(set);
1260
1261	if (bit == TRACE_GRAPH_GRAPH_TIME)
1262		ftrace_graph_graph_time_control(set);
1263
1264	return 0;
1265}
1266
1267static struct trace_event_functions graph_functions = {
1268	.trace		= print_graph_function_event,
1269};
1270
1271static struct trace_event graph_trace_entry_event = {
1272	.type		= TRACE_GRAPH_ENT,
1273	.funcs		= &graph_functions,
1274};
1275
1276static struct trace_event graph_trace_ret_event = {
1277	.type		= TRACE_GRAPH_RET,
1278	.funcs		= &graph_functions
1279};
1280
1281static struct tracer graph_trace __tracer_data = {
1282	.name		= "function_graph",
1283	.update_thresh	= graph_trace_update_thresh,
1284	.open		= graph_trace_open,
1285	.pipe_open	= graph_trace_open,
1286	.close		= graph_trace_close,
1287	.pipe_close	= graph_trace_close,
1288	.init		= graph_trace_init,
1289	.reset		= graph_trace_reset,
1290	.print_line	= print_graph_function,
1291	.print_header	= print_graph_headers,
1292	.flags		= &tracer_flags,
1293	.set_flag	= func_graph_set_flag,
1294#ifdef CONFIG_FTRACE_SELFTEST
1295	.selftest	= trace_selftest_startup_function_graph,
1296#endif
1297};
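
Once register_tracer() in init_graph_trace() below has run, "function_graph"
becomes selectable at runtime; writing the name to current_tracer invokes
graph_trace_init(). A userspace sketch, again assuming the conventional
tracefs mount point:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/current_tracer", "w");

	if (!f)
		return 1;
	fputs("function_graph\n", f);
	return fclose(f) ? 1 : 0;
}
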
1298
1299
1300static ssize_t
1301graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1302		  loff_t *ppos)
1303{
1304	unsigned long val;
1305	int ret;
1306
1307	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1308	if (ret)
1309		return ret;
1310
1311	fgraph_max_depth = val;
1312
1313	*ppos += cnt;
1314
1315	return cnt;
1316}
1317
1318static ssize_t
1319graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1320		 loff_t *ppos)
1321{
 1322	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1323	int n;
1324
1325	n = sprintf(buf, "%d\n", fgraph_max_depth);
1326
1327	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1328}
1329
1330static const struct file_operations graph_depth_fops = {
1331	.open		= tracing_open_generic,
1332	.write		= graph_depth_write,
1333	.read		= graph_depth_read,
1334	.llseek		= generic_file_llseek,
1335};
1336
1337static __init int init_graph_tracefs(void)
1338{
1339	struct dentry *d_tracer;
1340
1341	d_tracer = tracing_init_dentry();
1342	if (IS_ERR(d_tracer))
1343		return 0;
1344
1345	trace_create_file("max_graph_depth", 0644, d_tracer,
1346			  NULL, &graph_depth_fops);
1347
1348	return 0;
1349}
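
The max_graph_depth file created above accepts a decimal depth that
graph_depth_write() stores in fgraph_max_depth; a value of 0 lifts the limit,
since the depth comparison is only applied while fgraph_max_depth is nonzero.
A userspace sketch under the same tracefs mount-point assumption:

#include <stdio.h>

/* Limit function_graph nesting to two levels; write "0" to lift the cap. */
int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/max_graph_depth", "w");

	if (!f)
		return 1;
	fputs("2\n", f);
	return fclose(f) ? 1 : 0;
}
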
1350fs_initcall(init_graph_tracefs);
1351
1352static __init int init_graph_trace(void)
1353{
1354	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);
1355
1356	if (!register_trace_event(&graph_trace_entry_event)) {
1357		pr_warn("Warning: could not register graph trace events\n");
1358		return 1;
1359	}
1360
1361	if (!register_trace_event(&graph_trace_ret_event)) {
1362		pr_warn("Warning: could not register graph trace events\n");
1363		return 1;
1364	}
1365
1366	return register_tracer(&graph_trace);
1367}
1368
1369core_initcall(init_graph_trace);
v4.10.11
 
   1/*
   2 *
   3 * Function graph tracer.
   4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   5 * Mostly borrowed from function tracer which
   6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   7 *
   8 */
   9#include <linux/uaccess.h>
  10#include <linux/ftrace.h>
  11#include <linux/interrupt.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14
  15#include "trace.h"
  16#include "trace_output.h"
  17
  18static bool kill_ftrace_graph;
  19
  20/**
  21 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
  22 *
  23 * ftrace_graph_stop() is called when a severe error is detected in
  24 * the function graph tracing. This function is called by the critical
  25 * paths of function graph to keep those paths from doing any more harm.
  26 */
  27bool ftrace_graph_is_dead(void)
  28{
  29	return kill_ftrace_graph;
  30}
  31
  32/**
   33 * ftrace_graph_stop - set to permanently disable function graph tracing
  34 *
   35 * In case of an error in function graph tracing, this is called
  36 * to try to keep function graph tracing from causing any more harm.
  37 * Usually this is pretty severe and this is called to try to at least
  38 * get a warning out to the user.
  39 */
  40void ftrace_graph_stop(void)
  41{
  42	kill_ftrace_graph = true;
  43}
  44
  45/* When set, irq functions will be ignored */
  46static int ftrace_graph_skip_irqs;
  47
  48struct fgraph_cpu_data {
  49	pid_t		last_pid;
  50	int		depth;
  51	int		depth_irq;
  52	int		ignore;
  53	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
  54};
  55
  56struct fgraph_data {
  57	struct fgraph_cpu_data __percpu *cpu_data;
  58
  59	/* Place to preserve last processed entry. */
  60	struct ftrace_graph_ent_entry	ent;
  61	struct ftrace_graph_ret_entry	ret;
  62	int				failed;
  63	int				cpu;
  64};
  65
  66#define TRACE_GRAPH_INDENT	2
  67
  68unsigned int fgraph_max_depth;
  69
  70static struct tracer_opt trace_opts[] = {
  71	/* Display overruns? (for self-debug purpose) */
  72	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
   73	/* Display CPU? */
  74	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
   75	/* Display Overhead? */
  76	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  77	/* Display proc name/pid */
  78	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  79	/* Display duration of execution */
  80	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  81	/* Display absolute time of an entry */
  82	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  83	/* Display interrupts */
  84	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  85	/* Display function name after trailing } */
  86	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  87	/* Include sleep time (scheduled out) between entry and return */
  88	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
  89	/* Include time within nested functions */
  90	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
  91	{ } /* Empty entry */
  92};
  93
  94static struct tracer_flags tracer_flags = {
  95	/* Don't display overruns, proc, or tail by default */
  96	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  97	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  98	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
  99	.opts = trace_opts
 100};
 101
 102static struct trace_array *graph_array;
 103
 104/*
 105 * DURATION column is being also used to display IRQ signs,
 106 * following values are used by print_graph_irq and others
 107 * to fill in space into DURATION column.
 108 */
 109enum {
 110	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 111	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 112	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 113};
 114
 115static void
 116print_graph_duration(struct trace_array *tr, unsigned long long duration,
 117		     struct trace_seq *s, u32 flags);
 118
  119/* Add a function return address to the trace stack on thread info. */
 120int
 121ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 122			 unsigned long frame_pointer, unsigned long *retp)
 123{
 124	unsigned long long calltime;
 125	int index;
 126
 127	if (unlikely(ftrace_graph_is_dead()))
 128		return -EBUSY;
 129
 130	if (!current->ret_stack)
 131		return -EBUSY;
 132
 133	/*
 134	 * We must make sure the ret_stack is tested before we read
 135	 * anything else.
 136	 */
 137	smp_rmb();
 138
 139	/* The return trace stack is full */
 140	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 141		atomic_inc(&current->trace_overrun);
 142		return -EBUSY;
 143	}
 144
 145	/*
 146	 * The curr_ret_stack is an index to ftrace return stack of
  147	 * current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
  148	 * when the function graph tracer is used.  To support filtering
  149	 * out specific functions, it makes the index negative by
  150	 * subtracting a huge value (FTRACE_NOTRACE_DEPTH), so when ftrace
  151	 * sees a negative index it will ignore the record.  The index
  152	 * gets recovered when returning from the filtered function by
  153	 * adding FTRACE_NOTRACE_DEPTH back, after which recording
  154	 * continues normally.
 155	 *
 156	 * The curr_ret_stack is initialized to -1 and get increased
 157	 * in this function.  So it can be less than -1 only if it was
 158	 * filtered out via ftrace_graph_notrace_addr() which can be
 159	 * set from set_graph_notrace file in tracefs by user.
 160	 */
 161	if (current->curr_ret_stack < -1)
 162		return -EBUSY;
 163
 164	calltime = trace_clock_local();
 165
 166	index = ++current->curr_ret_stack;
 167	if (ftrace_graph_notrace_addr(func))
 168		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 169	barrier();
 170	current->ret_stack[index].ret = ret;
 171	current->ret_stack[index].func = func;
 172	current->ret_stack[index].calltime = calltime;
 173#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 174	current->ret_stack[index].fp = frame_pointer;
 175#endif
 176#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 177	current->ret_stack[index].retp = retp;
 178#endif
 179	*depth = current->curr_ret_stack;
 180
 181	return 0;
 182}
 183
  184/* Retrieve a function return address from the trace stack on thread info. */
 185static void
 186ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 187			unsigned long frame_pointer)
 188{
 189	int index;
 190
 191	index = current->curr_ret_stack;
 192
 193	/*
 194	 * A negative index here means that it's just returned from a
 195	 * notrace'd function.  Recover index to get an original
 196	 * return address.  See ftrace_push_return_trace().
 197	 *
 198	 * TODO: Need to check whether the stack gets corrupted.
 199	 */
 200	if (index < 0)
 201		index += FTRACE_NOTRACE_DEPTH;
 202
 203	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 204		ftrace_graph_stop();
 205		WARN_ON(1);
  206		/* Might as well panic, otherwise we have nowhere to go */
 207		*ret = (unsigned long)panic;
 208		return;
 209	}
 210
 211#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 212	/*
 213	 * The arch may choose to record the frame pointer used
 214	 * and check it here to make sure that it is what we expect it
 215	 * to be. If gcc does not set the place holder of the return
 216	 * address in the frame pointer, and does a copy instead, then
 217	 * the function graph trace will fail. This test detects this
 218	 * case.
 219	 *
 220	 * Currently, x86_32 with optimize for size (-Os) makes the latest
 221	 * gcc do the above.
 222	 *
 223	 * Note, -mfentry does not use frame pointers, and this test
 224	 *  is not needed if CC_USING_FENTRY is set.
 225	 */
 226	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
 227		ftrace_graph_stop();
 228		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
 229		     "  from func %ps return to %lx\n",
 230		     current->ret_stack[index].fp,
 231		     frame_pointer,
 232		     (void *)current->ret_stack[index].func,
 233		     current->ret_stack[index].ret);
 234		*ret = (unsigned long)panic;
 235		return;
 236	}
 237#endif
 238
 239	*ret = current->ret_stack[index].ret;
 240	trace->func = current->ret_stack[index].func;
 241	trace->calltime = current->ret_stack[index].calltime;
 242	trace->overrun = atomic_read(&current->trace_overrun);
 243	trace->depth = index;
 244}
 245
 246/*
 247 * Send the trace to the ring-buffer.
 248 * @return the original return address.
 249 */
 250unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 251{
 252	struct ftrace_graph_ret trace;
 253	unsigned long ret;
 254
 255	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 256	trace.rettime = trace_clock_local();
 257	barrier();
 258	current->curr_ret_stack--;
 259	/*
 260	 * The curr_ret_stack can be less than -1 only if it was
 261	 * filtered out and it's about to return from the function.
 262	 * Recover the index and continue to trace normal functions.
 263	 */
 264	if (current->curr_ret_stack < -1) {
 265		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
 266		return ret;
 267	}
 268
 269	/*
 270	 * The trace should run after decrementing the ret counter
 271	 * in case an interrupt were to come in. We don't want to
 272	 * lose the interrupt if max_depth is set.
 273	 */
 274	ftrace_graph_return(&trace);
 275
 276	if (unlikely(!ret)) {
 277		ftrace_graph_stop();
 278		WARN_ON(1);
 279		/* Might as well panic. What else to do? */
 280		ret = (unsigned long)panic;
 281	}
 282
 283	return ret;
 284}
 285
 286/**
 287 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 288 *			   to its original value
 289 *
 290 * This function can be called by stack unwinding code to convert a found stack
 291 * return address ('ret') to its original value, in case the function graph
 292 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 293 * been modified, the unchanged value of 'ret' is returned.
 294 *
 295 * 'idx' is a state variable which should be initialized by the caller to zero
 296 * before the first call.
 297 *
 298 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 299 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 300 */
 301#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 302unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 303				    unsigned long ret, unsigned long *retp)
 304{
 305	int index = task->curr_ret_stack;
 306	int i;
 307
 308	if (ret != (unsigned long)return_to_handler)
 309		return ret;
 310
 311	if (index < -1)
 312		index += FTRACE_NOTRACE_DEPTH;
 313
 314	if (index < 0)
 315		return ret;
 316
 317	for (i = 0; i <= index; i++)
 318		if (task->ret_stack[i].retp == retp)
 319			return task->ret_stack[i].ret;
 320
 321	return ret;
 322}
 323#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
 324unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 325				    unsigned long ret, unsigned long *retp)
 326{
 327	int task_idx;
 328
 329	if (ret != (unsigned long)return_to_handler)
 330		return ret;
 331
 332	task_idx = task->curr_ret_stack;
 333
 334	if (!task->ret_stack || task_idx < *idx)
 335		return ret;
 336
 337	task_idx -= *idx;
 338	(*idx)++;
 339
 340	return task->ret_stack[task_idx].ret;
 341}
 342#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
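
Arch stack unwinders consume ftrace_graph_ret_addr() while walking a stack.
The loop below is purely illustrative (record_frame() and the stack bounds
are invented stand-ins; real unwinders carry considerably more state):

#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical consumer: just print the resolved frame. */
static void record_frame(unsigned long addr)
{
	pr_info("frame: %pS\n", (void *)addr);
}

static void walk_stack_sketch(struct task_struct *task,
			      unsigned long *stack_top,
			      unsigned long *stack_end)
{
	int graph_idx = 0;	/* state for ftrace_graph_ret_addr(); must start at 0 */
	unsigned long *sp;

	for (sp = stack_top; sp < stack_end; sp++) {
		unsigned long addr = *sp;

		/* Undo a return_to_handler substitution, if any */
		addr = ftrace_graph_ret_addr(task, &graph_idx, addr, sp);
		if (__kernel_text_address(addr))
			record_frame(addr);
	}
}
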
 343
 344int __trace_graph_entry(struct trace_array *tr,
 345				struct ftrace_graph_ent *trace,
 346				unsigned long flags,
 347				int pc)
 348{
 349	struct trace_event_call *call = &event_funcgraph_entry;
 350	struct ring_buffer_event *event;
 351	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 352	struct ftrace_graph_ent_entry *entry;
 353
 354	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 355					  sizeof(*entry), flags, pc);
 356	if (!event)
 357		return 0;
 358	entry	= ring_buffer_event_data(event);
 359	entry->graph_ent			= *trace;
 360	if (!call_filter_check_discard(call, entry, buffer, event))
 361		trace_buffer_unlock_commit_nostack(buffer, event);
 362
 363	return 1;
 364}
 365
 366static inline int ftrace_graph_ignore_irqs(void)
 367{
 368	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 369		return 0;
 370
 371	return in_irq();
 372}
 373
 374int trace_graph_entry(struct ftrace_graph_ent *trace)
 375{
 376	struct trace_array *tr = graph_array;
 377	struct trace_array_cpu *data;
 378	unsigned long flags;
 379	long disabled;
 380	int ret;
 381	int cpu;
 382	int pc;
 383
 384	if (!ftrace_trace_task(tr))
 385		return 0;
 386
 387	if (ftrace_graph_ignore_func(trace))
 388		return 0;
 389
 390	if (ftrace_graph_ignore_irqs())
 391		return 0;
 392
 393	/*
 394	 * Do not trace a function if it's filtered by set_graph_notrace.
  395	 * Make the index of the ret stack negative to indicate that it should
 396	 * ignore further functions.  But it needs its own ret stack entry
 397	 * to recover the original index in order to continue tracing after
 398	 * returning from the function.
 399	 */
 400	if (ftrace_graph_notrace_addr(trace->func))
 401		return 1;
 402
 403	/*
  404	 * Stop here if tracing_thresh is set. We only write function return
 405	 * events to the ring buffer.
 406	 */
 407	if (tracing_thresh)
 408		return 1;
 409
 410	local_irq_save(flags);
 411	cpu = raw_smp_processor_id();
 412	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 413	disabled = atomic_inc_return(&data->disabled);
 414	if (likely(disabled == 1)) {
 415		pc = preempt_count();
 416		ret = __trace_graph_entry(tr, trace, flags, pc);
 417	} else {
 418		ret = 0;
 419	}
 420
 421	atomic_dec(&data->disabled);
 422	local_irq_restore(flags);
 423
 424	return ret;
 425}
 426
 427static void
 428__trace_graph_function(struct trace_array *tr,
 429		unsigned long ip, unsigned long flags, int pc)
 430{
 431	u64 time = trace_clock_local();
 432	struct ftrace_graph_ent ent = {
 433		.func  = ip,
 434		.depth = 0,
 435	};
 436	struct ftrace_graph_ret ret = {
 437		.func     = ip,
 438		.depth    = 0,
 439		.calltime = time,
 440		.rettime  = time,
 441	};
 442
 443	__trace_graph_entry(tr, &ent, flags, pc);
 444	__trace_graph_return(tr, &ret, flags, pc);
 445}
 446
 447void
 448trace_graph_function(struct trace_array *tr,
 449		unsigned long ip, unsigned long parent_ip,
 450		unsigned long flags, int pc)
 451{
 452	__trace_graph_function(tr, ip, flags, pc);
 453}
 454
 455void __trace_graph_return(struct trace_array *tr,
 456				struct ftrace_graph_ret *trace,
 457				unsigned long flags,
 458				int pc)
 459{
 460	struct trace_event_call *call = &event_funcgraph_exit;
 461	struct ring_buffer_event *event;
 462	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 463	struct ftrace_graph_ret_entry *entry;
 464
 465	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 466					  sizeof(*entry), flags, pc);
 467	if (!event)
 468		return;
 469	entry	= ring_buffer_event_data(event);
 470	entry->ret				= *trace;
 471	if (!call_filter_check_discard(call, entry, buffer, event))
 472		trace_buffer_unlock_commit_nostack(buffer, event);
 473}
 474
 475void trace_graph_return(struct ftrace_graph_ret *trace)
 476{
 477	struct trace_array *tr = graph_array;
 478	struct trace_array_cpu *data;
 479	unsigned long flags;
 480	long disabled;
 481	int cpu;
 482	int pc;
 483
 484	local_irq_save(flags);
 485	cpu = raw_smp_processor_id();
 486	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 487	disabled = atomic_inc_return(&data->disabled);
 488	if (likely(disabled == 1)) {
 489		pc = preempt_count();
 490		__trace_graph_return(tr, trace, flags, pc);
 491	}
 492	atomic_dec(&data->disabled);
 493	local_irq_restore(flags);
 494}
 495
 496void set_graph_array(struct trace_array *tr)
 497{
 498	graph_array = tr;
 499
 500	/* Make graph_array visible before we start tracing */
 501
 502	smp_mb();
 503}
 504
 505static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 506{
 507	if (tracing_thresh &&
 508	    (trace->rettime - trace->calltime < tracing_thresh))
 509		return;
 510	else
 511		trace_graph_return(trace);
 512}
 513
 514static int graph_trace_init(struct trace_array *tr)
 515{
 516	int ret;
 517
 518	set_graph_array(tr);
 519	if (tracing_thresh)
 520		ret = register_ftrace_graph(&trace_graph_thresh_return,
 521					    &trace_graph_entry);
 522	else
 523		ret = register_ftrace_graph(&trace_graph_return,
 524					    &trace_graph_entry);
 525	if (ret)
 526		return ret;
 527	tracing_start_cmdline_record();
 528
 529	return 0;
 530}
 531
 532static void graph_trace_reset(struct trace_array *tr)
 533{
 534	tracing_stop_cmdline_record();
 535	unregister_ftrace_graph();
 536}
 537
 538static int graph_trace_update_thresh(struct trace_array *tr)
 539{
 540	graph_trace_reset(tr);
 541	return graph_trace_init(tr);
 542}
 543
 544static int max_bytes_for_cpu;
 545
 546static void print_graph_cpu(struct trace_seq *s, int cpu)
 547{
 548	/*
 549	 * Start with a space character - to make it stand out
 550	 * to the right a bit when trace output is pasted into
 551	 * email:
 552	 */
 553	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 554}
 555
#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

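/*
 * Peek at the event following @curr. If it is the matching return
 * record (same pid and same function), @curr is a leaf: the caller can
 * print entry and exit as a single "func();" line, and the iterator is
 * advanced past the return event here.
 */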
static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is a return type, we can
			 * safely copy the entire event; otherwise only
			 * its type matters.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_read(ring_iter, NULL);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

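/*
 * Annotate transitions in and out of the irq entry code. Addresses
 * outside the __irqentry_text section are ignored; for the rest an
 * "==========>" (irq entry) or "<==========" (irq exit) marker line is
 * emitted, preceded by the usual context columns.
 */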
static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}

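/*
 * Format a duration given in nanoseconds as microseconds with a
 * fractional part, padded to a fixed column width. For example, a
 * duration of 1500 is printed as "1.500 us" plus enough trailing
 * spaces to fill the row.
 */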
void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an execution-time overhead to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		/*
		 * Comments display at + 1 to depth. Since
		 * this is a leaf function, keep the comments
		 * equal to this depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	return trace_handle_return(s);
}

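/*
 * Non-leaf entry: the function has (or may have) children, so only the
 * opening "func() {" line is printed here; the closing brace comes
 * later from print_graph_return(). Illustrative output of the two
 * entry styles (durations made up):
 *
 *  1)   0.639 us    |        rcu_irq_exit();
 *  1)               |        irq_exit() {
 */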
static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		/* If a graph tracer ignored set_graph_notrace */
		if (call->depth < -1)
			call->depth += FTRACE_NOTRACE_DEPTH;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the return of the
	 * entry function. Let's not trace it and clear the entry depth,
	 * since we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth. Thus protecting us from
	 * the RETURN entry loss.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

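/*
 * Print one function-entry event: skip it while inside irq code when
 * funcgraph-irqs is off, emit the prologue (pid-switch banner, irq
 * markers, context columns), then hand off to the leaf or nested
 * variant. On output failure the entry has already been consumed, so
 * it is stashed in @data for print_graph_function_flags() to retry.
 */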
static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

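/*
 * Print one function-exit event as a closing brace at its depth,
 * preceded by the duration. When the function recorded at this depth
 * on entry does not match (the entry event was lost), or when
 * funcgraph-tail is set, the function name is appended to the brace
 * as a comment so the reader does not have to guess.
 */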
static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments display at + 1 to depth. This is the
		 * return from a function, we now want the comments
		 * to display at the same level of the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %lu)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

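/*
 * Any non-graph event in the stream (trace_printk() output, other
 * enabled trace events, ...) is rendered as a C-style comment,
 * indented one level deeper than the current function so it lines up
 * with that function's body.
 */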
static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved on the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};

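/*
 * "max_graph_depth" tracefs file: limit how many levels of nested
 * calls are traced; a value of 0 is treated by the entry-side checks
 * as "no limit". Illustrative shell usage (the tracefs mount point
 * may differ):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth
 */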
static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};

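/*
 * Create the "max_graph_depth" control file at boot. A failed
 * tracing_init_dentry() is not fatal here: the tracer itself still
 * works, it just cannot be depth-limited from user space.
 */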
static __init int init_graph_tracefs(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("max_graph_depth", 0644, d_tracer,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);