// SPDX-License-Identifier: GPL-2.0
/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
	pid_t		last_pid;
	int		depth;
	int		depth_irq;
	int		ignore;
	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
	struct fgraph_cpu_data __percpu *cpu_data;

	/* Place to preserve last processed entry. */
	struct ftrace_graph_ent_entry	ent;
	struct ftrace_graph_ret_entry	ret;
	int				failed;
	int				cpu;
};

#define TRACE_GRAPH_INDENT	2

unsigned int fgraph_max_depth;

static struct tracer_opt trace_opts[] = {
	/* Display overruns? (for self-debugging purposes) */
	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
	/* Display CPU? */
	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
	/* Display overhead? */
	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
	/* Display proc name/pid */
	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
	/* Display duration of execution */
	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
	/* Display absolute time of an entry */
	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
	/* Display interrupts */
	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
	/* Display function name after trailing } */
	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
	/* Include sleep time (scheduled out) between entry and return */
	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },

#ifdef CONFIG_FUNCTION_PROFILER
	/* Include time within nested functions */
	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
#endif

	{ } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
	/* Don't display overruns, proc, or tail by default */
	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
	.opts = trace_opts
};
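
/*
 * Usage sketch (illustrative, assuming tracefs is mounted at
 * /sys/kernel/tracing):
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   echo funcgraph-proc > /sys/kernel/tracing/trace_options
 *   echo nofuncgraph-irqs > /sys/kernel/tracing/trace_options
 *
 * Each tracer_opt above toggles one TRACE_GRAPH_* bit in tracer_flags;
 * func_graph_set_flag() below handles the bits that need extra bookkeeping.
 */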

static struct trace_array *graph_array;

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags);

int __trace_graph_entry(struct trace_array *tr,
				struct ftrace_graph_ent *trace,
				unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return 0;
	entry	= ring_buffer_event_data(event);
	entry->graph_ent			= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);

	return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
		return 0;

	return in_hardirq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int ret;
	int cpu;

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT))
		return 0;

	/*
	 * Do not trace a function if it's filtered by set_graph_notrace.
	 * Make the index of ret stack negative to indicate that it should
	 * ignore further functions.  But it needs its own ret stack entry
	 * to recover the original index in order to continue tracing after
	 * returning from the function.
	 */
	if (ftrace_graph_notrace_addr(trace->func)) {
		trace_recursion_set(TRACE_GRAPH_NOTRACE_BIT);
		/*
		 * Need to return 1 to have the return called
		 * that will clear the NOTRACE bit.
		 */
		return 1;
	}

	if (!ftrace_trace_task(tr))
		return 0;

	if (ftrace_graph_ignore_func(trace))
		return 0;

	if (ftrace_graph_ignore_irqs())
		return 0;

	/*
	 * Stop here if tracing_thresh is set. We only write function return
	 * events to the ring buffer.
	 */
	if (tracing_thresh)
		return 1;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		ret = __trace_graph_entry(tr, trace, trace_ctx);
	} else {
		ret = 0;
	}

	atomic_dec(&data->disabled);
	local_irq_restore(flags);

	return ret;
}
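
/*
 * Note on the guard above (and in trace_graph_return() below): the per-CPU
 * "disabled" counter doubles as a nesting/disable check.  Only the first
 * increment on a CPU (disabled == 1) records the event; if tracing on this
 * CPU is already disabled, the event is silently dropped.
 */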

static void
__trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned int trace_ctx)
{
	u64 time = trace_clock_local();
	struct ftrace_graph_ent ent = {
		.func  = ip,
		.depth = 0,
	};
	struct ftrace_graph_ret ret = {
		.func     = ip,
		.depth    = 0,
		.calltime = time,
		.rettime  = time,
	};

	__trace_graph_entry(tr, &ent, trace_ctx);
	__trace_graph_return(tr, &ret, trace_ctx);
}

void
trace_graph_function(struct trace_array *tr,
		unsigned long ip, unsigned long parent_ip,
		unsigned int trace_ctx)
{
	__trace_graph_function(tr, ip, trace_ctx);
}

void __trace_graph_return(struct trace_array *tr,
				struct ftrace_graph_ret *trace,
				unsigned int trace_ctx)
{
	struct trace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
	struct trace_buffer *buffer = tr->array_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
					  sizeof(*entry), trace_ctx);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ret				= *trace;
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit_nostack(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
	struct trace_array *tr = graph_array;
	struct trace_array_cpu *data;
	unsigned long flags;
	unsigned int trace_ctx;
	long disabled;
	int cpu;

	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->array_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		trace_ctx = tracing_gen_ctx_flags(flags);
		__trace_graph_return(tr, trace, trace_ctx);
	}
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
	graph_array = tr;

	/* Make graph_array visible before we start tracing */

	smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
	ftrace_graph_addr_finish(trace);

	if (trace_recursion_test(TRACE_GRAPH_NOTRACE_BIT)) {
		trace_recursion_clear(TRACE_GRAPH_NOTRACE_BIT);
		return;
	}

	if (tracing_thresh &&
	    (trace->rettime - trace->calltime < tracing_thresh))
		return;
	else
		trace_graph_return(trace);
}

static struct fgraph_ops funcgraph_thresh_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_thresh_return,
};

static struct fgraph_ops funcgraph_ops = {
	.entryfunc = &trace_graph_entry,
	.retfunc = &trace_graph_return,
};

static int graph_trace_init(struct trace_array *tr)
{
	int ret;

	set_graph_array(tr);
	if (tracing_thresh)
		ret = register_ftrace_graph(&funcgraph_thresh_ops);
	else
		ret = register_ftrace_graph(&funcgraph_ops);
	if (ret)
		return ret;
	tracing_start_cmdline_record();

	return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
	tracing_stop_cmdline_record();
	if (tracing_thresh)
		unregister_ftrace_graph(&funcgraph_thresh_ops);
	else
		unregister_ftrace_graph(&funcgraph_ops);
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
	graph_trace_reset(tr);
	return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
	/*
	 * Start with a space character - to make it stand out
	 * to the right a bit when trace output is pasted into
	 * email:
	 */
	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}
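
/*
 * Illustrative example: with nr_cpu_ids == 4, max_bytes_for_cpu is 1
 * (snprintf(NULL, 0, "%u", 3) in init_graph_trace() below), so CPU 3
 * renders as " 3) " at the start of each line.
 */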

#define TRACE_GRAPH_PROCINFO_LENGTH	14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
	char comm[TASK_COMM_LEN];
	/* sign + log10(MAX_INT) + '\0' */
	char pid_str[11];
	int spaces = 0;
	int len;
	int i;

	trace_find_cmdline(pid, comm);
	comm[7] = '\0';
	sprintf(pid_str, "%d", pid);

	/* 1 stands for the "-" character */
	len = strlen(comm) + strlen(pid_str) + 1;

	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

	/* First spaces to align center */
	for (i = 0; i < spaces / 2; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%s-%s", comm, pid_str);

	/* Last spaces to align center */
	for (i = 0; i < spaces - (spaces / 2); i++)
		trace_seq_putc(s, ' ');
}
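
/*
 * Illustrative example: pid 1755 running sshd prints as "sshd-1755",
 * centered in the 14-character TRACE_GRAPH_PROCINFO_LENGTH field (the
 * comm is truncated to 7 characters above).
 */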


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
	trace_seq_putc(s, ' ');
	trace_print_lat_fmt(s, entry);
	trace_seq_puts(s, " | ");
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
	pid_t prev_pid;
	pid_t *last_pid;

	if (!data)
		return;

	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

	if (*last_pid == pid)
		return;

	prev_pid = *last_pid;
	*last_pid = pid;

	if (prev_pid == -1)
		return;
/*
 * Context-switch trace line:

 ------------------------------------------
 | 1)  migration/0--1  =>  sshd-1755
 ------------------------------------------

 */
	trace_seq_puts(s, " ------------------------------------------\n");
	print_graph_cpu(s, cpu);
	print_graph_proc(s, prev_pid);
	trace_seq_puts(s, " => ");
	print_graph_proc(s, pid);
	trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *curr)
{
	struct fgraph_data *data = iter->private;
	struct ring_buffer_iter *ring_iter = NULL;
	struct ring_buffer_event *event;
	struct ftrace_graph_ret_entry *next;

	/*
	 * If the previous output failed to write to the seq buffer,
	 * then we just reuse the data from before.
	 */
	if (data && data->failed) {
		curr = &data->ent;
		next = &data->ret;
	} else {

		ring_iter = trace_buffer_iter(iter, iter->cpu);

		/* First peek to compare current entry and the next one */
		if (ring_iter)
			event = ring_buffer_iter_peek(ring_iter, NULL);
		else {
			/*
			 * We need to consume the current entry to see
			 * the next one.
			 */
			ring_buffer_consume(iter->array_buffer->buffer, iter->cpu,
					    NULL, NULL);
			event = ring_buffer_peek(iter->array_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}

		if (!event)
			return NULL;

		next = ring_buffer_event_data(event);

		if (data) {
			/*
			 * Save current and next entries for later reference
			 * if the output fails.
			 */
			data->ent = *curr;
			/*
			 * If the next event is not a return type, then
			 * we only care about what type it is. Otherwise we can
			 * safely copy the entire event.
			 */
			if (next->ent.type == TRACE_GRAPH_RET)
				data->ret = *next;
			else
				data->ret.ent.type = next->ent.type;
		}
	}

	if (next->ent.type != TRACE_GRAPH_RET)
		return NULL;

	if (curr->ent.pid != next->ent.pid ||
			curr->graph_ent.func != next->ret.func)
		return NULL;

	/* this is a leaf, now advance the iterator */
	if (ring_iter)
		ring_buffer_iter_advance(ring_iter);

	return next;
}

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
	unsigned long usecs_rem;

	usecs_rem = do_div(t, NSEC_PER_SEC);
	usecs_rem /= 1000;

	trace_seq_printf(s, "%5lu.%06lu |  ",
			 (unsigned long)t, usecs_rem);
}

static void
print_graph_rel_time(struct trace_iterator *iter, struct trace_seq *s)
{
	unsigned long long usecs;

	usecs = iter->ts - iter->array_buffer->time_start;
	do_div(usecs, NSEC_PER_USEC);

	trace_seq_printf(s, "%9llu us |  ", usecs);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
		enum trace_type type, int cpu, pid_t pid, u32 flags)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;

	if (addr < (unsigned long)__irqentry_text_start ||
		addr >= (unsigned long)__irqentry_text_end)
		return;

	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
		/* Absolute time */
		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
			print_graph_abs_time(iter->ts, s);

		/* Relative time */
		if (flags & TRACE_GRAPH_PRINT_REL_TIME)
			print_graph_rel_time(iter, s);

		/* Cpu */
		if (flags & TRACE_GRAPH_PRINT_CPU)
			print_graph_cpu(s, cpu);

		/* Proc */
		if (flags & TRACE_GRAPH_PRINT_PROC) {
			print_graph_proc(s, pid);
			trace_seq_puts(s, " | ");
		}

		/* Latency format */
		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
			print_graph_lat_fmt(s, ent);
	}

	/* No overhead */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

	if (type == TRACE_GRAPH_ENT)
		trace_seq_puts(s, "==========>");
	else
		trace_seq_puts(s, "<==========");

	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
	trace_seq_putc(s, '\n');
}
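
/*
 * In the rendered trace these arrows mark entry to and exit from irq
 * handler code, e.g. (illustrative; exact spacing depends on the
 * enabled columns):
 *
 *  1)   ==========>   |
 *  1)   <==========   |
 */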

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
	unsigned long nsecs_rem = do_div(duration, 1000);
	/* log10(ULONG_MAX) + '\0' */
	char usecs_str[21];
	char nsecs_str[5];
	int len;
	int i;

	sprintf(usecs_str, "%lu", (unsigned long) duration);

	/* Print usecs */
	trace_seq_printf(s, "%s", usecs_str);

	len = strlen(usecs_str);

	/* Print nsecs (we don't want to exceed 7 digits) */
	if (len < 7) {
		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
		trace_seq_printf(s, ".%s", nsecs_str);
		len += strlen(nsecs_str) + 1;
	}

	trace_seq_puts(s, " us ");

	/* Print remaining spaces to fit the row's width */
	for (i = len; i < 8; i++)
		trace_seq_putc(s, ' ');
}
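
/*
 * Worked example (not from the source): a duration of 3456 ns enters with
 * duration == 3456; do_div() leaves duration == 3 us and nsecs_rem == 456,
 * so the column reads "3.456 us " padded out to the row width.
 */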

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
		     struct trace_seq *s, u32 flags)
{
	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* No real data, just filling the column with spaces */
	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
	case FLAGS_FILL_FULL:
		trace_seq_puts(s, "              |  ");
		return;
	case FLAGS_FILL_START:
		trace_seq_puts(s, "  ");
		return;
	case FLAGS_FILL_END:
		trace_seq_puts(s, " |");
		return;
	}

	/* Signal an overhead of execution time to the output */
	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
		trace_seq_printf(s, "%c ", trace_find_mark(duration));
	else
		trace_seq_puts(s, "  ");

	trace_print_graph_duration(duration, s);
	trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
		struct ftrace_graph_ent_entry *entry,
		struct ftrace_graph_ret_entry *ret_entry,
		struct trace_seq *s, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	struct ftrace_graph_ret *graph_ret;
	struct ftrace_graph_ent *call;
	unsigned long long duration;
	int cpu = iter->cpu;
	int i;

	graph_ret = &ret_entry->ret;
	call = &entry->graph_ent;
	duration = graph_ret->rettime - graph_ret->calltime;

	if (data) {
		struct fgraph_cpu_data *cpu_data;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments are displayed at depth + 1. Since
		 * this is a leaf function, keep the comments
		 * at the same depth.
		 */
		cpu_data->depth = call->depth - 1;

		/* No need to keep this function around for this depth */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = 0;
	}

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps();\n", (void *)call->func);

	print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
			cpu, iter->ent->pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
			 struct ftrace_graph_ent_entry *entry,
			 struct trace_seq *s, int cpu, u32 flags)
{
	struct ftrace_graph_ent *call = &entry->graph_ent;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	int i;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
		cpu_data->depth = call->depth;

		/* Save this function pointer to see if the exit matches */
		if (call->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(call->depth < 0))
			cpu_data->enter_funcs[call->depth] = call->func;
	}

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Function */
	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	trace_seq_printf(s, "%ps() {\n", (void *)call->func);

	if (trace_seq_has_overflowed(s))
		return TRACE_TYPE_PARTIAL_LINE;

	/*
	 * we already consumed the current entry to check the next one
	 * and see if this is a leaf.
	 */
	return TRACE_TYPE_NO_CONSUME;
}
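
/*
 * Illustrative output of the two entry styles above:
 *
 *  1)   0.633 us    |        fget_light();     <- leaf (duration inline)
 *  1)               |        sys_read() {      <- nested (duration on '}')
 */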

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
		     int type, unsigned long addr, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct trace_entry *ent = iter->ent;
	struct trace_array *tr = iter->tr;
	int cpu = iter->cpu;

	/* Pid */
	verif_pid(s, ent->pid, cpu, data);

	if (type)
		/* Interrupt */
		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	/* Absolute time */
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		print_graph_abs_time(iter->ts, s);

	/* Relative time */
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		print_graph_rel_time(iter, s);

	/* Cpu */
	if (flags & TRACE_GRAPH_PRINT_CPU)
		print_graph_cpu(s, cpu);

	/* Proc */
	if (flags & TRACE_GRAPH_PRINT_PROC) {
		print_graph_proc(s, ent->pid);
		trace_seq_puts(s, " | ");
	}

	/* Latency format */
	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
		print_graph_lat_fmt(s, ent);

	return;
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
		unsigned long addr, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are inside the irq code
	 */
	if (*depth_irq >= 0)
		return 1;

	if ((addr < (unsigned long)__irqentry_text_start) ||
	    (addr >= (unsigned long)__irqentry_text_end))
		return 0;

	/*
	 * We are entering irq code.
	 */
	*depth_irq = depth;
	return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - funcgraph-irqs option is set
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
	int cpu = iter->cpu;
	int *depth_irq;
	struct fgraph_data *data = iter->private;

	/*
	 * If we are either displaying irqs, or we got called as
	 * a graph event and private data does not exist,
	 * then we bypass the irq check.
	 */
	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
	    (!data))
		return 0;

	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

	/*
	 * We are not inside the irq code.
	 */
	if (*depth_irq == -1)
		return 0;

	/*
	 * We are inside the irq code, and this is the returning entry.
	 * Let's not trace it and clear the entry depth, since
	 * we are out of irq code.
	 *
	 * This condition ensures that we 'leave the irq code' once
	 * we are out of the entry depth, thus protecting us from
	 * losing the RETURN entry.
	 */
	if (*depth_irq >= depth) {
		*depth_irq = -1;
		return 1;
	}

	/*
	 * We are inside the irq code, and this is not the entry.
	 */
	return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
			struct trace_iterator *iter, u32 flags)
{
	struct fgraph_data *data = iter->private;
	struct ftrace_graph_ent *call = &field->graph_ent;
	struct ftrace_graph_ret_entry *leaf_ret;
	static enum print_line_t ret;
	int cpu = iter->cpu;

	if (check_irq_entry(iter, flags, call->func, call->depth))
		return TRACE_TYPE_HANDLED;

	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

	leaf_ret = get_return_for_leaf(iter, field);
	if (leaf_ret)
		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
	else
		ret = print_graph_entry_nested(iter, field, s, cpu, flags);

	if (data) {
		/*
		 * If we failed to write our output, then we need to make
		 * note of it, because we already consumed our entry.
		 */
		if (s->full) {
			data->failed = 1;
			data->cpu = cpu;
		} else
			data->failed = 0;
	}

	return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
		   struct trace_entry *ent, struct trace_iterator *iter,
		   u32 flags)
{
	unsigned long long duration = trace->rettime - trace->calltime;
	struct fgraph_data *data = iter->private;
	struct trace_array *tr = iter->tr;
	pid_t pid = ent->pid;
	int cpu = iter->cpu;
	int func_match = 1;
	int i;

	if (check_irq_return(iter, flags, trace->depth))
		return TRACE_TYPE_HANDLED;

	if (data) {
		struct fgraph_cpu_data *cpu_data;
		int cpu = iter->cpu;

		cpu_data = per_cpu_ptr(data->cpu_data, cpu);

		/*
		 * Comments are displayed at depth + 1. This is the
		 * return from a function; we now want the comments
		 * to display at the same level as the bracket.
		 */
		cpu_data->depth = trace->depth - 1;

		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
		    !WARN_ON_ONCE(trace->depth < 0)) {
			if (cpu_data->enter_funcs[trace->depth] != trace->func)
				func_match = 0;
			cpu_data->enter_funcs[trace->depth] = 0;
		}
	}

	print_graph_prologue(iter, s, 0, 0, flags);

	/* Overhead and duration */
	print_graph_duration(tr, duration, s, flags);

	/* Closing brace */
	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
		trace_seq_putc(s, ' ');

	/*
	 * If the return function does not have a matching entry,
	 * then the entry was lost. Instead of just printing
	 * the '}' and letting the user guess what function this
	 * belongs to, write out the function name. Always do
	 * that if the funcgraph-tail option is enabled.
	 */
	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
		trace_seq_puts(s, "}\n");
	else
		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

	/* Overrun */
	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
		trace_seq_printf(s, " (Overruns: %u)\n",
				 trace->overrun);

	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
			cpu, pid, flags);

	return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
		    struct trace_iterator *iter, u32 flags)
{
	struct trace_array *tr = iter->tr;
	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
	struct fgraph_data *data = iter->private;
	struct trace_event *event;
	int depth = 0;
	int ret;
	int i;

	if (data)
		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

	print_graph_prologue(iter, s, 0, 0, flags);

	/* No time */
	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

	/* Indentation */
	if (depth > 0)
		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
			trace_seq_putc(s, ' ');

	/* The comment */
	trace_seq_puts(s, "/* ");

	switch (iter->ent->type) {
	case TRACE_BPUTS:
		ret = trace_print_bputs_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_BPRINT:
		ret = trace_print_bprintk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	case TRACE_PRINT:
		ret = trace_print_printk_msg_only(iter);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
		break;
	default:
		event = ftrace_find_event(ent->type);
		if (!event)
			return TRACE_TYPE_UNHANDLED;

		ret = event->funcs->trace(iter, sym_flags, event);
		if (ret != TRACE_TYPE_HANDLED)
			return ret;
	}

	if (trace_seq_has_overflowed(s))
		goto out;

	/* Strip ending newline */
	if (s->buffer[s->seq.len - 1] == '\n') {
		s->buffer[s->seq.len - 1] = '\0';
		s->seq.len--;
	}

	trace_seq_puts(s, " */\n");
 out:
	return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
	struct ftrace_graph_ent_entry *field;
	struct fgraph_data *data = iter->private;
	struct trace_entry *entry = iter->ent;
	struct trace_seq *s = &iter->seq;
	int cpu = iter->cpu;
	int ret;

	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
		return TRACE_TYPE_HANDLED;
	}

	/*
	 * If the last output failed, there's a possibility we need
	 * to print out the missing entry which would never go out.
	 */
	if (data && data->failed) {
		field = &data->ent;
		iter->cpu = data->cpu;
		ret = print_graph_entry(field, s, iter, flags);
		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
			ret = TRACE_TYPE_NO_CONSUME;
		}
		iter->cpu = cpu;
		return ret;
	}

	switch (entry->type) {
	case TRACE_GRAPH_ENT: {
		/*
		 * print_graph_entry() may consume the current event,
		 * thus @field may become invalid, so we need to save it.
		 * sizeof(struct ftrace_graph_ent_entry) is very small,
		 * it can be safely saved at the stack.
		 */
		struct ftrace_graph_ent_entry saved;
		trace_assign_type(field, entry);
		saved = *field;
		return print_graph_entry(&saved, s, iter, flags);
	}
	case TRACE_GRAPH_RET: {
		struct ftrace_graph_ret_entry *field;
		trace_assign_type(field, entry);
		return print_graph_return(&field->ret, s, entry, iter, flags);
	}
	case TRACE_STACK:
	case TRACE_FN:
		/* don't trace stack and functions as comments */
		return TRACE_TYPE_UNHANDLED;

	default:
		return print_graph_comment(s, entry, iter, flags);
	}

	return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
	return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
			   struct trace_event *event)
{
	return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
	static const char spaces[] = "                "	/* 16 spaces */
		"    "					/* 4 spaces */
		"                 ";			/* 17 spaces */
	int size = 0;

	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		size += 16;
	if (flags & TRACE_GRAPH_PRINT_CPU)
		size += 4;
	if (flags & TRACE_GRAPH_PRINT_PROC)
		size += 17;

	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
					struct seq_file *s, u32 flags)
{
	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

	if (lat)
		print_lat_header(s, flags);

	/* 1st line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "     TIME       ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "   REL TIME     ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " CPU");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "  TASK/PID       ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "  DURATION   ");
	seq_puts(s, "               FUNCTION CALLS\n");

	/* 2nd line */
	seq_putc(s, '#');
	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_REL_TIME)
		seq_puts(s, "      |         ");
	if (flags & TRACE_GRAPH_PRINT_CPU)
		seq_puts(s, " |  ");
	if (flags & TRACE_GRAPH_PRINT_PROC)
		seq_puts(s, "   |    |        ");
	if (lat)
		seq_puts(s, "||||   ");
	if (flags & TRACE_GRAPH_PRINT_DURATION)
		seq_puts(s, "   |   |      ");
	seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
	print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
	struct trace_iterator *iter = s->private;
	struct trace_array *tr = iter->tr;

	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
		return;

	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
		/* print nothing if the buffers are empty */
		if (trace_empty(iter))
			return;

		print_trace_header(s, iter);
	}

	__print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
	/* pid and depth on the last trace processed */
	struct fgraph_data *data;
	gfp_t gfpflags;
	int cpu;

	iter->private = NULL;

	/* We can be called in atomic context via ftrace_dump() */
	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

	data = kzalloc(sizeof(*data), gfpflags);
	if (!data)
		goto out_err;

	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
	if (!data->cpu_data)
		goto out_err_free;

	for_each_possible_cpu(cpu) {
		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

		*pid = -1;
		*depth = 0;
		*ignore = 0;
		*depth_irq = -1;
	}

	iter->private = data;

	return;

 out_err_free:
	kfree(data);
 out_err:
	pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
	struct fgraph_data *data = iter->private;

	if (data) {
		free_percpu(data->cpu_data);
		kfree(data);
	}
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	if (bit == TRACE_GRAPH_PRINT_IRQS)
		ftrace_graph_skip_irqs = !set;

	if (bit == TRACE_GRAPH_SLEEP_TIME)
		ftrace_graph_sleep_time_control(set);

	if (bit == TRACE_GRAPH_GRAPH_TIME)
		ftrace_graph_graph_time_control(set);

	return 0;
}

static struct trace_event_functions graph_functions = {
	.trace		= print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
	.type		= TRACE_GRAPH_ENT,
	.funcs		= &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
	.type		= TRACE_GRAPH_RET,
	.funcs		= &graph_functions
};

static struct tracer graph_trace __tracer_data = {
	.name		= "function_graph",
	.update_thresh	= graph_trace_update_thresh,
	.open		= graph_trace_open,
	.pipe_open	= graph_trace_open,
	.close		= graph_trace_close,
	.pipe_close	= graph_trace_close,
	.init		= graph_trace_init,
	.reset		= graph_trace_reset,
	.print_line	= print_graph_function,
	.print_header	= print_graph_headers,
	.flags		= &tracer_flags,
	.set_flag	= func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
	.selftest	= trace_selftest_startup_function_graph,
#endif
};


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
		  loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	fgraph_max_depth = val;

	*ppos += cnt;

	return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
		 loff_t *ppos)
{
	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
	int n;

	n = sprintf(buf, "%d\n", fgraph_max_depth);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
	.open		= tracing_open_generic,
	.write		= graph_depth_write,
	.read		= graph_depth_read,
	.llseek		= generic_file_llseek,
};
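
/*
 * Usage example (illustrative, assuming tracefs at /sys/kernel/tracing):
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth   # trace at most 3 levels
 *   cat /sys/kernel/tracing/max_graph_depth        # reads back "3"
 *
 * A depth of 0 (the default) means no limit.
 */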

static __init int init_graph_tracefs(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("max_graph_depth", TRACE_MODE_WRITE, NULL,
			  NULL, &graph_depth_fops);

	return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
	max_bytes_for_cpu = snprintf(NULL, 0, "%u", nr_cpu_ids - 1);

	if (!register_trace_event(&graph_trace_entry_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	if (!register_trace_event(&graph_trace_ret_event)) {
		pr_warn("Warning: could not register graph trace events\n");
		return 1;
	}

	return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);
v4.10.11
 
   1/*
   2 *
   3 * Function graph tracer.
   4 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
   5 * Mostly borrowed from function tracer which
   6 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
   7 *
   8 */
   9#include <linux/uaccess.h>
  10#include <linux/ftrace.h>
  11#include <linux/interrupt.h>
  12#include <linux/slab.h>
  13#include <linux/fs.h>
  14
  15#include "trace.h"
  16#include "trace_output.h"
  17
  18static bool kill_ftrace_graph;
  19
  20/**
  21 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
  22 *
  23 * ftrace_graph_stop() is called when a severe error is detected in
  24 * the function graph tracing. This function is called by the critical
  25 * paths of function graph to keep those paths from doing any more harm.
  26 */
  27bool ftrace_graph_is_dead(void)
  28{
  29	return kill_ftrace_graph;
  30}
  31
  32/**
  33 * ftrace_graph_stop - set to permanently disable function graph tracincg
  34 *
  35 * In case of an error int function graph tracing, this is called
  36 * to try to keep function graph tracing from causing any more harm.
  37 * Usually this is pretty severe and this is called to try to at least
  38 * get a warning out to the user.
  39 */
  40void ftrace_graph_stop(void)
  41{
  42	kill_ftrace_graph = true;
  43}
  44
  45/* When set, irq functions will be ignored */
  46static int ftrace_graph_skip_irqs;
  47
  48struct fgraph_cpu_data {
  49	pid_t		last_pid;
  50	int		depth;
  51	int		depth_irq;
  52	int		ignore;
  53	unsigned long	enter_funcs[FTRACE_RETFUNC_DEPTH];
  54};
  55
  56struct fgraph_data {
  57	struct fgraph_cpu_data __percpu *cpu_data;
  58
  59	/* Place to preserve last processed entry. */
  60	struct ftrace_graph_ent_entry	ent;
  61	struct ftrace_graph_ret_entry	ret;
  62	int				failed;
  63	int				cpu;
  64};
  65
  66#define TRACE_GRAPH_INDENT	2
  67
  68unsigned int fgraph_max_depth;
  69
  70static struct tracer_opt trace_opts[] = {
  71	/* Display overruns? (for self-debug purpose) */
  72	{ TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
  73	/* Display CPU ? */
  74	{ TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
  75	/* Display Overhead ? */
  76	{ TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
  77	/* Display proc name/pid */
  78	{ TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
  79	/* Display duration of execution */
  80	{ TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
  81	/* Display absolute time of an entry */
  82	{ TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
  83	/* Display interrupts */
  84	{ TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
  85	/* Display function name after trailing } */
  86	{ TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
  87	/* Include sleep time (scheduled out) between entry and return */
  88	{ TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
 
 
  89	/* Include time within nested functions */
  90	{ TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
 
 
  91	{ } /* Empty entry */
  92};
  93
  94static struct tracer_flags tracer_flags = {
  95	/* Don't display overruns, proc, or tail by default */
  96	.val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
  97	       TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
  98	       TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
  99	.opts = trace_opts
 100};
 101
 102static struct trace_array *graph_array;
 103
 104/*
 105 * DURATION column is being also used to display IRQ signs,
 106 * following values are used by print_graph_irq and others
 107 * to fill in space into DURATION column.
 108 */
 109enum {
 110	FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 111	FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 112	FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
 113};
 114
 115static void
 116print_graph_duration(struct trace_array *tr, unsigned long long duration,
 117		     struct trace_seq *s, u32 flags);
 118
 119/* Add a function return address to the trace stack on thread info.*/
 120int
 121ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
 122			 unsigned long frame_pointer, unsigned long *retp)
 123{
 124	unsigned long long calltime;
 125	int index;
 126
 127	if (unlikely(ftrace_graph_is_dead()))
 128		return -EBUSY;
 129
 130	if (!current->ret_stack)
 131		return -EBUSY;
 132
 133	/*
 134	 * We must make sure the ret_stack is tested before we read
 135	 * anything else.
 136	 */
 137	smp_rmb();
 138
 139	/* The return trace stack is full */
 140	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 141		atomic_inc(&current->trace_overrun);
 142		return -EBUSY;
 143	}
 144
 145	/*
 146	 * The curr_ret_stack is an index to ftrace return stack of
 147	 * current task.  Its value should be in [0, FTRACE_RETFUNC_
 148	 * DEPTH) when the function graph tracer is used.  To support
 149	 * filtering out specific functions, it makes the index
 150	 * negative by subtracting huge value (FTRACE_NOTRACE_DEPTH)
 151	 * so when it sees a negative index the ftrace will ignore
 152	 * the record.  And the index gets recovered when returning
 153	 * from the filtered function by adding the FTRACE_NOTRACE_
 154	 * DEPTH and then it'll continue to record functions normally.
 155	 *
 156	 * The curr_ret_stack is initialized to -1 and get increased
 157	 * in this function.  So it can be less than -1 only if it was
 158	 * filtered out via ftrace_graph_notrace_addr() which can be
 159	 * set from set_graph_notrace file in tracefs by user.
 160	 */
 161	if (current->curr_ret_stack < -1)
 162		return -EBUSY;
 163
 164	calltime = trace_clock_local();
 165
 166	index = ++current->curr_ret_stack;
 167	if (ftrace_graph_notrace_addr(func))
 168		current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
 169	barrier();
 170	current->ret_stack[index].ret = ret;
 171	current->ret_stack[index].func = func;
 172	current->ret_stack[index].calltime = calltime;
 173#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 174	current->ret_stack[index].fp = frame_pointer;
 175#endif
 176#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 177	current->ret_stack[index].retp = retp;
 178#endif
 179	*depth = current->curr_ret_stack;
 180
 181	return 0;
 182}
 183
 184/* Retrieve a function return address to the trace stack on thread info.*/
 185static void
 186ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
 187			unsigned long frame_pointer)
 188{
 189	int index;
 190
 191	index = current->curr_ret_stack;
 192
 193	/*
 194	 * A negative index here means that it's just returned from a
 195	 * notrace'd function.  Recover index to get an original
 196	 * return address.  See ftrace_push_return_trace().
 197	 *
 198	 * TODO: Need to check whether the stack gets corrupted.
 199	 */
 200	if (index < 0)
 201		index += FTRACE_NOTRACE_DEPTH;
 202
 203	if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
 204		ftrace_graph_stop();
 205		WARN_ON(1);
 206		/* Might as well panic, otherwise we have no where to go */
 207		*ret = (unsigned long)panic;
 208		return;
 209	}
 210
 211#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
 212	/*
 213	 * The arch may choose to record the frame pointer used
 214	 * and check it here to make sure that it is what we expect it
 215	 * to be. If gcc does not set the place holder of the return
 216	 * address in the frame pointer, and does a copy instead, then
 217	 * the function graph trace will fail. This test detects this
 218	 * case.
 219	 *
 220	 * Currently, x86_32 with optimize for size (-Os) makes the latest
 221	 * gcc do the above.
 222	 *
 223	 * Note, -mfentry does not use frame pointers, and this test
 224	 *  is not needed if CC_USING_FENTRY is set.
 225	 */
 226	if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
 227		ftrace_graph_stop();
 228		WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
 229		     "  from func %ps return to %lx\n",
 230		     current->ret_stack[index].fp,
 231		     frame_pointer,
 232		     (void *)current->ret_stack[index].func,
 233		     current->ret_stack[index].ret);
 234		*ret = (unsigned long)panic;
 235		return;
 236	}
 237#endif
 238
 239	*ret = current->ret_stack[index].ret;
 240	trace->func = current->ret_stack[index].func;
 241	trace->calltime = current->ret_stack[index].calltime;
 242	trace->overrun = atomic_read(&current->trace_overrun);
 243	trace->depth = index;
 244}
 245
 246/*
 247 * Send the trace to the ring-buffer.
 248 * @return the original return address.
 249 */
 250unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 251{
 252	struct ftrace_graph_ret trace;
 253	unsigned long ret;
 254
 255	ftrace_pop_return_trace(&trace, &ret, frame_pointer);
 256	trace.rettime = trace_clock_local();
 257	barrier();
 258	current->curr_ret_stack--;
 259	/*
 260	 * The curr_ret_stack can be less than -1 only if it was
 261	 * filtered out and it's about to return from the function.
 262	 * Recover the index and continue to trace normal functions.
 263	 */
 264	if (current->curr_ret_stack < -1) {
 265		current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
 266		return ret;
 267	}
 268
 269	/*
 270	 * The trace should run after decrementing the ret counter
 271	 * in case an interrupt were to come in. We don't want to
 272	 * lose the interrupt if max_depth is set.
 273	 */
 274	ftrace_graph_return(&trace);
 275
 276	if (unlikely(!ret)) {
 277		ftrace_graph_stop();
 278		WARN_ON(1);
 279		/* Might as well panic. What else to do? */
 280		ret = (unsigned long)panic;
 281	}
 282
 283	return ret;
 284}
 285
 286/**
 287 * ftrace_graph_ret_addr - convert a potentially modified stack return address
 288 *			   to its original value
 289 *
 290 * This function can be called by stack unwinding code to convert a found stack
 291 * return address ('ret') to its original value, in case the function graph
 292 * tracer has modified it to be 'return_to_handler'.  If the address hasn't
 293 * been modified, the unchanged value of 'ret' is returned.
 294 *
 295 * 'idx' is a state variable which should be initialized by the caller to zero
 296 * before the first call.
 297 *
 298 * 'retp' is a pointer to the return address on the stack.  It's ignored if
 299 * the arch doesn't have HAVE_FUNCTION_GRAPH_RET_ADDR_PTR defined.
 300 */
 301#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 302unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 303				    unsigned long ret, unsigned long *retp)
 304{
 305	int index = task->curr_ret_stack;
 306	int i;
 307
 308	if (ret != (unsigned long)return_to_handler)
 309		return ret;
 310
 311	if (index < -1)
 312		index += FTRACE_NOTRACE_DEPTH;
 313
 314	if (index < 0)
 315		return ret;
 316
 317	for (i = 0; i <= index; i++)
 318		if (task->ret_stack[i].retp == retp)
 319			return task->ret_stack[i].ret;
 320
 321	return ret;
 322}
 323#else /* !HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
 324unsigned long ftrace_graph_ret_addr(struct task_struct *task, int *idx,
 325				    unsigned long ret, unsigned long *retp)
 326{
 327	int task_idx;
 328
 329	if (ret != (unsigned long)return_to_handler)
 330		return ret;
 331
 332	task_idx = task->curr_ret_stack;
 333
 334	if (!task->ret_stack || task_idx < *idx)
 335		return ret;
 336
 337	task_idx -= *idx;
 338	(*idx)++;
 339
 340	return task->ret_stack[task_idx].ret;
 341}
 342#endif /* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR */
 343
 344int __trace_graph_entry(struct trace_array *tr,
 345				struct ftrace_graph_ent *trace,
 346				unsigned long flags,
 347				int pc)
 348{
 349	struct trace_event_call *call = &event_funcgraph_entry;
 350	struct ring_buffer_event *event;
 351	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 352	struct ftrace_graph_ent_entry *entry;
 353
 354	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
 355					  sizeof(*entry), flags, pc);
 356	if (!event)
 357		return 0;
 358	entry	= ring_buffer_event_data(event);
 359	entry->graph_ent			= *trace;
 360	if (!call_filter_check_discard(call, entry, buffer, event))
 361		trace_buffer_unlock_commit_nostack(buffer, event);
 362
 363	return 1;
 364}
 365
 366static inline int ftrace_graph_ignore_irqs(void)
 367{
 368	if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
 369		return 0;
 370
 371	return in_irq();
 372}
 373
 374int trace_graph_entry(struct ftrace_graph_ent *trace)
 375{
 376	struct trace_array *tr = graph_array;
 377	struct trace_array_cpu *data;
 378	unsigned long flags;
 
 379	long disabled;
 380	int ret;
 381	int cpu;
 382	int pc;
 383
 384	if (!ftrace_trace_task(tr))
 385		return 0;
 386
 387	if (ftrace_graph_ignore_func(trace))
 388		return 0;
 389
 390	if (ftrace_graph_ignore_irqs())
 391		return 0;
 392
 393	/*
 394	 * Do not trace a function if it's filtered by set_graph_notrace.
 395	 * Make the index of ret stack negative to indicate that it should
 396	 * ignore further functions.  But it needs its own ret stack entry
 397	 * to recover the original index in order to continue tracing after
 398	 * returning from the function.
 399	 */
 400	if (ftrace_graph_notrace_addr(trace->func))
 
 
 
 
 
 401		return 1;
 
 
 
 
 
 
 
 
 
 
 402
 403	/*
 404	 * Stop here if tracing_threshold is set. We only write function return
 405	 * events to the ring buffer.
 406	 */
 407	if (tracing_thresh)
 408		return 1;
 409
 410	local_irq_save(flags);
 411	cpu = raw_smp_processor_id();
 412	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 413	disabled = atomic_inc_return(&data->disabled);
 414	if (likely(disabled == 1)) {
 415		pc = preempt_count();
 416		ret = __trace_graph_entry(tr, trace, flags, pc);
 417	} else {
 418		ret = 0;
 419	}
 420
 421	atomic_dec(&data->disabled);
 422	local_irq_restore(flags);
 423
 424	return ret;
 425}
 426
 427static void
 428__trace_graph_function(struct trace_array *tr,
 429		unsigned long ip, unsigned long flags, int pc)
 430{
 431	u64 time = trace_clock_local();
 432	struct ftrace_graph_ent ent = {
 433		.func  = ip,
 434		.depth = 0,
 435	};
 436	struct ftrace_graph_ret ret = {
 437		.func     = ip,
 438		.depth    = 0,
 439		.calltime = time,
 440		.rettime  = time,
 441	};
 442
 443	__trace_graph_entry(tr, &ent, flags, pc);
 444	__trace_graph_return(tr, &ret, flags, pc);
 445}
 446
 447void
 448trace_graph_function(struct trace_array *tr,
 449		unsigned long ip, unsigned long parent_ip,
 450		unsigned long flags, int pc)
 451{
 452	__trace_graph_function(tr, ip, flags, pc);
 453}
 454
 455void __trace_graph_return(struct trace_array *tr,
 456				struct ftrace_graph_ret *trace,
 457				unsigned long flags,
 458				int pc)
 459{
 460	struct trace_event_call *call = &event_funcgraph_exit;
 461	struct ring_buffer_event *event;
 462	struct ring_buffer *buffer = tr->trace_buffer.buffer;
 463	struct ftrace_graph_ret_entry *entry;
 464
 465	event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
 466					  sizeof(*entry), flags, pc);
 467	if (!event)
 468		return;
 469	entry	= ring_buffer_event_data(event);
 470	entry->ret				= *trace;
 471	if (!call_filter_check_discard(call, entry, buffer, event))
 472		trace_buffer_unlock_commit_nostack(buffer, event);
 473}
 474
 475void trace_graph_return(struct ftrace_graph_ret *trace)
 476{
 477	struct trace_array *tr = graph_array;
 478	struct trace_array_cpu *data;
 479	unsigned long flags;
 
 480	long disabled;
 481	int cpu;
 482	int pc;
 
 
 
 
 
 
 483
 484	local_irq_save(flags);
 485	cpu = raw_smp_processor_id();
 486	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
 487	disabled = atomic_inc_return(&data->disabled);
 488	if (likely(disabled == 1)) {
 489		pc = preempt_count();
 490		__trace_graph_return(tr, trace, flags, pc);
 491	}
 492	atomic_dec(&data->disabled);
 493	local_irq_restore(flags);
 494}
 495
 496void set_graph_array(struct trace_array *tr)
 497{
 498	graph_array = tr;
 499
 500	/* Make graph_array visible before we start tracing */
 501
 502	smp_mb();
 503}
 504
 505static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
 506{
 
 
 
 
 
 
 
 507	if (tracing_thresh &&
 508	    (trace->rettime - trace->calltime < tracing_thresh))
 509		return;
 510	else
 511		trace_graph_return(trace);
 512}
 513
 
 
 
 
 
 
 
 
 
 
 514static int graph_trace_init(struct trace_array *tr)
 515{
 516	int ret;
 517
 518	set_graph_array(tr);
 519	if (tracing_thresh)
 520		ret = register_ftrace_graph(&trace_graph_thresh_return,
 521					    &trace_graph_entry);
 522	else
 523		ret = register_ftrace_graph(&trace_graph_return,
 524					    &trace_graph_entry);
 525	if (ret)
 526		return ret;
 527	tracing_start_cmdline_record();
 528
 529	return 0;
 530}
 531
 532static void graph_trace_reset(struct trace_array *tr)
 533{
 534	tracing_stop_cmdline_record();
 535	unregister_ftrace_graph();
 
 
 
 536}
 537
 538static int graph_trace_update_thresh(struct trace_array *tr)
 539{
 540	graph_trace_reset(tr);
 541	return graph_trace_init(tr);
 542}
 543
 544static int max_bytes_for_cpu;
 545
 546static void print_graph_cpu(struct trace_seq *s, int cpu)
 547{
 548	/*
 549	 * Start with a space character - to make it stand out
 550	 * to the right a bit when trace output is pasted into
 551	 * email:
 552	 */
 553	trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
 554}
 555
 556#define TRACE_GRAPH_PROCINFO_LENGTH	14
 557
 558static void print_graph_proc(struct trace_seq *s, pid_t pid)
 559{
 560	char comm[TASK_COMM_LEN];
 561	/* sign + log10(MAX_INT) + '\0' */
 562	char pid_str[11];
 563	int spaces = 0;
 564	int len;
 565	int i;
 566
 567	trace_find_cmdline(pid, comm);
 568	comm[7] = '\0';
 569	sprintf(pid_str, "%d", pid);
 570
 571	/* 1 stands for the "-" character */
 572	len = strlen(comm) + strlen(pid_str) + 1;
 573
 574	if (len < TRACE_GRAPH_PROCINFO_LENGTH)
 575		spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;
 576
 577	/* Leading spaces to center the field */
 578	for (i = 0; i < spaces / 2; i++)
 579		trace_seq_putc(s, ' ');
 580
 581	trace_seq_printf(s, "%s-%s", comm, pid_str);
 582
 583	/* Trailing spaces to center the field */
 584	for (i = 0; i < spaces - (spaces / 2); i++)
 585		trace_seq_putc(s, ' ');
 586}
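/*
 * Example: pid 1755 with comm "sshd" renders centered in the
 * 14-character field as "  sshd-1755   " (five padding spaces,
 * split two before and three after).
 */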
 587
 588
 589static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
 590{
 591	trace_seq_putc(s, ' ');
 592	trace_print_lat_fmt(s, entry);
 593}
 594
 595/* If the pid changed since the last trace, output this event */
 596static void
 597verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
 598{
 599	pid_t prev_pid;
 600	pid_t *last_pid;
 601
 602	if (!data)
 603		return;
 604
 605	last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
 606
 607	if (*last_pid == pid)
 608		return;
 609
 610	prev_pid = *last_pid;
 611	*last_pid = pid;
 612
 613	if (prev_pid == -1)
 614		return;
 615/*
 616 * Context-switch trace line:
 617 *
 618 * ------------------------------------------
 619 * | 1)  migration/0--1  =>  sshd-1755
 620 * ------------------------------------------
 621 *
 622 */
 623	trace_seq_puts(s, " ------------------------------------------\n");
 624	print_graph_cpu(s, cpu);
 625	print_graph_proc(s, prev_pid);
 626	trace_seq_puts(s, " => ");
 627	print_graph_proc(s, pid);
 628	trace_seq_puts(s, "\n ------------------------------------------\n\n");
 629}
 630
 631static struct ftrace_graph_ret_entry *
 632get_return_for_leaf(struct trace_iterator *iter,
 633		struct ftrace_graph_ent_entry *curr)
 634{
 635	struct fgraph_data *data = iter->private;
 636	struct ring_buffer_iter *ring_iter = NULL;
 637	struct ring_buffer_event *event;
 638	struct ftrace_graph_ret_entry *next;
 639
 640	/*
 641	 * If the previous output failed to write to the seq buffer,
 642	 * then we just reuse the data from before.
 643	 */
 644	if (data && data->failed) {
 645		curr = &data->ent;
 646		next = &data->ret;
 647	} else {
 648
 649		ring_iter = trace_buffer_iter(iter, iter->cpu);
 650
 651		/* First peek to compare current entry and the next one */
 652		if (ring_iter)
 653			event = ring_buffer_iter_peek(ring_iter, NULL);
 654		else {
 655			/*
 656			 * We need to consume the current entry to see
 657			 * the next one.
 658			 */
 659			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
 660					    NULL, NULL);
 661			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
 662						 NULL, NULL);
 663		}
 664
 665		if (!event)
 666			return NULL;
 667
 668		next = ring_buffer_event_data(event);
 669
 670		if (data) {
 671			/*
 672			 * Save current and next entries for later reference
 673			 * if the output fails.
 674			 */
 675			data->ent = *curr;
 676			/*
 677			 * If the next event is not a return type, then
 678			 * we only care about what type it is. Otherwise we can
 679			 * safely copy the entire event.
 680			 */
 681			if (next->ent.type == TRACE_GRAPH_RET)
 682				data->ret = *next;
 683			else
 684				data->ret.ent.type = next->ent.type;
 685		}
 686	}
 687
 688	if (next->ent.type != TRACE_GRAPH_RET)
 689		return NULL;
 690
 691	if (curr->ent.pid != next->ent.pid ||
 692			curr->graph_ent.func != next->ret.func)
 693		return NULL;
 694
 695	/* this is a leaf, now advance the iterator */
 696	if (ring_iter)
 697		ring_buffer_read(ring_iter, NULL);
 698
 699	return next;
 700}
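/*
 * A non-NULL return here lets the caller collapse an entry/return pair
 * into a single "leaf" line, e.g. for kfree():
 *
 *	1.234 us   |    kfree();
 *
 * instead of the open/close form used for functions with children:
 *
 *	           |    kfree() {
 *	1.234 us   |    }
 */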
 701
 702static void print_graph_abs_time(u64 t, struct trace_seq *s)
 703{
 704	unsigned long usecs_rem;
 705
 706	usecs_rem = do_div(t, NSEC_PER_SEC);
 707	usecs_rem /= 1000;
 708
 709	trace_seq_printf(s, "%5lu.%06lu |  ",
 710			 (unsigned long)t, usecs_rem);
 711}
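/*
 * Example: t = 105123456789 ns prints as "  105.123456 |  ":
 * whole seconds, then the nanosecond remainder truncated to
 * microseconds.
 */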
 712
 713static void
 714print_graph_irq(struct trace_iterator *iter, unsigned long addr,
 715		enum trace_type type, int cpu, pid_t pid, u32 flags)
 716{
 717	struct trace_array *tr = iter->tr;
 718	struct trace_seq *s = &iter->seq;
 719	struct trace_entry *ent = iter->ent;
 720
 721	if (addr < (unsigned long)__irqentry_text_start ||
 722		addr >= (unsigned long)__irqentry_text_end)
 723		return;
 724
 725	if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
 726		/* Absolute time */
 727		if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 728			print_graph_abs_time(iter->ts, s);
 729
 730		/* Cpu */
 731		if (flags & TRACE_GRAPH_PRINT_CPU)
 732			print_graph_cpu(s, cpu);
 733
 734		/* Proc */
 735		if (flags & TRACE_GRAPH_PRINT_PROC) {
 736			print_graph_proc(s, pid);
 737			trace_seq_puts(s, " | ");
 738		}
 739
 740		/* Latency format */
 741		if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 742			print_graph_lat_fmt(s, ent);
 743	}
 744
 745	/* No overhead */
 746	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);
 747
 748	if (type == TRACE_GRAPH_ENT)
 749		trace_seq_puts(s, "==========>");
 750	else
 751		trace_seq_puts(s, "<==========");
 752
 753	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
 754	trace_seq_putc(s, '\n');
 755}
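/*
 * In the output these markers bracket the interrupt, e.g. (handler
 * name illustrative):
 *
 *	==========>
 *	  smp_apic_timer_interrupt() {
 *	  ...
 *	  }
 *	<==========
 */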
 756
 757void
 758trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
 759{
 760	unsigned long nsecs_rem = do_div(duration, 1000);
 761	/* log10(ULONG_MAX) + '\0' */
 762	char usecs_str[21];
 763	char nsecs_str[5];
 764	int len;
 765	int i;
 766
 767	sprintf(usecs_str, "%lu", (unsigned long) duration);
 768
 769	/* Print msecs */
 770	trace_seq_printf(s, "%s", usecs_str);
 771
 772	len = strlen(usecs_str);
 773
 774	/* Print nsecs (we don't want to exceed 7 digits) */
 775	if (len < 7) {
 776		size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);
 777
 778		snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
 779		trace_seq_printf(s, ".%s", nsecs_str);
 780		len += strlen(nsecs_str) + 1;
 781	}
 782
 783	trace_seq_puts(s, " us ");
 784
 785	/* Print remaining spaces to fit the row's width */
 786	for (i = len; i < 8; i++)
 787		trace_seq_putc(s, ' ');
 788}
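/*
 * Worked example: duration = 1234567 ns splits into usecs_str "1234"
 * and nsecs_rem 567, printing "1234.567 us " padded to the fixed
 * column width.
 */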
 789
 790static void
 791print_graph_duration(struct trace_array *tr, unsigned long long duration,
 792		     struct trace_seq *s, u32 flags)
 793{
 794	if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
 795	    !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 796		return;
 797
 798	/* No real data, just filling the column with spaces */
 799	switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
 800	case FLAGS_FILL_FULL:
 801		trace_seq_puts(s, "              |  ");
 802		return;
 803	case FLAGS_FILL_START:
 804		trace_seq_puts(s, "  ");
 805		return;
 806	case FLAGS_FILL_END:
 807		trace_seq_puts(s, " |");
 808		return;
 809	}
 810
 811	/* Signal an overhead of time execution to the output */
 812	if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
 813		trace_seq_printf(s, "%c ", trace_find_mark(duration));
 814	else
 815		trace_seq_puts(s, "  ");
 816
 817	trace_print_graph_duration(duration, s);
 818	trace_seq_puts(s, "|  ");
 819}
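/*
 * The overhead mark comes from trace_find_mark() (see trace_output.c),
 * which at the time of writing maps growing durations to '+', '!',
 * '#', '*', '@' and '$' (e.g. '+' for over 1 usec, '$' for over 1 sec).
 */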
 820
 821/* Case of a leaf function on its call entry */
 822static enum print_line_t
 823print_graph_entry_leaf(struct trace_iterator *iter,
 824		struct ftrace_graph_ent_entry *entry,
 825		struct ftrace_graph_ret_entry *ret_entry,
 826		struct trace_seq *s, u32 flags)
 827{
 828	struct fgraph_data *data = iter->private;
 829	struct trace_array *tr = iter->tr;
 830	struct ftrace_graph_ret *graph_ret;
 831	struct ftrace_graph_ent *call;
 832	unsigned long long duration;
 833	int i;
 834
 835	graph_ret = &ret_entry->ret;
 836	call = &entry->graph_ent;
 837	duration = graph_ret->rettime - graph_ret->calltime;
 838
 839	if (data) {
 840		struct fgraph_cpu_data *cpu_data;
 841		int cpu = iter->cpu;
 842
 843		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 844
 845		/* If a graph tracer ignored set_graph_notrace */
 846		if (call->depth < -1)
 847			call->depth += FTRACE_NOTRACE_DEPTH;
 848
 849		/*
 850		 * Comments display at + 1 to depth. Since
 851		 * this is a leaf function, keep the comments
 852		 * equal to this depth.
 853		 */
 854		cpu_data->depth = call->depth - 1;
 855
 856		/* No need to keep this function around for this depth */
 857		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 858		    !WARN_ON_ONCE(call->depth < 0))
 859			cpu_data->enter_funcs[call->depth] = 0;
 860	}
 861
 862	/* Overhead and duration */
 863	print_graph_duration(tr, duration, s, flags);
 864
 865	/* Function */
 866	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 867		trace_seq_putc(s, ' ');
 868
 869	trace_seq_printf(s, "%ps();\n", (void *)call->func);
 870
 871	return trace_handle_return(s);
 872}
 873
 874static enum print_line_t
 875print_graph_entry_nested(struct trace_iterator *iter,
 876			 struct ftrace_graph_ent_entry *entry,
 877			 struct trace_seq *s, int cpu, u32 flags)
 878{
 879	struct ftrace_graph_ent *call = &entry->graph_ent;
 880	struct fgraph_data *data = iter->private;
 881	struct trace_array *tr = iter->tr;
 882	int i;
 883
 884	if (data) {
 885		struct fgraph_cpu_data *cpu_data;
 886		int cpu = iter->cpu;
 887
 888		/* If a graph tracer ignored set_graph_notrace */
 889		if (call->depth < -1)
 890			call->depth += FTRACE_NOTRACE_DEPTH;
 891
 892		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
 893		cpu_data->depth = call->depth;
 894
 895		/* Save this function pointer to see if the exit matches */
 896		if (call->depth < FTRACE_RETFUNC_DEPTH &&
 897		    !WARN_ON_ONCE(call->depth < 0))
 898			cpu_data->enter_funcs[call->depth] = call->func;
 899	}
 900
 901	/* No time */
 902	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
 903
 904	/* Function */
 905	for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
 906		trace_seq_putc(s, ' ');
 907
 908	trace_seq_printf(s, "%ps() {\n", (void *)call->func);
 909
 910	if (trace_seq_has_overflowed(s))
 911		return TRACE_TYPE_PARTIAL_LINE;
 912
 913	/*
 914	 * we already consumed the current entry to check the next one
 915	 * and see if this is a leaf.
 916	 */
 917	return TRACE_TYPE_NO_CONSUME;
 918}
 919
 920static void
 921print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 922		     int type, unsigned long addr, u32 flags)
 923{
 924	struct fgraph_data *data = iter->private;
 925	struct trace_entry *ent = iter->ent;
 926	struct trace_array *tr = iter->tr;
 927	int cpu = iter->cpu;
 928
 929	/* Pid */
 930	verif_pid(s, ent->pid, cpu, data);
 931
 932	if (type)
 933		/* Interrupt */
 934		print_graph_irq(iter, addr, type, cpu, ent->pid, flags);
 935
 936	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
 937		return;
 938
 939	/* Absolute time */
 940	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
 941		print_graph_abs_time(iter->ts, s);
 942
 943	/* Cpu */
 944	if (flags & TRACE_GRAPH_PRINT_CPU)
 945		print_graph_cpu(s, cpu);
 946
 947	/* Proc */
 948	if (flags & TRACE_GRAPH_PRINT_PROC) {
 949		print_graph_proc(s, ent->pid);
 950		trace_seq_puts(s, " | ");
 951	}
 952
 953	/* Latency format */
 954	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
 955		print_graph_lat_fmt(s, ent);
 956
 957	return;
 958}
 959
 960/*
 961 * Entry check for irq code
 962 *
 963 * returns 1 if
 964 *  - we are inside irq code
 965 *  - we just entered irq code
 966 *
 967 * returns 0 if
 968 *  - funcgraph-irqs option is set
 969 *  - we are not inside irq code
 970 */
 971static int
 972check_irq_entry(struct trace_iterator *iter, u32 flags,
 973		unsigned long addr, int depth)
 974{
 975	int cpu = iter->cpu;
 976	int *depth_irq;
 977	struct fgraph_data *data = iter->private;
 978
 979	/*
 980	 * If we are either displaying irqs, or we got called as
 981	 * a graph event and private data does not exist,
 982	 * then we bypass the irq check.
 983	 */
 984	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
 985	    (!data))
 986		return 0;
 987
 988	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
 989
 990	/*
 991	 * We are inside the irq code
 992	 */
 993	if (*depth_irq >= 0)
 994		return 1;
 995
 996	if ((addr < (unsigned long)__irqentry_text_start) ||
 997	    (addr >= (unsigned long)__irqentry_text_end))
 998		return 0;
 999
1000	/*
1001	 * We are entering irq code.
1002	 */
1003	*depth_irq = depth;
1004	return 1;
1005}
1006
1007/*
1008 * Return check for irq code
1009 *
1010 * returns 1 if
1011 *  - we are inside irq code
1012 *  - we just left irq code
1013 *
1014 * returns 0 if
1015 *  - funcgraph-irqs option is set
1016 *  - we are not inside irq code
1017 */
1018static int
1019check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
1020{
1021	int cpu = iter->cpu;
1022	int *depth_irq;
1023	struct fgraph_data *data = iter->private;
1024
1025	/*
1026	 * If we are either displaying irqs, or we got called as
1027	 * a graph event and private data does not exist,
1028	 * then we bypass the irq check.
1029	 */
1030	if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
1031	    (!data))
1032		return 0;
1033
1034	depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1035
1036	/*
1037	 * We are not inside the irq code.
1038	 */
1039	if (*depth_irq == -1)
1040		return 0;
1041
1042	/*
1043	 * We are inside the irq code, and this is the return of the
1044	 * function that entered it. Don't trace it, and clear the
1045	 * entry depth, since we are now out of irq code.
1046	 *
1047	 * This condition ensures that we 'leave the irq code' once we
1048	 * are out of the entry depth, protecting us from losing the
1049	 * RETURN entry.
1050	 */
1051	if (*depth_irq >= depth) {
1052		*depth_irq = -1;
1053		return 1;
1054	}
1055
1056	/*
1057	 * We are inside the irq code, and this is not the entry.
1058	 */
1059	return 1;
1060}
1061
1062static enum print_line_t
1063print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
1064			struct trace_iterator *iter, u32 flags)
1065{
1066	struct fgraph_data *data = iter->private;
1067	struct ftrace_graph_ent *call = &field->graph_ent;
1068	struct ftrace_graph_ret_entry *leaf_ret;
1069	enum print_line_t ret;
1070	int cpu = iter->cpu;
1071
1072	if (check_irq_entry(iter, flags, call->func, call->depth))
1073		return TRACE_TYPE_HANDLED;
1074
1075	print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);
1076
1077	leaf_ret = get_return_for_leaf(iter, field);
1078	if (leaf_ret)
1079		ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
1080	else
1081		ret = print_graph_entry_nested(iter, field, s, cpu, flags);
1082
1083	if (data) {
1084		/*
1085		 * If we failed to write our output, then we need to make
1086		 * note of it, because we already consumed our entry.
1087		 */
1088		if (s->full) {
1089			data->failed = 1;
1090			data->cpu = cpu;
1091		} else
1092			data->failed = 0;
1093	}
1094
1095	return ret;
1096}
1097
1098static enum print_line_t
1099print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
1100		   struct trace_entry *ent, struct trace_iterator *iter,
1101		   u32 flags)
1102{
1103	unsigned long long duration = trace->rettime - trace->calltime;
1104	struct fgraph_data *data = iter->private;
1105	struct trace_array *tr = iter->tr;
1106	pid_t pid = ent->pid;
1107	int cpu = iter->cpu;
1108	int func_match = 1;
1109	int i;
1110
1111	if (check_irq_return(iter, flags, trace->depth))
1112		return TRACE_TYPE_HANDLED;
1113
1114	if (data) {
1115		struct fgraph_cpu_data *cpu_data;
1116		int cpu = iter->cpu;
1117
1118		cpu_data = per_cpu_ptr(data->cpu_data, cpu);
1119
1120		/*
1121		 * Comments display at + 1 to depth. This is the
1122		 * return from a function, we now want the comments
1123		 * to display at the same level of the bracket.
1124		 */
1125		cpu_data->depth = trace->depth - 1;
1126
1127		if (trace->depth < FTRACE_RETFUNC_DEPTH &&
1128		    !WARN_ON_ONCE(trace->depth < 0)) {
1129			if (cpu_data->enter_funcs[trace->depth] != trace->func)
1130				func_match = 0;
1131			cpu_data->enter_funcs[trace->depth] = 0;
1132		}
1133	}
1134
1135	print_graph_prologue(iter, s, 0, 0, flags);
1136
1137	/* Overhead and duration */
1138	print_graph_duration(tr, duration, s, flags);
1139
1140	/* Closing brace */
1141	for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
1142		trace_seq_putc(s, ' ');
1143
1144	/*
1145	 * If the return function does not have a matching entry,
1146	 * then the entry was lost. Instead of just printing
1147	 * the '}' and letting the user guess what function this
1148	 * belongs to, write out the function name. Always do
1149	 * that if the funcgraph-tail option is enabled.
1150	 */
1151	if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
1152		trace_seq_puts(s, "}\n");
1153	else
1154		trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);
1155
1156	/* Overrun */
1157	if (flags & TRACE_GRAPH_PRINT_OVERRUN)
1158		trace_seq_printf(s, " (Overruns: %lu)\n",
1159				 trace->overrun);
1160
1161	print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
1162			cpu, pid, flags);
1163
1164	return trace_handle_return(s);
1165}
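/*
 * With funcgraph-tail enabled, or when the matching entry was lost,
 * the closing brace above is followed by the function name in a
 * C-style comment, so the reader need not guess which call it closes.
 */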
1166
1167static enum print_line_t
1168print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
1169		    struct trace_iterator *iter, u32 flags)
1170{
1171	struct trace_array *tr = iter->tr;
1172	unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
1173	struct fgraph_data *data = iter->private;
1174	struct trace_event *event;
1175	int depth = 0;
1176	int ret;
1177	int i;
1178
1179	if (data)
1180		depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;
1181
1182	print_graph_prologue(iter, s, 0, 0, flags);
1183
1184	/* No time */
1185	print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);
1186
1187	/* Indentation */
1188	if (depth > 0)
1189		for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
1190			trace_seq_putc(s, ' ');
1191
1192	/* The comment */
1193	trace_seq_puts(s, "/* ");
1194
1195	switch (iter->ent->type) {
1196	case TRACE_BPUTS:
1197		ret = trace_print_bputs_msg_only(iter);
1198		if (ret != TRACE_TYPE_HANDLED)
1199			return ret;
1200		break;
1201	case TRACE_BPRINT:
1202		ret = trace_print_bprintk_msg_only(iter);
1203		if (ret != TRACE_TYPE_HANDLED)
1204			return ret;
1205		break;
1206	case TRACE_PRINT:
1207		ret = trace_print_printk_msg_only(iter);
1208		if (ret != TRACE_TYPE_HANDLED)
1209			return ret;
1210		break;
1211	default:
1212		event = ftrace_find_event(ent->type);
1213		if (!event)
1214			return TRACE_TYPE_UNHANDLED;
1215
1216		ret = event->funcs->trace(iter, sym_flags, event);
1217		if (ret != TRACE_TYPE_HANDLED)
1218			return ret;
1219	}
1220
1221	if (trace_seq_has_overflowed(s))
1222		goto out;
1223
1224	/* Strip ending newline */
1225	if (s->buffer[s->seq.len - 1] == '\n') {
1226		s->buffer[s->seq.len - 1] = '\0';
1227		s->seq.len--;
1228	}
1229
1230	trace_seq_puts(s, " */\n");
1231 out:
1232	return trace_handle_return(s);
1233}
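/*
 * This renders trace_printk()/bprintk events as C-style comments
 * inside the call graph, indented one level deeper than the enclosing
 * function (hence the depth + 1 above).
 */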
1234
1235
1236enum print_line_t
1237print_graph_function_flags(struct trace_iterator *iter, u32 flags)
1238{
1239	struct ftrace_graph_ent_entry *field;
1240	struct fgraph_data *data = iter->private;
1241	struct trace_entry *entry = iter->ent;
1242	struct trace_seq *s = &iter->seq;
1243	int cpu = iter->cpu;
1244	int ret;
1245
1246	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
1247		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
1248		return TRACE_TYPE_HANDLED;
1249	}
1250
1251	/*
1252	 * If the last output failed, we may need to print out the
1253	 * pending entry, which would otherwise never go out.
1254	 */
1255	if (data && data->failed) {
1256		field = &data->ent;
1257		iter->cpu = data->cpu;
1258		ret = print_graph_entry(field, s, iter, flags);
1259		if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
1260			per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
1261			ret = TRACE_TYPE_NO_CONSUME;
1262		}
1263		iter->cpu = cpu;
1264		return ret;
1265	}
1266
1267	switch (entry->type) {
1268	case TRACE_GRAPH_ENT: {
1269		/*
1270		 * print_graph_entry() may consume the current event,
1271		 * thus @field may become invalid, so we need to save it.
1272		 * sizeof(struct ftrace_graph_ent_entry) is very small,
1273		 * so it can safely be saved on the stack.
1274		 */
1275		struct ftrace_graph_ent_entry saved;
1276		trace_assign_type(field, entry);
1277		saved = *field;
1278		return print_graph_entry(&saved, s, iter, flags);
1279	}
1280	case TRACE_GRAPH_RET: {
1281		struct ftrace_graph_ret_entry *field;
1282		trace_assign_type(field, entry);
1283		return print_graph_return(&field->ret, s, entry, iter, flags);
1284	}
1285	case TRACE_STACK:
1286	case TRACE_FN:
1287		/* don't trace stack and functions as comments */
1288		return TRACE_TYPE_UNHANDLED;
1289
1290	default:
1291		return print_graph_comment(s, entry, iter, flags);
1292	}
1293
1294	return TRACE_TYPE_HANDLED;
1295}
1296
1297static enum print_line_t
1298print_graph_function(struct trace_iterator *iter)
1299{
1300	return print_graph_function_flags(iter, tracer_flags.val);
1301}
1302
1303static enum print_line_t
1304print_graph_function_event(struct trace_iterator *iter, int flags,
1305			   struct trace_event *event)
1306{
1307	return print_graph_function(iter);
1308}
1309
1310static void print_lat_header(struct seq_file *s, u32 flags)
1311{
1312	static const char spaces[] = "                "	/* 16 spaces */
1313		"    "					/* 4 spaces */
1314		"                 ";			/* 17 spaces */
1315	int size = 0;
1316
1317	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1318		size += 16;
1319	if (flags & TRACE_GRAPH_PRINT_CPU)
1320		size += 4;
1321	if (flags & TRACE_GRAPH_PRINT_PROC)
1322		size += 17;
1323
1324	seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
1325	seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
1326	seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
1327	seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
1328	seq_printf(s, "#%.*s||| /                      \n", size, spaces);
1329}
1330
1331static void __print_graph_headers_flags(struct trace_array *tr,
1332					struct seq_file *s, u32 flags)
1333{
1334	int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;
1335
1336	if (lat)
1337		print_lat_header(s, flags);
1338
1339	/* 1st line */
1340	seq_putc(s, '#');
1341	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1342		seq_puts(s, "     TIME       ");
1343	if (flags & TRACE_GRAPH_PRINT_CPU)
1344		seq_puts(s, " CPU");
1345	if (flags & TRACE_GRAPH_PRINT_PROC)
1346		seq_puts(s, "  TASK/PID       ");
1347	if (lat)
1348		seq_puts(s, "||||");
1349	if (flags & TRACE_GRAPH_PRINT_DURATION)
1350		seq_puts(s, "  DURATION   ");
1351	seq_puts(s, "               FUNCTION CALLS\n");
1352
1353	/* 2nd line */
1354	seq_putc(s, '#');
1355	if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
1356		seq_puts(s, "      |         ");
1357	if (flags & TRACE_GRAPH_PRINT_CPU)
1358		seq_puts(s, " |  ");
1359	if (flags & TRACE_GRAPH_PRINT_PROC)
1360		seq_puts(s, "   |    |        ");
1361	if (lat)
1362		seq_puts(s, "||||");
1363	if (flags & TRACE_GRAPH_PRINT_DURATION)
1364		seq_puts(s, "   |   |      ");
1365	seq_puts(s, "               |   |   |   |\n");
1366}
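/*
 * With the default flags (CPU + overhead + duration) the header looks
 * roughly like:
 *
 *	# CPU  DURATION                  FUNCTION CALLS
 *	# |     |   |                     |   |   |   |
 */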
1367
1368static void print_graph_headers(struct seq_file *s)
1369{
1370	print_graph_headers_flags(s, tracer_flags.val);
1371}
1372
1373void print_graph_headers_flags(struct seq_file *s, u32 flags)
1374{
1375	struct trace_iterator *iter = s->private;
1376	struct trace_array *tr = iter->tr;
1377
1378	if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
1379		return;
1380
1381	if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
1382		/* print nothing if the buffers are empty */
1383		if (trace_empty(iter))
1384			return;
1385
1386		print_trace_header(s, iter);
1387	}
1388
1389	__print_graph_headers_flags(tr, s, flags);
1390}
1391
1392void graph_trace_open(struct trace_iterator *iter)
1393{
1394	/* pid and depth on the last trace processed */
1395	struct fgraph_data *data;
1396	gfp_t gfpflags;
1397	int cpu;
1398
1399	iter->private = NULL;
1400
1401	/* We can be called in atomic context via ftrace_dump() */
1402	gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
1403
1404	data = kzalloc(sizeof(*data), gfpflags);
1405	if (!data)
1406		goto out_err;
1407
1408	data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
1409	if (!data->cpu_data)
1410		goto out_err_free;
1411
1412	for_each_possible_cpu(cpu) {
1413		pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
1414		int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
1415		int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
1416		int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);
1417
1418		*pid = -1;
1419		*depth = 0;
1420		*ignore = 0;
1421		*depth_irq = -1;
1422	}
1423
1424	iter->private = data;
1425
1426	return;
1427
1428 out_err_free:
1429	kfree(data);
1430 out_err:
1431	pr_warn("function graph tracer: not enough memory\n");
1432}
1433
1434void graph_trace_close(struct trace_iterator *iter)
1435{
1436	struct fgraph_data *data = iter->private;
1437
1438	if (data) {
1439		free_percpu(data->cpu_data);
1440		kfree(data);
1441	}
1442}
1443
1444static int
1445func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
1446{
1447	if (bit == TRACE_GRAPH_PRINT_IRQS)
1448		ftrace_graph_skip_irqs = !set;
1449
1450	if (bit == TRACE_GRAPH_SLEEP_TIME)
1451		ftrace_graph_sleep_time_control(set);
1452
1453	if (bit == TRACE_GRAPH_GRAPH_TIME)
1454		ftrace_graph_graph_time_control(set);
1455
1456	return 0;
1457}
1458
1459static struct trace_event_functions graph_functions = {
1460	.trace		= print_graph_function_event,
1461};
1462
1463static struct trace_event graph_trace_entry_event = {
1464	.type		= TRACE_GRAPH_ENT,
1465	.funcs		= &graph_functions,
1466};
1467
1468static struct trace_event graph_trace_ret_event = {
1469	.type		= TRACE_GRAPH_RET,
1470	.funcs		= &graph_functions
1471};
1472
1473static struct tracer graph_trace __tracer_data = {
1474	.name		= "function_graph",
1475	.update_thresh	= graph_trace_update_thresh,
1476	.open		= graph_trace_open,
1477	.pipe_open	= graph_trace_open,
1478	.close		= graph_trace_close,
1479	.pipe_close	= graph_trace_close,
1480	.init		= graph_trace_init,
1481	.reset		= graph_trace_reset,
1482	.print_line	= print_graph_function,
1483	.print_header	= print_graph_headers,
1484	.flags		= &tracer_flags,
1485	.set_flag	= func_graph_set_flag,
1486#ifdef CONFIG_FTRACE_SELFTEST
1487	.selftest	= trace_selftest_startup_function_graph,
1488#endif
1489};
1490
1491
1492static ssize_t
1493graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
1494		  loff_t *ppos)
1495{
1496	unsigned long val;
1497	int ret;
1498
1499	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
1500	if (ret)
1501		return ret;
1502
1503	fgraph_max_depth = val;
1504
1505	*ppos += cnt;
1506
1507	return cnt;
1508}
1509
1510static ssize_t
1511graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
1512		 loff_t *ppos)
1513{
1514	char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
1515	int n;
1516
1517	n = sprintf(buf, "%d\n", fgraph_max_depth);
1518
1519	return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
1520}
1521
1522static const struct file_operations graph_depth_fops = {
1523	.open		= tracing_open_generic,
1524	.write		= graph_depth_write,
1525	.read		= graph_depth_read,
1526	.llseek		= generic_file_llseek,
1527};
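/*
 * Usage sketch (assuming tracefs at /sys/kernel/tracing): limit the
 * graph to three call levels with
 *
 *	echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * Writing 0 restores the default of no depth limit.
 */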
1528
1529static __init int init_graph_tracefs(void)
1530{
1531	struct dentry *d_tracer;
1532
1533	d_tracer = tracing_init_dentry();
1534	if (IS_ERR(d_tracer))
1535		return 0;
1536
1537	trace_create_file("max_graph_depth", 0644, d_tracer,
1538			  NULL, &graph_depth_fops);
1539
1540	return 0;
1541}
1542fs_initcall(init_graph_tracefs);
1543
1544static __init int init_graph_trace(void)
1545{
1546	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
1547
1548	if (!register_trace_event(&graph_trace_entry_event)) {
1549		pr_warn("Warning: could not register graph trace entry event\n");
1550		return 1;
1551	}
1552
1553	if (!register_trace_event(&graph_trace_ret_event)) {
1554		pr_warn("Warning: could not register graph trace return event\n");
1555		return 1;
1556	}
1557
1558	return register_tracer(&graph_trace);
1559}
1560
1561core_initcall(init_graph_trace);