v6.2
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * builtin-timechart.c - make an svg timechart of system activity
   4 *
   5 * (C) Copyright 2009 Intel Corporation
   6 *
   7 * Authors:
   8 *     Arjan van de Ven <arjan@linux.intel.com>
   9 */
  10
  11#include <errno.h>
  12#include <inttypes.h>
  13
  14#include "builtin.h"
  15#include "util/color.h"
  16#include <linux/list.h>
  17#include "util/evlist.h" // for struct evsel_str_handler
  18#include "util/evsel.h"
  19#include <linux/kernel.h>
  20#include <linux/rbtree.h>
  21#include <linux/time64.h>
  22#include <linux/zalloc.h>
  23#include "util/symbol.h"
  24#include "util/thread.h"
  25#include "util/callchain.h"
  26
  27#include "perf.h"
  28#include "util/header.h"
  29#include <subcmd/pager.h>
  30#include <subcmd/parse-options.h>
  31#include "util/parse-events.h"
  32#include "util/event.h"
  33#include "util/session.h"
  34#include "util/svghelper.h"
  35#include "util/tool.h"
  36#include "util/data.h"
  37#include "util/debug.h"
  38#include "util/string2.h"
  39#include "util/tracepoint.h"
  40#include <linux/err.h>
  41#include <traceevent/event-parse.h>
  42
  43#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
  44FILE *open_memstream(char **ptr, size_t *sizeloc);
  45#endif
  46
  47#define SUPPORT_OLD_POWER_EVENTS 1
  48#define PWR_EVENT_EXIT -1
  49
  50struct per_pid;
  51struct power_event;
  52struct wake_event;
  53
  54struct timechart {
  55	struct perf_tool	tool;
  56	struct per_pid		*all_data;
  57	struct power_event	*power_events;
  58	struct wake_event	*wake_events;
  59	int			proc_num;
  60	unsigned int		numcpus;
  61	u64			min_freq,	/* Lowest CPU frequency seen */
  62				max_freq,	/* Highest CPU frequency seen */
  63				turbo_frequency,
  64				first_time, last_time;
  65	bool			power_only,
  66				tasks_only,
  67				with_backtrace,
  68				topology;
  69	bool			force;
  70	/* IO related settings */
  71	bool			io_only,
  72				skip_eagain;
  73	u64			io_events;
  74	u64			min_time,
  75				merge_dist;
  76};
  77
  78struct per_pidcomm;
  79struct cpu_sample;
  80struct io_sample;
  81
  82/*
  83 * Data structure layout:
  84 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
  85 * Each "pid" entry has a list of "comm"s;
  86 *	this is because we want to track different programs differently, while
  87 *	exec will reuse the original pid (by design).
  88 * Each comm has a list of samples that will be used to draw the
  89 * final graph.
  90 */
  91
  92struct per_pid {
  93	struct per_pid *next;
  94
  95	int		pid;
  96	int		ppid;
  97
  98	u64		start_time;
  99	u64		end_time;
 100	u64		total_time;
 101	u64		total_bytes;
 102	int		display;
 103
 104	struct per_pidcomm *all;
 105	struct per_pidcomm *current;
 106};
 107
 108
 109struct per_pidcomm {
 110	struct per_pidcomm *next;
 111
 112	u64		start_time;
 113	u64		end_time;
 114	u64		total_time;
 115	u64		max_bytes;
 116	u64		total_bytes;
 117
 118	int		Y;
 119	int		display;
 120
 121	long		state;
 122	u64		state_since;
 123
 124	char		*comm;
 125
 126	struct cpu_sample *samples;
 127	struct io_sample  *io_samples;
 128};
 129
 130struct sample_wrapper {
 131	struct sample_wrapper *next;
 132
 133	u64		timestamp;
 134	unsigned char	data[];
 135};
 136
 137#define TYPE_NONE	0
 138#define TYPE_RUNNING	1
 139#define TYPE_WAITING	2
 140#define TYPE_BLOCKED	3
 141
 142struct cpu_sample {
 143	struct cpu_sample *next;
 144
 145	u64 start_time;
 146	u64 end_time;
 147	int type;
 148	int cpu;
 149	const char *backtrace;
 150};
 151
 152enum {
 153	IOTYPE_READ,
 154	IOTYPE_WRITE,
 155	IOTYPE_SYNC,
 156	IOTYPE_TX,
 157	IOTYPE_RX,
 158	IOTYPE_POLL,
 159};
 160
 161struct io_sample {
 162	struct io_sample *next;
 163
 164	u64 start_time;
 165	u64 end_time;
 166	u64 bytes;
 167	int type;
 168	int fd;
 169	int err;
 170	int merges;
 171};
 172
 173#define CSTATE 1
 174#define PSTATE 2
 175
 176struct power_event {
 177	struct power_event *next;
 178	int type;
 179	int state;
 180	u64 start_time;
 181	u64 end_time;
 182	int cpu;
 183};
 184
 185struct wake_event {
 186	struct wake_event *next;
 187	int waker;
 188	int wakee;
 189	u64 time;
 190	const char *backtrace;
 191};
 192
 193struct process_filter {
 194	char			*name;
 195	int			pid;
 196	struct process_filter	*next;
 197};
 198
 199static struct process_filter *process_filter;
 200
 201
 202static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
 203{
 204	struct per_pid *cursor = tchart->all_data;
 205
 206	while (cursor) {
 207		if (cursor->pid == pid)
 208			return cursor;
 209		cursor = cursor->next;
 210	}
 211	cursor = zalloc(sizeof(*cursor));
 212	assert(cursor != NULL);
 213	cursor->pid = pid;
 214	cursor->next = tchart->all_data;
 215	tchart->all_data = cursor;
 216	return cursor;
 217}
 218
 219static struct per_pidcomm *create_pidcomm(struct per_pid *p)
 220{
 221	struct per_pidcomm *c;
 222
 223	c = zalloc(sizeof(*c));
 224	if (!c)
 225		return NULL;
 226	p->current = c;
 227	c->next = p->all;
 228	p->all = c;
 229	return c;
 230}
 231
 232static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
 233{
 234	struct per_pid *p;
 235	struct per_pidcomm *c;
 236	p = find_create_pid(tchart, pid);
 237	c = p->all;
 238	while (c) {
 239		if (c->comm && strcmp(c->comm, comm) == 0) {
 240			p->current = c;
 241			return;
 242		}
 243		if (!c->comm) {
 244			c->comm = strdup(comm);
 245			p->current = c;
 246			return;
 247		}
 248		c = c->next;
 249	}
 250	c = create_pidcomm(p);
 251	assert(c != NULL);
 252	c->comm = strdup(comm);
 253}
 254
 255static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
 256{
 257	struct per_pid *p, *pp;
 258	p = find_create_pid(tchart, pid);
 259	pp = find_create_pid(tchart, ppid);
 260	p->ppid = ppid;
 261	if (pp->current && pp->current->comm && !p->current)
 262		pid_set_comm(tchart, pid, pp->current->comm);
 263
 264	p->start_time = timestamp;
 265	if (p->current && !p->current->start_time) {
 266		p->current->start_time = timestamp;
 267		p->current->state_since = timestamp;
 268	}
 269}
 270
 271static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
 272{
 273	struct per_pid *p;
 274	p = find_create_pid(tchart, pid);
 275	p->end_time = timestamp;
 276	if (p->current)
 277		p->current->end_time = timestamp;
 278}
 279
 280static void pid_put_sample(struct timechart *tchart, int pid, int type,
 281			   unsigned int cpu, u64 start, u64 end,
 282			   const char *backtrace)
 283{
 284	struct per_pid *p;
 285	struct per_pidcomm *c;
 286	struct cpu_sample *sample;
 287
 288	p = find_create_pid(tchart, pid);
 289	c = p->current;
 290	if (!c) {
 291		c = create_pidcomm(p);
 292		assert(c != NULL);
 293	}
 294
 295	sample = zalloc(sizeof(*sample));
 296	assert(sample != NULL);
 297	sample->start_time = start;
 298	sample->end_time = end;
 299	sample->type = type;
 300	sample->next = c->samples;
 301	sample->cpu = cpu;
 302	sample->backtrace = backtrace;
 303	c->samples = sample;
 304
 305	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 306		c->total_time += (end-start);
 307		p->total_time += (end-start);
 308	}
 309
 310	if (c->start_time == 0 || c->start_time > start)
 311		c->start_time = start;
 312	if (p->start_time == 0 || p->start_time > start)
 313		p->start_time = start;
 314}
 315
 316#define MAX_CPUS 4096
 317
 318static u64 cpus_cstate_start_times[MAX_CPUS];
 319static int cpus_cstate_state[MAX_CPUS];
 320static u64 cpus_pstate_start_times[MAX_CPUS];
 321static u64 cpus_pstate_state[MAX_CPUS];
 322
 323static int process_comm_event(struct perf_tool *tool,
 324			      union perf_event *event,
 325			      struct perf_sample *sample __maybe_unused,
 326			      struct machine *machine __maybe_unused)
 327{
 328	struct timechart *tchart = container_of(tool, struct timechart, tool);
 329	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
 330	return 0;
 331}
 332
 333static int process_fork_event(struct perf_tool *tool,
 334			      union perf_event *event,
 335			      struct perf_sample *sample __maybe_unused,
 336			      struct machine *machine __maybe_unused)
 337{
 338	struct timechart *tchart = container_of(tool, struct timechart, tool);
 339	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
 340	return 0;
 341}
 342
 343static int process_exit_event(struct perf_tool *tool,
 344			      union perf_event *event,
 345			      struct perf_sample *sample __maybe_unused,
 346			      struct machine *machine __maybe_unused)
 347{
 348	struct timechart *tchart = container_of(tool, struct timechart, tool);
 349	pid_exit(tchart, event->fork.pid, event->fork.time);
 350	return 0;
 351}
 352
 353#ifdef SUPPORT_OLD_POWER_EVENTS
 354static int use_old_power_events;
 355#endif
 356
 357static void c_state_start(int cpu, u64 timestamp, int state)
 358{
 359	cpus_cstate_start_times[cpu] = timestamp;
 360	cpus_cstate_state[cpu] = state;
 361}
 362
 363static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
 364{
 365	struct power_event *pwr = zalloc(sizeof(*pwr));
 366
 367	if (!pwr)
 368		return;
 369
 370	pwr->state = cpus_cstate_state[cpu];
 371	pwr->start_time = cpus_cstate_start_times[cpu];
 372	pwr->end_time = timestamp;
 373	pwr->cpu = cpu;
 374	pwr->type = CSTATE;
 375	pwr->next = tchart->power_events;
 376
 377	tchart->power_events = pwr;
 378}
 379
 380static struct power_event *p_state_end(struct timechart *tchart, int cpu,
 381					u64 timestamp)
 382{
 383	struct power_event *pwr = zalloc(sizeof(*pwr));
 384
 385	if (!pwr)
 386		return NULL;
 387
 388	pwr->state = cpus_pstate_state[cpu];
 389	pwr->start_time = cpus_pstate_start_times[cpu];
 390	pwr->end_time = timestamp;
 391	pwr->cpu = cpu;
 392	pwr->type = PSTATE;
 393	pwr->next = tchart->power_events;
 394	if (!pwr->start_time)
 395		pwr->start_time = tchart->first_time;
 396
 397	tchart->power_events = pwr;
 398	return pwr;
 399}
 400
 401static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
 402{
 403	struct power_event *pwr;
 404
 405	if (new_freq > 8000000) /* detect invalid data */
 406		return;
 407
 408	pwr = p_state_end(tchart, cpu, timestamp);
 409	if (!pwr)
 410		return;
 411
 412	cpus_pstate_state[cpu] = new_freq;
 413	cpus_pstate_start_times[cpu] = timestamp;
 414
 415	if ((u64)new_freq > tchart->max_freq)
 416		tchart->max_freq = new_freq;
 417
 418	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
 419		tchart->min_freq = new_freq;
 420
 421	if (new_freq == tchart->max_freq - 1000)
 422		tchart->turbo_frequency = tchart->max_freq;
 423}
 424
 425static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
 426			 int waker, int wakee, u8 flags, const char *backtrace)
 427{
 428	struct per_pid *p;
 429	struct wake_event *we = zalloc(sizeof(*we));
 430
 431	if (!we)
 432		return;
 433
 434	we->time = timestamp;
 435	we->waker = waker;
 436	we->backtrace = backtrace;
 437
 438	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 439		we->waker = -1;
 440
 441	we->wakee = wakee;
 442	we->next = tchart->wake_events;
 443	tchart->wake_events = we;
 444	p = find_create_pid(tchart, we->wakee);
 445
 446	if (p && p->current && p->current->state == TYPE_NONE) {
 447		p->current->state_since = timestamp;
 448		p->current->state = TYPE_WAITING;
 449	}
 450	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 451		pid_put_sample(tchart, p->pid, p->current->state, cpu,
 452			       p->current->state_since, timestamp, NULL);
 453		p->current->state_since = timestamp;
 454		p->current->state = TYPE_WAITING;
 455	}
 456}
 457
 458static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
 459			 int prev_pid, int next_pid, u64 prev_state,
 460			 const char *backtrace)
 461{
 462	struct per_pid *p = NULL, *prev_p;
 463
 464	prev_p = find_create_pid(tchart, prev_pid);
 465
 466	p = find_create_pid(tchart, next_pid);
 467
 468	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 469		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
 470			       prev_p->current->state_since, timestamp,
 471			       backtrace);
 472	if (p && p->current) {
 473		if (p->current->state != TYPE_NONE)
 474			pid_put_sample(tchart, next_pid, p->current->state, cpu,
 475				       p->current->state_since, timestamp,
 476				       backtrace);
 477
 478		p->current->state_since = timestamp;
 479		p->current->state = TYPE_RUNNING;
 480	}
 481
 482	if (prev_p->current) {
 483		prev_p->current->state = TYPE_NONE;
 484		prev_p->current->state_since = timestamp;
 485		if (prev_state & 2)
 486			prev_p->current->state = TYPE_BLOCKED;
 487		if (prev_state == 0)
 488			prev_p->current->state = TYPE_WAITING;
 489	}
 490}
 491
 492static const char *cat_backtrace(union perf_event *event,
 493				 struct perf_sample *sample,
 494				 struct machine *machine)
 495{
 496	struct addr_location al;
 497	unsigned int i;
 498	char *p = NULL;
 499	size_t p_len;
 500	u8 cpumode = PERF_RECORD_MISC_USER;
 501	struct addr_location tal;
 502	struct ip_callchain *chain = sample->callchain;
 503	FILE *f = open_memstream(&p, &p_len);
 504
 505	if (!f) {
 506		perror("open_memstream error");
 507		return NULL;
 508	}
 509
 510	if (!chain)
 511		goto exit;
 512
 513	if (machine__resolve(machine, &al, sample) < 0) {
 514		fprintf(stderr, "problem processing %d event, skipping it.\n",
 515			event->header.type);
 516		goto exit;
 517	}
 518
 519	for (i = 0; i < chain->nr; i++) {
 520		u64 ip;
 521
 522		if (callchain_param.order == ORDER_CALLEE)
 523			ip = chain->ips[i];
 524		else
 525			ip = chain->ips[chain->nr - i - 1];
 526
 527		if (ip >= PERF_CONTEXT_MAX) {
 528			switch (ip) {
 529			case PERF_CONTEXT_HV:
 530				cpumode = PERF_RECORD_MISC_HYPERVISOR;
 531				break;
 532			case PERF_CONTEXT_KERNEL:
 533				cpumode = PERF_RECORD_MISC_KERNEL;
 534				break;
 535			case PERF_CONTEXT_USER:
 536				cpumode = PERF_RECORD_MISC_USER;
 537				break;
 538			default:
 539				pr_debug("invalid callchain context: "
 540					 "%"PRId64"\n", (s64) ip);
 541
 542				/*
 543				 * It seems the callchain is corrupted.
 544				 * Discard all.
 545				 */
 546				zfree(&p);
 547				goto exit_put;
 548			}
 549			continue;
 550		}
 551
 552		tal.filtered = 0;
 553		if (thread__find_symbol(al.thread, cpumode, ip, &tal))
 554			fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
 555		else
 556			fprintf(f, "..... %016" PRIx64 "\n", ip);
 557	}
 558exit_put:
 559	addr_location__put(&al);
 560exit:
 561	fclose(f);
 562
 563	return p;
 564}
 565
 566typedef int (*tracepoint_handler)(struct timechart *tchart,
 567				  struct evsel *evsel,
 568				  struct perf_sample *sample,
 569				  const char *backtrace);
 570
 571static int process_sample_event(struct perf_tool *tool,
 572				union perf_event *event,
 573				struct perf_sample *sample,
 574				struct evsel *evsel,
 575				struct machine *machine)
 576{
 577	struct timechart *tchart = container_of(tool, struct timechart, tool);
 578
 579	if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
 580		if (!tchart->first_time || tchart->first_time > sample->time)
 581			tchart->first_time = sample->time;
 582		if (tchart->last_time < sample->time)
 583			tchart->last_time = sample->time;
 584	}
 585
 586	if (evsel->handler != NULL) {
 587		tracepoint_handler f = evsel->handler;
 588		return f(tchart, evsel, sample,
 589			 cat_backtrace(event, sample, machine));
 590	}
 591
 592	return 0;
 593}
 594
 595static int
 596process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
 597			struct evsel *evsel,
 598			struct perf_sample *sample,
 599			const char *backtrace __maybe_unused)
 600{
 601	u32 state  = evsel__intval(evsel, sample, "state");
 602	u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 603
 604	if (state == (u32)PWR_EVENT_EXIT)
 605		c_state_end(tchart, cpu_id, sample->time);
 606	else
 607		c_state_start(cpu_id, sample->time, state);
 608	return 0;
 609}
 610
 611static int
 612process_sample_cpu_frequency(struct timechart *tchart,
 613			     struct evsel *evsel,
 614			     struct perf_sample *sample,
 615			     const char *backtrace __maybe_unused)
 616{
 617	u32 state  = evsel__intval(evsel, sample, "state");
 618	u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 619
 620	p_state_change(tchart, cpu_id, sample->time, state);
 621	return 0;
 622}
 623
 624static int
 625process_sample_sched_wakeup(struct timechart *tchart,
 626			    struct evsel *evsel,
 627			    struct perf_sample *sample,
 628			    const char *backtrace)
 629{
 630	u8 flags  = evsel__intval(evsel, sample, "common_flags");
 631	int waker = evsel__intval(evsel, sample, "common_pid");
 632	int wakee = evsel__intval(evsel, sample, "pid");
 633
 634	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
 635	return 0;
 636}
 637
 638static int
 639process_sample_sched_switch(struct timechart *tchart,
 640			    struct evsel *evsel,
 641			    struct perf_sample *sample,
 642			    const char *backtrace)
 643{
 644	int prev_pid   = evsel__intval(evsel, sample, "prev_pid");
 645	int next_pid   = evsel__intval(evsel, sample, "next_pid");
 646	u64 prev_state = evsel__intval(evsel, sample, "prev_state");
 647
 648	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
 649		     prev_state, backtrace);
 650	return 0;
 651}
 652
 653#ifdef SUPPORT_OLD_POWER_EVENTS
 654static int
 655process_sample_power_start(struct timechart *tchart __maybe_unused,
 656			   struct evsel *evsel,
 657			   struct perf_sample *sample,
 658			   const char *backtrace __maybe_unused)
 659{
 660	u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 661	u64 value  = evsel__intval(evsel, sample, "value");
 662
 663	c_state_start(cpu_id, sample->time, value);
 664	return 0;
 665}
 666
 667static int
 668process_sample_power_end(struct timechart *tchart,
 669			 struct evsel *evsel __maybe_unused,
 670			 struct perf_sample *sample,
 671			 const char *backtrace __maybe_unused)
 672{
 673	c_state_end(tchart, sample->cpu, sample->time);
 674	return 0;
 675}
 676
 677static int
 678process_sample_power_frequency(struct timechart *tchart,
 679			       struct evsel *evsel,
 680			       struct perf_sample *sample,
 681			       const char *backtrace __maybe_unused)
 682{
 683	u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 684	u64 value  = evsel__intval(evsel, sample, "value");
 685
 686	p_state_change(tchart, cpu_id, sample->time, value);
 687	return 0;
 688}
 689#endif /* SUPPORT_OLD_POWER_EVENTS */
 690
 691/*
 692 * After the last sample we need to wrap up the current C/P state
 693 * and close it out for each CPU.
 694 */
 695static void end_sample_processing(struct timechart *tchart)
 696{
 697	u64 cpu;
 698	struct power_event *pwr;
 699
 700	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
 701		/* C state */
 702#if 0
 703		pwr = zalloc(sizeof(*pwr));
 704		if (!pwr)
 705			return;
 706
 707		pwr->state = cpus_cstate_state[cpu];
 708		pwr->start_time = cpus_cstate_start_times[cpu];
 709		pwr->end_time = tchart->last_time;
 710		pwr->cpu = cpu;
 711		pwr->type = CSTATE;
 712		pwr->next = tchart->power_events;
 713
 714		tchart->power_events = pwr;
 715#endif
 716		/* P state */
 717
 718		pwr = p_state_end(tchart, cpu, tchart->last_time);
 719		if (!pwr)
 720			return;
 721
 722		if (!pwr->state)
 723			pwr->state = tchart->min_freq;
 724	}
 725}
 726
 727static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
 728			       u64 start, int fd)
 729{
 730	struct per_pid *p = find_create_pid(tchart, pid);
 731	struct per_pidcomm *c = p->current;
 732	struct io_sample *sample;
 733	struct io_sample *prev;
 734
 735	if (!c) {
 736		c = create_pidcomm(p);
 737		if (!c)
 738			return -ENOMEM;
 739	}
 740
 741	prev = c->io_samples;
 742
 743	if (prev && prev->start_time && !prev->end_time) {
 744		pr_warning("Skip invalid start event: "
 745			   "previous event already started!\n");
 746
 747		/* remove previous event that has been started,
 748		 * we are not sure we will ever get an end for it */
 749		c->io_samples = prev->next;
 750		free(prev);
 751		return 0;
 752	}
 753
 754	sample = zalloc(sizeof(*sample));
 755	if (!sample)
 756		return -ENOMEM;
 757	sample->start_time = start;
 758	sample->type = type;
 759	sample->fd = fd;
 760	sample->next = c->io_samples;
 761	c->io_samples = sample;
 762
 763	if (c->start_time == 0 || c->start_time > start)
 764		c->start_time = start;
 765
 766	return 0;
 767}
 768
 769static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
 770			     u64 end, long ret)
 771{
 772	struct per_pid *p = find_create_pid(tchart, pid);
 773	struct per_pidcomm *c = p->current;
 774	struct io_sample *sample, *prev;
 775
 776	if (!c) {
 777		pr_warning("Invalid pidcomm!\n");
 778		return -1;
 779	}
 780
 781	sample = c->io_samples;
 782
 783	if (!sample) /* skip partially captured events */
 784		return 0;
 785
 786	if (sample->end_time) {
 787		pr_warning("Skip invalid end event: "
 788			   "previous event already ended!\n");
 789		return 0;
 790	}
 791
 792	if (sample->type != type) {
 793		pr_warning("Skip invalid end event: invalid event type!\n");
 794		return 0;
 795	}
 796
 797	sample->end_time = end;
 798	prev = sample->next;
 799
 800	/* we want to be able to see small and fast transfers, so make them
 801	 * at least min_time long, but don't overlap them */
 802	if (sample->end_time - sample->start_time < tchart->min_time)
 803		sample->end_time = sample->start_time + tchart->min_time;
 804	if (prev && sample->start_time < prev->end_time) {
 805		if (prev->err) /* try to make errors more visible */
 806			sample->start_time = prev->end_time;
 807		else
 808			prev->end_time = sample->start_time;
 809	}
 810
 811	if (ret < 0) {
 812		sample->err = ret;
 813	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
 814		   type == IOTYPE_TX || type == IOTYPE_RX) {
 815
 816		if ((u64)ret > c->max_bytes)
 817			c->max_bytes = ret;
 818
 819		c->total_bytes += ret;
 820		p->total_bytes += ret;
 821		sample->bytes = ret;
 822	}
 823
 824	/* merge two requests to make svg smaller and render-friendly */
 825	if (prev &&
 826	    prev->type == sample->type &&
 827	    prev->err == sample->err &&
 828	    prev->fd == sample->fd &&
 829	    prev->end_time + tchart->merge_dist >= sample->start_time) {
 830
 831		sample->bytes += prev->bytes;
 832		sample->merges += prev->merges + 1;
 833
 834		sample->start_time = prev->start_time;
 835		sample->next = prev->next;
 836		free(prev);
 837
 838		if (!sample->err && sample->bytes > c->max_bytes)
 839			c->max_bytes = sample->bytes;
 840	}
 841
 842	tchart->io_events++;
 843
 844	return 0;
 845}
 846
 847static int
 848process_enter_read(struct timechart *tchart,
 849		   struct evsel *evsel,
 850		   struct perf_sample *sample)
 851{
 852	long fd = evsel__intval(evsel, sample, "fd");
 853	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
 854				   sample->time, fd);
 855}
 856
 857static int
 858process_exit_read(struct timechart *tchart,
 859		  struct evsel *evsel,
 860		  struct perf_sample *sample)
 861{
 862	long ret = evsel__intval(evsel, sample, "ret");
 863	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
 864				 sample->time, ret);
 865}
 866
 867static int
 868process_enter_write(struct timechart *tchart,
 869		    struct evsel *evsel,
 870		    struct perf_sample *sample)
 871{
 872	long fd = evsel__intval(evsel, sample, "fd");
 873	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 874				   sample->time, fd);
 875}
 876
 877static int
 878process_exit_write(struct timechart *tchart,
 879		   struct evsel *evsel,
 880		   struct perf_sample *sample)
 881{
 882	long ret = evsel__intval(evsel, sample, "ret");
 883	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 884				 sample->time, ret);
 885}
 886
 887static int
 888process_enter_sync(struct timechart *tchart,
 889		   struct evsel *evsel,
 890		   struct perf_sample *sample)
 891{
 892	long fd = evsel__intval(evsel, sample, "fd");
 893	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 894				   sample->time, fd);
 895}
 896
 897static int
 898process_exit_sync(struct timechart *tchart,
 899		  struct evsel *evsel,
 900		  struct perf_sample *sample)
 901{
 902	long ret = evsel__intval(evsel, sample, "ret");
 903	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 904				 sample->time, ret);
 905}
 906
 907static int
 908process_enter_tx(struct timechart *tchart,
 909		 struct evsel *evsel,
 910		 struct perf_sample *sample)
 911{
 912	long fd = evsel__intval(evsel, sample, "fd");
 913	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
 914				   sample->time, fd);
 915}
 916
 917static int
 918process_exit_tx(struct timechart *tchart,
 919		struct evsel *evsel,
 920		struct perf_sample *sample)
 921{
 922	long ret = evsel__intval(evsel, sample, "ret");
 923	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
 924				 sample->time, ret);
 925}
 926
 927static int
 928process_enter_rx(struct timechart *tchart,
 929		 struct evsel *evsel,
 930		 struct perf_sample *sample)
 931{
 932	long fd = evsel__intval(evsel, sample, "fd");
 933	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
 934				   sample->time, fd);
 935}
 936
 937static int
 938process_exit_rx(struct timechart *tchart,
 939		struct evsel *evsel,
 940		struct perf_sample *sample)
 941{
 942	long ret = evsel__intval(evsel, sample, "ret");
 943	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
 944				 sample->time, ret);
 945}
 946
 947static int
 948process_enter_poll(struct timechart *tchart,
 949		   struct evsel *evsel,
 950		   struct perf_sample *sample)
 951{
 952	long fd = evsel__intval(evsel, sample, "fd");
 953	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
 954				   sample->time, fd);
 955}
 956
 957static int
 958process_exit_poll(struct timechart *tchart,
 959		  struct evsel *evsel,
 960		  struct perf_sample *sample)
 961{
 962	long ret = evsel__intval(evsel, sample, "ret");
 963	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
 964				 sample->time, ret);
 965}
 966
 967/*
 968 * Sort the pid datastructure
 969 */
 970static void sort_pids(struct timechart *tchart)
 971{
 972	struct per_pid *new_list, *p, *cursor, *prev;
 973	/* sort by ppid first, then by pid, lowest to highest */
 974
 975	new_list = NULL;
 976
 977	while (tchart->all_data) {
 978		p = tchart->all_data;
 979		tchart->all_data = p->next;
 980		p->next = NULL;
 981
 982		if (new_list == NULL) {
 983			new_list = p;
 984			p->next = NULL;
 985			continue;
 986		}
 987		prev = NULL;
 988		cursor = new_list;
 989		while (cursor) {
 990			if (cursor->ppid > p->ppid ||
 991				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 992				/* must insert before */
 993				if (prev) {
 994					p->next = prev->next;
 995					prev->next = p;
 996					cursor = NULL;
 997					continue;
 998				} else {
 999					p->next = new_list;
1000					new_list = p;
1001					cursor = NULL;
1002					continue;
1003				}
1004			}
1005
1006			prev = cursor;
1007			cursor = cursor->next;
1008			if (!cursor)
1009				prev->next = p;
1010		}
1011	}
1012	tchart->all_data = new_list;
1013}
1014
1015
1016static void draw_c_p_states(struct timechart *tchart)
1017{
1018	struct power_event *pwr;
1019	pwr = tchart->power_events;
1020
1021	/*
1022	 * two pass drawing so that the P state bars are on top of the C state blocks
1023	 */
1024	while (pwr) {
1025		if (pwr->type == CSTATE)
1026			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1027		pwr = pwr->next;
1028	}
1029
1030	pwr = tchart->power_events;
1031	while (pwr) {
1032		if (pwr->type == PSTATE) {
1033			if (!pwr->state)
1034				pwr->state = tchart->min_freq;
1035			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1036		}
1037		pwr = pwr->next;
1038	}
1039}
1040
1041static void draw_wakeups(struct timechart *tchart)
1042{
1043	struct wake_event *we;
1044	struct per_pid *p;
1045	struct per_pidcomm *c;
1046
1047	we = tchart->wake_events;
1048	while (we) {
1049		int from = 0, to = 0;
1050		char *task_from = NULL, *task_to = NULL;
1051
1052		/* locate the column of the waker and wakee */
1053		p = tchart->all_data;
1054		while (p) {
1055			if (p->pid == we->waker || p->pid == we->wakee) {
1056				c = p->all;
1057				while (c) {
1058					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
1059						if (p->pid == we->waker && !from) {
1060							from = c->Y;
1061							task_from = strdup(c->comm);
1062						}
1063						if (p->pid == we->wakee && !to) {
1064							to = c->Y;
1065							task_to = strdup(c->comm);
1066						}
1067					}
1068					c = c->next;
1069				}
1070				c = p->all;
1071				while (c) {
1072					if (p->pid == we->waker && !from) {
1073						from = c->Y;
1074						task_from = strdup(c->comm);
1075					}
1076					if (p->pid == we->wakee && !to) {
1077						to = c->Y;
1078						task_to = strdup(c->comm);
1079					}
1080					c = c->next;
1081				}
1082			}
1083			p = p->next;
1084		}
1085
1086		if (!task_from) {
1087			task_from = malloc(40);
1088			sprintf(task_from, "[%i]", we->waker);
1089		}
1090		if (!task_to) {
1091			task_to = malloc(40);
1092			sprintf(task_to, "[%i]", we->wakee);
1093		}
1094
1095		if (we->waker == -1)
1096			svg_interrupt(we->time, to, we->backtrace);
1097		else if (from && to && abs(from - to) == 1)
1098			svg_wakeline(we->time, from, to, we->backtrace);
1099		else
1100			svg_partial_wakeline(we->time, from, task_from, to,
1101					     task_to, we->backtrace);
1102		we = we->next;
1103
1104		free(task_from);
1105		free(task_to);
1106	}
1107}
1108
1109static void draw_cpu_usage(struct timechart *tchart)
1110{
1111	struct per_pid *p;
1112	struct per_pidcomm *c;
1113	struct cpu_sample *sample;
1114	p = tchart->all_data;
1115	while (p) {
1116		c = p->all;
1117		while (c) {
1118			sample = c->samples;
1119			while (sample) {
1120				if (sample->type == TYPE_RUNNING) {
1121					svg_process(sample->cpu,
1122						    sample->start_time,
1123						    sample->end_time,
1124						    p->pid,
1125						    c->comm,
1126						    sample->backtrace);
1127				}
1128
1129				sample = sample->next;
1130			}
1131			c = c->next;
1132		}
1133		p = p->next;
1134	}
1135}
1136
1137static void draw_io_bars(struct timechart *tchart)
1138{
1139	const char *suf;
1140	double bytes;
1141	char comm[256];
1142	struct per_pid *p;
1143	struct per_pidcomm *c;
1144	struct io_sample *sample;
1145	int Y = 1;
1146
1147	p = tchart->all_data;
1148	while (p) {
1149		c = p->all;
1150		while (c) {
1151			if (!c->display) {
1152				c->Y = 0;
1153				c = c->next;
1154				continue;
1155			}
1156
1157			svg_box(Y, c->start_time, c->end_time, "process3");
1158			sample = c->io_samples;
1159			for (sample = c->io_samples; sample; sample = sample->next) {
1160				double h = (double)sample->bytes / c->max_bytes;
1161
1162				if (tchart->skip_eagain &&
1163				    sample->err == -EAGAIN)
1164					continue;
1165
1166				if (sample->err)
1167					h = 1;
1168
1169				if (sample->type == IOTYPE_SYNC)
1170					svg_fbox(Y,
1171						sample->start_time,
1172						sample->end_time,
1173						1,
1174						sample->err ? "error" : "sync",
1175						sample->fd,
1176						sample->err,
1177						sample->merges);
1178				else if (sample->type == IOTYPE_POLL)
1179					svg_fbox(Y,
1180						sample->start_time,
1181						sample->end_time,
1182						1,
1183						sample->err ? "error" : "poll",
1184						sample->fd,
1185						sample->err,
1186						sample->merges);
1187				else if (sample->type == IOTYPE_READ)
1188					svg_ubox(Y,
1189						sample->start_time,
1190						sample->end_time,
1191						h,
1192						sample->err ? "error" : "disk",
1193						sample->fd,
1194						sample->err,
1195						sample->merges);
1196				else if (sample->type == IOTYPE_WRITE)
1197					svg_lbox(Y,
1198						sample->start_time,
1199						sample->end_time,
1200						h,
1201						sample->err ? "error" : "disk",
1202						sample->fd,
1203						sample->err,
1204						sample->merges);
1205				else if (sample->type == IOTYPE_RX)
1206					svg_ubox(Y,
1207						sample->start_time,
1208						sample->end_time,
1209						h,
1210						sample->err ? "error" : "net",
1211						sample->fd,
1212						sample->err,
1213						sample->merges);
1214				else if (sample->type == IOTYPE_TX)
1215					svg_lbox(Y,
1216						sample->start_time,
1217						sample->end_time,
1218						h,
1219						sample->err ? "error" : "net",
1220						sample->fd,
1221						sample->err,
1222						sample->merges);
1223			}
1224
1225			suf = "";
1226			bytes = c->total_bytes;
1227			if (bytes > 1024) {
1228				bytes = bytes / 1024;
1229				suf = "K";
1230			}
1231			if (bytes > 1024) {
1232				bytes = bytes / 1024;
1233				suf = "M";
1234			}
1235			if (bytes > 1024) {
1236				bytes = bytes / 1024;
1237				suf = "G";
1238			}
1239
1240
1241			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
1242			svg_text(Y, c->start_time, comm);
1243
1244			c->Y = Y;
1245			Y++;
1246			c = c->next;
1247		}
1248		p = p->next;
1249	}
1250}
1251
1252static void draw_process_bars(struct timechart *tchart)
1253{
1254	struct per_pid *p;
1255	struct per_pidcomm *c;
1256	struct cpu_sample *sample;
1257	int Y = 0;
1258
1259	Y = 2 * tchart->numcpus + 2;
1260
1261	p = tchart->all_data;
1262	while (p) {
1263		c = p->all;
1264		while (c) {
1265			if (!c->display) {
1266				c->Y = 0;
1267				c = c->next;
1268				continue;
1269			}
1270
1271			svg_box(Y, c->start_time, c->end_time, "process");
1272			sample = c->samples;
1273			while (sample) {
1274				if (sample->type == TYPE_RUNNING)
1275					svg_running(Y, sample->cpu,
1276						    sample->start_time,
1277						    sample->end_time,
1278						    sample->backtrace);
1279				if (sample->type == TYPE_BLOCKED)
1280					svg_blocked(Y, sample->cpu,
1281						    sample->start_time,
1282						    sample->end_time,
1283						    sample->backtrace);
1284				if (sample->type == TYPE_WAITING)
1285					svg_waiting(Y, sample->cpu,
1286						    sample->start_time,
1287						    sample->end_time,
1288						    sample->backtrace);
1289				sample = sample->next;
1290			}
1291
1292			if (c->comm) {
1293				char comm[256];
1294				if (c->total_time > 5000000000) /* 5 seconds */
1295					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
1296				else
1297					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
1298
1299				svg_text(Y, c->start_time, comm);
1300			}
1301			c->Y = Y;
1302			Y++;
1303			c = c->next;
1304		}
1305		p = p->next;
1306	}
1307}
1308
1309static void add_process_filter(const char *string)
1310{
1311	int pid = strtoull(string, NULL, 10);
1312	struct process_filter *filt = malloc(sizeof(*filt));
1313
1314	if (!filt)
1315		return;
1316
1317	filt->name = strdup(string);
1318	filt->pid  = pid;
1319	filt->next = process_filter;
1320
1321	process_filter = filt;
1322}
1323
1324static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
1325{
1326	struct process_filter *filt;
1327	if (!process_filter)
1328		return 1;
1329
1330	filt = process_filter;
1331	while (filt) {
1332		if (filt->pid && p->pid == filt->pid)
1333			return 1;
1334		if (strcmp(filt->name, c->comm) == 0)
1335			return 1;
1336		filt = filt->next;
1337	}
1338	return 0;
1339}
1340
1341static int determine_display_tasks_filtered(struct timechart *tchart)
1342{
1343	struct per_pid *p;
1344	struct per_pidcomm *c;
1345	int count = 0;
1346
1347	p = tchart->all_data;
1348	while (p) {
1349		p->display = 0;
1350		if (p->start_time == 1)
1351			p->start_time = tchart->first_time;
1352
1353		/* no exit marker, task kept running to the end */
1354		if (p->end_time == 0)
1355			p->end_time = tchart->last_time;
1356
1357		c = p->all;
1358
1359		while (c) {
1360			c->display = 0;
1361
1362			if (c->start_time == 1)
1363				c->start_time = tchart->first_time;
1364
1365			if (passes_filter(p, c)) {
1366				c->display = 1;
1367				p->display = 1;
1368				count++;
1369			}
1370
1371			if (c->end_time == 0)
1372				c->end_time = tchart->last_time;
1373
1374			c = c->next;
1375		}
1376		p = p->next;
1377	}
1378	return count;
1379}
1380
1381static int determine_display_tasks(struct timechart *tchart, u64 threshold)
1382{
1383	struct per_pid *p;
1384	struct per_pidcomm *c;
1385	int count = 0;
1386
1387	p = tchart->all_data;
1388	while (p) {
1389		p->display = 0;
1390		if (p->start_time == 1)
1391			p->start_time = tchart->first_time;
1392
1393		/* no exit marker, task kept running to the end */
1394		if (p->end_time == 0)
1395			p->end_time = tchart->last_time;
1396		if (p->total_time >= threshold)
1397			p->display = 1;
1398
1399		c = p->all;
1400
1401		while (c) {
1402			c->display = 0;
1403
1404			if (c->start_time == 1)
1405				c->start_time = tchart->first_time;
1406
1407			if (c->total_time >= threshold) {
1408				c->display = 1;
1409				count++;
1410			}
1411
1412			if (c->end_time == 0)
1413				c->end_time = tchart->last_time;
1414
1415			c = c->next;
1416		}
1417		p = p->next;
1418	}
1419	return count;
1420}
1421
1422static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
1423{
1424	struct per_pid *p;
1425	struct per_pidcomm *c;
1426	int count = 0;
1427
1428	p = timechart->all_data;
1429	while (p) {
1430		/* no exit marker, task kept running to the end */
1431		if (p->end_time == 0)
1432			p->end_time = timechart->last_time;
1433
1434		c = p->all;
1435
1436		while (c) {
1437			c->display = 0;
1438
1439			if (c->total_bytes >= threshold) {
1440				c->display = 1;
1441				count++;
1442			}
1443
1444			if (c->end_time == 0)
1445				c->end_time = timechart->last_time;
1446
1447			c = c->next;
1448		}
1449		p = p->next;
1450	}
1451	return count;
1452}
1453
1454#define BYTES_THRESH (1 * 1024 * 1024)
1455#define TIME_THRESH 10000000
1456
1457static void write_svg_file(struct timechart *tchart, const char *filename)
1458{
1459	u64 i;
1460	int count;
1461	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
1462
1463	if (tchart->power_only)
1464		tchart->proc_num = 0;
1465
1466	/* We'd like to show at least proc_num tasks;
1467	 * be less picky if we have fewer */
1468	do {
1469		if (process_filter)
1470			count = determine_display_tasks_filtered(tchart);
1471		else if (tchart->io_events)
1472			count = determine_display_io_tasks(tchart, thresh);
1473		else
1474			count = determine_display_tasks(tchart, thresh);
1475		thresh /= 10;
1476	} while (!process_filter && thresh && count < tchart->proc_num);
1477
1478	if (!tchart->proc_num)
1479		count = 0;
1480
1481	if (tchart->io_events) {
1482		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
1483
1484		svg_time_grid(0.5);
1485		svg_io_legenda();
1486
1487		draw_io_bars(tchart);
1488	} else {
1489		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1490
1491		svg_time_grid(0);
1492
1493		svg_legenda();
1494
1495		for (i = 0; i < tchart->numcpus; i++)
1496			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1497
1498		draw_cpu_usage(tchart);
1499		if (tchart->proc_num)
1500			draw_process_bars(tchart);
1501		if (!tchart->tasks_only)
1502			draw_c_p_states(tchart);
1503		if (tchart->proc_num)
1504			draw_wakeups(tchart);
1505	}
1506
1507	svg_close();
1508}
1509
1510static int process_header(struct perf_file_section *section __maybe_unused,
1511			  struct perf_header *ph,
1512			  int feat,
1513			  int fd __maybe_unused,
1514			  void *data)
1515{
1516	struct timechart *tchart = data;
1517
1518	switch (feat) {
1519	case HEADER_NRCPUS:
1520		tchart->numcpus = ph->env.nr_cpus_avail;
1521		break;
1522
1523	case HEADER_CPU_TOPOLOGY:
1524		if (!tchart->topology)
1525			break;
1526
1527		if (svg_build_topology_map(&ph->env))
1528			fprintf(stderr, "problem building topology\n");
1529		break;
1530
1531	default:
1532		break;
1533	}
1534
1535	return 0;
1536}
1537
1538static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1539{
1540	const struct evsel_str_handler power_tracepoints[] = {
1541		{ "power:cpu_idle",		process_sample_cpu_idle },
1542		{ "power:cpu_frequency",	process_sample_cpu_frequency },
1543		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
1544		{ "sched:sched_switch",		process_sample_sched_switch },
1545#ifdef SUPPORT_OLD_POWER_EVENTS
1546		{ "power:power_start",		process_sample_power_start },
1547		{ "power:power_end",		process_sample_power_end },
1548		{ "power:power_frequency",	process_sample_power_frequency },
1549#endif
1550
1551		{ "syscalls:sys_enter_read",		process_enter_read },
1552		{ "syscalls:sys_enter_pread64",		process_enter_read },
1553		{ "syscalls:sys_enter_readv",		process_enter_read },
1554		{ "syscalls:sys_enter_preadv",		process_enter_read },
1555		{ "syscalls:sys_enter_write",		process_enter_write },
1556		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
1557		{ "syscalls:sys_enter_writev",		process_enter_write },
1558		{ "syscalls:sys_enter_pwritev",		process_enter_write },
1559		{ "syscalls:sys_enter_sync",		process_enter_sync },
1560		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
1561		{ "syscalls:sys_enter_fsync",		process_enter_sync },
1562		{ "syscalls:sys_enter_msync",		process_enter_sync },
1563		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
1564		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
1565		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
1566		{ "syscalls:sys_enter_sendto",		process_enter_tx },
1567		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
1568		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
1569		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
1570		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
1571		{ "syscalls:sys_enter_poll",		process_enter_poll },
1572		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
1573		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
1574		{ "syscalls:sys_enter_select",		process_enter_poll },
1575
1576		{ "syscalls:sys_exit_read",		process_exit_read },
1577		{ "syscalls:sys_exit_pread64",		process_exit_read },
1578		{ "syscalls:sys_exit_readv",		process_exit_read },
1579		{ "syscalls:sys_exit_preadv",		process_exit_read },
1580		{ "syscalls:sys_exit_write",		process_exit_write },
1581		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
1582		{ "syscalls:sys_exit_writev",		process_exit_write },
1583		{ "syscalls:sys_exit_pwritev",		process_exit_write },
1584		{ "syscalls:sys_exit_sync",		process_exit_sync },
1585		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
1586		{ "syscalls:sys_exit_fsync",		process_exit_sync },
1587		{ "syscalls:sys_exit_msync",		process_exit_sync },
1588		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
1589		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
1590		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
1591		{ "syscalls:sys_exit_sendto",		process_exit_tx },
1592		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
1593		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
1594		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
1595		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
1596		{ "syscalls:sys_exit_poll",		process_exit_poll },
1597		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
1598		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
1599		{ "syscalls:sys_exit_select",		process_exit_poll },
1600	};
1601	struct perf_data data = {
1602		.path  = input_name,
1603		.mode  = PERF_DATA_MODE_READ,
1604		.force = tchart->force,
1605	};
1606
1607	struct perf_session *session = perf_session__new(&data, &tchart->tool);
1608	int ret = -EINVAL;
1609
1610	if (IS_ERR(session))
1611		return PTR_ERR(session);
1612
1613	symbol__init(&session->header.env);
1614
1615	(void)perf_header__process_sections(&session->header,
1616					    perf_data__fd(session->data),
1617					    tchart,
1618					    process_header);
1619
1620	if (!perf_session__has_traces(session, "timechart record"))
1621		goto out_delete;
1622
1623	if (perf_session__set_tracepoints_handlers(session,
1624						   power_tracepoints)) {
1625		pr_err("Initializing session tracepoint handlers failed\n");
1626		goto out_delete;
1627	}
1628
1629	ret = perf_session__process_events(session);
1630	if (ret)
1631		goto out_delete;
1632
1633	end_sample_processing(tchart);
1634
1635	sort_pids(tchart);
1636
1637	write_svg_file(tchart, output_name);
1638
1639	pr_info("Written %2.1f seconds of trace to %s.\n",
1640		(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
1641out_delete:
1642	perf_session__delete(session);
1643	return ret;
1644}
1645
1646static int timechart__io_record(int argc, const char **argv)
1647{
1648	unsigned int rec_argc, i;
1649	const char **rec_argv;
1650	const char **p;
1651	char *filter = NULL;
1652
1653	const char * const common_args[] = {
1654		"record", "-a", "-R", "-c", "1",
1655	};
1656	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1657
1658	const char * const disk_events[] = {
1659		"syscalls:sys_enter_read",
1660		"syscalls:sys_enter_pread64",
1661		"syscalls:sys_enter_readv",
1662		"syscalls:sys_enter_preadv",
1663		"syscalls:sys_enter_write",
1664		"syscalls:sys_enter_pwrite64",
1665		"syscalls:sys_enter_writev",
1666		"syscalls:sys_enter_pwritev",
1667		"syscalls:sys_enter_sync",
1668		"syscalls:sys_enter_sync_file_range",
1669		"syscalls:sys_enter_fsync",
1670		"syscalls:sys_enter_msync",
1671
1672		"syscalls:sys_exit_read",
1673		"syscalls:sys_exit_pread64",
1674		"syscalls:sys_exit_readv",
1675		"syscalls:sys_exit_preadv",
1676		"syscalls:sys_exit_write",
1677		"syscalls:sys_exit_pwrite64",
1678		"syscalls:sys_exit_writev",
1679		"syscalls:sys_exit_pwritev",
1680		"syscalls:sys_exit_sync",
1681		"syscalls:sys_exit_sync_file_range",
1682		"syscalls:sys_exit_fsync",
1683		"syscalls:sys_exit_msync",
1684	};
1685	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
1686
1687	const char * const net_events[] = {
1688		"syscalls:sys_enter_recvfrom",
1689		"syscalls:sys_enter_recvmmsg",
1690		"syscalls:sys_enter_recvmsg",
1691		"syscalls:sys_enter_sendto",
1692		"syscalls:sys_enter_sendmsg",
1693		"syscalls:sys_enter_sendmmsg",
1694
1695		"syscalls:sys_exit_recvfrom",
1696		"syscalls:sys_exit_recvmmsg",
1697		"syscalls:sys_exit_recvmsg",
1698		"syscalls:sys_exit_sendto",
1699		"syscalls:sys_exit_sendmsg",
1700		"syscalls:sys_exit_sendmmsg",
1701	};
1702	unsigned int net_events_nr = ARRAY_SIZE(net_events);
1703
1704	const char * const poll_events[] = {
1705		"syscalls:sys_enter_epoll_pwait",
1706		"syscalls:sys_enter_epoll_wait",
1707		"syscalls:sys_enter_poll",
1708		"syscalls:sys_enter_ppoll",
1709		"syscalls:sys_enter_pselect6",
1710		"syscalls:sys_enter_select",
1711
1712		"syscalls:sys_exit_epoll_pwait",
1713		"syscalls:sys_exit_epoll_wait",
1714		"syscalls:sys_exit_poll",
1715		"syscalls:sys_exit_ppoll",
1716		"syscalls:sys_exit_pselect6",
1717		"syscalls:sys_exit_select",
1718	};
1719	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
1720
1721	rec_argc = common_args_nr +
1722		disk_events_nr * 4 +
1723		net_events_nr * 4 +
1724		poll_events_nr * 4 +
1725		argc;
1726	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1727
1728	if (rec_argv == NULL)
1729		return -ENOMEM;
1730
1731	if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
1732		free(rec_argv);
1733		return -ENOMEM;
1734	}
1735
1736	p = rec_argv;
1737	for (i = 0; i < common_args_nr; i++)
1738		*p++ = strdup(common_args[i]);
1739
1740	for (i = 0; i < disk_events_nr; i++) {
1741		if (!is_valid_tracepoint(disk_events[i])) {
1742			rec_argc -= 4;
1743			continue;
1744		}
1745
1746		*p++ = "-e";
1747		*p++ = strdup(disk_events[i]);
1748		*p++ = "--filter";
1749		*p++ = filter;
1750	}
1751	for (i = 0; i < net_events_nr; i++) {
1752		if (!is_valid_tracepoint(net_events[i])) {
1753			rec_argc -= 4;
1754			continue;
1755		}
1756
1757		*p++ = "-e";
1758		*p++ = strdup(net_events[i]);
1759		*p++ = "--filter";
1760		*p++ = filter;
1761	}
1762	for (i = 0; i < poll_events_nr; i++) {
1763		if (!is_valid_tracepoint(poll_events[i])) {
1764			rec_argc -= 4;
1765			continue;
1766		}
1767
1768		*p++ = "-e";
1769		*p++ = strdup(poll_events[i]);
1770		*p++ = "--filter";
1771		*p++ = filter;
1772	}
1773
1774	for (i = 0; i < (unsigned int)argc; i++)
1775		*p++ = argv[i];
1776
1777	return cmd_record(rec_argc, rec_argv);
1778}
1779
1780
1781static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1782{
1783	unsigned int rec_argc, i, j;
1784	const char **rec_argv;
1785	const char **p;
1786	unsigned int record_elems;
1787
1788	const char * const common_args[] = {
1789		"record", "-a", "-R", "-c", "1",
1790	};
1791	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1792
1793	const char * const backtrace_args[] = {
1794		"-g",
1795	};
1796	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1797
1798	const char * const power_args[] = {
1799		"-e", "power:cpu_frequency",
1800		"-e", "power:cpu_idle",
1801	};
1802	unsigned int power_args_nr = ARRAY_SIZE(power_args);
1803
1804	const char * const old_power_args[] = {
1805#ifdef SUPPORT_OLD_POWER_EVENTS
1806		"-e", "power:power_start",
1807		"-e", "power:power_end",
1808		"-e", "power:power_frequency",
1809#endif
1810	};
1811	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1812
1813	const char * const tasks_args[] = {
1814		"-e", "sched:sched_wakeup",
1815		"-e", "sched:sched_switch",
1816	};
1817	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1818
1819#ifdef SUPPORT_OLD_POWER_EVENTS
1820	if (!is_valid_tracepoint("power:cpu_idle") &&
1821	    is_valid_tracepoint("power:power_start")) {
1822		use_old_power_events = 1;
1823		power_args_nr = 0;
1824	} else {
1825		old_power_args_nr = 0;
1826	}
1827#endif
1828
1829	if (tchart->power_only)
1830		tasks_args_nr = 0;
1831
1832	if (tchart->tasks_only) {
1833		power_args_nr = 0;
1834		old_power_args_nr = 0;
1835	}
1836
1837	if (!tchart->with_backtrace)
1838		backtrace_args_no = 0;
1839
1840	record_elems = common_args_nr + tasks_args_nr +
1841		power_args_nr + old_power_args_nr + backtrace_args_no;
1842
1843	rec_argc = record_elems + argc;
1844	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1845
1846	if (rec_argv == NULL)
1847		return -ENOMEM;
1848
1849	p = rec_argv;
1850	for (i = 0; i < common_args_nr; i++)
1851		*p++ = strdup(common_args[i]);
1852
1853	for (i = 0; i < backtrace_args_no; i++)
1854		*p++ = strdup(backtrace_args[i]);
1855
1856	for (i = 0; i < tasks_args_nr; i++)
1857		*p++ = strdup(tasks_args[i]);
1858
1859	for (i = 0; i < power_args_nr; i++)
1860		*p++ = strdup(power_args[i]);
1861
1862	for (i = 0; i < old_power_args_nr; i++)
1863		*p++ = strdup(old_power_args[i]);
1864
1865	for (j = 0; j < (unsigned int)argc; j++)
1866		*p++ = argv[j];
1867
1868	return cmd_record(rec_argc, rec_argv);
1869}
1870
1871static int
1872parse_process(const struct option *opt __maybe_unused, const char *arg,
1873	      int __maybe_unused unset)
1874{
1875	if (arg)
1876		add_process_filter(arg);
1877	return 0;
1878}
1879
1880static int
1881parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1882		int __maybe_unused unset)
1883{
1884	unsigned long duration = strtoul(arg, NULL, 0);
1885
1886	if (svg_highlight || svg_highlight_name)
1887		return -1;
1888
1889	if (duration)
1890		svg_highlight = duration;
1891	else
1892		svg_highlight_name = strdup(arg);
1893
1894	return 0;
1895}
1896
1897static int
1898parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
1899{
1900	char unit = 'n';
1901	u64 *value = opt->value;
1902
1903	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
1904		switch (unit) {
1905		case 'm':
1906			*value *= NSEC_PER_MSEC;
1907			break;
1908		case 'u':
1909			*value *= NSEC_PER_USEC;
1910			break;
1911		case 'n':
1912			break;
1913		default:
1914			return -1;
1915		}
1916	}
1917
1918	return 0;
1919}
1920
1921int cmd_timechart(int argc, const char **argv)
1922{
1923	struct timechart tchart = {
1924		.tool = {
1925			.comm		 = process_comm_event,
1926			.fork		 = process_fork_event,
1927			.exit		 = process_exit_event,
1928			.sample		 = process_sample_event,
1929			.ordered_events	 = true,
1930		},
1931		.proc_num = 15,
1932		.min_time = NSEC_PER_MSEC,
1933		.merge_dist = 1000,
1934	};
1935	const char *output_name = "output.svg";
1936	const struct option timechart_common_options[] = {
1937	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1938	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
1939	OPT_END()
1940	};
1941	const struct option timechart_options[] = {
1942	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1943	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1944	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1945	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1946		      "highlight tasks. Pass duration in ns or process name.",
1947		       parse_highlight),
1948	OPT_CALLBACK('p', "process", NULL, "process",
1949		      "process selector. Pass a pid or process name.",
1950		       parse_process),
1951	OPT_CALLBACK(0, "symfs", NULL, "directory",
1952		     "Look for files with symbols relative to this directory",
1953		     symbol__config_symfs),
1954	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1955		    "min. number of tasks to print"),
1956	OPT_BOOLEAN('t', "topology", &tchart.topology,
1957		    "sort CPUs according to topology"),
1958	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
1959		    "skip EAGAIN errors"),
1960	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
1961		     "all IO faster than min-time will visually appear longer",
1962		     parse_time),
1963	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
1964		     "merge events that are merge-dist us apart",
1965		     parse_time),
1966	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
1967	OPT_PARENT(timechart_common_options),
1968	};
1969	const char * const timechart_subcommands[] = { "record", NULL };
1970	const char *timechart_usage[] = {
1971		"perf timechart [<options>] {record}",
1972		NULL
1973	};
1974	const struct option timechart_record_options[] = {
1975	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
1976		    "record only IO data"),
1977	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1978	OPT_PARENT(timechart_common_options),
1979	};
1980	const char * const timechart_record_usage[] = {
1981		"perf timechart record [<options>]",
1982		NULL
1983	};
1984	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
1985			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
1986
1987	if (tchart.power_only && tchart.tasks_only) {
1988		pr_err("-P and -T options cannot be used at the same time.\n");
1989		return -1;
1990	}
1991
1992	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
1993		argc = parse_options(argc, argv, timechart_record_options,
1994				     timechart_record_usage,
1995				     PARSE_OPT_STOP_AT_NON_OPTION);
1996
1997		if (tchart.power_only && tchart.tasks_only) {
1998			pr_err("-P and -T options cannot be used at the same time.\n");
1999			return -1;
2000		}
2001
2002		if (tchart.io_only)
2003			return timechart__io_record(argc, argv);
2004		else
2005			return timechart__record(&tchart, argc, argv);
2006	} else if (argc)
2007		usage_with_options(timechart_usage, timechart_options);
2008
2009	setup_pager();
2010
2011	return __cmd_timechart(&tchart, output_name);
2012}
v4.10.11
 
   1/*
   2 * builtin-timechart.c - make an svg timechart of system activity
   3 *
   4 * (C) Copyright 2009 Intel Corporation
   5 *
   6 * Authors:
   7 *     Arjan van de Ven <arjan@linux.intel.com>
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License
  11 * as published by the Free Software Foundation; version 2
  12 * of the License.
  13 */
  14
  15#include <traceevent/event-parse.h>
 
  16
  17#include "builtin.h"
  18
  19#include "util/util.h"
  20
  21#include "util/color.h"
  22#include <linux/list.h>
  23#include "util/cache.h"
  24#include "util/evlist.h"
  25#include "util/evsel.h"
 
  26#include <linux/rbtree.h>
  27#include <linux/time64.h>
 
  28#include "util/symbol.h"
 
  29#include "util/callchain.h"
  30#include "util/strlist.h"
  31
  32#include "perf.h"
  33#include "util/header.h"
 
  34#include <subcmd/parse-options.h>
  35#include "util/parse-events.h"
  36#include "util/event.h"
  37#include "util/session.h"
  38#include "util/svghelper.h"
  39#include "util/tool.h"
  40#include "util/data.h"
  41#include "util/debug.h"
  42
  43#define SUPPORT_OLD_POWER_EVENTS 1
  44#define PWR_EVENT_EXIT -1
  45
  46struct per_pid;
  47struct power_event;
  48struct wake_event;
  49
  50struct timechart {
  51	struct perf_tool	tool;
  52	struct per_pid		*all_data;
  53	struct power_event	*power_events;
  54	struct wake_event	*wake_events;
  55	int			proc_num;
  56	unsigned int		numcpus;
  57	u64			min_freq,	/* Lowest CPU frequency seen */
  58				max_freq,	/* Highest CPU frequency seen */
  59				turbo_frequency,
  60				first_time, last_time;
  61	bool			power_only,
  62				tasks_only,
  63				with_backtrace,
  64				topology;
  65	bool			force;
  66	/* IO related settings */
  67	bool			io_only,
  68				skip_eagain;
  69	u64			io_events;
  70	u64			min_time,
  71				merge_dist;
  72};
  73
  74struct per_pidcomm;
  75struct cpu_sample;
  76struct io_sample;
  77
  78/*
  79 * Datastructure layout:
   80 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
   81 * Each "pid" entry has a list of "comm"s.
   82 *	This is because we want to track different programs separately, even
   83 *	though exec will reuse the original pid (by design).
   84 * Each comm has a list of samples that will be used to draw
   85 * the final graph.
  86 */
  87
  88struct per_pid {
  89	struct per_pid *next;
  90
  91	int		pid;
  92	int		ppid;
  93
  94	u64		start_time;
  95	u64		end_time;
  96	u64		total_time;
  97	u64		total_bytes;
  98	int		display;
  99
 100	struct per_pidcomm *all;
 101	struct per_pidcomm *current;
 102};
 103
 104
 105struct per_pidcomm {
 106	struct per_pidcomm *next;
 107
 108	u64		start_time;
 109	u64		end_time;
 110	u64		total_time;
 111	u64		max_bytes;
 112	u64		total_bytes;
 113
 114	int		Y;
 115	int		display;
 116
 117	long		state;
 118	u64		state_since;
 119
 120	char		*comm;
 121
 122	struct cpu_sample *samples;
 123	struct io_sample  *io_samples;
 124};
 125
 126struct sample_wrapper {
 127	struct sample_wrapper *next;
 128
 129	u64		timestamp;
 130	unsigned char	data[0];
 131};
 132
 133#define TYPE_NONE	0
 134#define TYPE_RUNNING	1
 135#define TYPE_WAITING	2
 136#define TYPE_BLOCKED	3
 137
 138struct cpu_sample {
 139	struct cpu_sample *next;
 140
 141	u64 start_time;
 142	u64 end_time;
 143	int type;
 144	int cpu;
 145	const char *backtrace;
 146};
 147
 148enum {
 149	IOTYPE_READ,
 150	IOTYPE_WRITE,
 151	IOTYPE_SYNC,
 152	IOTYPE_TX,
 153	IOTYPE_RX,
 154	IOTYPE_POLL,
 155};
 156
 157struct io_sample {
 158	struct io_sample *next;
 159
 160	u64 start_time;
 161	u64 end_time;
 162	u64 bytes;
 163	int type;
 164	int fd;
 165	int err;
 166	int merges;
 167};
 168
 169#define CSTATE 1
 170#define PSTATE 2
 171
 172struct power_event {
 173	struct power_event *next;
 174	int type;
 175	int state;
 176	u64 start_time;
 177	u64 end_time;
 178	int cpu;
 179};
 180
 181struct wake_event {
 182	struct wake_event *next;
 183	int waker;
 184	int wakee;
 185	u64 time;
 186	const char *backtrace;
 187};
 188
 189struct process_filter {
 190	char			*name;
 191	int			pid;
 192	struct process_filter	*next;
 193};
 194
 195static struct process_filter *process_filter;
 196
 197
 198static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
 199{
 200	struct per_pid *cursor = tchart->all_data;
 201
 202	while (cursor) {
 203		if (cursor->pid == pid)
 204			return cursor;
 205		cursor = cursor->next;
 206	}
 207	cursor = zalloc(sizeof(*cursor));
 208	assert(cursor != NULL);
 209	cursor->pid = pid;
 210	cursor->next = tchart->all_data;
 211	tchart->all_data = cursor;
 212	return cursor;
 213}
 214
 215static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
 216{
 217	struct per_pid *p;
 218	struct per_pidcomm *c;
 219	p = find_create_pid(tchart, pid);
 220	c = p->all;
 221	while (c) {
 222		if (c->comm && strcmp(c->comm, comm) == 0) {
 223			p->current = c;
 224			return;
 225		}
 226		if (!c->comm) {
 227			c->comm = strdup(comm);
 228			p->current = c;
 229			return;
 230		}
 231		c = c->next;
 232	}
 233	c = zalloc(sizeof(*c));
 234	assert(c != NULL);
 235	c->comm = strdup(comm);
 236	p->current = c;
 237	c->next = p->all;
 238	p->all = c;
 239}
 240
 241static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
 242{
 243	struct per_pid *p, *pp;
 244	p = find_create_pid(tchart, pid);
 245	pp = find_create_pid(tchart, ppid);
 246	p->ppid = ppid;
 247	if (pp->current && pp->current->comm && !p->current)
 248		pid_set_comm(tchart, pid, pp->current->comm);
 249
 250	p->start_time = timestamp;
 251	if (p->current && !p->current->start_time) {
 252		p->current->start_time = timestamp;
 253		p->current->state_since = timestamp;
 254	}
 255}
 256
 257static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
 258{
 259	struct per_pid *p;
 260	p = find_create_pid(tchart, pid);
 261	p->end_time = timestamp;
 262	if (p->current)
 263		p->current->end_time = timestamp;
 264}
 265
 266static void pid_put_sample(struct timechart *tchart, int pid, int type,
 267			   unsigned int cpu, u64 start, u64 end,
 268			   const char *backtrace)
 269{
 270	struct per_pid *p;
 271	struct per_pidcomm *c;
 272	struct cpu_sample *sample;
 273
 274	p = find_create_pid(tchart, pid);
 275	c = p->current;
 276	if (!c) {
 277		c = zalloc(sizeof(*c));
 278		assert(c != NULL);
 279		p->current = c;
 280		c->next = p->all;
 281		p->all = c;
 282	}
 283
 284	sample = zalloc(sizeof(*sample));
 285	assert(sample != NULL);
 286	sample->start_time = start;
 287	sample->end_time = end;
 288	sample->type = type;
 289	sample->next = c->samples;
 290	sample->cpu = cpu;
 291	sample->backtrace = backtrace;
 292	c->samples = sample;
 293
 294	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 295		c->total_time += (end-start);
 296		p->total_time += (end-start);
 297	}
 298
 299	if (c->start_time == 0 || c->start_time > start)
 300		c->start_time = start;
 301	if (p->start_time == 0 || p->start_time > start)
 302		p->start_time = start;
 303}
 304
 305#define MAX_CPUS 4096
 306
 307static u64 cpus_cstate_start_times[MAX_CPUS];
 308static int cpus_cstate_state[MAX_CPUS];
 309static u64 cpus_pstate_start_times[MAX_CPUS];
 310static u64 cpus_pstate_state[MAX_CPUS];
 311
 312static int process_comm_event(struct perf_tool *tool,
 313			      union perf_event *event,
 314			      struct perf_sample *sample __maybe_unused,
 315			      struct machine *machine __maybe_unused)
 316{
 317	struct timechart *tchart = container_of(tool, struct timechart, tool);
 318	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
 319	return 0;
 320}
 321
 322static int process_fork_event(struct perf_tool *tool,
 323			      union perf_event *event,
 324			      struct perf_sample *sample __maybe_unused,
 325			      struct machine *machine __maybe_unused)
 326{
 327	struct timechart *tchart = container_of(tool, struct timechart, tool);
 328	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
 329	return 0;
 330}
 331
 332static int process_exit_event(struct perf_tool *tool,
 333			      union perf_event *event,
 334			      struct perf_sample *sample __maybe_unused,
 335			      struct machine *machine __maybe_unused)
 336{
 337	struct timechart *tchart = container_of(tool, struct timechart, tool);
 338	pid_exit(tchart, event->fork.pid, event->fork.time);
 339	return 0;
 340}
 341
 342#ifdef SUPPORT_OLD_POWER_EVENTS
 343static int use_old_power_events;
 344#endif
 345
 346static void c_state_start(int cpu, u64 timestamp, int state)
 347{
 348	cpus_cstate_start_times[cpu] = timestamp;
 349	cpus_cstate_state[cpu] = state;
 350}
 351
 352static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
 353{
 354	struct power_event *pwr = zalloc(sizeof(*pwr));
 355
 356	if (!pwr)
 357		return;
 358
 359	pwr->state = cpus_cstate_state[cpu];
 360	pwr->start_time = cpus_cstate_start_times[cpu];
 361	pwr->end_time = timestamp;
 362	pwr->cpu = cpu;
 363	pwr->type = CSTATE;
 364	pwr->next = tchart->power_events;
 365
 366	tchart->power_events = pwr;
 367}
 368
 369static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
 370{
 371	struct power_event *pwr;
 372
 373	if (new_freq > 8000000) /* detect invalid data */
 374		return;
 375
 376	pwr = zalloc(sizeof(*pwr));
 377	if (!pwr)
 378		return;
 379
 380	pwr->state = cpus_pstate_state[cpu];
 381	pwr->start_time = cpus_pstate_start_times[cpu];
 382	pwr->end_time = timestamp;
 383	pwr->cpu = cpu;
 384	pwr->type = PSTATE;
 385	pwr->next = tchart->power_events;
 386
 387	if (!pwr->start_time)
 388		pwr->start_time = tchart->first_time;
 389
 390	tchart->power_events = pwr;
 391
 392	cpus_pstate_state[cpu] = new_freq;
 393	cpus_pstate_start_times[cpu] = timestamp;
 394
 395	if ((u64)new_freq > tchart->max_freq)
 396		tchart->max_freq = new_freq;
 397
 398	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
 399		tchart->min_freq = new_freq;
 400
 401	if (new_freq == tchart->max_freq - 1000)
 402		tchart->turbo_frequency = tchart->max_freq;
 403}
 404
 405static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
 406			 int waker, int wakee, u8 flags, const char *backtrace)
 407{
 408	struct per_pid *p;
 409	struct wake_event *we = zalloc(sizeof(*we));
 410
 411	if (!we)
 412		return;
 413
 414	we->time = timestamp;
 415	we->waker = waker;
 416	we->backtrace = backtrace;
 417
 418	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 419		we->waker = -1;
 420
 421	we->wakee = wakee;
 422	we->next = tchart->wake_events;
 423	tchart->wake_events = we;
 424	p = find_create_pid(tchart, we->wakee);
 425
 426	if (p && p->current && p->current->state == TYPE_NONE) {
 427		p->current->state_since = timestamp;
 428		p->current->state = TYPE_WAITING;
 429	}
 430	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 431		pid_put_sample(tchart, p->pid, p->current->state, cpu,
 432			       p->current->state_since, timestamp, NULL);
 433		p->current->state_since = timestamp;
 434		p->current->state = TYPE_WAITING;
 435	}
 436}
 437
 438static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
 439			 int prev_pid, int next_pid, u64 prev_state,
 440			 const char *backtrace)
 441{
 442	struct per_pid *p = NULL, *prev_p;
 443
 444	prev_p = find_create_pid(tchart, prev_pid);
 445
 446	p = find_create_pid(tchart, next_pid);
 447
 448	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 449		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
 450			       prev_p->current->state_since, timestamp,
 451			       backtrace);
 452	if (p && p->current) {
 453		if (p->current->state != TYPE_NONE)
 454			pid_put_sample(tchart, next_pid, p->current->state, cpu,
 455				       p->current->state_since, timestamp,
 456				       backtrace);
 457
 458		p->current->state_since = timestamp;
 459		p->current->state = TYPE_RUNNING;
 460	}
 461
 462	if (prev_p->current) {
 463		prev_p->current->state = TYPE_NONE;
 464		prev_p->current->state_since = timestamp;
 465		if (prev_state & 2)
 466			prev_p->current->state = TYPE_BLOCKED;
 467		if (prev_state == 0)
 468			prev_p->current->state = TYPE_WAITING;
 469	}
 470}
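
A condensed editorial summary of the per-task state machine driven by sched_wakeup() and sched_switch() above, written as a comment (not part of the original file):

/*
 * Per per_pidcomm state transitions (editorial summary):
 *
 *   wakeup:      NONE    -> WAITING
 *                BLOCKED -> WAITING  (the pending TYPE_BLOCKED sample is emitted)
 *   switch in:   pending WAITING/BLOCKED sample emitted, state -> RUNNING
 *   switch out:  TYPE_RUNNING sample emitted, then
 *                prev_state & 2  -> BLOCKED  (uninterruptible sleep)
 *                prev_state == 0 -> WAITING  (still runnable, was preempted)
 *                otherwise       -> NONE
 */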
 471
 472static const char *cat_backtrace(union perf_event *event,
 473				 struct perf_sample *sample,
 474				 struct machine *machine)
 475{
 476	struct addr_location al;
 477	unsigned int i;
 478	char *p = NULL;
 479	size_t p_len;
 480	u8 cpumode = PERF_RECORD_MISC_USER;
 481	struct addr_location tal;
 482	struct ip_callchain *chain = sample->callchain;
 483	FILE *f = open_memstream(&p, &p_len);
 484
 485	if (!f) {
 486		perror("open_memstream error");
 487		return NULL;
 488	}
 489
 490	if (!chain)
 491		goto exit;
 492
 493	if (machine__resolve(machine, &al, sample) < 0) {
 494		fprintf(stderr, "problem processing %d event, skipping it.\n",
 495			event->header.type);
 496		goto exit;
 497	}
 498
 499	for (i = 0; i < chain->nr; i++) {
 500		u64 ip;
 501
 502		if (callchain_param.order == ORDER_CALLEE)
 503			ip = chain->ips[i];
 504		else
 505			ip = chain->ips[chain->nr - i - 1];
 506
 507		if (ip >= PERF_CONTEXT_MAX) {
 508			switch (ip) {
 509			case PERF_CONTEXT_HV:
 510				cpumode = PERF_RECORD_MISC_HYPERVISOR;
 511				break;
 512			case PERF_CONTEXT_KERNEL:
 513				cpumode = PERF_RECORD_MISC_KERNEL;
 514				break;
 515			case PERF_CONTEXT_USER:
 516				cpumode = PERF_RECORD_MISC_USER;
 517				break;
 518			default:
 519				pr_debug("invalid callchain context: "
 520					 "%"PRId64"\n", (s64) ip);
 521
 522				/*
 523				 * It seems the callchain is corrupted.
 524				 * Discard all.
 525				 */
 526				zfree(&p);
 527				goto exit_put;
 528			}
 529			continue;
 530		}
 531
 532		tal.filtered = 0;
 533		thread__find_addr_location(al.thread, cpumode,
 534					   MAP__FUNCTION, ip, &tal);
 535
 536		if (tal.sym)
 537			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
 538				tal.sym->name);
 539		else
 540			fprintf(f, "..... %016" PRIx64 "\n", ip);
 541	}
 542exit_put:
 543	addr_location__put(&al);
 544exit:
 545	fclose(f);
 546
 547	return p;
 548}
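
cat_backtrace() relies on the POSIX.1-2008 open_memstream() idiom: the FILE * is backed by a heap buffer that is finalized by fclose(). A minimal sketch of that pattern with illustrative names (not from this file):

#include <stdio.h>
#include <stdlib.h>

static char *format_into_string(void)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *f = open_memstream(&buf, &len);

	if (!f)
		return NULL;
	fprintf(f, "..... %016llx %s\n", 0xffffffff81000000ULL, "example_symbol");
	fclose(f);	/* flushes and finalizes buf/len */
	return buf;	/* owned by the caller; free() when no longer needed */
}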
 549
 550typedef int (*tracepoint_handler)(struct timechart *tchart,
 551				  struct perf_evsel *evsel,
 552				  struct perf_sample *sample,
 553				  const char *backtrace);
 554
 555static int process_sample_event(struct perf_tool *tool,
 556				union perf_event *event,
 557				struct perf_sample *sample,
 558				struct perf_evsel *evsel,
 559				struct machine *machine)
 560{
 561	struct timechart *tchart = container_of(tool, struct timechart, tool);
 562
 563	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
 564		if (!tchart->first_time || tchart->first_time > sample->time)
 565			tchart->first_time = sample->time;
 566		if (tchart->last_time < sample->time)
 567			tchart->last_time = sample->time;
 568	}
 569
 570	if (evsel->handler != NULL) {
 571		tracepoint_handler f = evsel->handler;
 572		return f(tchart, evsel, sample,
 573			 cat_backtrace(event, sample, machine));
 574	}
 575
 576	return 0;
 577}
 578
 579static int
 580process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
 581			struct perf_evsel *evsel,
 582			struct perf_sample *sample,
 583			const char *backtrace __maybe_unused)
 584{
 585	u32 state = perf_evsel__intval(evsel, sample, "state");
 586	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 587
 588	if (state == (u32)PWR_EVENT_EXIT)
 589		c_state_end(tchart, cpu_id, sample->time);
 590	else
 591		c_state_start(cpu_id, sample->time, state);
 592	return 0;
 593}
 594
 595static int
 596process_sample_cpu_frequency(struct timechart *tchart,
 597			     struct perf_evsel *evsel,
 598			     struct perf_sample *sample,
 599			     const char *backtrace __maybe_unused)
 600{
 601	u32 state = perf_evsel__intval(evsel, sample, "state");
 602	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 603
 604	p_state_change(tchart, cpu_id, sample->time, state);
 605	return 0;
 606}
 607
 608static int
 609process_sample_sched_wakeup(struct timechart *tchart,
 610			    struct perf_evsel *evsel,
 611			    struct perf_sample *sample,
 612			    const char *backtrace)
 613{
 614	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
 615	int waker = perf_evsel__intval(evsel, sample, "common_pid");
 616	int wakee = perf_evsel__intval(evsel, sample, "pid");
 617
 618	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
 619	return 0;
 620}
 621
 622static int
 623process_sample_sched_switch(struct timechart *tchart,
 624			    struct perf_evsel *evsel,
 625			    struct perf_sample *sample,
 626			    const char *backtrace)
 627{
 628	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 629	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 630	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 631
 632	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
 633		     prev_state, backtrace);
 634	return 0;
 635}
 636
 637#ifdef SUPPORT_OLD_POWER_EVENTS
 638static int
 639process_sample_power_start(struct timechart *tchart __maybe_unused,
 640			   struct perf_evsel *evsel,
 641			   struct perf_sample *sample,
 642			   const char *backtrace __maybe_unused)
 643{
 644	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 645	u64 value = perf_evsel__intval(evsel, sample, "value");
 646
 647	c_state_start(cpu_id, sample->time, value);
 648	return 0;
 649}
 650
 651static int
 652process_sample_power_end(struct timechart *tchart,
 653			 struct perf_evsel *evsel __maybe_unused,
 654			 struct perf_sample *sample,
 655			 const char *backtrace __maybe_unused)
 656{
 657	c_state_end(tchart, sample->cpu, sample->time);
 658	return 0;
 659}
 660
 661static int
 662process_sample_power_frequency(struct timechart *tchart,
 663			       struct perf_evsel *evsel,
 664			       struct perf_sample *sample,
 665			       const char *backtrace __maybe_unused)
 666{
 667	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 668	u64 value = perf_evsel__intval(evsel, sample, "value");
 669
 670	p_state_change(tchart, cpu_id, sample->time, value);
 671	return 0;
 672}
 673#endif /* SUPPORT_OLD_POWER_EVENTS */
 674
 675/*
 676 * After the last sample we need to wrap up the current C/P state
 677 * and close out each CPU for these.
 678 */
 679static void end_sample_processing(struct timechart *tchart)
 680{
 681	u64 cpu;
 682	struct power_event *pwr;
 683
 684	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
 685		/* C state */
 686#if 0
 687		pwr = zalloc(sizeof(*pwr));
 688		if (!pwr)
 689			return;
 690
 691		pwr->state = cpus_cstate_state[cpu];
 692		pwr->start_time = cpus_cstate_start_times[cpu];
 693		pwr->end_time = tchart->last_time;
 694		pwr->cpu = cpu;
 695		pwr->type = CSTATE;
 696		pwr->next = tchart->power_events;
 697
 698		tchart->power_events = pwr;
 699#endif
 700		/* P state */
 701
 702		pwr = zalloc(sizeof(*pwr));
 703		if (!pwr)
 704			return;
 705
 706		pwr->state = cpus_pstate_state[cpu];
 707		pwr->start_time = cpus_pstate_start_times[cpu];
 708		pwr->end_time = tchart->last_time;
 709		pwr->cpu = cpu;
 710		pwr->type = PSTATE;
 711		pwr->next = tchart->power_events;
 712
 713		if (!pwr->start_time)
 714			pwr->start_time = tchart->first_time;
 715		if (!pwr->state)
 716			pwr->state = tchart->min_freq;
 717		tchart->power_events = pwr;
 718	}
 719}
 720
 721static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
 722			       u64 start, int fd)
 723{
 724	struct per_pid *p = find_create_pid(tchart, pid);
 725	struct per_pidcomm *c = p->current;
 726	struct io_sample *sample;
 727	struct io_sample *prev;
 728
 729	if (!c) {
 730		c = zalloc(sizeof(*c));
 731		if (!c)
 732			return -ENOMEM;
 734		c->next = p->all;
 736	}
 737
 738	prev = c->io_samples;
 739
 740	if (prev && prev->start_time && !prev->end_time) {
 741		pr_warning("Skip invalid start event: "
 742			   "previous event already started!\n");
 743
 744		/* remove previous event that has been started,
 745		 * we are not sure we will ever get an end for it */
 746		c->io_samples = prev->next;
 747		free(prev);
 748		return 0;
 749	}
 750
 751	sample = zalloc(sizeof(*sample));
 752	if (!sample)
 753		return -ENOMEM;
 754	sample->start_time = start;
 755	sample->type = type;
 756	sample->fd = fd;
 757	sample->next = c->io_samples;
 758	c->io_samples = sample;
 759
 760	if (c->start_time == 0 || c->start_time > start)
 761		c->start_time = start;
 762
 763	return 0;
 764}
 765
 766static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
 767			     u64 end, long ret)
 768{
 769	struct per_pid *p = find_create_pid(tchart, pid);
 770	struct per_pidcomm *c = p->current;
 771	struct io_sample *sample, *prev;
 772
 773	if (!c) {
 774		pr_warning("Invalid pidcomm!\n");
 775		return -1;
 776	}
 777
 778	sample = c->io_samples;
 779
 780	if (!sample) /* skip partially captured events */
 781		return 0;
 782
 783	if (sample->end_time) {
 784		pr_warning("Skip invalid end event: "
 785			   "previous event already ended!\n");
 786		return 0;
 787	}
 788
 789	if (sample->type != type) {
 790		pr_warning("Skip invalid end event: invalid event type!\n");
 791		return 0;
 792	}
 793
 794	sample->end_time = end;
 795	prev = sample->next;
 796
 797	/* we want to be able to see small and fast transfers, so make them
 798	 * at least min_time long, but don't overlap them */
 799	if (sample->end_time - sample->start_time < tchart->min_time)
 800		sample->end_time = sample->start_time + tchart->min_time;
 801	if (prev && sample->start_time < prev->end_time) {
 802		if (prev->err) /* try to make errors more visible */
 803			sample->start_time = prev->end_time;
 804		else
 805			prev->end_time = sample->start_time;
 806	}
 807
 808	if (ret < 0) {
 809		sample->err = ret;
 810	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
 811		   type == IOTYPE_TX || type == IOTYPE_RX) {
 812
 813		if ((u64)ret > c->max_bytes)
 814			c->max_bytes = ret;
 815
 816		c->total_bytes += ret;
 817		p->total_bytes += ret;
 818		sample->bytes = ret;
 819	}
 820
 821	/* merge two requests to make svg smaller and render-friendly */
 822	if (prev &&
 823	    prev->type == sample->type &&
 824	    prev->err == sample->err &&
 825	    prev->fd == sample->fd &&
 826	    prev->end_time + tchart->merge_dist >= sample->start_time) {
 827
 828		sample->bytes += prev->bytes;
 829		sample->merges += prev->merges + 1;
 830
 831		sample->start_time = prev->start_time;
 832		sample->next = prev->next;
 833		free(prev);
 834
 835		if (!sample->err && sample->bytes > c->max_bytes)
 836			c->max_bytes = sample->bytes;
 837	}
 838
 839	tchart->io_events++;
 840
 841	return 0;
 842}
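
A worked example (editorial) of the stretching and merging rules above, using the defaults set in cmd_timechart() below, min_time = 1 ms and merge_dist = 1000 ns:

/*
 * - a read(2) on fd 3 that completes in 40 us is stretched to a 1 ms box so
 *   it stays visible in the SVG;
 * - a second successful read(2) on the same fd that starts within 1 us of the
 *   previous sample's end is folded into it: bytes are summed, merges is
 *   incremented, and a single wider box is drawn instead of two.
 */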
 843
 844static int
 845process_enter_read(struct timechart *tchart,
 846		   struct perf_evsel *evsel,
 847		   struct perf_sample *sample)
 848{
 849	long fd = perf_evsel__intval(evsel, sample, "fd");
 850	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
 851				   sample->time, fd);
 852}
 853
 854static int
 855process_exit_read(struct timechart *tchart,
 856		  struct perf_evsel *evsel,
 857		  struct perf_sample *sample)
 858{
 859	long ret = perf_evsel__intval(evsel, sample, "ret");
 860	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
 861				 sample->time, ret);
 862}
 863
 864static int
 865process_enter_write(struct timechart *tchart,
 866		    struct perf_evsel *evsel,
 867		    struct perf_sample *sample)
 868{
 869	long fd = perf_evsel__intval(evsel, sample, "fd");
 870	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 871				   sample->time, fd);
 872}
 873
 874static int
 875process_exit_write(struct timechart *tchart,
 876		   struct perf_evsel *evsel,
 877		   struct perf_sample *sample)
 878{
 879	long ret = perf_evsel__intval(evsel, sample, "ret");
 880	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 881				 sample->time, ret);
 882}
 883
 884static int
 885process_enter_sync(struct timechart *tchart,
 886		   struct perf_evsel *evsel,
 887		   struct perf_sample *sample)
 888{
 889	long fd = perf_evsel__intval(evsel, sample, "fd");
 890	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 891				   sample->time, fd);
 892}
 893
 894static int
 895process_exit_sync(struct timechart *tchart,
 896		  struct perf_evsel *evsel,
 897		  struct perf_sample *sample)
 898{
 899	long ret = perf_evsel__intval(evsel, sample, "ret");
 900	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 901				 sample->time, ret);
 902}
 903
 904static int
 905process_enter_tx(struct timechart *tchart,
 906		 struct perf_evsel *evsel,
 907		 struct perf_sample *sample)
 908{
 909	long fd = perf_evsel__intval(evsel, sample, "fd");
 910	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
 911				   sample->time, fd);
 912}
 913
 914static int
 915process_exit_tx(struct timechart *tchart,
 916		struct perf_evsel *evsel,
 917		struct perf_sample *sample)
 918{
 919	long ret = perf_evsel__intval(evsel, sample, "ret");
 920	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
 921				 sample->time, ret);
 922}
 923
 924static int
 925process_enter_rx(struct timechart *tchart,
 926		 struct perf_evsel *evsel,
 927		 struct perf_sample *sample)
 928{
 929	long fd = perf_evsel__intval(evsel, sample, "fd");
 930	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
 931				   sample->time, fd);
 932}
 933
 934static int
 935process_exit_rx(struct timechart *tchart,
 936		struct perf_evsel *evsel,
 937		struct perf_sample *sample)
 938{
 939	long ret = perf_evsel__intval(evsel, sample, "ret");
 940	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
 941				 sample->time, ret);
 942}
 943
 944static int
 945process_enter_poll(struct timechart *tchart,
 946		   struct perf_evsel *evsel,
 947		   struct perf_sample *sample)
 948{
 949	long fd = perf_evsel__intval(evsel, sample, "fd");
 950	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
 951				   sample->time, fd);
 952}
 953
 954static int
 955process_exit_poll(struct timechart *tchart,
 956		  struct perf_evsel *evsel,
 957		  struct perf_sample *sample)
 958{
 959	long ret = perf_evsel__intval(evsel, sample, "ret");
 960	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
 961				 sample->time, ret);
 962}
 963
 964/*
 965 * Sort the pid datastructure
 966 */
 967static void sort_pids(struct timechart *tchart)
 968{
 969	struct per_pid *new_list, *p, *cursor, *prev;
 970	/* sort by ppid first, then by pid, lowest to highest */
 971
 972	new_list = NULL;
 973
 974	while (tchart->all_data) {
 975		p = tchart->all_data;
 976		tchart->all_data = p->next;
 977		p->next = NULL;
 978
 979		if (new_list == NULL) {
 980			new_list = p;
 981			p->next = NULL;
 982			continue;
 983		}
 984		prev = NULL;
 985		cursor = new_list;
 986		while (cursor) {
 987			if (cursor->ppid > p->ppid ||
 988				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 989				/* must insert before */
 990				if (prev) {
 991					p->next = prev->next;
 992					prev->next = p;
 993					cursor = NULL;
 994					continue;
 995				} else {
 996					p->next = new_list;
 997					new_list = p;
 998					cursor = NULL;
 999					continue;
1000				}
1001			}
1002
1003			prev = cursor;
1004			cursor = cursor->next;
1005			if (!cursor)
1006				prev->next = p;
1007		}
1008	}
1009	tchart->all_data = new_list;
1010}
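
The insertion sort above orders tasks by (ppid, pid), both ascending; an editorial example:

/*
 * Entries with (ppid, pid) = (1, 300), (0, 2), (1, 150) leave sort_pids()
 * as (0, 2), (1, 150), (1, 300): siblings are grouped under their parent
 * and drawn in pid order.
 */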
1011
1012
1013static void draw_c_p_states(struct timechart *tchart)
1014{
1015	struct power_event *pwr;
1016	pwr = tchart->power_events;
1017
1018	/*
1019	 * two pass drawing so that the P state bars are on top of the C state blocks
1020	 */
1021	while (pwr) {
1022		if (pwr->type == CSTATE)
1023			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1024		pwr = pwr->next;
1025	}
1026
1027	pwr = tchart->power_events;
1028	while (pwr) {
1029		if (pwr->type == PSTATE) {
1030			if (!pwr->state)
1031				pwr->state = tchart->min_freq;
1032			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1033		}
1034		pwr = pwr->next;
1035	}
1036}
1037
1038static void draw_wakeups(struct timechart *tchart)
1039{
1040	struct wake_event *we;
1041	struct per_pid *p;
1042	struct per_pidcomm *c;
1043
1044	we = tchart->wake_events;
1045	while (we) {
1046		int from = 0, to = 0;
1047		char *task_from = NULL, *task_to = NULL;
1048
1049		/* locate the column of the waker and wakee */
1050		p = tchart->all_data;
1051		while (p) {
1052			if (p->pid == we->waker || p->pid == we->wakee) {
1053				c = p->all;
1054				while (c) {
1055					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
1056						if (p->pid == we->waker && !from) {
1057							from = c->Y;
1058							task_from = strdup(c->comm);
1059						}
1060						if (p->pid == we->wakee && !to) {
1061							to = c->Y;
1062							task_to = strdup(c->comm);
1063						}
1064					}
1065					c = c->next;
1066				}
1067				c = p->all;
1068				while (c) {
1069					if (p->pid == we->waker && !from) {
1070						from = c->Y;
1071						task_from = strdup(c->comm);
1072					}
1073					if (p->pid == we->wakee && !to) {
1074						to = c->Y;
1075						task_to = strdup(c->comm);
1076					}
1077					c = c->next;
1078				}
1079			}
1080			p = p->next;
1081		}
1082
1083		if (!task_from) {
1084			task_from = malloc(40);
1085			sprintf(task_from, "[%i]", we->waker);
1086		}
1087		if (!task_to) {
1088			task_to = malloc(40);
1089			sprintf(task_to, "[%i]", we->wakee);
1090		}
1091
1092		if (we->waker == -1)
1093			svg_interrupt(we->time, to, we->backtrace);
1094		else if (from && to && abs(from - to) == 1)
1095			svg_wakeline(we->time, from, to, we->backtrace);
1096		else
1097			svg_partial_wakeline(we->time, from, task_from, to,
1098					     task_to, we->backtrace);
1099		we = we->next;
1100
1101		free(task_from);
1102		free(task_to);
1103	}
1104}
1105
1106static void draw_cpu_usage(struct timechart *tchart)
1107{
1108	struct per_pid *p;
1109	struct per_pidcomm *c;
1110	struct cpu_sample *sample;
1111	p = tchart->all_data;
1112	while (p) {
1113		c = p->all;
1114		while (c) {
1115			sample = c->samples;
1116			while (sample) {
1117				if (sample->type == TYPE_RUNNING) {
1118					svg_process(sample->cpu,
1119						    sample->start_time,
1120						    sample->end_time,
1121						    p->pid,
1122						    c->comm,
1123						    sample->backtrace);
1124				}
1125
1126				sample = sample->next;
1127			}
1128			c = c->next;
1129		}
1130		p = p->next;
1131	}
1132}
1133
1134static void draw_io_bars(struct timechart *tchart)
1135{
1136	const char *suf;
1137	double bytes;
1138	char comm[256];
1139	struct per_pid *p;
1140	struct per_pidcomm *c;
1141	struct io_sample *sample;
1142	int Y = 1;
1143
1144	p = tchart->all_data;
1145	while (p) {
1146		c = p->all;
1147		while (c) {
1148			if (!c->display) {
1149				c->Y = 0;
1150				c = c->next;
1151				continue;
1152			}
1153
1154			svg_box(Y, c->start_time, c->end_time, "process3");
1155			sample = c->io_samples;
1156			for (sample = c->io_samples; sample; sample = sample->next) {
1157				double h = (double)sample->bytes / c->max_bytes;
1158
1159				if (tchart->skip_eagain &&
1160				    sample->err == -EAGAIN)
1161					continue;
1162
1163				if (sample->err)
1164					h = 1;
1165
1166				if (sample->type == IOTYPE_SYNC)
1167					svg_fbox(Y,
1168						sample->start_time,
1169						sample->end_time,
1170						1,
1171						sample->err ? "error" : "sync",
1172						sample->fd,
1173						sample->err,
1174						sample->merges);
1175				else if (sample->type == IOTYPE_POLL)
1176					svg_fbox(Y,
1177						sample->start_time,
1178						sample->end_time,
1179						1,
1180						sample->err ? "error" : "poll",
1181						sample->fd,
1182						sample->err,
1183						sample->merges);
1184				else if (sample->type == IOTYPE_READ)
1185					svg_ubox(Y,
1186						sample->start_time,
1187						sample->end_time,
1188						h,
1189						sample->err ? "error" : "disk",
1190						sample->fd,
1191						sample->err,
1192						sample->merges);
1193				else if (sample->type == IOTYPE_WRITE)
1194					svg_lbox(Y,
1195						sample->start_time,
1196						sample->end_time,
1197						h,
1198						sample->err ? "error" : "disk",
1199						sample->fd,
1200						sample->err,
1201						sample->merges);
1202				else if (sample->type == IOTYPE_RX)
1203					svg_ubox(Y,
1204						sample->start_time,
1205						sample->end_time,
1206						h,
1207						sample->err ? "error" : "net",
1208						sample->fd,
1209						sample->err,
1210						sample->merges);
1211				else if (sample->type == IOTYPE_TX)
1212					svg_lbox(Y,
1213						sample->start_time,
1214						sample->end_time,
1215						h,
1216						sample->err ? "error" : "net",
1217						sample->fd,
1218						sample->err,
1219						sample->merges);
1220			}
1221
1222			suf = "";
1223			bytes = c->total_bytes;
1224			if (bytes > 1024) {
1225				bytes = bytes / 1024;
1226				suf = "K";
1227			}
1228			if (bytes > 1024) {
1229				bytes = bytes / 1024;
1230				suf = "M";
1231			}
1232			if (bytes > 1024) {
1233				bytes = bytes / 1024;
1234				suf = "G";
1235			}
1236
1237
1238			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
1239			svg_text(Y, c->start_time, comm);
1240
1241			c->Y = Y;
1242			Y++;
1243			c = c->next;
1244		}
1245		p = p->next;
1246	}
1247}
1248
1249static void draw_process_bars(struct timechart *tchart)
1250{
1251	struct per_pid *p;
1252	struct per_pidcomm *c;
1253	struct cpu_sample *sample;
1254	int Y = 0;
1255
1256	Y = 2 * tchart->numcpus + 2;
1257
1258	p = tchart->all_data;
1259	while (p) {
1260		c = p->all;
1261		while (c) {
1262			if (!c->display) {
1263				c->Y = 0;
1264				c = c->next;
1265				continue;
1266			}
1267
1268			svg_box(Y, c->start_time, c->end_time, "process");
1269			sample = c->samples;
1270			while (sample) {
1271				if (sample->type == TYPE_RUNNING)
1272					svg_running(Y, sample->cpu,
1273						    sample->start_time,
1274						    sample->end_time,
1275						    sample->backtrace);
1276				if (sample->type == TYPE_BLOCKED)
1277					svg_blocked(Y, sample->cpu,
1278						    sample->start_time,
1279						    sample->end_time,
1280						    sample->backtrace);
1281				if (sample->type == TYPE_WAITING)
1282					svg_waiting(Y, sample->cpu,
1283						    sample->start_time,
1284						    sample->end_time,
1285						    sample->backtrace);
1286				sample = sample->next;
1287			}
1288
1289			if (c->comm) {
1290				char comm[256];
1291				if (c->total_time > 5000000000) /* 5 seconds */
1292					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
1293				else
1294					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
1295
1296				svg_text(Y, c->start_time, comm);
1297			}
1298			c->Y = Y;
1299			Y++;
1300			c = c->next;
1301		}
1302		p = p->next;
1303	}
1304}
1305
1306static void add_process_filter(const char *string)
1307{
1308	int pid = strtoull(string, NULL, 10);
1309	struct process_filter *filt = malloc(sizeof(*filt));
1310
1311	if (!filt)
1312		return;
1313
1314	filt->name = strdup(string);
1315	filt->pid  = pid;
1316	filt->next = process_filter;
1317
1318	process_filter = filt;
1319}
1320
1321static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
1322{
1323	struct process_filter *filt;
1324	if (!process_filter)
1325		return 1;
1326
1327	filt = process_filter;
1328	while (filt) {
1329		if (filt->pid && p->pid == filt->pid)
1330			return 1;
1331		if (strcmp(filt->name, c->comm) == 0)
1332			return 1;
1333		filt = filt->next;
1334	}
1335	return 0;
1336}
1337
1338static int determine_display_tasks_filtered(struct timechart *tchart)
1339{
1340	struct per_pid *p;
1341	struct per_pidcomm *c;
1342	int count = 0;
1343
1344	p = tchart->all_data;
1345	while (p) {
1346		p->display = 0;
1347		if (p->start_time == 1)
1348			p->start_time = tchart->first_time;
1349
1350		/* no exit marker, task kept running to the end */
1351		if (p->end_time == 0)
1352			p->end_time = tchart->last_time;
1353
1354		c = p->all;
1355
1356		while (c) {
1357			c->display = 0;
1358
1359			if (c->start_time == 1)
1360				c->start_time = tchart->first_time;
1361
1362			if (passes_filter(p, c)) {
1363				c->display = 1;
1364				p->display = 1;
1365				count++;
1366			}
1367
1368			if (c->end_time == 0)
1369				c->end_time = tchart->last_time;
1370
1371			c = c->next;
1372		}
1373		p = p->next;
1374	}
1375	return count;
1376}
1377
1378static int determine_display_tasks(struct timechart *tchart, u64 threshold)
1379{
1380	struct per_pid *p;
1381	struct per_pidcomm *c;
1382	int count = 0;
1383
1384	p = tchart->all_data;
1385	while (p) {
1386		p->display = 0;
1387		if (p->start_time == 1)
1388			p->start_time = tchart->first_time;
1389
1390		/* no exit marker, task kept running to the end */
1391		if (p->end_time == 0)
1392			p->end_time = tchart->last_time;
1393		if (p->total_time >= threshold)
1394			p->display = 1;
1395
1396		c = p->all;
1397
1398		while (c) {
1399			c->display = 0;
1400
1401			if (c->start_time == 1)
1402				c->start_time = tchart->first_time;
1403
1404			if (c->total_time >= threshold) {
1405				c->display = 1;
1406				count++;
1407			}
1408
1409			if (c->end_time == 0)
1410				c->end_time = tchart->last_time;
1411
1412			c = c->next;
1413		}
1414		p = p->next;
1415	}
1416	return count;
1417}
1418
1419static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
1420{
1421	struct per_pid *p;
1422	struct per_pidcomm *c;
1423	int count = 0;
1424
1425	p = timechart->all_data;
1426	while (p) {
1427		/* no exit marker, task kept running to the end */
1428		if (p->end_time == 0)
1429			p->end_time = timechart->last_time;
1430
1431		c = p->all;
1432
1433		while (c) {
1434			c->display = 0;
1435
1436			if (c->total_bytes >= threshold) {
1437				c->display = 1;
1438				count++;
1439			}
1440
1441			if (c->end_time == 0)
1442				c->end_time = timechart->last_time;
1443
1444			c = c->next;
1445		}
1446		p = p->next;
1447	}
1448	return count;
1449}
1450
1451#define BYTES_THRESH (1 * 1024 * 1024)
1452#define TIME_THRESH 10000000
1453
1454static void write_svg_file(struct timechart *tchart, const char *filename)
1455{
1456	u64 i;
1457	int count;
1458	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
1459
1460	if (tchart->power_only)
1461		tchart->proc_num = 0;
1462
1463	/* We'd like to show at least proc_num tasks;
1464	 * be less picky if we have fewer */
1465	do {
1466		if (process_filter)
1467			count = determine_display_tasks_filtered(tchart);
1468		else if (tchart->io_events)
1469			count = determine_display_io_tasks(tchart, thresh);
1470		else
1471			count = determine_display_tasks(tchart, thresh);
1472		thresh /= 10;
1473	} while (!process_filter && thresh && count < tchart->proc_num);
1474
1475	if (!tchart->proc_num)
1476		count = 0;
1477
1478	if (tchart->io_events) {
1479		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
1480
1481		svg_time_grid(0.5);
1482		svg_io_legenda();
1483
1484		draw_io_bars(tchart);
1485	} else {
1486		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1487
1488		svg_time_grid(0);
1489
1490		svg_legenda();
1491
1492		for (i = 0; i < tchart->numcpus; i++)
1493			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1494
1495		draw_cpu_usage(tchart);
1496		if (tchart->proc_num)
1497			draw_process_bars(tchart);
1498		if (!tchart->tasks_only)
1499			draw_c_p_states(tchart);
1500		if (tchart->proc_num)
1501			draw_wakeups(tchart);
1502	}
1503
1504	svg_close();
1505}
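
An editorial illustration of the threshold loop above:

/*
 * For a CPU chart the loop starts at TIME_THRESH (10 ms of accumulated run
 * time) and keeps dividing by ten (1 ms, 100 us, ...) until at least
 * proc_num tasks (default 15, see cmd_timechart() below) are selected or the
 * threshold reaches zero.  IO charts do the same starting from BYTES_THRESH
 * (1 MiB of transferred data).  With a --process filter the loop runs once.
 */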
1506
1507static int process_header(struct perf_file_section *section __maybe_unused,
1508			  struct perf_header *ph,
1509			  int feat,
1510			  int fd __maybe_unused,
1511			  void *data)
1512{
1513	struct timechart *tchart = data;
1514
1515	switch (feat) {
1516	case HEADER_NRCPUS:
1517		tchart->numcpus = ph->env.nr_cpus_avail;
1518		break;
1519
1520	case HEADER_CPU_TOPOLOGY:
1521		if (!tchart->topology)
1522			break;
1523
1524		if (svg_build_topology_map(ph->env.sibling_cores,
1525					   ph->env.nr_sibling_cores,
1526					   ph->env.sibling_threads,
1527					   ph->env.nr_sibling_threads))
1528			fprintf(stderr, "problem building topology\n");
1529		break;
1530
1531	default:
1532		break;
1533	}
1534
1535	return 0;
1536}
1537
1538static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1539{
1540	const struct perf_evsel_str_handler power_tracepoints[] = {
1541		{ "power:cpu_idle",		process_sample_cpu_idle },
1542		{ "power:cpu_frequency",	process_sample_cpu_frequency },
1543		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
1544		{ "sched:sched_switch",		process_sample_sched_switch },
1545#ifdef SUPPORT_OLD_POWER_EVENTS
1546		{ "power:power_start",		process_sample_power_start },
1547		{ "power:power_end",		process_sample_power_end },
1548		{ "power:power_frequency",	process_sample_power_frequency },
1549#endif
1550
1551		{ "syscalls:sys_enter_read",		process_enter_read },
1552		{ "syscalls:sys_enter_pread64",		process_enter_read },
1553		{ "syscalls:sys_enter_readv",		process_enter_read },
1554		{ "syscalls:sys_enter_preadv",		process_enter_read },
1555		{ "syscalls:sys_enter_write",		process_enter_write },
1556		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
1557		{ "syscalls:sys_enter_writev",		process_enter_write },
1558		{ "syscalls:sys_enter_pwritev",		process_enter_write },
1559		{ "syscalls:sys_enter_sync",		process_enter_sync },
1560		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
1561		{ "syscalls:sys_enter_fsync",		process_enter_sync },
1562		{ "syscalls:sys_enter_msync",		process_enter_sync },
1563		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
1564		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
1565		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
1566		{ "syscalls:sys_enter_sendto",		process_enter_tx },
1567		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
1568		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
1569		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
1570		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
1571		{ "syscalls:sys_enter_poll",		process_enter_poll },
1572		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
1573		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
1574		{ "syscalls:sys_enter_select",		process_enter_poll },
1575
1576		{ "syscalls:sys_exit_read",		process_exit_read },
1577		{ "syscalls:sys_exit_pread64",		process_exit_read },
1578		{ "syscalls:sys_exit_readv",		process_exit_read },
1579		{ "syscalls:sys_exit_preadv",		process_exit_read },
1580		{ "syscalls:sys_exit_write",		process_exit_write },
1581		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
1582		{ "syscalls:sys_exit_writev",		process_exit_write },
1583		{ "syscalls:sys_exit_pwritev",		process_exit_write },
1584		{ "syscalls:sys_exit_sync",		process_exit_sync },
1585		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
1586		{ "syscalls:sys_exit_fsync",		process_exit_sync },
1587		{ "syscalls:sys_exit_msync",		process_exit_sync },
1588		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
1589		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
1590		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
1591		{ "syscalls:sys_exit_sendto",		process_exit_tx },
1592		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
1593		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
1594		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
1595		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
1596		{ "syscalls:sys_exit_poll",		process_exit_poll },
1597		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
1598		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
1599		{ "syscalls:sys_exit_select",		process_exit_poll },
1600	};
1601	struct perf_data_file file = {
1602		.path = input_name,
1603		.mode = PERF_DATA_MODE_READ,
1604		.force = tchart->force,
1605	};
1606
1607	struct perf_session *session = perf_session__new(&file, false,
1608							 &tchart->tool);
1609	int ret = -EINVAL;
1610
1611	if (session == NULL)
1612		return -1;
1613
1614	symbol__init(&session->header.env);
1615
1616	(void)perf_header__process_sections(&session->header,
1617					    perf_data_file__fd(session->file),
1618					    tchart,
1619					    process_header);
1620
1621	if (!perf_session__has_traces(session, "timechart record"))
1622		goto out_delete;
1623
1624	if (perf_session__set_tracepoints_handlers(session,
1625						   power_tracepoints)) {
1626		pr_err("Initializing session tracepoint handlers failed\n");
1627		goto out_delete;
1628	}
1629
1630	ret = perf_session__process_events(session);
1631	if (ret)
1632		goto out_delete;
1633
1634	end_sample_processing(tchart);
1635
1636	sort_pids(tchart);
1637
1638	write_svg_file(tchart, output_name);
1639
1640	pr_info("Written %2.1f seconds of trace to %s.\n",
1641		(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
1642out_delete:
1643	perf_session__delete(session);
1644	return ret;
1645}
1646
1647static int timechart__io_record(int argc, const char **argv)
1648{
1649	unsigned int rec_argc, i;
1650	const char **rec_argv;
1651	const char **p;
1652	char *filter = NULL;
1653
1654	const char * const common_args[] = {
1655		"record", "-a", "-R", "-c", "1",
1656	};
1657	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1658
1659	const char * const disk_events[] = {
1660		"syscalls:sys_enter_read",
1661		"syscalls:sys_enter_pread64",
1662		"syscalls:sys_enter_readv",
1663		"syscalls:sys_enter_preadv",
1664		"syscalls:sys_enter_write",
1665		"syscalls:sys_enter_pwrite64",
1666		"syscalls:sys_enter_writev",
1667		"syscalls:sys_enter_pwritev",
1668		"syscalls:sys_enter_sync",
1669		"syscalls:sys_enter_sync_file_range",
1670		"syscalls:sys_enter_fsync",
1671		"syscalls:sys_enter_msync",
1672
1673		"syscalls:sys_exit_read",
1674		"syscalls:sys_exit_pread64",
1675		"syscalls:sys_exit_readv",
1676		"syscalls:sys_exit_preadv",
1677		"syscalls:sys_exit_write",
1678		"syscalls:sys_exit_pwrite64",
1679		"syscalls:sys_exit_writev",
1680		"syscalls:sys_exit_pwritev",
1681		"syscalls:sys_exit_sync",
1682		"syscalls:sys_exit_sync_file_range",
1683		"syscalls:sys_exit_fsync",
1684		"syscalls:sys_exit_msync",
1685	};
1686	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
1687
1688	const char * const net_events[] = {
1689		"syscalls:sys_enter_recvfrom",
1690		"syscalls:sys_enter_recvmmsg",
1691		"syscalls:sys_enter_recvmsg",
1692		"syscalls:sys_enter_sendto",
1693		"syscalls:sys_enter_sendmsg",
1694		"syscalls:sys_enter_sendmmsg",
1695
1696		"syscalls:sys_exit_recvfrom",
1697		"syscalls:sys_exit_recvmmsg",
1698		"syscalls:sys_exit_recvmsg",
1699		"syscalls:sys_exit_sendto",
1700		"syscalls:sys_exit_sendmsg",
1701		"syscalls:sys_exit_sendmmsg",
1702	};
1703	unsigned int net_events_nr = ARRAY_SIZE(net_events);
1704
1705	const char * const poll_events[] = {
1706		"syscalls:sys_enter_epoll_pwait",
1707		"syscalls:sys_enter_epoll_wait",
1708		"syscalls:sys_enter_poll",
1709		"syscalls:sys_enter_ppoll",
1710		"syscalls:sys_enter_pselect6",
1711		"syscalls:sys_enter_select",
1712
1713		"syscalls:sys_exit_epoll_pwait",
1714		"syscalls:sys_exit_epoll_wait",
1715		"syscalls:sys_exit_poll",
1716		"syscalls:sys_exit_ppoll",
1717		"syscalls:sys_exit_pselect6",
1718		"syscalls:sys_exit_select",
1719	};
1720	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
1721
1722	rec_argc = common_args_nr +
1723		disk_events_nr * 4 +
1724		net_events_nr * 4 +
1725		poll_events_nr * 4 +
1726		argc;
1727	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1728
1729	if (rec_argv == NULL)
1730		return -ENOMEM;
1731
1732	if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
 
1733		return -ENOMEM;
 
1734
1735	p = rec_argv;
1736	for (i = 0; i < common_args_nr; i++)
1737		*p++ = strdup(common_args[i]);
1738
1739	for (i = 0; i < disk_events_nr; i++) {
1740		if (!is_valid_tracepoint(disk_events[i])) {
1741			rec_argc -= 4;
1742			continue;
1743		}
1744
1745		*p++ = "-e";
1746		*p++ = strdup(disk_events[i]);
1747		*p++ = "--filter";
1748		*p++ = filter;
1749	}
1750	for (i = 0; i < net_events_nr; i++) {
1751		if (!is_valid_tracepoint(net_events[i])) {
1752			rec_argc -= 4;
1753			continue;
1754		}
1755
1756		*p++ = "-e";
1757		*p++ = strdup(net_events[i]);
1758		*p++ = "--filter";
1759		*p++ = filter;
1760	}
1761	for (i = 0; i < poll_events_nr; i++) {
1762		if (!is_valid_tracepoint(poll_events[i])) {
1763			rec_argc -= 4;
1764			continue;
1765		}
1766
1767		*p++ = "-e";
1768		*p++ = strdup(poll_events[i]);
1769		*p++ = "--filter";
1770		*p++ = filter;
1771	}
1772
1773	for (i = 0; i < (unsigned int)argc; i++)
1774		*p++ = argv[i];
1775
1776	return cmd_record(rec_argc, rec_argv, NULL);
1777}
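
For reference, an editorial sketch of the perf record command line this function assembles for IO mode (event list abbreviated; the filter excludes perf's own pid):

/*
 *   perf record -a -R -c 1 \
 *	-e syscalls:sys_enter_read --filter "common_pid != <pid of perf>" \
 *	-e syscalls:sys_exit_read  --filter "common_pid != <pid of perf>" \
 *	... (remaining disk, net and poll syscall tracepoints) \
 *	<extra user arguments>
 *
 * Tracepoints that are not available on the running kernel are skipped.
 */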
1778
1779
1780static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1781{
1782	unsigned int rec_argc, i, j;
1783	const char **rec_argv;
1784	const char **p;
1785	unsigned int record_elems;
1786
1787	const char * const common_args[] = {
1788		"record", "-a", "-R", "-c", "1",
1789	};
1790	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1791
1792	const char * const backtrace_args[] = {
1793		"-g",
1794	};
1795	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1796
1797	const char * const power_args[] = {
1798		"-e", "power:cpu_frequency",
1799		"-e", "power:cpu_idle",
1800	};
1801	unsigned int power_args_nr = ARRAY_SIZE(power_args);
1802
1803	const char * const old_power_args[] = {
1804#ifdef SUPPORT_OLD_POWER_EVENTS
1805		"-e", "power:power_start",
1806		"-e", "power:power_end",
1807		"-e", "power:power_frequency",
1808#endif
1809	};
1810	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1811
1812	const char * const tasks_args[] = {
1813		"-e", "sched:sched_wakeup",
1814		"-e", "sched:sched_switch",
1815	};
1816	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1817
1818#ifdef SUPPORT_OLD_POWER_EVENTS
1819	if (!is_valid_tracepoint("power:cpu_idle") &&
1820	    is_valid_tracepoint("power:power_start")) {
1821		use_old_power_events = 1;
1822		power_args_nr = 0;
1823	} else {
1824		old_power_args_nr = 0;
1825	}
1826#endif
1827
1828	if (tchart->power_only)
1829		tasks_args_nr = 0;
1830
1831	if (tchart->tasks_only) {
1832		power_args_nr = 0;
1833		old_power_args_nr = 0;
1834	}
1835
1836	if (!tchart->with_backtrace)
1837		backtrace_args_no = 0;
1838
1839	record_elems = common_args_nr + tasks_args_nr +
1840		power_args_nr + old_power_args_nr + backtrace_args_no;
1841
1842	rec_argc = record_elems + argc;
1843	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1844
1845	if (rec_argv == NULL)
1846		return -ENOMEM;
1847
1848	p = rec_argv;
1849	for (i = 0; i < common_args_nr; i++)
1850		*p++ = strdup(common_args[i]);
1851
1852	for (i = 0; i < backtrace_args_no; i++)
1853		*p++ = strdup(backtrace_args[i]);
1854
1855	for (i = 0; i < tasks_args_nr; i++)
1856		*p++ = strdup(tasks_args[i]);
1857
1858	for (i = 0; i < power_args_nr; i++)
1859		*p++ = strdup(power_args[i]);
1860
1861	for (i = 0; i < old_power_args_nr; i++)
1862		*p++ = strdup(old_power_args[i]);
1863
1864	for (j = 0; j < (unsigned int)argc; j++)
1865		*p++ = argv[j];
1866
1867	return cmd_record(rec_argc, rec_argv, NULL);
1868}
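
The corresponding editorial sketch for the default (CPU and power) recording mode:

/*
 *   perf record -a -R -c 1 [-g] \
 *	-e sched:sched_wakeup -e sched:sched_switch \
 *	-e power:cpu_frequency -e power:cpu_idle \
 *	<extra user arguments>
 *
 * -g is added only with --callchain; -P drops the sched events, -T drops the
 * power events, and kernels without power:cpu_idle fall back to the old
 * power:power_start/power_end/power_frequency tracepoints.
 */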
1869
1870static int
1871parse_process(const struct option *opt __maybe_unused, const char *arg,
1872	      int __maybe_unused unset)
1873{
1874	if (arg)
1875		add_process_filter(arg);
1876	return 0;
1877}
1878
1879static int
1880parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1881		int __maybe_unused unset)
1882{
1883	unsigned long duration = strtoul(arg, NULL, 0);
1884
1885	if (svg_highlight || svg_highlight_name)
1886		return -1;
1887
1888	if (duration)
1889		svg_highlight = duration;
1890	else
1891		svg_highlight_name = strdup(arg);
1892
1893	return 0;
1894}
1895
1896static int
1897parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
1898{
1899	char unit = 'n';
1900	u64 *value = opt->value;
1901
1902	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
1903		switch (unit) {
1904		case 'm':
1905			*value *= NSEC_PER_MSEC;
1906			break;
1907		case 'u':
1908			*value *= NSEC_PER_USEC;
1909			break;
1910		case 'n':
1911			break;
1912		default:
1913			return -1;
1914		}
1915	}
1916
1917	return 0;
1918}
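
parse_time() accepts a bare nanosecond count or an ms/us/ns suffix; editorial examples of the stored values:

/*
 *   "10ms"  -> 10 * NSEC_PER_MSEC = 10000000 ns
 *   "250us" -> 250 * NSEC_PER_USEC =  250000 ns
 *   "5000"  ->                         5000 ns  (no suffix means nanoseconds)
 *
 * Any other unit character makes the option parser return -1.
 */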
1919
1920int cmd_timechart(int argc, const char **argv,
1921		  const char *prefix __maybe_unused)
1922{
1923	struct timechart tchart = {
1924		.tool = {
1925			.comm		 = process_comm_event,
1926			.fork		 = process_fork_event,
1927			.exit		 = process_exit_event,
1928			.sample		 = process_sample_event,
1929			.ordered_events	 = true,
1930		},
1931		.proc_num = 15,
1932		.min_time = NSEC_PER_MSEC,
1933		.merge_dist = 1000,
1934	};
1935	const char *output_name = "output.svg";
1936	const struct option timechart_options[] = {
1937	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1938	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1939	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1940	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1941		      "highlight tasks. Pass duration in ns or process name.",
1942		       parse_highlight),
1943	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1944	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
1945		    "output processes data only"),
1946	OPT_CALLBACK('p', "process", NULL, "process",
1947		      "process selector. Pass a pid or process name.",
1948		       parse_process),
1949	OPT_CALLBACK(0, "symfs", NULL, "directory",
1950		     "Look for files with symbols relative to this directory",
1951		     symbol__config_symfs),
1952	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1953		    "min. number of tasks to print"),
1954	OPT_BOOLEAN('t', "topology", &tchart.topology,
1955		    "sort CPUs according to topology"),
1956	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
1957		    "skip EAGAIN errors"),
1958	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
1959		     "all IO faster than min-time will visually appear longer",
1960		     parse_time),
1961	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
1962		     "merge events that are merge-dist us apart",
1963		     parse_time),
1964	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
1965	OPT_END()
1966	};
1967	const char * const timechart_subcommands[] = { "record", NULL };
1968	const char *timechart_usage[] = {
1969		"perf timechart [<options>] {record}",
1970		NULL
1971	};
1972
1973	const struct option timechart_record_options[] = {
1974	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1975	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
1976		    "output processes data only"),
1977	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
1978		    "record only IO data"),
1979	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1980	OPT_END()
1981	};
1982	const char * const timechart_record_usage[] = {
1983		"perf timechart record [<options>]",
1984		NULL
1985	};
1986	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
1987			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
1988
1989	if (tchart.power_only && tchart.tasks_only) {
1990		pr_err("-P and -T options cannot be used at the same time.\n");
1991		return -1;
1992	}
1993
1994	if (argc && !strncmp(argv[0], "rec", 3)) {
1995		argc = parse_options(argc, argv, timechart_record_options,
1996				     timechart_record_usage,
1997				     PARSE_OPT_STOP_AT_NON_OPTION);
1998
1999		if (tchart.power_only && tchart.tasks_only) {
2000			pr_err("-P and -T options cannot be used at the same time.\n");
2001			return -1;
2002		}
2003
2004		if (tchart.io_only)
2005			return timechart__io_record(argc, argv);
2006		else
2007			return timechart__record(&tchart, argc, argv);
2008	} else if (argc)
2009		usage_with_options(timechart_usage, timechart_options);
2010
2011	setup_pager();
2012
2013	return __cmd_timechart(&tchart, output_name);
2014}