   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * builtin-timechart.c - make an svg timechart of system activity
   4 *
   5 * (C) Copyright 2009 Intel Corporation
   6 *
   7 * Authors:
   8 *     Arjan van de Ven <arjan@linux.intel.com>
   9 */
  10
  11#include <errno.h>
  12#include <inttypes.h>
  13
  14#include "builtin.h"
  15#include "util/color.h"
  16#include <linux/list.h>
  17#include "util/evlist.h" // for struct evsel_str_handler
  18#include "util/evsel.h"
  19#include <linux/kernel.h>
  20#include <linux/rbtree.h>
  21#include <linux/time64.h>
  22#include <linux/zalloc.h>
  23#include "util/symbol.h"
  24#include "util/thread.h"
  25#include "util/callchain.h"
  26
  27#include "perf.h"
  28#include "util/header.h"
  29#include <subcmd/pager.h>
  30#include <subcmd/parse-options.h>
  31#include "util/parse-events.h"
  32#include "util/event.h"
  33#include "util/session.h"
  34#include "util/svghelper.h"
  35#include "util/tool.h"
  36#include "util/data.h"
  37#include "util/debug.h"
  38#include <linux/err.h>
  39
  40#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
  41FILE *open_memstream(char **ptr, size_t *sizeloc);
  42#endif
  43
  44#define SUPPORT_OLD_POWER_EVENTS 1
  45#define PWR_EVENT_EXIT -1
  46
  47struct per_pid;
  48struct power_event;
  49struct wake_event;
  50
  51struct timechart {
  52	struct perf_tool	tool;
  53	struct per_pid		*all_data;
  54	struct power_event	*power_events;
  55	struct wake_event	*wake_events;
  56	int			proc_num;
  57	unsigned int		numcpus;
  58	u64			min_freq,	/* Lowest CPU frequency seen */
  59				max_freq,	/* Highest CPU frequency seen */
  60				turbo_frequency,
  61				first_time, last_time;
  62	bool			power_only,
  63				tasks_only,
  64				with_backtrace,
  65				topology;
  66	bool			force;
  67	/* IO related settings */
  68	bool			io_only,
  69				skip_eagain;
  70	u64			io_events;
  71	u64			min_time,
  72				merge_dist;
  73};
  74
  75struct per_pidcomm;
  76struct cpu_sample;
  77struct io_sample;
  78
  79/*
   80 * Data structure layout:
   81 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
   82 * Each "pid" entry has a list of "comm"s.
   83 *	this is because we want to track different programs separately, while
   84 *	exec will reuse the original pid (by design).
   85 * Each comm has a list of samples that will be used to draw
   86 * the final graph.
  87 */
  88
  89struct per_pid {
  90	struct per_pid *next;
  91
  92	int		pid;
  93	int		ppid;
  94
  95	u64		start_time;
  96	u64		end_time;
  97	u64		total_time;
  98	u64		total_bytes;
  99	int		display;
 100
 101	struct per_pidcomm *all;
 102	struct per_pidcomm *current;
 103};
 104
 105
 106struct per_pidcomm {
 107	struct per_pidcomm *next;
 108
 109	u64		start_time;
 110	u64		end_time;
 111	u64		total_time;
 112	u64		max_bytes;
 113	u64		total_bytes;
 114
 115	int		Y;
 116	int		display;
 117
 118	long		state;
 119	u64		state_since;
 120
 121	char		*comm;
 122
 123	struct cpu_sample *samples;
 124	struct io_sample  *io_samples;
 125};
 126
 127struct sample_wrapper {
 128	struct sample_wrapper *next;
 129
 130	u64		timestamp;
 131	unsigned char	data[0];
 132};
 133
 134#define TYPE_NONE	0
 135#define TYPE_RUNNING	1
 136#define TYPE_WAITING	2
 137#define TYPE_BLOCKED	3
 138
 139struct cpu_sample {
 140	struct cpu_sample *next;
 141
 142	u64 start_time;
 143	u64 end_time;
 144	int type;
 145	int cpu;
 146	const char *backtrace;
 147};
 148
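/*
 * A minimal illustrative sketch (walk_all_cpu_samples() is a hypothetical
 * helper, not used anywhere else) of how the lists described in the
 * "Data structure layout" comment above nest: one per_pid node per task,
 * one per_pidcomm per command name seen for that pid, and a singly linked
 * list of cpu_sample entries per comm.
 */
static void __maybe_unused walk_all_cpu_samples(struct timechart *tchart)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *s;

	for (p = tchart->all_data; p; p = p->next)
		for (c = p->all; c; c = c->next)
			for (s = c->samples; s; s = s->next)
				; /* e.g. look at s->type, s->cpu, s->start_time */
}
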
 149enum {
 150	IOTYPE_READ,
 151	IOTYPE_WRITE,
 152	IOTYPE_SYNC,
 153	IOTYPE_TX,
 154	IOTYPE_RX,
 155	IOTYPE_POLL,
 156};
 157
 158struct io_sample {
 159	struct io_sample *next;
 160
 161	u64 start_time;
 162	u64 end_time;
 163	u64 bytes;
 164	int type;
 165	int fd;
 166	int err;
 167	int merges;
 168};
 169
 170#define CSTATE 1
 171#define PSTATE 2
 172
 173struct power_event {
 174	struct power_event *next;
 175	int type;
 176	int state;
 177	u64 start_time;
 178	u64 end_time;
 179	int cpu;
 180};
 181
 182struct wake_event {
 183	struct wake_event *next;
 184	int waker;
 185	int wakee;
 186	u64 time;
 187	const char *backtrace;
 188};
 189
 190struct process_filter {
 191	char			*name;
 192	int			pid;
 193	struct process_filter	*next;
 194};
 195
 196static struct process_filter *process_filter;
 197
 198
 199static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
 200{
 201	struct per_pid *cursor = tchart->all_data;
 202
 203	while (cursor) {
 204		if (cursor->pid == pid)
 205			return cursor;
 206		cursor = cursor->next;
 207	}
 208	cursor = zalloc(sizeof(*cursor));
 209	assert(cursor != NULL);
 210	cursor->pid = pid;
 211	cursor->next = tchart->all_data;
 212	tchart->all_data = cursor;
 213	return cursor;
 214}
 215
 216static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
 217{
 218	struct per_pid *p;
 219	struct per_pidcomm *c;
 220	p = find_create_pid(tchart, pid);
 221	c = p->all;
 222	while (c) {
 223		if (c->comm && strcmp(c->comm, comm) == 0) {
 224			p->current = c;
 225			return;
 226		}
 227		if (!c->comm) {
 228			c->comm = strdup(comm);
 229			p->current = c;
 230			return;
 231		}
 232		c = c->next;
 233	}
 234	c = zalloc(sizeof(*c));
 235	assert(c != NULL);
 236	c->comm = strdup(comm);
 237	p->current = c;
 238	c->next = p->all;
 239	p->all = c;
 240}
 241
 242static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
 243{
 244	struct per_pid *p, *pp;
 245	p = find_create_pid(tchart, pid);
 246	pp = find_create_pid(tchart, ppid);
 247	p->ppid = ppid;
 248	if (pp->current && pp->current->comm && !p->current)
 249		pid_set_comm(tchart, pid, pp->current->comm);
 250
 251	p->start_time = timestamp;
 252	if (p->current && !p->current->start_time) {
 253		p->current->start_time = timestamp;
 254		p->current->state_since = timestamp;
 255	}
 256}
 257
 258static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
 259{
 260	struct per_pid *p;
 261	p = find_create_pid(tchart, pid);
 262	p->end_time = timestamp;
 263	if (p->current)
 264		p->current->end_time = timestamp;
 265}
 266
 267static void pid_put_sample(struct timechart *tchart, int pid, int type,
 268			   unsigned int cpu, u64 start, u64 end,
 269			   const char *backtrace)
 270{
 271	struct per_pid *p;
 272	struct per_pidcomm *c;
 273	struct cpu_sample *sample;
 274
 275	p = find_create_pid(tchart, pid);
 276	c = p->current;
 277	if (!c) {
 278		c = zalloc(sizeof(*c));
 279		assert(c != NULL);
 280		p->current = c;
 281		c->next = p->all;
 282		p->all = c;
 283	}
 284
 285	sample = zalloc(sizeof(*sample));
 286	assert(sample != NULL);
 287	sample->start_time = start;
 288	sample->end_time = end;
 289	sample->type = type;
 290	sample->next = c->samples;
 291	sample->cpu = cpu;
 292	sample->backtrace = backtrace;
 293	c->samples = sample;
 294
 295	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 296		c->total_time += (end-start);
 297		p->total_time += (end-start);
 298	}
 299
 300	if (c->start_time == 0 || c->start_time > start)
 301		c->start_time = start;
 302	if (p->start_time == 0 || p->start_time > start)
 303		p->start_time = start;
 304}
 305
 306#define MAX_CPUS 4096
 307
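/*
 * Per-CPU bookkeeping for the C-state / P-state currently in progress:
 * c_state_start()/c_state_end() and p_state_change() turn a state into a
 * power_event once its end is seen, and end_sample_processing() closes
 * out the still-open P-states when the trace ends.
 */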
 308static u64 cpus_cstate_start_times[MAX_CPUS];
 309static int cpus_cstate_state[MAX_CPUS];
 310static u64 cpus_pstate_start_times[MAX_CPUS];
 311static u64 cpus_pstate_state[MAX_CPUS];
 312
 313static int process_comm_event(struct perf_tool *tool,
 314			      union perf_event *event,
 315			      struct perf_sample *sample __maybe_unused,
 316			      struct machine *machine __maybe_unused)
 317{
 318	struct timechart *tchart = container_of(tool, struct timechart, tool);
 319	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
 320	return 0;
 321}
 322
 323static int process_fork_event(struct perf_tool *tool,
 324			      union perf_event *event,
 325			      struct perf_sample *sample __maybe_unused,
 326			      struct machine *machine __maybe_unused)
 327{
 328	struct timechart *tchart = container_of(tool, struct timechart, tool);
 329	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
 330	return 0;
 331}
 332
 333static int process_exit_event(struct perf_tool *tool,
 334			      union perf_event *event,
 335			      struct perf_sample *sample __maybe_unused,
 336			      struct machine *machine __maybe_unused)
 337{
 338	struct timechart *tchart = container_of(tool, struct timechart, tool);
 339	pid_exit(tchart, event->fork.pid, event->fork.time);
 340	return 0;
 341}
 342
 343#ifdef SUPPORT_OLD_POWER_EVENTS
 344static int use_old_power_events;
 345#endif
 346
 347static void c_state_start(int cpu, u64 timestamp, int state)
 348{
 349	cpus_cstate_start_times[cpu] = timestamp;
 350	cpus_cstate_state[cpu] = state;
 351}
 352
 353static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
 354{
 355	struct power_event *pwr = zalloc(sizeof(*pwr));
 356
 357	if (!pwr)
 358		return;
 359
 360	pwr->state = cpus_cstate_state[cpu];
 361	pwr->start_time = cpus_cstate_start_times[cpu];
 362	pwr->end_time = timestamp;
 363	pwr->cpu = cpu;
 364	pwr->type = CSTATE;
 365	pwr->next = tchart->power_events;
 366
 367	tchart->power_events = pwr;
 368}
 369
 370static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
 371{
 372	struct power_event *pwr;
 373
 374	if (new_freq > 8000000) /* detect invalid data */
 375		return;
 376
 377	pwr = zalloc(sizeof(*pwr));
 378	if (!pwr)
 379		return;
 380
 381	pwr->state = cpus_pstate_state[cpu];
 382	pwr->start_time = cpus_pstate_start_times[cpu];
 383	pwr->end_time = timestamp;
 384	pwr->cpu = cpu;
 385	pwr->type = PSTATE;
 386	pwr->next = tchart->power_events;
 387
 388	if (!pwr->start_time)
 389		pwr->start_time = tchart->first_time;
 390
 391	tchart->power_events = pwr;
 392
 393	cpus_pstate_state[cpu] = new_freq;
 394	cpus_pstate_start_times[cpu] = timestamp;
 395
 396	if ((u64)new_freq > tchart->max_freq)
 397		tchart->max_freq = new_freq;
 398
 399	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
 400		tchart->min_freq = new_freq;
 401
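	/*
	 * Frequencies here are in kHz; cpufreq drivers that expose turbo
	 * typically advertise it as the highest non-turbo frequency plus
	 * 1000 kHz, which is presumably why a frequency exactly 1000 kHz
	 * below the observed maximum marks that maximum as the turbo
	 * frequency.
	 */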
 402	if (new_freq == tchart->max_freq - 1000)
 403		tchart->turbo_frequency = tchart->max_freq;
 404}
 405
 406static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
 407			 int waker, int wakee, u8 flags, const char *backtrace)
 408{
 409	struct per_pid *p;
 410	struct wake_event *we = zalloc(sizeof(*we));
 411
 412	if (!we)
 413		return;
 414
 415	we->time = timestamp;
 416	we->waker = waker;
 417	we->backtrace = backtrace;
 418
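	/*
	 * Wakeups issued from hard or soft interrupt context have no
	 * meaningful waker task; mark them with -1 so draw_wakeups()
	 * renders them via svg_interrupt() instead of as a task-to-task
	 * wake line.
	 */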
 419	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 420		we->waker = -1;
 421
 422	we->wakee = wakee;
 423	we->next = tchart->wake_events;
 424	tchart->wake_events = we;
 425	p = find_create_pid(tchart, we->wakee);
 426
 427	if (p && p->current && p->current->state == TYPE_NONE) {
 428		p->current->state_since = timestamp;
 429		p->current->state = TYPE_WAITING;
 430	}
 431	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 432		pid_put_sample(tchart, p->pid, p->current->state, cpu,
 433			       p->current->state_since, timestamp, NULL);
 434		p->current->state_since = timestamp;
 435		p->current->state = TYPE_WAITING;
 436	}
 437}
 438
 439static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
 440			 int prev_pid, int next_pid, u64 prev_state,
 441			 const char *backtrace)
 442{
 443	struct per_pid *p = NULL, *prev_p;
 444
 445	prev_p = find_create_pid(tchart, prev_pid);
 446
 447	p = find_create_pid(tchart, next_pid);
 448
 449	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 450		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
 451			       prev_p->current->state_since, timestamp,
 452			       backtrace);
 453	if (p && p->current) {
 454		if (p->current->state != TYPE_NONE)
 455			pid_put_sample(tchart, next_pid, p->current->state, cpu,
 456				       p->current->state_since, timestamp,
 457				       backtrace);
 458
 459		p->current->state_since = timestamp;
 460		p->current->state = TYPE_RUNNING;
 461	}
 462
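	/*
	 * prev_state carries the scheduler's task state bits for the task
	 * being switched out: 0 (TASK_RUNNING) means it was preempted and
	 * is still waiting for a CPU, while bit 0x2 (TASK_UNINTERRUPTIBLE)
	 * means it went into an uninterruptible sleep, shown as blocked.
	 */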
 463	if (prev_p->current) {
 464		prev_p->current->state = TYPE_NONE;
 465		prev_p->current->state_since = timestamp;
 466		if (prev_state & 2)
 467			prev_p->current->state = TYPE_BLOCKED;
 468		if (prev_state == 0)
 469			prev_p->current->state = TYPE_WAITING;
 470	}
 471}
 472
 473static const char *cat_backtrace(union perf_event *event,
 474				 struct perf_sample *sample,
 475				 struct machine *machine)
 476{
 477	struct addr_location al;
 478	unsigned int i;
 479	char *p = NULL;
 480	size_t p_len;
 481	u8 cpumode = PERF_RECORD_MISC_USER;
 482	struct addr_location tal;
 483	struct ip_callchain *chain = sample->callchain;
 484	FILE *f = open_memstream(&p, &p_len);
 485
 486	if (!f) {
 487		perror("open_memstream error");
 488		return NULL;
 489	}
 490
 491	if (!chain)
 492		goto exit;
 493
 494	if (machine__resolve(machine, &al, sample) < 0) {
 495		fprintf(stderr, "problem processing %d event, skipping it.\n",
 496			event->header.type);
 497		goto exit;
 498	}
 499
 500	for (i = 0; i < chain->nr; i++) {
 501		u64 ip;
 502
 503		if (callchain_param.order == ORDER_CALLEE)
 504			ip = chain->ips[i];
 505		else
 506			ip = chain->ips[chain->nr - i - 1];
 507
 508		if (ip >= PERF_CONTEXT_MAX) {
 509			switch (ip) {
 510			case PERF_CONTEXT_HV:
 511				cpumode = PERF_RECORD_MISC_HYPERVISOR;
 512				break;
 513			case PERF_CONTEXT_KERNEL:
 514				cpumode = PERF_RECORD_MISC_KERNEL;
 515				break;
 516			case PERF_CONTEXT_USER:
 517				cpumode = PERF_RECORD_MISC_USER;
 518				break;
 519			default:
 520				pr_debug("invalid callchain context: "
 521					 "%"PRId64"\n", (s64) ip);
 522
 523				/*
 524				 * It seems the callchain is corrupted.
 525				 * Discard all.
 526				 */
 527				zfree(&p);
 528				goto exit_put;
 529			}
 530			continue;
 531		}
 532
 533		tal.filtered = 0;
 534		if (thread__find_symbol(al.thread, cpumode, ip, &tal))
 535			fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
 536		else
 537			fprintf(f, "..... %016" PRIx64 "\n", ip);
 538	}
 539exit_put:
 540	addr_location__put(&al);
 541exit:
 542	fclose(f);
 543
 544	return p;
 545}
 546
 547typedef int (*tracepoint_handler)(struct timechart *tchart,
 548				  struct evsel *evsel,
 549				  struct perf_sample *sample,
 550				  const char *backtrace);
 551
 552static int process_sample_event(struct perf_tool *tool,
 553				union perf_event *event,
 554				struct perf_sample *sample,
 555				struct evsel *evsel,
 556				struct machine *machine)
 557{
 558	struct timechart *tchart = container_of(tool, struct timechart, tool);
 559
 560	if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
 561		if (!tchart->first_time || tchart->first_time > sample->time)
 562			tchart->first_time = sample->time;
 563		if (tchart->last_time < sample->time)
 564			tchart->last_time = sample->time;
 565	}
 566
 567	if (evsel->handler != NULL) {
 568		tracepoint_handler f = evsel->handler;
 569		return f(tchart, evsel, sample,
 570			 cat_backtrace(event, sample, machine));
 571	}
 572
 573	return 0;
 574}
 575
 576static int
 577process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
 578			struct evsel *evsel,
 579			struct perf_sample *sample,
 580			const char *backtrace __maybe_unused)
 581{
 582	u32 state = perf_evsel__intval(evsel, sample, "state");
 583	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 584
 585	if (state == (u32)PWR_EVENT_EXIT)
 586		c_state_end(tchart, cpu_id, sample->time);
 587	else
 588		c_state_start(cpu_id, sample->time, state);
 589	return 0;
 590}
 591
 592static int
 593process_sample_cpu_frequency(struct timechart *tchart,
 594			     struct evsel *evsel,
 595			     struct perf_sample *sample,
 596			     const char *backtrace __maybe_unused)
 597{
 598	u32 state = perf_evsel__intval(evsel, sample, "state");
 599	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 600
 601	p_state_change(tchart, cpu_id, sample->time, state);
 602	return 0;
 603}
 604
 605static int
 606process_sample_sched_wakeup(struct timechart *tchart,
 607			    struct evsel *evsel,
 608			    struct perf_sample *sample,
 609			    const char *backtrace)
 610{
 611	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
 612	int waker = perf_evsel__intval(evsel, sample, "common_pid");
 613	int wakee = perf_evsel__intval(evsel, sample, "pid");
 614
 615	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
 616	return 0;
 617}
 618
 619static int
 620process_sample_sched_switch(struct timechart *tchart,
 621			    struct evsel *evsel,
 622			    struct perf_sample *sample,
 623			    const char *backtrace)
 624{
 625	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 626	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 627	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 628
 629	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
 630		     prev_state, backtrace);
 631	return 0;
 632}
 633
 634#ifdef SUPPORT_OLD_POWER_EVENTS
 635static int
 636process_sample_power_start(struct timechart *tchart __maybe_unused,
 637			   struct evsel *evsel,
 638			   struct perf_sample *sample,
 639			   const char *backtrace __maybe_unused)
 640{
 641	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 642	u64 value = perf_evsel__intval(evsel, sample, "value");
 643
 644	c_state_start(cpu_id, sample->time, value);
 645	return 0;
 646}
 647
 648static int
 649process_sample_power_end(struct timechart *tchart,
 650			 struct evsel *evsel __maybe_unused,
 651			 struct perf_sample *sample,
 652			 const char *backtrace __maybe_unused)
 653{
 654	c_state_end(tchart, sample->cpu, sample->time);
 655	return 0;
 656}
 657
 658static int
 659process_sample_power_frequency(struct timechart *tchart,
 660			       struct evsel *evsel,
 661			       struct perf_sample *sample,
 662			       const char *backtrace __maybe_unused)
 663{
 664	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 665	u64 value = perf_evsel__intval(evsel, sample, "value");
 666
 667	p_state_change(tchart, cpu_id, sample->time, value);
 668	return 0;
 669}
 670#endif /* SUPPORT_OLD_POWER_EVENTS */
 671
 672/*
 673 * After the last sample we need to wrap up the current C/P state
  674 * and close it out for each CPU.
 675 */
 676static void end_sample_processing(struct timechart *tchart)
 677{
 678	u64 cpu;
 679	struct power_event *pwr;
 680
 681	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
 682		/* C state */
 683#if 0
 684		pwr = zalloc(sizeof(*pwr));
 685		if (!pwr)
 686			return;
 687
 688		pwr->state = cpus_cstate_state[cpu];
 689		pwr->start_time = cpus_cstate_start_times[cpu];
 690		pwr->end_time = tchart->last_time;
 691		pwr->cpu = cpu;
 692		pwr->type = CSTATE;
 693		pwr->next = tchart->power_events;
 694
 695		tchart->power_events = pwr;
 696#endif
 697		/* P state */
 698
 699		pwr = zalloc(sizeof(*pwr));
 700		if (!pwr)
 701			return;
 702
 703		pwr->state = cpus_pstate_state[cpu];
 704		pwr->start_time = cpus_pstate_start_times[cpu];
 705		pwr->end_time = tchart->last_time;
 706		pwr->cpu = cpu;
 707		pwr->type = PSTATE;
 708		pwr->next = tchart->power_events;
 709
 710		if (!pwr->start_time)
 711			pwr->start_time = tchart->first_time;
 712		if (!pwr->state)
 713			pwr->state = tchart->min_freq;
 714		tchart->power_events = pwr;
 715	}
 716}
 717
 718static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
 719			       u64 start, int fd)
 720{
 721	struct per_pid *p = find_create_pid(tchart, pid);
 722	struct per_pidcomm *c = p->current;
 723	struct io_sample *sample;
 724	struct io_sample *prev;
 725
 726	if (!c) {
 727		c = zalloc(sizeof(*c));
 728		if (!c)
 729			return -ENOMEM;
 730		p->current = c;
 731		c->next = p->all;
 732		p->all = c;
 733	}
 734
 735	prev = c->io_samples;
 736
 737	if (prev && prev->start_time && !prev->end_time) {
 738		pr_warning("Skip invalid start event: "
 739			   "previous event already started!\n");
 740
 741		/* remove the previous event that was started;
 742		 * we are not sure we will ever get an end for it */
 743		c->io_samples = prev->next;
 744		free(prev);
 745		return 0;
 746	}
 747
 748	sample = zalloc(sizeof(*sample));
 749	if (!sample)
 750		return -ENOMEM;
 751	sample->start_time = start;
 752	sample->type = type;
 753	sample->fd = fd;
 754	sample->next = c->io_samples;
 755	c->io_samples = sample;
 756
 757	if (c->start_time == 0 || c->start_time > start)
 758		c->start_time = start;
 759
 760	return 0;
 761}
 762
 763static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
 764			     u64 end, long ret)
 765{
 766	struct per_pid *p = find_create_pid(tchart, pid);
 767	struct per_pidcomm *c = p->current;
 768	struct io_sample *sample, *prev;
 769
 770	if (!c) {
 771		pr_warning("Invalid pidcomm!\n");
 772		return -1;
 773	}
 774
 775	sample = c->io_samples;
 776
 777	if (!sample) /* skip partially captured events */
 778		return 0;
 779
 780	if (sample->end_time) {
 781		pr_warning("Skip invalid end event: "
 782			   "previous event already ended!\n");
 783		return 0;
 784	}
 785
 786	if (sample->type != type) {
 787		pr_warning("Skip invalid end event: invalid event type!\n");
 788		return 0;
 789	}
 790
 791	sample->end_time = end;
 792	prev = sample->next;
 793
 794	/* we want to be able to see small and fast transfers, so make them
 795	 * at least min_time long, but don't overlap them */
 796	if (sample->end_time - sample->start_time < tchart->min_time)
 797		sample->end_time = sample->start_time + tchart->min_time;
 798	if (prev && sample->start_time < prev->end_time) {
 799		if (prev->err) /* try to make errors more visible */
 800			sample->start_time = prev->end_time;
 801		else
 802			prev->end_time = sample->start_time;
 803	}
 804
 805	if (ret < 0) {
 806		sample->err = ret;
 807	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
 808		   type == IOTYPE_TX || type == IOTYPE_RX) {
 809
 810		if ((u64)ret > c->max_bytes)
 811			c->max_bytes = ret;
 812
 813		c->total_bytes += ret;
 814		p->total_bytes += ret;
 815		sample->bytes = ret;
 816	}
 817
 818	/* merge two requests to make svg smaller and render-friendly */
 819	if (prev &&
 820	    prev->type == sample->type &&
 821	    prev->err == sample->err &&
 822	    prev->fd == sample->fd &&
 823	    prev->end_time + tchart->merge_dist >= sample->start_time) {
 824
 825		sample->bytes += prev->bytes;
 826		sample->merges += prev->merges + 1;
 827
 828		sample->start_time = prev->start_time;
 829		sample->next = prev->next;
 830		free(prev);
 831
 832		if (!sample->err && sample->bytes > c->max_bytes)
 833			c->max_bytes = sample->bytes;
 834	}
 835
 836	tchart->io_events++;
 837
 838	return 0;
 839}
 840
 841static int
 842process_enter_read(struct timechart *tchart,
 843		   struct evsel *evsel,
 844		   struct perf_sample *sample)
 845{
 846	long fd = perf_evsel__intval(evsel, sample, "fd");
 847	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
 848				   sample->time, fd);
 849}
 850
 851static int
 852process_exit_read(struct timechart *tchart,
 853		  struct evsel *evsel,
 854		  struct perf_sample *sample)
 855{
 856	long ret = perf_evsel__intval(evsel, sample, "ret");
 857	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
 858				 sample->time, ret);
 859}
 860
 861static int
 862process_enter_write(struct timechart *tchart,
 863		    struct evsel *evsel,
 864		    struct perf_sample *sample)
 865{
 866	long fd = perf_evsel__intval(evsel, sample, "fd");
 867	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 868				   sample->time, fd);
 869}
 870
 871static int
 872process_exit_write(struct timechart *tchart,
 873		   struct evsel *evsel,
 874		   struct perf_sample *sample)
 875{
 876	long ret = perf_evsel__intval(evsel, sample, "ret");
 877	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 878				 sample->time, ret);
 879}
 880
 881static int
 882process_enter_sync(struct timechart *tchart,
 883		   struct evsel *evsel,
 884		   struct perf_sample *sample)
 885{
 886	long fd = perf_evsel__intval(evsel, sample, "fd");
 887	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 888				   sample->time, fd);
 889}
 890
 891static int
 892process_exit_sync(struct timechart *tchart,
 893		  struct evsel *evsel,
 894		  struct perf_sample *sample)
 895{
 896	long ret = perf_evsel__intval(evsel, sample, "ret");
 897	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 898				 sample->time, ret);
 899}
 900
 901static int
 902process_enter_tx(struct timechart *tchart,
 903		 struct evsel *evsel,
 904		 struct perf_sample *sample)
 905{
 906	long fd = perf_evsel__intval(evsel, sample, "fd");
 907	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
 908				   sample->time, fd);
 909}
 910
 911static int
 912process_exit_tx(struct timechart *tchart,
 913		struct evsel *evsel,
 914		struct perf_sample *sample)
 915{
 916	long ret = perf_evsel__intval(evsel, sample, "ret");
 917	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
 918				 sample->time, ret);
 919}
 920
 921static int
 922process_enter_rx(struct timechart *tchart,
 923		 struct evsel *evsel,
 924		 struct perf_sample *sample)
 925{
 926	long fd = perf_evsel__intval(evsel, sample, "fd");
 927	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
 928				   sample->time, fd);
 929}
 930
 931static int
 932process_exit_rx(struct timechart *tchart,
 933		struct evsel *evsel,
 934		struct perf_sample *sample)
 935{
 936	long ret = perf_evsel__intval(evsel, sample, "ret");
 937	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
 938				 sample->time, ret);
 939}
 940
 941static int
 942process_enter_poll(struct timechart *tchart,
 943		   struct evsel *evsel,
 944		   struct perf_sample *sample)
 945{
 946	long fd = perf_evsel__intval(evsel, sample, "fd");
 947	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
 948				   sample->time, fd);
 949}
 950
 951static int
 952process_exit_poll(struct timechart *tchart,
 953		  struct evsel *evsel,
 954		  struct perf_sample *sample)
 955{
 956	long ret = perf_evsel__intval(evsel, sample, "ret");
 957	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
 958				 sample->time, ret);
 959}
 960
 961/*
 962 * Sort the pid datastructure
 963 */
 964static void sort_pids(struct timechart *tchart)
 965{
 966	struct per_pid *new_list, *p, *cursor, *prev;
 967	/* sort by ppid first, then by pid, lowest to highest */
 968
 969	new_list = NULL;
 970
 971	while (tchart->all_data) {
 972		p = tchart->all_data;
 973		tchart->all_data = p->next;
 974		p->next = NULL;
 975
 976		if (new_list == NULL) {
 977			new_list = p;
 978			p->next = NULL;
 979			continue;
 980		}
 981		prev = NULL;
 982		cursor = new_list;
 983		while (cursor) {
 984			if (cursor->ppid > p->ppid ||
 985				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 986				/* must insert before */
 987				if (prev) {
 988					p->next = prev->next;
 989					prev->next = p;
 990					cursor = NULL;
 991					continue;
 992				} else {
 993					p->next = new_list;
 994					new_list = p;
 995					cursor = NULL;
 996					continue;
 997				}
 998			}
 999
1000			prev = cursor;
1001			cursor = cursor->next;
1002			if (!cursor)
1003				prev->next = p;
1004		}
1005	}
1006	tchart->all_data = new_list;
1007}
1008
1009
1010static void draw_c_p_states(struct timechart *tchart)
1011{
1012	struct power_event *pwr;
1013	pwr = tchart->power_events;
1014
1015	/*
1016	 * two pass drawing so that the P state bars are on top of the C state blocks
1017	 */
1018	while (pwr) {
1019		if (pwr->type == CSTATE)
1020			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1021		pwr = pwr->next;
1022	}
1023
1024	pwr = tchart->power_events;
1025	while (pwr) {
1026		if (pwr->type == PSTATE) {
1027			if (!pwr->state)
1028				pwr->state = tchart->min_freq;
1029			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1030		}
1031		pwr = pwr->next;
1032	}
1033}
1034
1035static void draw_wakeups(struct timechart *tchart)
1036{
1037	struct wake_event *we;
1038	struct per_pid *p;
1039	struct per_pidcomm *c;
1040
1041	we = tchart->wake_events;
1042	while (we) {
1043		int from = 0, to = 0;
1044		char *task_from = NULL, *task_to = NULL;
1045
1046		/* locate the column of the waker and wakee */
1047		p = tchart->all_data;
1048		while (p) {
1049			if (p->pid == we->waker || p->pid == we->wakee) {
1050				c = p->all;
1051				while (c) {
1052					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
1053						if (p->pid == we->waker && !from) {
1054							from = c->Y;
1055							task_from = strdup(c->comm);
1056						}
1057						if (p->pid == we->wakee && !to) {
1058							to = c->Y;
1059							task_to = strdup(c->comm);
1060						}
1061					}
1062					c = c->next;
1063				}
1064				c = p->all;
1065				while (c) {
1066					if (p->pid == we->waker && !from) {
1067						from = c->Y;
1068						task_from = strdup(c->comm);
1069					}
1070					if (p->pid == we->wakee && !to) {
1071						to = c->Y;
1072						task_to = strdup(c->comm);
1073					}
1074					c = c->next;
1075				}
1076			}
1077			p = p->next;
1078		}
1079
1080		if (!task_from) {
1081			task_from = malloc(40);
1082			sprintf(task_from, "[%i]", we->waker);
1083		}
1084		if (!task_to) {
1085			task_to = malloc(40);
1086			sprintf(task_to, "[%i]", we->wakee);
1087		}
1088
1089		if (we->waker == -1)
1090			svg_interrupt(we->time, to, we->backtrace);
1091		else if (from && to && abs(from - to) == 1)
1092			svg_wakeline(we->time, from, to, we->backtrace);
1093		else
1094			svg_partial_wakeline(we->time, from, task_from, to,
1095					     task_to, we->backtrace);
1096		we = we->next;
1097
1098		free(task_from);
1099		free(task_to);
1100	}
1101}
1102
1103static void draw_cpu_usage(struct timechart *tchart)
1104{
1105	struct per_pid *p;
1106	struct per_pidcomm *c;
1107	struct cpu_sample *sample;
1108	p = tchart->all_data;
1109	while (p) {
1110		c = p->all;
1111		while (c) {
1112			sample = c->samples;
1113			while (sample) {
1114				if (sample->type == TYPE_RUNNING) {
1115					svg_process(sample->cpu,
1116						    sample->start_time,
1117						    sample->end_time,
1118						    p->pid,
1119						    c->comm,
1120						    sample->backtrace);
1121				}
1122
1123				sample = sample->next;
1124			}
1125			c = c->next;
1126		}
1127		p = p->next;
1128	}
1129}
1130
1131static void draw_io_bars(struct timechart *tchart)
1132{
1133	const char *suf;
1134	double bytes;
1135	char comm[256];
1136	struct per_pid *p;
1137	struct per_pidcomm *c;
1138	struct io_sample *sample;
1139	int Y = 1;
1140
1141	p = tchart->all_data;
1142	while (p) {
1143		c = p->all;
1144		while (c) {
1145			if (!c->display) {
1146				c->Y = 0;
1147				c = c->next;
1148				continue;
1149			}
1150
1151			svg_box(Y, c->start_time, c->end_time, "process3");
1152			sample = c->io_samples;
1153			for (sample = c->io_samples; sample; sample = sample->next) {
1154				double h = (double)sample->bytes / c->max_bytes;
1155
1156				if (tchart->skip_eagain &&
1157				    sample->err == -EAGAIN)
1158					continue;
1159
1160				if (sample->err)
1161					h = 1;
1162
1163				if (sample->type == IOTYPE_SYNC)
1164					svg_fbox(Y,
1165						sample->start_time,
1166						sample->end_time,
1167						1,
1168						sample->err ? "error" : "sync",
1169						sample->fd,
1170						sample->err,
1171						sample->merges);
1172				else if (sample->type == IOTYPE_POLL)
1173					svg_fbox(Y,
1174						sample->start_time,
1175						sample->end_time,
1176						1,
1177						sample->err ? "error" : "poll",
1178						sample->fd,
1179						sample->err,
1180						sample->merges);
1181				else if (sample->type == IOTYPE_READ)
1182					svg_ubox(Y,
1183						sample->start_time,
1184						sample->end_time,
1185						h,
1186						sample->err ? "error" : "disk",
1187						sample->fd,
1188						sample->err,
1189						sample->merges);
1190				else if (sample->type == IOTYPE_WRITE)
1191					svg_lbox(Y,
1192						sample->start_time,
1193						sample->end_time,
1194						h,
1195						sample->err ? "error" : "disk",
1196						sample->fd,
1197						sample->err,
1198						sample->merges);
1199				else if (sample->type == IOTYPE_RX)
1200					svg_ubox(Y,
1201						sample->start_time,
1202						sample->end_time,
1203						h,
1204						sample->err ? "error" : "net",
1205						sample->fd,
1206						sample->err,
1207						sample->merges);
1208				else if (sample->type == IOTYPE_TX)
1209					svg_lbox(Y,
1210						sample->start_time,
1211						sample->end_time,
1212						h,
1213						sample->err ? "error" : "net",
1214						sample->fd,
1215						sample->err,
1216						sample->merges);
1217			}
1218
1219			suf = "";
1220			bytes = c->total_bytes;
1221			if (bytes > 1024) {
1222				bytes = bytes / 1024;
1223				suf = "K";
1224			}
1225			if (bytes > 1024) {
1226				bytes = bytes / 1024;
1227				suf = "M";
1228			}
1229			if (bytes > 1024) {
1230				bytes = bytes / 1024;
1231				suf = "G";
1232			}
1233
1234
1235			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
1236			svg_text(Y, c->start_time, comm);
1237
1238			c->Y = Y;
1239			Y++;
1240			c = c->next;
1241		}
1242		p = p->next;
1243	}
1244}
1245
1246static void draw_process_bars(struct timechart *tchart)
1247{
1248	struct per_pid *p;
1249	struct per_pidcomm *c;
1250	struct cpu_sample *sample;
1251	int Y = 0;
1252
1253	Y = 2 * tchart->numcpus + 2;
1254
1255	p = tchart->all_data;
1256	while (p) {
1257		c = p->all;
1258		while (c) {
1259			if (!c->display) {
1260				c->Y = 0;
1261				c = c->next;
1262				continue;
1263			}
1264
1265			svg_box(Y, c->start_time, c->end_time, "process");
1266			sample = c->samples;
1267			while (sample) {
1268				if (sample->type == TYPE_RUNNING)
1269					svg_running(Y, sample->cpu,
1270						    sample->start_time,
1271						    sample->end_time,
1272						    sample->backtrace);
1273				if (sample->type == TYPE_BLOCKED)
1274					svg_blocked(Y, sample->cpu,
1275						    sample->start_time,
1276						    sample->end_time,
1277						    sample->backtrace);
1278				if (sample->type == TYPE_WAITING)
1279					svg_waiting(Y, sample->cpu,
1280						    sample->start_time,
1281						    sample->end_time,
1282						    sample->backtrace);
1283				sample = sample->next;
1284			}
1285
1286			if (c->comm) {
1287				char comm[256];
1288				if (c->total_time > 5000000000) /* 5 seconds */
1289					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
1290				else
1291					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
1292
1293				svg_text(Y, c->start_time, comm);
1294			}
1295			c->Y = Y;
1296			Y++;
1297			c = c->next;
1298		}
1299		p = p->next;
1300	}
1301}
1302
1303static void add_process_filter(const char *string)
1304{
1305	int pid = strtoull(string, NULL, 10);
1306	struct process_filter *filt = malloc(sizeof(*filt));
1307
1308	if (!filt)
1309		return;
1310
1311	filt->name = strdup(string);
1312	filt->pid  = pid;
1313	filt->next = process_filter;
1314
1315	process_filter = filt;
1316}
1317
1318static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
1319{
1320	struct process_filter *filt;
1321	if (!process_filter)
1322		return 1;
1323
1324	filt = process_filter;
1325	while (filt) {
1326		if (filt->pid && p->pid == filt->pid)
1327			return 1;
1328		if (strcmp(filt->name, c->comm) == 0)
1329			return 1;
1330		filt = filt->next;
1331	}
1332	return 0;
1333}
1334
1335static int determine_display_tasks_filtered(struct timechart *tchart)
1336{
1337	struct per_pid *p;
1338	struct per_pidcomm *c;
1339	int count = 0;
1340
1341	p = tchart->all_data;
1342	while (p) {
1343		p->display = 0;
1344		if (p->start_time == 1)
1345			p->start_time = tchart->first_time;
1346
1347		/* no exit marker, task kept running to the end */
1348		if (p->end_time == 0)
1349			p->end_time = tchart->last_time;
1350
1351		c = p->all;
1352
1353		while (c) {
1354			c->display = 0;
1355
1356			if (c->start_time == 1)
1357				c->start_time = tchart->first_time;
1358
1359			if (passes_filter(p, c)) {
1360				c->display = 1;
1361				p->display = 1;
1362				count++;
1363			}
1364
1365			if (c->end_time == 0)
1366				c->end_time = tchart->last_time;
1367
1368			c = c->next;
1369		}
1370		p = p->next;
1371	}
1372	return count;
1373}
1374
1375static int determine_display_tasks(struct timechart *tchart, u64 threshold)
1376{
1377	struct per_pid *p;
1378	struct per_pidcomm *c;
1379	int count = 0;
1380
1381	p = tchart->all_data;
1382	while (p) {
1383		p->display = 0;
1384		if (p->start_time == 1)
1385			p->start_time = tchart->first_time;
1386
1387		/* no exit marker, task kept running to the end */
1388		if (p->end_time == 0)
1389			p->end_time = tchart->last_time;
1390		if (p->total_time >= threshold)
1391			p->display = 1;
1392
1393		c = p->all;
1394
1395		while (c) {
1396			c->display = 0;
1397
1398			if (c->start_time == 1)
1399				c->start_time = tchart->first_time;
1400
1401			if (c->total_time >= threshold) {
1402				c->display = 1;
1403				count++;
1404			}
1405
1406			if (c->end_time == 0)
1407				c->end_time = tchart->last_time;
1408
1409			c = c->next;
1410		}
1411		p = p->next;
1412	}
1413	return count;
1414}
1415
1416static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
1417{
1418	struct per_pid *p;
1419	struct per_pidcomm *c;
1420	int count = 0;
1421
1422	p = timechart->all_data;
1423	while (p) {
1424		/* no exit marker, task kept running to the end */
1425		if (p->end_time == 0)
1426			p->end_time = timechart->last_time;
1427
1428		c = p->all;
1429
1430		while (c) {
1431			c->display = 0;
1432
1433			if (c->total_bytes >= threshold) {
1434				c->display = 1;
1435				count++;
1436			}
1437
1438			if (c->end_time == 0)
1439				c->end_time = timechart->last_time;
1440
1441			c = c->next;
1442		}
1443		p = p->next;
1444	}
1445	return count;
1446}
1447
1448#define BYTES_THRESH (1 * 1024 * 1024)
1449#define TIME_THRESH 10000000
1450
1451static void write_svg_file(struct timechart *tchart, const char *filename)
1452{
1453	u64 i;
1454	int count;
1455	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
1456
1457	if (tchart->power_only)
1458		tchart->proc_num = 0;
1459
1460	/* We'd like to show at least proc_num tasks;
1461	 * be less picky if we have fewer */
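	/*
	 * For example, with the default proc_num of 15 and no I/O events,
	 * the threshold starts at TIME_THRESH (10 ms of accumulated time)
	 * and is divided by 10 each pass (1 ms, 100 us, ...) until enough
	 * tasks qualify, the threshold reaches zero, or an explicit
	 * process filter is in use.
	 */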
1462	do {
1463		if (process_filter)
1464			count = determine_display_tasks_filtered(tchart);
1465		else if (tchart->io_events)
1466			count = determine_display_io_tasks(tchart, thresh);
1467		else
1468			count = determine_display_tasks(tchart, thresh);
1469		thresh /= 10;
1470	} while (!process_filter && thresh && count < tchart->proc_num);
1471
1472	if (!tchart->proc_num)
1473		count = 0;
1474
1475	if (tchart->io_events) {
1476		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
1477
1478		svg_time_grid(0.5);
1479		svg_io_legenda();
1480
1481		draw_io_bars(tchart);
1482	} else {
1483		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1484
1485		svg_time_grid(0);
1486
1487		svg_legenda();
1488
1489		for (i = 0; i < tchart->numcpus; i++)
1490			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1491
1492		draw_cpu_usage(tchart);
1493		if (tchart->proc_num)
1494			draw_process_bars(tchart);
1495		if (!tchart->tasks_only)
1496			draw_c_p_states(tchart);
1497		if (tchart->proc_num)
1498			draw_wakeups(tchart);
1499	}
1500
1501	svg_close();
1502}
1503
1504static int process_header(struct perf_file_section *section __maybe_unused,
1505			  struct perf_header *ph,
1506			  int feat,
1507			  int fd __maybe_unused,
1508			  void *data)
1509{
1510	struct timechart *tchart = data;
1511
1512	switch (feat) {
1513	case HEADER_NRCPUS:
1514		tchart->numcpus = ph->env.nr_cpus_avail;
1515		break;
1516
1517	case HEADER_CPU_TOPOLOGY:
1518		if (!tchart->topology)
1519			break;
1520
1521		if (svg_build_topology_map(&ph->env))
1522			fprintf(stderr, "problem building topology\n");
1523		break;
1524
1525	default:
1526		break;
1527	}
1528
1529	return 0;
1530}
1531
1532static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1533{
1534	const struct evsel_str_handler power_tracepoints[] = {
1535		{ "power:cpu_idle",		process_sample_cpu_idle },
1536		{ "power:cpu_frequency",	process_sample_cpu_frequency },
1537		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
1538		{ "sched:sched_switch",		process_sample_sched_switch },
1539#ifdef SUPPORT_OLD_POWER_EVENTS
1540		{ "power:power_start",		process_sample_power_start },
1541		{ "power:power_end",		process_sample_power_end },
1542		{ "power:power_frequency",	process_sample_power_frequency },
1543#endif
1544
1545		{ "syscalls:sys_enter_read",		process_enter_read },
1546		{ "syscalls:sys_enter_pread64",		process_enter_read },
1547		{ "syscalls:sys_enter_readv",		process_enter_read },
1548		{ "syscalls:sys_enter_preadv",		process_enter_read },
1549		{ "syscalls:sys_enter_write",		process_enter_write },
1550		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
1551		{ "syscalls:sys_enter_writev",		process_enter_write },
1552		{ "syscalls:sys_enter_pwritev",		process_enter_write },
1553		{ "syscalls:sys_enter_sync",		process_enter_sync },
1554		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
1555		{ "syscalls:sys_enter_fsync",		process_enter_sync },
1556		{ "syscalls:sys_enter_msync",		process_enter_sync },
1557		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
1558		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
1559		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
1560		{ "syscalls:sys_enter_sendto",		process_enter_tx },
1561		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
1562		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
1563		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
1564		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
1565		{ "syscalls:sys_enter_poll",		process_enter_poll },
1566		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
1567		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
1568		{ "syscalls:sys_enter_select",		process_enter_poll },
1569
1570		{ "syscalls:sys_exit_read",		process_exit_read },
1571		{ "syscalls:sys_exit_pread64",		process_exit_read },
1572		{ "syscalls:sys_exit_readv",		process_exit_read },
1573		{ "syscalls:sys_exit_preadv",		process_exit_read },
1574		{ "syscalls:sys_exit_write",		process_exit_write },
1575		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
1576		{ "syscalls:sys_exit_writev",		process_exit_write },
1577		{ "syscalls:sys_exit_pwritev",		process_exit_write },
1578		{ "syscalls:sys_exit_sync",		process_exit_sync },
1579		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
1580		{ "syscalls:sys_exit_fsync",		process_exit_sync },
1581		{ "syscalls:sys_exit_msync",		process_exit_sync },
1582		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
1583		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
1584		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
1585		{ "syscalls:sys_exit_sendto",		process_exit_tx },
1586		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
1587		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
1588		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
1589		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
1590		{ "syscalls:sys_exit_poll",		process_exit_poll },
1591		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
1592		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
1593		{ "syscalls:sys_exit_select",		process_exit_poll },
1594	};
1595	struct perf_data data = {
1596		.path  = input_name,
1597		.mode  = PERF_DATA_MODE_READ,
1598		.force = tchart->force,
1599	};
1600
1601	struct perf_session *session = perf_session__new(&data, false,
1602							 &tchart->tool);
1603	int ret = -EINVAL;
1604
1605	if (IS_ERR(session))
1606		return PTR_ERR(session);
1607
1608	symbol__init(&session->header.env);
1609
1610	(void)perf_header__process_sections(&session->header,
1611					    perf_data__fd(session->data),
1612					    tchart,
1613					    process_header);
1614
1615	if (!perf_session__has_traces(session, "timechart record"))
1616		goto out_delete;
1617
1618	if (perf_session__set_tracepoints_handlers(session,
1619						   power_tracepoints)) {
1620		pr_err("Initializing session tracepoint handlers failed\n");
1621		goto out_delete;
1622	}
1623
1624	ret = perf_session__process_events(session);
1625	if (ret)
1626		goto out_delete;
1627
1628	end_sample_processing(tchart);
1629
1630	sort_pids(tchart);
1631
1632	write_svg_file(tchart, output_name);
1633
1634	pr_info("Written %2.1f seconds of trace to %s.\n",
1635		(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
1636out_delete:
1637	perf_session__delete(session);
1638	return ret;
1639}
1640
1641static int timechart__io_record(int argc, const char **argv)
1642{
1643	unsigned int rec_argc, i;
1644	const char **rec_argv;
1645	const char **p;
1646	char *filter = NULL;
1647
1648	const char * const common_args[] = {
1649		"record", "-a", "-R", "-c", "1",
1650	};
1651	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1652
1653	const char * const disk_events[] = {
1654		"syscalls:sys_enter_read",
1655		"syscalls:sys_enter_pread64",
1656		"syscalls:sys_enter_readv",
1657		"syscalls:sys_enter_preadv",
1658		"syscalls:sys_enter_write",
1659		"syscalls:sys_enter_pwrite64",
1660		"syscalls:sys_enter_writev",
1661		"syscalls:sys_enter_pwritev",
1662		"syscalls:sys_enter_sync",
1663		"syscalls:sys_enter_sync_file_range",
1664		"syscalls:sys_enter_fsync",
1665		"syscalls:sys_enter_msync",
1666
1667		"syscalls:sys_exit_read",
1668		"syscalls:sys_exit_pread64",
1669		"syscalls:sys_exit_readv",
1670		"syscalls:sys_exit_preadv",
1671		"syscalls:sys_exit_write",
1672		"syscalls:sys_exit_pwrite64",
1673		"syscalls:sys_exit_writev",
1674		"syscalls:sys_exit_pwritev",
1675		"syscalls:sys_exit_sync",
1676		"syscalls:sys_exit_sync_file_range",
1677		"syscalls:sys_exit_fsync",
1678		"syscalls:sys_exit_msync",
1679	};
1680	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
1681
1682	const char * const net_events[] = {
1683		"syscalls:sys_enter_recvfrom",
1684		"syscalls:sys_enter_recvmmsg",
1685		"syscalls:sys_enter_recvmsg",
1686		"syscalls:sys_enter_sendto",
1687		"syscalls:sys_enter_sendmsg",
1688		"syscalls:sys_enter_sendmmsg",
1689
1690		"syscalls:sys_exit_recvfrom",
1691		"syscalls:sys_exit_recvmmsg",
1692		"syscalls:sys_exit_recvmsg",
1693		"syscalls:sys_exit_sendto",
1694		"syscalls:sys_exit_sendmsg",
1695		"syscalls:sys_exit_sendmmsg",
1696	};
1697	unsigned int net_events_nr = ARRAY_SIZE(net_events);
1698
1699	const char * const poll_events[] = {
1700		"syscalls:sys_enter_epoll_pwait",
1701		"syscalls:sys_enter_epoll_wait",
1702		"syscalls:sys_enter_poll",
1703		"syscalls:sys_enter_ppoll",
1704		"syscalls:sys_enter_pselect6",
1705		"syscalls:sys_enter_select",
1706
1707		"syscalls:sys_exit_epoll_pwait",
1708		"syscalls:sys_exit_epoll_wait",
1709		"syscalls:sys_exit_poll",
1710		"syscalls:sys_exit_ppoll",
1711		"syscalls:sys_exit_pselect6",
1712		"syscalls:sys_exit_select",
1713	};
1714	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
1715
1716	rec_argc = common_args_nr +
1717		disk_events_nr * 4 +
1718		net_events_nr * 4 +
1719		poll_events_nr * 4 +
1720		argc;
1721	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1722
1723	if (rec_argv == NULL)
1724		return -ENOMEM;
1725
1726	if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
1727		free(rec_argv);
1728		return -ENOMEM;
1729	}
1730
1731	p = rec_argv;
1732	for (i = 0; i < common_args_nr; i++)
1733		*p++ = strdup(common_args[i]);
1734
1735	for (i = 0; i < disk_events_nr; i++) {
1736		if (!is_valid_tracepoint(disk_events[i])) {
1737			rec_argc -= 4;
1738			continue;
1739		}
1740
1741		*p++ = "-e";
1742		*p++ = strdup(disk_events[i]);
1743		*p++ = "--filter";
1744		*p++ = filter;
1745	}
1746	for (i = 0; i < net_events_nr; i++) {
1747		if (!is_valid_tracepoint(net_events[i])) {
1748			rec_argc -= 4;
1749			continue;
1750		}
1751
1752		*p++ = "-e";
1753		*p++ = strdup(net_events[i]);
1754		*p++ = "--filter";
1755		*p++ = filter;
1756	}
1757	for (i = 0; i < poll_events_nr; i++) {
1758		if (!is_valid_tracepoint(poll_events[i])) {
1759			rec_argc -= 4;
1760			continue;
1761		}
1762
1763		*p++ = "-e";
1764		*p++ = strdup(poll_events[i]);
1765		*p++ = "--filter";
1766		*p++ = filter;
1767	}
1768
1769	for (i = 0; i < (unsigned int)argc; i++)
1770		*p++ = argv[i];
1771
1772	return cmd_record(rec_argc, rec_argv);
1773}
1774
1775
1776static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1777{
1778	unsigned int rec_argc, i, j;
1779	const char **rec_argv;
1780	const char **p;
1781	unsigned int record_elems;
1782
1783	const char * const common_args[] = {
1784		"record", "-a", "-R", "-c", "1",
1785	};
1786	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1787
1788	const char * const backtrace_args[] = {
1789		"-g",
1790	};
1791	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1792
1793	const char * const power_args[] = {
1794		"-e", "power:cpu_frequency",
1795		"-e", "power:cpu_idle",
1796	};
1797	unsigned int power_args_nr = ARRAY_SIZE(power_args);
1798
1799	const char * const old_power_args[] = {
1800#ifdef SUPPORT_OLD_POWER_EVENTS
1801		"-e", "power:power_start",
1802		"-e", "power:power_end",
1803		"-e", "power:power_frequency",
1804#endif
1805	};
1806	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1807
1808	const char * const tasks_args[] = {
1809		"-e", "sched:sched_wakeup",
1810		"-e", "sched:sched_switch",
1811	};
1812	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1813
1814#ifdef SUPPORT_OLD_POWER_EVENTS
1815	if (!is_valid_tracepoint("power:cpu_idle") &&
1816	    is_valid_tracepoint("power:power_start")) {
1817		use_old_power_events = 1;
1818		power_args_nr = 0;
1819	} else {
1820		old_power_args_nr = 0;
1821	}
1822#endif
1823
1824	if (tchart->power_only)
1825		tasks_args_nr = 0;
1826
1827	if (tchart->tasks_only) {
1828		power_args_nr = 0;
1829		old_power_args_nr = 0;
1830	}
1831
1832	if (!tchart->with_backtrace)
1833		backtrace_args_no = 0;
1834
1835	record_elems = common_args_nr + tasks_args_nr +
1836		power_args_nr + old_power_args_nr + backtrace_args_no;
1837
1838	rec_argc = record_elems + argc;
1839	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1840
1841	if (rec_argv == NULL)
1842		return -ENOMEM;
1843
1844	p = rec_argv;
1845	for (i = 0; i < common_args_nr; i++)
1846		*p++ = strdup(common_args[i]);
1847
1848	for (i = 0; i < backtrace_args_no; i++)
1849		*p++ = strdup(backtrace_args[i]);
1850
1851	for (i = 0; i < tasks_args_nr; i++)
1852		*p++ = strdup(tasks_args[i]);
1853
1854	for (i = 0; i < power_args_nr; i++)
1855		*p++ = strdup(power_args[i]);
1856
1857	for (i = 0; i < old_power_args_nr; i++)
1858		*p++ = strdup(old_power_args[i]);
1859
1860	for (j = 0; j < (unsigned int)argc; j++)
1861		*p++ = argv[j];
1862
1863	return cmd_record(rec_argc, rec_argv);
1864}
1865
1866static int
1867parse_process(const struct option *opt __maybe_unused, const char *arg,
1868	      int __maybe_unused unset)
1869{
1870	if (arg)
1871		add_process_filter(arg);
1872	return 0;
1873}
1874
1875static int
1876parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1877		int __maybe_unused unset)
1878{
1879	unsigned long duration = strtoul(arg, NULL, 0);
1880
1881	if (svg_highlight || svg_highlight_name)
1882		return -1;
1883
1884	if (duration)
1885		svg_highlight = duration;
1886	else
1887		svg_highlight_name = strdup(arg);
1888
1889	return 0;
1890}
1891
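/*
 * Parses the "time" arguments of --io-min-time and --io-merge-dist.
 * Illustrative forms (the values are only examples): "1ms" is stored as
 * 1 * NSEC_PER_MSEC, "500us" as 500 * NSEC_PER_USEC, and a bare "100"
 * (or "100ns") is taken as 100 nanoseconds.
 */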
1892static int
1893parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
1894{
1895	char unit = 'n';
1896	u64 *value = opt->value;
1897
1898	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
1899		switch (unit) {
1900		case 'm':
1901			*value *= NSEC_PER_MSEC;
1902			break;
1903		case 'u':
1904			*value *= NSEC_PER_USEC;
1905			break;
1906		case 'n':
1907			break;
1908		default:
1909			return -1;
1910		}
1911	}
1912
1913	return 0;
1914}
1915
1916int cmd_timechart(int argc, const char **argv)
1917{
1918	struct timechart tchart = {
1919		.tool = {
1920			.comm		 = process_comm_event,
1921			.fork		 = process_fork_event,
1922			.exit		 = process_exit_event,
1923			.sample		 = process_sample_event,
1924			.ordered_events	 = true,
1925		},
1926		.proc_num = 15,
1927		.min_time = NSEC_PER_MSEC,
1928		.merge_dist = 1000,
1929	};
1930	const char *output_name = "output.svg";
1931	const struct option timechart_common_options[] = {
1932	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1933	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
1934	OPT_END()
1935	};
1936	const struct option timechart_options[] = {
1937	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1938	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1939	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1940	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1941		      "highlight tasks. Pass duration in ns or process name.",
1942		       parse_highlight),
1943	OPT_CALLBACK('p', "process", NULL, "process",
1944		      "process selector. Pass a pid or process name.",
1945		       parse_process),
1946	OPT_CALLBACK(0, "symfs", NULL, "directory",
1947		     "Look for files with symbols relative to this directory",
1948		     symbol__config_symfs),
1949	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1950		    "min. number of tasks to print"),
1951	OPT_BOOLEAN('t', "topology", &tchart.topology,
1952		    "sort CPUs according to topology"),
1953	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
1954		    "skip EAGAIN errors"),
1955	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
1956		     "all IO faster than min-time will visually appear longer",
1957		     parse_time),
1958	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
1959		     "merge events that are merge-dist us apart",
1960		     parse_time),
1961	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
1962	OPT_PARENT(timechart_common_options),
1963	};
1964	const char * const timechart_subcommands[] = { "record", NULL };
1965	const char *timechart_usage[] = {
1966		"perf timechart [<options>] {record}",
1967		NULL
1968	};
1969	const struct option timechart_record_options[] = {
1970	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
1971		    "record only IO data"),
1972	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1973	OPT_PARENT(timechart_common_options),
1974	};
1975	const char * const timechart_record_usage[] = {
1976		"perf timechart record [<options>]",
1977		NULL
1978	};
1979	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
1980			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
1981
1982	if (tchart.power_only && tchart.tasks_only) {
1983		pr_err("-P and -T options cannot be used at the same time.\n");
1984		return -1;
1985	}
1986
1987	if (argc && !strncmp(argv[0], "rec", 3)) {
1988		argc = parse_options(argc, argv, timechart_record_options,
1989				     timechart_record_usage,
1990				     PARSE_OPT_STOP_AT_NON_OPTION);
1991
1992		if (tchart.power_only && tchart.tasks_only) {
1993			pr_err("-P and -T options cannot be used at the same time.\n");
1994			return -1;
1995		}
1996
1997		if (tchart.io_only)
1998			return timechart__io_record(argc, argv);
1999		else
2000			return timechart__record(&tchart, argc, argv);
2001	} else if (argc)
2002		usage_with_options(timechart_usage, timechart_options);
2003
2004	setup_pager();
2005
2006	return __cmd_timechart(&tchart, output_name);
2007}
 211	assert(cursor != NULL);
 212	cursor->pid = pid;
 213	cursor->next = tchart->all_data;
 214	tchart->all_data = cursor;
 215	return cursor;
 216}
 217
 218static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
 219{
 220	struct per_pid *p;
 221	struct per_pidcomm *c;
 222	p = find_create_pid(tchart, pid);
 223	c = p->all;
 224	while (c) {
 225		if (c->comm && strcmp(c->comm, comm) == 0) {
 226			p->current = c;
 227			return;
 228		}
 229		if (!c->comm) {
 230			c->comm = strdup(comm);
 231			p->current = c;
 232			return;
 233		}
 234		c = c->next;
 235	}
 236	c = zalloc(sizeof(*c));
 237	assert(c != NULL);
 238	c->comm = strdup(comm);
 239	p->current = c;
 240	c->next = p->all;
 241	p->all = c;
 242}
 243
 244static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
 245{
 246	struct per_pid *p, *pp;
 247	p = find_create_pid(tchart, pid);
 248	pp = find_create_pid(tchart, ppid);
 249	p->ppid = ppid;
 250	if (pp->current && pp->current->comm && !p->current)
 251		pid_set_comm(tchart, pid, pp->current->comm);
 252
 253	p->start_time = timestamp;
 254	if (p->current && !p->current->start_time) {
 255		p->current->start_time = timestamp;
 256		p->current->state_since = timestamp;
 257	}
 258}
 259
 260static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
 261{
 262	struct per_pid *p;
 263	p = find_create_pid(tchart, pid);
 264	p->end_time = timestamp;
 265	if (p->current)
 266		p->current->end_time = timestamp;
 267}
 268
 269static void pid_put_sample(struct timechart *tchart, int pid, int type,
 270			   unsigned int cpu, u64 start, u64 end,
 271			   const char *backtrace)
 272{
 273	struct per_pid *p;
 274	struct per_pidcomm *c;
 275	struct cpu_sample *sample;
 276
 277	p = find_create_pid(tchart, pid);
 278	c = p->current;
 279	if (!c) {
 280		c = zalloc(sizeof(*c));
 281		assert(c != NULL);
 282		p->current = c;
 283		c->next = p->all;
 284		p->all = c;
 285	}
 286
 287	sample = zalloc(sizeof(*sample));
 288	assert(sample != NULL);
 289	sample->start_time = start;
 290	sample->end_time = end;
 291	sample->type = type;
 292	sample->next = c->samples;
 293	sample->cpu = cpu;
 294	sample->backtrace = backtrace;
 295	c->samples = sample;
 296
 297	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 298		c->total_time += (end-start);
 299		p->total_time += (end-start);
 300	}
 301
 302	if (c->start_time == 0 || c->start_time > start)
 303		c->start_time = start;
 304	if (p->start_time == 0 || p->start_time > start)
 305		p->start_time = start;
 306}
 307
 308#define MAX_CPUS 4096
 309
 310static u64 cpus_cstate_start_times[MAX_CPUS];
 311static int cpus_cstate_state[MAX_CPUS];
 312static u64 cpus_pstate_start_times[MAX_CPUS];
 313static u64 cpus_pstate_state[MAX_CPUS];
 314
 315static int process_comm_event(struct perf_tool *tool,
 316			      union perf_event *event,
 317			      struct perf_sample *sample __maybe_unused,
 318			      struct machine *machine __maybe_unused)
 319{
 320	struct timechart *tchart = container_of(tool, struct timechart, tool);
 321	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
 322	return 0;
 323}
 324
 325static int process_fork_event(struct perf_tool *tool,
 326			      union perf_event *event,
 327			      struct perf_sample *sample __maybe_unused,
 328			      struct machine *machine __maybe_unused)
 329{
 330	struct timechart *tchart = container_of(tool, struct timechart, tool);
 331	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
 332	return 0;
 333}
 334
 335static int process_exit_event(struct perf_tool *tool,
 336			      union perf_event *event,
 337			      struct perf_sample *sample __maybe_unused,
 338			      struct machine *machine __maybe_unused)
 339{
 340	struct timechart *tchart = container_of(tool, struct timechart, tool);
 341	pid_exit(tchart, event->fork.pid, event->fork.time);
 342	return 0;
 343}
 344
 345#ifdef SUPPORT_OLD_POWER_EVENTS
 346static int use_old_power_events;
 347#endif
 348
 349static void c_state_start(int cpu, u64 timestamp, int state)
 350{
 351	cpus_cstate_start_times[cpu] = timestamp;
 352	cpus_cstate_state[cpu] = state;
 353}
 354
 355static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
 356{
 357	struct power_event *pwr = zalloc(sizeof(*pwr));
 358
 359	if (!pwr)
 360		return;
 361
 362	pwr->state = cpus_cstate_state[cpu];
 363	pwr->start_time = cpus_cstate_start_times[cpu];
 364	pwr->end_time = timestamp;
 365	pwr->cpu = cpu;
 366	pwr->type = CSTATE;
 367	pwr->next = tchart->power_events;
 368
 369	tchart->power_events = pwr;
 370}
 371
 372static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
 373{
 374	struct power_event *pwr;
 375
 376	if (new_freq > 8000000) /* detect invalid data */
 377		return;
 378
 379	pwr = zalloc(sizeof(*pwr));
 380	if (!pwr)
 381		return;
 382
 383	pwr->state = cpus_pstate_state[cpu];
 384	pwr->start_time = cpus_pstate_start_times[cpu];
 385	pwr->end_time = timestamp;
 386	pwr->cpu = cpu;
 387	pwr->type = PSTATE;
 388	pwr->next = tchart->power_events;
 389
 390	if (!pwr->start_time)
 391		pwr->start_time = tchart->first_time;
 392
 393	tchart->power_events = pwr;
 394
 395	cpus_pstate_state[cpu] = new_freq;
 396	cpus_pstate_start_times[cpu] = timestamp;
 397
 398	if ((u64)new_freq > tchart->max_freq)
 399		tchart->max_freq = new_freq;
 400
 401	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
 402		tchart->min_freq = new_freq;
 403
 404	if (new_freq == tchart->max_freq - 1000)
 405		tchart->turbo_frequency = tchart->max_freq;
 406}
 407
 408static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
 409			 int waker, int wakee, u8 flags, const char *backtrace)
 410{
 411	struct per_pid *p;
 412	struct wake_event *we = zalloc(sizeof(*we));
 413
 414	if (!we)
 415		return;
 416
 417	we->time = timestamp;
 418	we->waker = waker;
 419	we->backtrace = backtrace;
 420
 421	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 422		we->waker = -1;
 423
 424	we->wakee = wakee;
 425	we->next = tchart->wake_events;
 426	tchart->wake_events = we;
 427	p = find_create_pid(tchart, we->wakee);
 428
 429	if (p && p->current && p->current->state == TYPE_NONE) {
 430		p->current->state_since = timestamp;
 431		p->current->state = TYPE_WAITING;
 432	}
 433	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 434		pid_put_sample(tchart, p->pid, p->current->state, cpu,
 435			       p->current->state_since, timestamp, NULL);
 436		p->current->state_since = timestamp;
 437		p->current->state = TYPE_WAITING;
 438	}
 439}
 440
 441static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
 442			 int prev_pid, int next_pid, u64 prev_state,
 443			 const char *backtrace)
 444{
 445	struct per_pid *p = NULL, *prev_p;
 446
 447	prev_p = find_create_pid(tchart, prev_pid);
 448
 449	p = find_create_pid(tchart, next_pid);
 450
 451	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 452		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
 453			       prev_p->current->state_since, timestamp,
 454			       backtrace);
 455	if (p && p->current) {
 456		if (p->current->state != TYPE_NONE)
 457			pid_put_sample(tchart, next_pid, p->current->state, cpu,
 458				       p->current->state_since, timestamp,
 459				       backtrace);
 460
 461		p->current->state_since = timestamp;
 462		p->current->state = TYPE_RUNNING;
 463	}
 464
 465	if (prev_p->current) {
 466		prev_p->current->state = TYPE_NONE;
 467		prev_p->current->state_since = timestamp;
 468		if (prev_state & 2)
 469			prev_p->current->state = TYPE_BLOCKED;
 470		if (prev_state == 0)
 471			prev_p->current->state = TYPE_WAITING;
 472	}
 473}
 474
 475static const char *cat_backtrace(union perf_event *event,
 476				 struct perf_sample *sample,
 477				 struct machine *machine)
 478{
 479	struct addr_location al;
 480	unsigned int i;
 481	char *p = NULL;
 482	size_t p_len;
 483	u8 cpumode = PERF_RECORD_MISC_USER;
 484	struct addr_location tal;
 485	struct ip_callchain *chain = sample->callchain;
 486	FILE *f = open_memstream(&p, &p_len);
 487
 488	if (!f) {
 489		perror("open_memstream error");
 490		return NULL;
 491	}
 492
 493	if (!chain)
 494		goto exit;
 495
 496	if (machine__resolve(machine, &al, sample) < 0) {
 497		fprintf(stderr, "problem processing %d event, skipping it.\n",
 498			event->header.type);
 499		goto exit;
 500	}
 501
 502	for (i = 0; i < chain->nr; i++) {
 503		u64 ip;
 504
 505		if (callchain_param.order == ORDER_CALLEE)
 506			ip = chain->ips[i];
 507		else
 508			ip = chain->ips[chain->nr - i - 1];
 509
 510		if (ip >= PERF_CONTEXT_MAX) {
 511			switch (ip) {
 512			case PERF_CONTEXT_HV:
 513				cpumode = PERF_RECORD_MISC_HYPERVISOR;
 514				break;
 515			case PERF_CONTEXT_KERNEL:
 516				cpumode = PERF_RECORD_MISC_KERNEL;
 517				break;
 518			case PERF_CONTEXT_USER:
 519				cpumode = PERF_RECORD_MISC_USER;
 520				break;
 521			default:
 522				pr_debug("invalid callchain context: "
 523					 "%"PRId64"\n", (s64) ip);
 524
 525				/*
 526				 * It seems the callchain is corrupted.
 527				 * Discard all.
 528				 */
 529				zfree(&p);
 530				goto exit_put;
 531			}
 532			continue;
 533		}
 534
 535		tal.filtered = 0;
 536		thread__find_addr_location(al.thread, cpumode,
 537					   MAP__FUNCTION, ip, &tal);
 538
 539		if (tal.sym)
 540			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
 541				tal.sym->name);
 542		else
 543			fprintf(f, "..... %016" PRIx64 "\n", ip);
 544	}
 545exit_put:
 546	addr_location__put(&al);
 547exit:
 548	fclose(f);
 549
 550	return p;
 551}
 552
 553typedef int (*tracepoint_handler)(struct timechart *tchart,
 554				  struct perf_evsel *evsel,
 555				  struct perf_sample *sample,
 556				  const char *backtrace);
 557
 558static int process_sample_event(struct perf_tool *tool,
 559				union perf_event *event,
 560				struct perf_sample *sample,
 561				struct perf_evsel *evsel,
 562				struct machine *machine)
 563{
 564	struct timechart *tchart = container_of(tool, struct timechart, tool);
 565
 566	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
 567		if (!tchart->first_time || tchart->first_time > sample->time)
 568			tchart->first_time = sample->time;
 569		if (tchart->last_time < sample->time)
 570			tchart->last_time = sample->time;
 571	}
 572
 573	if (evsel->handler != NULL) {
 574		tracepoint_handler f = evsel->handler;
 575		return f(tchart, evsel, sample,
 576			 cat_backtrace(event, sample, machine));
 577	}
 578
 579	return 0;
 580}
 581
 582static int
 583process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
 584			struct perf_evsel *evsel,
 585			struct perf_sample *sample,
 586			const char *backtrace __maybe_unused)
 587{
 588	u32 state = perf_evsel__intval(evsel, sample, "state");
 589	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 590
 591	if (state == (u32)PWR_EVENT_EXIT)
 592		c_state_end(tchart, cpu_id, sample->time);
 593	else
 594		c_state_start(cpu_id, sample->time, state);
 595	return 0;
 596}
 597
 598static int
 599process_sample_cpu_frequency(struct timechart *tchart,
 600			     struct perf_evsel *evsel,
 601			     struct perf_sample *sample,
 602			     const char *backtrace __maybe_unused)
 603{
 604	u32 state = perf_evsel__intval(evsel, sample, "state");
 605	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 606
 607	p_state_change(tchart, cpu_id, sample->time, state);
 608	return 0;
 609}
 610
 611static int
 612process_sample_sched_wakeup(struct timechart *tchart,
 613			    struct perf_evsel *evsel,
 614			    struct perf_sample *sample,
 615			    const char *backtrace)
 616{
 617	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
 618	int waker = perf_evsel__intval(evsel, sample, "common_pid");
 619	int wakee = perf_evsel__intval(evsel, sample, "pid");
 620
 621	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
 622	return 0;
 623}
 624
 625static int
 626process_sample_sched_switch(struct timechart *tchart,
 627			    struct perf_evsel *evsel,
 628			    struct perf_sample *sample,
 629			    const char *backtrace)
 630{
 631	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 632	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 633	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 634
 635	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
 636		     prev_state, backtrace);
 637	return 0;
 638}
 639
 640#ifdef SUPPORT_OLD_POWER_EVENTS
 641static int
 642process_sample_power_start(struct timechart *tchart __maybe_unused,
 643			   struct perf_evsel *evsel,
 644			   struct perf_sample *sample,
 645			   const char *backtrace __maybe_unused)
 646{
 647	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 648	u64 value = perf_evsel__intval(evsel, sample, "value");
 649
 650	c_state_start(cpu_id, sample->time, value);
 651	return 0;
 652}
 653
 654static int
 655process_sample_power_end(struct timechart *tchart,
 656			 struct perf_evsel *evsel __maybe_unused,
 657			 struct perf_sample *sample,
 658			 const char *backtrace __maybe_unused)
 659{
 660	c_state_end(tchart, sample->cpu, sample->time);
 661	return 0;
 662}
 663
 664static int
 665process_sample_power_frequency(struct timechart *tchart,
 666			       struct perf_evsel *evsel,
 667			       struct perf_sample *sample,
 668			       const char *backtrace __maybe_unused)
 669{
 670	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 671	u64 value = perf_evsel__intval(evsel, sample, "value");
 672
 673	p_state_change(tchart, cpu_id, sample->time, value);
 674	return 0;
 675}
 676#endif /* SUPPORT_OLD_POWER_EVENTS */
 677
 678/*
 679 * After the last sample we need to wrap up the current C/P state
 680 * and close out each CPU for these.
 681 */
 682static void end_sample_processing(struct timechart *tchart)
 683{
 684	u64 cpu;
 685	struct power_event *pwr;
 686
 687	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
 688		/* C state */
 689#if 0
 690		pwr = zalloc(sizeof(*pwr));
 691		if (!pwr)
 692			return;
 693
 694		pwr->state = cpus_cstate_state[cpu];
 695		pwr->start_time = cpus_cstate_start_times[cpu];
 696		pwr->end_time = tchart->last_time;
 697		pwr->cpu = cpu;
 698		pwr->type = CSTATE;
 699		pwr->next = tchart->power_events;
 700
 701		tchart->power_events = pwr;
 702#endif
 703		/* P state */
 704
 705		pwr = zalloc(sizeof(*pwr));
 706		if (!pwr)
 707			return;
 708
 709		pwr->state = cpus_pstate_state[cpu];
 710		pwr->start_time = cpus_pstate_start_times[cpu];
 711		pwr->end_time = tchart->last_time;
 712		pwr->cpu = cpu;
 713		pwr->type = PSTATE;
 714		pwr->next = tchart->power_events;
 715
 716		if (!pwr->start_time)
 717			pwr->start_time = tchart->first_time;
 718		if (!pwr->state)
 719			pwr->state = tchart->min_freq;
 720		tchart->power_events = pwr;
 721	}
 722}
 723
 724static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
 725			       u64 start, int fd)
 726{
 727	struct per_pid *p = find_create_pid(tchart, pid);
 728	struct per_pidcomm *c = p->current;
 729	struct io_sample *sample;
 730	struct io_sample *prev;
 731
 732	if (!c) {
 733		c = zalloc(sizeof(*c));
 734		if (!c)
 735			return -ENOMEM;
 736		p->current = c;
 737		c->next = p->all;
 738		p->all = c;
 739	}
 740
 741	prev = c->io_samples;
 742
 743	if (prev && prev->start_time && !prev->end_time) {
 744		pr_warning("Skip invalid start event: "
 745			   "previous event already started!\n");
 746
 747		/* remove previous event that has been started,
 748		 * we are not sure we will ever get an end for it */
 749		c->io_samples = prev->next;
 750		free(prev);
 751		return 0;
 752	}
 753
 754	sample = zalloc(sizeof(*sample));
 755	if (!sample)
 756		return -ENOMEM;
 757	sample->start_time = start;
 758	sample->type = type;
 759	sample->fd = fd;
 760	sample->next = c->io_samples;
 761	c->io_samples = sample;
 762
 763	if (c->start_time == 0 || c->start_time > start)
 764		c->start_time = start;
 765
 766	return 0;
 767}
 768
 769static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
 770			     u64 end, long ret)
 771{
 772	struct per_pid *p = find_create_pid(tchart, pid);
 773	struct per_pidcomm *c = p->current;
 774	struct io_sample *sample, *prev;
 775
 776	if (!c) {
 777		pr_warning("Invalid pidcomm!\n");
 778		return -1;
 779	}
 780
 781	sample = c->io_samples;
 782
 783	if (!sample) /* skip partially captured events */
 784		return 0;
 785
 786	if (sample->end_time) {
 787		pr_warning("Skip invalid end event: "
 788			   "previous event already ended!\n");
 789		return 0;
 790	}
 791
 792	if (sample->type != type) {
 793		pr_warning("Skip invalid end event: invalid event type!\n");
 794		return 0;
 795	}
 796
 797	sample->end_time = end;
 798	prev = sample->next;
 799
 800	/* we want to be able to see small and fast transfers, so make them
 801	 * at least min_time long, but don't overlap them */
 802	if (sample->end_time - sample->start_time < tchart->min_time)
 803		sample->end_time = sample->start_time + tchart->min_time;
 804	if (prev && sample->start_time < prev->end_time) {
 805		if (prev->err) /* try to make errors more visible */
 806			sample->start_time = prev->end_time;
 807		else
 808			prev->end_time = sample->start_time;
 809	}
 810
 811	if (ret < 0) {
 812		sample->err = ret;
 813	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
 814		   type == IOTYPE_TX || type == IOTYPE_RX) {
 815
 816		if ((u64)ret > c->max_bytes)
 817			c->max_bytes = ret;
 818
 819		c->total_bytes += ret;
 820		p->total_bytes += ret;
 821		sample->bytes = ret;
 822	}
 823
 824	/* merge two requests to make svg smaller and render-friendly */
 825	if (prev &&
 826	    prev->type == sample->type &&
 827	    prev->err == sample->err &&
 828	    prev->fd == sample->fd &&
 829	    prev->end_time + tchart->merge_dist >= sample->start_time) {
 830
 831		sample->bytes += prev->bytes;
 832		sample->merges += prev->merges + 1;
 833
 834		sample->start_time = prev->start_time;
 835		sample->next = prev->next;
 836		free(prev);
 837
 838		if (!sample->err && sample->bytes > c->max_bytes)
 839			c->max_bytes = sample->bytes;
 840	}
 841
 842	tchart->io_events++;
 843
 844	return 0;
 845}
 846
 847static int
 848process_enter_read(struct timechart *tchart,
 849		   struct perf_evsel *evsel,
 850		   struct perf_sample *sample)
 851{
 852	long fd = perf_evsel__intval(evsel, sample, "fd");
 853	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
 854				   sample->time, fd);
 855}
 856
 857static int
 858process_exit_read(struct timechart *tchart,
 859		  struct perf_evsel *evsel,
 860		  struct perf_sample *sample)
 861{
 862	long ret = perf_evsel__intval(evsel, sample, "ret");
 863	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
 864				 sample->time, ret);
 865}
 866
 867static int
 868process_enter_write(struct timechart *tchart,
 869		    struct perf_evsel *evsel,
 870		    struct perf_sample *sample)
 871{
 872	long fd = perf_evsel__intval(evsel, sample, "fd");
 873	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 874				   sample->time, fd);
 875}
 876
 877static int
 878process_exit_write(struct timechart *tchart,
 879		   struct perf_evsel *evsel,
 880		   struct perf_sample *sample)
 881{
 882	long ret = perf_evsel__intval(evsel, sample, "ret");
 883	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 884				 sample->time, ret);
 885}
 886
 887static int
 888process_enter_sync(struct timechart *tchart,
 889		   struct perf_evsel *evsel,
 890		   struct perf_sample *sample)
 891{
 892	long fd = perf_evsel__intval(evsel, sample, "fd");
 893	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 894				   sample->time, fd);
 895}
 896
 897static int
 898process_exit_sync(struct timechart *tchart,
 899		  struct perf_evsel *evsel,
 900		  struct perf_sample *sample)
 901{
 902	long ret = perf_evsel__intval(evsel, sample, "ret");
 903	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 904				 sample->time, ret);
 905}
 906
 907static int
 908process_enter_tx(struct timechart *tchart,
 909		 struct perf_evsel *evsel,
 910		 struct perf_sample *sample)
 911{
 912	long fd = perf_evsel__intval(evsel, sample, "fd");
 913	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
 914				   sample->time, fd);
 915}
 916
 917static int
 918process_exit_tx(struct timechart *tchart,
 919		struct perf_evsel *evsel,
 920		struct perf_sample *sample)
 921{
 922	long ret = perf_evsel__intval(evsel, sample, "ret");
 923	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
 924				 sample->time, ret);
 925}
 926
 927static int
 928process_enter_rx(struct timechart *tchart,
 929		 struct perf_evsel *evsel,
 930		 struct perf_sample *sample)
 931{
 932	long fd = perf_evsel__intval(evsel, sample, "fd");
 933	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
 934				   sample->time, fd);
 935}
 936
 937static int
 938process_exit_rx(struct timechart *tchart,
 939		struct perf_evsel *evsel,
 940		struct perf_sample *sample)
 941{
 942	long ret = perf_evsel__intval(evsel, sample, "ret");
 943	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
 944				 sample->time, ret);
 945}
 946
 947static int
 948process_enter_poll(struct timechart *tchart,
 949		   struct perf_evsel *evsel,
 950		   struct perf_sample *sample)
 951{
 952	long fd = perf_evsel__intval(evsel, sample, "fd");
 953	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
 954				   sample->time, fd);
 955}
 956
 957static int
 958process_exit_poll(struct timechart *tchart,
 959		  struct perf_evsel *evsel,
 960		  struct perf_sample *sample)
 961{
 962	long ret = perf_evsel__intval(evsel, sample, "ret");
 963	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
 964				 sample->time, ret);
 965}
 966
 967/*
 968 * Sort the pid datastructure
 969 */
 970static void sort_pids(struct timechart *tchart)
 971{
 972	struct per_pid *new_list, *p, *cursor, *prev;
 973	/* sort by ppid first, then by pid, lowest to highest */
 974
 975	new_list = NULL;
 976
 977	while (tchart->all_data) {
 978		p = tchart->all_data;
 979		tchart->all_data = p->next;
 980		p->next = NULL;
 981
 982		if (new_list == NULL) {
 983			new_list = p;
 984			p->next = NULL;
 985			continue;
 986		}
 987		prev = NULL;
 988		cursor = new_list;
 989		while (cursor) {
 990			if (cursor->ppid > p->ppid ||
 991				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 992				/* must insert before */
 993				if (prev) {
 994					p->next = prev->next;
 995					prev->next = p;
 996					cursor = NULL;
 997					continue;
 998				} else {
 999					p->next = new_list;
1000					new_list = p;
1001					cursor = NULL;
1002					continue;
1003				}
1004			}
1005
1006			prev = cursor;
1007			cursor = cursor->next;
1008			if (!cursor)
1009				prev->next = p;
1010		}
1011	}
1012	tchart->all_data = new_list;
1013}
1014
1015
1016static void draw_c_p_states(struct timechart *tchart)
1017{
1018	struct power_event *pwr;
1019	pwr = tchart->power_events;
1020
1021	/*
1022	 * two pass drawing so that the P state bars are on top of the C state blocks
1023	 */
1024	while (pwr) {
1025		if (pwr->type == CSTATE)
1026			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1027		pwr = pwr->next;
1028	}
1029
1030	pwr = tchart->power_events;
1031	while (pwr) {
1032		if (pwr->type == PSTATE) {
1033			if (!pwr->state)
1034				pwr->state = tchart->min_freq;
1035			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1036		}
1037		pwr = pwr->next;
1038	}
1039}
1040
1041static void draw_wakeups(struct timechart *tchart)
1042{
1043	struct wake_event *we;
1044	struct per_pid *p;
1045	struct per_pidcomm *c;
1046
1047	we = tchart->wake_events;
1048	while (we) {
1049		int from = 0, to = 0;
1050		char *task_from = NULL, *task_to = NULL;
1051
1052		/* locate the column of the waker and wakee */
1053		p = tchart->all_data;
1054		while (p) {
1055			if (p->pid == we->waker || p->pid == we->wakee) {
1056				c = p->all;
1057				while (c) {
1058					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
1059						if (p->pid == we->waker && !from) {
1060							from = c->Y;
1061							task_from = strdup(c->comm);
1062						}
1063						if (p->pid == we->wakee && !to) {
1064							to = c->Y;
1065							task_to = strdup(c->comm);
1066						}
1067					}
1068					c = c->next;
1069				}
1070				c = p->all;
1071				while (c) {
1072					if (p->pid == we->waker && !from) {
1073						from = c->Y;
1074						task_from = strdup(c->comm);
1075					}
1076					if (p->pid == we->wakee && !to) {
1077						to = c->Y;
1078						task_to = strdup(c->comm);
1079					}
1080					c = c->next;
1081				}
1082			}
1083			p = p->next;
1084		}
1085
1086		if (!task_from) {
1087			task_from = malloc(40);
1088			sprintf(task_from, "[%i]", we->waker);
1089		}
1090		if (!task_to) {
1091			task_to = malloc(40);
1092			sprintf(task_to, "[%i]", we->wakee);
1093		}
1094
1095		if (we->waker == -1)
1096			svg_interrupt(we->time, to, we->backtrace);
1097		else if (from && to && abs(from - to) == 1)
1098			svg_wakeline(we->time, from, to, we->backtrace);
1099		else
1100			svg_partial_wakeline(we->time, from, task_from, to,
1101					     task_to, we->backtrace);
1102		we = we->next;
1103
1104		free(task_from);
1105		free(task_to);
1106	}
1107}
1108
1109static void draw_cpu_usage(struct timechart *tchart)
1110{
1111	struct per_pid *p;
1112	struct per_pidcomm *c;
1113	struct cpu_sample *sample;
1114	p = tchart->all_data;
1115	while (p) {
1116		c = p->all;
1117		while (c) {
1118			sample = c->samples;
1119			while (sample) {
1120				if (sample->type == TYPE_RUNNING) {
1121					svg_process(sample->cpu,
1122						    sample->start_time,
1123						    sample->end_time,
1124						    p->pid,
1125						    c->comm,
1126						    sample->backtrace);
1127				}
1128
1129				sample = sample->next;
1130			}
1131			c = c->next;
1132		}
1133		p = p->next;
1134	}
1135}
1136
1137static void draw_io_bars(struct timechart *tchart)
1138{
1139	const char *suf;
1140	double bytes;
1141	char comm[256];
1142	struct per_pid *p;
1143	struct per_pidcomm *c;
1144	struct io_sample *sample;
1145	int Y = 1;
1146
1147	p = tchart->all_data;
1148	while (p) {
1149		c = p->all;
1150		while (c) {
1151			if (!c->display) {
1152				c->Y = 0;
1153				c = c->next;
1154				continue;
1155			}
1156
1157			svg_box(Y, c->start_time, c->end_time, "process3");
1158			sample = c->io_samples;
1159			for (sample = c->io_samples; sample; sample = sample->next) {
1160				double h = (double)sample->bytes / c->max_bytes;
1161
1162				if (tchart->skip_eagain &&
1163				    sample->err == -EAGAIN)
1164					continue;
1165
1166				if (sample->err)
1167					h = 1;
1168
1169				if (sample->type == IOTYPE_SYNC)
1170					svg_fbox(Y,
1171						sample->start_time,
1172						sample->end_time,
1173						1,
1174						sample->err ? "error" : "sync",
1175						sample->fd,
1176						sample->err,
1177						sample->merges);
1178				else if (sample->type == IOTYPE_POLL)
1179					svg_fbox(Y,
1180						sample->start_time,
1181						sample->end_time,
1182						1,
1183						sample->err ? "error" : "poll",
1184						sample->fd,
1185						sample->err,
1186						sample->merges);
1187				else if (sample->type == IOTYPE_READ)
1188					svg_ubox(Y,
1189						sample->start_time,
1190						sample->end_time,
1191						h,
1192						sample->err ? "error" : "disk",
1193						sample->fd,
1194						sample->err,
1195						sample->merges);
1196				else if (sample->type == IOTYPE_WRITE)
1197					svg_lbox(Y,
1198						sample->start_time,
1199						sample->end_time,
1200						h,
1201						sample->err ? "error" : "disk",
1202						sample->fd,
1203						sample->err,
1204						sample->merges);
1205				else if (sample->type == IOTYPE_RX)
1206					svg_ubox(Y,
1207						sample->start_time,
1208						sample->end_time,
1209						h,
1210						sample->err ? "error" : "net",
1211						sample->fd,
1212						sample->err,
1213						sample->merges);
1214				else if (sample->type == IOTYPE_TX)
1215					svg_lbox(Y,
1216						sample->start_time,
1217						sample->end_time,
1218						h,
1219						sample->err ? "error" : "net",
1220						sample->fd,
1221						sample->err,
1222						sample->merges);
1223			}
1224
1225			suf = "";
1226			bytes = c->total_bytes;
1227			if (bytes > 1024) {
1228				bytes = bytes / 1024;
1229				suf = "K";
1230			}
1231			if (bytes > 1024) {
1232				bytes = bytes / 1024;
1233				suf = "M";
1234			}
1235			if (bytes > 1024) {
1236				bytes = bytes / 1024;
1237				suf = "G";
1238			}
1239
1240
1241			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
1242			svg_text(Y, c->start_time, comm);
1243
1244			c->Y = Y;
1245			Y++;
1246			c = c->next;
1247		}
1248		p = p->next;
1249	}
1250}
1251
1252static void draw_process_bars(struct timechart *tchart)
1253{
1254	struct per_pid *p;
1255	struct per_pidcomm *c;
1256	struct cpu_sample *sample;
1257	int Y = 0;
1258
1259	Y = 2 * tchart->numcpus + 2;
1260
1261	p = tchart->all_data;
1262	while (p) {
1263		c = p->all;
1264		while (c) {
1265			if (!c->display) {
1266				c->Y = 0;
1267				c = c->next;
1268				continue;
1269			}
1270
1271			svg_box(Y, c->start_time, c->end_time, "process");
1272			sample = c->samples;
1273			while (sample) {
1274				if (sample->type == TYPE_RUNNING)
1275					svg_running(Y, sample->cpu,
1276						    sample->start_time,
1277						    sample->end_time,
1278						    sample->backtrace);
1279				if (sample->type == TYPE_BLOCKED)
1280					svg_blocked(Y, sample->cpu,
1281						    sample->start_time,
1282						    sample->end_time,
1283						    sample->backtrace);
1284				if (sample->type == TYPE_WAITING)
1285					svg_waiting(Y, sample->cpu,
1286						    sample->start_time,
1287						    sample->end_time,
1288						    sample->backtrace);
1289				sample = sample->next;
1290			}
1291
1292			if (c->comm) {
1293				char comm[256];
1294				if (c->total_time > 5000000000) /* 5 seconds */
1295					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
1296				else
1297					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
1298
1299				svg_text(Y, c->start_time, comm);
1300			}
1301			c->Y = Y;
1302			Y++;
1303			c = c->next;
1304		}
1305		p = p->next;
1306	}
1307}
1308
1309static void add_process_filter(const char *string)
1310{
1311	int pid = strtoull(string, NULL, 10);
1312	struct process_filter *filt = malloc(sizeof(*filt));
1313
1314	if (!filt)
1315		return;
1316
1317	filt->name = strdup(string);
1318	filt->pid  = pid;
1319	filt->next = process_filter;
1320
1321	process_filter = filt;
1322}
1323
1324static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
1325{
1326	struct process_filter *filt;
1327	if (!process_filter)
1328		return 1;
1329
1330	filt = process_filter;
1331	while (filt) {
1332		if (filt->pid && p->pid == filt->pid)
1333			return 1;
1334		if (strcmp(filt->name, c->comm) == 0)
1335			return 1;
1336		filt = filt->next;
1337	}
1338	return 0;
1339}
1340
1341static int determine_display_tasks_filtered(struct timechart *tchart)
1342{
1343	struct per_pid *p;
1344	struct per_pidcomm *c;
1345	int count = 0;
1346
1347	p = tchart->all_data;
1348	while (p) {
1349		p->display = 0;
1350		if (p->start_time == 1)
1351			p->start_time = tchart->first_time;
1352
1353		/* no exit marker, task kept running to the end */
1354		if (p->end_time == 0)
1355			p->end_time = tchart->last_time;
1356
1357		c = p->all;
1358
1359		while (c) {
1360			c->display = 0;
1361
1362			if (c->start_time == 1)
1363				c->start_time = tchart->first_time;
1364
1365			if (passes_filter(p, c)) {
1366				c->display = 1;
1367				p->display = 1;
1368				count++;
1369			}
1370
1371			if (c->end_time == 0)
1372				c->end_time = tchart->last_time;
1373
1374			c = c->next;
1375		}
1376		p = p->next;
1377	}
1378	return count;
1379}
1380
1381static int determine_display_tasks(struct timechart *tchart, u64 threshold)
1382{
1383	struct per_pid *p;
1384	struct per_pidcomm *c;
1385	int count = 0;
1386
1387	p = tchart->all_data;
1388	while (p) {
1389		p->display = 0;
1390		if (p->start_time == 1)
1391			p->start_time = tchart->first_time;
1392
1393		/* no exit marker, task kept running to the end */
1394		if (p->end_time == 0)
1395			p->end_time = tchart->last_time;
1396		if (p->total_time >= threshold)
1397			p->display = 1;
1398
1399		c = p->all;
1400
1401		while (c) {
1402			c->display = 0;
1403
1404			if (c->start_time == 1)
1405				c->start_time = tchart->first_time;
1406
1407			if (c->total_time >= threshold) {
1408				c->display = 1;
1409				count++;
1410			}
1411
1412			if (c->end_time == 0)
1413				c->end_time = tchart->last_time;
1414
1415			c = c->next;
1416		}
1417		p = p->next;
1418	}
1419	return count;
1420}
1421
1422static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
1423{
1424	struct per_pid *p;
1425	struct per_pidcomm *c;
1426	int count = 0;
1427
1428	p = timechart->all_data;
1429	while (p) {
1430		/* no exit marker, task kept running to the end */
1431		if (p->end_time == 0)
1432			p->end_time = timechart->last_time;
1433
1434		c = p->all;
1435
1436		while (c) {
1437			c->display = 0;
1438
1439			if (c->total_bytes >= threshold) {
1440				c->display = 1;
1441				count++;
1442			}
1443
1444			if (c->end_time == 0)
1445				c->end_time = timechart->last_time;
1446
1447			c = c->next;
1448		}
1449		p = p->next;
1450	}
1451	return count;
1452}
1453
1454#define BYTES_THRESH (1 * 1024 * 1024)
1455#define TIME_THRESH 10000000
1456
1457static void write_svg_file(struct timechart *tchart, const char *filename)
1458{
1459	u64 i;
1460	int count;
1461	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
1462
1463	if (tchart->power_only)
1464		tchart->proc_num = 0;
1465
1466	/* We'd like to show at least proc_num tasks;
1467	 * be less picky if we have fewer */
1468	do {
1469		if (process_filter)
1470			count = determine_display_tasks_filtered(tchart);
1471		else if (tchart->io_events)
1472			count = determine_display_io_tasks(tchart, thresh);
1473		else
1474			count = determine_display_tasks(tchart, thresh);
1475		thresh /= 10;
1476	} while (!process_filter && thresh && count < tchart->proc_num);
1477
1478	if (!tchart->proc_num)
1479		count = 0;
1480
1481	if (tchart->io_events) {
1482		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
1483
1484		svg_time_grid(0.5);
1485		svg_io_legenda();
1486
1487		draw_io_bars(tchart);
1488	} else {
1489		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1490
1491		svg_time_grid(0);
1492
1493		svg_legenda();
1494
1495		for (i = 0; i < tchart->numcpus; i++)
1496			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1497
1498		draw_cpu_usage(tchart);
1499		if (tchart->proc_num)
1500			draw_process_bars(tchart);
1501		if (!tchart->tasks_only)
1502			draw_c_p_states(tchart);
1503		if (tchart->proc_num)
1504			draw_wakeups(tchart);
1505	}
1506
1507	svg_close();
1508}
1509
1510static int process_header(struct perf_file_section *section __maybe_unused,
1511			  struct perf_header *ph,
1512			  int feat,
1513			  int fd __maybe_unused,
1514			  void *data)
1515{
1516	struct timechart *tchart = data;
1517
1518	switch (feat) {
1519	case HEADER_NRCPUS:
1520		tchart->numcpus = ph->env.nr_cpus_avail;
1521		break;
1522
1523	case HEADER_CPU_TOPOLOGY:
1524		if (!tchart->topology)
1525			break;
1526
1527		if (svg_build_topology_map(ph->env.sibling_cores,
1528					   ph->env.nr_sibling_cores,
1529					   ph->env.sibling_threads,
1530					   ph->env.nr_sibling_threads))
1531			fprintf(stderr, "problem building topology\n");
1532		break;
1533
1534	default:
1535		break;
1536	}
1537
1538	return 0;
1539}
1540
1541static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1542{
1543	const struct perf_evsel_str_handler power_tracepoints[] = {
1544		{ "power:cpu_idle",		process_sample_cpu_idle },
1545		{ "power:cpu_frequency",	process_sample_cpu_frequency },
1546		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
1547		{ "sched:sched_switch",		process_sample_sched_switch },
1548#ifdef SUPPORT_OLD_POWER_EVENTS
1549		{ "power:power_start",		process_sample_power_start },
1550		{ "power:power_end",		process_sample_power_end },
1551		{ "power:power_frequency",	process_sample_power_frequency },
1552#endif
1553
1554		{ "syscalls:sys_enter_read",		process_enter_read },
1555		{ "syscalls:sys_enter_pread64",		process_enter_read },
1556		{ "syscalls:sys_enter_readv",		process_enter_read },
1557		{ "syscalls:sys_enter_preadv",		process_enter_read },
1558		{ "syscalls:sys_enter_write",		process_enter_write },
1559		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
1560		{ "syscalls:sys_enter_writev",		process_enter_write },
1561		{ "syscalls:sys_enter_pwritev",		process_enter_write },
1562		{ "syscalls:sys_enter_sync",		process_enter_sync },
1563		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
1564		{ "syscalls:sys_enter_fsync",		process_enter_sync },
1565		{ "syscalls:sys_enter_msync",		process_enter_sync },
1566		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
1567		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
1568		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
1569		{ "syscalls:sys_enter_sendto",		process_enter_tx },
1570		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
1571		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
1572		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
1573		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
1574		{ "syscalls:sys_enter_poll",		process_enter_poll },
1575		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
1576		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
1577		{ "syscalls:sys_enter_select",		process_enter_poll },
1578
1579		{ "syscalls:sys_exit_read",		process_exit_read },
1580		{ "syscalls:sys_exit_pread64",		process_exit_read },
1581		{ "syscalls:sys_exit_readv",		process_exit_read },
1582		{ "syscalls:sys_exit_preadv",		process_exit_read },
1583		{ "syscalls:sys_exit_write",		process_exit_write },
1584		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
1585		{ "syscalls:sys_exit_writev",		process_exit_write },
1586		{ "syscalls:sys_exit_pwritev",		process_exit_write },
1587		{ "syscalls:sys_exit_sync",		process_exit_sync },
1588		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
1589		{ "syscalls:sys_exit_fsync",		process_exit_sync },
1590		{ "syscalls:sys_exit_msync",		process_exit_sync },
1591		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
1592		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
1593		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
1594		{ "syscalls:sys_exit_sendto",		process_exit_tx },
1595		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
1596		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
1597		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
1598		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
1599		{ "syscalls:sys_exit_poll",		process_exit_poll },
1600		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
1601		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
1602		{ "syscalls:sys_exit_select",		process_exit_poll },
1603	};
1604	struct perf_data data = {
1605		.file      = {
1606			.path = input_name,
1607		},
1608		.mode      = PERF_DATA_MODE_READ,
1609		.force     = tchart->force,
1610	};
1611
1612	struct perf_session *session = perf_session__new(&data, false,
1613							 &tchart->tool);
1614	int ret = -EINVAL;
1615
1616	if (session == NULL)
1617		return -1;
1618
1619	symbol__init(&session->header.env);
1620
1621	(void)perf_header__process_sections(&session->header,
1622					    perf_data__fd(session->data),
1623					    tchart,
1624					    process_header);
1625
1626	if (!perf_session__has_traces(session, "timechart record"))
1627		goto out_delete;
1628
1629	if (perf_session__set_tracepoints_handlers(session,
1630						   power_tracepoints)) {
1631		pr_err("Initializing session tracepoint handlers failed\n");
1632		goto out_delete;
1633	}
1634
1635	ret = perf_session__process_events(session);
1636	if (ret)
1637		goto out_delete;
1638
1639	end_sample_processing(tchart);
1640
1641	sort_pids(tchart);
1642
1643	write_svg_file(tchart, output_name);
1644
1645	pr_info("Written %2.1f seconds of trace to %s.\n",
1646		(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
1647out_delete:
1648	perf_session__delete(session);
1649	return ret;
1650}
1651
1652static int timechart__io_record(int argc, const char **argv)
1653{
1654	unsigned int rec_argc, i;
1655	const char **rec_argv;
1656	const char **p;
1657	char *filter = NULL;
1658
1659	const char * const common_args[] = {
1660		"record", "-a", "-R", "-c", "1",
1661	};
1662	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1663
1664	const char * const disk_events[] = {
1665		"syscalls:sys_enter_read",
1666		"syscalls:sys_enter_pread64",
1667		"syscalls:sys_enter_readv",
1668		"syscalls:sys_enter_preadv",
1669		"syscalls:sys_enter_write",
1670		"syscalls:sys_enter_pwrite64",
1671		"syscalls:sys_enter_writev",
1672		"syscalls:sys_enter_pwritev",
1673		"syscalls:sys_enter_sync",
1674		"syscalls:sys_enter_sync_file_range",
1675		"syscalls:sys_enter_fsync",
1676		"syscalls:sys_enter_msync",
1677
1678		"syscalls:sys_exit_read",
1679		"syscalls:sys_exit_pread64",
1680		"syscalls:sys_exit_readv",
1681		"syscalls:sys_exit_preadv",
1682		"syscalls:sys_exit_write",
1683		"syscalls:sys_exit_pwrite64",
1684		"syscalls:sys_exit_writev",
1685		"syscalls:sys_exit_pwritev",
1686		"syscalls:sys_exit_sync",
1687		"syscalls:sys_exit_sync_file_range",
1688		"syscalls:sys_exit_fsync",
1689		"syscalls:sys_exit_msync",
1690	};
1691	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
1692
1693	const char * const net_events[] = {
1694		"syscalls:sys_enter_recvfrom",
1695		"syscalls:sys_enter_recvmmsg",
1696		"syscalls:sys_enter_recvmsg",
1697		"syscalls:sys_enter_sendto",
1698		"syscalls:sys_enter_sendmsg",
1699		"syscalls:sys_enter_sendmmsg",
1700
1701		"syscalls:sys_exit_recvfrom",
1702		"syscalls:sys_exit_recvmmsg",
1703		"syscalls:sys_exit_recvmsg",
1704		"syscalls:sys_exit_sendto",
1705		"syscalls:sys_exit_sendmsg",
1706		"syscalls:sys_exit_sendmmsg",
1707	};
1708	unsigned int net_events_nr = ARRAY_SIZE(net_events);
1709
1710	const char * const poll_events[] = {
1711		"syscalls:sys_enter_epoll_pwait",
1712		"syscalls:sys_enter_epoll_wait",
1713		"syscalls:sys_enter_poll",
1714		"syscalls:sys_enter_ppoll",
1715		"syscalls:sys_enter_pselect6",
1716		"syscalls:sys_enter_select",
1717
1718		"syscalls:sys_exit_epoll_pwait",
1719		"syscalls:sys_exit_epoll_wait",
1720		"syscalls:sys_exit_poll",
1721		"syscalls:sys_exit_ppoll",
1722		"syscalls:sys_exit_pselect6",
1723		"syscalls:sys_exit_select",
1724	};
1725	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
1726
1727	rec_argc = common_args_nr +
1728		disk_events_nr * 4 +
1729		net_events_nr * 4 +
1730		poll_events_nr * 4 +
1731		argc;
1732	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1733
1734	if (rec_argv == NULL)
1735		return -ENOMEM;
1736
1737	if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
1738		free(rec_argv);
1739		return -ENOMEM;
1740	}
1741
1742	p = rec_argv;
1743	for (i = 0; i < common_args_nr; i++)
1744		*p++ = strdup(common_args[i]);
1745
1746	for (i = 0; i < disk_events_nr; i++) {
1747		if (!is_valid_tracepoint(disk_events[i])) {
1748			rec_argc -= 4;
1749			continue;
1750		}
1751
1752		*p++ = "-e";
1753		*p++ = strdup(disk_events[i]);
1754		*p++ = "--filter";
1755		*p++ = filter;
1756	}
1757	for (i = 0; i < net_events_nr; i++) {
1758		if (!is_valid_tracepoint(net_events[i])) {
1759			rec_argc -= 4;
1760			continue;
1761		}
1762
1763		*p++ = "-e";
1764		*p++ = strdup(net_events[i]);
1765		*p++ = "--filter";
1766		*p++ = filter;
1767	}
1768	for (i = 0; i < poll_events_nr; i++) {
1769		if (!is_valid_tracepoint(poll_events[i])) {
1770			rec_argc -= 4;
1771			continue;
1772		}
1773
1774		*p++ = "-e";
1775		*p++ = strdup(poll_events[i]);
1776		*p++ = "--filter";
1777		*p++ = filter;
1778	}
1779
1780	for (i = 0; i < (unsigned int)argc; i++)
1781		*p++ = argv[i];
1782
1783	return cmd_record(rec_argc, rec_argv);
1784}
1785
1786
1787static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1788{
1789	unsigned int rec_argc, i, j;
1790	const char **rec_argv;
1791	const char **p;
1792	unsigned int record_elems;
1793
1794	const char * const common_args[] = {
1795		"record", "-a", "-R", "-c", "1",
1796	};
1797	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1798
1799	const char * const backtrace_args[] = {
1800		"-g",
1801	};
1802	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1803
1804	const char * const power_args[] = {
1805		"-e", "power:cpu_frequency",
1806		"-e", "power:cpu_idle",
1807	};
1808	unsigned int power_args_nr = ARRAY_SIZE(power_args);
1809
1810	const char * const old_power_args[] = {
1811#ifdef SUPPORT_OLD_POWER_EVENTS
1812		"-e", "power:power_start",
1813		"-e", "power:power_end",
1814		"-e", "power:power_frequency",
1815#endif
1816	};
1817	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1818
1819	const char * const tasks_args[] = {
1820		"-e", "sched:sched_wakeup",
1821		"-e", "sched:sched_switch",
1822	};
1823	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1824
1825#ifdef SUPPORT_OLD_POWER_EVENTS
1826	if (!is_valid_tracepoint("power:cpu_idle") &&
1827	    is_valid_tracepoint("power:power_start")) {
1828		use_old_power_events = 1;
1829		power_args_nr = 0;
1830	} else {
1831		old_power_args_nr = 0;
1832	}
1833#endif
1834
1835	if (tchart->power_only)
1836		tasks_args_nr = 0;
1837
1838	if (tchart->tasks_only) {
1839		power_args_nr = 0;
1840		old_power_args_nr = 0;
1841	}
1842
1843	if (!tchart->with_backtrace)
1844		backtrace_args_no = 0;
1845
1846	record_elems = common_args_nr + tasks_args_nr +
1847		power_args_nr + old_power_args_nr + backtrace_args_no;
1848
1849	rec_argc = record_elems + argc;
1850	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1851
1852	if (rec_argv == NULL)
1853		return -ENOMEM;
1854
1855	p = rec_argv;
1856	for (i = 0; i < common_args_nr; i++)
1857		*p++ = strdup(common_args[i]);
1858
1859	for (i = 0; i < backtrace_args_no; i++)
1860		*p++ = strdup(backtrace_args[i]);
1861
1862	for (i = 0; i < tasks_args_nr; i++)
1863		*p++ = strdup(tasks_args[i]);
1864
1865	for (i = 0; i < power_args_nr; i++)
1866		*p++ = strdup(power_args[i]);
1867
1868	for (i = 0; i < old_power_args_nr; i++)
1869		*p++ = strdup(old_power_args[i]);
1870
1871	for (j = 0; j < (unsigned int)argc; j++)
1872		*p++ = argv[j];
1873
1874	return cmd_record(rec_argc, rec_argv);
1875}
1876
1877static int
1878parse_process(const struct option *opt __maybe_unused, const char *arg,
1879	      int __maybe_unused unset)
1880{
1881	if (arg)
1882		add_process_filter(arg);
1883	return 0;
1884}
1885
1886static int
1887parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1888		int __maybe_unused unset)
1889{
1890	unsigned long duration = strtoul(arg, NULL, 0);
1891
1892	if (svg_highlight || svg_highlight_name)
1893		return -1;
1894
1895	if (duration)
1896		svg_highlight = duration;
1897	else
1898		svg_highlight_name = strdup(arg);
1899
1900	return 0;
1901}
1902
1903static int
1904parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
1905{
1906	char unit = 'n';
1907	u64 *value = opt->value;
1908
1909	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
1910		switch (unit) {
1911		case 'm':
1912			*value *= NSEC_PER_MSEC;
1913			break;
1914		case 'u':
1915			*value *= NSEC_PER_USEC;
1916			break;
1917		case 'n':
1918			break;
1919		default:
1920			return -1;
1921		}
1922	}
1923
1924	return 0;
1925}
1926
1927int cmd_timechart(int argc, const char **argv)
1928{
1929	struct timechart tchart = {
1930		.tool = {
1931			.comm		 = process_comm_event,
1932			.fork		 = process_fork_event,
1933			.exit		 = process_exit_event,
1934			.sample		 = process_sample_event,
1935			.ordered_events	 = true,
1936		},
1937		.proc_num = 15,
1938		.min_time = NSEC_PER_MSEC,
1939		.merge_dist = 1000,
1940	};
1941	const char *output_name = "output.svg";
1942	const struct option timechart_common_options[] = {
1943	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1944	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
1945	OPT_END()
1946	};
1947	const struct option timechart_options[] = {
1948	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1949	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1950	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1951	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1952		      "highlight tasks. Pass duration in ns or process name.",
1953		       parse_highlight),
1954	OPT_CALLBACK('p', "process", NULL, "process",
1955		      "process selector. Pass a pid or process name.",
1956		       parse_process),
1957	OPT_CALLBACK(0, "symfs", NULL, "directory",
1958		     "Look for files with symbols relative to this directory",
1959		     symbol__config_symfs),
1960	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1961		    "min. number of tasks to print"),
1962	OPT_BOOLEAN('t', "topology", &tchart.topology,
1963		    "sort CPUs according to topology"),
1964	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
1965		    "skip EAGAIN errors"),
1966	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
1967		     "all IO faster than min-time will visually appear longer",
1968		     parse_time),
1969	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
1970		     "merge events that are merge-dist us apart",
1971		     parse_time),
1972	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
1973	OPT_PARENT(timechart_common_options),
1974	};
1975	const char * const timechart_subcommands[] = { "record", NULL };
1976	const char *timechart_usage[] = {
1977		"perf timechart [<options>] {record}",
1978		NULL
1979	};
1980	const struct option timechart_record_options[] = {
1981	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
1982		    "record only IO data"),
1983	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1984	OPT_PARENT(timechart_common_options),
1985	};
1986	const char * const timechart_record_usage[] = {
1987		"perf timechart record [<options>]",
1988		NULL
1989	};
1990	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
1991			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
1992
1993	if (tchart.power_only && tchart.tasks_only) {
1994		pr_err("-P and -T options cannot be used at the same time.\n");
1995		return -1;
1996	}
1997
1998	if (argc && !strncmp(argv[0], "rec", 3)) {
1999		argc = parse_options(argc, argv, timechart_record_options,
2000				     timechart_record_usage,
2001				     PARSE_OPT_STOP_AT_NON_OPTION);
2002
2003		if (tchart.power_only && tchart.tasks_only) {
2004			pr_err("-P and -T options cannot be used at the same time.\n");
2005			return -1;
2006		}
2007
2008		if (tchart.io_only)
2009			return timechart__io_record(argc, argv);
2010		else
2011			return timechart__record(&tchart, argc, argv);
2012	} else if (argc)
2013		usage_with_options(timechart_usage, timechart_options);
2014
2015	setup_pager();
2016
2017	return __cmd_timechart(&tchart, output_name);
2018}