v3.15
   1/*
   2 * builtin-timechart.c - make an svg timechart of system activity
   3 *
   4 * (C) Copyright 2009 Intel Corporation
   5 *
   6 * Authors:
   7 *     Arjan van de Ven <arjan@linux.intel.com>
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License
  11 * as published by the Free Software Foundation; version 2
  12 * of the License.
  13 */
  14
  15#include <traceevent/event-parse.h>
  16
  17#include "builtin.h"
  18
  19#include "util/util.h"
  20
  21#include "util/color.h"
  22#include <linux/list.h>
  23#include "util/cache.h"
  24#include "util/evlist.h"
  25#include "util/evsel.h"
  26#include <linux/rbtree.h>
  27#include "util/symbol.h"
  28#include "util/callchain.h"
  29#include "util/strlist.h"
  30
  31#include "perf.h"
  32#include "util/header.h"
  33#include "util/parse-options.h"
  34#include "util/parse-events.h"
  35#include "util/event.h"
  36#include "util/session.h"
  37#include "util/svghelper.h"
  38#include "util/tool.h"
  39#include "util/data.h"
  40
  41#define SUPPORT_OLD_POWER_EVENTS 1
  42#define PWR_EVENT_EXIT -1
  43
  44struct per_pid;
  45struct power_event;
  46struct wake_event;
  47
  48struct timechart {
  49	struct perf_tool	tool;
  50	struct per_pid		*all_data;
  51	struct power_event	*power_events;
  52	struct wake_event	*wake_events;
  53	int			proc_num;
  54	unsigned int		numcpus;
  55	u64			min_freq,	/* Lowest CPU frequency seen */
  56				max_freq,	/* Highest CPU frequency seen */
  57				turbo_frequency,
  58				first_time, last_time;
  59	bool			power_only,
  60				tasks_only,
  61				with_backtrace,
  62				topology;
  63};
  64
  65struct per_pidcomm;
  66struct cpu_sample;
  67
  68/*
  69 * Datastructure layout:
   70 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
   71 * Each "pid" entry has a list of "comm"s.
   72 *	this is because we want to track different programs differently, while
   73 *	exec will reuse the original pid (by design).
   74 * Each comm has a list of samples that will be used to draw
   75 * the final graph.
  76 */
  77
  78struct per_pid {
  79	struct per_pid *next;
  80
  81	int		pid;
  82	int		ppid;
  83
  84	u64		start_time;
  85	u64		end_time;
  86	u64		total_time;
  87	int		display;
  88
  89	struct per_pidcomm *all;
  90	struct per_pidcomm *current;
  91};
  92
  93
  94struct per_pidcomm {
  95	struct per_pidcomm *next;
  96
  97	u64		start_time;
  98	u64		end_time;
  99	u64		total_time;
 100
 101	int		Y;
 102	int		display;
 103
 104	long		state;
 105	u64		state_since;
 106
 107	char		*comm;
 108
 109	struct cpu_sample *samples;
 110};
 111
 112struct sample_wrapper {
 113	struct sample_wrapper *next;
 114
 115	u64		timestamp;
 116	unsigned char	data[0];
 117};
 118
 119#define TYPE_NONE	0
 120#define TYPE_RUNNING	1
 121#define TYPE_WAITING	2
 122#define TYPE_BLOCKED	3
 123
 124struct cpu_sample {
 125	struct cpu_sample *next;
 126
 127	u64 start_time;
 128	u64 end_time;
 129	int type;
 130	int cpu;
 131	const char *backtrace;
 132};
 133
 134#define CSTATE 1
 135#define PSTATE 2
 136
 137struct power_event {
 138	struct power_event *next;
 139	int type;
 140	int state;
 141	u64 start_time;
 142	u64 end_time;
 143	int cpu;
 144};
 145
 146struct wake_event {
 147	struct wake_event *next;
 148	int waker;
 149	int wakee;
 150	u64 time;
 151	const char *backtrace;
 152};
 153
 154struct process_filter {
 155	char			*name;
 156	int			pid;
 157	struct process_filter	*next;
 158};
 159
 160static struct process_filter *process_filter;
 161
 162
 163static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
 164{
 165	struct per_pid *cursor = tchart->all_data;
 166
 167	while (cursor) {
 168		if (cursor->pid == pid)
 169			return cursor;
 170		cursor = cursor->next;
 171	}
 172	cursor = zalloc(sizeof(*cursor));
 173	assert(cursor != NULL);
 174	cursor->pid = pid;
 175	cursor->next = tchart->all_data;
 176	tchart->all_data = cursor;
 177	return cursor;
 178}
 179
 180static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
 181{
 182	struct per_pid *p;
 183	struct per_pidcomm *c;
 184	p = find_create_pid(tchart, pid);
 185	c = p->all;
 186	while (c) {
 187		if (c->comm && strcmp(c->comm, comm) == 0) {
 188			p->current = c;
 189			return;
 190		}
 191		if (!c->comm) {
 192			c->comm = strdup(comm);
 193			p->current = c;
 194			return;
 195		}
 196		c = c->next;
 197	}
 198	c = zalloc(sizeof(*c));
 199	assert(c != NULL);
 200	c->comm = strdup(comm);
 201	p->current = c;
 202	c->next = p->all;
 203	p->all = c;
 204}
 205
 206static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
 207{
 208	struct per_pid *p, *pp;
 209	p = find_create_pid(tchart, pid);
 210	pp = find_create_pid(tchart, ppid);
 211	p->ppid = ppid;
 212	if (pp->current && pp->current->comm && !p->current)
 213		pid_set_comm(tchart, pid, pp->current->comm);
 214
 215	p->start_time = timestamp;
 216	if (p->current) {
 217		p->current->start_time = timestamp;
 218		p->current->state_since = timestamp;
 219	}
 220}
 221
 222static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
 223{
 224	struct per_pid *p;
 225	p = find_create_pid(tchart, pid);
 226	p->end_time = timestamp;
 227	if (p->current)
 228		p->current->end_time = timestamp;
 229}
 230
 231static void pid_put_sample(struct timechart *tchart, int pid, int type,
 232			   unsigned int cpu, u64 start, u64 end,
 233			   const char *backtrace)
 234{
 235	struct per_pid *p;
 236	struct per_pidcomm *c;
 237	struct cpu_sample *sample;
 238
 239	p = find_create_pid(tchart, pid);
 240	c = p->current;
 241	if (!c) {
 242		c = zalloc(sizeof(*c));
 243		assert(c != NULL);
 244		p->current = c;
 245		c->next = p->all;
 246		p->all = c;
 247	}
 248
 249	sample = zalloc(sizeof(*sample));
 250	assert(sample != NULL);
 251	sample->start_time = start;
 252	sample->end_time = end;
 253	sample->type = type;
 254	sample->next = c->samples;
 255	sample->cpu = cpu;
 256	sample->backtrace = backtrace;
 257	c->samples = sample;
 258
 259	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 260		c->total_time += (end-start);
 261		p->total_time += (end-start);
 262	}
 263
 264	if (c->start_time == 0 || c->start_time > start)
 265		c->start_time = start;
 266	if (p->start_time == 0 || p->start_time > start)
 267		p->start_time = start;
 268}
 269
 270#define MAX_CPUS 4096
 271
 272static u64 cpus_cstate_start_times[MAX_CPUS];
 273static int cpus_cstate_state[MAX_CPUS];
 274static u64 cpus_pstate_start_times[MAX_CPUS];
 275static u64 cpus_pstate_state[MAX_CPUS];
 276
 277static int process_comm_event(struct perf_tool *tool,
 278			      union perf_event *event,
 279			      struct perf_sample *sample __maybe_unused,
 280			      struct machine *machine __maybe_unused)
 281{
 282	struct timechart *tchart = container_of(tool, struct timechart, tool);
 283	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
 284	return 0;
 285}
 286
 287static int process_fork_event(struct perf_tool *tool,
 288			      union perf_event *event,
 289			      struct perf_sample *sample __maybe_unused,
 290			      struct machine *machine __maybe_unused)
 291{
 292	struct timechart *tchart = container_of(tool, struct timechart, tool);
 293	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
 294	return 0;
 295}
 296
 297static int process_exit_event(struct perf_tool *tool,
 298			      union perf_event *event,
 299			      struct perf_sample *sample __maybe_unused,
 300			      struct machine *machine __maybe_unused)
 301{
 302	struct timechart *tchart = container_of(tool, struct timechart, tool);
 303	pid_exit(tchart, event->fork.pid, event->fork.time);
 304	return 0;
 305}
 306
 307#ifdef SUPPORT_OLD_POWER_EVENTS
 308static int use_old_power_events;
 309#endif
 310
 311static void c_state_start(int cpu, u64 timestamp, int state)
 312{
 313	cpus_cstate_start_times[cpu] = timestamp;
 314	cpus_cstate_state[cpu] = state;
 315}
 316
 317static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
 318{
 319	struct power_event *pwr = zalloc(sizeof(*pwr));
 320
 321	if (!pwr)
 322		return;
 323
 324	pwr->state = cpus_cstate_state[cpu];
 325	pwr->start_time = cpus_cstate_start_times[cpu];
 326	pwr->end_time = timestamp;
 327	pwr->cpu = cpu;
 328	pwr->type = CSTATE;
 329	pwr->next = tchart->power_events;
 330
 331	tchart->power_events = pwr;
 332}
 333
 334static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
 335{
 336	struct power_event *pwr;
 337
 338	if (new_freq > 8000000) /* detect invalid data */
 339		return;
 340
 341	pwr = zalloc(sizeof(*pwr));
 342	if (!pwr)
 343		return;
 344
 345	pwr->state = cpus_pstate_state[cpu];
 346	pwr->start_time = cpus_pstate_start_times[cpu];
 347	pwr->end_time = timestamp;
 348	pwr->cpu = cpu;
 349	pwr->type = PSTATE;
 350	pwr->next = tchart->power_events;
 351
 352	if (!pwr->start_time)
 353		pwr->start_time = tchart->first_time;
 354
 355	tchart->power_events = pwr;
 356
 357	cpus_pstate_state[cpu] = new_freq;
 358	cpus_pstate_start_times[cpu] = timestamp;
 359
 360	if ((u64)new_freq > tchart->max_freq)
 361		tchart->max_freq = new_freq;
 362
 363	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
 364		tchart->min_freq = new_freq;
 365
 366	if (new_freq == tchart->max_freq - 1000)
 367		tchart->turbo_frequency = tchart->max_freq;
 368}
 369
 370static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
 371			 int waker, int wakee, u8 flags, const char *backtrace)
 372{
 373	struct per_pid *p;
 374	struct wake_event *we = zalloc(sizeof(*we));
 375
 376	if (!we)
 377		return;
 378
 379	we->time = timestamp;
 380	we->waker = waker;
 381	we->backtrace = backtrace;
 382
 383	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 384		we->waker = -1;
 385
 386	we->wakee = wakee;
 387	we->next = tchart->wake_events;
 388	tchart->wake_events = we;
 389	p = find_create_pid(tchart, we->wakee);
 390
 391	if (p && p->current && p->current->state == TYPE_NONE) {
 392		p->current->state_since = timestamp;
 393		p->current->state = TYPE_WAITING;
 394	}
 395	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 396		pid_put_sample(tchart, p->pid, p->current->state, cpu,
 397			       p->current->state_since, timestamp, NULL);
 398		p->current->state_since = timestamp;
 399		p->current->state = TYPE_WAITING;
 400	}
 401}
 402
 403static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
 404			 int prev_pid, int next_pid, u64 prev_state,
 405			 const char *backtrace)
 406{
 407	struct per_pid *p = NULL, *prev_p;
 408
 409	prev_p = find_create_pid(tchart, prev_pid);
 410
 411	p = find_create_pid(tchart, next_pid);
 412
 413	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 414		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
 415			       prev_p->current->state_since, timestamp,
 416			       backtrace);
 417	if (p && p->current) {
 418		if (p->current->state != TYPE_NONE)
 419			pid_put_sample(tchart, next_pid, p->current->state, cpu,
 420				       p->current->state_since, timestamp,
 421				       backtrace);
 422
 423		p->current->state_since = timestamp;
 424		p->current->state = TYPE_RUNNING;
 425	}
 426
 427	if (prev_p->current) {
 428		prev_p->current->state = TYPE_NONE;
 429		prev_p->current->state_since = timestamp;
 430		if (prev_state & 2)
 431			prev_p->current->state = TYPE_BLOCKED;
 432		if (prev_state == 0)
 433			prev_p->current->state = TYPE_WAITING;
 434	}
 435}
 436
 437static const char *cat_backtrace(union perf_event *event,
 438				 struct perf_sample *sample,
 439				 struct machine *machine)
 440{
 441	struct addr_location al;
 442	unsigned int i;
 443	char *p = NULL;
 444	size_t p_len;
 445	u8 cpumode = PERF_RECORD_MISC_USER;
 446	struct addr_location tal;
 447	struct ip_callchain *chain = sample->callchain;
 448	FILE *f = open_memstream(&p, &p_len);
 449
 450	if (!f) {
 451		perror("open_memstream error");
 452		return NULL;
 453	}
 454
 455	if (!chain)
 456		goto exit;
 457
 458	if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
 459		fprintf(stderr, "problem processing %d event, skipping it.\n",
 460			event->header.type);
 461		goto exit;
 462	}
 463
 464	for (i = 0; i < chain->nr; i++) {
 465		u64 ip;
 466
 467		if (callchain_param.order == ORDER_CALLEE)
 468			ip = chain->ips[i];
 469		else
 470			ip = chain->ips[chain->nr - i - 1];
 471
 472		if (ip >= PERF_CONTEXT_MAX) {
 473			switch (ip) {
 474			case PERF_CONTEXT_HV:
 475				cpumode = PERF_RECORD_MISC_HYPERVISOR;
 476				break;
 477			case PERF_CONTEXT_KERNEL:
 478				cpumode = PERF_RECORD_MISC_KERNEL;
 479				break;
 480			case PERF_CONTEXT_USER:
 481				cpumode = PERF_RECORD_MISC_USER;
 482				break;
 483			default:
 484				pr_debug("invalid callchain context: "
 485					 "%"PRId64"\n", (s64) ip);
 486
 487				/*
 488				 * It seems the callchain is corrupted.
 489				 * Discard all.
 490				 */
 491				zfree(&p);
 492				goto exit;
 493			}
 494			continue;
 495		}
 496
 497		tal.filtered = 0;
 498		thread__find_addr_location(al.thread, machine, cpumode,
 499					   MAP__FUNCTION, ip, &tal);
 500
 501		if (tal.sym)
 502			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
 503				tal.sym->name);
 504		else
 505			fprintf(f, "..... %016" PRIx64 "\n", ip);
 506	}
 507
 508exit:
 509	fclose(f);
 510
 511	return p;
 512}
 513
 514typedef int (*tracepoint_handler)(struct timechart *tchart,
 515				  struct perf_evsel *evsel,
 516				  struct perf_sample *sample,
 517				  const char *backtrace);
 518
 519static int process_sample_event(struct perf_tool *tool,
 520				union perf_event *event,
 521				struct perf_sample *sample,
 522				struct perf_evsel *evsel,
 523				struct machine *machine)
 524{
 525	struct timechart *tchart = container_of(tool, struct timechart, tool);
 526
 527	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
 528		if (!tchart->first_time || tchart->first_time > sample->time)
 529			tchart->first_time = sample->time;
 530		if (tchart->last_time < sample->time)
 531			tchart->last_time = sample->time;
 532	}
 533
 534	if (evsel->handler != NULL) {
 535		tracepoint_handler f = evsel->handler;
 536		return f(tchart, evsel, sample,
 537			 cat_backtrace(event, sample, machine));
 538	}
 539
 540	return 0;
 541}
 542
 543static int
 544process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
 545			struct perf_evsel *evsel,
 546			struct perf_sample *sample,
 547			const char *backtrace __maybe_unused)
 548{
 549	u32 state = perf_evsel__intval(evsel, sample, "state");
 550	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 551
 552	if (state == (u32)PWR_EVENT_EXIT)
 553		c_state_end(tchart, cpu_id, sample->time);
 554	else
 555		c_state_start(cpu_id, sample->time, state);
 556	return 0;
 557}
 558
 559static int
 560process_sample_cpu_frequency(struct timechart *tchart,
 561			     struct perf_evsel *evsel,
 562			     struct perf_sample *sample,
 563			     const char *backtrace __maybe_unused)
 564{
 565	u32 state = perf_evsel__intval(evsel, sample, "state");
 566	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 567
 568	p_state_change(tchart, cpu_id, sample->time, state);
 569	return 0;
 570}
 571
 572static int
 573process_sample_sched_wakeup(struct timechart *tchart,
 574			    struct perf_evsel *evsel,
 575			    struct perf_sample *sample,
 576			    const char *backtrace)
 577{
 578	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
 579	int waker = perf_evsel__intval(evsel, sample, "common_pid");
 580	int wakee = perf_evsel__intval(evsel, sample, "pid");
 581
 582	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
 583	return 0;
 584}
 585
 586static int
 587process_sample_sched_switch(struct timechart *tchart,
 588			    struct perf_evsel *evsel,
 589			    struct perf_sample *sample,
 590			    const char *backtrace)
 591{
 592	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 593	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 594	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 595
 596	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
 597		     prev_state, backtrace);
 598	return 0;
 599}
 600
 601#ifdef SUPPORT_OLD_POWER_EVENTS
 602static int
 603process_sample_power_start(struct timechart *tchart __maybe_unused,
 604			   struct perf_evsel *evsel,
 605			   struct perf_sample *sample,
 606			   const char *backtrace __maybe_unused)
 607{
 608	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 609	u64 value = perf_evsel__intval(evsel, sample, "value");
 610
 611	c_state_start(cpu_id, sample->time, value);
 612	return 0;
 613}
 614
 615static int
 616process_sample_power_end(struct timechart *tchart,
 617			 struct perf_evsel *evsel __maybe_unused,
 618			 struct perf_sample *sample,
 619			 const char *backtrace __maybe_unused)
 620{
 621	c_state_end(tchart, sample->cpu, sample->time);
 622	return 0;
 623}
 624
 625static int
 626process_sample_power_frequency(struct timechart *tchart,
 627			       struct perf_evsel *evsel,
 628			       struct perf_sample *sample,
 629			       const char *backtrace __maybe_unused)
 630{
 631	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 632	u64 value = perf_evsel__intval(evsel, sample, "value");
 633
 634	p_state_change(tchart, cpu_id, sample->time, value);
 635	return 0;
 636}
 637#endif /* SUPPORT_OLD_POWER_EVENTS */
 638
 639/*
 640 * After the last sample we need to wrap up the current C/P state
 641 * and close out each CPU for these.
 642 */
 643static void end_sample_processing(struct timechart *tchart)
 644{
 645	u64 cpu;
 646	struct power_event *pwr;
 647
 648	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
 649		/* C state */
 650#if 0
 651		pwr = zalloc(sizeof(*pwr));
 652		if (!pwr)
 653			return;
 654
 655		pwr->state = cpus_cstate_state[cpu];
 656		pwr->start_time = cpus_cstate_start_times[cpu];
 657		pwr->end_time = tchart->last_time;
 658		pwr->cpu = cpu;
 659		pwr->type = CSTATE;
 660		pwr->next = tchart->power_events;
 661
 662		tchart->power_events = pwr;
 663#endif
 664		/* P state */
 665
 666		pwr = zalloc(sizeof(*pwr));
 667		if (!pwr)
 668			return;
 669
 670		pwr->state = cpus_pstate_state[cpu];
 671		pwr->start_time = cpus_pstate_start_times[cpu];
 672		pwr->end_time = tchart->last_time;
 673		pwr->cpu = cpu;
 674		pwr->type = PSTATE;
 675		pwr->next = tchart->power_events;
 676
 677		if (!pwr->start_time)
 678			pwr->start_time = tchart->first_time;
 679		if (!pwr->state)
 680			pwr->state = tchart->min_freq;
 681		tchart->power_events = pwr;
 682	}
 683}
 684
 685/*
 686 * Sort the pid datastructure
 687 */
 688static void sort_pids(struct timechart *tchart)
 689{
 690	struct per_pid *new_list, *p, *cursor, *prev;
 691	/* sort by ppid first, then by pid, lowest to highest */
 692
 693	new_list = NULL;
 694
 695	while (tchart->all_data) {
 696		p = tchart->all_data;
 697		tchart->all_data = p->next;
 698		p->next = NULL;
 699
 700		if (new_list == NULL) {
 701			new_list = p;
 702			p->next = NULL;
 703			continue;
 704		}
 705		prev = NULL;
 706		cursor = new_list;
 707		while (cursor) {
 708			if (cursor->ppid > p->ppid ||
 709				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 710				/* must insert before */
 711				if (prev) {
 712					p->next = prev->next;
 713					prev->next = p;
 714					cursor = NULL;
 715					continue;
 716				} else {
 717					p->next = new_list;
 718					new_list = p;
 719					cursor = NULL;
 720					continue;
 721				}
 722			}
 723
 724			prev = cursor;
 725			cursor = cursor->next;
 726			if (!cursor)
 727				prev->next = p;
 728		}
 729	}
 730	tchart->all_data = new_list;
 731}
 732
 733
 734static void draw_c_p_states(struct timechart *tchart)
 735{
 736	struct power_event *pwr;
 737	pwr = tchart->power_events;
 738
 739	/*
 740	 * two pass drawing so that the P state bars are on top of the C state blocks
 741	 */
 742	while (pwr) {
 743		if (pwr->type == CSTATE)
 744			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
 745		pwr = pwr->next;
 746	}
 747
 748	pwr = tchart->power_events;
 749	while (pwr) {
 750		if (pwr->type == PSTATE) {
 751			if (!pwr->state)
 752				pwr->state = tchart->min_freq;
 753			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
 754		}
 755		pwr = pwr->next;
 756	}
 757}
 758
 759static void draw_wakeups(struct timechart *tchart)
 760{
 761	struct wake_event *we;
 762	struct per_pid *p;
 763	struct per_pidcomm *c;
 764
 765	we = tchart->wake_events;
 766	while (we) {
 767		int from = 0, to = 0;
 768		char *task_from = NULL, *task_to = NULL;
 769
 770		/* locate the column of the waker and wakee */
 771		p = tchart->all_data;
 772		while (p) {
 773			if (p->pid == we->waker || p->pid == we->wakee) {
 774				c = p->all;
 775				while (c) {
 776					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
 777						if (p->pid == we->waker && !from) {
 778							from = c->Y;
 779							task_from = strdup(c->comm);
 780						}
 781						if (p->pid == we->wakee && !to) {
 782							to = c->Y;
 783							task_to = strdup(c->comm);
 784						}
 785					}
 786					c = c->next;
 787				}
 788				c = p->all;
 789				while (c) {
 790					if (p->pid == we->waker && !from) {
 791						from = c->Y;
 792						task_from = strdup(c->comm);
 793					}
 794					if (p->pid == we->wakee && !to) {
 795						to = c->Y;
 796						task_to = strdup(c->comm);
 797					}
 798					c = c->next;
 799				}
 800			}
 801			p = p->next;
 802		}
 803
 804		if (!task_from) {
 805			task_from = malloc(40);
 806			sprintf(task_from, "[%i]", we->waker);
 807		}
 808		if (!task_to) {
 809			task_to = malloc(40);
 810			sprintf(task_to, "[%i]", we->wakee);
 811		}
 812
 813		if (we->waker == -1)
 814			svg_interrupt(we->time, to, we->backtrace);
 815		else if (from && to && abs(from - to) == 1)
 816			svg_wakeline(we->time, from, to, we->backtrace);
 817		else
 818			svg_partial_wakeline(we->time, from, task_from, to,
 819					     task_to, we->backtrace);
 820		we = we->next;
 821
 822		free(task_from);
 823		free(task_to);
 824	}
 825}
 826
 827static void draw_cpu_usage(struct timechart *tchart)
 828{
 829	struct per_pid *p;
 830	struct per_pidcomm *c;
 831	struct cpu_sample *sample;
 832	p = tchart->all_data;
 833	while (p) {
 834		c = p->all;
 835		while (c) {
 836			sample = c->samples;
 837			while (sample) {
 838				if (sample->type == TYPE_RUNNING) {
 839					svg_process(sample->cpu,
 840						    sample->start_time,
 841						    sample->end_time,
 842						    p->pid,
 843						    c->comm,
 844						    sample->backtrace);
 845				}
 846
 847				sample = sample->next;
 848			}
 849			c = c->next;
 850		}
 851		p = p->next;
 852	}
 853}
 854
 855static void draw_process_bars(struct timechart *tchart)
 856{
 857	struct per_pid *p;
 858	struct per_pidcomm *c;
 859	struct cpu_sample *sample;
 860	int Y = 0;
 861
 862	Y = 2 * tchart->numcpus + 2;
 863
 864	p = tchart->all_data;
 865	while (p) {
 866		c = p->all;
 867		while (c) {
 868			if (!c->display) {
 869				c->Y = 0;
 870				c = c->next;
 871				continue;
 872			}
 873
 874			svg_box(Y, c->start_time, c->end_time, "process");
 875			sample = c->samples;
 876			while (sample) {
 877				if (sample->type == TYPE_RUNNING)
 878					svg_running(Y, sample->cpu,
 879						    sample->start_time,
 880						    sample->end_time,
 881						    sample->backtrace);
 882				if (sample->type == TYPE_BLOCKED)
 883					svg_blocked(Y, sample->cpu,
 884						    sample->start_time,
 885						    sample->end_time,
 886						    sample->backtrace);
 887				if (sample->type == TYPE_WAITING)
 888					svg_waiting(Y, sample->cpu,
 889						    sample->start_time,
 890						    sample->end_time,
 891						    sample->backtrace);
 892				sample = sample->next;
 893			}
 894
 895			if (c->comm) {
 896				char comm[256];
 897				if (c->total_time > 5000000000) /* 5 seconds */
 898					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
 899				else
 900					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
 901
 902				svg_text(Y, c->start_time, comm);
 903			}
 904			c->Y = Y;
 905			Y++;
 906			c = c->next;
 907		}
 908		p = p->next;
 909	}
 910}
 911
 912static void add_process_filter(const char *string)
 913{
 914	int pid = strtoull(string, NULL, 10);
 915	struct process_filter *filt = malloc(sizeof(*filt));
 916
 917	if (!filt)
 918		return;
 919
 920	filt->name = strdup(string);
 921	filt->pid  = pid;
 922	filt->next = process_filter;
 923
 924	process_filter = filt;
 925}
 926
 927static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
 928{
 929	struct process_filter *filt;
 930	if (!process_filter)
 931		return 1;
 932
 933	filt = process_filter;
 934	while (filt) {
 935		if (filt->pid && p->pid == filt->pid)
 936			return 1;
 937		if (strcmp(filt->name, c->comm) == 0)
 938			return 1;
 939		filt = filt->next;
 940	}
 941	return 0;
 942}
 943
 944static int determine_display_tasks_filtered(struct timechart *tchart)
 945{
 946	struct per_pid *p;
 947	struct per_pidcomm *c;
 948	int count = 0;
 949
 950	p = tchart->all_data;
 951	while (p) {
 952		p->display = 0;
 953		if (p->start_time == 1)
 954			p->start_time = tchart->first_time;
 955
 956		/* no exit marker, task kept running to the end */
 957		if (p->end_time == 0)
 958			p->end_time = tchart->last_time;
 959
 960		c = p->all;
 961
 962		while (c) {
 963			c->display = 0;
 964
 965			if (c->start_time == 1)
 966				c->start_time = tchart->first_time;
 967
 968			if (passes_filter(p, c)) {
 969				c->display = 1;
 970				p->display = 1;
 971				count++;
 972			}
 973
 974			if (c->end_time == 0)
 975				c->end_time = tchart->last_time;
 976
 977			c = c->next;
 978		}
 979		p = p->next;
 980	}
 981	return count;
 982}
 983
 984static int determine_display_tasks(struct timechart *tchart, u64 threshold)
 985{
 986	struct per_pid *p;
 987	struct per_pidcomm *c;
 988	int count = 0;
 989
 990	if (process_filter)
 991		return determine_display_tasks_filtered(tchart);
 992
 993	p = tchart->all_data;
 994	while (p) {
 995		p->display = 0;
 996		if (p->start_time == 1)
 997			p->start_time = tchart->first_time;
 998
 999		/* no exit marker, task kept running to the end */
1000		if (p->end_time == 0)
1001			p->end_time = tchart->last_time;
1002		if (p->total_time >= threshold)
1003			p->display = 1;
1004
1005		c = p->all;
1006
1007		while (c) {
1008			c->display = 0;
1009
1010			if (c->start_time == 1)
1011				c->start_time = tchart->first_time;
1012
1013			if (c->total_time >= threshold) {
1014				c->display = 1;
1015				count++;
1016			}
1017
1018			if (c->end_time == 0)
1019				c->end_time = tchart->last_time;
1020
1021			c = c->next;
1022		}
1023		p = p->next;
1024	}
1025	return count;
1026}
1027
1028
1029
1030#define TIME_THRESH 10000000
1031
1032static void write_svg_file(struct timechart *tchart, const char *filename)
1033{
1034	u64 i;
1035	int count;
1036	int thresh = TIME_THRESH;
1037
1038	if (tchart->power_only)
1039		tchart->proc_num = 0;
1040
1041	/* We'd like to show at least proc_num tasks;
1042	 * be less picky if we have fewer */
1043	do {
1044		count = determine_display_tasks(tchart, thresh);
1045		thresh /= 10;
1046	} while (!process_filter && thresh && count < tchart->proc_num);
1047
1048	if (!tchart->proc_num)
1049		count = 0;
1050
1051	open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1052
1053	svg_time_grid();
1054	svg_legenda();
1055
1056	for (i = 0; i < tchart->numcpus; i++)
1057		svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1058
1059	draw_cpu_usage(tchart);
1060	if (tchart->proc_num)
1061		draw_process_bars(tchart);
1062	if (!tchart->tasks_only)
1063		draw_c_p_states(tchart);
1064	if (tchart->proc_num)
1065		draw_wakeups(tchart);
1066
1067	svg_close();
1068}
1069
1070static int process_header(struct perf_file_section *section __maybe_unused,
1071			  struct perf_header *ph,
1072			  int feat,
1073			  int fd __maybe_unused,
1074			  void *data)
1075{
1076	struct timechart *tchart = data;
1077
1078	switch (feat) {
1079	case HEADER_NRCPUS:
1080		tchart->numcpus = ph->env.nr_cpus_avail;
1081		break;
1082
1083	case HEADER_CPU_TOPOLOGY:
1084		if (!tchart->topology)
1085			break;
1086
1087		if (svg_build_topology_map(ph->env.sibling_cores,
1088					   ph->env.nr_sibling_cores,
1089					   ph->env.sibling_threads,
1090					   ph->env.nr_sibling_threads))
1091			fprintf(stderr, "problem building topology\n");
1092		break;
1093
1094	default:
1095		break;
1096	}
1097
1098	return 0;
1099}
1100
1101static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1102{
1103	const struct perf_evsel_str_handler power_tracepoints[] = {
1104		{ "power:cpu_idle",		process_sample_cpu_idle },
1105		{ "power:cpu_frequency",	process_sample_cpu_frequency },
1106		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
1107		{ "sched:sched_switch",		process_sample_sched_switch },
1108#ifdef SUPPORT_OLD_POWER_EVENTS
1109		{ "power:power_start",		process_sample_power_start },
1110		{ "power:power_end",		process_sample_power_end },
1111		{ "power:power_frequency",	process_sample_power_frequency },
1112#endif
1113	};
1114	struct perf_data_file file = {
1115		.path = input_name,
1116		.mode = PERF_DATA_MODE_READ,
1117	};
1118
1119	struct perf_session *session = perf_session__new(&file, false,
1120							 &tchart->tool);
1121	int ret = -EINVAL;
1122
1123	if (session == NULL)
1124		return -ENOMEM;
1125
1126	(void)perf_header__process_sections(&session->header,
1127					    perf_data_file__fd(session->file),
1128					    tchart,
1129					    process_header);
1130
1131	if (!perf_session__has_traces(session, "timechart record"))
1132		goto out_delete;
1133
1134	if (perf_session__set_tracepoints_handlers(session,
1135						   power_tracepoints)) {
1136		pr_err("Initializing session tracepoint handlers failed\n");
1137		goto out_delete;
1138	}
1139
1140	ret = perf_session__process_events(session, &tchart->tool);
1141	if (ret)
1142		goto out_delete;
1143
1144	end_sample_processing(tchart);
1145
1146	sort_pids(tchart);
1147
1148	write_svg_file(tchart, output_name);
1149
1150	pr_info("Written %2.1f seconds of trace to %s.\n",
1151		(tchart->last_time - tchart->first_time) / 1000000000.0, output_name);
1152out_delete:
1153	perf_session__delete(session);
1154	return ret;
1155}
1156
1157static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1158{
1159	unsigned int rec_argc, i, j;
1160	const char **rec_argv;
1161	const char **p;
1162	unsigned int record_elems;
1163
1164	const char * const common_args[] = {
1165		"record", "-a", "-R", "-c", "1",
1166	};
1167	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1168
1169	const char * const backtrace_args[] = {
1170		"-g",
1171	};
1172	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1173
1174	const char * const power_args[] = {
1175		"-e", "power:cpu_frequency",
1176		"-e", "power:cpu_idle",
1177	};
1178	unsigned int power_args_nr = ARRAY_SIZE(power_args);
1179
1180	const char * const old_power_args[] = {
1181#ifdef SUPPORT_OLD_POWER_EVENTS
1182		"-e", "power:power_start",
1183		"-e", "power:power_end",
1184		"-e", "power:power_frequency",
1185#endif
1186	};
1187	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1188
1189	const char * const tasks_args[] = {
1190		"-e", "sched:sched_wakeup",
1191		"-e", "sched:sched_switch",
1192	};
1193	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1194
1195#ifdef SUPPORT_OLD_POWER_EVENTS
1196	if (!is_valid_tracepoint("power:cpu_idle") &&
1197	    is_valid_tracepoint("power:power_start")) {
1198		use_old_power_events = 1;
1199		power_args_nr = 0;
1200	} else {
1201		old_power_args_nr = 0;
1202	}
1203#endif
1204
1205	if (tchart->power_only)
1206		tasks_args_nr = 0;
1207
1208	if (tchart->tasks_only) {
1209		power_args_nr = 0;
1210		old_power_args_nr = 0;
1211	}
1212
1213	if (!tchart->with_backtrace)
1214		backtrace_args_no = 0;
1215
1216	record_elems = common_args_nr + tasks_args_nr +
1217		power_args_nr + old_power_args_nr + backtrace_args_no;
1218
1219	rec_argc = record_elems + argc;
1220	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1221
1222	if (rec_argv == NULL)
1223		return -ENOMEM;
1224
1225	p = rec_argv;
1226	for (i = 0; i < common_args_nr; i++)
1227		*p++ = strdup(common_args[i]);
1228
1229	for (i = 0; i < backtrace_args_no; i++)
1230		*p++ = strdup(backtrace_args[i]);
1231
1232	for (i = 0; i < tasks_args_nr; i++)
1233		*p++ = strdup(tasks_args[i]);
1234
1235	for (i = 0; i < power_args_nr; i++)
1236		*p++ = strdup(power_args[i]);
1237
1238	for (i = 0; i < old_power_args_nr; i++)
1239		*p++ = strdup(old_power_args[i]);
1240
1241	for (j = 0; j < (unsigned int)argc; j++)
1242		*p++ = argv[j];
1243
1244	return cmd_record(rec_argc, rec_argv, NULL);
1245}
1246
1247static int
1248parse_process(const struct option *opt __maybe_unused, const char *arg,
1249	      int __maybe_unused unset)
1250{
1251	if (arg)
1252		add_process_filter(arg);
1253	return 0;
1254}
1255
1256static int
1257parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1258		int __maybe_unused unset)
1259{
1260	unsigned long duration = strtoul(arg, NULL, 0);
1261
1262	if (svg_highlight || svg_highlight_name)
1263		return -1;
1264
1265	if (duration)
1266		svg_highlight = duration;
1267	else
1268		svg_highlight_name = strdup(arg);
1269
1270	return 0;
1271}
1272
1273int cmd_timechart(int argc, const char **argv,
1274		  const char *prefix __maybe_unused)
1275{
1276	struct timechart tchart = {
1277		.tool = {
1278			.comm		 = process_comm_event,
1279			.fork		 = process_fork_event,
1280			.exit		 = process_exit_event,
1281			.sample		 = process_sample_event,
1282			.ordered_samples = true,
1283		},
1284		.proc_num = 15,
1285	};
1286	const char *output_name = "output.svg";
1287	const struct option timechart_options[] = {
1288	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1289	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1290	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1291	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1292		      "highlight tasks. Pass duration in ns or process name.",
1293		       parse_highlight),
1294	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1295	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
1296		    "output processes data only"),
1297	OPT_CALLBACK('p', "process", NULL, "process",
1298		      "process selector. Pass a pid or process name.",
1299		       parse_process),
1300	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1301		    "Look for files with symbols relative to this directory"),
1302	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1303		    "min. number of tasks to print"),
1304	OPT_BOOLEAN('t', "topology", &tchart.topology,
1305		    "sort CPUs according to topology"),
1306	OPT_END()
1307	};
1308	const char * const timechart_usage[] = {
1309		"perf timechart [<options>] {record}",
1310		NULL
1311	};
1312
1313	const struct option record_options[] = {
1314	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1315	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
1316		    "output processes data only"),
1317	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1318	OPT_END()
1319	};
1320	const char * const record_usage[] = {
1321		"perf timechart record [<options>]",
1322		NULL
1323	};
1324	argc = parse_options(argc, argv, timechart_options, timechart_usage,
1325			PARSE_OPT_STOP_AT_NON_OPTION);
1326
1327	if (tchart.power_only && tchart.tasks_only) {
1328		pr_err("-P and -T options cannot be used at the same time.\n");
1329		return -1;
1330	}
1331
1332	symbol__init();
1333
1334	if (argc && !strncmp(argv[0], "rec", 3)) {
1335		argc = parse_options(argc, argv, record_options, record_usage,
1336				     PARSE_OPT_STOP_AT_NON_OPTION);
1337
1338		if (tchart.power_only && tchart.tasks_only) {
1339			pr_err("-P and -T options cannot be used at the same time.\n");
1340			return -1;
1341		}
1342
1343		return timechart__record(&tchart, argc, argv);
1344	} else if (argc)
1345		usage_with_options(timechart_usage, timechart_options);
1346
1347	setup_pager();
1348
1349	return __cmd_timechart(&tchart, output_name);
1350}
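
A typical workflow, sketched from the usage strings and options parsed above (the workload and flag values are illustrative, not prescribed by the source):

	perf timechart record -g -- sleep 10
	perf timechart -o output.svg -n 15
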
v4.6
   1/*
   2 * builtin-timechart.c - make an svg timechart of system activity
   3 *
   4 * (C) Copyright 2009 Intel Corporation
   5 *
   6 * Authors:
   7 *     Arjan van de Ven <arjan@linux.intel.com>
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License
  11 * as published by the Free Software Foundation; version 2
  12 * of the License.
  13 */
  14
  15#include <traceevent/event-parse.h>
  16
  17#include "builtin.h"
  18
  19#include "util/util.h"
  20
  21#include "util/color.h"
  22#include <linux/list.h>
  23#include "util/cache.h"
  24#include "util/evlist.h"
  25#include "util/evsel.h"
  26#include <linux/rbtree.h>
  27#include "util/symbol.h"
  28#include "util/callchain.h"
  29#include "util/strlist.h"
  30
  31#include "perf.h"
  32#include "util/header.h"
  33#include <subcmd/parse-options.h>
  34#include "util/parse-events.h"
  35#include "util/event.h"
  36#include "util/session.h"
  37#include "util/svghelper.h"
  38#include "util/tool.h"
  39#include "util/data.h"
  40#include "util/debug.h"
  41
  42#define SUPPORT_OLD_POWER_EVENTS 1
  43#define PWR_EVENT_EXIT -1
  44
  45struct per_pid;
  46struct power_event;
  47struct wake_event;
  48
  49struct timechart {
  50	struct perf_tool	tool;
  51	struct per_pid		*all_data;
  52	struct power_event	*power_events;
  53	struct wake_event	*wake_events;
  54	int			proc_num;
  55	unsigned int		numcpus;
  56	u64			min_freq,	/* Lowest CPU frequency seen */
  57				max_freq,	/* Highest CPU frequency seen */
  58				turbo_frequency,
  59				first_time, last_time;
  60	bool			power_only,
  61				tasks_only,
  62				with_backtrace,
  63				topology;
  64	bool			force;
  65	/* IO related settings */
  66	bool			io_only,
  67				skip_eagain;
  68	u64			io_events;
  69	u64			min_time,
  70				merge_dist;
  71};
  72
  73struct per_pidcomm;
  74struct cpu_sample;
  75struct io_sample;
  76
  77/*
  78 * Datastructure layout:
   79 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
   80 * Each "pid" entry has a list of "comm"s.
   81 *	this is because we want to track different programs differently, while
   82 *	exec will reuse the original pid (by design).
   83 * Each comm has a list of samples that will be used to draw
   84 * the final graph.
  85 */
  86
  87struct per_pid {
  88	struct per_pid *next;
  89
  90	int		pid;
  91	int		ppid;
  92
  93	u64		start_time;
  94	u64		end_time;
  95	u64		total_time;
  96	u64		total_bytes;
  97	int		display;
  98
  99	struct per_pidcomm *all;
 100	struct per_pidcomm *current;
 101};
 102
 103
 104struct per_pidcomm {
 105	struct per_pidcomm *next;
 106
 107	u64		start_time;
 108	u64		end_time;
 109	u64		total_time;
 110	u64		max_bytes;
 111	u64		total_bytes;
 112
 113	int		Y;
 114	int		display;
 115
 116	long		state;
 117	u64		state_since;
 118
 119	char		*comm;
 120
 121	struct cpu_sample *samples;
 122	struct io_sample  *io_samples;
 123};
 124
 125struct sample_wrapper {
 126	struct sample_wrapper *next;
 127
 128	u64		timestamp;
 129	unsigned char	data[0];
 130};
 131
 132#define TYPE_NONE	0
 133#define TYPE_RUNNING	1
 134#define TYPE_WAITING	2
 135#define TYPE_BLOCKED	3
 136
 137struct cpu_sample {
 138	struct cpu_sample *next;
 139
 140	u64 start_time;
 141	u64 end_time;
 142	int type;
 143	int cpu;
 144	const char *backtrace;
 145};
 146
 147enum {
 148	IOTYPE_READ,
 149	IOTYPE_WRITE,
 150	IOTYPE_SYNC,
 151	IOTYPE_TX,
 152	IOTYPE_RX,
 153	IOTYPE_POLL,
 154};
 155
 156struct io_sample {
 157	struct io_sample *next;
 158
 159	u64 start_time;
 160	u64 end_time;
 161	u64 bytes;
 162	int type;
 163	int fd;
 164	int err;
 165	int merges;
 166};
 167
 168#define CSTATE 1
 169#define PSTATE 2
 170
 171struct power_event {
 172	struct power_event *next;
 173	int type;
 174	int state;
 175	u64 start_time;
 176	u64 end_time;
 177	int cpu;
 178};
 179
 180struct wake_event {
 181	struct wake_event *next;
 182	int waker;
 183	int wakee;
 184	u64 time;
 185	const char *backtrace;
 186};
 187
 188struct process_filter {
 189	char			*name;
 190	int			pid;
 191	struct process_filter	*next;
 192};
 193
 194static struct process_filter *process_filter;
 195
 196
 197static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
 198{
 199	struct per_pid *cursor = tchart->all_data;
 200
 201	while (cursor) {
 202		if (cursor->pid == pid)
 203			return cursor;
 204		cursor = cursor->next;
 205	}
 206	cursor = zalloc(sizeof(*cursor));
 207	assert(cursor != NULL);
 208	cursor->pid = pid;
 209	cursor->next = tchart->all_data;
 210	tchart->all_data = cursor;
 211	return cursor;
 212}
 213
 214static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
 215{
 216	struct per_pid *p;
 217	struct per_pidcomm *c;
 218	p = find_create_pid(tchart, pid);
 219	c = p->all;
 220	while (c) {
 221		if (c->comm && strcmp(c->comm, comm) == 0) {
 222			p->current = c;
 223			return;
 224		}
 225		if (!c->comm) {
 226			c->comm = strdup(comm);
 227			p->current = c;
 228			return;
 229		}
 230		c = c->next;
 231	}
 232	c = zalloc(sizeof(*c));
 233	assert(c != NULL);
 234	c->comm = strdup(comm);
 235	p->current = c;
 236	c->next = p->all;
 237	p->all = c;
 238}
 239
 240static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
 241{
 242	struct per_pid *p, *pp;
 243	p = find_create_pid(tchart, pid);
 244	pp = find_create_pid(tchart, ppid);
 245	p->ppid = ppid;
 246	if (pp->current && pp->current->comm && !p->current)
 247		pid_set_comm(tchart, pid, pp->current->comm);
 248
 249	p->start_time = timestamp;
 250	if (p->current && !p->current->start_time) {
 251		p->current->start_time = timestamp;
 252		p->current->state_since = timestamp;
 253	}
 254}
 255
 256static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
 257{
 258	struct per_pid *p;
 259	p = find_create_pid(tchart, pid);
 260	p->end_time = timestamp;
 261	if (p->current)
 262		p->current->end_time = timestamp;
 263}
 264
 265static void pid_put_sample(struct timechart *tchart, int pid, int type,
 266			   unsigned int cpu, u64 start, u64 end,
 267			   const char *backtrace)
 268{
 269	struct per_pid *p;
 270	struct per_pidcomm *c;
 271	struct cpu_sample *sample;
 272
 273	p = find_create_pid(tchart, pid);
 274	c = p->current;
 275	if (!c) {
 276		c = zalloc(sizeof(*c));
 277		assert(c != NULL);
 278		p->current = c;
 279		c->next = p->all;
 280		p->all = c;
 281	}
 282
 283	sample = zalloc(sizeof(*sample));
 284	assert(sample != NULL);
 285	sample->start_time = start;
 286	sample->end_time = end;
 287	sample->type = type;
 288	sample->next = c->samples;
 289	sample->cpu = cpu;
 290	sample->backtrace = backtrace;
 291	c->samples = sample;
 292
 293	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 294		c->total_time += (end-start);
 295		p->total_time += (end-start);
 296	}
 297
 298	if (c->start_time == 0 || c->start_time > start)
 299		c->start_time = start;
 300	if (p->start_time == 0 || p->start_time > start)
 301		p->start_time = start;
 302}
 303
 304#define MAX_CPUS 4096
 305
 306static u64 cpus_cstate_start_times[MAX_CPUS];
 307static int cpus_cstate_state[MAX_CPUS];
 308static u64 cpus_pstate_start_times[MAX_CPUS];
 309static u64 cpus_pstate_state[MAX_CPUS];
 310
 311static int process_comm_event(struct perf_tool *tool,
 312			      union perf_event *event,
 313			      struct perf_sample *sample __maybe_unused,
 314			      struct machine *machine __maybe_unused)
 315{
 316	struct timechart *tchart = container_of(tool, struct timechart, tool);
 317	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
 318	return 0;
 319}
 320
 321static int process_fork_event(struct perf_tool *tool,
 322			      union perf_event *event,
 323			      struct perf_sample *sample __maybe_unused,
 324			      struct machine *machine __maybe_unused)
 325{
 326	struct timechart *tchart = container_of(tool, struct timechart, tool);
 327	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
 328	return 0;
 329}
 330
 331static int process_exit_event(struct perf_tool *tool,
 332			      union perf_event *event,
 333			      struct perf_sample *sample __maybe_unused,
 334			      struct machine *machine __maybe_unused)
 335{
 336	struct timechart *tchart = container_of(tool, struct timechart, tool);
 337	pid_exit(tchart, event->fork.pid, event->fork.time);
 338	return 0;
 339}
 340
 341#ifdef SUPPORT_OLD_POWER_EVENTS
 342static int use_old_power_events;
 343#endif
 344
 345static void c_state_start(int cpu, u64 timestamp, int state)
 346{
 347	cpus_cstate_start_times[cpu] = timestamp;
 348	cpus_cstate_state[cpu] = state;
 349}
 350
 351static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
 352{
 353	struct power_event *pwr = zalloc(sizeof(*pwr));
 354
 355	if (!pwr)
 356		return;
 357
 358	pwr->state = cpus_cstate_state[cpu];
 359	pwr->start_time = cpus_cstate_start_times[cpu];
 360	pwr->end_time = timestamp;
 361	pwr->cpu = cpu;
 362	pwr->type = CSTATE;
 363	pwr->next = tchart->power_events;
 364
 365	tchart->power_events = pwr;
 366}
 367
 368static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
 369{
 370	struct power_event *pwr;
 371
 372	if (new_freq > 8000000) /* detect invalid data */
 373		return;
 374
 375	pwr = zalloc(sizeof(*pwr));
 376	if (!pwr)
 377		return;
 378
 379	pwr->state = cpus_pstate_state[cpu];
 380	pwr->start_time = cpus_pstate_start_times[cpu];
 381	pwr->end_time = timestamp;
 382	pwr->cpu = cpu;
 383	pwr->type = PSTATE;
 384	pwr->next = tchart->power_events;
 385
 386	if (!pwr->start_time)
 387		pwr->start_time = tchart->first_time;
 388
 389	tchart->power_events = pwr;
 390
 391	cpus_pstate_state[cpu] = new_freq;
 392	cpus_pstate_start_times[cpu] = timestamp;
 393
 394	if ((u64)new_freq > tchart->max_freq)
 395		tchart->max_freq = new_freq;
 396
 397	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
 398		tchart->min_freq = new_freq;
 399
 400	if (new_freq == tchart->max_freq - 1000)
 401		tchart->turbo_frequency = tchart->max_freq;
 402}
 403
 404static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
 405			 int waker, int wakee, u8 flags, const char *backtrace)
 406{
 407	struct per_pid *p;
 408	struct wake_event *we = zalloc(sizeof(*we));
 409
 410	if (!we)
 411		return;
 412
 413	we->time = timestamp;
 414	we->waker = waker;
 415	we->backtrace = backtrace;
 416
 417	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 418		we->waker = -1;
 419
 420	we->wakee = wakee;
 421	we->next = tchart->wake_events;
 422	tchart->wake_events = we;
 423	p = find_create_pid(tchart, we->wakee);
 424
 425	if (p && p->current && p->current->state == TYPE_NONE) {
 426		p->current->state_since = timestamp;
 427		p->current->state = TYPE_WAITING;
 428	}
 429	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 430		pid_put_sample(tchart, p->pid, p->current->state, cpu,
 431			       p->current->state_since, timestamp, NULL);
 432		p->current->state_since = timestamp;
 433		p->current->state = TYPE_WAITING;
 434	}
 435}
 436
 437static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
 438			 int prev_pid, int next_pid, u64 prev_state,
 439			 const char *backtrace)
 440{
 441	struct per_pid *p = NULL, *prev_p;
 442
 443	prev_p = find_create_pid(tchart, prev_pid);
 444
 445	p = find_create_pid(tchart, next_pid);
 446
 447	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 448		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
 449			       prev_p->current->state_since, timestamp,
 450			       backtrace);
 451	if (p && p->current) {
 452		if (p->current->state != TYPE_NONE)
 453			pid_put_sample(tchart, next_pid, p->current->state, cpu,
 454				       p->current->state_since, timestamp,
 455				       backtrace);
 456
 457		p->current->state_since = timestamp;
 458		p->current->state = TYPE_RUNNING;
 459	}
 460
 461	if (prev_p->current) {
 462		prev_p->current->state = TYPE_NONE;
 463		prev_p->current->state_since = timestamp;
 464		if (prev_state & 2)
 465			prev_p->current->state = TYPE_BLOCKED;
 466		if (prev_state == 0)
 467			prev_p->current->state = TYPE_WAITING;
 468	}
 469}
 470
 471static const char *cat_backtrace(union perf_event *event,
 472				 struct perf_sample *sample,
 473				 struct machine *machine)
 474{
 475	struct addr_location al;
 476	unsigned int i;
 477	char *p = NULL;
 478	size_t p_len;
 479	u8 cpumode = PERF_RECORD_MISC_USER;
 480	struct addr_location tal;
 481	struct ip_callchain *chain = sample->callchain;
 482	FILE *f = open_memstream(&p, &p_len);
 483
 484	if (!f) {
 485		perror("open_memstream error");
 486		return NULL;
 487	}
 488
 489	if (!chain)
 490		goto exit;
 491
 492	if (machine__resolve(machine, &al, sample) < 0) {
 493		fprintf(stderr, "problem processing %d event, skipping it.\n",
 494			event->header.type);
 495		goto exit;
 496	}
 497
 498	for (i = 0; i < chain->nr; i++) {
 499		u64 ip;
 500
 501		if (callchain_param.order == ORDER_CALLEE)
 502			ip = chain->ips[i];
 503		else
 504			ip = chain->ips[chain->nr - i - 1];
 505
 506		if (ip >= PERF_CONTEXT_MAX) {
 507			switch (ip) {
 508			case PERF_CONTEXT_HV:
 509				cpumode = PERF_RECORD_MISC_HYPERVISOR;
 510				break;
 511			case PERF_CONTEXT_KERNEL:
 512				cpumode = PERF_RECORD_MISC_KERNEL;
 513				break;
 514			case PERF_CONTEXT_USER:
 515				cpumode = PERF_RECORD_MISC_USER;
 516				break;
 517			default:
 518				pr_debug("invalid callchain context: "
 519					 "%"PRId64"\n", (s64) ip);
 520
 521				/*
 522				 * It seems the callchain is corrupted.
 523				 * Discard all.
 524				 */
 525				zfree(&p);
 526				goto exit_put;
 527			}
 528			continue;
 529		}
 530
 531		tal.filtered = 0;
 532		thread__find_addr_location(al.thread, cpumode,
 533					   MAP__FUNCTION, ip, &tal);
 534
 535		if (tal.sym)
 536			fprintf(f, "..... %016" PRIx64 " %s\n", ip,
 537				tal.sym->name);
 538		else
 539			fprintf(f, "..... %016" PRIx64 "\n", ip);
 540	}
 541exit_put:
 542	addr_location__put(&al);
 543exit:
 544	fclose(f);
 545
 546	return p;
 547}
 548
 549typedef int (*tracepoint_handler)(struct timechart *tchart,
 550				  struct perf_evsel *evsel,
 551				  struct perf_sample *sample,
 552				  const char *backtrace);
 553
 554static int process_sample_event(struct perf_tool *tool,
 555				union perf_event *event,
 556				struct perf_sample *sample,
 557				struct perf_evsel *evsel,
 558				struct machine *machine)
 559{
 560	struct timechart *tchart = container_of(tool, struct timechart, tool);
 561
 562	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
 563		if (!tchart->first_time || tchart->first_time > sample->time)
 564			tchart->first_time = sample->time;
 565		if (tchart->last_time < sample->time)
 566			tchart->last_time = sample->time;
 567	}
 568
 569	if (evsel->handler != NULL) {
 570		tracepoint_handler f = evsel->handler;
 571		return f(tchart, evsel, sample,
 572			 cat_backtrace(event, sample, machine));
 573	}
 574
 575	return 0;
 576}
 577
 578static int
 579process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
 580			struct perf_evsel *evsel,
 581			struct perf_sample *sample,
 582			const char *backtrace __maybe_unused)
 583{
 584	u32 state = perf_evsel__intval(evsel, sample, "state");
 585	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 586
 587	if (state == (u32)PWR_EVENT_EXIT)
 588		c_state_end(tchart, cpu_id, sample->time);
 589	else
 590		c_state_start(cpu_id, sample->time, state);
 591	return 0;
 592}
 593
 594static int
 595process_sample_cpu_frequency(struct timechart *tchart,
 596			     struct perf_evsel *evsel,
 597			     struct perf_sample *sample,
 598			     const char *backtrace __maybe_unused)
 599{
 600	u32 state = perf_evsel__intval(evsel, sample, "state");
 601	u32 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 602
 603	p_state_change(tchart, cpu_id, sample->time, state);
 604	return 0;
 605}
 606
 607static int
 608process_sample_sched_wakeup(struct timechart *tchart,
 609			    struct perf_evsel *evsel,
 610			    struct perf_sample *sample,
 611			    const char *backtrace)
 612{
 613	u8 flags = perf_evsel__intval(evsel, sample, "common_flags");
 614	int waker = perf_evsel__intval(evsel, sample, "common_pid");
 615	int wakee = perf_evsel__intval(evsel, sample, "pid");
 616
 617	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
 618	return 0;
 619}
 620
 621static int
 622process_sample_sched_switch(struct timechart *tchart,
 623			    struct perf_evsel *evsel,
 624			    struct perf_sample *sample,
 625			    const char *backtrace)
 626{
 627	int prev_pid = perf_evsel__intval(evsel, sample, "prev_pid");
 628	int next_pid = perf_evsel__intval(evsel, sample, "next_pid");
 629	u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state");
 630
 631	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
 632		     prev_state, backtrace);
 633	return 0;
 634}
 635
 636#ifdef SUPPORT_OLD_POWER_EVENTS
 637static int
 638process_sample_power_start(struct timechart *tchart __maybe_unused,
 639			   struct perf_evsel *evsel,
 640			   struct perf_sample *sample,
 641			   const char *backtrace __maybe_unused)
 642{
 643	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 644	u64 value = perf_evsel__intval(evsel, sample, "value");
 645
 646	c_state_start(cpu_id, sample->time, value);
 647	return 0;
 648}
 649
 650static int
 651process_sample_power_end(struct timechart *tchart,
 652			 struct perf_evsel *evsel __maybe_unused,
 653			 struct perf_sample *sample,
 654			 const char *backtrace __maybe_unused)
 655{
 656	c_state_end(tchart, sample->cpu, sample->time);
 657	return 0;
 658}
 659
 660static int
 661process_sample_power_frequency(struct timechart *tchart,
 662			       struct perf_evsel *evsel,
 663			       struct perf_sample *sample,
 664			       const char *backtrace __maybe_unused)
 665{
 666	u64 cpu_id = perf_evsel__intval(evsel, sample, "cpu_id");
 667	u64 value = perf_evsel__intval(evsel, sample, "value");
 668
 669	p_state_change(tchart, cpu_id, sample->time, value);
 670	return 0;
 671}
 672#endif /* SUPPORT_OLD_POWER_EVENTS */
 673
 674/*
 675 * After the last sample we need to wrap up the current C/P state
 676 * and close out each CPU for these.
 677 */
 678static void end_sample_processing(struct timechart *tchart)
 679{
 680	u64 cpu;
 681	struct power_event *pwr;
 682
 683	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
 684		/* C state */
 685#if 0
 686		pwr = zalloc(sizeof(*pwr));
 687		if (!pwr)
 688			return;
 689
 690		pwr->state = cpus_cstate_state[cpu];
 691		pwr->start_time = cpus_cstate_start_times[cpu];
 692		pwr->end_time = tchart->last_time;
 693		pwr->cpu = cpu;
 694		pwr->type = CSTATE;
 695		pwr->next = tchart->power_events;
 696
 697		tchart->power_events = pwr;
 698#endif
 699		/* P state */
 700
 701		pwr = zalloc(sizeof(*pwr));
 702		if (!pwr)
 703			return;
 704
 705		pwr->state = cpus_pstate_state[cpu];
 706		pwr->start_time = cpus_pstate_start_times[cpu];
 707		pwr->end_time = tchart->last_time;
 708		pwr->cpu = cpu;
 709		pwr->type = PSTATE;
 710		pwr->next = tchart->power_events;
 711
 712		if (!pwr->start_time)
 713			pwr->start_time = tchart->first_time;
 714		if (!pwr->state)
 715			pwr->state = tchart->min_freq;
 716		tchart->power_events = pwr;
 717	}
 718}
 719
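/*
 * Open a new I/O sample for this pid: record the start time, type and fd.
 * If the most recent sample for this comm was started but never ended,
 * drop it, since we cannot be sure an end event will ever arrive.
 */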
 720static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
 721			       u64 start, int fd)
 722{
 723	struct per_pid *p = find_create_pid(tchart, pid);
 724	struct per_pidcomm *c = p->current;
 725	struct io_sample *sample;
 726	struct io_sample *prev;
 727
 728	if (!c) {
 729		c = zalloc(sizeof(*c));
 730		if (!c)
 731			return -ENOMEM;
 732		p->current = c;
 733		c->next = p->all;
 734		p->all = c;
 735	}
 736
 737	prev = c->io_samples;
 738
 739	if (prev && prev->start_time && !prev->end_time) {
 740		pr_warning("Skip invalid start event: "
 741			   "previous event already started!\n");
 742
 743		/* Remove the previous event that was started;
 744		 * we are not sure we will ever get an end event for it. */
 745		c->io_samples = prev->next;
 746		free(prev);
 747		return 0;
 748	}
 749
 750	sample = zalloc(sizeof(*sample));
 751	if (!sample)
 752		return -ENOMEM;
 753	sample->start_time = start;
 754	sample->type = type;
 755	sample->fd = fd;
 756	sample->next = c->io_samples;
 757	c->io_samples = sample;
 758
 759	if (c->start_time == 0 || c->start_time > start)
 760		c->start_time = start;
 761
 762	return 0;
 763}
 764
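/*
 * Close the most recent I/O sample for this pid: stretch very short
 * transfers to at least min_time so they stay visible, record the error
 * or byte count from the syscall return value, and merge the sample with
 * the previous one when both are of the same type, share the same fd and
 * error status, and lie within merge_dist of each other.
 */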
 765static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
 766			     u64 end, long ret)
 767{
 768	struct per_pid *p = find_create_pid(tchart, pid);
 769	struct per_pidcomm *c = p->current;
 770	struct io_sample *sample, *prev;
 771
 772	if (!c) {
 773		pr_warning("Invalid pidcomm!\n");
 774		return -1;
 775	}
 776
 777	sample = c->io_samples;
 778
 779	if (!sample) /* skip partially captured events */
 780		return 0;
 781
 782	if (sample->end_time) {
 783		pr_warning("Skip invalid end event: "
 784			   "previous event already ended!\n");
 785		return 0;
 786	}
 787
 788	if (sample->type != type) {
 789		pr_warning("Skip invalid end event: invalid event type!\n");
 790		return 0;
 791	}
 792
 793	sample->end_time = end;
 794	prev = sample->next;
 795
 796	/* We want to be able to see small and fast transfers, so make them
 797	 * at least min_time long, but do not let them overlap. */
 798	if (sample->end_time - sample->start_time < tchart->min_time)
 799		sample->end_time = sample->start_time + tchart->min_time;
 800	if (prev && sample->start_time < prev->end_time) {
 801		if (prev->err) /* try to make errors more visible */
 802			sample->start_time = prev->end_time;
 803		else
 804			prev->end_time = sample->start_time;
 805	}
 806
 807	if (ret < 0) {
 808		sample->err = ret;
 809	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
 810		   type == IOTYPE_TX || type == IOTYPE_RX) {
 811
 812		if ((u64)ret > c->max_bytes)
 813			c->max_bytes = ret;
 814
 815		c->total_bytes += ret;
 816		p->total_bytes += ret;
 817		sample->bytes = ret;
 818	}
 819
 820	/* Merge the two requests to make the SVG smaller and friendlier to render. */
 821	if (prev &&
 822	    prev->type == sample->type &&
 823	    prev->err == sample->err &&
 824	    prev->fd == sample->fd &&
 825	    prev->end_time + tchart->merge_dist >= sample->start_time) {
 826
 827		sample->bytes += prev->bytes;
 828		sample->merges += prev->merges + 1;
 829
 830		sample->start_time = prev->start_time;
 831		sample->next = prev->next;
 832		free(prev);
 833
 834		if (!sample->err && sample->bytes > c->max_bytes)
 835			c->max_bytes = sample->bytes;
 836	}
 837
 838	tchart->io_events++;
 839
 840	return 0;
 841}
 842
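/*
 * The handlers below are thin wrappers that map individual syscall
 * tracepoints onto the generic begin/end I/O helpers above; for example,
 * sys_enter_read starts an IOTYPE_READ sample for the traced fd and
 * sys_exit_read closes it with the syscall's return value.
 */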
 843static int
 844process_enter_read(struct timechart *tchart,
 845		   struct perf_evsel *evsel,
 846		   struct perf_sample *sample)
 847{
 848	long fd = perf_evsel__intval(evsel, sample, "fd");
 849	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
 850				   sample->time, fd);
 851}
 852
 853static int
 854process_exit_read(struct timechart *tchart,
 855		  struct perf_evsel *evsel,
 856		  struct perf_sample *sample)
 857{
 858	long ret = perf_evsel__intval(evsel, sample, "ret");
 859	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
 860				 sample->time, ret);
 861}
 862
 863static int
 864process_enter_write(struct timechart *tchart,
 865		    struct perf_evsel *evsel,
 866		    struct perf_sample *sample)
 867{
 868	long fd = perf_evsel__intval(evsel, sample, "fd");
 869	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 870				   sample->time, fd);
 871}
 872
 873static int
 874process_exit_write(struct timechart *tchart,
 875		   struct perf_evsel *evsel,
 876		   struct perf_sample *sample)
 877{
 878	long ret = perf_evsel__intval(evsel, sample, "ret");
 879	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 880				 sample->time, ret);
 881}
 882
 883static int
 884process_enter_sync(struct timechart *tchart,
 885		   struct perf_evsel *evsel,
 886		   struct perf_sample *sample)
 887{
 888	long fd = perf_evsel__intval(evsel, sample, "fd");
 889	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 890				   sample->time, fd);
 891}
 892
 893static int
 894process_exit_sync(struct timechart *tchart,
 895		  struct perf_evsel *evsel,
 896		  struct perf_sample *sample)
 897{
 898	long ret = perf_evsel__intval(evsel, sample, "ret");
 899	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 900				 sample->time, ret);
 901}
 902
 903static int
 904process_enter_tx(struct timechart *tchart,
 905		 struct perf_evsel *evsel,
 906		 struct perf_sample *sample)
 907{
 908	long fd = perf_evsel__intval(evsel, sample, "fd");
 909	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
 910				   sample->time, fd);
 911}
 912
 913static int
 914process_exit_tx(struct timechart *tchart,
 915		struct perf_evsel *evsel,
 916		struct perf_sample *sample)
 917{
 918	long ret = perf_evsel__intval(evsel, sample, "ret");
 919	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
 920				 sample->time, ret);
 921}
 922
 923static int
 924process_enter_rx(struct timechart *tchart,
 925		 struct perf_evsel *evsel,
 926		 struct perf_sample *sample)
 927{
 928	long fd = perf_evsel__intval(evsel, sample, "fd");
 929	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
 930				   sample->time, fd);
 931}
 932
 933static int
 934process_exit_rx(struct timechart *tchart,
 935		struct perf_evsel *evsel,
 936		struct perf_sample *sample)
 937{
 938	long ret = perf_evsel__intval(evsel, sample, "ret");
 939	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
 940				 sample->time, ret);
 941}
 942
 943static int
 944process_enter_poll(struct timechart *tchart,
 945		   struct perf_evsel *evsel,
 946		   struct perf_sample *sample)
 947{
 948	long fd = perf_evsel__intval(evsel, sample, "fd");
 949	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
 950				   sample->time, fd);
 951}
 952
 953static int
 954process_exit_poll(struct timechart *tchart,
 955		  struct perf_evsel *evsel,
 956		  struct perf_sample *sample)
 957{
 958	long ret = perf_evsel__intval(evsel, sample, "ret");
 959	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
 960				 sample->time, ret);
 961}
 962
 963/*
 964 * Sort the pid data structure
 965 */
 966static void sort_pids(struct timechart *tchart)
 967{
 968	struct per_pid *new_list, *p, *cursor, *prev;
 969	/* sort by ppid first, then by pid, lowest to highest */
 970
 971	new_list = NULL;
 972
 973	while (tchart->all_data) {
 974		p = tchart->all_data;
 975		tchart->all_data = p->next;
 976		p->next = NULL;
 977
 978		if (new_list == NULL) {
 979			new_list = p;
 980			p->next = NULL;
 981			continue;
 982		}
 983		prev = NULL;
 984		cursor = new_list;
 985		while (cursor) {
 986			if (cursor->ppid > p->ppid ||
 987				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 988				/* must insert before */
 989				if (prev) {
 990					p->next = prev->next;
 991					prev->next = p;
 992					cursor = NULL;
 993					continue;
 994				} else {
 995					p->next = new_list;
 996					new_list = p;
 997					cursor = NULL;
 998					continue;
 999				}
1000			}
1001
1002			prev = cursor;
1003			cursor = cursor->next;
1004			if (!cursor)
1005				prev->next = p;
1006		}
1007	}
1008	tchart->all_data = new_list;
1009}
1010
1011
1012static void draw_c_p_states(struct timechart *tchart)
1013{
1014	struct power_event *pwr;
1015	pwr = tchart->power_events;
1016
1017	/*
1018	 * Two-pass drawing so that the P-state bars end up on top of the C-state blocks.
1019	 */
1020	while (pwr) {
1021		if (pwr->type == CSTATE)
1022			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1023		pwr = pwr->next;
1024	}
1025
1026	pwr = tchart->power_events;
1027	while (pwr) {
1028		if (pwr->type == PSTATE) {
1029			if (!pwr->state)
1030				pwr->state = tchart->min_freq;
1031			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1032		}
1033		pwr = pwr->next;
1034	}
1035}
1036
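/*
 * For each wakeup event, locate the chart row (Y) of the waker and the
 * wakee, preferring a comm whose lifetime covers the wakeup time, and
 * draw either an interrupt marker (waker == -1), a direct wake line for
 * adjacent rows, or a partial wake line labelled with the task names.
 */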
1037static void draw_wakeups(struct timechart *tchart)
1038{
1039	struct wake_event *we;
1040	struct per_pid *p;
1041	struct per_pidcomm *c;
1042
1043	we = tchart->wake_events;
1044	while (we) {
1045		int from = 0, to = 0;
1046		char *task_from = NULL, *task_to = NULL;
1047
1048		/* locate the column of the waker and wakee */
1049		p = tchart->all_data;
1050		while (p) {
1051			if (p->pid == we->waker || p->pid == we->wakee) {
1052				c = p->all;
1053				while (c) {
1054					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
1055						if (p->pid == we->waker && !from) {
1056							from = c->Y;
1057							task_from = strdup(c->comm);
1058						}
1059						if (p->pid == we->wakee && !to) {
1060							to = c->Y;
1061							task_to = strdup(c->comm);
1062						}
1063					}
1064					c = c->next;
1065				}
1066				c = p->all;
1067				while (c) {
1068					if (p->pid == we->waker && !from) {
1069						from = c->Y;
1070						task_from = strdup(c->comm);
1071					}
1072					if (p->pid == we->wakee && !to) {
1073						to = c->Y;
1074						task_to = strdup(c->comm);
1075					}
1076					c = c->next;
1077				}
1078			}
1079			p = p->next;
1080		}
1081
1082		if (!task_from) {
1083			task_from = malloc(40);
1084			sprintf(task_from, "[%i]", we->waker);
1085		}
1086		if (!task_to) {
1087			task_to = malloc(40);
1088			sprintf(task_to, "[%i]", we->wakee);
1089		}
1090
1091		if (we->waker == -1)
1092			svg_interrupt(we->time, to, we->backtrace);
1093		else if (from && to && abs(from - to) == 1)
1094			svg_wakeline(we->time, from, to, we->backtrace);
1095		else
1096			svg_partial_wakeline(we->time, from, task_from, to,
1097					     task_to, we->backtrace);
1098		we = we->next;
1099
1100		free(task_from);
1101		free(task_to);
1102	}
1103}
1104
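/* Fill in the per-CPU usage rows from each comm's TYPE_RUNNING samples. */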
1105static void draw_cpu_usage(struct timechart *tchart)
1106{
1107	struct per_pid *p;
1108	struct per_pidcomm *c;
1109	struct cpu_sample *sample;
1110	p = tchart->all_data;
1111	while (p) {
1112		c = p->all;
1113		while (c) {
1114			sample = c->samples;
1115			while (sample) {
1116				if (sample->type == TYPE_RUNNING) {
1117					svg_process(sample->cpu,
1118						    sample->start_time,
1119						    sample->end_time,
1120						    p->pid,
1121						    c->comm,
1122						    sample->backtrace);
1123				}
1124
1125				sample = sample->next;
1126			}
1127			c = c->next;
1128		}
1129		p = p->next;
1130	}
1131}
1132
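/*
 * Draw one row per displayed comm in I/O mode: sync and poll requests are
 * drawn full height, reads/receives as upper boxes and writes/sends as
 * lower boxes scaled by bytes transferred relative to the comm's largest
 * request, with errors forced to full height. The row label shows the
 * comm, pid and total bytes in K/M/G units.
 */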
1133static void draw_io_bars(struct timechart *tchart)
1134{
1135	const char *suf;
1136	double bytes;
1137	char comm[256];
1138	struct per_pid *p;
1139	struct per_pidcomm *c;
1140	struct io_sample *sample;
1141	int Y = 1;
1142
1143	p = tchart->all_data;
1144	while (p) {
1145		c = p->all;
1146		while (c) {
1147			if (!c->display) {
1148				c->Y = 0;
1149				c = c->next;
1150				continue;
1151			}
1152
1153			svg_box(Y, c->start_time, c->end_time, "process3");
1154			sample = c->io_samples;
1155			for (sample = c->io_samples; sample; sample = sample->next) {
1156				double h = (double)sample->bytes / c->max_bytes;
1157
1158				if (tchart->skip_eagain &&
1159				    sample->err == -EAGAIN)
1160					continue;
1161
1162				if (sample->err)
1163					h = 1;
1164
1165				if (sample->type == IOTYPE_SYNC)
1166					svg_fbox(Y,
1167						sample->start_time,
1168						sample->end_time,
1169						1,
1170						sample->err ? "error" : "sync",
1171						sample->fd,
1172						sample->err,
1173						sample->merges);
1174				else if (sample->type == IOTYPE_POLL)
1175					svg_fbox(Y,
1176						sample->start_time,
1177						sample->end_time,
1178						1,
1179						sample->err ? "error" : "poll",
1180						sample->fd,
1181						sample->err,
1182						sample->merges);
1183				else if (sample->type == IOTYPE_READ)
1184					svg_ubox(Y,
1185						sample->start_time,
1186						sample->end_time,
1187						h,
1188						sample->err ? "error" : "disk",
1189						sample->fd,
1190						sample->err,
1191						sample->merges);
1192				else if (sample->type == IOTYPE_WRITE)
1193					svg_lbox(Y,
1194						sample->start_time,
1195						sample->end_time,
1196						h,
1197						sample->err ? "error" : "disk",
1198						sample->fd,
1199						sample->err,
1200						sample->merges);
1201				else if (sample->type == IOTYPE_RX)
1202					svg_ubox(Y,
1203						sample->start_time,
1204						sample->end_time,
1205						h,
1206						sample->err ? "error" : "net",
1207						sample->fd,
1208						sample->err,
1209						sample->merges);
1210				else if (sample->type == IOTYPE_TX)
1211					svg_lbox(Y,
1212						sample->start_time,
1213						sample->end_time,
1214						h,
1215						sample->err ? "error" : "net",
1216						sample->fd,
1217						sample->err,
1218						sample->merges);
1219			}
1220
1221			suf = "";
1222			bytes = c->total_bytes;
1223			if (bytes > 1024) {
1224				bytes = bytes / 1024;
1225				suf = "K";
1226			}
1227			if (bytes > 1024) {
1228				bytes = bytes / 1024;
1229				suf = "M";
1230			}
1231			if (bytes > 1024) {
1232				bytes = bytes / 1024;
1233				suf = "G";
1234			}
1235
1236
1237			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
1238			svg_text(Y, c->start_time, comm);
1239
1240			c->Y = Y;
1241			Y++;
1242			c = c->next;
1243		}
1244		p = p->next;
1245	}
1246}
1247
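/*
 * Draw one row per displayed comm below the per-CPU rows: a background
 * box for the comm's lifetime, overlaid with its running, blocked and
 * waiting samples, plus a label with the comm, pid and total CPU time.
 */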
1248static void draw_process_bars(struct timechart *tchart)
1249{
1250	struct per_pid *p;
1251	struct per_pidcomm *c;
1252	struct cpu_sample *sample;
1253	int Y = 0;
1254
1255	Y = 2 * tchart->numcpus + 2;
1256
1257	p = tchart->all_data;
1258	while (p) {
1259		c = p->all;
1260		while (c) {
1261			if (!c->display) {
1262				c->Y = 0;
1263				c = c->next;
1264				continue;
1265			}
1266
1267			svg_box(Y, c->start_time, c->end_time, "process");
1268			sample = c->samples;
1269			while (sample) {
1270				if (sample->type == TYPE_RUNNING)
1271					svg_running(Y, sample->cpu,
1272						    sample->start_time,
1273						    sample->end_time,
1274						    sample->backtrace);
1275				if (sample->type == TYPE_BLOCKED)
1276					svg_blocked(Y, sample->cpu,
1277						    sample->start_time,
1278						    sample->end_time,
1279						    sample->backtrace);
1280				if (sample->type == TYPE_WAITING)
1281					svg_waiting(Y, sample->cpu,
1282						    sample->start_time,
1283						    sample->end_time,
1284						    sample->backtrace);
1285				sample = sample->next;
1286			}
1287
1288			if (c->comm) {
1289				char comm[256];
1290				if (c->total_time > 5000000000) /* 5 seconds */
1291					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
1292				else
1293					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
1294
1295				svg_text(Y, c->start_time, comm);
1296			}
1297			c->Y = Y;
1298			Y++;
1299			c = c->next;
1300		}
1301		p = p->next;
1302	}
1303}
1304
1305static void add_process_filter(const char *string)
1306{
1307	int pid = strtoull(string, NULL, 10);
1308	struct process_filter *filt = malloc(sizeof(*filt));
1309
1310	if (!filt)
1311		return;
1312
1313	filt->name = strdup(string);
1314	filt->pid  = pid;
1315	filt->next = process_filter;
1316
1317	process_filter = filt;
1318}
1319
1320static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
1321{
1322	struct process_filter *filt;
1323	if (!process_filter)
1324		return 1;
1325
1326	filt = process_filter;
1327	while (filt) {
1328		if (filt->pid && p->pid == filt->pid)
1329			return 1;
1330		if (strcmp(filt->name, c->comm) == 0)
1331			return 1;
1332		filt = filt->next;
1333	}
1334	return 0;
1335}
1336
1337static int determine_display_tasks_filtered(struct timechart *tchart)
1338{
1339	struct per_pid *p;
1340	struct per_pidcomm *c;
1341	int count = 0;
1342
1343	p = tchart->all_data;
1344	while (p) {
1345		p->display = 0;
1346		if (p->start_time == 1)
1347			p->start_time = tchart->first_time;
1348
1349		/* no exit marker; the task kept running to the end */
1350		if (p->end_time == 0)
1351			p->end_time = tchart->last_time;
1352
1353		c = p->all;
1354
1355		while (c) {
1356			c->display = 0;
1357
1358			if (c->start_time == 1)
1359				c->start_time = tchart->first_time;
1360
1361			if (passes_filter(p, c)) {
1362				c->display = 1;
1363				p->display = 1;
1364				count++;
1365			}
1366
1367			if (c->end_time == 0)
1368				c->end_time = tchart->last_time;
1369
1370			c = c->next;
1371		}
1372		p = p->next;
1373	}
1374	return count;
1375}
1376
1377static int determine_display_tasks(struct timechart *tchart, u64 threshold)
1378{
1379	struct per_pid *p;
1380	struct per_pidcomm *c;
1381	int count = 0;
1382
1383	p = tchart->all_data;
1384	while (p) {
1385		p->display = 0;
1386		if (p->start_time == 1)
1387			p->start_time = tchart->first_time;
1388
1389		/* no exit marker; the task kept running to the end */
1390		if (p->end_time == 0)
1391			p->end_time = tchart->last_time;
1392		if (p->total_time >= threshold)
1393			p->display = 1;
1394
1395		c = p->all;
1396
1397		while (c) {
1398			c->display = 0;
1399
1400			if (c->start_time == 1)
1401				c->start_time = tchart->first_time;
1402
1403			if (c->total_time >= threshold) {
1404				c->display = 1;
1405				count++;
1406			}
1407
1408			if (c->end_time == 0)
1409				c->end_time = tchart->last_time;
1410
1411			c = c->next;
1412		}
1413		p = p->next;
1414	}
1415	return count;
1416}
1417
1418static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
1419{
1420	struct per_pid *p;
1421	struct per_pidcomm *c;
1422	int count = 0;
1423
1424	p = timechart->all_data;
1425	while (p) {
1426		/* no exit marker; the task kept running to the end */
1427		if (p->end_time == 0)
1428			p->end_time = timechart->last_time;
1429
1430		c = p->all;
1431
1432		while (c) {
1433			c->display = 0;
1434
1435			if (c->total_bytes >= threshold) {
1436				c->display = 1;
1437				count++;
1438			}
1439
1440			if (c->end_time == 0)
1441				c->end_time = timechart->last_time;
1442
1443			c = c->next;
1444		}
1445		p = p->next;
1446	}
1447	return count;
1448}
1449
1450#define BYTES_THRESH (1 * 1024 * 1024)
1451#define TIME_THRESH 10000000
1452
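/*
 * Decide which tasks to draw and emit the SVG. Starting from the default
 * threshold (total bytes for I/O charts, total run time otherwise), keep
 * dividing it by 10 until at least proc_num tasks qualify, unless an
 * explicit process filter is in effect.
 */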
1453static void write_svg_file(struct timechart *tchart, const char *filename)
1454{
1455	u64 i;
1456	int count;
1457	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
1458
1459	if (tchart->power_only)
1460		tchart->proc_num = 0;
1461
1462	/* We'd like to show at least proc_num tasks;
1463	 * be less picky if we have fewer */
1464	do {
1465		if (process_filter)
1466			count = determine_display_tasks_filtered(tchart);
1467		else if (tchart->io_events)
1468			count = determine_display_io_tasks(tchart, thresh);
1469		else
1470			count = determine_display_tasks(tchart, thresh);
1471		thresh /= 10;
1472	} while (!process_filter && thresh && count < tchart->proc_num);
1473
1474	if (!tchart->proc_num)
1475		count = 0;
1476
1477	if (tchart->io_events) {
1478		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
1479
1480		svg_time_grid(0.5);
1481		svg_io_legenda();
1482
1483		draw_io_bars(tchart);
1484	} else {
1485		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1486
1487		svg_time_grid(0);
1488
1489		svg_legenda();
1490
1491		for (i = 0; i < tchart->numcpus; i++)
1492			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1493
1494		draw_cpu_usage(tchart);
1495		if (tchart->proc_num)
1496			draw_process_bars(tchart);
1497		if (!tchart->tasks_only)
1498			draw_c_p_states(tchart);
1499		if (tchart->proc_num)
1500			draw_wakeups(tchart);
1501	}
1502
1503	svg_close();
1504}
1505
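/*
 * perf.data header callback: pick up the number of available CPUs and,
 * when --topology is given, build the CPU topology map used to sort the
 * per-CPU rows.
 */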
1506static int process_header(struct perf_file_section *section __maybe_unused,
1507			  struct perf_header *ph,
1508			  int feat,
1509			  int fd __maybe_unused,
1510			  void *data)
1511{
1512	struct timechart *tchart = data;
1513
1514	switch (feat) {
1515	case HEADER_NRCPUS:
1516		tchart->numcpus = ph->env.nr_cpus_avail;
1517		break;
1518
1519	case HEADER_CPU_TOPOLOGY:
1520		if (!tchart->topology)
1521			break;
1522
1523		if (svg_build_topology_map(ph->env.sibling_cores,
1524					   ph->env.nr_sibling_cores,
1525					   ph->env.sibling_threads,
1526					   ph->env.nr_sibling_threads))
1527			fprintf(stderr, "problem building topology\n");
1528		break;
1529
1530	default:
1531		break;
1532	}
1533
1534	return 0;
1535}
1536
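/*
 * Report mode: open the perf.data file, wire up the power, scheduler and
 * syscall tracepoint handlers, process all events, close out the open
 * C/P states, sort the pid list and write the resulting SVG.
 */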
1537static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1538{
1539	const struct perf_evsel_str_handler power_tracepoints[] = {
1540		{ "power:cpu_idle",		process_sample_cpu_idle },
1541		{ "power:cpu_frequency",	process_sample_cpu_frequency },
1542		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
1543		{ "sched:sched_switch",		process_sample_sched_switch },
1544#ifdef SUPPORT_OLD_POWER_EVENTS
1545		{ "power:power_start",		process_sample_power_start },
1546		{ "power:power_end",		process_sample_power_end },
1547		{ "power:power_frequency",	process_sample_power_frequency },
1548#endif
1549
1550		{ "syscalls:sys_enter_read",		process_enter_read },
1551		{ "syscalls:sys_enter_pread64",		process_enter_read },
1552		{ "syscalls:sys_enter_readv",		process_enter_read },
1553		{ "syscalls:sys_enter_preadv",		process_enter_read },
1554		{ "syscalls:sys_enter_write",		process_enter_write },
1555		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
1556		{ "syscalls:sys_enter_writev",		process_enter_write },
1557		{ "syscalls:sys_enter_pwritev",		process_enter_write },
1558		{ "syscalls:sys_enter_sync",		process_enter_sync },
1559		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
1560		{ "syscalls:sys_enter_fsync",		process_enter_sync },
1561		{ "syscalls:sys_enter_msync",		process_enter_sync },
1562		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
1563		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
1564		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
1565		{ "syscalls:sys_enter_sendto",		process_enter_tx },
1566		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
1567		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
1568		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
1569		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
1570		{ "syscalls:sys_enter_poll",		process_enter_poll },
1571		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
1572		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
1573		{ "syscalls:sys_enter_select",		process_enter_poll },
1574
1575		{ "syscalls:sys_exit_read",		process_exit_read },
1576		{ "syscalls:sys_exit_pread64",		process_exit_read },
1577		{ "syscalls:sys_exit_readv",		process_exit_read },
1578		{ "syscalls:sys_exit_preadv",		process_exit_read },
1579		{ "syscalls:sys_exit_write",		process_exit_write },
1580		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
1581		{ "syscalls:sys_exit_writev",		process_exit_write },
1582		{ "syscalls:sys_exit_pwritev",		process_exit_write },
1583		{ "syscalls:sys_exit_sync",		process_exit_sync },
1584		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
1585		{ "syscalls:sys_exit_fsync",		process_exit_sync },
1586		{ "syscalls:sys_exit_msync",		process_exit_sync },
1587		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
1588		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
1589		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
1590		{ "syscalls:sys_exit_sendto",		process_exit_tx },
1591		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
1592		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
1593		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
1594		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
1595		{ "syscalls:sys_exit_poll",		process_exit_poll },
1596		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
1597		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
1598		{ "syscalls:sys_exit_select",		process_exit_poll },
1599	};
1600	struct perf_data_file file = {
1601		.path = input_name,
1602		.mode = PERF_DATA_MODE_READ,
1603		.force = tchart->force,
1604	};
1605
1606	struct perf_session *session = perf_session__new(&file, false,
1607							 &tchart->tool);
1608	int ret = -EINVAL;
1609
1610	if (session == NULL)
1611		return -1;
1612
1613	symbol__init(&session->header.env);
1614
1615	(void)perf_header__process_sections(&session->header,
1616					    perf_data_file__fd(session->file),
1617					    tchart,
1618					    process_header);
1619
1620	if (!perf_session__has_traces(session, "timechart record"))
1621		goto out_delete;
1622
1623	if (perf_session__set_tracepoints_handlers(session,
1624						   power_tracepoints)) {
1625		pr_err("Initializing session tracepoint handlers failed\n");
1626		goto out_delete;
1627	}
1628
1629	ret = perf_session__process_events(session);
1630	if (ret)
1631		goto out_delete;
1632
1633	end_sample_processing(tchart);
1634
1635	sort_pids(tchart);
1636
1637	write_svg_file(tchart, output_name);
1638
1639	pr_info("Written %2.1f seconds of trace to %s.\n",
1640		(tchart->last_time - tchart->first_time) / 1000000000.0, output_name);
1641out_delete:
1642	perf_session__delete(session);
1643	return ret;
1644}
1645
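/*
 * Build and run a "perf record" command for I/O mode: the common arguments
 * plus a "-e <tracepoint> --filter 'common_pid != <our pid>'" quadruple for
 * every available disk, net and poll syscall tracepoint, so that perf's own
 * I/O is excluded. The resulting command is roughly:
 *
 *   perf record -a -R -c 1 \
 *     -e syscalls:sys_enter_read --filter "common_pid != <pid>" \
 *     -e syscalls:sys_exit_read  --filter "common_pid != <pid>" \
 *     ... (one pair per supported syscall) ...
 */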
1646static int timechart__io_record(int argc, const char **argv)
1647{
1648	unsigned int rec_argc, i;
1649	const char **rec_argv;
1650	const char **p;
1651	char *filter = NULL;
1652
1653	const char * const common_args[] = {
1654		"record", "-a", "-R", "-c", "1",
1655	};
1656	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1657
1658	const char * const disk_events[] = {
1659		"syscalls:sys_enter_read",
1660		"syscalls:sys_enter_pread64",
1661		"syscalls:sys_enter_readv",
1662		"syscalls:sys_enter_preadv",
1663		"syscalls:sys_enter_write",
1664		"syscalls:sys_enter_pwrite64",
1665		"syscalls:sys_enter_writev",
1666		"syscalls:sys_enter_pwritev",
1667		"syscalls:sys_enter_sync",
1668		"syscalls:sys_enter_sync_file_range",
1669		"syscalls:sys_enter_fsync",
1670		"syscalls:sys_enter_msync",
1671
1672		"syscalls:sys_exit_read",
1673		"syscalls:sys_exit_pread64",
1674		"syscalls:sys_exit_readv",
1675		"syscalls:sys_exit_preadv",
1676		"syscalls:sys_exit_write",
1677		"syscalls:sys_exit_pwrite64",
1678		"syscalls:sys_exit_writev",
1679		"syscalls:sys_exit_pwritev",
1680		"syscalls:sys_exit_sync",
1681		"syscalls:sys_exit_sync_file_range",
1682		"syscalls:sys_exit_fsync",
1683		"syscalls:sys_exit_msync",
1684	};
1685	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
1686
1687	const char * const net_events[] = {
1688		"syscalls:sys_enter_recvfrom",
1689		"syscalls:sys_enter_recvmmsg",
1690		"syscalls:sys_enter_recvmsg",
1691		"syscalls:sys_enter_sendto",
1692		"syscalls:sys_enter_sendmsg",
1693		"syscalls:sys_enter_sendmmsg",
1694
1695		"syscalls:sys_exit_recvfrom",
1696		"syscalls:sys_exit_recvmmsg",
1697		"syscalls:sys_exit_recvmsg",
1698		"syscalls:sys_exit_sendto",
1699		"syscalls:sys_exit_sendmsg",
1700		"syscalls:sys_exit_sendmmsg",
1701	};
1702	unsigned int net_events_nr = ARRAY_SIZE(net_events);
1703
1704	const char * const poll_events[] = {
1705		"syscalls:sys_enter_epoll_pwait",
1706		"syscalls:sys_enter_epoll_wait",
1707		"syscalls:sys_enter_poll",
1708		"syscalls:sys_enter_ppoll",
1709		"syscalls:sys_enter_pselect6",
1710		"syscalls:sys_enter_select",
1711
1712		"syscalls:sys_exit_epoll_pwait",
1713		"syscalls:sys_exit_epoll_wait",
1714		"syscalls:sys_exit_poll",
1715		"syscalls:sys_exit_ppoll",
1716		"syscalls:sys_exit_pselect6",
1717		"syscalls:sys_exit_select",
1718	};
1719	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
1720
1721	rec_argc = common_args_nr +
1722		disk_events_nr * 4 +
1723		net_events_nr * 4 +
1724		poll_events_nr * 4 +
1725		argc;
1726	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1727
1728	if (rec_argv == NULL)
1729		return -ENOMEM;
1730
1731	if (asprintf(&filter, "common_pid != %d", getpid()) < 0)
1732		return -ENOMEM;
1733
1734	p = rec_argv;
1735	for (i = 0; i < common_args_nr; i++)
1736		*p++ = strdup(common_args[i]);
1737
1738	for (i = 0; i < disk_events_nr; i++) {
1739		if (!is_valid_tracepoint(disk_events[i])) {
1740			rec_argc -= 4;
1741			continue;
1742		}
1743
1744		*p++ = "-e";
1745		*p++ = strdup(disk_events[i]);
1746		*p++ = "--filter";
1747		*p++ = filter;
1748	}
1749	for (i = 0; i < net_events_nr; i++) {
1750		if (!is_valid_tracepoint(net_events[i])) {
1751			rec_argc -= 4;
1752			continue;
1753		}
1754
1755		*p++ = "-e";
1756		*p++ = strdup(net_events[i]);
1757		*p++ = "--filter";
1758		*p++ = filter;
1759	}
1760	for (i = 0; i < poll_events_nr; i++) {
1761		if (!is_valid_tracepoint(poll_events[i])) {
1762			rec_argc -= 4;
1763			continue;
1764		}
1765
1766		*p++ = "-e";
1767		*p++ = strdup(poll_events[i]);
1768		*p++ = "--filter";
1769		*p++ = filter;
1770	}
1771
1772	for (i = 0; i < (unsigned int)argc; i++)
1773		*p++ = argv[i];
1774
1775	return cmd_record(rec_argc, rec_argv, NULL);
1776}
1777
1778
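/*
 * Build and run a "perf record" command for the regular timechart: the
 * common arguments, optional "-g" for backtraces, the sched_wakeup /
 * sched_switch events (unless --power-only), and the power events
 * (unless --tasks-only), falling back to the old power:power_* tracepoints
 * when power:cpu_idle is not available but power:power_start is.
 */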
1779static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1780{
1781	unsigned int rec_argc, i, j;
1782	const char **rec_argv;
1783	const char **p;
1784	unsigned int record_elems;
1785
1786	const char * const common_args[] = {
1787		"record", "-a", "-R", "-c", "1",
1788	};
1789	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1790
1791	const char * const backtrace_args[] = {
1792		"-g",
1793	};
1794	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1795
1796	const char * const power_args[] = {
1797		"-e", "power:cpu_frequency",
1798		"-e", "power:cpu_idle",
1799	};
1800	unsigned int power_args_nr = ARRAY_SIZE(power_args);
1801
1802	const char * const old_power_args[] = {
1803#ifdef SUPPORT_OLD_POWER_EVENTS
1804		"-e", "power:power_start",
1805		"-e", "power:power_end",
1806		"-e", "power:power_frequency",
1807#endif
1808	};
1809	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1810
1811	const char * const tasks_args[] = {
1812		"-e", "sched:sched_wakeup",
1813		"-e", "sched:sched_switch",
1814	};
1815	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1816
1817#ifdef SUPPORT_OLD_POWER_EVENTS
1818	if (!is_valid_tracepoint("power:cpu_idle") &&
1819	    is_valid_tracepoint("power:power_start")) {
1820		use_old_power_events = 1;
1821		power_args_nr = 0;
1822	} else {
1823		old_power_args_nr = 0;
1824	}
1825#endif
1826
1827	if (tchart->power_only)
1828		tasks_args_nr = 0;
1829
1830	if (tchart->tasks_only) {
1831		power_args_nr = 0;
1832		old_power_args_nr = 0;
1833	}
1834
1835	if (!tchart->with_backtrace)
1836		backtrace_args_no = 0;
1837
1838	record_elems = common_args_nr + tasks_args_nr +
1839		power_args_nr + old_power_args_nr + backtrace_args_no;
1840
1841	rec_argc = record_elems + argc;
1842	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1843
1844	if (rec_argv == NULL)
1845		return -ENOMEM;
1846
1847	p = rec_argv;
1848	for (i = 0; i < common_args_nr; i++)
1849		*p++ = strdup(common_args[i]);
1850
1851	for (i = 0; i < backtrace_args_no; i++)
1852		*p++ = strdup(backtrace_args[i]);
1853
1854	for (i = 0; i < tasks_args_nr; i++)
1855		*p++ = strdup(tasks_args[i]);
1856
1857	for (i = 0; i < power_args_nr; i++)
1858		*p++ = strdup(power_args[i]);
1859
1860	for (i = 0; i < old_power_args_nr; i++)
1861		*p++ = strdup(old_power_args[i]);
1862
1863	for (j = 0; j < (unsigned int)argc; j++)
1864		*p++ = argv[j];
1865
1866	return cmd_record(rec_argc, rec_argv, NULL);
1867}
1868
1869static int
1870parse_process(const struct option *opt __maybe_unused, const char *arg,
1871	      int __maybe_unused unset)
1872{
1873	if (arg)
1874		add_process_filter(arg);
1875	return 0;
1876}
1877
1878static int
1879parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1880		int __maybe_unused unset)
1881{
1882	unsigned long duration = strtoul(arg, NULL, 0);
1883
1884	if (svg_highlight || svg_highlight_name)
1885		return -1;
1886
1887	if (duration)
1888		svg_highlight = duration;
1889	else
1890		svg_highlight_name = strdup(arg);
1891
1892	return 0;
1893}
1894
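/*
 * Parse a time argument with an optional unit suffix; plain numbers are
 * nanoseconds, e.g. "1000" = 1000 ns, "500us" = 500,000 ns and
 * "10ms" = 10,000,000 ns.
 */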
1895static int
1896parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
1897{
1898	char unit = 'n';
1899	u64 *value = opt->value;
1900
1901	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
1902		switch (unit) {
1903		case 'm':
1904			*value *= 1000000;
1905			break;
1906		case 'u':
1907			*value *= 1000;
1908			break;
1909		case 'n':
1910			break;
1911		default:
1912			return -1;
1913		}
1914	}
1915
1916	return 0;
1917}
1918
1919int cmd_timechart(int argc, const char **argv,
1920		  const char *prefix __maybe_unused)
1921{
1922	struct timechart tchart = {
1923		.tool = {
1924			.comm		 = process_comm_event,
1925			.fork		 = process_fork_event,
1926			.exit		 = process_exit_event,
1927			.sample		 = process_sample_event,
1928			.ordered_events	 = true,
1929		},
1930		.proc_num = 15,
1931		.min_time = 1000000,
1932		.merge_dist = 1000,
1933	};
1934	const char *output_name = "output.svg";
1935	const struct option timechart_options[] = {
1936	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1937	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1938	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1939	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1940		      "highlight tasks. Pass duration in ns or process name.",
1941		       parse_highlight),
1942	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1943	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
1944		    "output processes data only"),
1945	OPT_CALLBACK('p', "process", NULL, "process",
1946		      "process selector. Pass a pid or process name.",
1947		       parse_process),
1948	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1949		    "Look for files with symbols relative to this directory"),
1950	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1951		    "min. number of tasks to print"),
1952	OPT_BOOLEAN('t', "topology", &tchart.topology,
1953		    "sort CPUs according to topology"),
1954	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
1955		    "skip EAGAIN errors"),
1956	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
1957		     "all IO faster than min-time will visually appear longer",
1958		     parse_time),
1959	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
1960		     "merge events that are merge-dist us apart",
1961		     parse_time),
1962	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
1963	OPT_END()
1964	};
1965	const char * const timechart_subcommands[] = { "record", NULL };
1966	const char *timechart_usage[] = {
1967		"perf timechart [<options>] {record}",
1968		NULL
1969	};
1970
1971	const struct option timechart_record_options[] = {
1972	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1973	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only,
1974		    "output processes data only"),
1975	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
1976		    "record only IO data"),
1977	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1978	OPT_END()
1979	};
1980	const char * const timechart_record_usage[] = {
1981		"perf timechart record [<options>]",
1982		NULL
1983	};
1984	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
1985			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
1986
1987	if (tchart.power_only && tchart.tasks_only) {
1988		pr_err("-P and -T options cannot be used at the same time.\n");
1989		return -1;
1990	}
1991
1992	if (argc && !strncmp(argv[0], "rec", 3)) {
1993		argc = parse_options(argc, argv, timechart_record_options,
1994				     timechart_record_usage,
1995				     PARSE_OPT_STOP_AT_NON_OPTION);
1996
1997		if (tchart.power_only && tchart.tasks_only) {
1998			pr_err("-P and -T options cannot be used at the same time.\n");
1999			return -1;
2000		}
2001
2002		if (tchart.io_only)
2003			return timechart__io_record(argc, argv);
2004		else
2005			return timechart__record(&tchart, argc, argv);
2006	} else if (argc)
2007		usage_with_options(timechart_usage, timechart_options);
2008
2009	setup_pager();
2010
2011	return __cmd_timechart(&tchart, output_name);
2012}