v3.5.6 (tools/perf/builtin-timechart.c)
   1/*
   2 * builtin-timechart.c - make an svg timechart of system activity
   3 *
   4 * (C) Copyright 2009 Intel Corporation
   5 *
   6 * Authors:
   7 *     Arjan van de Ven <arjan@linux.intel.com>
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License
  11 * as published by the Free Software Foundation; version 2
  12 * of the License.
  13 */
  14
  15#include "builtin.h"
  16
  17#include "util/util.h"
  18
 
  19#include "util/color.h"
  20#include <linux/list.h>
  21#include "util/cache.h"
  22#include "util/evsel.h"
 
  23#include <linux/rbtree.h>
 
 
  24#include "util/symbol.h"
 
  25#include "util/callchain.h"
  26#include "util/strlist.h"
  27
  28#include "perf.h"
  29#include "util/header.h"
  30#include "util/parse-options.h"
 
  31#include "util/parse-events.h"
  32#include "util/event.h"
  33#include "util/session.h"
  34#include "util/svghelper.h"
  35#include "util/tool.h"
  36
  37#define SUPPORT_OLD_POWER_EVENTS 1
  38#define PWR_EVENT_EXIT -1
  39
  40
  41static const char	*input_name;
  42static const char	*output_name = "output.svg";
  43
  44static unsigned int	numcpus;
  45static u64		min_freq;	/* Lowest CPU frequency seen */
  46static u64		max_freq;	/* Highest CPU frequency seen */
  47static u64		turbo_frequency;
  48
  49static u64		first_time, last_time;
  50
  51static bool		power_only;
  52
  53
  54struct per_pid;
  55struct per_pidcomm;
  56
  57struct cpu_sample;
  58struct power_event;
  59struct wake_event;
  60
  61struct sample_wrapper;
  62
  63/*
  64 * Datastructure layout:
  65 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
  66 * Each "pid" entry has a list of "comm"s;
  67 *	this is because we want to track different programs differently, while
  68 *	exec will reuse the original pid (by design).
  69 * Each comm has a list of samples that will be used to draw the
  70 * final graph.
  71 */
  72
  73struct per_pid {
  74	struct per_pid *next;
  75
  76	int		pid;
  77	int		ppid;
  78
  79	u64		start_time;
  80	u64		end_time;
  81	u64		total_time;
 
  82	int		display;
  83
  84	struct per_pidcomm *all;
  85	struct per_pidcomm *current;
  86};
  87
  88
  89struct per_pidcomm {
  90	struct per_pidcomm *next;
  91
  92	u64		start_time;
  93	u64		end_time;
  94	u64		total_time;
 
 
  95
  96	int		Y;
  97	int		display;
  98
  99	long		state;
 100	u64		state_since;
 101
 102	char		*comm;
 103
 104	struct cpu_sample *samples;
 
 105};
 106
 107struct sample_wrapper {
 108	struct sample_wrapper *next;
 109
 110	u64		timestamp;
 111	unsigned char	data[0];
 112};
 113
 114#define TYPE_NONE	0
 115#define TYPE_RUNNING	1
 116#define TYPE_WAITING	2
 117#define TYPE_BLOCKED	3
 118
 119struct cpu_sample {
 120	struct cpu_sample *next;
 121
 122	u64 start_time;
 123	u64 end_time;
 124	int type;
 125	int cpu;
 
 126};
 127
 128static struct per_pid *all_data;
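/*
 * Editor's sketch (not part of the original file): how the lists described in
 * the "Datastructure layout" comment above nest. It assumes only the
 * structures and the all_data list head declared in this listing.
 */
#if 0	/* illustration only */
static void walk_all_data(void)
{
	struct per_pid *p;
	struct per_pidcomm *c;
	struct cpu_sample *s;

	for (p = all_data; p; p = p->next)		/* one entry per task */
		for (c = p->all; c; c = c->next)	/* one entry per comm/exec */
			for (s = c->samples; s; s = s->next)	/* newest sample first */
				pr_info("pid %i comm %s cpu %i\n",
					p->pid, c->comm ? c->comm : "?", s->cpu);
}
#endif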
 129
 130#define CSTATE 1
 131#define PSTATE 2
 132
 133struct power_event {
 134	struct power_event *next;
 135	int type;
 136	int state;
 137	u64 start_time;
 138	u64 end_time;
 139	int cpu;
 140};
 141
 142struct wake_event {
 143	struct wake_event *next;
 144	int waker;
 145	int wakee;
 146	u64 time;
 
 147};
 148
 149static struct power_event    *power_events;
 150static struct wake_event     *wake_events;
 151
 152struct process_filter;
 153struct process_filter {
 154	char			*name;
 155	int			pid;
 156	struct process_filter	*next;
 157};
 158
 159static struct process_filter *process_filter;
 160
 161
 162static struct per_pid *find_create_pid(int pid)
 163{
 164	struct per_pid *cursor = all_data;
 165
 166	while (cursor) {
 167		if (cursor->pid == pid)
 168			return cursor;
 169		cursor = cursor->next;
 170	}
 171	cursor = malloc(sizeof(struct per_pid));
 172	assert(cursor != NULL);
 173	memset(cursor, 0, sizeof(struct per_pid));
 174	cursor->pid = pid;
 175	cursor->next = all_data;
 176	all_data = cursor;
 177	return cursor;
 178}
 179
 180static void pid_set_comm(int pid, char *comm)
 181{
 182	struct per_pid *p;
 183	struct per_pidcomm *c;
 184	p = find_create_pid(pid);
 185	c = p->all;
 186	while (c) {
 187		if (c->comm && strcmp(c->comm, comm) == 0) {
 188			p->current = c;
 189			return;
 190		}
 191		if (!c->comm) {
 192			c->comm = strdup(comm);
 193			p->current = c;
 194			return;
 195		}
 196		c = c->next;
 197	}
 198	c = malloc(sizeof(struct per_pidcomm));
 199	assert(c != NULL);
 200	memset(c, 0, sizeof(struct per_pidcomm));
 201	c->comm = strdup(comm);
 202	p->current = c;
 203	c->next = p->all;
 204	p->all = c;
 205}
 206
 207static void pid_fork(int pid, int ppid, u64 timestamp)
 208{
 209	struct per_pid *p, *pp;
 210	p = find_create_pid(pid);
 211	pp = find_create_pid(ppid);
 212	p->ppid = ppid;
 213	if (pp->current && pp->current->comm && !p->current)
 214		pid_set_comm(pid, pp->current->comm);
 215
 216	p->start_time = timestamp;
 217	if (p->current) {
 218		p->current->start_time = timestamp;
 219		p->current->state_since = timestamp;
 220	}
 221}
 222
 223static void pid_exit(int pid, u64 timestamp)
 224{
 225	struct per_pid *p;
 226	p = find_create_pid(pid);
 227	p->end_time = timestamp;
 228	if (p->current)
 229		p->current->end_time = timestamp;
 230}
 231
 232static void
 233pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
 
 234{
 235	struct per_pid *p;
 236	struct per_pidcomm *c;
 237	struct cpu_sample *sample;
 238
 239	p = find_create_pid(pid);
 240	c = p->current;
 241	if (!c) {
 242		c = malloc(sizeof(struct per_pidcomm));
 243		assert(c != NULL);
 244		memset(c, 0, sizeof(struct per_pidcomm));
 245		p->current = c;
 246		c->next = p->all;
 247		p->all = c;
 248	}
 249
 250	sample = malloc(sizeof(struct cpu_sample));
 251	assert(sample != NULL);
 252	memset(sample, 0, sizeof(struct cpu_sample));
 253	sample->start_time = start;
 254	sample->end_time = end;
 255	sample->type = type;
 256	sample->next = c->samples;
 257	sample->cpu = cpu;
 
 258	c->samples = sample;
 259
 260	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 261		c->total_time += (end-start);
 262		p->total_time += (end-start);
 263	}
 264
 265	if (c->start_time == 0 || c->start_time > start)
 266		c->start_time = start;
 267	if (p->start_time == 0 || p->start_time > start)
 268		p->start_time = start;
 269}
 270
 271#define MAX_CPUS 4096
 272
 273static u64 cpus_cstate_start_times[MAX_CPUS];
 274static int cpus_cstate_state[MAX_CPUS];
 275static u64 cpus_pstate_start_times[MAX_CPUS];
 276static u64 cpus_pstate_state[MAX_CPUS];
 277
 278static int process_comm_event(struct perf_tool *tool __used,
 279			      union perf_event *event,
 280			      struct perf_sample *sample __used,
 281			      struct machine *machine __used)
 282{
 283	pid_set_comm(event->comm.tid, event->comm.comm);
 
 284	return 0;
 285}
 286
 287static int process_fork_event(struct perf_tool *tool __used,
 288			      union perf_event *event,
 289			      struct perf_sample *sample __used,
 290			      struct machine *machine __used)
 291{
 292	pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
 
 293	return 0;
 294}
 295
 296static int process_exit_event(struct perf_tool *tool __used,
 297			      union perf_event *event,
 298			      struct perf_sample *sample __used,
 299			      struct machine *machine __used)
 300{
 301	pid_exit(event->fork.pid, event->fork.time);
 
 302	return 0;
 303}
 304
 305struct trace_entry {
 306	unsigned short		type;
 307	unsigned char		flags;
 308	unsigned char		preempt_count;
 309	int			pid;
 310	int			lock_depth;
 311};
 312
 313#ifdef SUPPORT_OLD_POWER_EVENTS
 314static int use_old_power_events;
 315struct power_entry_old {
 316	struct trace_entry te;
 317	u64	type;
 318	u64	value;
 319	u64	cpu_id;
 320};
 321#endif
 322
 323struct power_processor_entry {
 324	struct trace_entry te;
 325	u32	state;
 326	u32	cpu_id;
 327};
 328
 329#define TASK_COMM_LEN 16
 330struct wakeup_entry {
 331	struct trace_entry te;
 332	char comm[TASK_COMM_LEN];
 333	int   pid;
 334	int   prio;
 335	int   success;
 336};
 337
 338/*
 339 * trace_flag_type is an enumeration that holds different
 340 * states when a trace occurs. These are:
 341 *  IRQS_OFF            - interrupts were disabled
 342 *  IRQS_NOSUPPORT      - arch does not support irqs_disabled_flags
 344 *  NEED_RESCHED        - reschedule is requested
 344 *  HARDIRQ             - inside an interrupt handler
 345 *  SOFTIRQ             - inside a softirq handler
 346 */
 347enum trace_flag_type {
 348	TRACE_FLAG_IRQS_OFF		= 0x01,
 349	TRACE_FLAG_IRQS_NOSUPPORT	= 0x02,
 350	TRACE_FLAG_NEED_RESCHED		= 0x04,
 351	TRACE_FLAG_HARDIRQ		= 0x08,
 352	TRACE_FLAG_SOFTIRQ		= 0x10,
 353};
 354
 355
 356
 357struct sched_switch {
 358	struct trace_entry te;
 359	char prev_comm[TASK_COMM_LEN];
 360	int  prev_pid;
 361	int  prev_prio;
 362	long prev_state; /* Arjan weeps. */
 363	char next_comm[TASK_COMM_LEN];
 364	int  next_pid;
 365	int  next_prio;
 366};
 367
 368static void c_state_start(int cpu, u64 timestamp, int state)
 369{
 370	cpus_cstate_start_times[cpu] = timestamp;
 371	cpus_cstate_state[cpu] = state;
 372}
 373
 374static void c_state_end(int cpu, u64 timestamp)
 375{
 376	struct power_event *pwr;
 377	pwr = malloc(sizeof(struct power_event));
 378	if (!pwr)
 379		return;
 380	memset(pwr, 0, sizeof(struct power_event));
 381
 382	pwr->state = cpus_cstate_state[cpu];
 383	pwr->start_time = cpus_cstate_start_times[cpu];
 384	pwr->end_time = timestamp;
 385	pwr->cpu = cpu;
 386	pwr->type = CSTATE;
 387	pwr->next = power_events;
 388
 389	power_events = pwr;
 390}
 391
 392static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
 
 393{
 394	struct power_event *pwr;
 395	pwr = malloc(sizeof(struct power_event));
 396
 397	if (new_freq > 8000000) /* detect invalid data */
 398		return;
 399
 400	if (!pwr)
 401		return;
 402	memset(pwr, 0, sizeof(struct power_event));
 403
 404	pwr->state = cpus_pstate_state[cpu];
 405	pwr->start_time = cpus_pstate_start_times[cpu];
 406	pwr->end_time = timestamp;
 407	pwr->cpu = cpu;
 408	pwr->type = PSTATE;
 409	pwr->next = power_events;
 410
 411	if (!pwr->start_time)
 412		pwr->start_time = first_time;
 413
 414	power_events = pwr;
 
 
 415
 416	cpus_pstate_state[cpu] = new_freq;
 417	cpus_pstate_start_times[cpu] = timestamp;
 418
 419	if ((u64)new_freq > max_freq)
 420		max_freq = new_freq;
 421
 422	if (new_freq < min_freq || min_freq == 0)
 423		min_freq = new_freq;
 424
 425	if (new_freq == max_freq - 1000)
 426			turbo_frequency = max_freq;
 427}
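/*
 * Editor's note (worked example, not in the original file): cpufreq on Intel
 * conventionally lists the turbo state as the highest non-turbo frequency
 * plus 1000, in kHz (the 8000000 sanity check above also suggests kHz). So
 * if max_freq has already been seen as 3401000 and a later event reports
 * new_freq = 3400000, the "new_freq == max_freq - 1000" test latches
 * turbo_frequency = 3401000.
 */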
 428
 429static void
 430sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
 431{
 432	struct wake_event *we;
 433	struct per_pid *p;
 434	struct wakeup_entry *wake = (void *)te;
 435
 436	we = malloc(sizeof(struct wake_event));
 437	if (!we)
 438		return;
 439
 440	memset(we, 0, sizeof(struct wake_event));
 441	we->time = timestamp;
 442	we->waker = pid;
 
 443
 444	if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
 445		we->waker = -1;
 446
 447	we->wakee = wake->pid;
 448	we->next = wake_events;
 449	wake_events = we;
 450	p = find_create_pid(we->wakee);
 451
 452	if (p && p->current && p->current->state == TYPE_NONE) {
 453		p->current->state_since = timestamp;
 454		p->current->state = TYPE_WAITING;
 455	}
 456	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 457		pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
 
 458		p->current->state_since = timestamp;
 459		p->current->state = TYPE_WAITING;
 460	}
 461}
 462
 463static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
 
 
 464{
 465	struct per_pid *p = NULL, *prev_p;
 466	struct sched_switch *sw = (void *)te;
 467
 468
 469	prev_p = find_create_pid(sw->prev_pid);
 470
 471	p = find_create_pid(sw->next_pid);
 472
 473	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 474		pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
 
 
 475	if (p && p->current) {
 476		if (p->current->state != TYPE_NONE)
 477			pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);
 
 
 478
 479		p->current->state_since = timestamp;
 480		p->current->state = TYPE_RUNNING;
 481	}
 482
 483	if (prev_p->current) {
 484		prev_p->current->state = TYPE_NONE;
 485		prev_p->current->state_since = timestamp;
 486		if (sw->prev_state & 2)
 487			prev_p->current->state = TYPE_BLOCKED;
 488		if (sw->prev_state == 0)
 489			prev_p->current->state = TYPE_WAITING;
 490	}
 491}
 492
 493
 494static int process_sample_event(struct perf_tool *tool __used,
 495				union perf_event *event __used,
 496				struct perf_sample *sample,
 497				struct perf_evsel *evsel,
 498				struct machine *machine __used)
 499{
 500	struct trace_entry *te;
 501
 502	if (evsel->attr.sample_type & PERF_SAMPLE_TIME) {
 503		if (!first_time || first_time > sample->time)
 504			first_time = sample->time;
 505		if (last_time < sample->time)
 506			last_time = sample->time;
 507	}
 508
 509	te = (void *)sample->raw_data;
 510	if ((evsel->attr.sample_type & PERF_SAMPLE_RAW) && sample->raw_size > 0) {
 511		char *event_str;
 512#ifdef SUPPORT_OLD_POWER_EVENTS
 513		struct power_entry_old *peo;
 514		peo = (void *)te;
 515#endif
 516		/*
 517		 * FIXME: use evsel, its already mapped from id to perf_evsel,
 518		 * remove perf_header__find_event infrastructure bits.
 519		 * Mapping all these "power:cpu_idle" strings to the tracepoint
 520		 * ID and then just comparing against evsel->attr.config.
 521		 *
 522		 * e.g.:
 523		 *
 524		 * if (evsel->attr.config == power_cpu_idle_id)
 525		 */
 526		event_str = perf_header__find_event(te->type);
 527
 528		if (!event_str)
 529			return 0;
 530
 531		if (sample->cpu > numcpus)
 532			numcpus = sample->cpu;
 533
 534		if (strcmp(event_str, "power:cpu_idle") == 0) {
 535			struct power_processor_entry *ppe = (void *)te;
 536			if (ppe->state == (u32)PWR_EVENT_EXIT)
 537				c_state_end(ppe->cpu_id, sample->time);
 538			else
 539				c_state_start(ppe->cpu_id, sample->time,
 540					      ppe->state);
 541		}
 542		else if (strcmp(event_str, "power:cpu_frequency") == 0) {
 543			struct power_processor_entry *ppe = (void *)te;
 544			p_state_change(ppe->cpu_id, sample->time, ppe->state);
 545		}
 546
 547		else if (strcmp(event_str, "sched:sched_wakeup") == 0)
 548			sched_wakeup(sample->cpu, sample->time, sample->pid, te);
 549
 550		else if (strcmp(event_str, "sched:sched_switch") == 0)
 551			sched_switch(sample->cpu, sample->time, te);
 552
 553#ifdef SUPPORT_OLD_POWER_EVENTS
 554		if (use_old_power_events) {
 555			if (strcmp(event_str, "power:power_start") == 0)
 556				c_state_start(peo->cpu_id, sample->time,
 557					      peo->value);
 558
 559			else if (strcmp(event_str, "power:power_end") == 0)
 560				c_state_end(sample->cpu, sample->time);
 561
 562			else if (strcmp(event_str,
 563					"power:power_frequency") == 0)
 564				p_state_change(peo->cpu_id, sample->time,
 565					       peo->value);
 566		}
 567#endif
 568	}
 569	return 0;
 570}
 571
 572/*
 573 * After the last sample we need to wrap up the current C/P state
 574 * and close out each CPU for these.
 575 */
 576static void end_sample_processing(void)
 577{
 578	u64 cpu;
 579	struct power_event *pwr;
 580
 581	for (cpu = 0; cpu <= numcpus; cpu++) {
 582		pwr = malloc(sizeof(struct power_event));
 
 
 583		if (!pwr)
 584			return;
 585		memset(pwr, 0, sizeof(struct power_event));
 586
 587		/* C state */
 588#if 0
 589		pwr->state = cpus_cstate_state[cpu];
 590		pwr->start_time = cpus_cstate_start_times[cpu];
 591		pwr->end_time = last_time;
 592		pwr->cpu = cpu;
 593		pwr->type = CSTATE;
 594		pwr->next = power_events;
 595
 596		power_events = pwr;
 597#endif
 598		/* P state */
 599
 600		pwr = malloc(sizeof(struct power_event));
 601		if (!pwr)
 602			return;
 603		memset(pwr, 0, sizeof(struct power_event));
 604
 605		pwr->state = cpus_pstate_state[cpu];
 606		pwr->start_time = cpus_pstate_start_times[cpu];
 607		pwr->end_time = last_time;
 608		pwr->cpu = cpu;
 609		pwr->type = PSTATE;
 610		pwr->next = power_events;
 611
 612		if (!pwr->start_time)
 613			pwr->start_time = first_time;
 614		if (!pwr->state)
 615			pwr->state = min_freq;
 616		power_events = pwr;
 617	}
 618}
 619
 620/*
 621 * Sort the pid datastructure
 622 */
 623static void sort_pids(void)
 624{
 625	struct per_pid *new_list, *p, *cursor, *prev;
 626	/* sort by ppid first, then by pid, lowest to highest */
 627
 628	new_list = NULL;
 629
 630	while (all_data) {
 631		p = all_data;
 632		all_data = p->next;
 633		p->next = NULL;
 634
 635		if (new_list == NULL) {
 636			new_list = p;
 637			p->next = NULL;
 638			continue;
 639		}
 640		prev = NULL;
 641		cursor = new_list;
 642		while (cursor) {
 643			if (cursor->ppid > p->ppid ||
 644				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 645				/* must insert before */
 646				if (prev) {
 647					p->next = prev->next;
 648					prev->next = p;
 649					cursor = NULL;
 650					continue;
 651				} else {
 652					p->next = new_list;
 653					new_list = p;
 654					cursor = NULL;
 655					continue;
 656				}
 657			}
 658
 659			prev = cursor;
 660			cursor = cursor->next;
 661			if (!cursor)
 662				prev->next = p;
 663		}
 664	}
 665	all_data = new_list;
 666}
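/*
 * Editor's note (worked example, not in the original file): the insertion
 * sort above orders on the (ppid, pid) pair, lowest first. Given entries
 * (ppid 1, pid 200), (ppid 0, pid 1) and (ppid 1, pid 42), the resulting
 * list is (0,1) -> (1,42) -> (1,200): tasks with the same parent end up
 * adjacent, sorted by their own pid.
 */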
 667
 668
 669static void draw_c_p_states(void)
 670{
 671	struct power_event *pwr;
 672	pwr = power_events;
 673
 674	/*
 675	 * two pass drawing so that the P state bars are on top of the C state blocks
 676	 */
 677	while (pwr) {
 678		if (pwr->type == CSTATE)
 679			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
 680		pwr = pwr->next;
 681	}
 682
 683	pwr = power_events;
 684	while (pwr) {
 685		if (pwr->type == PSTATE) {
 686			if (!pwr->state)
 687				pwr->state = min_freq;
 688			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
 689		}
 690		pwr = pwr->next;
 691	}
 692}
 693
 694static void draw_wakeups(void)
 695{
 696	struct wake_event *we;
 697	struct per_pid *p;
 698	struct per_pidcomm *c;
 699
 700	we = wake_events;
 701	while (we) {
 702		int from = 0, to = 0;
 703		char *task_from = NULL, *task_to = NULL;
 704
 705		/* locate the column of the waker and wakee */
 706		p = all_data;
 707		while (p) {
 708			if (p->pid == we->waker || p->pid == we->wakee) {
 709				c = p->all;
 710				while (c) {
 711					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
 712						if (p->pid == we->waker && !from) {
 713							from = c->Y;
 714							task_from = strdup(c->comm);
 715						}
 716						if (p->pid == we->wakee && !to) {
 717							to = c->Y;
 718							task_to = strdup(c->comm);
 719						}
 720					}
 721					c = c->next;
 722				}
 723				c = p->all;
 724				while (c) {
 725					if (p->pid == we->waker && !from) {
 726						from = c->Y;
 727						task_from = strdup(c->comm);
 728					}
 729					if (p->pid == we->wakee && !to) {
 730						to = c->Y;
 731						task_to = strdup(c->comm);
 732					}
 733					c = c->next;
 734				}
 735			}
 736			p = p->next;
 737		}
 738
 739		if (!task_from) {
 740			task_from = malloc(40);
 741			sprintf(task_from, "[%i]", we->waker);
 742		}
 743		if (!task_to) {
 744			task_to = malloc(40);
 745			sprintf(task_to, "[%i]", we->wakee);
 746		}
 747
 748		if (we->waker == -1)
 749			svg_interrupt(we->time, to);
 750		else if (from && to && abs(from - to) == 1)
 751			svg_wakeline(we->time, from, to);
 752		else
 753			svg_partial_wakeline(we->time, from, task_from, to, task_to);
 
 754		we = we->next;
 755
 756		free(task_from);
 757		free(task_to);
 758	}
 759}
 760
 761static void draw_cpu_usage(void)
 762{
 763	struct per_pid *p;
 764	struct per_pidcomm *c;
 765	struct cpu_sample *sample;
 766	p = all_data;
 767	while (p) {
 768		c = p->all;
 769		while (c) {
 770			sample = c->samples;
 771			while (sample) {
 772				if (sample->type == TYPE_RUNNING)
 773					svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
 774
 775				sample = sample->next;
 776			}
 777			c = c->next;
 778		}
 779		p = p->next;
 780	}
 781}
 782
 783static void draw_process_bars(void)
 784{
 785	struct per_pid *p;
 786	struct per_pidcomm *c;
 787	struct cpu_sample *sample;
 788	int Y = 0;
 789
 790	Y = 2 * numcpus + 2;
 791
 792	p = all_data;
 793	while (p) {
 794		c = p->all;
 795		while (c) {
 796			if (!c->display) {
 797				c->Y = 0;
 798				c = c->next;
 799				continue;
 800			}
 801
 802			svg_box(Y, c->start_time, c->end_time, "process");
 803			sample = c->samples;
 804			while (sample) {
 805				if (sample->type == TYPE_RUNNING)
 806					svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
 
 
 
 807				if (sample->type == TYPE_BLOCKED)
 808					svg_box(Y, sample->start_time, sample->end_time, "blocked");
 
 
 
 809				if (sample->type == TYPE_WAITING)
 810					svg_waiting(Y, sample->start_time, sample->end_time);
 
 
 
 811				sample = sample->next;
 812			}
 813
 814			if (c->comm) {
 815				char comm[256];
 816				if (c->total_time > 5000000000) /* 5 seconds */
 817					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
 818				else
 819					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
 820
 821				svg_text(Y, c->start_time, comm);
 822			}
 823			c->Y = Y;
 824			Y++;
 825			c = c->next;
 826		}
 827		p = p->next;
 828	}
 829}
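/*
 * Editor's note (worked numbers, not in the original file): timestamps here
 * are nanoseconds, so the label logic above prints seconds once a comm has
 * more than 5000000000 ns of run time and milliseconds otherwise, e.g.
 *	total_time = 7340000000  ->  "firefox:1234 (7.34s)"
 *	total_time =   12500000  ->  "firefox:1234 (12.5ms)"
 * (process name and pid illustrative).
 */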
 830
 831static void add_process_filter(const char *string)
 832{
 833	struct process_filter *filt;
 834	int pid;
 835
 836	pid = strtoull(string, NULL, 10);
 837	filt = malloc(sizeof(struct process_filter));
 838	if (!filt)
 839		return;
 840
 841	filt->name = strdup(string);
 842	filt->pid  = pid;
 843	filt->next = process_filter;
 844
 845	process_filter = filt;
 846}
 847
 848static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
 849{
 850	struct process_filter *filt;
 851	if (!process_filter)
 852		return 1;
 853
 854	filt = process_filter;
 855	while (filt) {
 856		if (filt->pid && p->pid == filt->pid)
 857			return 1;
 858		if (strcmp(filt->name, c->comm) == 0)
 859			return 1;
 860		filt = filt->next;
 861	}
 862	return 0;
 863}
 864
 865static int determine_display_tasks_filtered(void)
 866{
 867	struct per_pid *p;
 868	struct per_pidcomm *c;
 869	int count = 0;
 870
 871	p = all_data;
 872	while (p) {
 873		p->display = 0;
 874		if (p->start_time == 1)
 875			p->start_time = first_time;
 876
 877		/* no exit marker, task kept running to the end */
 878		if (p->end_time == 0)
 879			p->end_time = last_time;
 880
 881		c = p->all;
 882
 883		while (c) {
 884			c->display = 0;
 885
 886			if (c->start_time == 1)
 887				c->start_time = first_time;
 888
 889			if (passes_filter(p, c)) {
 890				c->display = 1;
 891				p->display = 1;
 892				count++;
 893			}
 894
 895			if (c->end_time == 0)
 896				c->end_time = last_time;
 897
 898			c = c->next;
 899		}
 900		p = p->next;
 901	}
 902	return count;
 903}
 904
 905static int determine_display_tasks(u64 threshold)
 906{
 907	struct per_pid *p;
 908	struct per_pidcomm *c;
 909	int count = 0;
 910
 911	if (process_filter)
 912		return determine_display_tasks_filtered();
 913
 914	p = all_data;
 915	while (p) {
 916		p->display = 0;
 917		if (p->start_time == 1)
 918			p->start_time = first_time;
 919
 920		/* no exit marker, task kept running to the end */
 921		if (p->end_time == 0)
 922			p->end_time = last_time;
 923		if (p->total_time >= threshold && !power_only)
 924			p->display = 1;
 925
 926		c = p->all;
 927
 928		while (c) {
 929			c->display = 0;
 930
 931			if (c->start_time == 1)
 932				c->start_time = first_time;
 933
 934			if (c->total_time >= threshold && !power_only) {
 935				c->display = 1;
 936				count++;
 937			}
 938
 939			if (c->end_time == 0)
 940				c->end_time = last_time;
 941
 942			c = c->next;
 943		}
 944		p = p->next;
 945	}
 946	return count;
 947}
 948
 949
 950
 
 951#define TIME_THRESH 10000000
 952
 953static void write_svg_file(const char *filename)
 954{
 955	u64 i;
 956	int count;
 957
 958	numcpus++;
 959
 
 
 960
 961	count = determine_display_tasks(TIME_THRESH);
 
 962
 963	/* We'd like to show at least 15 tasks; be less picky if we have fewer */
 964	if (count < 15)
 965		count = determine_display_tasks(TIME_THRESH / 10);
 966
 967	open_svg(filename, numcpus, count, first_time, last_time);
 968
 969	svg_time_grid();
 970	svg_legenda();
 971
 972	for (i = 0; i < numcpus; i++)
 973		svg_cpu_box(i, max_freq, turbo_frequency);
 974
 975	draw_cpu_usage();
 976	draw_process_bars();
 977	draw_c_p_states();
 978	draw_wakeups();
 979
 980	svg_close();
 981}
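/*
 * Editor's note (worked numbers, not in the original file): with nanosecond
 * timestamps, TIME_THRESH = 10000000 means a task needs at least 10 ms of
 * accumulated run time to get its own bar; if fewer than 15 tasks qualify,
 * the second determine_display_tasks() call above retries with
 * TIME_THRESH / 10, i.e. 1 ms.
 */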
 982
 983static struct perf_tool perf_timechart = {
 984	.comm			= process_comm_event,
 985	.fork			= process_fork_event,
 986	.exit			= process_exit_event,
 987	.sample			= process_sample_event,
 988	.ordered_samples	= true,
 989};
 990
 991static int __cmd_timechart(void)
 
 
 
 992{
 993	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
 994							 0, false, &perf_timechart);
 995	int ret = -EINVAL;
 996
 997	if (session == NULL)
 998		return -ENOMEM;
 999
1000	if (!perf_session__has_traces(session, "timechart record"))
1001		goto out_delete;
1002
1003	ret = perf_session__process_events(session, &perf_timechart);
1004	if (ret)
1005		goto out_delete;
1006
1007	end_sample_processing();
1008
1009	sort_pids();
1010
1011	write_svg_file(output_name);
1012
1013	pr_info("Written %2.1f seconds of trace to %s.\n",
1014		(last_time - first_time) / 1000000000.0, output_name);
1015out_delete:
1016	perf_session__delete(session);
1017	return ret;
1018}
1019
1020static const char * const timechart_usage[] = {
1021	"perf timechart [<options>] {record}",
1022	NULL
1023};
 
 
1024
1025#ifdef SUPPORT_OLD_POWER_EVENTS
1026static const char * const record_old_args[] = {
1027	"record",
1028	"-a",
1029	"-R",
1030	"-f",
1031	"-c", "1",
1032	"-e", "power:power_start",
1033	"-e", "power:power_end",
1034	"-e", "power:power_frequency",
1035	"-e", "sched:sched_wakeup",
1036	"-e", "sched:sched_switch",
1037};
1038#endif
1039
1040static const char * const record_new_args[] = {
1041	"record",
1042	"-a",
1043	"-R",
1044	"-f",
1045	"-c", "1",
1046	"-e", "power:cpu_frequency",
1047	"-e", "power:cpu_idle",
1048	"-e", "sched:sched_wakeup",
1049	"-e", "sched:sched_switch",
1050};
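/*
 * Editor's note (illustration, not in the original file): __cmd_record()
 * below glues this array together with any user-supplied arguments, so the
 * record step it launches is roughly equivalent to running
 *
 *	perf record -a -R -f -c 1 \
 *		-e power:cpu_frequency -e power:cpu_idle \
 *		-e sched:sched_wakeup -e sched:sched_switch [user args]
 *
 * (or the power:power_start/power_end/power_frequency events when only the
 * old tracepoints are available).
 */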
1051
1052static int __cmd_record(int argc, const char **argv)
1053{
1054	unsigned int rec_argc, i, j;
1055	const char **rec_argv;
1056	const char * const *record_args = record_new_args;
1057	unsigned int record_elems = ARRAY_SIZE(record_new_args);
1058
1059#ifdef SUPPORT_OLD_POWER_EVENTS
1060	if (!is_valid_tracepoint("power:cpu_idle") &&
1061	    is_valid_tracepoint("power:power_start")) {
1062		use_old_power_events = 1;
1063		record_args = record_old_args;
1064		record_elems = ARRAY_SIZE(record_old_args);
 
1065	}
1066#endif
1067
1068	rec_argc = record_elems + argc - 1;
1069	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1070
1071	if (rec_argv == NULL)
1072		return -ENOMEM;
1073
1074	for (i = 0; i < record_elems; i++)
1075		rec_argv[i] = strdup(record_args[i]);
1076
1077	for (j = 1; j < (unsigned int)argc; j++, i++)
1078		rec_argv[i] = argv[j];
1079
1080	return cmd_record(i, rec_argv, NULL);
1081}
1082
1083static int
1084parse_process(const struct option *opt __used, const char *arg, int __used unset)
 
1085{
1086	if (arg)
1087		add_process_filter(arg);
1088	return 0;
1089}
1090
1091static const struct option options[] = {
1092	OPT_STRING('i', "input", &input_name, "file",
1093		    "input file name"),
1094	OPT_STRING('o', "output", &output_name, "file",
1095		    "output file name"),
1096	OPT_INTEGER('w', "width", &svg_page_width,
1097		    "page width"),
1098	OPT_BOOLEAN('P', "power-only", &power_only,
1099		    "output power data only"),
1100	OPT_CALLBACK('p', "process", NULL, "process",
1101		      "process selector. Pass a pid or process name.",
1102		       parse_process),
1103	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1104		    "Look for files with symbols relative to this directory"),
1105	OPT_END()
1106};
1107
1108
1109int cmd_timechart(int argc, const char **argv, const char *prefix __used)
 
1110{
1111	argc = parse_options(argc, argv, options, timechart_usage,
1112			PARSE_OPT_STOP_AT_NON_OPTION);
1113
1114	symbol__init();
1115
1116	if (argc && !strncmp(argv[0], "rec", 3))
1117		return __cmd_record(argc, argv);
1118	else if (argc)
1119		usage_with_options(timechart_usage, options);
1120
1121	setup_pager();
1122
1123	return __cmd_timechart();
1124}
v6.13.7 (tools/perf/builtin-timechart.c)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * builtin-timechart.c - make an svg timechart of system activity
   4 *
   5 * (C) Copyright 2009 Intel Corporation
   6 *
   7 * Authors:
   8 *     Arjan van de Ven <arjan@linux.intel.com>
   9 */
  10
  11#include <errno.h>
  12#include <inttypes.h>
 
  13
  14#include "builtin.h"
  15#include "util/color.h"
  16#include <linux/list.h>
  17#include "util/evlist.h" // for struct evsel_str_handler
  18#include "util/evsel.h"
  19#include <linux/kernel.h>
  20#include <linux/rbtree.h>
  21#include <linux/time64.h>
  22#include <linux/zalloc.h>
  23#include "util/symbol.h"
  24#include "util/thread.h"
  25#include "util/callchain.h"
 
  26
 
  27#include "util/header.h"
  28#include <subcmd/pager.h>
  29#include <subcmd/parse-options.h>
  30#include "util/parse-events.h"
  31#include "util/event.h"
  32#include "util/session.h"
  33#include "util/svghelper.h"
  34#include "util/tool.h"
  35#include "util/data.h"
  36#include "util/debug.h"
  37#include "util/string2.h"
  38#include "util/tracepoint.h"
  39#include "util/util.h"
  40#include <linux/err.h>
  41#include <event-parse.h>
  42
  43#ifdef LACKS_OPEN_MEMSTREAM_PROTOTYPE
  44FILE *open_memstream(char **ptr, size_t *sizeloc);
  45#endif
  46
  47#define SUPPORT_OLD_POWER_EVENTS 1
  48#define PWR_EVENT_EXIT -1
  49
  50struct per_pid;
 
 
 
  51struct power_event;
  52struct wake_event;
  53
  54struct timechart {
  55	struct perf_tool	tool;
  56	struct per_pid		*all_data;
  57	struct power_event	*power_events;
  58	struct wake_event	*wake_events;
  59	int			proc_num;
  60	unsigned int		numcpus;
  61	u64			min_freq,	/* Lowest CPU frequency seen */
  62				max_freq,	/* Highest CPU frequency seen */
  63				turbo_frequency,
  64				first_time, last_time;
  65	bool			power_only,
  66				tasks_only,
  67				with_backtrace,
  68				topology;
  69	bool			force;
  70	/* IO related settings */
  71	bool			io_only,
  72				skip_eagain;
  73	u64			io_events;
  74	u64			min_time,
  75				merge_dist;
  76};
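/*
 * Editor's note: struct timechart embeds its struct perf_tool as the first
 * member, so the event callbacks below can recover their per-invocation state
 * from the tool pointer they are handed, as in (mirroring
 * process_comm_event() further down):
 *
 *	struct timechart *tchart = container_of(tool, struct timechart, tool);
 */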
  77
  78struct per_pidcomm;
  79struct cpu_sample;
  80struct io_sample;
  81
  82/*
  83 * Datastructure layout:
  84 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
  85 * Each "pid" entry has a list of "comm"s;
  86 *	this is because we want to track different programs differently, while
  87 *	exec will reuse the original pid (by design).
  88 * Each comm has a list of samples that will be used to draw the
  89 * final graph.
  90 */
  91
  92struct per_pid {
  93	struct per_pid *next;
  94
  95	int		pid;
  96	int		ppid;
  97
  98	u64		start_time;
  99	u64		end_time;
 100	u64		total_time;
 101	u64		total_bytes;
 102	int		display;
 103
 104	struct per_pidcomm *all;
 105	struct per_pidcomm *current;
 106};
 107
 108
 109struct per_pidcomm {
 110	struct per_pidcomm *next;
 111
 112	u64		start_time;
 113	u64		end_time;
 114	u64		total_time;
 115	u64		max_bytes;
 116	u64		total_bytes;
 117
 118	int		Y;
 119	int		display;
 120
 121	long		state;
 122	u64		state_since;
 123
 124	char		*comm;
 125
 126	struct cpu_sample *samples;
 127	struct io_sample  *io_samples;
 128};
 129
 130struct sample_wrapper {
 131	struct sample_wrapper *next;
 132
 133	u64		timestamp;
 134	unsigned char	data[];
 135};
 136
 137#define TYPE_NONE	0
 138#define TYPE_RUNNING	1
 139#define TYPE_WAITING	2
 140#define TYPE_BLOCKED	3
 141
 142struct cpu_sample {
 143	struct cpu_sample *next;
 144
 145	u64 start_time;
 146	u64 end_time;
 147	int type;
 148	int cpu;
 149	const char *backtrace;
 150};
 151
 152enum {
 153	IOTYPE_READ,
 154	IOTYPE_WRITE,
 155	IOTYPE_SYNC,
 156	IOTYPE_TX,
 157	IOTYPE_RX,
 158	IOTYPE_POLL,
 159};
 160
 161struct io_sample {
 162	struct io_sample *next;
 163
 164	u64 start_time;
 165	u64 end_time;
 166	u64 bytes;
 167	int type;
 168	int fd;
 169	int err;
 170	int merges;
 171};
 172
 173#define CSTATE 1
 174#define PSTATE 2
 175
 176struct power_event {
 177	struct power_event *next;
 178	int type;
 179	int state;
 180	u64 start_time;
 181	u64 end_time;
 182	int cpu;
 183};
 184
 185struct wake_event {
 186	struct wake_event *next;
 187	int waker;
 188	int wakee;
 189	u64 time;
 190	const char *backtrace;
 191};
 192
 193struct process_filter {
 194	char			*name;
 195	int			pid;
 196	struct process_filter	*next;
 197};
 198
 199static struct process_filter *process_filter;
 200
 201
 202static struct per_pid *find_create_pid(struct timechart *tchart, int pid)
 203{
 204	struct per_pid *cursor = tchart->all_data;
 205
 206	while (cursor) {
 207		if (cursor->pid == pid)
 208			return cursor;
 209		cursor = cursor->next;
 210	}
 211	cursor = zalloc(sizeof(*cursor));
 212	assert(cursor != NULL);
 
 213	cursor->pid = pid;
 214	cursor->next = tchart->all_data;
 215	tchart->all_data = cursor;
 216	return cursor;
 217}
 218
 219static struct per_pidcomm *create_pidcomm(struct per_pid *p)
 220{
 221	struct per_pidcomm *c;
 222
 223	c = zalloc(sizeof(*c));
 224	if (!c)
 225		return NULL;
 226	p->current = c;
 227	c->next = p->all;
 228	p->all = c;
 229	return c;
 230}
 231
 232static void pid_set_comm(struct timechart *tchart, int pid, char *comm)
 233{
 234	struct per_pid *p;
 235	struct per_pidcomm *c;
 236	p = find_create_pid(tchart, pid);
 237	c = p->all;
 238	while (c) {
 239		if (c->comm && strcmp(c->comm, comm) == 0) {
 240			p->current = c;
 241			return;
 242		}
 243		if (!c->comm) {
 244			c->comm = strdup(comm);
 245			p->current = c;
 246			return;
 247		}
 248		c = c->next;
 249	}
 250	c = create_pidcomm(p);
 251	assert(c != NULL);
 
 252	c->comm = strdup(comm);
 
 
 
 253}
 254
 255static void pid_fork(struct timechart *tchart, int pid, int ppid, u64 timestamp)
 256{
 257	struct per_pid *p, *pp;
 258	p = find_create_pid(tchart, pid);
 259	pp = find_create_pid(tchart, ppid);
 260	p->ppid = ppid;
 261	if (pp->current && pp->current->comm && !p->current)
 262		pid_set_comm(tchart, pid, pp->current->comm);
 263
 264	p->start_time = timestamp;
 265	if (p->current && !p->current->start_time) {
 266		p->current->start_time = timestamp;
 267		p->current->state_since = timestamp;
 268	}
 269}
 270
 271static void pid_exit(struct timechart *tchart, int pid, u64 timestamp)
 272{
 273	struct per_pid *p;
 274	p = find_create_pid(tchart, pid);
 275	p->end_time = timestamp;
 276	if (p->current)
 277		p->current->end_time = timestamp;
 278}
 279
 280static void pid_put_sample(struct timechart *tchart, int pid, int type,
 281			   unsigned int cpu, u64 start, u64 end,
 282			   const char *backtrace)
 283{
 284	struct per_pid *p;
 285	struct per_pidcomm *c;
 286	struct cpu_sample *sample;
 287
 288	p = find_create_pid(tchart, pid);
 289	c = p->current;
 290	if (!c) {
 291		c = create_pidcomm(p);
 292		assert(c != NULL);
 293	}
 294
 295	sample = zalloc(sizeof(*sample));
 296	assert(sample != NULL);
 
 297	sample->start_time = start;
 298	sample->end_time = end;
 299	sample->type = type;
 300	sample->next = c->samples;
 301	sample->cpu = cpu;
 302	sample->backtrace = backtrace;
 303	c->samples = sample;
 304
 305	if (sample->type == TYPE_RUNNING && end > start && start > 0) {
 306		c->total_time += (end-start);
 307		p->total_time += (end-start);
 308	}
 309
 310	if (c->start_time == 0 || c->start_time > start)
 311		c->start_time = start;
 312	if (p->start_time == 0 || p->start_time > start)
 313		p->start_time = start;
 314}
 315
 316#define MAX_CPUS 4096
 317
 318static u64 *cpus_cstate_start_times;
 319static int *cpus_cstate_state;
 320static u64 *cpus_pstate_start_times;
 321static u64 *cpus_pstate_state;
 322
 323static int process_comm_event(const struct perf_tool *tool,
 324			      union perf_event *event,
 325			      struct perf_sample *sample __maybe_unused,
 326			      struct machine *machine __maybe_unused)
 327{
 328	struct timechart *tchart = container_of(tool, struct timechart, tool);
 329	pid_set_comm(tchart, event->comm.tid, event->comm.comm);
 330	return 0;
 331}
 332
 333static int process_fork_event(const struct perf_tool *tool,
 334			      union perf_event *event,
 335			      struct perf_sample *sample __maybe_unused,
 336			      struct machine *machine __maybe_unused)
 337{
 338	struct timechart *tchart = container_of(tool, struct timechart, tool);
 339	pid_fork(tchart, event->fork.pid, event->fork.ppid, event->fork.time);
 340	return 0;
 341}
 342
 343static int process_exit_event(const struct perf_tool *tool,
 344			      union perf_event *event,
 345			      struct perf_sample *sample __maybe_unused,
 346			      struct machine *machine __maybe_unused)
 347{
 348	struct timechart *tchart = container_of(tool, struct timechart, tool);
 349	pid_exit(tchart, event->fork.pid, event->fork.time);
 350	return 0;
 351}
 352
 353#ifdef SUPPORT_OLD_POWER_EVENTS
 354static int use_old_power_events;
 355#endif
 356
 357static void c_state_start(int cpu, u64 timestamp, int state)
 358{
 359	cpus_cstate_start_times[cpu] = timestamp;
 360	cpus_cstate_state[cpu] = state;
 361}
 362
 363static void c_state_end(struct timechart *tchart, int cpu, u64 timestamp)
 364{
 365	struct power_event *pwr = zalloc(sizeof(*pwr));
 366
 367	if (!pwr)
 368		return;
 
 369
 370	pwr->state = cpus_cstate_state[cpu];
 371	pwr->start_time = cpus_cstate_start_times[cpu];
 372	pwr->end_time = timestamp;
 373	pwr->cpu = cpu;
 374	pwr->type = CSTATE;
 375	pwr->next = tchart->power_events;
 376
 377	tchart->power_events = pwr;
 378}
 379
 380static struct power_event *p_state_end(struct timechart *tchart, int cpu,
 381					u64 timestamp)
 382{
 383	struct power_event *pwr = zalloc(sizeof(*pwr));
 384
 385	if (!pwr)
 386		return NULL;
 
 387
 388	pwr->state = cpus_pstate_state[cpu];
 389	pwr->start_time = cpus_pstate_start_times[cpu];
 390	pwr->end_time = timestamp;
 391	pwr->cpu = cpu;
 392	pwr->type = PSTATE;
 393	pwr->next = tchart->power_events;
 
 394	if (!pwr->start_time)
 395		pwr->start_time = tchart->first_time;
 396
 397	tchart->power_events = pwr;
 398	return pwr;
 399}
 400
 401static void p_state_change(struct timechart *tchart, int cpu, u64 timestamp, u64 new_freq)
 402{
 403	struct power_event *pwr;
 404
 405	if (new_freq > 8000000) /* detect invalid data */
 406		return;
 407
 408	pwr = p_state_end(tchart, cpu, timestamp);
 409	if (!pwr)
 410		return;
 411
 412	cpus_pstate_state[cpu] = new_freq;
 413	cpus_pstate_start_times[cpu] = timestamp;
 414
 415	if ((u64)new_freq > tchart->max_freq)
 416		tchart->max_freq = new_freq;
 417
 418	if (new_freq < tchart->min_freq || tchart->min_freq == 0)
 419		tchart->min_freq = new_freq;
 420
 421	if (new_freq == tchart->max_freq - 1000)
 422		tchart->turbo_frequency = tchart->max_freq;
 423}
 424
 425static void sched_wakeup(struct timechart *tchart, int cpu, u64 timestamp,
 426			 int waker, int wakee, u8 flags, const char *backtrace)
 427{
 
 428	struct per_pid *p;
 429	struct wake_event *we = zalloc(sizeof(*we));
 430
 
 431	if (!we)
 432		return;
 433
 
 434	we->time = timestamp;
 435	we->waker = waker;
 436	we->backtrace = backtrace;
 437
 438	if ((flags & TRACE_FLAG_HARDIRQ) || (flags & TRACE_FLAG_SOFTIRQ))
 439		we->waker = -1;
 440
 441	we->wakee = wakee;
 442	we->next = tchart->wake_events;
 443	tchart->wake_events = we;
 444	p = find_create_pid(tchart, we->wakee);
 445
 446	if (p && p->current && p->current->state == TYPE_NONE) {
 447		p->current->state_since = timestamp;
 448		p->current->state = TYPE_WAITING;
 449	}
 450	if (p && p->current && p->current->state == TYPE_BLOCKED) {
 451		pid_put_sample(tchart, p->pid, p->current->state, cpu,
 452			       p->current->state_since, timestamp, NULL);
 453		p->current->state_since = timestamp;
 454		p->current->state = TYPE_WAITING;
 455	}
 456}
 457
 458static void sched_switch(struct timechart *tchart, int cpu, u64 timestamp,
 459			 int prev_pid, int next_pid, u64 prev_state,
 460			 const char *backtrace)
 461{
 462	struct per_pid *p = NULL, *prev_p;
 
 
 463
 464	prev_p = find_create_pid(tchart, prev_pid);
 465
 466	p = find_create_pid(tchart, next_pid);
 467
 468	if (prev_p->current && prev_p->current->state != TYPE_NONE)
 469		pid_put_sample(tchart, prev_pid, TYPE_RUNNING, cpu,
 470			       prev_p->current->state_since, timestamp,
 471			       backtrace);
 472	if (p && p->current) {
 473		if (p->current->state != TYPE_NONE)
 474			pid_put_sample(tchart, next_pid, p->current->state, cpu,
 475				       p->current->state_since, timestamp,
 476				       backtrace);
 477
 478		p->current->state_since = timestamp;
 479		p->current->state = TYPE_RUNNING;
 480	}
 481
 482	if (prev_p->current) {
 483		prev_p->current->state = TYPE_NONE;
 484		prev_p->current->state_since = timestamp;
 485		if (prev_state & 2)
 486			prev_p->current->state = TYPE_BLOCKED;
 487		if (prev_state == 0)
 488			prev_p->current->state = TYPE_WAITING;
 489	}
 490}
 491
 492static const char *cat_backtrace(union perf_event *event,
 493				 struct perf_sample *sample,
 494				 struct machine *machine)
 495{
 496	struct addr_location al;
 497	unsigned int i;
 498	char *p = NULL;
 499	size_t p_len;
 500	u8 cpumode = PERF_RECORD_MISC_USER;
 501	struct ip_callchain *chain = sample->callchain;
 502	FILE *f = open_memstream(&p, &p_len);
 503
 504	if (!f) {
 505		perror("open_memstream error");
 506		return NULL;
 507	}
 508
 509	addr_location__init(&al);
 510	if (!chain)
 511		goto exit;
 512
 513	if (machine__resolve(machine, &al, sample) < 0) {
 514		fprintf(stderr, "problem processing %d event, skipping it.\n",
 515			event->header.type);
 516		goto exit;
 517	}
 518
 519	for (i = 0; i < chain->nr; i++) {
 520		u64 ip;
 521		struct addr_location tal;
 522
 523		if (callchain_param.order == ORDER_CALLEE)
 524			ip = chain->ips[i];
 525		else
 526			ip = chain->ips[chain->nr - i - 1];
 527
 528		if (ip >= PERF_CONTEXT_MAX) {
 529			switch (ip) {
 530			case PERF_CONTEXT_HV:
 531				cpumode = PERF_RECORD_MISC_HYPERVISOR;
 532				break;
 533			case PERF_CONTEXT_KERNEL:
 534				cpumode = PERF_RECORD_MISC_KERNEL;
 535				break;
 536			case PERF_CONTEXT_USER:
 537				cpumode = PERF_RECORD_MISC_USER;
 538				break;
 539			default:
 540				pr_debug("invalid callchain context: "
 541					 "%"PRId64"\n", (s64) ip);
 542
 543				/*
 544				 * It seems the callchain is corrupted.
 545				 * Discard all.
 546				 */
 547				zfree(&p);
 548				goto exit;
 549			}
 550			continue;
 551		}
 552
 553		addr_location__init(&tal);
 554		tal.filtered = 0;
 555		if (thread__find_symbol(al.thread, cpumode, ip, &tal))
 556			fprintf(f, "..... %016" PRIx64 " %s\n", ip, tal.sym->name);
 557		else
 558			fprintf(f, "..... %016" PRIx64 "\n", ip);
 559
 560		addr_location__exit(&tal);
 561	}
 562exit:
 563	addr_location__exit(&al);
 564	fclose(f);
 565
 566	return p;
 567}
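/*
 * Editor's sketch (not part of the original file): cat_backtrace() relies on
 * open_memstream(), which returns a FILE * whose output accumulates in a
 * heap buffer; the buffer and length are finalized by fclose() and owned by
 * the caller. Minimal usage, assuming only <stdio.h>/<stdlib.h>:
 */
#if 0	/* illustration only */
static char *memstream_example(void)
{
	char *buf = NULL;
	size_t len = 0;
	FILE *f = open_memstream(&buf, &len);

	if (!f)
		return NULL;
	fprintf(f, "..... %016llx %s\n", 0xffffffff81000000ULL, "example_sym");
	fclose(f);	/* flushes and finalizes buf/len */
	return buf;	/* caller eventually free()s it */
}
#endif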
 568
 569typedef int (*tracepoint_handler)(struct timechart *tchart,
 570				  struct evsel *evsel,
 571				  struct perf_sample *sample,
 572				  const char *backtrace);
 573
 574static int process_sample_event(const struct perf_tool *tool,
 575				union perf_event *event,
 576				struct perf_sample *sample,
 577				struct evsel *evsel,
 578				struct machine *machine)
 579{
 580	struct timechart *tchart = container_of(tool, struct timechart, tool);
 581
 582	if (evsel->core.attr.sample_type & PERF_SAMPLE_TIME) {
 583		if (!tchart->first_time || tchart->first_time > sample->time)
 584			tchart->first_time = sample->time;
 585		if (tchart->last_time < sample->time)
 586			tchart->last_time = sample->time;
 587	}
 588
 589	if (evsel->handler != NULL) {
 590		tracepoint_handler f = evsel->handler;
 591		return f(tchart, evsel, sample,
 592			 cat_backtrace(event, sample, machine));
 593	}
 594
 595	return 0;
 596}
 597
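/*
 * Editor's note: evsel->handler is populated later in the file (outside this
 * excerpt) from a table of struct evsel_str_handler entries -- hence the
 * "util/evlist.h" include above -- pairing tracepoint names with the
 * process_sample_* callbacks defined below. Hedged sketch of that table's
 * shape (variable name illustrative):
 *
 *	const struct evsel_str_handler power_tracepoints[] = {
 *		{ "power:cpu_idle",		process_sample_cpu_idle },
 *		{ "power:cpu_frequency",	process_sample_cpu_frequency },
 *		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
 *		{ "sched:sched_switch",		process_sample_sched_switch },
 *	};
 */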
 598static int
 599process_sample_cpu_idle(struct timechart *tchart __maybe_unused,
 600			struct evsel *evsel,
 601			struct perf_sample *sample,
 602			const char *backtrace __maybe_unused)
 603{
 604	u32 state  = evsel__intval(evsel, sample, "state");
 605	u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 606
 607	if (state == (u32)PWR_EVENT_EXIT)
 608		c_state_end(tchart, cpu_id, sample->time);
 609	else
 610		c_state_start(cpu_id, sample->time, state);
 611	return 0;
 612}
 613
 614static int
 615process_sample_cpu_frequency(struct timechart *tchart,
 616			     struct evsel *evsel,
 617			     struct perf_sample *sample,
 618			     const char *backtrace __maybe_unused)
 619{
 620	u32 state  = evsel__intval(evsel, sample, "state");
 621	u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 622
 623	p_state_change(tchart, cpu_id, sample->time, state);
 624	return 0;
 625}
 626
 627static int
 628process_sample_sched_wakeup(struct timechart *tchart,
 629			    struct evsel *evsel,
 630			    struct perf_sample *sample,
 631			    const char *backtrace)
 632{
 633	u8 flags  = evsel__intval(evsel, sample, "common_flags");
 634	int waker = evsel__intval(evsel, sample, "common_pid");
 635	int wakee = evsel__intval(evsel, sample, "pid");
 636
 637	sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
 638	return 0;
 639}
 640
 641static int
 642process_sample_sched_switch(struct timechart *tchart,
 643			    struct evsel *evsel,
 644			    struct perf_sample *sample,
 645			    const char *backtrace)
 646{
 647	int prev_pid   = evsel__intval(evsel, sample, "prev_pid");
 648	int next_pid   = evsel__intval(evsel, sample, "next_pid");
 649	u64 prev_state = evsel__intval(evsel, sample, "prev_state");
 650
 651	sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
 652		     prev_state, backtrace);
 653	return 0;
 654}
 655
 656#ifdef SUPPORT_OLD_POWER_EVENTS
 657static int
 658process_sample_power_start(struct timechart *tchart __maybe_unused,
 659			   struct evsel *evsel,
 660			   struct perf_sample *sample,
 661			   const char *backtrace __maybe_unused)
 662{
 663	u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 664	u64 value  = evsel__intval(evsel, sample, "value");
 665
 666	c_state_start(cpu_id, sample->time, value);
 667	return 0;
 668}
 669
 670static int
 671process_sample_power_end(struct timechart *tchart,
 672			 struct evsel *evsel __maybe_unused,
 673			 struct perf_sample *sample,
 674			 const char *backtrace __maybe_unused)
 675{
 676	c_state_end(tchart, sample->cpu, sample->time);
 677	return 0;
 678}
 679
 680static int
 681process_sample_power_frequency(struct timechart *tchart,
 682			       struct evsel *evsel,
 683			       struct perf_sample *sample,
 684			       const char *backtrace __maybe_unused)
 685{
 686	u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
 687	u64 value  = evsel__intval(evsel, sample, "value");
 688
 689	p_state_change(tchart, cpu_id, sample->time, value);
 690	return 0;
 691}
 692#endif /* SUPPORT_OLD_POWER_EVENTS */
 693
 694/*
 695 * After the last sample we need to wrap up the current C/P state
 696 * and close out each CPU for these.
 697 */
 698static void end_sample_processing(struct timechart *tchart)
 699{
 700	u64 cpu;
 701	struct power_event *pwr;
 702
 703	for (cpu = 0; cpu <= tchart->numcpus; cpu++) {
 704		/* C state */
 705#if 0
 706		pwr = zalloc(sizeof(*pwr));
 707		if (!pwr)
 708			return;
 
 709
 
 
 710		pwr->state = cpus_cstate_state[cpu];
 711		pwr->start_time = cpus_cstate_start_times[cpu];
 712		pwr->end_time = tchart->last_time;
 713		pwr->cpu = cpu;
 714		pwr->type = CSTATE;
 715		pwr->next = tchart->power_events;
 716
 717		tchart->power_events = pwr;
 718#endif
 719		/* P state */
 720
 721		pwr = p_state_end(tchart, cpu, tchart->last_time);
 722		if (!pwr)
 723			return;
 
 724
 725		if (!pwr->state)
 726			pwr->state = tchart->min_freq;
 727	}
 728}
 729
 730static int pid_begin_io_sample(struct timechart *tchart, int pid, int type,
 731			       u64 start, int fd)
 732{
 733	struct per_pid *p = find_create_pid(tchart, pid);
 734	struct per_pidcomm *c = p->current;
 735	struct io_sample *sample;
 736	struct io_sample *prev;
 737
 738	if (!c) {
 739		c = create_pidcomm(p);
 740		if (!c)
 741			return -ENOMEM;
 742	}
 743
 744	prev = c->io_samples;
 745
 746	if (prev && prev->start_time && !prev->end_time) {
 747		pr_warning("Skip invalid start event: "
 748			   "previous event already started!\n");
 749
 750		/* remove previous event that has been started,
 751		 * we are not sure we will ever get an end for it */
 752		c->io_samples = prev->next;
 753		free(prev);
 754		return 0;
 755	}
 756
 757	sample = zalloc(sizeof(*sample));
 758	if (!sample)
 759		return -ENOMEM;
 760	sample->start_time = start;
 761	sample->type = type;
 762	sample->fd = fd;
 763	sample->next = c->io_samples;
 764	c->io_samples = sample;
 765
 766	if (c->start_time == 0 || c->start_time > start)
 767		c->start_time = start;
 768
 769	return 0;
 770}
 771
 772static int pid_end_io_sample(struct timechart *tchart, int pid, int type,
 773			     u64 end, long ret)
 774{
 775	struct per_pid *p = find_create_pid(tchart, pid);
 776	struct per_pidcomm *c = p->current;
 777	struct io_sample *sample, *prev;
 778
 779	if (!c) {
 780		pr_warning("Invalid pidcomm!\n");
 781		return -1;
 782	}
 783
 784	sample = c->io_samples;
 785
 786	if (!sample) /* skip partially captured events */
 787		return 0;
 788
 789	if (sample->end_time) {
 790		pr_warning("Skip invalid end event: "
 791			   "previous event already ended!\n");
 792		return 0;
 793	}
 794
 795	if (sample->type != type) {
 796		pr_warning("Skip invalid end event: invalid event type!\n");
 797		return 0;
 798	}
 799
 800	sample->end_time = end;
 801	prev = sample->next;
 802
 803	/* we want to be able to see small and fast transfers, so make them
 804	 * at least min_time long, but don't overlap them */
 805	if (sample->end_time - sample->start_time < tchart->min_time)
 806		sample->end_time = sample->start_time + tchart->min_time;
 807	if (prev && sample->start_time < prev->end_time) {
 808		if (prev->err) /* try to make errors more visible */
 809			sample->start_time = prev->end_time;
 810		else
 811			prev->end_time = sample->start_time;
 812	}
 813
 814	if (ret < 0) {
 815		sample->err = ret;
 816	} else if (type == IOTYPE_READ || type == IOTYPE_WRITE ||
 817		   type == IOTYPE_TX || type == IOTYPE_RX) {
 818
 819		if ((u64)ret > c->max_bytes)
 820			c->max_bytes = ret;
 821
 822		c->total_bytes += ret;
 823		p->total_bytes += ret;
 824		sample->bytes = ret;
 825	}
 826
 827	/* merge two requests to make svg smaller and render-friendly */
 828	if (prev &&
 829	    prev->type == sample->type &&
 830	    prev->err == sample->err &&
 831	    prev->fd == sample->fd &&
 832	    prev->end_time + tchart->merge_dist >= sample->start_time) {
 833
 834		sample->bytes += prev->bytes;
 835		sample->merges += prev->merges + 1;
 836
 837		sample->start_time = prev->start_time;
 838		sample->next = prev->next;
 839		free(prev);
 840
 841		if (!sample->err && sample->bytes > c->max_bytes)
 842			c->max_bytes = sample->bytes;
 843	}
 844
 845	tchart->io_events++;
 846
 847	return 0;
 848}
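/*
 * Editor's note (worked example, not in the original file): with
 * tchart->merge_dist = 1000 (trace time units, i.e. nanoseconds), two
 * successful reads on the same fd -- the earlier one ending at t = 5000, the
 * later one starting at t = 5800 -- satisfy
 * "prev->end_time + merge_dist >= sample->start_time", so the later sample
 * absorbs the earlier one: bytes are summed, merges is bumped, start_time is
 * pulled back to the earlier read's start, and the earlier node is freed.
 * As the comment above says, this keeps the SVG small and render-friendly.
 */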
 849
 850static int
 851process_enter_read(struct timechart *tchart,
 852		   struct evsel *evsel,
 853		   struct perf_sample *sample)
 854{
 855	long fd = evsel__intval(evsel, sample, "fd");
 856	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
 857				   sample->time, fd);
 858}
 859
 860static int
 861process_exit_read(struct timechart *tchart,
 862		  struct evsel *evsel,
 863		  struct perf_sample *sample)
 864{
 865	long ret = evsel__intval(evsel, sample, "ret");
 866	return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
 867				 sample->time, ret);
 868}
 869
 870static int
 871process_enter_write(struct timechart *tchart,
 872		    struct evsel *evsel,
 873		    struct perf_sample *sample)
 874{
 875	long fd = evsel__intval(evsel, sample, "fd");
 876	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 877				   sample->time, fd);
 878}
 879
 880static int
 881process_exit_write(struct timechart *tchart,
 882		   struct evsel *evsel,
 883		   struct perf_sample *sample)
 884{
 885	long ret = evsel__intval(evsel, sample, "ret");
 886	return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
 887				 sample->time, ret);
 888}
 889
 890static int
 891process_enter_sync(struct timechart *tchart,
 892		   struct evsel *evsel,
 893		   struct perf_sample *sample)
 894{
 895	long fd = evsel__intval(evsel, sample, "fd");
 896	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 897				   sample->time, fd);
 898}
 899
 900static int
 901process_exit_sync(struct timechart *tchart,
 902		  struct evsel *evsel,
 903		  struct perf_sample *sample)
 904{
 905	long ret = evsel__intval(evsel, sample, "ret");
 906	return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
 907				 sample->time, ret);
 908}
 909
 910static int
 911process_enter_tx(struct timechart *tchart,
 912		 struct evsel *evsel,
 913		 struct perf_sample *sample)
 914{
 915	long fd = evsel__intval(evsel, sample, "fd");
 916	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
 917				   sample->time, fd);
 918}
 919
 920static int
 921process_exit_tx(struct timechart *tchart,
 922		struct evsel *evsel,
 923		struct perf_sample *sample)
 924{
 925	long ret = evsel__intval(evsel, sample, "ret");
 926	return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
 927				 sample->time, ret);
 928}
 929
 930static int
 931process_enter_rx(struct timechart *tchart,
 932		 struct evsel *evsel,
 933		 struct perf_sample *sample)
 934{
 935	long fd = evsel__intval(evsel, sample, "fd");
 936	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
 937				   sample->time, fd);
 938}
 939
 940static int
 941process_exit_rx(struct timechart *tchart,
 942		struct evsel *evsel,
 943		struct perf_sample *sample)
 944{
 945	long ret = evsel__intval(evsel, sample, "ret");
 946	return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
 947				 sample->time, ret);
 948}
 949
 950static int
 951process_enter_poll(struct timechart *tchart,
 952		   struct evsel *evsel,
 953		   struct perf_sample *sample)
 954{
 955	long fd = evsel__intval(evsel, sample, "fd");
 956	return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
 957				   sample->time, fd);
 958}
 959
 960static int
 961process_exit_poll(struct timechart *tchart,
 962		  struct evsel *evsel,
 963		  struct perf_sample *sample)
 964{
 965	long ret = evsel__intval(evsel, sample, "ret");
 966	return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
 967				 sample->time, ret);
 968}
 969
 970/*
 971 * Sort the pid datastructure
 972 */
 973static void sort_pids(struct timechart *tchart)
 974{
 975	struct per_pid *new_list, *p, *cursor, *prev;
 976	/* sort by ppid first, then by pid, lowest to highest */
 977
 978	new_list = NULL;
 979
 980	while (tchart->all_data) {
 981		p = tchart->all_data;
 982		tchart->all_data = p->next;
 983		p->next = NULL;
 984
 985		if (new_list == NULL) {
 986			new_list = p;
 987			p->next = NULL;
 988			continue;
 989		}
 990		prev = NULL;
 991		cursor = new_list;
 992		while (cursor) {
 993			if (cursor->ppid > p->ppid ||
 994				(cursor->ppid == p->ppid && cursor->pid > p->pid)) {
 995				/* must insert before */
 996				if (prev) {
 997					p->next = prev->next;
 998					prev->next = p;
 999					cursor = NULL;
1000					continue;
1001				} else {
1002					p->next = new_list;
1003					new_list = p;
1004					cursor = NULL;
1005					continue;
1006				}
1007			}
1008
1009			prev = cursor;
1010			cursor = cursor->next;
1011			if (!cursor)
1012				prev->next = p;
1013		}
1014	}
1015	tchart->all_data = new_list;
1016}
1017
1018
1019static void draw_c_p_states(struct timechart *tchart)
1020{
1021	struct power_event *pwr;
1022	pwr = tchart->power_events;
1023
1024	/*
1025	 * two pass drawing so that the P state bars are on top of the C state blocks
1026	 */
1027	while (pwr) {
1028		if (pwr->type == CSTATE)
1029			svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1030		pwr = pwr->next;
1031	}
1032
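     	/* second pass: P state bars, drawn on top of the C state blocks;
     	 * an unknown frequency defaults to the lowest frequency seen */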
1033	pwr = tchart->power_events;
1034	while (pwr) {
1035		if (pwr->type == PSTATE) {
1036			if (!pwr->state)
1037				pwr->state = tchart->min_freq;
1038			svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
1039		}
1040		pwr = pwr->next;
1041	}
1042}
1043
1044static void draw_wakeups(struct timechart *tchart)
1045{
1046	struct wake_event *we;
1047	struct per_pid *p;
1048	struct per_pidcomm *c;
1049
1050	we = tchart->wake_events;
1051	while (we) {
1052		int from = 0, to = 0;
1053		char *task_from = NULL, *task_to = NULL;
1054
1055		/* locate the column of the waker and wakee */
1056		p = tchart->all_data;
1057		while (p) {
1058			if (p->pid == we->waker || p->pid == we->wakee) {
1059				c = p->all;
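     				/* prefer a comm that was on the chart at the time of the wakeup */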
1060				while (c) {
1061					if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
1062						if (p->pid == we->waker && !from) {
1063							from = c->Y;
1064							task_from = strdup(c->comm);
1065						}
1066						if (p->pid == we->wakee && !to) {
1067							to = c->Y;
1068							task_to = strdup(c->comm);
1069						}
1070					}
1071					c = c->next;
1072				}
1073				c = p->all;
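     				/* fall back to the first comm of this pid if none was active at that time */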
1074				while (c) {
1075					if (p->pid == we->waker && !from) {
1076						from = c->Y;
1077						task_from = strdup(c->comm);
1078					}
1079					if (p->pid == we->wakee && !to) {
1080						to = c->Y;
1081						task_to = strdup(c->comm);
1082					}
1083					c = c->next;
1084				}
1085			}
1086			p = p->next;
1087		}
1088
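     		/* no comm was found for an endpoint: label it with the raw pid */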
1089		if (!task_from) {
1090			task_from = malloc(40);
1091			sprintf(task_from, "[%i]", we->waker);
1092		}
1093		if (!task_to) {
1094			task_to = malloc(40);
1095			sprintf(task_to, "[%i]", we->wakee);
1096		}
1097
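     		/* a waker of -1 marks an interrupt; wakeups between adjacent rows get a
     		 * full wake line, everything else a partial one with the task names */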
1098		if (we->waker == -1)
1099			svg_interrupt(we->time, to, we->backtrace);
1100		else if (from && to && abs(from - to) == 1)
1101			svg_wakeline(we->time, from, to, we->backtrace);
1102		else
1103			svg_partial_wakeline(we->time, from, task_from, to,
1104					     task_to, we->backtrace);
1105		we = we->next;
1106
1107		free(task_from);
1108		free(task_to);
1109	}
1110}
1111
1112static void draw_cpu_usage(struct timechart *tchart)
1113{
1114	struct per_pid *p;
1115	struct per_pidcomm *c;
1116	struct cpu_sample *sample;
1117	p = tchart->all_data;
1118	while (p) {
1119		c = p->all;
1120		while (c) {
1121			sample = c->samples;
1122			while (sample) {
1123				if (sample->type == TYPE_RUNNING) {
1124					svg_process(sample->cpu,
1125						    sample->start_time,
1126						    sample->end_time,
1127						    p->pid,
1128						    c->comm,
1129						    sample->backtrace);
1130				}
1131
1132				sample = sample->next;
1133			}
1134			c = c->next;
1135		}
1136		p = p->next;
1137	}
1138}
1139
1140static void draw_io_bars(struct timechart *tchart)
1141{
1142	const char *suf;
1143	double bytes;
1144	char comm[256];
1145	struct per_pid *p;
1146	struct per_pidcomm *c;
1147	struct io_sample *sample;
1148	int Y = 1;
1149
1150	p = tchart->all_data;
1151	while (p) {
1152		c = p->all;
1153		while (c) {
1154			if (!c->display) {
1155				c->Y = 0;
1156				c = c->next;
1157				continue;
1158			}
1159
1160			svg_box(Y, c->start_time, c->end_time, "process3");
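     			/* bar height is the sample size relative to this comm's largest
     			 * successful transfer; failed calls are drawn at full height in
     			 * the error style */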
1161			for (sample = c->io_samples; sample; sample = sample->next) {
1162				double h = (double)sample->bytes / c->max_bytes;
1163
1164				if (tchart->skip_eagain &&
1165				    sample->err == -EAGAIN)
1166					continue;
1167
1168				if (sample->err)
1169					h = 1;
1170
1171				if (sample->type == IOTYPE_SYNC)
1172					svg_fbox(Y,
1173						sample->start_time,
1174						sample->end_time,
1175						1,
1176						sample->err ? "error" : "sync",
1177						sample->fd,
1178						sample->err,
1179						sample->merges);
1180				else if (sample->type == IOTYPE_POLL)
1181					svg_fbox(Y,
1182						sample->start_time,
1183						sample->end_time,
1184						1,
1185						sample->err ? "error" : "poll",
1186						sample->fd,
1187						sample->err,
1188						sample->merges);
1189				else if (sample->type == IOTYPE_READ)
1190					svg_ubox(Y,
1191						sample->start_time,
1192						sample->end_time,
1193						h,
1194						sample->err ? "error" : "disk",
1195						sample->fd,
1196						sample->err,
1197						sample->merges);
1198				else if (sample->type == IOTYPE_WRITE)
1199					svg_lbox(Y,
1200						sample->start_time,
1201						sample->end_time,
1202						h,
1203						sample->err ? "error" : "disk",
1204						sample->fd,
1205						sample->err,
1206						sample->merges);
1207				else if (sample->type == IOTYPE_RX)
1208					svg_ubox(Y,
1209						sample->start_time,
1210						sample->end_time,
1211						h,
1212						sample->err ? "error" : "net",
1213						sample->fd,
1214						sample->err,
1215						sample->merges);
1216				else if (sample->type == IOTYPE_TX)
1217					svg_lbox(Y,
1218						sample->start_time,
1219						sample->end_time,
1220						h,
1221						sample->err ? "error" : "net",
1222						sample->fd,
1223						sample->err,
1224						sample->merges);
1225			}
1226
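     			/* scale the per-comm byte total to a human readable K/M/G suffix */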
1227			suf = "";
1228			bytes = c->total_bytes;
1229			if (bytes > 1024) {
1230				bytes = bytes / 1024;
1231				suf = "K";
1232			}
1233			if (bytes > 1024) {
1234				bytes = bytes / 1024;
1235				suf = "M";
1236			}
1237			if (bytes > 1024) {
1238				bytes = bytes / 1024;
1239				suf = "G";
1240			}
1241
1242
1243			sprintf(comm, "%s:%i (%3.1f %sbytes)", c->comm ?: "", p->pid, bytes, suf);
1244			svg_text(Y, c->start_time, comm);
1245
1246			c->Y = Y;
1247			Y++;
1248			c = c->next;
1249		}
1250		p = p->next;
1251	}
1252}
1253
1254static void draw_process_bars(struct timechart *tchart)
1255{
1256	struct per_pid *p;
1257	struct per_pidcomm *c;
1258	struct cpu_sample *sample;
1259	int Y = 0;
1260
1261	Y = 2 * tchart->numcpus + 2;
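     	/* process bars start below the rows reserved for the per-cpu charts */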
1262
1263	p = tchart->all_data;
1264	while (p) {
1265		c = p->all;
1266		while (c) {
1267			if (!c->display) {
1268				c->Y = 0;
1269				c = c->next;
1270				continue;
1271			}
1272
1273			svg_box(Y, c->start_time, c->end_time, "process");
1274			sample = c->samples;
1275			while (sample) {
1276				if (sample->type == TYPE_RUNNING)
1277					svg_running(Y, sample->cpu,
1278						    sample->start_time,
1279						    sample->end_time,
1280						    sample->backtrace);
1281				if (sample->type == TYPE_BLOCKED)
1282					svg_blocked(Y, sample->cpu,
1283						    sample->start_time,
1284						    sample->end_time,
1285						    sample->backtrace);
1286				if (sample->type == TYPE_WAITING)
1287					svg_waiting(Y, sample->cpu,
1288						    sample->start_time,
1289						    sample->end_time,
1290						    sample->backtrace);
1291				sample = sample->next;
1292			}
1293
1294			if (c->comm) {
1295				char comm[256];
1296				if (c->total_time > 5000000000) /* 5 seconds */
1297					sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / (double)NSEC_PER_SEC);
1298				else
1299					sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / (double)NSEC_PER_MSEC);
1300
1301				svg_text(Y, c->start_time, comm);
1302			}
1303			c->Y = Y;
1304			Y++;
1305			c = c->next;
1306		}
1307		p = p->next;
1308	}
1309}
1310
1311static void add_process_filter(const char *string)
1312{
1313	int pid = strtoull(string, NULL, 10);
1314	struct process_filter *filt = malloc(sizeof(*filt));
 1315
1316	if (!filt)
1317		return;
1318
1319	filt->name = strdup(string);
1320	filt->pid  = pid;
1321	filt->next = process_filter;
1322
1323	process_filter = filt;
1324}
1325
1326static int passes_filter(struct per_pid *p, struct per_pidcomm *c)
1327{
1328	struct process_filter *filt;
1329	if (!process_filter)
1330		return 1;
1331
1332	filt = process_filter;
1333	while (filt) {
1334		if (filt->pid && p->pid == filt->pid)
1335			return 1;
1336		if (strcmp(filt->name, c->comm) == 0)
1337			return 1;
1338		filt = filt->next;
1339	}
1340	return 0;
1341}
1342
1343static int determine_display_tasks_filtered(struct timechart *tchart)
1344{
1345	struct per_pid *p;
1346	struct per_pidcomm *c;
1347	int count = 0;
1348
1349	p = tchart->all_data;
1350	while (p) {
1351		p->display = 0;
1352		if (p->start_time == 1)
1353			p->start_time = tchart->first_time;
1354
1355		/* no exit marker, task kept running to the end */
1356		if (p->end_time == 0)
1357			p->end_time = tchart->last_time;
1358
1359		c = p->all;
1360
1361		while (c) {
1362			c->display = 0;
1363
1364			if (c->start_time == 1)
1365				c->start_time = tchart->first_time;
1366
1367			if (passes_filter(p, c)) {
1368				c->display = 1;
1369				p->display = 1;
1370				count++;
1371			}
1372
1373			if (c->end_time == 0)
1374				c->end_time = tchart->last_time;
1375
1376			c = c->next;
1377		}
1378		p = p->next;
1379	}
1380	return count;
1381}
1382
1383static int determine_display_tasks(struct timechart *tchart, u64 threshold)
1384{
1385	struct per_pid *p;
1386	struct per_pidcomm *c;
1387	int count = 0;
1388
 1389	p = tchart->all_data;
1390	while (p) {
1391		p->display = 0;
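     		/* a start_time of 1 is the "start not captured" sentinel; clamp it to the trace start */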
1392		if (p->start_time == 1)
1393			p->start_time = tchart->first_time;
1394
1395		/* no exit marker, task kept running to the end */
1396		if (p->end_time == 0)
1397			p->end_time = tchart->last_time;
1398		if (p->total_time >= threshold)
1399			p->display = 1;
1400
1401		c = p->all;
1402
1403		while (c) {
1404			c->display = 0;
1405
1406			if (c->start_time == 1)
1407				c->start_time = tchart->first_time;
1408
1409			if (c->total_time >= threshold) {
1410				c->display = 1;
1411				count++;
1412			}
1413
1414			if (c->end_time == 0)
1415				c->end_time = tchart->last_time;
1416
1417			c = c->next;
1418		}
1419		p = p->next;
1420	}
1421	return count;
1422}
1423
1424static int determine_display_io_tasks(struct timechart *timechart, u64 threshold)
1425{
1426	struct per_pid *p;
1427	struct per_pidcomm *c;
1428	int count = 0;
1429
1430	p = timechart->all_data;
1431	while (p) {
1432		/* no exit marker, task kept running to the end */
1433		if (p->end_time == 0)
1434			p->end_time = timechart->last_time;
1435
1436		c = p->all;
1437
1438		while (c) {
1439			c->display = 0;
1440
1441			if (c->total_bytes >= threshold) {
1442				c->display = 1;
1443				count++;
1444			}
1445
1446			if (c->end_time == 0)
1447				c->end_time = timechart->last_time;
1448
1449			c = c->next;
1450		}
1451		p = p->next;
1452	}
1453	return count;
1454}
1455
1456#define BYTES_THRESH (1 * 1024 * 1024)
1457#define TIME_THRESH 10000000
1458
1459static void write_svg_file(struct timechart *tchart, const char *filename)
1460{
1461	u64 i;
1462	int count;
1463	int thresh = tchart->io_events ? BYTES_THRESH : TIME_THRESH;
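     	/* I/O charts filter tasks by bytes transferred, CPU charts by accumulated run time */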
1464
1465	if (tchart->power_only)
1466		tchart->proc_num = 0;
1467
1468	/* We'd like to show at least proc_num tasks;
1469	 * be less picky if we have fewer */
1470	do {
1471		if (process_filter)
1472			count = determine_display_tasks_filtered(tchart);
1473		else if (tchart->io_events)
1474			count = determine_display_io_tasks(tchart, thresh);
1475		else
1476			count = determine_display_tasks(tchart, thresh);
1477		thresh /= 10;
1478	} while (!process_filter && thresh && count < tchart->proc_num);
1479
1480	if (!tchart->proc_num)
1481		count = 0;
1482
1483	if (tchart->io_events) {
1484		open_svg(filename, 0, count, tchart->first_time, tchart->last_time);
1485
1486		svg_time_grid(0.5);
1487		svg_io_legenda();
1488
1489		draw_io_bars(tchart);
1490	} else {
1491		open_svg(filename, tchart->numcpus, count, tchart->first_time, tchart->last_time);
1492
1493		svg_time_grid(0);
1494
 1495		svg_legenda();
1496
1497		for (i = 0; i < tchart->numcpus; i++)
1498			svg_cpu_box(i, tchart->max_freq, tchart->turbo_frequency);
1499
1500		draw_cpu_usage(tchart);
1501		if (tchart->proc_num)
1502			draw_process_bars(tchart);
1503		if (!tchart->tasks_only)
1504			draw_c_p_states(tchart);
1505		if (tchart->proc_num)
1506			draw_wakeups(tchart);
1507	}
1508
1509	svg_close();
1510}
1511
1512static int process_header(struct perf_file_section *section __maybe_unused,
1513			  struct perf_header *ph,
1514			  int feat,
1515			  int fd __maybe_unused,
1516			  void *data)
1517{
1518	struct timechart *tchart = data;
1519
1520	switch (feat) {
1521	case HEADER_NRCPUS:
1522		tchart->numcpus = ph->env.nr_cpus_avail;
1523		break;
1524
1525	case HEADER_CPU_TOPOLOGY:
1526		if (!tchart->topology)
1527			break;
1528
1529		if (svg_build_topology_map(&ph->env))
1530			fprintf(stderr, "problem building topology\n");
1531		break;
1532
1533	default:
1534		break;
1535	}
1536
1537	return 0;
1538}
1539
1540static int __cmd_timechart(struct timechart *tchart, const char *output_name)
1541{
1542	const struct evsel_str_handler power_tracepoints[] = {
1543		{ "power:cpu_idle",		process_sample_cpu_idle },
1544		{ "power:cpu_frequency",	process_sample_cpu_frequency },
1545		{ "sched:sched_wakeup",		process_sample_sched_wakeup },
1546		{ "sched:sched_switch",		process_sample_sched_switch },
1547#ifdef SUPPORT_OLD_POWER_EVENTS
1548		{ "power:power_start",		process_sample_power_start },
1549		{ "power:power_end",		process_sample_power_end },
1550		{ "power:power_frequency",	process_sample_power_frequency },
1551#endif
1552
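     		/* syscall tracepoints produced by the I/O recording mode (timechart record -I) */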
1553		{ "syscalls:sys_enter_read",		process_enter_read },
1554		{ "syscalls:sys_enter_pread64",		process_enter_read },
1555		{ "syscalls:sys_enter_readv",		process_enter_read },
1556		{ "syscalls:sys_enter_preadv",		process_enter_read },
1557		{ "syscalls:sys_enter_write",		process_enter_write },
1558		{ "syscalls:sys_enter_pwrite64",	process_enter_write },
1559		{ "syscalls:sys_enter_writev",		process_enter_write },
1560		{ "syscalls:sys_enter_pwritev",		process_enter_write },
1561		{ "syscalls:sys_enter_sync",		process_enter_sync },
1562		{ "syscalls:sys_enter_sync_file_range",	process_enter_sync },
1563		{ "syscalls:sys_enter_fsync",		process_enter_sync },
1564		{ "syscalls:sys_enter_msync",		process_enter_sync },
1565		{ "syscalls:sys_enter_recvfrom",	process_enter_rx },
1566		{ "syscalls:sys_enter_recvmmsg",	process_enter_rx },
1567		{ "syscalls:sys_enter_recvmsg",		process_enter_rx },
1568		{ "syscalls:sys_enter_sendto",		process_enter_tx },
1569		{ "syscalls:sys_enter_sendmsg",		process_enter_tx },
1570		{ "syscalls:sys_enter_sendmmsg",	process_enter_tx },
1571		{ "syscalls:sys_enter_epoll_pwait",	process_enter_poll },
1572		{ "syscalls:sys_enter_epoll_wait",	process_enter_poll },
1573		{ "syscalls:sys_enter_poll",		process_enter_poll },
1574		{ "syscalls:sys_enter_ppoll",		process_enter_poll },
1575		{ "syscalls:sys_enter_pselect6",	process_enter_poll },
1576		{ "syscalls:sys_enter_select",		process_enter_poll },
1577
1578		{ "syscalls:sys_exit_read",		process_exit_read },
1579		{ "syscalls:sys_exit_pread64",		process_exit_read },
1580		{ "syscalls:sys_exit_readv",		process_exit_read },
1581		{ "syscalls:sys_exit_preadv",		process_exit_read },
1582		{ "syscalls:sys_exit_write",		process_exit_write },
1583		{ "syscalls:sys_exit_pwrite64",		process_exit_write },
1584		{ "syscalls:sys_exit_writev",		process_exit_write },
1585		{ "syscalls:sys_exit_pwritev",		process_exit_write },
1586		{ "syscalls:sys_exit_sync",		process_exit_sync },
1587		{ "syscalls:sys_exit_sync_file_range",	process_exit_sync },
1588		{ "syscalls:sys_exit_fsync",		process_exit_sync },
1589		{ "syscalls:sys_exit_msync",		process_exit_sync },
1590		{ "syscalls:sys_exit_recvfrom",		process_exit_rx },
1591		{ "syscalls:sys_exit_recvmmsg",		process_exit_rx },
1592		{ "syscalls:sys_exit_recvmsg",		process_exit_rx },
1593		{ "syscalls:sys_exit_sendto",		process_exit_tx },
1594		{ "syscalls:sys_exit_sendmsg",		process_exit_tx },
1595		{ "syscalls:sys_exit_sendmmsg",		process_exit_tx },
1596		{ "syscalls:sys_exit_epoll_pwait",	process_exit_poll },
1597		{ "syscalls:sys_exit_epoll_wait",	process_exit_poll },
1598		{ "syscalls:sys_exit_poll",		process_exit_poll },
1599		{ "syscalls:sys_exit_ppoll",		process_exit_poll },
1600		{ "syscalls:sys_exit_pselect6",		process_exit_poll },
1601		{ "syscalls:sys_exit_select",		process_exit_poll },
1602	};
1603	struct perf_data data = {
1604		.path  = input_name,
1605		.mode  = PERF_DATA_MODE_READ,
1606		.force = tchart->force,
1607	};
1608	struct perf_session *session;
1609	int ret = -EINVAL;
1610
1611	perf_tool__init(&tchart->tool, /*ordered_events=*/true);
1612	tchart->tool.comm		 = process_comm_event;
1613	tchart->tool.fork		 = process_fork_event;
1614	tchart->tool.exit		 = process_exit_event;
1615	tchart->tool.sample		 = process_sample_event;
1616
1617	session = perf_session__new(&data, &tchart->tool);
1618	if (IS_ERR(session))
1619		return PTR_ERR(session);
1620
1621	symbol__init(&session->header.env);
1622
1623	(void)perf_header__process_sections(&session->header,
1624					    perf_data__fd(session->data),
1625					    tchart,
1626					    process_header);
1627
1628	if (!perf_session__has_traces(session, "timechart record"))
1629		goto out_delete;
1630
1631	if (perf_session__set_tracepoints_handlers(session,
1632						   power_tracepoints)) {
1633		pr_err("Initializing session tracepoint handlers failed\n");
1634		goto out_delete;
1635	}
1636
1637	ret = perf_session__process_events(session);
1638	if (ret)
1639		goto out_delete;
1640
1641	end_sample_processing(tchart);
1642
1643	sort_pids(tchart);
1644
1645	write_svg_file(tchart, output_name);
1646
1647	pr_info("Written %2.1f seconds of trace to %s.\n",
1648		(tchart->last_time - tchart->first_time) / (double)NSEC_PER_SEC, output_name);
1649out_delete:
1650	perf_session__delete(session);
1651	return ret;
1652}
1653
1654static int timechart__io_record(int argc, const char **argv)
1655{
1656	unsigned int rec_argc, i;
1657	const char **rec_argv;
1658	const char **p;
1659	char *filter = NULL;
1660
1661	const char * const common_args[] = {
1662		"record", "-a", "-R", "-c", "1",
1663	};
1664	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1665
1666	const char * const disk_events[] = {
1667		"syscalls:sys_enter_read",
1668		"syscalls:sys_enter_pread64",
1669		"syscalls:sys_enter_readv",
1670		"syscalls:sys_enter_preadv",
1671		"syscalls:sys_enter_write",
1672		"syscalls:sys_enter_pwrite64",
1673		"syscalls:sys_enter_writev",
1674		"syscalls:sys_enter_pwritev",
1675		"syscalls:sys_enter_sync",
1676		"syscalls:sys_enter_sync_file_range",
1677		"syscalls:sys_enter_fsync",
1678		"syscalls:sys_enter_msync",
1679
1680		"syscalls:sys_exit_read",
1681		"syscalls:sys_exit_pread64",
1682		"syscalls:sys_exit_readv",
1683		"syscalls:sys_exit_preadv",
1684		"syscalls:sys_exit_write",
1685		"syscalls:sys_exit_pwrite64",
1686		"syscalls:sys_exit_writev",
1687		"syscalls:sys_exit_pwritev",
1688		"syscalls:sys_exit_sync",
1689		"syscalls:sys_exit_sync_file_range",
1690		"syscalls:sys_exit_fsync",
1691		"syscalls:sys_exit_msync",
1692	};
1693	unsigned int disk_events_nr = ARRAY_SIZE(disk_events);
1694
1695	const char * const net_events[] = {
1696		"syscalls:sys_enter_recvfrom",
1697		"syscalls:sys_enter_recvmmsg",
1698		"syscalls:sys_enter_recvmsg",
1699		"syscalls:sys_enter_sendto",
1700		"syscalls:sys_enter_sendmsg",
1701		"syscalls:sys_enter_sendmmsg",
1702
1703		"syscalls:sys_exit_recvfrom",
1704		"syscalls:sys_exit_recvmmsg",
1705		"syscalls:sys_exit_recvmsg",
1706		"syscalls:sys_exit_sendto",
1707		"syscalls:sys_exit_sendmsg",
1708		"syscalls:sys_exit_sendmmsg",
1709	};
1710	unsigned int net_events_nr = ARRAY_SIZE(net_events);
1711
1712	const char * const poll_events[] = {
1713		"syscalls:sys_enter_epoll_pwait",
1714		"syscalls:sys_enter_epoll_wait",
1715		"syscalls:sys_enter_poll",
1716		"syscalls:sys_enter_ppoll",
1717		"syscalls:sys_enter_pselect6",
1718		"syscalls:sys_enter_select",
1719
1720		"syscalls:sys_exit_epoll_pwait",
1721		"syscalls:sys_exit_epoll_wait",
1722		"syscalls:sys_exit_poll",
1723		"syscalls:sys_exit_ppoll",
1724		"syscalls:sys_exit_pselect6",
1725		"syscalls:sys_exit_select",
1726	};
1727	unsigned int poll_events_nr = ARRAY_SIZE(poll_events);
1728
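     	/* each tracepoint contributes four argv entries: -e <event> --filter <expr> */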
1729	rec_argc = common_args_nr +
1730		disk_events_nr * 4 +
1731		net_events_nr * 4 +
1732		poll_events_nr * 4 +
1733		argc;
1734	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1735
1736	if (rec_argv == NULL)
1737		return -ENOMEM;
1738
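     	/* filter out perf's own pid so the chart is not dominated by the tracer's I/O */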
1739	if (asprintf(&filter, "common_pid != %d", getpid()) < 0) {
1740		free(rec_argv);
1741		return -ENOMEM;
1742	}
1743
1744	p = rec_argv;
1745	for (i = 0; i < common_args_nr; i++)
1746		*p++ = strdup(common_args[i]);
1747
1748	for (i = 0; i < disk_events_nr; i++) {
1749		if (!is_valid_tracepoint(disk_events[i])) {
1750			rec_argc -= 4;
1751			continue;
1752		}
1753
1754		*p++ = "-e";
1755		*p++ = strdup(disk_events[i]);
1756		*p++ = "--filter";
1757		*p++ = filter;
1758	}
1759	for (i = 0; i < net_events_nr; i++) {
1760		if (!is_valid_tracepoint(net_events[i])) {
1761			rec_argc -= 4;
1762			continue;
1763		}
1764
1765		*p++ = "-e";
1766		*p++ = strdup(net_events[i]);
1767		*p++ = "--filter";
1768		*p++ = filter;
1769	}
1770	for (i = 0; i < poll_events_nr; i++) {
1771		if (!is_valid_tracepoint(poll_events[i])) {
1772			rec_argc -= 4;
1773			continue;
1774		}
1775
1776		*p++ = "-e";
1777		*p++ = strdup(poll_events[i]);
1778		*p++ = "--filter";
1779		*p++ = filter;
1780	}
1781
1782	for (i = 0; i < (unsigned int)argc; i++)
1783		*p++ = argv[i];
1784
1785	return cmd_record(rec_argc, rec_argv);
1786}
1787
1788
1789static int timechart__record(struct timechart *tchart, int argc, const char **argv)
1790{
1791	unsigned int rec_argc, i, j;
1792	const char **rec_argv;
1793	const char **p;
1794	unsigned int record_elems;
1795
1796	const char * const common_args[] = {
1797		"record", "-a", "-R", "-c", "1",
1798	};
1799	unsigned int common_args_nr = ARRAY_SIZE(common_args);
1800
1801	const char * const backtrace_args[] = {
1802		"-g",
1803	};
1804	unsigned int backtrace_args_no = ARRAY_SIZE(backtrace_args);
1805
1806	const char * const power_args[] = {
1807		"-e", "power:cpu_frequency",
1808		"-e", "power:cpu_idle",
1809	};
1810	unsigned int power_args_nr = ARRAY_SIZE(power_args);
1811
1812	const char * const old_power_args[] = {
1813#ifdef SUPPORT_OLD_POWER_EVENTS
1814		"-e", "power:power_start",
1815		"-e", "power:power_end",
1816		"-e", "power:power_frequency",
1817#endif
1818	};
1819	unsigned int old_power_args_nr = ARRAY_SIZE(old_power_args);
1820
1821	const char * const tasks_args[] = {
1822		"-e", "sched:sched_wakeup",
1823		"-e", "sched:sched_switch",
1824	};
1825	unsigned int tasks_args_nr = ARRAY_SIZE(tasks_args);
1826
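     	/* older kernels expose power:power_start/power_end/power_frequency instead of
     	 * power:cpu_idle/power:cpu_frequency; record whichever set is available */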
1827#ifdef SUPPORT_OLD_POWER_EVENTS
1828	if (!is_valid_tracepoint("power:cpu_idle") &&
1829	    is_valid_tracepoint("power:power_start")) {
1830		use_old_power_events = 1;
1831		power_args_nr = 0;
1832	} else {
1833		old_power_args_nr = 0;
1834	}
1835#endif
1836
1837	if (tchart->power_only)
1838		tasks_args_nr = 0;
1839
1840	if (tchart->tasks_only) {
1841		power_args_nr = 0;
1842		old_power_args_nr = 0;
1843	}
1844
1845	if (!tchart->with_backtrace)
1846		backtrace_args_no = 0;
1847
1848	record_elems = common_args_nr + tasks_args_nr +
1849		power_args_nr + old_power_args_nr + backtrace_args_no;
1850
1851	rec_argc = record_elems + argc;
1852	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1853
1854	if (rec_argv == NULL)
1855		return -ENOMEM;
1856
1857	p = rec_argv;
1858	for (i = 0; i < common_args_nr; i++)
1859		*p++ = strdup(common_args[i]);
1860
1861	for (i = 0; i < backtrace_args_no; i++)
1862		*p++ = strdup(backtrace_args[i]);
1863
1864	for (i = 0; i < tasks_args_nr; i++)
1865		*p++ = strdup(tasks_args[i]);
1866
1867	for (i = 0; i < power_args_nr; i++)
1868		*p++ = strdup(power_args[i]);
1869
1870	for (i = 0; i < old_power_args_nr; i++)
1871		*p++ = strdup(old_power_args[i]);
1872
1873	for (j = 0; j < (unsigned int)argc; j++)
1874		*p++ = argv[j];
1875
1876	return cmd_record(rec_argc, rec_argv);
1877}
1878
1879static int
1880parse_process(const struct option *opt __maybe_unused, const char *arg,
1881	      int __maybe_unused unset)
1882{
1883	if (arg)
1884		add_process_filter(arg);
1885	return 0;
1886}
1887
1888static int
1889parse_highlight(const struct option *opt __maybe_unused, const char *arg,
1890		int __maybe_unused unset)
1891{
1892	unsigned long duration = strtoul(arg, NULL, 0);
1893
1894	if (svg_highlight || svg_highlight_name)
 1895		return -1;
1896
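     	/* a numeric argument highlights tasks that ran longer than <duration> ns;
     	 * anything else is treated as a process name to highlight */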
1897	if (duration)
1898		svg_highlight = duration;
1899	else
1900		svg_highlight_name = strdup(arg);
1901
1902	return 0;
1903}
1904
1905static int
1906parse_time(const struct option *opt, const char *arg, int __maybe_unused unset)
1907{
1908	char unit = 'n';
1909	u64 *value = opt->value;
1910
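     	/* accept an optional ms/us/ns suffix; a bare number is taken as nanoseconds */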
1911	if (sscanf(arg, "%" PRIu64 "%cs", value, &unit) > 0) {
1912		switch (unit) {
1913		case 'm':
1914			*value *= NSEC_PER_MSEC;
1915			break;
1916		case 'u':
1917			*value *= NSEC_PER_USEC;
1918			break;
1919		case 'n':
1920			break;
1921		default:
1922			return -1;
1923		}
1924	}
1925
1926	return 0;
1927}
1928
1929int cmd_timechart(int argc, const char **argv)
1930{
1931	struct timechart tchart = {
1932		.proc_num = 15,
1933		.min_time = NSEC_PER_MSEC,
1934		.merge_dist = 1000,
1935	};
1936	const char *output_name = "output.svg";
1937	const struct option timechart_common_options[] = {
1938	OPT_BOOLEAN('P', "power-only", &tchart.power_only, "output power data only"),
1939	OPT_BOOLEAN('T', "tasks-only", &tchart.tasks_only, "output processes data only"),
1940	OPT_END()
1941	};
1942	const struct option timechart_options[] = {
1943	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1944	OPT_STRING('o', "output", &output_name, "file", "output file name"),
1945	OPT_INTEGER('w', "width", &svg_page_width, "page width"),
1946	OPT_CALLBACK(0, "highlight", NULL, "duration or task name",
1947		      "highlight tasks. Pass duration in ns or process name.",
1948		       parse_highlight),
1949	OPT_CALLBACK('p', "process", NULL, "process",
1950		      "process selector. Pass a pid or process name.",
1951		       parse_process),
1952	OPT_CALLBACK(0, "symfs", NULL, "directory",
1953		     "Look for files with symbols relative to this directory",
1954		     symbol__config_symfs),
1955	OPT_INTEGER('n', "proc-num", &tchart.proc_num,
1956		    "min. number of tasks to print"),
1957	OPT_BOOLEAN('t', "topology", &tchart.topology,
1958		    "sort CPUs according to topology"),
1959	OPT_BOOLEAN(0, "io-skip-eagain", &tchart.skip_eagain,
1960		    "skip EAGAIN errors"),
1961	OPT_CALLBACK(0, "io-min-time", &tchart.min_time, "time",
1962		     "all IO faster than min-time will visually appear longer",
1963		     parse_time),
1964	OPT_CALLBACK(0, "io-merge-dist", &tchart.merge_dist, "time",
1965		     "merge events that are merge-dist us apart",
1966		     parse_time),
1967	OPT_BOOLEAN('f', "force", &tchart.force, "don't complain, do it"),
1968	OPT_PARENT(timechart_common_options),
1969	};
1970	const char * const timechart_subcommands[] = { "record", NULL };
1971	const char *timechart_usage[] = {
1972		"perf timechart [<options>] {record}",
1973		NULL
1974	};
1975	const struct option timechart_record_options[] = {
1976	OPT_BOOLEAN('I', "io-only", &tchart.io_only,
1977		    "record only IO data"),
1978	OPT_BOOLEAN('g', "callchain", &tchart.with_backtrace, "record callchain"),
1979	OPT_PARENT(timechart_common_options),
1980	};
1981	const char * const timechart_record_usage[] = {
1982		"perf timechart record [<options>]",
1983		NULL
1984	};
1985	int ret;
1986
1987	cpus_cstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_cstate_start_times));
1988	if (!cpus_cstate_start_times)
1989		return -ENOMEM;
1990	cpus_cstate_state = calloc(MAX_CPUS, sizeof(*cpus_cstate_state));
1991	if (!cpus_cstate_state) {
1992		ret = -ENOMEM;
1993		goto out;
1994	}
1995	cpus_pstate_start_times = calloc(MAX_CPUS, sizeof(*cpus_pstate_start_times));
1996	if (!cpus_pstate_start_times) {
1997		ret = -ENOMEM;
1998		goto out;
1999	}
2000	cpus_pstate_state = calloc(MAX_CPUS, sizeof(*cpus_pstate_state));
2001	if (!cpus_pstate_state) {
2002		ret = -ENOMEM;
2003		goto out;
2004	}
2005
2006	argc = parse_options_subcommand(argc, argv, timechart_options, timechart_subcommands,
2007			timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2008
2009	if (tchart.power_only && tchart.tasks_only) {
2010		pr_err("-P and -T options cannot be used at the same time.\n");
2011		ret = -1;
2012		goto out;
2013	}
2014
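     	/* accept any prefix of "record" that is at least three characters long */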
2015	if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2016		argc = parse_options(argc, argv, timechart_record_options,
2017				     timechart_record_usage,
2018				     PARSE_OPT_STOP_AT_NON_OPTION);
2019
2020		if (tchart.power_only && tchart.tasks_only) {
2021			pr_err("-P and -T options cannot be used at the same time.\n");
2022			ret = -1;
2023			goto out;
2024		}
2025
2026		if (tchart.io_only)
2027			ret = timechart__io_record(argc, argv);
2028		else
2029			ret = timechart__record(&tchart, argc, argv);
2030		goto out;
2031	} else if (argc)
2032		usage_with_options(timechart_usage, timechart_options);
2033
2034	setup_pager();
2035
2036	ret = __cmd_timechart(&tchart, output_name);
2037out:
2038	zfree(&cpus_cstate_start_times);
2039	zfree(&cpus_cstate_state);
2040	zfree(&cpus_pstate_start_times);
2041	zfree(&cpus_pstate_state);
2042	return ret;
2043}