Linux Audio

Check our new training course

Loading...
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-kwork.c
   4 *
   5 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
   6 */
   7
   8#include "builtin.h"
   9
  10#include "util/data.h"
  11#include "util/evlist.h"
  12#include "util/evsel.h"
  13#include "util/header.h"
  14#include "util/kwork.h"
  15#include "util/debug.h"
  16#include "util/session.h"
  17#include "util/symbol.h"
  18#include "util/thread.h"
  19#include "util/string2.h"
  20#include "util/callchain.h"
  21#include "util/evsel_fprintf.h"
  22#include "util/util.h"
  23
  24#include <subcmd/pager.h>
  25#include <subcmd/parse-options.h>
  26#include <event-parse.h>
  27
  28#include <errno.h>
  29#include <inttypes.h>
  30#include <signal.h>
  31#include <linux/err.h>
  32#include <linux/time64.h>
  33#include <linux/zalloc.h>
  34
  35/*
  36 * report header elements width
  37 */
  38#define PRINT_CPU_WIDTH 4
  39#define PRINT_COUNT_WIDTH 9
  40#define PRINT_RUNTIME_WIDTH 10
  41#define PRINT_LATENCY_WIDTH 10
  42#define PRINT_TIMESTAMP_WIDTH 17
  43#define PRINT_KWORK_NAME_WIDTH 30
  44#define RPINT_DECIMAL_WIDTH 3
  45#define PRINT_BRACKETPAIR_WIDTH 2
  46#define PRINT_TIME_UNIT_SEC_WIDTH 2
  47#define PRINT_TIME_UNIT_MESC_WIDTH 3
  48#define PRINT_PID_WIDTH 7
  49#define PRINT_TASK_NAME_WIDTH 16
  50#define PRINT_CPU_USAGE_WIDTH 6
  51#define PRINT_CPU_USAGE_DECIMAL_WIDTH 2
  52#define PRINT_CPU_USAGE_HIST_WIDTH 30
  53#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  54#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  55#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
  56#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
  57
  58struct sort_dimension {
  59	const char      *name;
  60	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
  61	struct          list_head list;
  62};
  63
  64static int id_cmp(struct kwork_work *l, struct kwork_work *r)
  65{
  66	if (l->cpu > r->cpu)
  67		return 1;
  68	if (l->cpu < r->cpu)
  69		return -1;
  70
  71	if (l->id > r->id)
  72		return 1;
  73	if (l->id < r->id)
  74		return -1;
  75
  76	return 0;
  77}
  78
  79static int count_cmp(struct kwork_work *l, struct kwork_work *r)
  80{
  81	if (l->nr_atoms > r->nr_atoms)
  82		return 1;
  83	if (l->nr_atoms < r->nr_atoms)
  84		return -1;
  85
  86	return 0;
  87}
  88
  89static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
  90{
  91	if (l->total_runtime > r->total_runtime)
  92		return 1;
  93	if (l->total_runtime < r->total_runtime)
  94		return -1;
  95
  96	return 0;
  97}
  98
  99static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
 100{
 101	if (l->max_runtime > r->max_runtime)
 102		return 1;
 103	if (l->max_runtime < r->max_runtime)
 104		return -1;
 105
 106	return 0;
 107}
 108
 109static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 110{
 111	u64 avgl, avgr;
 112
 113	if (!r->nr_atoms)
 114		return 1;
 115	if (!l->nr_atoms)
 116		return -1;
 117
 118	avgl = l->total_latency / l->nr_atoms;
 119	avgr = r->total_latency / r->nr_atoms;
 120
 121	if (avgl > avgr)
 122		return 1;
 123	if (avgl < avgr)
 124		return -1;
 125
 126	return 0;
 127}
 128
 129static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 130{
 131	if (l->max_latency > r->max_latency)
 132		return 1;
 133	if (l->max_latency < r->max_latency)
 134		return -1;
 135
 136	return 0;
 137}
 138
 139static int cpu_usage_cmp(struct kwork_work *l, struct kwork_work *r)
 140{
 141	if (l->cpu_usage > r->cpu_usage)
 142		return 1;
 143	if (l->cpu_usage < r->cpu_usage)
 144		return -1;
 145
 146	return 0;
 147}
 148
 149static int id_or_cpu_r_cmp(struct kwork_work *l, struct kwork_work *r)
 150{
 151	if (l->id < r->id)
 152		return 1;
 153	if (l->id > r->id)
 154		return -1;
 155
 156	if (l->id != 0)
 157		return 0;
 158
 159	if (l->cpu < r->cpu)
 160		return 1;
 161	if (l->cpu > r->cpu)
 162		return -1;
 163
 164	return 0;
 165}
 166
 167static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
 168			       const char *tok, struct list_head *list)
 169{
 170	size_t i;
 171	static struct sort_dimension max_sort_dimension = {
 172		.name = "max",
 173		.cmp  = max_runtime_cmp,
 174	};
 175	static struct sort_dimension id_sort_dimension = {
 176		.name = "id",
 177		.cmp  = id_cmp,
 178	};
 179	static struct sort_dimension runtime_sort_dimension = {
 180		.name = "runtime",
 181		.cmp  = runtime_cmp,
 182	};
 183	static struct sort_dimension count_sort_dimension = {
 184		.name = "count",
 185		.cmp  = count_cmp,
 186	};
 187	static struct sort_dimension avg_sort_dimension = {
 188		.name = "avg",
 189		.cmp  = avg_latency_cmp,
 190	};
 191	static struct sort_dimension rate_sort_dimension = {
 192		.name = "rate",
 193		.cmp  = cpu_usage_cmp,
 194	};
 195	static struct sort_dimension tid_sort_dimension = {
 196		.name = "tid",
 197		.cmp  = id_or_cpu_r_cmp,
 198	};
 199	struct sort_dimension *available_sorts[] = {
 200		&id_sort_dimension,
 201		&max_sort_dimension,
 202		&count_sort_dimension,
 203		&runtime_sort_dimension,
 204		&avg_sort_dimension,
 205		&rate_sort_dimension,
 206		&tid_sort_dimension,
 207	};
 208
 209	if (kwork->report == KWORK_REPORT_LATENCY)
 210		max_sort_dimension.cmp = max_latency_cmp;
 211
 212	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
 213		if (!strcmp(available_sorts[i]->name, tok)) {
 214			list_add_tail(&available_sorts[i]->list, list);
 215			return 0;
 216		}
 217	}
 218
 219	return -1;
 220}
 221
 222static void setup_sorting(struct perf_kwork *kwork,
 223			  const struct option *options,
 224			  const char * const usage_msg[])
 225{
 226	char *tmp, *tok, *str = strdup(kwork->sort_order);
 227
 228	for (tok = strtok_r(str, ", ", &tmp);
 229	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
 230		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
 231			usage_with_options_msg(usage_msg, options,
 232					       "Unknown --sort key: `%s'", tok);
 233	}
 234
 235	pr_debug("Sort order: %s\n", kwork->sort_order);
 236	free(str);
 237}
 238
 239static struct kwork_atom *atom_new(struct perf_kwork *kwork,
 240				   struct perf_sample *sample)
 241{
 242	unsigned long i;
 243	struct kwork_atom_page *page;
 244	struct kwork_atom *atom = NULL;
 245
 246	list_for_each_entry(page, &kwork->atom_page_list, list) {
 247		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
 248			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
 249			BUG_ON(i >= NR_ATOM_PER_PAGE);
 250			atom = &page->atoms[i];
 251			goto found_atom;
 252		}
 253	}
 254
 255	/*
 256	 * new page
 257	 */
 258	page = zalloc(sizeof(*page));
 259	if (page == NULL) {
 260		pr_err("Failed to zalloc kwork atom page\n");
 261		return NULL;
 262	}
 263
 264	i = 0;
 265	atom = &page->atoms[0];
 266	list_add_tail(&page->list, &kwork->atom_page_list);
 267
 268found_atom:
 269	__set_bit(i, page->bitmap);
 270	atom->time = sample->time;
 271	atom->prev = NULL;
 272	atom->page_addr = page;
 273	atom->bit_inpage = i;
 274	return atom;
 275}
 276
 277static void atom_free(struct kwork_atom *atom)
 278{
 279	if (atom->prev != NULL)
 280		atom_free(atom->prev);
 281
 282	__clear_bit(atom->bit_inpage,
 283		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 284}
 285
 286static void atom_del(struct kwork_atom *atom)
 287{
 288	list_del(&atom->list);
 289	atom_free(atom);
 290}
 291
 292static int work_cmp(struct list_head *list,
 293		    struct kwork_work *l, struct kwork_work *r)
 294{
 295	int ret = 0;
 296	struct sort_dimension *sort;
 297
 298	BUG_ON(list_empty(list));
 299
 300	list_for_each_entry(sort, list, list) {
 301		ret = sort->cmp(l, r);
 302		if (ret)
 303			return ret;
 304	}
 305
 306	return ret;
 307}
 308
 309static struct kwork_work *work_search(struct rb_root_cached *root,
 310				      struct kwork_work *key,
 311				      struct list_head *sort_list)
 312{
 313	int cmp;
 314	struct kwork_work *work;
 315	struct rb_node *node = root->rb_root.rb_node;
 316
 317	while (node) {
 318		work = container_of(node, struct kwork_work, node);
 319		cmp = work_cmp(sort_list, key, work);
 320		if (cmp > 0)
 321			node = node->rb_left;
 322		else if (cmp < 0)
 323			node = node->rb_right;
 324		else {
 325			if (work->name == NULL)
 326				work->name = key->name;
 327			return work;
 328		}
 329	}
 330	return NULL;
 331}
 332
 333static void work_insert(struct rb_root_cached *root,
 334			struct kwork_work *key, struct list_head *sort_list)
 335{
 336	int cmp;
 337	bool leftmost = true;
 338	struct kwork_work *cur;
 339	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
 340
 341	while (*new) {
 342		cur = container_of(*new, struct kwork_work, node);
 343		parent = *new;
 344		cmp = work_cmp(sort_list, key, cur);
 345
 346		if (cmp > 0)
 347			new = &((*new)->rb_left);
 348		else {
 349			new = &((*new)->rb_right);
 350			leftmost = false;
 351		}
 352	}
 353
 354	rb_link_node(&key->node, parent, new);
 355	rb_insert_color_cached(&key->node, root, leftmost);
 356}
 357
 358static struct kwork_work *work_new(struct kwork_work *key)
 359{
 360	int i;
 361	struct kwork_work *work = zalloc(sizeof(*work));
 362
 363	if (work == NULL) {
 364		pr_err("Failed to zalloc kwork work\n");
 365		return NULL;
 366	}
 367
 368	for (i = 0; i < KWORK_TRACE_MAX; i++)
 369		INIT_LIST_HEAD(&work->atom_list[i]);
 370
 371	work->id = key->id;
 372	work->cpu = key->cpu;
 373	work->name = key->name;
 374	work->class = key->class;
 375	return work;
 376}
 377
 378static struct kwork_work *work_findnew(struct rb_root_cached *root,
 379				       struct kwork_work *key,
 380				       struct list_head *sort_list)
 381{
 382	struct kwork_work *work = work_search(root, key, sort_list);
 383
 384	if (work != NULL)
 385		return work;
 386
 387	work = work_new(key);
 388	if (work)
 389		work_insert(root, work, sort_list);
 390
 391	return work;
 392}
 393
 394static void profile_update_timespan(struct perf_kwork *kwork,
 395				    struct perf_sample *sample)
 396{
 397	if (!kwork->summary)
 398		return;
 399
 400	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
 401		kwork->timestart = sample->time;
 402
 403	if (kwork->timeend < sample->time)
 404		kwork->timeend = sample->time;
 405}
 406
 407static bool profile_name_match(struct perf_kwork *kwork,
 408			       struct kwork_work *work)
 409{
 410	if (kwork->profile_name && work->name &&
 411	    (strcmp(work->name, kwork->profile_name) != 0)) {
 412		return false;
 413	}
 414
 415	return true;
 416}
 417
 418static bool profile_event_match(struct perf_kwork *kwork,
 419				struct kwork_work *work,
 420				struct perf_sample *sample)
 421{
 422	int cpu = work->cpu;
 423	u64 time = sample->time;
 424	struct perf_time_interval *ptime = &kwork->ptime;
 425
 426	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
 427		return false;
 428
 429	if (((ptime->start != 0) && (ptime->start > time)) ||
 430	    ((ptime->end != 0) && (ptime->end < time)))
 431		return false;
 432
 433	/*
 434	 * report top needs to collect the runtime of all tasks to
 435	 * calculate the load of each core.
 436	 */
 437	if ((kwork->report != KWORK_REPORT_TOP) &&
 438	    !profile_name_match(kwork, work)) {
 439		return false;
 440	}
 441
 442	profile_update_timespan(kwork, sample);
 443	return true;
 444}
 445
 446static int work_push_atom(struct perf_kwork *kwork,
 447			  struct kwork_class *class,
 448			  enum kwork_trace_type src_type,
 449			  enum kwork_trace_type dst_type,
 450			  struct evsel *evsel,
 451			  struct perf_sample *sample,
 452			  struct machine *machine,
 453			  struct kwork_work **ret_work,
 454			  bool overwrite)
 455{
 456	struct kwork_atom *atom, *dst_atom, *last_atom;
 457	struct kwork_work *work, key;
 458
 459	BUG_ON(class->work_init == NULL);
 460	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
 461
 462	atom = atom_new(kwork, sample);
 463	if (atom == NULL)
 464		return -1;
 465
 466	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 467	if (work == NULL) {
 468		atom_free(atom);
 469		return -1;
 470	}
 471
 472	if (!profile_event_match(kwork, work, sample)) {
 473		atom_free(atom);
 474		return 0;
 475	}
 476
 477	if (dst_type < KWORK_TRACE_MAX) {
 478		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
 479						   struct kwork_atom, list);
 480		if (dst_atom != NULL) {
 481			atom->prev = dst_atom;
 482			list_del(&dst_atom->list);
 483		}
 484	}
 485
 486	if (ret_work != NULL)
 487		*ret_work = work;
 488
 489	if (overwrite) {
 490		last_atom = list_last_entry_or_null(&work->atom_list[src_type],
 491						    struct kwork_atom, list);
 492		if (last_atom) {
 493			atom_del(last_atom);
 494
 495			kwork->nr_skipped_events[src_type]++;
 496			kwork->nr_skipped_events[KWORK_TRACE_MAX]++;
 497		}
 498	}
 499
 500	list_add_tail(&atom->list, &work->atom_list[src_type]);
 501
 502	return 0;
 503}
 504
 505static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
 506					struct kwork_class *class,
 507					enum kwork_trace_type src_type,
 508					enum kwork_trace_type dst_type,
 509					struct evsel *evsel,
 510					struct perf_sample *sample,
 511					struct machine *machine,
 512					struct kwork_work **ret_work)
 513{
 514	struct kwork_atom *atom, *src_atom;
 515	struct kwork_work *work, key;
 516
 517	BUG_ON(class->work_init == NULL);
 518	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
 519
 520	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 521	if (ret_work != NULL)
 522		*ret_work = work;
 523
 524	if (work == NULL)
 525		return NULL;
 526
 527	if (!profile_event_match(kwork, work, sample))
 528		return NULL;
 529
 530	atom = list_last_entry_or_null(&work->atom_list[dst_type],
 531				       struct kwork_atom, list);
 532	if (atom != NULL)
 533		return atom;
 534
 535	src_atom = atom_new(kwork, sample);
 536	if (src_atom != NULL)
 537		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
 538	else {
 539		if (ret_work != NULL)
 540			*ret_work = NULL;
 541	}
 542
 543	return NULL;
 544}
 545
 546static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
 547					  u64 id, int cpu)
 548{
 549	struct rb_node *next;
 550	struct kwork_work *work;
 551
 552	next = rb_first_cached(root);
 553	while (next) {
 554		work = rb_entry(next, struct kwork_work, node);
 555		if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
 556		    (cpu == -1 && work->id == id))
 557			return work;
 558
 559		next = rb_next(next);
 560	}
 561
 562	return NULL;
 563}
 564
 565static struct kwork_class *get_kwork_class(struct perf_kwork *kwork,
 566					   enum kwork_class_type type)
 567{
 568	struct kwork_class *class;
 569
 570	list_for_each_entry(class, &kwork->class_list, list) {
 571		if (class->type == type)
 572			return class;
 573	}
 574
 575	return NULL;
 576}
 577
 578static void report_update_exit_event(struct kwork_work *work,
 579				     struct kwork_atom *atom,
 580				     struct perf_sample *sample)
 581{
 582	u64 delta;
 583	u64 exit_time = sample->time;
 584	u64 entry_time = atom->time;
 585
 586	if ((entry_time != 0) && (exit_time >= entry_time)) {
 587		delta = exit_time - entry_time;
 588		if ((delta > work->max_runtime) ||
 589		    (work->max_runtime == 0)) {
 590			work->max_runtime = delta;
 591			work->max_runtime_start = entry_time;
 592			work->max_runtime_end = exit_time;
 593		}
 594		work->total_runtime += delta;
 595		work->nr_atoms++;
 596	}
 597}
 598
 599static int report_entry_event(struct perf_kwork *kwork,
 600			      struct kwork_class *class,
 601			      struct evsel *evsel,
 602			      struct perf_sample *sample,
 603			      struct machine *machine)
 604{
 605	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 606			      KWORK_TRACE_MAX, evsel, sample,
 607			      machine, NULL, true);
 608}
 609
 610static int report_exit_event(struct perf_kwork *kwork,
 611			     struct kwork_class *class,
 612			     struct evsel *evsel,
 613			     struct perf_sample *sample,
 614			     struct machine *machine)
 615{
 616	struct kwork_atom *atom = NULL;
 617	struct kwork_work *work = NULL;
 618
 619	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 620			     KWORK_TRACE_ENTRY, evsel, sample,
 621			     machine, &work);
 622	if (work == NULL)
 623		return -1;
 624
 625	if (atom != NULL) {
 626		report_update_exit_event(work, atom, sample);
 627		atom_del(atom);
 628	}
 629
 630	return 0;
 631}
 632
 633static void latency_update_entry_event(struct kwork_work *work,
 634				       struct kwork_atom *atom,
 635				       struct perf_sample *sample)
 636{
 637	u64 delta;
 638	u64 entry_time = sample->time;
 639	u64 raise_time = atom->time;
 640
 641	if ((raise_time != 0) && (entry_time >= raise_time)) {
 642		delta = entry_time - raise_time;
 643		if ((delta > work->max_latency) ||
 644		    (work->max_latency == 0)) {
 645			work->max_latency = delta;
 646			work->max_latency_start = raise_time;
 647			work->max_latency_end = entry_time;
 648		}
 649		work->total_latency += delta;
 650		work->nr_atoms++;
 651	}
 652}
 653
 654static int latency_raise_event(struct perf_kwork *kwork,
 655			       struct kwork_class *class,
 656			       struct evsel *evsel,
 657			       struct perf_sample *sample,
 658			       struct machine *machine)
 659{
 660	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 661			      KWORK_TRACE_MAX, evsel, sample,
 662			      machine, NULL, true);
 663}
 664
 665static int latency_entry_event(struct perf_kwork *kwork,
 666			       struct kwork_class *class,
 667			       struct evsel *evsel,
 668			       struct perf_sample *sample,
 669			       struct machine *machine)
 670{
 671	struct kwork_atom *atom = NULL;
 672	struct kwork_work *work = NULL;
 673
 674	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
 675			     KWORK_TRACE_RAISE, evsel, sample,
 676			     machine, &work);
 677	if (work == NULL)
 678		return -1;
 679
 680	if (atom != NULL) {
 681		latency_update_entry_event(work, atom, sample);
 682		atom_del(atom);
 683	}
 684
 685	return 0;
 686}
 687
 688static void timehist_save_callchain(struct perf_kwork *kwork,
 689				    struct perf_sample *sample,
 690				    struct evsel *evsel,
 691				    struct machine *machine)
 692{
 693	struct symbol *sym;
 694	struct thread *thread;
 695	struct callchain_cursor_node *node;
 696	struct callchain_cursor *cursor;
 697
 698	if (!kwork->show_callchain || sample->callchain == NULL)
 699		return;
 700
 701	/* want main thread for process - has maps */
 702	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
 703	if (thread == NULL) {
 704		pr_debug("Failed to get thread for pid %d\n", sample->pid);
 705		return;
 706	}
 707
 708	cursor = get_tls_callchain_cursor();
 709
 710	if (thread__resolve_callchain(thread, cursor, evsel, sample,
 711				      NULL, NULL, kwork->max_stack + 2) != 0) {
 712		pr_debug("Failed to resolve callchain, skipping\n");
 713		goto out_put;
 714	}
 715
 716	callchain_cursor_commit(cursor);
 717
 718	while (true) {
 719		node = callchain_cursor_current(cursor);
 720		if (node == NULL)
 721			break;
 722
 723		sym = node->ms.sym;
 724		if (sym) {
 725			if (!strcmp(sym->name, "__softirqentry_text_start") ||
 726			    !strcmp(sym->name, "__do_softirq"))
 727				sym->ignore = 1;
 728		}
 729
 730		callchain_cursor_advance(cursor);
 731	}
 732
 733out_put:
 734	thread__put(thread);
 735}
 736
 737static void timehist_print_event(struct perf_kwork *kwork,
 738				 struct kwork_work *work,
 739				 struct kwork_atom *atom,
 740				 struct perf_sample *sample,
 741				 struct addr_location *al)
 742{
 743	char entrytime[32], exittime[32];
 744	char kwork_name[PRINT_KWORK_NAME_WIDTH];
 745
 746	/*
 747	 * runtime start
 748	 */
 749	timestamp__scnprintf_usec(atom->time,
 750				  entrytime, sizeof(entrytime));
 751	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
 752
 753	/*
 754	 * runtime end
 755	 */
 756	timestamp__scnprintf_usec(sample->time,
 757				  exittime, sizeof(exittime));
 758	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
 759
 760	/*
 761	 * cpu
 762	 */
 763	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
 764
 765	/*
 766	 * kwork name
 767	 */
 768	if (work->class && work->class->work_name) {
 769		work->class->work_name(work, kwork_name,
 770				       PRINT_KWORK_NAME_WIDTH);
 771		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
 772	} else
 773		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
 774
 775	/*
 776	 *runtime
 777	 */
 778	printf(" %*.*f ",
 779	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
 780	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);
 781
 782	/*
 783	 * delaytime
 784	 */
 785	if (atom->prev != NULL)
 786		printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
 787		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
 788	else
 789		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
 790
 791	/*
 792	 * callchain
 793	 */
 794	if (kwork->show_callchain) {
 795		struct callchain_cursor *cursor = get_tls_callchain_cursor();
 796
 797		if (cursor == NULL)
 798			return;
 799
 800		printf(" ");
 801
 802		sample__fprintf_sym(sample, al, 0,
 803				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
 804				    EVSEL__PRINT_CALLCHAIN_ARROW |
 805				    EVSEL__PRINT_SKIP_IGNORED,
 806				    cursor, symbol_conf.bt_stop_list,
 807				    stdout);
 808	}
 809
 810	printf("\n");
 811}
 812
 813static int timehist_raise_event(struct perf_kwork *kwork,
 814				struct kwork_class *class,
 815				struct evsel *evsel,
 816				struct perf_sample *sample,
 817				struct machine *machine)
 818{
 819	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 820			      KWORK_TRACE_MAX, evsel, sample,
 821			      machine, NULL, true);
 822}
 823
 824static int timehist_entry_event(struct perf_kwork *kwork,
 825				struct kwork_class *class,
 826				struct evsel *evsel,
 827				struct perf_sample *sample,
 828				struct machine *machine)
 829{
 830	int ret;
 831	struct kwork_work *work = NULL;
 832
 833	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 834			     KWORK_TRACE_RAISE, evsel, sample,
 835			     machine, &work, true);
 836	if (ret)
 837		return ret;
 838
 839	if (work != NULL)
 840		timehist_save_callchain(kwork, sample, evsel, machine);
 841
 842	return 0;
 843}
 844
 845static int timehist_exit_event(struct perf_kwork *kwork,
 846			       struct kwork_class *class,
 847			       struct evsel *evsel,
 848			       struct perf_sample *sample,
 849			       struct machine *machine)
 850{
 851	struct kwork_atom *atom = NULL;
 852	struct kwork_work *work = NULL;
 853	struct addr_location al;
 854	int ret = 0;
 855
 856	addr_location__init(&al);
 857	if (machine__resolve(machine, &al, sample) < 0) {
 858		pr_debug("Problem processing event, skipping it\n");
 859		ret = -1;
 860		goto out;
 861	}
 862
 863	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 864			     KWORK_TRACE_ENTRY, evsel, sample,
 865			     machine, &work);
 866	if (work == NULL) {
 867		ret = -1;
 868		goto out;
 869	}
 870
 871	if (atom != NULL) {
 872		work->nr_atoms++;
 873		timehist_print_event(kwork, work, atom, sample, &al);
 874		atom_del(atom);
 875	}
 876
 877out:
 878	addr_location__exit(&al);
 879	return ret;
 880}
 881
 882static void top_update_runtime(struct kwork_work *work,
 883			       struct kwork_atom *atom,
 884			       struct perf_sample *sample)
 885{
 886	u64 delta;
 887	u64 exit_time = sample->time;
 888	u64 entry_time = atom->time;
 889
 890	if ((entry_time != 0) && (exit_time >= entry_time)) {
 891		delta = exit_time - entry_time;
 892		work->total_runtime += delta;
 893	}
 894}
 895
 896static int top_entry_event(struct perf_kwork *kwork,
 897			   struct kwork_class *class,
 898			   struct evsel *evsel,
 899			   struct perf_sample *sample,
 900			   struct machine *machine)
 901{
 902	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 903			      KWORK_TRACE_MAX, evsel, sample,
 904			      machine, NULL, true);
 905}
 906
 907static int top_exit_event(struct perf_kwork *kwork,
 908			  struct kwork_class *class,
 909			  struct evsel *evsel,
 910			  struct perf_sample *sample,
 911			  struct machine *machine)
 912{
 913	struct kwork_work *work, *sched_work;
 914	struct kwork_class *sched_class;
 915	struct kwork_atom *atom;
 916
 917	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 918			     KWORK_TRACE_ENTRY, evsel, sample,
 919			     machine, &work);
 920	if (!work)
 921		return -1;
 922
 923	if (atom) {
 924		sched_class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
 925		if (sched_class) {
 926			sched_work = find_work_by_id(&sched_class->work_root,
 927						     work->id, work->cpu);
 928			if (sched_work)
 929				top_update_runtime(work, atom, sample);
 930		}
 931		atom_del(atom);
 932	}
 933
 934	return 0;
 935}
 936
 937static int top_sched_switch_event(struct perf_kwork *kwork,
 938				  struct kwork_class *class,
 939				  struct evsel *evsel,
 940				  struct perf_sample *sample,
 941				  struct machine *machine)
 942{
 943	struct kwork_atom *atom;
 944	struct kwork_work *work;
 945
 946	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 947			     KWORK_TRACE_ENTRY, evsel, sample,
 948			     machine, &work);
 949	if (!work)
 950		return -1;
 951
 952	if (atom) {
 953		top_update_runtime(work, atom, sample);
 954		atom_del(atom);
 955	}
 956
 957	return top_entry_event(kwork, class, evsel, sample, machine);
 958}
 959
 960static struct kwork_class kwork_irq;
 961static int process_irq_handler_entry_event(const struct perf_tool *tool,
 962					   struct evsel *evsel,
 963					   struct perf_sample *sample,
 964					   struct machine *machine)
 965{
 966	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 967
 968	if (kwork->tp_handler->entry_event)
 969		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
 970						      evsel, sample, machine);
 971	return 0;
 972}
 973
 974static int process_irq_handler_exit_event(const struct perf_tool *tool,
 975					  struct evsel *evsel,
 976					  struct perf_sample *sample,
 977					  struct machine *machine)
 978{
 979	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 980
 981	if (kwork->tp_handler->exit_event)
 982		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
 983						     evsel, sample, machine);
 984	return 0;
 985}
 986
 987const struct evsel_str_handler irq_tp_handlers[] = {
 988	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
 989	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
 990};
 991
 992static int irq_class_init(struct kwork_class *class,
 993			  struct perf_session *session)
 994{
 995	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
 996		pr_err("Failed to set irq tracepoints handlers\n");
 997		return -1;
 998	}
 999
1000	class->work_root = RB_ROOT_CACHED;
1001	return 0;
1002}
1003
1004static void irq_work_init(struct perf_kwork *kwork,
1005			  struct kwork_class *class,
1006			  struct kwork_work *work,
1007			  enum kwork_trace_type src_type __maybe_unused,
1008			  struct evsel *evsel,
1009			  struct perf_sample *sample,
1010			  struct machine *machine __maybe_unused)
1011{
1012	work->class = class;
1013	work->cpu = sample->cpu;
1014
1015	if (kwork->report == KWORK_REPORT_TOP) {
1016		work->id = evsel__intval_common(evsel, sample, "common_pid");
1017		work->name = NULL;
1018	} else {
1019		work->id = evsel__intval(evsel, sample, "irq");
1020		work->name = evsel__strval(evsel, sample, "name");
1021	}
1022}
1023
1024static void irq_work_name(struct kwork_work *work, char *buf, int len)
1025{
1026	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
1027}
1028
1029static struct kwork_class kwork_irq = {
1030	.name           = "irq",
1031	.type           = KWORK_CLASS_IRQ,
1032	.nr_tracepoints = 2,
1033	.tp_handlers    = irq_tp_handlers,
1034	.class_init     = irq_class_init,
1035	.work_init      = irq_work_init,
1036	.work_name      = irq_work_name,
1037};
1038
1039static struct kwork_class kwork_softirq;
1040static int process_softirq_raise_event(const struct perf_tool *tool,
1041				       struct evsel *evsel,
1042				       struct perf_sample *sample,
1043				       struct machine *machine)
1044{
1045	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1046
1047	if (kwork->tp_handler->raise_event)
1048		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
1049						      evsel, sample, machine);
1050
1051	return 0;
1052}
1053
1054static int process_softirq_entry_event(const struct perf_tool *tool,
1055				       struct evsel *evsel,
1056				       struct perf_sample *sample,
1057				       struct machine *machine)
1058{
1059	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1060
1061	if (kwork->tp_handler->entry_event)
1062		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
1063						      evsel, sample, machine);
1064
1065	return 0;
1066}
1067
1068static int process_softirq_exit_event(const struct perf_tool *tool,
1069				      struct evsel *evsel,
1070				      struct perf_sample *sample,
1071				      struct machine *machine)
1072{
1073	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1074
1075	if (kwork->tp_handler->exit_event)
1076		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
1077						     evsel, sample, machine);
1078
1079	return 0;
1080}
1081
1082const struct evsel_str_handler softirq_tp_handlers[] = {
1083	{ "irq:softirq_raise", process_softirq_raise_event, },
1084	{ "irq:softirq_entry", process_softirq_entry_event, },
1085	{ "irq:softirq_exit",  process_softirq_exit_event,  },
1086};
1087
1088static int softirq_class_init(struct kwork_class *class,
1089			      struct perf_session *session)
1090{
1091	if (perf_session__set_tracepoints_handlers(session,
1092						   softirq_tp_handlers)) {
1093		pr_err("Failed to set softirq tracepoints handlers\n");
1094		return -1;
1095	}
1096
1097	class->work_root = RB_ROOT_CACHED;
1098	return 0;
1099}
1100
1101static char *evsel__softirq_name(struct evsel *evsel, u64 num)
1102{
1103	char *name = NULL;
1104	bool found = false;
1105	struct tep_print_flag_sym *sym = NULL;
1106	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
1107
1108	if ((args == NULL) || (args->next == NULL))
1109		return NULL;
1110
1111	/* skip softirq field: "REC->vec" */
1112	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
1113		if ((eval_flag(sym->value) == (unsigned long long)num) &&
1114		    (strlen(sym->str) != 0)) {
1115			found = true;
1116			break;
1117		}
1118	}
1119
1120	if (!found)
1121		return NULL;
1122
1123	name = strdup(sym->str);
1124	if (name == NULL) {
1125		pr_err("Failed to copy symbol name\n");
1126		return NULL;
1127	}
1128	return name;
1129}
1130
1131static void softirq_work_init(struct perf_kwork *kwork,
1132			      struct kwork_class *class,
1133			      struct kwork_work *work,
1134			      enum kwork_trace_type src_type __maybe_unused,
1135			      struct evsel *evsel,
1136			      struct perf_sample *sample,
1137			      struct machine *machine __maybe_unused)
1138{
1139	u64 num;
1140
1141	work->class = class;
1142	work->cpu = sample->cpu;
1143
1144	if (kwork->report == KWORK_REPORT_TOP) {
1145		work->id = evsel__intval_common(evsel, sample, "common_pid");
1146		work->name = NULL;
1147	} else {
1148		num = evsel__intval(evsel, sample, "vec");
1149		work->id = num;
1150		work->name = evsel__softirq_name(evsel, num);
1151	}
1152}
1153
1154static void softirq_work_name(struct kwork_work *work, char *buf, int len)
1155{
1156	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
1157}
1158
1159static struct kwork_class kwork_softirq = {
1160	.name           = "softirq",
1161	.type           = KWORK_CLASS_SOFTIRQ,
1162	.nr_tracepoints = 3,
1163	.tp_handlers    = softirq_tp_handlers,
1164	.class_init     = softirq_class_init,
1165	.work_init      = softirq_work_init,
1166	.work_name      = softirq_work_name,
1167};
1168
1169static struct kwork_class kwork_workqueue;
1170static int process_workqueue_activate_work_event(const struct perf_tool *tool,
1171						 struct evsel *evsel,
1172						 struct perf_sample *sample,
1173						 struct machine *machine)
1174{
1175	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1176
1177	if (kwork->tp_handler->raise_event)
1178		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
1179						    evsel, sample, machine);
1180
1181	return 0;
1182}
1183
1184static int process_workqueue_execute_start_event(const struct perf_tool *tool,
1185						 struct evsel *evsel,
1186						 struct perf_sample *sample,
1187						 struct machine *machine)
1188{
1189	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1190
1191	if (kwork->tp_handler->entry_event)
1192		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
1193						    evsel, sample, machine);
1194
1195	return 0;
1196}
1197
1198static int process_workqueue_execute_end_event(const struct perf_tool *tool,
1199					       struct evsel *evsel,
1200					       struct perf_sample *sample,
1201					       struct machine *machine)
1202{
1203	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1204
1205	if (kwork->tp_handler->exit_event)
1206		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
1207						   evsel, sample, machine);
1208
1209	return 0;
1210}
1211
1212const struct evsel_str_handler workqueue_tp_handlers[] = {
1213	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
1214	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
1215	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
1216};
1217
1218static int workqueue_class_init(struct kwork_class *class,
1219				struct perf_session *session)
1220{
1221	if (perf_session__set_tracepoints_handlers(session,
1222						   workqueue_tp_handlers)) {
1223		pr_err("Failed to set workqueue tracepoints handlers\n");
1224		return -1;
1225	}
1226
1227	class->work_root = RB_ROOT_CACHED;
1228	return 0;
1229}
1230
1231static void workqueue_work_init(struct perf_kwork *kwork __maybe_unused,
1232				struct kwork_class *class,
1233				struct kwork_work *work,
1234				enum kwork_trace_type src_type __maybe_unused,
1235				struct evsel *evsel,
1236				struct perf_sample *sample,
1237				struct machine *machine)
1238{
1239	char *modp = NULL;
1240	unsigned long long function_addr = evsel__intval(evsel,
1241							 sample, "function");
1242
1243	work->class = class;
1244	work->cpu = sample->cpu;
1245	work->id = evsel__intval(evsel, sample, "work");
1246	work->name = function_addr == 0 ? NULL :
1247		machine__resolve_kernel_addr(machine, &function_addr, &modp);
1248}
1249
1250static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1251{
1252	if (work->name != NULL)
1253		snprintf(buf, len, "(w)%s", work->name);
1254	else
1255		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1256}
1257
1258static struct kwork_class kwork_workqueue = {
1259	.name           = "workqueue",
1260	.type           = KWORK_CLASS_WORKQUEUE,
1261	.nr_tracepoints = 3,
1262	.tp_handlers    = workqueue_tp_handlers,
1263	.class_init     = workqueue_class_init,
1264	.work_init      = workqueue_work_init,
1265	.work_name      = workqueue_work_name,
1266};
1267
1268static struct kwork_class kwork_sched;
1269static int process_sched_switch_event(const struct perf_tool *tool,
1270				      struct evsel *evsel,
1271				      struct perf_sample *sample,
1272				      struct machine *machine)
1273{
1274	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1275
1276	if (kwork->tp_handler->sched_switch_event)
1277		return kwork->tp_handler->sched_switch_event(kwork, &kwork_sched,
1278							     evsel, sample, machine);
1279	return 0;
1280}
1281
1282const struct evsel_str_handler sched_tp_handlers[] = {
1283	{ "sched:sched_switch",  process_sched_switch_event, },
1284};
1285
1286static int sched_class_init(struct kwork_class *class,
1287			    struct perf_session *session)
1288{
1289	if (perf_session__set_tracepoints_handlers(session,
1290						   sched_tp_handlers)) {
1291		pr_err("Failed to set sched tracepoints handlers\n");
1292		return -1;
1293	}
1294
1295	class->work_root = RB_ROOT_CACHED;
1296	return 0;
1297}
1298
1299static void sched_work_init(struct perf_kwork *kwork __maybe_unused,
1300			    struct kwork_class *class,
1301			    struct kwork_work *work,
1302			    enum kwork_trace_type src_type,
1303			    struct evsel *evsel,
1304			    struct perf_sample *sample,
1305			    struct machine *machine __maybe_unused)
1306{
1307	work->class = class;
1308	work->cpu = sample->cpu;
1309
1310	if (src_type == KWORK_TRACE_EXIT) {
1311		work->id = evsel__intval(evsel, sample, "prev_pid");
1312		work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
1313	} else if (src_type == KWORK_TRACE_ENTRY) {
1314		work->id = evsel__intval(evsel, sample, "next_pid");
1315		work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
1316	}
1317}
1318
1319static void sched_work_name(struct kwork_work *work, char *buf, int len)
1320{
1321	snprintf(buf, len, "%s", work->name);
1322}
1323
1324static struct kwork_class kwork_sched = {
1325	.name		= "sched",
1326	.type		= KWORK_CLASS_SCHED,
1327	.nr_tracepoints	= ARRAY_SIZE(sched_tp_handlers),
1328	.tp_handlers	= sched_tp_handlers,
1329	.class_init	= sched_class_init,
1330	.work_init	= sched_work_init,
1331	.work_name	= sched_work_name,
1332};
1333
1334static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
1335	[KWORK_CLASS_IRQ]       = &kwork_irq,
1336	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
1337	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
1338	[KWORK_CLASS_SCHED]     = &kwork_sched,
1339};
1340
1341static void print_separator(int len)
1342{
1343	printf(" %.*s\n", len, graph_dotted_line);
1344}
1345
1346static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1347{
1348	int ret = 0;
1349	char kwork_name[PRINT_KWORK_NAME_WIDTH];
1350	char max_runtime_start[32], max_runtime_end[32];
1351	char max_latency_start[32], max_latency_end[32];
1352
1353	printf(" ");
1354
1355	/*
1356	 * kwork name
1357	 */
1358	if (work->class && work->class->work_name) {
1359		work->class->work_name(work, kwork_name,
1360				       PRINT_KWORK_NAME_WIDTH);
1361		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
1362	} else {
1363		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
1364	}
1365
1366	/*
1367	 * cpu
1368	 */
1369	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1370
1371	/*
1372	 * total runtime
1373	 */
1374	if (kwork->report == KWORK_REPORT_RUNTIME) {
1375		ret += printf(" %*.*f ms |",
1376			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1377			      (double)work->total_runtime / NSEC_PER_MSEC);
1378	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1379		ret += printf(" %*.*f ms |",
1380			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1381			      (double)work->total_latency /
1382			      work->nr_atoms / NSEC_PER_MSEC);
1383	}
1384
1385	/*
1386	 * count
1387	 */
1388	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1389
1390	/*
1391	 * max runtime, max runtime start, max runtime end
1392	 */
1393	if (kwork->report == KWORK_REPORT_RUNTIME) {
1394		timestamp__scnprintf_usec(work->max_runtime_start,
1395					  max_runtime_start,
1396					  sizeof(max_runtime_start));
1397		timestamp__scnprintf_usec(work->max_runtime_end,
1398					  max_runtime_end,
1399					  sizeof(max_runtime_end));
1400		ret += printf(" %*.*f ms | %*s s | %*s s |",
1401			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1402			      (double)work->max_runtime / NSEC_PER_MSEC,
1403			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
1404			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
1405	}
1406	/*
1407	 * max delay, max delay start, max delay end
1408	 */
1409	else if (kwork->report == KWORK_REPORT_LATENCY) {
1410		timestamp__scnprintf_usec(work->max_latency_start,
1411					  max_latency_start,
1412					  sizeof(max_latency_start));
1413		timestamp__scnprintf_usec(work->max_latency_end,
1414					  max_latency_end,
1415					  sizeof(max_latency_end));
1416		ret += printf(" %*.*f ms | %*s s | %*s s |",
1417			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1418			      (double)work->max_latency / NSEC_PER_MSEC,
1419			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
1420			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
1421	}
1422
1423	printf("\n");
1424	return ret;
1425}
1426
1427static int report_print_header(struct perf_kwork *kwork)
1428{
1429	int ret;
1430
1431	printf("\n ");
1432	ret = printf(" %-*s | %-*s |",
1433		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
1434		     PRINT_CPU_WIDTH, "Cpu");
1435
1436	if (kwork->report == KWORK_REPORT_RUNTIME) {
1437		ret += printf(" %-*s |",
1438			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
1439	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1440		ret += printf(" %-*s |",
1441			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
1442	}
1443
1444	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
1445
1446	if (kwork->report == KWORK_REPORT_RUNTIME) {
1447		ret += printf(" %-*s | %-*s | %-*s |",
1448			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
1449			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
1450			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
1451	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1452		ret += printf(" %-*s | %-*s | %-*s |",
1453			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
1454			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
1455			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
1456	}
1457
1458	printf("\n");
1459	print_separator(ret);
1460	return ret;
1461}
1462
1463static void timehist_print_header(void)
1464{
1465	/*
1466	 * header row
1467	 */
1468	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1469	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
1470	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
1471	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
1472	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
1473	       PRINT_RUNTIME_WIDTH, "Runtime",
1474	       PRINT_RUNTIME_WIDTH, "Delaytime");
1475
1476	/*
1477	 * units row
1478	 */
1479	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1480	       PRINT_TIMESTAMP_WIDTH, "",
1481	       PRINT_TIMESTAMP_WIDTH, "",
1482	       PRINT_TIMEHIST_CPU_WIDTH, "",
1483	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
1484	       PRINT_RUNTIME_WIDTH, "(msec)",
1485	       PRINT_RUNTIME_WIDTH, "(msec)");
1486
1487	/*
1488	 * separator
1489	 */
1490	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
1491	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1492	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1493	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
1494	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
1495	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
1496	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
1497}
1498
1499static void print_summary(struct perf_kwork *kwork)
1500{
1501	u64 time = kwork->timeend - kwork->timestart;
1502
1503	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
1504	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
1505	       (double)kwork->all_runtime / NSEC_PER_MSEC,
1506	       time == 0 ? 0 : (double)kwork->all_runtime / time);
1507	printf("  Total time span (msec) : %9.3f\n",
1508	       (double)time / NSEC_PER_MSEC);
1509}
1510
1511static unsigned long long nr_list_entry(struct list_head *head)
1512{
1513	struct list_head *pos;
1514	unsigned long long n = 0;
1515
1516	list_for_each(pos, head)
1517		n++;
1518
1519	return n;
1520}
1521
1522static void print_skipped_events(struct perf_kwork *kwork)
1523{
1524	int i;
1525	const char *const kwork_event_str[] = {
1526		[KWORK_TRACE_RAISE] = "raise",
1527		[KWORK_TRACE_ENTRY] = "entry",
1528		[KWORK_TRACE_EXIT]  = "exit",
1529	};
1530
1531	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1532	    (kwork->nr_events != 0)) {
1533		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
1534		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1535		       (double)kwork->nr_events * 100.0,
1536		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1537
1538		for (i = 0; i < KWORK_TRACE_MAX; i++) {
1539			printf("%" PRIu64 " %s%s",
1540			       kwork->nr_skipped_events[i],
1541			       kwork_event_str[i],
1542			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
1543		}
1544	}
1545
1546	if (verbose > 0)
1547		printf("  INFO: use %lld atom pages\n",
1548		       nr_list_entry(&kwork->atom_page_list));
1549}
1550
1551static void print_bad_events(struct perf_kwork *kwork)
1552{
1553	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1554		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1555		       (double)kwork->nr_lost_events /
1556		       (double)kwork->nr_events * 100.0,
1557		       kwork->nr_lost_events, kwork->nr_events,
1558		       kwork->nr_lost_chunks);
1559	}
1560}
1561
1562const char *graph_load = "||||||||||||||||||||||||||||||||||||||||||||||||";
1563const char *graph_idle = "                                                ";
1564static void top_print_per_cpu_load(struct perf_kwork *kwork)
1565{
1566	int i, load_width;
1567	u64 total, load, load_ratio;
1568	struct kwork_top_stat *stat = &kwork->top_stat;
1569
1570	for (i = 0; i < MAX_NR_CPUS; i++) {
1571		total = stat->cpus_runtime[i].total;
1572		load = stat->cpus_runtime[i].load;
1573		if (test_bit(i, stat->all_cpus_bitmap) && total) {
1574			load_ratio = load * 10000 / total;
1575			load_width = PRINT_CPU_USAGE_HIST_WIDTH *
1576				load_ratio / 10000;
1577
1578			printf("%%Cpu%-*d[%.*s%.*s %*.*f%%]\n",
1579			       PRINT_CPU_WIDTH, i,
1580			       load_width, graph_load,
1581			       PRINT_CPU_USAGE_HIST_WIDTH - load_width,
1582			       graph_idle,
1583			       PRINT_CPU_USAGE_WIDTH,
1584			       PRINT_CPU_USAGE_DECIMAL_WIDTH,
1585			       (double)load_ratio / 100);
1586		}
1587	}
1588}
1589
1590static void top_print_cpu_usage(struct perf_kwork *kwork)
1591{
1592	struct kwork_top_stat *stat = &kwork->top_stat;
1593	u64 idle_time = stat->cpus_runtime[MAX_NR_CPUS].idle;
1594	u64 hardirq_time = stat->cpus_runtime[MAX_NR_CPUS].irq;
1595	u64 softirq_time = stat->cpus_runtime[MAX_NR_CPUS].softirq;
1596	int cpus_nr = bitmap_weight(stat->all_cpus_bitmap, MAX_NR_CPUS);
1597	u64 cpus_total_time = stat->cpus_runtime[MAX_NR_CPUS].total;
1598
1599	printf("Total  : %*.*f ms, %d cpus\n",
1600	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1601	       (double)cpus_total_time / NSEC_PER_MSEC,
1602	       cpus_nr);
1603
1604	printf("%%Cpu(s): %*.*f%% id, %*.*f%% hi, %*.*f%% si\n",
1605	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1606	       cpus_total_time ? (double)idle_time * 100 / cpus_total_time : 0,
1607
1608	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1609	       cpus_total_time ? (double)hardirq_time * 100 / cpus_total_time : 0,
1610
1611	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1612	       cpus_total_time ? (double)softirq_time * 100 / cpus_total_time : 0);
1613
1614	top_print_per_cpu_load(kwork);
1615}
1616
1617static void top_print_header(struct perf_kwork *kwork __maybe_unused)
1618{
1619	int ret;
1620
1621	printf("\n ");
1622	ret = printf(" %*s %s%*s%s %*s  %*s  %-*s",
1623		     PRINT_PID_WIDTH, "PID",
1624
1625		     kwork->use_bpf ? " " : "",
1626		     kwork->use_bpf ? PRINT_PID_WIDTH : 0,
1627		     kwork->use_bpf ? "SPID" : "",
1628		     kwork->use_bpf ? " " : "",
1629
1630		     PRINT_CPU_USAGE_WIDTH, "%CPU",
1631		     PRINT_RUNTIME_HEADER_WIDTH + RPINT_DECIMAL_WIDTH, "RUNTIME",
1632		     PRINT_TASK_NAME_WIDTH, "COMMAND");
1633	printf("\n ");
1634	print_separator(ret);
1635}
1636
1637static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
1638{
1639	int ret = 0;
1640
1641	printf(" ");
1642
1643	/*
1644	 * pid
1645	 */
1646	ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id);
1647
1648	/*
1649	 * tgid
1650	 */
1651	if (kwork->use_bpf)
1652		ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid);
1653
1654	/*
1655	 * cpu usage
1656	 */
1657	ret += printf(" %*.*f ",
1658		      PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1659		      (double)work->cpu_usage / 100);
1660
1661	/*
1662	 * total runtime
1663	 */
1664	ret += printf(" %*.*f ms ",
1665		      PRINT_RUNTIME_WIDTH + RPINT_DECIMAL_WIDTH, RPINT_DECIMAL_WIDTH,
1666		      (double)work->total_runtime / NSEC_PER_MSEC);
1667
1668	/*
1669	 * command
1670	 */
1671	if (kwork->use_bpf)
1672		ret += printf(" %s%s%s",
1673			      work->is_kthread ? "[" : "",
1674			      work->name,
1675			      work->is_kthread ? "]" : "");
1676	else
1677		ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
1678
1679	printf("\n");
1680	return ret;
1681}
1682
1683static void work_sort(struct perf_kwork *kwork,
1684		      struct kwork_class *class, struct rb_root_cached *root)
1685{
1686	struct rb_node *node;
1687	struct kwork_work *data;
1688
1689	pr_debug("Sorting %s ...\n", class->name);
1690	for (;;) {
1691		node = rb_first_cached(root);
1692		if (!node)
1693			break;
1694
1695		rb_erase_cached(node, root);
1696		data = rb_entry(node, struct kwork_work, node);
1697		work_insert(&kwork->sorted_work_root,
1698			       data, &kwork->sort_list);
1699	}
1700}
1701
1702static void perf_kwork__sort(struct perf_kwork *kwork)
1703{
1704	struct kwork_class *class;
1705
1706	list_for_each_entry(class, &kwork->class_list, list)
1707		work_sort(kwork, class, &class->work_root);
1708}
1709
1710static int perf_kwork__check_config(struct perf_kwork *kwork,
1711				    struct perf_session *session)
1712{
1713	int ret;
1714	struct evsel *evsel;
1715	struct kwork_class *class;
1716
1717	static struct trace_kwork_handler report_ops = {
1718		.entry_event = report_entry_event,
1719		.exit_event  = report_exit_event,
1720	};
1721	static struct trace_kwork_handler latency_ops = {
1722		.raise_event = latency_raise_event,
1723		.entry_event = latency_entry_event,
1724	};
1725	static struct trace_kwork_handler timehist_ops = {
1726		.raise_event = timehist_raise_event,
1727		.entry_event = timehist_entry_event,
1728		.exit_event  = timehist_exit_event,
1729	};
1730	static struct trace_kwork_handler top_ops = {
1731		.entry_event        = timehist_entry_event,
1732		.exit_event         = top_exit_event,
1733		.sched_switch_event = top_sched_switch_event,
1734	};
1735
1736	switch (kwork->report) {
1737	case KWORK_REPORT_RUNTIME:
1738		kwork->tp_handler = &report_ops;
1739		break;
1740	case KWORK_REPORT_LATENCY:
1741		kwork->tp_handler = &latency_ops;
1742		break;
1743	case KWORK_REPORT_TIMEHIST:
1744		kwork->tp_handler = &timehist_ops;
1745		break;
1746	case KWORK_REPORT_TOP:
1747		kwork->tp_handler = &top_ops;
1748		break;
1749	default:
1750		pr_debug("Invalid report type %d\n", kwork->report);
1751		return -1;
1752	}
1753
1754	list_for_each_entry(class, &kwork->class_list, list)
1755		if ((class->class_init != NULL) &&
1756		    (class->class_init(class, session) != 0))
1757			return -1;
1758
1759	if (kwork->cpu_list != NULL) {
1760		ret = perf_session__cpu_bitmap(session,
1761					       kwork->cpu_list,
1762					       kwork->cpu_bitmap);
1763		if (ret < 0) {
1764			pr_err("Invalid cpu bitmap\n");
1765			return -1;
1766		}
1767	}
1768
1769	if (kwork->time_str != NULL) {
1770		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1771		if (ret != 0) {
1772			pr_err("Invalid time span\n");
1773			return -1;
1774		}
1775	}
1776
1777	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
1778		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1779			pr_debug("Samples do not have callchains\n");
1780			kwork->show_callchain = 0;
1781			symbol_conf.use_callchain = 0;
1782		}
1783	}
1784
1785	return 0;
1786}
1787
1788static int perf_kwork__read_events(struct perf_kwork *kwork)
1789{
1790	int ret = -1;
1791	struct perf_session *session = NULL;
1792
1793	struct perf_data data = {
1794		.path  = input_name,
1795		.mode  = PERF_DATA_MODE_READ,
1796		.force = kwork->force,
1797	};
1798
1799	session = perf_session__new(&data, &kwork->tool);
1800	if (IS_ERR(session)) {
1801		pr_debug("Error creating perf session\n");
1802		return PTR_ERR(session);
1803	}
1804
1805	symbol__init(&session->header.env);
1806
1807	if (perf_kwork__check_config(kwork, session) != 0)
1808		goto out_delete;
1809
1810	if (session->tevent.pevent &&
1811	    tep_set_function_resolver(session->tevent.pevent,
1812				      machine__resolve_kernel_addr,
1813				      &session->machines.host) < 0) {
1814		pr_err("Failed to set libtraceevent function resolver\n");
1815		goto out_delete;
1816	}
1817
1818	if (kwork->report == KWORK_REPORT_TIMEHIST)
1819		timehist_print_header();
1820
1821	ret = perf_session__process_events(session);
1822	if (ret) {
1823		pr_debug("Failed to process events, error %d\n", ret);
1824		goto out_delete;
1825	}
1826
1827	kwork->nr_events      = session->evlist->stats.nr_events[0];
1828	kwork->nr_lost_events = session->evlist->stats.total_lost;
1829	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1830
1831out_delete:
1832	perf_session__delete(session);
1833	return ret;
1834}
1835
1836static void process_skipped_events(struct perf_kwork *kwork,
1837				   struct kwork_work *work)
1838{
1839	int i;
1840	unsigned long long count;
1841
1842	for (i = 0; i < KWORK_TRACE_MAX; i++) {
1843		count = nr_list_entry(&work->atom_list[i]);
1844		kwork->nr_skipped_events[i] += count;
1845		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1846	}
1847}
1848
1849struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1850				       struct kwork_class *class,
1851				       struct kwork_work *key)
1852{
1853	struct kwork_work *work = NULL;
1854
1855	work = work_new(key);
1856	if (work == NULL)
1857		return NULL;
1858
1859	work_insert(&class->work_root, work, &kwork->cmp_id);
1860	return work;
1861}
1862
1863static void sig_handler(int sig)
1864{
1865	/*
1866	 * Simply capture termination signal so that
1867	 * the program can continue after pause returns
1868	 */
1869	pr_debug("Capture signal %d\n", sig);
1870}
1871
1872static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1873{
1874	int ret;
1875
1876	signal(SIGINT, sig_handler);
1877	signal(SIGTERM, sig_handler);
1878
1879	ret = perf_kwork__trace_prepare_bpf(kwork);
1880	if (ret)
1881		return -1;
1882
1883	printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
1884
1885	perf_kwork__trace_start();
1886
1887	/*
1888	 * a simple pause, wait here for stop signal
1889	 */
1890	pause();
1891
1892	perf_kwork__trace_finish();
1893
1894	perf_kwork__report_read_bpf(kwork);
1895
1896	perf_kwork__report_cleanup_bpf();
1897
1898	return 0;
1899}
1900
1901static int perf_kwork__report(struct perf_kwork *kwork)
1902{
1903	int ret;
1904	struct rb_node *next;
1905	struct kwork_work *work;
1906
1907	if (kwork->use_bpf)
1908		ret = perf_kwork__report_bpf(kwork);
1909	else
1910		ret = perf_kwork__read_events(kwork);
1911
1912	if (ret != 0)
1913		return -1;
1914
1915	perf_kwork__sort(kwork);
1916
1917	setup_pager();
1918
1919	ret = report_print_header(kwork);
1920	next = rb_first_cached(&kwork->sorted_work_root);
1921	while (next) {
1922		work = rb_entry(next, struct kwork_work, node);
1923		process_skipped_events(kwork, work);
1924
1925		if (work->nr_atoms != 0) {
1926			report_print_work(kwork, work);
1927			if (kwork->summary) {
1928				kwork->all_runtime += work->total_runtime;
1929				kwork->all_count += work->nr_atoms;
1930			}
1931		}
1932		next = rb_next(next);
1933	}
1934	print_separator(ret);
1935
1936	if (kwork->summary) {
1937		print_summary(kwork);
1938		print_separator(ret);
1939	}
1940
1941	print_bad_events(kwork);
1942	print_skipped_events(kwork);
1943	printf("\n");
1944
1945	return 0;
1946}
1947
1948typedef int (*tracepoint_handler)(const struct perf_tool *tool,
1949				  struct evsel *evsel,
1950				  struct perf_sample *sample,
1951				  struct machine *machine);
1952
1953static int perf_kwork__process_tracepoint_sample(const struct perf_tool *tool,
1954						 union perf_event *event __maybe_unused,
1955						 struct perf_sample *sample,
1956						 struct evsel *evsel,
1957						 struct machine *machine)
1958{
1959	int err = 0;
1960
1961	if (evsel->handler != NULL) {
1962		tracepoint_handler f = evsel->handler;
1963
1964		err = f(tool, evsel, sample, machine);
1965	}
1966
1967	return err;
1968}
1969
1970static int perf_kwork__timehist(struct perf_kwork *kwork)
1971{
1972	/*
1973	 * event handlers for timehist option
1974	 */
1975	kwork->tool.comm	 = perf_event__process_comm;
1976	kwork->tool.exit	 = perf_event__process_exit;
1977	kwork->tool.fork	 = perf_event__process_fork;
1978	kwork->tool.attr	 = perf_event__process_attr;
1979	kwork->tool.tracing_data = perf_event__process_tracing_data;
1980	kwork->tool.build_id	 = perf_event__process_build_id;
1981	kwork->tool.ordered_events = true;
1982	kwork->tool.ordering_requires_timestamps = true;
1983	symbol_conf.use_callchain = kwork->show_callchain;
1984
1985	if (symbol__validate_sym_arguments()) {
1986		pr_err("Failed to validate sym arguments\n");
1987		return -1;
1988	}
1989
1990	setup_pager();
1991
1992	return perf_kwork__read_events(kwork);
1993}
1994
1995static void top_calc_total_runtime(struct perf_kwork *kwork)
1996{
1997	struct kwork_class *class;
1998	struct kwork_work *work;
1999	struct rb_node *next;
2000	struct kwork_top_stat *stat = &kwork->top_stat;
2001
2002	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2003	if (!class)
2004		return;
2005
2006	next = rb_first_cached(&class->work_root);
2007	while (next) {
2008		work = rb_entry(next, struct kwork_work, node);
2009		BUG_ON(work->cpu >= MAX_NR_CPUS);
2010		stat->cpus_runtime[work->cpu].total += work->total_runtime;
2011		stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
2012		next = rb_next(next);
2013	}
2014}
2015
2016static void top_calc_idle_time(struct perf_kwork *kwork,
2017				struct kwork_work *work)
2018{
2019	struct kwork_top_stat *stat = &kwork->top_stat;
2020
2021	if (work->id == 0) {
2022		stat->cpus_runtime[work->cpu].idle += work->total_runtime;
2023		stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
2024	}
2025}
2026
2027static void top_calc_irq_runtime(struct perf_kwork *kwork,
2028				 enum kwork_class_type type,
2029				 struct kwork_work *work)
2030{
2031	struct kwork_top_stat *stat = &kwork->top_stat;
2032
2033	if (type == KWORK_CLASS_IRQ) {
2034		stat->cpus_runtime[work->cpu].irq += work->total_runtime;
2035		stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
2036	} else if (type == KWORK_CLASS_SOFTIRQ) {
2037		stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
2038		stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime;
2039	}
2040}
2041
2042static void top_subtract_irq_runtime(struct perf_kwork *kwork,
2043				     struct kwork_work *work)
2044{
2045	struct kwork_class *class;
2046	struct kwork_work *data;
2047	unsigned int i;
2048	int irq_class_list[] = {KWORK_CLASS_IRQ, KWORK_CLASS_SOFTIRQ};
2049
2050	for (i = 0; i < ARRAY_SIZE(irq_class_list); i++) {
2051		class = get_kwork_class(kwork, irq_class_list[i]);
2052		if (!class)
2053			continue;
2054
2055		data = find_work_by_id(&class->work_root,
2056				       work->id, work->cpu);
2057		if (!data)
2058			continue;
2059
2060		if (work->total_runtime > data->total_runtime) {
2061			work->total_runtime -= data->total_runtime;
2062			top_calc_irq_runtime(kwork, irq_class_list[i], data);
2063		}
2064	}
2065}
2066
2067static void top_calc_cpu_usage(struct perf_kwork *kwork)
2068{
2069	struct kwork_class *class;
2070	struct kwork_work *work;
2071	struct rb_node *next;
2072	struct kwork_top_stat *stat = &kwork->top_stat;
2073
2074	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2075	if (!class)
2076		return;
2077
2078	next = rb_first_cached(&class->work_root);
2079	while (next) {
2080		work = rb_entry(next, struct kwork_work, node);
2081
2082		if (work->total_runtime == 0)
2083			goto next;
2084
2085		__set_bit(work->cpu, stat->all_cpus_bitmap);
2086
2087		top_subtract_irq_runtime(kwork, work);
2088
2089		work->cpu_usage = work->total_runtime * 10000 /
2090			stat->cpus_runtime[work->cpu].total;
2091
2092		top_calc_idle_time(kwork, work);
2093next:
2094		next = rb_next(next);
2095	}
2096}
2097
2098static void top_calc_load_runtime(struct perf_kwork *kwork,
2099				  struct kwork_work *work)
2100{
2101	struct kwork_top_stat *stat = &kwork->top_stat;
2102
2103	if (work->id != 0) {
2104		stat->cpus_runtime[work->cpu].load += work->total_runtime;
2105		stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
2106	}
2107}
2108
2109static void top_merge_tasks(struct perf_kwork *kwork)
2110{
2111	struct kwork_work *merged_work, *data;
2112	struct kwork_class *class;
2113	struct rb_node *node;
2114	int cpu;
2115	struct rb_root_cached merged_root = RB_ROOT_CACHED;
2116
2117	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2118	if (!class)
2119		return;
2120
2121	for (;;) {
2122		node = rb_first_cached(&class->work_root);
2123		if (!node)
2124			break;
2125
2126		rb_erase_cached(node, &class->work_root);
2127		data = rb_entry(node, struct kwork_work, node);
2128
2129		if (!profile_name_match(kwork, data))
2130			continue;
2131
2132		cpu = data->cpu;
2133		merged_work = find_work_by_id(&merged_root, data->id,
2134					      data->id == 0 ? cpu : -1);
2135		if (!merged_work) {
2136			work_insert(&merged_root, data, &kwork->cmp_id);
2137		} else {
2138			merged_work->total_runtime += data->total_runtime;
2139			merged_work->cpu_usage += data->cpu_usage;
2140		}
2141
2142		top_calc_load_runtime(kwork, data);
2143	}
2144
2145	work_sort(kwork, class, &merged_root);
2146}
2147
2148static void perf_kwork__top_report(struct perf_kwork *kwork)
2149{
2150	struct kwork_work *work;
2151	struct rb_node *next;
2152
2153	printf("\n");
2154
2155	top_print_cpu_usage(kwork);
2156	top_print_header(kwork);
2157	next = rb_first_cached(&kwork->sorted_work_root);
2158	while (next) {
2159		work = rb_entry(next, struct kwork_work, node);
2160		process_skipped_events(kwork, work);
2161
2162		if (work->total_runtime == 0)
2163			goto next;
2164
2165		top_print_work(kwork, work);
2166
2167next:
2168		next = rb_next(next);
2169	}
2170
2171	printf("\n");
2172}
2173
2174static int perf_kwork__top_bpf(struct perf_kwork *kwork)
2175{
2176	int ret;
2177
2178	signal(SIGINT, sig_handler);
2179	signal(SIGTERM, sig_handler);
2180
2181	ret = perf_kwork__top_prepare_bpf(kwork);
2182	if (ret)
2183		return -1;
2184
2185	printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
2186
2187	perf_kwork__top_start();
2188
2189	/*
2190	 * a simple pause, wait here for stop signal
2191	 */
2192	pause();
2193
2194	perf_kwork__top_finish();
2195
2196	perf_kwork__top_read_bpf(kwork);
2197
2198	perf_kwork__top_cleanup_bpf();
2199
2200	return 0;
2201
2202}
2203
2204static int perf_kwork__top(struct perf_kwork *kwork)
2205{
2206	struct __top_cpus_runtime *cpus_runtime;
2207	int ret = 0;
2208
2209	cpus_runtime = zalloc(sizeof(struct __top_cpus_runtime) * (MAX_NR_CPUS + 1));
2210	if (!cpus_runtime)
2211		return -1;
2212
2213	kwork->top_stat.cpus_runtime = cpus_runtime;
2214	bitmap_zero(kwork->top_stat.all_cpus_bitmap, MAX_NR_CPUS);
2215
2216	if (kwork->use_bpf)
2217		ret = perf_kwork__top_bpf(kwork);
2218	else
2219		ret = perf_kwork__read_events(kwork);
2220
2221	if (ret)
2222		goto out;
2223
2224	top_calc_total_runtime(kwork);
2225	top_calc_cpu_usage(kwork);
2226	top_merge_tasks(kwork);
2227
2228	setup_pager();
2229
2230	perf_kwork__top_report(kwork);
2231
2232out:
2233	zfree(&kwork->top_stat.cpus_runtime);
2234	return ret;
2235}
2236
2237static void setup_event_list(struct perf_kwork *kwork,
2238			     const struct option *options,
2239			     const char * const usage_msg[])
2240{
2241	int i;
2242	struct kwork_class *class;
2243	char *tmp, *tok, *str;
2244
2245	/*
2246	 * set default events list if not specified
2247	 */
2248	if (kwork->event_list_str == NULL)
2249		kwork->event_list_str = "irq, softirq, workqueue";
2250
2251	str = strdup(kwork->event_list_str);
2252	for (tok = strtok_r(str, ", ", &tmp);
2253	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
2254		for (i = 0; i < KWORK_CLASS_MAX; i++) {
2255			class = kwork_class_supported_list[i];
2256			if (strcmp(tok, class->name) == 0) {
2257				list_add_tail(&class->list, &kwork->class_list);
2258				break;
2259			}
2260		}
2261		if (i == KWORK_CLASS_MAX) {
2262			usage_with_options_msg(usage_msg, options,
2263					       "Unknown --event key: `%s'", tok);
2264		}
2265	}
2266	free(str);
2267
2268	pr_debug("Config event list:");
2269	list_for_each_entry(class, &kwork->class_list, list)
2270		pr_debug(" %s", class->name);
2271	pr_debug("\n");
2272}
2273
2274static int perf_kwork__record(struct perf_kwork *kwork,
2275			      int argc, const char **argv)
2276{
2277	const char **rec_argv;
2278	unsigned int rec_argc, i, j;
2279	struct kwork_class *class;
2280
2281	const char *const record_args[] = {
2282		"record",
2283		"-a",
2284		"-R",
2285		"-m", "1024",
2286		"-c", "1",
2287	};
2288
2289	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
2290
2291	list_for_each_entry(class, &kwork->class_list, list)
2292		rec_argc += 2 * class->nr_tracepoints;
2293
2294	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2295	if (rec_argv == NULL)
2296		return -ENOMEM;
2297
2298	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2299		rec_argv[i] = strdup(record_args[i]);
2300
2301	list_for_each_entry(class, &kwork->class_list, list) {
2302		for (j = 0; j < class->nr_tracepoints; j++) {
2303			rec_argv[i++] = strdup("-e");
2304			rec_argv[i++] = strdup(class->tp_handlers[j].name);
2305		}
2306	}
2307
2308	for (j = 1; j < (unsigned int)argc; j++, i++)
2309		rec_argv[i] = argv[j];
2310
2311	BUG_ON(i != rec_argc);
2312
2313	pr_debug("record comm: ");
2314	for (j = 0; j < rec_argc; j++)
2315		pr_debug("%s ", rec_argv[j]);
2316	pr_debug("\n");
2317
2318	return cmd_record(i, rec_argv);
2319}
2320
2321int cmd_kwork(int argc, const char **argv)
2322{
2323	static struct perf_kwork kwork = {
2324		.class_list          = LIST_HEAD_INIT(kwork.class_list),
 
 
 
 
 
 
2325		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
2326		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
2327		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
2328		.sorted_work_root    = RB_ROOT_CACHED,
2329		.tp_handler          = NULL,
2330		.profile_name        = NULL,
2331		.cpu_list            = NULL,
2332		.time_str            = NULL,
2333		.force               = false,
2334		.event_list_str      = NULL,
2335		.summary             = false,
2336		.sort_order          = NULL,
2337		.show_callchain      = false,
2338		.max_stack           = 5,
2339		.timestart           = 0,
2340		.timeend             = 0,
2341		.nr_events           = 0,
2342		.nr_lost_chunks      = 0,
2343		.nr_lost_events      = 0,
2344		.all_runtime         = 0,
2345		.all_count           = 0,
2346		.nr_skipped_events   = { 0 },
2347	};
2348	static const char default_report_sort_order[] = "runtime, max, count";
2349	static const char default_latency_sort_order[] = "avg, max, count";
2350	static const char default_top_sort_order[] = "rate, runtime";
2351	const struct option kwork_options[] = {
2352	OPT_INCR('v', "verbose", &verbose,
2353		 "be more verbose (show symbol address, etc)"),
2354	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
2355		    "dump raw trace in ASCII"),
2356	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
2357		   "list of kwork to profile (irq, softirq, workqueue, sched, etc)"),
2358	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
2359	OPT_END()
2360	};
2361	const struct option report_options[] = {
2362	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2363		   "sort by key(s): runtime, max, count"),
2364	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2365		   "list of cpus to profile"),
2366	OPT_STRING('n', "name", &kwork.profile_name, "name",
2367		   "event name to profile"),
2368	OPT_STRING(0, "time", &kwork.time_str, "str",
2369		   "Time span for analysis (start,stop)"),
2370	OPT_STRING('i', "input", &input_name, "file",
2371		   "input file name"),
2372	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
2373		    "Show summary with statistics"),
2374#ifdef HAVE_BPF_SKEL
2375	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2376		    "Use BPF to measure kwork runtime"),
2377#endif
2378	OPT_PARENT(kwork_options)
2379	};
2380	const struct option latency_options[] = {
2381	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2382		   "sort by key(s): avg, max, count"),
2383	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2384		   "list of cpus to profile"),
2385	OPT_STRING('n', "name", &kwork.profile_name, "name",
2386		   "event name to profile"),
2387	OPT_STRING(0, "time", &kwork.time_str, "str",
2388		   "Time span for analysis (start,stop)"),
2389	OPT_STRING('i', "input", &input_name, "file",
2390		   "input file name"),
2391#ifdef HAVE_BPF_SKEL
2392	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2393		    "Use BPF to measure kwork latency"),
2394#endif
2395	OPT_PARENT(kwork_options)
2396	};
2397	const struct option timehist_options[] = {
2398	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
2399		   "file", "vmlinux pathname"),
2400	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
2401		   "file", "kallsyms pathname"),
2402	OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
2403		    "Display call chains if present"),
2404	OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
2405		   "Maximum number of functions to display backtrace."),
2406	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
2407		    "Look for files with symbols relative to this directory"),
2408	OPT_STRING(0, "time", &kwork.time_str, "str",
2409		   "Time span for analysis (start,stop)"),
2410	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2411		   "list of cpus to profile"),
2412	OPT_STRING('n', "name", &kwork.profile_name, "name",
2413		   "event name to profile"),
2414	OPT_STRING('i', "input", &input_name, "file",
2415		   "input file name"),
2416	OPT_PARENT(kwork_options)
2417	};
2418	const struct option top_options[] = {
2419	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2420		   "sort by key(s): rate, runtime, tid"),
2421	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2422		   "list of cpus to profile"),
2423	OPT_STRING('n', "name", &kwork.profile_name, "name",
2424		   "event name to profile"),
2425	OPT_STRING(0, "time", &kwork.time_str, "str",
2426		   "Time span for analysis (start,stop)"),
2427	OPT_STRING('i', "input", &input_name, "file",
2428		   "input file name"),
2429#ifdef HAVE_BPF_SKEL
2430	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2431		    "Use BPF to measure task cpu usage"),
2432#endif
2433	OPT_PARENT(kwork_options)
2434	};
2435	const char *kwork_usage[] = {
2436		NULL,
2437		NULL
2438	};
2439	const char * const report_usage[] = {
2440		"perf kwork report [<options>]",
2441		NULL
2442	};
2443	const char * const latency_usage[] = {
2444		"perf kwork latency [<options>]",
2445		NULL
2446	};
2447	const char * const timehist_usage[] = {
2448		"perf kwork timehist [<options>]",
2449		NULL
2450	};
2451	const char * const top_usage[] = {
2452		"perf kwork top [<options>]",
2453		NULL
2454	};
2455	const char *const kwork_subcommands[] = {
2456		"record", "report", "latency", "timehist", "top", NULL
2457	};
2458
2459	perf_tool__init(&kwork.tool, /*ordered_events=*/true);
2460	kwork.tool.mmap	  = perf_event__process_mmap;
2461	kwork.tool.mmap2  = perf_event__process_mmap2;
2462	kwork.tool.sample = perf_kwork__process_tracepoint_sample;
2463
2464	argc = parse_options_subcommand(argc, argv, kwork_options,
2465					kwork_subcommands, kwork_usage,
2466					PARSE_OPT_STOP_AT_NON_OPTION);
2467	if (!argc)
2468		usage_with_options(kwork_usage, kwork_options);
2469
2470	sort_dimension__add(&kwork, "id", &kwork.cmp_id);
2471
2472	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2473		setup_event_list(&kwork, kwork_options, kwork_usage);
2474		return perf_kwork__record(&kwork, argc, argv);
2475	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
2476		kwork.sort_order = default_report_sort_order;
2477		if (argc > 1) {
2478			argc = parse_options(argc, argv, report_options, report_usage, 0);
2479			if (argc)
2480				usage_with_options(report_usage, report_options);
2481		}
2482		kwork.report = KWORK_REPORT_RUNTIME;
2483		setup_sorting(&kwork, report_options, report_usage);
2484		setup_event_list(&kwork, kwork_options, kwork_usage);
2485		return perf_kwork__report(&kwork);
2486	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
2487		kwork.sort_order = default_latency_sort_order;
2488		if (argc > 1) {
2489			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
2490			if (argc)
2491				usage_with_options(latency_usage, latency_options);
2492		}
2493		kwork.report = KWORK_REPORT_LATENCY;
2494		setup_sorting(&kwork, latency_options, latency_usage);
2495		setup_event_list(&kwork, kwork_options, kwork_usage);
2496		return perf_kwork__report(&kwork);
2497	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
2498		if (argc > 1) {
2499			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
2500			if (argc)
2501				usage_with_options(timehist_usage, timehist_options);
2502		}
2503		kwork.report = KWORK_REPORT_TIMEHIST;
2504		setup_event_list(&kwork, kwork_options, kwork_usage);
2505		return perf_kwork__timehist(&kwork);
2506	} else if (strlen(argv[0]) > 2 && strstarts("top", argv[0])) {
2507		kwork.sort_order = default_top_sort_order;
2508		if (argc > 1) {
2509			argc = parse_options(argc, argv, top_options, top_usage, 0);
2510			if (argc)
2511				usage_with_options(top_usage, top_options);
2512		}
2513		kwork.report = KWORK_REPORT_TOP;
2514		if (!kwork.event_list_str)
2515			kwork.event_list_str = "sched, irq, softirq";
2516		setup_event_list(&kwork, kwork_options, kwork_usage);
2517		setup_sorting(&kwork, top_options, top_usage);
2518		return perf_kwork__top(&kwork);
2519	} else
2520		usage_with_options(kwork_usage, kwork_options);
2521
2522	/* free usage string allocated by parse_options_subcommand */
2523	free((void *)kwork_usage[0]);
2524
2525	return 0;
2526}
v6.8
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-kwork.c
   4 *
   5 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
   6 */
   7
   8#include "builtin.h"
   9
  10#include "util/data.h"
  11#include "util/evlist.h"
  12#include "util/evsel.h"
  13#include "util/header.h"
  14#include "util/kwork.h"
  15#include "util/debug.h"
  16#include "util/session.h"
  17#include "util/symbol.h"
  18#include "util/thread.h"
  19#include "util/string2.h"
  20#include "util/callchain.h"
  21#include "util/evsel_fprintf.h"
  22#include "util/util.h"
  23
  24#include <subcmd/pager.h>
  25#include <subcmd/parse-options.h>
  26#include <traceevent/event-parse.h>
  27
  28#include <errno.h>
  29#include <inttypes.h>
  30#include <signal.h>
  31#include <linux/err.h>
  32#include <linux/time64.h>
  33#include <linux/zalloc.h>
  34
  35/*
  36 * report header elements width
  37 */
  38#define PRINT_CPU_WIDTH 4
  39#define PRINT_COUNT_WIDTH 9
  40#define PRINT_RUNTIME_WIDTH 10
  41#define PRINT_LATENCY_WIDTH 10
  42#define PRINT_TIMESTAMP_WIDTH 17
  43#define PRINT_KWORK_NAME_WIDTH 30
  44#define RPINT_DECIMAL_WIDTH 3
  45#define PRINT_BRACKETPAIR_WIDTH 2
  46#define PRINT_TIME_UNIT_SEC_WIDTH 2
  47#define PRINT_TIME_UNIT_MESC_WIDTH 3
  48#define PRINT_PID_WIDTH 7
  49#define PRINT_TASK_NAME_WIDTH 16
  50#define PRINT_CPU_USAGE_WIDTH 6
  51#define PRINT_CPU_USAGE_DECIMAL_WIDTH 2
  52#define PRINT_CPU_USAGE_HIST_WIDTH 30
  53#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  54#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  55#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
  56#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
  57
  58struct sort_dimension {
  59	const char      *name;
  60	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
  61	struct          list_head list;
  62};
  63
  64static int id_cmp(struct kwork_work *l, struct kwork_work *r)
  65{
  66	if (l->cpu > r->cpu)
  67		return 1;
  68	if (l->cpu < r->cpu)
  69		return -1;
  70
  71	if (l->id > r->id)
  72		return 1;
  73	if (l->id < r->id)
  74		return -1;
  75
  76	return 0;
  77}
  78
  79static int count_cmp(struct kwork_work *l, struct kwork_work *r)
  80{
  81	if (l->nr_atoms > r->nr_atoms)
  82		return 1;
  83	if (l->nr_atoms < r->nr_atoms)
  84		return -1;
  85
  86	return 0;
  87}
  88
  89static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
  90{
  91	if (l->total_runtime > r->total_runtime)
  92		return 1;
  93	if (l->total_runtime < r->total_runtime)
  94		return -1;
  95
  96	return 0;
  97}
  98
  99static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
 100{
 101	if (l->max_runtime > r->max_runtime)
 102		return 1;
 103	if (l->max_runtime < r->max_runtime)
 104		return -1;
 105
 106	return 0;
 107}
 108
 109static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 110{
 111	u64 avgl, avgr;
 112
 113	if (!r->nr_atoms)
 114		return 1;
 115	if (!l->nr_atoms)
 116		return -1;
 117
 118	avgl = l->total_latency / l->nr_atoms;
 119	avgr = r->total_latency / r->nr_atoms;
 120
 121	if (avgl > avgr)
 122		return 1;
 123	if (avgl < avgr)
 124		return -1;
 125
 126	return 0;
 127}
 128
 129static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 130{
 131	if (l->max_latency > r->max_latency)
 132		return 1;
 133	if (l->max_latency < r->max_latency)
 134		return -1;
 135
 136	return 0;
 137}
 138
 139static int cpu_usage_cmp(struct kwork_work *l, struct kwork_work *r)
 140{
 141	if (l->cpu_usage > r->cpu_usage)
 142		return 1;
 143	if (l->cpu_usage < r->cpu_usage)
 144		return -1;
 145
 146	return 0;
 147}
 148
 149static int id_or_cpu_r_cmp(struct kwork_work *l, struct kwork_work *r)
 150{
 151	if (l->id < r->id)
 152		return 1;
 153	if (l->id > r->id)
 154		return -1;
 155
 156	if (l->id != 0)
 157		return 0;
 158
 159	if (l->cpu < r->cpu)
 160		return 1;
 161	if (l->cpu > r->cpu)
 162		return -1;
 163
 164	return 0;
 165}
 166
 167static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
 168			       const char *tok, struct list_head *list)
 169{
 170	size_t i;
 171	static struct sort_dimension max_sort_dimension = {
 172		.name = "max",
 173		.cmp  = max_runtime_cmp,
 174	};
 175	static struct sort_dimension id_sort_dimension = {
 176		.name = "id",
 177		.cmp  = id_cmp,
 178	};
 179	static struct sort_dimension runtime_sort_dimension = {
 180		.name = "runtime",
 181		.cmp  = runtime_cmp,
 182	};
 183	static struct sort_dimension count_sort_dimension = {
 184		.name = "count",
 185		.cmp  = count_cmp,
 186	};
 187	static struct sort_dimension avg_sort_dimension = {
 188		.name = "avg",
 189		.cmp  = avg_latency_cmp,
 190	};
 191	static struct sort_dimension rate_sort_dimension = {
 192		.name = "rate",
 193		.cmp  = cpu_usage_cmp,
 194	};
 195	static struct sort_dimension tid_sort_dimension = {
 196		.name = "tid",
 197		.cmp  = id_or_cpu_r_cmp,
 198	};
 199	struct sort_dimension *available_sorts[] = {
 200		&id_sort_dimension,
 201		&max_sort_dimension,
 202		&count_sort_dimension,
 203		&runtime_sort_dimension,
 204		&avg_sort_dimension,
 205		&rate_sort_dimension,
 206		&tid_sort_dimension,
 207	};
 208
 209	if (kwork->report == KWORK_REPORT_LATENCY)
 210		max_sort_dimension.cmp = max_latency_cmp;
 211
 212	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
 213		if (!strcmp(available_sorts[i]->name, tok)) {
 214			list_add_tail(&available_sorts[i]->list, list);
 215			return 0;
 216		}
 217	}
 218
 219	return -1;
 220}
 221
 222static void setup_sorting(struct perf_kwork *kwork,
 223			  const struct option *options,
 224			  const char * const usage_msg[])
 225{
 226	char *tmp, *tok, *str = strdup(kwork->sort_order);
 227
 228	for (tok = strtok_r(str, ", ", &tmp);
 229	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
 230		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
 231			usage_with_options_msg(usage_msg, options,
 232					       "Unknown --sort key: `%s'", tok);
 233	}
 234
 235	pr_debug("Sort order: %s\n", kwork->sort_order);
 236	free(str);
 237}
 238
 239static struct kwork_atom *atom_new(struct perf_kwork *kwork,
 240				   struct perf_sample *sample)
 241{
 242	unsigned long i;
 243	struct kwork_atom_page *page;
 244	struct kwork_atom *atom = NULL;
 245
 246	list_for_each_entry(page, &kwork->atom_page_list, list) {
 247		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
 248			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
 249			BUG_ON(i >= NR_ATOM_PER_PAGE);
 250			atom = &page->atoms[i];
 251			goto found_atom;
 252		}
 253	}
 254
 255	/*
 256	 * new page
 257	 */
 258	page = zalloc(sizeof(*page));
 259	if (page == NULL) {
 260		pr_err("Failed to zalloc kwork atom page\n");
 261		return NULL;
 262	}
 263
 264	i = 0;
 265	atom = &page->atoms[0];
 266	list_add_tail(&page->list, &kwork->atom_page_list);
 267
 268found_atom:
 269	__set_bit(i, page->bitmap);
 270	atom->time = sample->time;
 271	atom->prev = NULL;
 272	atom->page_addr = page;
 273	atom->bit_inpage = i;
 274	return atom;
 275}
 276
 277static void atom_free(struct kwork_atom *atom)
 278{
 279	if (atom->prev != NULL)
 280		atom_free(atom->prev);
 281
 282	__clear_bit(atom->bit_inpage,
 283		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 284}
 285
 286static void atom_del(struct kwork_atom *atom)
 287{
 288	list_del(&atom->list);
 289	atom_free(atom);
 290}
 291
 292static int work_cmp(struct list_head *list,
 293		    struct kwork_work *l, struct kwork_work *r)
 294{
 295	int ret = 0;
 296	struct sort_dimension *sort;
 297
 298	BUG_ON(list_empty(list));
 299
 300	list_for_each_entry(sort, list, list) {
 301		ret = sort->cmp(l, r);
 302		if (ret)
 303			return ret;
 304	}
 305
 306	return ret;
 307}
 308
 309static struct kwork_work *work_search(struct rb_root_cached *root,
 310				      struct kwork_work *key,
 311				      struct list_head *sort_list)
 312{
 313	int cmp;
 314	struct kwork_work *work;
 315	struct rb_node *node = root->rb_root.rb_node;
 316
 317	while (node) {
 318		work = container_of(node, struct kwork_work, node);
 319		cmp = work_cmp(sort_list, key, work);
 320		if (cmp > 0)
 321			node = node->rb_left;
 322		else if (cmp < 0)
 323			node = node->rb_right;
 324		else {
 325			if (work->name == NULL)
 326				work->name = key->name;
 327			return work;
 328		}
 329	}
 330	return NULL;
 331}
 332
 333static void work_insert(struct rb_root_cached *root,
 334			struct kwork_work *key, struct list_head *sort_list)
 335{
 336	int cmp;
 337	bool leftmost = true;
 338	struct kwork_work *cur;
 339	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
 340
 341	while (*new) {
 342		cur = container_of(*new, struct kwork_work, node);
 343		parent = *new;
 344		cmp = work_cmp(sort_list, key, cur);
 345
 346		if (cmp > 0)
 347			new = &((*new)->rb_left);
 348		else {
 349			new = &((*new)->rb_right);
 350			leftmost = false;
 351		}
 352	}
 353
 354	rb_link_node(&key->node, parent, new);
 355	rb_insert_color_cached(&key->node, root, leftmost);
 356}
 357
 358static struct kwork_work *work_new(struct kwork_work *key)
 359{
 360	int i;
 361	struct kwork_work *work = zalloc(sizeof(*work));
 362
 363	if (work == NULL) {
 364		pr_err("Failed to zalloc kwork work\n");
 365		return NULL;
 366	}
 367
 368	for (i = 0; i < KWORK_TRACE_MAX; i++)
 369		INIT_LIST_HEAD(&work->atom_list[i]);
 370
 371	work->id = key->id;
 372	work->cpu = key->cpu;
 373	work->name = key->name;
 374	work->class = key->class;
 375	return work;
 376}
 377
 378static struct kwork_work *work_findnew(struct rb_root_cached *root,
 379				       struct kwork_work *key,
 380				       struct list_head *sort_list)
 381{
 382	struct kwork_work *work = work_search(root, key, sort_list);
 383
 384	if (work != NULL)
 385		return work;
 386
 387	work = work_new(key);
 388	if (work)
 389		work_insert(root, work, sort_list);
 390
 391	return work;
 392}
 393
 394static void profile_update_timespan(struct perf_kwork *kwork,
 395				    struct perf_sample *sample)
 396{
 397	if (!kwork->summary)
 398		return;
 399
 400	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
 401		kwork->timestart = sample->time;
 402
 403	if (kwork->timeend < sample->time)
 404		kwork->timeend = sample->time;
 405}
 406
 407static bool profile_name_match(struct perf_kwork *kwork,
 408			       struct kwork_work *work)
 409{
 410	if (kwork->profile_name && work->name &&
 411	    (strcmp(work->name, kwork->profile_name) != 0)) {
 412		return false;
 413	}
 414
 415	return true;
 416}
 417
 418static bool profile_event_match(struct perf_kwork *kwork,
 419				struct kwork_work *work,
 420				struct perf_sample *sample)
 421{
 422	int cpu = work->cpu;
 423	u64 time = sample->time;
 424	struct perf_time_interval *ptime = &kwork->ptime;
 425
 426	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
 427		return false;
 428
 429	if (((ptime->start != 0) && (ptime->start > time)) ||
 430	    ((ptime->end != 0) && (ptime->end < time)))
 431		return false;
 432
 433	/*
 434	 * report top needs to collect the runtime of all tasks to
 435	 * calculate the load of each core.
 436	 */
 437	if ((kwork->report != KWORK_REPORT_TOP) &&
 438	    !profile_name_match(kwork, work)) {
 439		return false;
 440	}
 441
 442	profile_update_timespan(kwork, sample);
 443	return true;
 444}
 445
 446static int work_push_atom(struct perf_kwork *kwork,
 447			  struct kwork_class *class,
 448			  enum kwork_trace_type src_type,
 449			  enum kwork_trace_type dst_type,
 450			  struct evsel *evsel,
 451			  struct perf_sample *sample,
 452			  struct machine *machine,
 453			  struct kwork_work **ret_work,
 454			  bool overwrite)
 455{
 456	struct kwork_atom *atom, *dst_atom, *last_atom;
 457	struct kwork_work *work, key;
 458
 459	BUG_ON(class->work_init == NULL);
 460	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
 461
 462	atom = atom_new(kwork, sample);
 463	if (atom == NULL)
 464		return -1;
 465
 466	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 467	if (work == NULL) {
 468		atom_free(atom);
 469		return -1;
 470	}
 471
 472	if (!profile_event_match(kwork, work, sample)) {
 473		atom_free(atom);
 474		return 0;
 475	}
 476
 477	if (dst_type < KWORK_TRACE_MAX) {
 478		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
 479						   struct kwork_atom, list);
 480		if (dst_atom != NULL) {
 481			atom->prev = dst_atom;
 482			list_del(&dst_atom->list);
 483		}
 484	}
 485
 486	if (ret_work != NULL)
 487		*ret_work = work;
 488
 489	if (overwrite) {
 490		last_atom = list_last_entry_or_null(&work->atom_list[src_type],
 491						    struct kwork_atom, list);
 492		if (last_atom) {
 493			atom_del(last_atom);
 494
 495			kwork->nr_skipped_events[src_type]++;
 496			kwork->nr_skipped_events[KWORK_TRACE_MAX]++;
 497		}
 498	}
 499
 500	list_add_tail(&atom->list, &work->atom_list[src_type]);
 501
 502	return 0;
 503}
 504
 505static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
 506					struct kwork_class *class,
 507					enum kwork_trace_type src_type,
 508					enum kwork_trace_type dst_type,
 509					struct evsel *evsel,
 510					struct perf_sample *sample,
 511					struct machine *machine,
 512					struct kwork_work **ret_work)
 513{
 514	struct kwork_atom *atom, *src_atom;
 515	struct kwork_work *work, key;
 516
 517	BUG_ON(class->work_init == NULL);
 518	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
 519
 520	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 521	if (ret_work != NULL)
 522		*ret_work = work;
 523
 524	if (work == NULL)
 525		return NULL;
 526
 527	if (!profile_event_match(kwork, work, sample))
 528		return NULL;
 529
 530	atom = list_last_entry_or_null(&work->atom_list[dst_type],
 531				       struct kwork_atom, list);
 532	if (atom != NULL)
 533		return atom;
 534
 535	src_atom = atom_new(kwork, sample);
 536	if (src_atom != NULL)
 537		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
 538	else {
 539		if (ret_work != NULL)
 540			*ret_work = NULL;
 541	}
 542
 543	return NULL;
 544}
 545
 546static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
 547					  u64 id, int cpu)
 548{
 549	struct rb_node *next;
 550	struct kwork_work *work;
 551
 552	next = rb_first_cached(root);
 553	while (next) {
 554		work = rb_entry(next, struct kwork_work, node);
 555		if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
 556		    (cpu == -1 && work->id == id))
 557			return work;
 558
 559		next = rb_next(next);
 560	}
 561
 562	return NULL;
 563}
 564
 565static struct kwork_class *get_kwork_class(struct perf_kwork *kwork,
 566					   enum kwork_class_type type)
 567{
 568	struct kwork_class *class;
 569
 570	list_for_each_entry(class, &kwork->class_list, list) {
 571		if (class->type == type)
 572			return class;
 573	}
 574
 575	return NULL;
 576}
 577
 578static void report_update_exit_event(struct kwork_work *work,
 579				     struct kwork_atom *atom,
 580				     struct perf_sample *sample)
 581{
 582	u64 delta;
 583	u64 exit_time = sample->time;
 584	u64 entry_time = atom->time;
 585
 586	if ((entry_time != 0) && (exit_time >= entry_time)) {
 587		delta = exit_time - entry_time;
 588		if ((delta > work->max_runtime) ||
 589		    (work->max_runtime == 0)) {
 590			work->max_runtime = delta;
 591			work->max_runtime_start = entry_time;
 592			work->max_runtime_end = exit_time;
 593		}
 594		work->total_runtime += delta;
 595		work->nr_atoms++;
 596	}
 597}
 598
 599static int report_entry_event(struct perf_kwork *kwork,
 600			      struct kwork_class *class,
 601			      struct evsel *evsel,
 602			      struct perf_sample *sample,
 603			      struct machine *machine)
 604{
 605	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 606			      KWORK_TRACE_MAX, evsel, sample,
 607			      machine, NULL, true);
 608}
 609
 610static int report_exit_event(struct perf_kwork *kwork,
 611			     struct kwork_class *class,
 612			     struct evsel *evsel,
 613			     struct perf_sample *sample,
 614			     struct machine *machine)
 615{
 616	struct kwork_atom *atom = NULL;
 617	struct kwork_work *work = NULL;
 618
 619	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 620			     KWORK_TRACE_ENTRY, evsel, sample,
 621			     machine, &work);
 622	if (work == NULL)
 623		return -1;
 624
 625	if (atom != NULL) {
 626		report_update_exit_event(work, atom, sample);
 627		atom_del(atom);
 628	}
 629
 630	return 0;
 631}
 632
 633static void latency_update_entry_event(struct kwork_work *work,
 634				       struct kwork_atom *atom,
 635				       struct perf_sample *sample)
 636{
 637	u64 delta;
 638	u64 entry_time = sample->time;
 639	u64 raise_time = atom->time;
 640
 641	if ((raise_time != 0) && (entry_time >= raise_time)) {
 642		delta = entry_time - raise_time;
 643		if ((delta > work->max_latency) ||
 644		    (work->max_latency == 0)) {
 645			work->max_latency = delta;
 646			work->max_latency_start = raise_time;
 647			work->max_latency_end = entry_time;
 648		}
 649		work->total_latency += delta;
 650		work->nr_atoms++;
 651	}
 652}
 653
 654static int latency_raise_event(struct perf_kwork *kwork,
 655			       struct kwork_class *class,
 656			       struct evsel *evsel,
 657			       struct perf_sample *sample,
 658			       struct machine *machine)
 659{
 660	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 661			      KWORK_TRACE_MAX, evsel, sample,
 662			      machine, NULL, true);
 663}
 664
 665static int latency_entry_event(struct perf_kwork *kwork,
 666			       struct kwork_class *class,
 667			       struct evsel *evsel,
 668			       struct perf_sample *sample,
 669			       struct machine *machine)
 670{
 671	struct kwork_atom *atom = NULL;
 672	struct kwork_work *work = NULL;
 673
 674	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
 675			     KWORK_TRACE_RAISE, evsel, sample,
 676			     machine, &work);
 677	if (work == NULL)
 678		return -1;
 679
 680	if (atom != NULL) {
 681		latency_update_entry_event(work, atom, sample);
 682		atom_del(atom);
 683	}
 684
 685	return 0;
 686}
 687
 688static void timehist_save_callchain(struct perf_kwork *kwork,
 689				    struct perf_sample *sample,
 690				    struct evsel *evsel,
 691				    struct machine *machine)
 692{
 693	struct symbol *sym;
 694	struct thread *thread;
 695	struct callchain_cursor_node *node;
 696	struct callchain_cursor *cursor;
 697
 698	if (!kwork->show_callchain || sample->callchain == NULL)
 699		return;
 700
 701	/* want main thread for process - has maps */
 702	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
 703	if (thread == NULL) {
 704		pr_debug("Failed to get thread for pid %d\n", sample->pid);
 705		return;
 706	}
 707
 708	cursor = get_tls_callchain_cursor();
 709
 710	if (thread__resolve_callchain(thread, cursor, evsel, sample,
 711				      NULL, NULL, kwork->max_stack + 2) != 0) {
 712		pr_debug("Failed to resolve callchain, skipping\n");
 713		goto out_put;
 714	}
 715
 716	callchain_cursor_commit(cursor);
 717
 718	while (true) {
 719		node = callchain_cursor_current(cursor);
 720		if (node == NULL)
 721			break;
 722
 723		sym = node->ms.sym;
 724		if (sym) {
 725			if (!strcmp(sym->name, "__softirqentry_text_start") ||
 726			    !strcmp(sym->name, "__do_softirq"))
 727				sym->ignore = 1;
 728		}
 729
 730		callchain_cursor_advance(cursor);
 731	}
 732
 733out_put:
 734	thread__put(thread);
 735}
 736
 737static void timehist_print_event(struct perf_kwork *kwork,
 738				 struct kwork_work *work,
 739				 struct kwork_atom *atom,
 740				 struct perf_sample *sample,
 741				 struct addr_location *al)
 742{
 743	char entrytime[32], exittime[32];
 744	char kwork_name[PRINT_KWORK_NAME_WIDTH];
 745
 746	/*
 747	 * runtime start
 748	 */
 749	timestamp__scnprintf_usec(atom->time,
 750				  entrytime, sizeof(entrytime));
 751	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
 752
 753	/*
 754	 * runtime end
 755	 */
 756	timestamp__scnprintf_usec(sample->time,
 757				  exittime, sizeof(exittime));
 758	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
 759
 760	/*
 761	 * cpu
 762	 */
 763	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
 764
 765	/*
 766	 * kwork name
 767	 */
 768	if (work->class && work->class->work_name) {
 769		work->class->work_name(work, kwork_name,
 770				       PRINT_KWORK_NAME_WIDTH);
 771		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
 772	} else
 773		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
 774
 775	/*
 776	 *runtime
 777	 */
 778	printf(" %*.*f ",
 779	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
 780	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);
 781
 782	/*
 783	 * delaytime
 784	 */
 785	if (atom->prev != NULL)
 786		printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
 787		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
 788	else
 789		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
 790
 791	/*
 792	 * callchain
 793	 */
 794	if (kwork->show_callchain) {
 795		struct callchain_cursor *cursor = get_tls_callchain_cursor();
 796
 797		if (cursor == NULL)
 798			return;
 799
 800		printf(" ");
 801
 802		sample__fprintf_sym(sample, al, 0,
 803				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
 804				    EVSEL__PRINT_CALLCHAIN_ARROW |
 805				    EVSEL__PRINT_SKIP_IGNORED,
 806				    cursor, symbol_conf.bt_stop_list,
 807				    stdout);
 808	}
 809
 810	printf("\n");
 811}
 812
 813static int timehist_raise_event(struct perf_kwork *kwork,
 814				struct kwork_class *class,
 815				struct evsel *evsel,
 816				struct perf_sample *sample,
 817				struct machine *machine)
 818{
 819	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 820			      KWORK_TRACE_MAX, evsel, sample,
 821			      machine, NULL, true);
 822}
 823
 824static int timehist_entry_event(struct perf_kwork *kwork,
 825				struct kwork_class *class,
 826				struct evsel *evsel,
 827				struct perf_sample *sample,
 828				struct machine *machine)
 829{
 830	int ret;
 831	struct kwork_work *work = NULL;
 832
 833	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 834			     KWORK_TRACE_RAISE, evsel, sample,
 835			     machine, &work, true);
 836	if (ret)
 837		return ret;
 838
 839	if (work != NULL)
 840		timehist_save_callchain(kwork, sample, evsel, machine);
 841
 842	return 0;
 843}
 844
 845static int timehist_exit_event(struct perf_kwork *kwork,
 846			       struct kwork_class *class,
 847			       struct evsel *evsel,
 848			       struct perf_sample *sample,
 849			       struct machine *machine)
 850{
 851	struct kwork_atom *atom = NULL;
 852	struct kwork_work *work = NULL;
 853	struct addr_location al;
 854	int ret = 0;
 855
 856	addr_location__init(&al);
 857	if (machine__resolve(machine, &al, sample) < 0) {
 858		pr_debug("Problem processing event, skipping it\n");
 859		ret = -1;
 860		goto out;
 861	}
 862
 863	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 864			     KWORK_TRACE_ENTRY, evsel, sample,
 865			     machine, &work);
 866	if (work == NULL) {
 867		ret = -1;
 868		goto out;
 869	}
 870
 871	if (atom != NULL) {
 872		work->nr_atoms++;
 873		timehist_print_event(kwork, work, atom, sample, &al);
 874		atom_del(atom);
 875	}
 876
 877out:
 878	addr_location__exit(&al);
 879	return ret;
 880}
 881
 882static void top_update_runtime(struct kwork_work *work,
 883			       struct kwork_atom *atom,
 884			       struct perf_sample *sample)
 885{
 886	u64 delta;
 887	u64 exit_time = sample->time;
 888	u64 entry_time = atom->time;
 889
 890	if ((entry_time != 0) && (exit_time >= entry_time)) {
 891		delta = exit_time - entry_time;
 892		work->total_runtime += delta;
 893	}
 894}
 895
 896static int top_entry_event(struct perf_kwork *kwork,
 897			   struct kwork_class *class,
 898			   struct evsel *evsel,
 899			   struct perf_sample *sample,
 900			   struct machine *machine)
 901{
 902	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 903			      KWORK_TRACE_MAX, evsel, sample,
 904			      machine, NULL, true);
 905}
 906
 907static int top_exit_event(struct perf_kwork *kwork,
 908			  struct kwork_class *class,
 909			  struct evsel *evsel,
 910			  struct perf_sample *sample,
 911			  struct machine *machine)
 912{
 913	struct kwork_work *work, *sched_work;
 914	struct kwork_class *sched_class;
 915	struct kwork_atom *atom;
 916
 917	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 918			     KWORK_TRACE_ENTRY, evsel, sample,
 919			     machine, &work);
 920	if (!work)
 921		return -1;
 922
 923	if (atom) {
 924		sched_class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
 925		if (sched_class) {
 926			sched_work = find_work_by_id(&sched_class->work_root,
 927						     work->id, work->cpu);
 928			if (sched_work)
 929				top_update_runtime(work, atom, sample);
 930		}
 931		atom_del(atom);
 932	}
 933
 934	return 0;
 935}
 936
 937static int top_sched_switch_event(struct perf_kwork *kwork,
 938				  struct kwork_class *class,
 939				  struct evsel *evsel,
 940				  struct perf_sample *sample,
 941				  struct machine *machine)
 942{
 943	struct kwork_atom *atom;
 944	struct kwork_work *work;
 945
 946	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 947			     KWORK_TRACE_ENTRY, evsel, sample,
 948			     machine, &work);
 949	if (!work)
 950		return -1;
 951
 952	if (atom) {
 953		top_update_runtime(work, atom, sample);
 954		atom_del(atom);
 955	}
 956
 957	return top_entry_event(kwork, class, evsel, sample, machine);
 958}
 959
 960static struct kwork_class kwork_irq;
 961static int process_irq_handler_entry_event(struct perf_tool *tool,
 962					   struct evsel *evsel,
 963					   struct perf_sample *sample,
 964					   struct machine *machine)
 965{
 966	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 967
 968	if (kwork->tp_handler->entry_event)
 969		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
 970						      evsel, sample, machine);
 971	return 0;
 972}
 973
 974static int process_irq_handler_exit_event(struct perf_tool *tool,
 975					  struct evsel *evsel,
 976					  struct perf_sample *sample,
 977					  struct machine *machine)
 978{
 979	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 980
 981	if (kwork->tp_handler->exit_event)
 982		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
 983						     evsel, sample, machine);
 984	return 0;
 985}
 986
 987const struct evsel_str_handler irq_tp_handlers[] = {
 988	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
 989	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
 990};
 991
 992static int irq_class_init(struct kwork_class *class,
 993			  struct perf_session *session)
 994{
 995	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
 996		pr_err("Failed to set irq tracepoints handlers\n");
 997		return -1;
 998	}
 999
1000	class->work_root = RB_ROOT_CACHED;
1001	return 0;
1002}
1003
1004static void irq_work_init(struct perf_kwork *kwork,
1005			  struct kwork_class *class,
1006			  struct kwork_work *work,
1007			  enum kwork_trace_type src_type __maybe_unused,
1008			  struct evsel *evsel,
1009			  struct perf_sample *sample,
1010			  struct machine *machine __maybe_unused)
1011{
1012	work->class = class;
1013	work->cpu = sample->cpu;
1014
1015	if (kwork->report == KWORK_REPORT_TOP) {
1016		work->id = evsel__intval_common(evsel, sample, "common_pid");
1017		work->name = NULL;
1018	} else {
1019		work->id = evsel__intval(evsel, sample, "irq");
1020		work->name = evsel__strval(evsel, sample, "name");
1021	}
1022}
1023
1024static void irq_work_name(struct kwork_work *work, char *buf, int len)
1025{
1026	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
1027}
1028
1029static struct kwork_class kwork_irq = {
1030	.name           = "irq",
1031	.type           = KWORK_CLASS_IRQ,
1032	.nr_tracepoints = 2,
1033	.tp_handlers    = irq_tp_handlers,
1034	.class_init     = irq_class_init,
1035	.work_init      = irq_work_init,
1036	.work_name      = irq_work_name,
1037};
1038
1039static struct kwork_class kwork_softirq;
1040static int process_softirq_raise_event(struct perf_tool *tool,
1041				       struct evsel *evsel,
1042				       struct perf_sample *sample,
1043				       struct machine *machine)
1044{
1045	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1046
1047	if (kwork->tp_handler->raise_event)
1048		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
1049						      evsel, sample, machine);
1050
1051	return 0;
1052}
1053
1054static int process_softirq_entry_event(struct perf_tool *tool,
1055				       struct evsel *evsel,
1056				       struct perf_sample *sample,
1057				       struct machine *machine)
1058{
1059	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1060
1061	if (kwork->tp_handler->entry_event)
1062		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
1063						      evsel, sample, machine);
1064
1065	return 0;
1066}
1067
1068static int process_softirq_exit_event(struct perf_tool *tool,
1069				      struct evsel *evsel,
1070				      struct perf_sample *sample,
1071				      struct machine *machine)
1072{
1073	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1074
1075	if (kwork->tp_handler->exit_event)
1076		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
1077						     evsel, sample, machine);
1078
1079	return 0;
1080}
1081
1082const struct evsel_str_handler softirq_tp_handlers[] = {
1083	{ "irq:softirq_raise", process_softirq_raise_event, },
1084	{ "irq:softirq_entry", process_softirq_entry_event, },
1085	{ "irq:softirq_exit",  process_softirq_exit_event,  },
1086};
1087
1088static int softirq_class_init(struct kwork_class *class,
1089			      struct perf_session *session)
1090{
1091	if (perf_session__set_tracepoints_handlers(session,
1092						   softirq_tp_handlers)) {
1093		pr_err("Failed to set softirq tracepoints handlers\n");
1094		return -1;
1095	}
1096
1097	class->work_root = RB_ROOT_CACHED;
1098	return 0;
1099}
1100
1101static char *evsel__softirq_name(struct evsel *evsel, u64 num)
1102{
1103	char *name = NULL;
1104	bool found = false;
1105	struct tep_print_flag_sym *sym = NULL;
1106	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
1107
1108	if ((args == NULL) || (args->next == NULL))
1109		return NULL;
1110
1111	/* skip softirq field: "REC->vec" */
1112	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
1113		if ((eval_flag(sym->value) == (unsigned long long)num) &&
1114		    (strlen(sym->str) != 0)) {
1115			found = true;
1116			break;
1117		}
1118	}
1119
1120	if (!found)
1121		return NULL;
1122
1123	name = strdup(sym->str);
1124	if (name == NULL) {
1125		pr_err("Failed to copy symbol name\n");
1126		return NULL;
1127	}
1128	return name;
1129}
1130
1131static void softirq_work_init(struct perf_kwork *kwork,
1132			      struct kwork_class *class,
1133			      struct kwork_work *work,
1134			      enum kwork_trace_type src_type __maybe_unused,
1135			      struct evsel *evsel,
1136			      struct perf_sample *sample,
1137			      struct machine *machine __maybe_unused)
1138{
1139	u64 num;
1140
1141	work->class = class;
1142	work->cpu = sample->cpu;
1143
1144	if (kwork->report == KWORK_REPORT_TOP) {
1145		work->id = evsel__intval_common(evsel, sample, "common_pid");
1146		work->name = NULL;
1147	} else {
1148		num = evsel__intval(evsel, sample, "vec");
1149		work->id = num;
1150		work->name = evsel__softirq_name(evsel, num);
1151	}
1152}
1153
1154static void softirq_work_name(struct kwork_work *work, char *buf, int len)
1155{
1156	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
1157}
1158
1159static struct kwork_class kwork_softirq = {
1160	.name           = "softirq",
1161	.type           = KWORK_CLASS_SOFTIRQ,
1162	.nr_tracepoints = 3,
1163	.tp_handlers    = softirq_tp_handlers,
1164	.class_init     = softirq_class_init,
1165	.work_init      = softirq_work_init,
1166	.work_name      = softirq_work_name,
1167};
1168
1169static struct kwork_class kwork_workqueue;
1170static int process_workqueue_activate_work_event(struct perf_tool *tool,
1171						 struct evsel *evsel,
1172						 struct perf_sample *sample,
1173						 struct machine *machine)
1174{
1175	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1176
1177	if (kwork->tp_handler->raise_event)
1178		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
1179						    evsel, sample, machine);
1180
1181	return 0;
1182}
1183
1184static int process_workqueue_execute_start_event(struct perf_tool *tool,
1185						 struct evsel *evsel,
1186						 struct perf_sample *sample,
1187						 struct machine *machine)
1188{
1189	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1190
1191	if (kwork->tp_handler->entry_event)
1192		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
1193						    evsel, sample, machine);
1194
1195	return 0;
1196}
1197
1198static int process_workqueue_execute_end_event(struct perf_tool *tool,
1199					       struct evsel *evsel,
1200					       struct perf_sample *sample,
1201					       struct machine *machine)
1202{
1203	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1204
1205	if (kwork->tp_handler->exit_event)
1206		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
1207						   evsel, sample, machine);
1208
1209	return 0;
1210}
1211
1212const struct evsel_str_handler workqueue_tp_handlers[] = {
1213	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
1214	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
1215	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
1216};
1217
1218static int workqueue_class_init(struct kwork_class *class,
1219				struct perf_session *session)
1220{
1221	if (perf_session__set_tracepoints_handlers(session,
1222						   workqueue_tp_handlers)) {
1223		pr_err("Failed to set workqueue tracepoints handlers\n");
1224		return -1;
1225	}
1226
1227	class->work_root = RB_ROOT_CACHED;
1228	return 0;
1229}
1230
1231static void workqueue_work_init(struct perf_kwork *kwork __maybe_unused,
1232				struct kwork_class *class,
1233				struct kwork_work *work,
1234				enum kwork_trace_type src_type __maybe_unused,
1235				struct evsel *evsel,
1236				struct perf_sample *sample,
1237				struct machine *machine)
1238{
1239	char *modp = NULL;
1240	unsigned long long function_addr = evsel__intval(evsel,
1241							 sample, "function");
1242
1243	work->class = class;
1244	work->cpu = sample->cpu;
1245	work->id = evsel__intval(evsel, sample, "work");
1246	work->name = function_addr == 0 ? NULL :
1247		machine__resolve_kernel_addr(machine, &function_addr, &modp);
1248}
1249
1250static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1251{
1252	if (work->name != NULL)
1253		snprintf(buf, len, "(w)%s", work->name);
1254	else
1255		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1256}
1257
1258static struct kwork_class kwork_workqueue = {
1259	.name           = "workqueue",
1260	.type           = KWORK_CLASS_WORKQUEUE,
1261	.nr_tracepoints = 3,
1262	.tp_handlers    = workqueue_tp_handlers,
1263	.class_init     = workqueue_class_init,
1264	.work_init      = workqueue_work_init,
1265	.work_name      = workqueue_work_name,
1266};
1267
1268static struct kwork_class kwork_sched;
1269static int process_sched_switch_event(struct perf_tool *tool,
1270				      struct evsel *evsel,
1271				      struct perf_sample *sample,
1272				      struct machine *machine)
1273{
1274	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1275
1276	if (kwork->tp_handler->sched_switch_event)
1277		return kwork->tp_handler->sched_switch_event(kwork, &kwork_sched,
1278							     evsel, sample, machine);
1279	return 0;
1280}
1281
1282const struct evsel_str_handler sched_tp_handlers[] = {
1283	{ "sched:sched_switch",  process_sched_switch_event, },
1284};
1285
1286static int sched_class_init(struct kwork_class *class,
1287			    struct perf_session *session)
1288{
1289	if (perf_session__set_tracepoints_handlers(session,
1290						   sched_tp_handlers)) {
1291		pr_err("Failed to set sched tracepoints handlers\n");
1292		return -1;
1293	}
1294
1295	class->work_root = RB_ROOT_CACHED;
1296	return 0;
1297}
1298
1299static void sched_work_init(struct perf_kwork *kwork __maybe_unused,
1300			    struct kwork_class *class,
1301			    struct kwork_work *work,
1302			    enum kwork_trace_type src_type,
1303			    struct evsel *evsel,
1304			    struct perf_sample *sample,
1305			    struct machine *machine __maybe_unused)
1306{
1307	work->class = class;
1308	work->cpu = sample->cpu;
1309
1310	if (src_type == KWORK_TRACE_EXIT) {
1311		work->id = evsel__intval(evsel, sample, "prev_pid");
1312		work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
1313	} else if (src_type == KWORK_TRACE_ENTRY) {
1314		work->id = evsel__intval(evsel, sample, "next_pid");
1315		work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
1316	}
1317}
1318
1319static void sched_work_name(struct kwork_work *work, char *buf, int len)
1320{
1321	snprintf(buf, len, "%s", work->name);
1322}
1323
1324static struct kwork_class kwork_sched = {
1325	.name		= "sched",
1326	.type		= KWORK_CLASS_SCHED,
1327	.nr_tracepoints	= ARRAY_SIZE(sched_tp_handlers),
1328	.tp_handlers	= sched_tp_handlers,
1329	.class_init	= sched_class_init,
1330	.work_init	= sched_work_init,
1331	.work_name	= sched_work_name,
1332};
1333
1334static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
1335	[KWORK_CLASS_IRQ]       = &kwork_irq,
1336	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
1337	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
1338	[KWORK_CLASS_SCHED]     = &kwork_sched,
1339};
1340
1341static void print_separator(int len)
1342{
1343	printf(" %.*s\n", len, graph_dotted_line);
1344}
1345
1346static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1347{
1348	int ret = 0;
1349	char kwork_name[PRINT_KWORK_NAME_WIDTH];
1350	char max_runtime_start[32], max_runtime_end[32];
1351	char max_latency_start[32], max_latency_end[32];
1352
1353	printf(" ");
1354
1355	/*
1356	 * kwork name
1357	 */
1358	if (work->class && work->class->work_name) {
1359		work->class->work_name(work, kwork_name,
1360				       PRINT_KWORK_NAME_WIDTH);
1361		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
1362	} else {
1363		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
1364	}
1365
1366	/*
1367	 * cpu
1368	 */
1369	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1370
1371	/*
1372	 * total runtime
1373	 */
1374	if (kwork->report == KWORK_REPORT_RUNTIME) {
1375		ret += printf(" %*.*f ms |",
1376			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1377			      (double)work->total_runtime / NSEC_PER_MSEC);
1378	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1379		ret += printf(" %*.*f ms |",
1380			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1381			      (double)work->total_latency /
1382			      work->nr_atoms / NSEC_PER_MSEC);
1383	}
1384
1385	/*
1386	 * count
1387	 */
1388	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1389
1390	/*
1391	 * max runtime, max runtime start, max runtime end
1392	 */
1393	if (kwork->report == KWORK_REPORT_RUNTIME) {
1394		timestamp__scnprintf_usec(work->max_runtime_start,
1395					  max_runtime_start,
1396					  sizeof(max_runtime_start));
1397		timestamp__scnprintf_usec(work->max_runtime_end,
1398					  max_runtime_end,
1399					  sizeof(max_runtime_end));
1400		ret += printf(" %*.*f ms | %*s s | %*s s |",
1401			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1402			      (double)work->max_runtime / NSEC_PER_MSEC,
1403			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
1404			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
1405	}
1406	/*
1407	 * max delay, max delay start, max delay end
1408	 */
1409	else if (kwork->report == KWORK_REPORT_LATENCY) {
1410		timestamp__scnprintf_usec(work->max_latency_start,
1411					  max_latency_start,
1412					  sizeof(max_latency_start));
1413		timestamp__scnprintf_usec(work->max_latency_end,
1414					  max_latency_end,
1415					  sizeof(max_latency_end));
1416		ret += printf(" %*.*f ms | %*s s | %*s s |",
1417			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1418			      (double)work->max_latency / NSEC_PER_MSEC,
1419			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
1420			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
1421	}
1422
1423	printf("\n");
1424	return ret;
1425}
1426
1427static int report_print_header(struct perf_kwork *kwork)
1428{
1429	int ret;
1430
1431	printf("\n ");
1432	ret = printf(" %-*s | %-*s |",
1433		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
1434		     PRINT_CPU_WIDTH, "Cpu");
1435
1436	if (kwork->report == KWORK_REPORT_RUNTIME) {
1437		ret += printf(" %-*s |",
1438			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
1439	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1440		ret += printf(" %-*s |",
1441			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
1442	}
1443
1444	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
1445
1446	if (kwork->report == KWORK_REPORT_RUNTIME) {
1447		ret += printf(" %-*s | %-*s | %-*s |",
1448			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
1449			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
1450			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
1451	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1452		ret += printf(" %-*s | %-*s | %-*s |",
1453			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
1454			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
1455			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
1456	}
1457
1458	printf("\n");
1459	print_separator(ret);
1460	return ret;
1461}
1462
1463static void timehist_print_header(void)
1464{
1465	/*
1466	 * header row
1467	 */
1468	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1469	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
1470	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
1471	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
1472	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
1473	       PRINT_RUNTIME_WIDTH, "Runtime",
1474	       PRINT_RUNTIME_WIDTH, "Delaytime");
1475
1476	/*
1477	 * units row
1478	 */
1479	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1480	       PRINT_TIMESTAMP_WIDTH, "",
1481	       PRINT_TIMESTAMP_WIDTH, "",
1482	       PRINT_TIMEHIST_CPU_WIDTH, "",
1483	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
1484	       PRINT_RUNTIME_WIDTH, "(msec)",
1485	       PRINT_RUNTIME_WIDTH, "(msec)");
1486
1487	/*
1488	 * separator
1489	 */
1490	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
1491	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1492	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1493	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
1494	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
1495	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
1496	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
1497}
1498
1499static void print_summary(struct perf_kwork *kwork)
1500{
1501	u64 time = kwork->timeend - kwork->timestart;
1502
1503	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
1504	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
1505	       (double)kwork->all_runtime / NSEC_PER_MSEC,
1506	       time == 0 ? 0 : (double)kwork->all_runtime / time);
1507	printf("  Total time span (msec) : %9.3f\n",
1508	       (double)time / NSEC_PER_MSEC);
1509}
1510
1511static unsigned long long nr_list_entry(struct list_head *head)
1512{
1513	struct list_head *pos;
1514	unsigned long long n = 0;
1515
1516	list_for_each(pos, head)
1517		n++;
1518
1519	return n;
1520}
1521
1522static void print_skipped_events(struct perf_kwork *kwork)
1523{
1524	int i;
1525	const char *const kwork_event_str[] = {
1526		[KWORK_TRACE_RAISE] = "raise",
1527		[KWORK_TRACE_ENTRY] = "entry",
1528		[KWORK_TRACE_EXIT]  = "exit",
1529	};
1530
1531	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1532	    (kwork->nr_events != 0)) {
1533		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
1534		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1535		       (double)kwork->nr_events * 100.0,
1536		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1537
1538		for (i = 0; i < KWORK_TRACE_MAX; i++) {
1539			printf("%" PRIu64 " %s%s",
1540			       kwork->nr_skipped_events[i],
1541			       kwork_event_str[i],
1542			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
1543		}
1544	}
1545
1546	if (verbose > 0)
1547		printf("  INFO: use %lld atom pages\n",
1548		       nr_list_entry(&kwork->atom_page_list));
1549}
1550
1551static void print_bad_events(struct perf_kwork *kwork)
1552{
1553	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1554		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1555		       (double)kwork->nr_lost_events /
1556		       (double)kwork->nr_events * 100.0,
1557		       kwork->nr_lost_events, kwork->nr_events,
1558		       kwork->nr_lost_chunks);
1559	}
1560}
1561
1562const char *graph_load = "||||||||||||||||||||||||||||||||||||||||||||||||";
1563const char *graph_idle = "                                                ";
1564static void top_print_per_cpu_load(struct perf_kwork *kwork)
1565{
1566	int i, load_width;
1567	u64 total, load, load_ratio;
1568	struct kwork_top_stat *stat = &kwork->top_stat;
1569
1570	for (i = 0; i < MAX_NR_CPUS; i++) {
1571		total = stat->cpus_runtime[i].total;
1572		load = stat->cpus_runtime[i].load;
1573		if (test_bit(i, stat->all_cpus_bitmap) && total) {
1574			load_ratio = load * 10000 / total;
1575			load_width = PRINT_CPU_USAGE_HIST_WIDTH *
1576				load_ratio / 10000;
1577
1578			printf("%%Cpu%-*d[%.*s%.*s %*.*f%%]\n",
1579			       PRINT_CPU_WIDTH, i,
1580			       load_width, graph_load,
1581			       PRINT_CPU_USAGE_HIST_WIDTH - load_width,
1582			       graph_idle,
1583			       PRINT_CPU_USAGE_WIDTH,
1584			       PRINT_CPU_USAGE_DECIMAL_WIDTH,
1585			       (double)load_ratio / 100);
1586		}
1587	}
1588}
1589
1590static void top_print_cpu_usage(struct perf_kwork *kwork)
1591{
1592	struct kwork_top_stat *stat = &kwork->top_stat;
1593	u64 idle_time = stat->cpus_runtime[MAX_NR_CPUS].idle;
1594	u64 hardirq_time = stat->cpus_runtime[MAX_NR_CPUS].irq;
1595	u64 softirq_time = stat->cpus_runtime[MAX_NR_CPUS].softirq;
1596	int cpus_nr = bitmap_weight(stat->all_cpus_bitmap, MAX_NR_CPUS);
1597	u64 cpus_total_time = stat->cpus_runtime[MAX_NR_CPUS].total;
1598
1599	printf("Total  : %*.*f ms, %d cpus\n",
1600	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1601	       (double)cpus_total_time / NSEC_PER_MSEC,
1602	       cpus_nr);
1603
1604	printf("%%Cpu(s): %*.*f%% id, %*.*f%% hi, %*.*f%% si\n",
1605	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1606	       cpus_total_time ? (double)idle_time * 100 / cpus_total_time : 0,
1607
1608	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1609	       cpus_total_time ? (double)hardirq_time * 100 / cpus_total_time : 0,
1610
1611	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1612	       cpus_total_time ? (double)softirq_time * 100 / cpus_total_time : 0);
1613
1614	top_print_per_cpu_load(kwork);
1615}
1616
1617static void top_print_header(struct perf_kwork *kwork __maybe_unused)
1618{
1619	int ret;
1620
1621	printf("\n ");
1622	ret = printf(" %*s %s%*s%s %*s  %*s  %-*s",
1623		     PRINT_PID_WIDTH, "PID",
1624
1625		     kwork->use_bpf ? " " : "",
1626		     kwork->use_bpf ? PRINT_PID_WIDTH : 0,
1627		     kwork->use_bpf ? "SPID" : "",
1628		     kwork->use_bpf ? " " : "",
1629
1630		     PRINT_CPU_USAGE_WIDTH, "%CPU",
1631		     PRINT_RUNTIME_HEADER_WIDTH + RPINT_DECIMAL_WIDTH, "RUNTIME",
1632		     PRINT_TASK_NAME_WIDTH, "COMMAND");
1633	printf("\n ");
1634	print_separator(ret);
1635}
1636
1637static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
1638{
1639	int ret = 0;
1640
1641	printf(" ");
1642
1643	/*
1644	 * pid
1645	 */
1646	ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id);
1647
1648	/*
1649	 * tgid
1650	 */
1651	if (kwork->use_bpf)
1652		ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid);
1653
1654	/*
1655	 * cpu usage
1656	 */
1657	ret += printf(" %*.*f ",
1658		      PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1659		      (double)work->cpu_usage / 100);
1660
1661	/*
1662	 * total runtime
1663	 */
1664	ret += printf(" %*.*f ms ",
1665		      PRINT_RUNTIME_WIDTH + RPINT_DECIMAL_WIDTH, RPINT_DECIMAL_WIDTH,
1666		      (double)work->total_runtime / NSEC_PER_MSEC);
1667
1668	/*
1669	 * command
1670	 */
1671	if (kwork->use_bpf)
1672		ret += printf(" %s%s%s",
1673			      work->is_kthread ? "[" : "",
1674			      work->name,
1675			      work->is_kthread ? "]" : "");
1676	else
1677		ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
1678
1679	printf("\n");
1680	return ret;
1681}
1682
1683static void work_sort(struct perf_kwork *kwork,
1684		      struct kwork_class *class, struct rb_root_cached *root)
1685{
1686	struct rb_node *node;
1687	struct kwork_work *data;
1688
1689	pr_debug("Sorting %s ...\n", class->name);
1690	for (;;) {
1691		node = rb_first_cached(root);
1692		if (!node)
1693			break;
1694
1695		rb_erase_cached(node, root);
1696		data = rb_entry(node, struct kwork_work, node);
1697		work_insert(&kwork->sorted_work_root,
1698			       data, &kwork->sort_list);
1699	}
1700}
1701
1702static void perf_kwork__sort(struct perf_kwork *kwork)
1703{
1704	struct kwork_class *class;
1705
1706	list_for_each_entry(class, &kwork->class_list, list)
1707		work_sort(kwork, class, &class->work_root);
1708}
1709
1710static int perf_kwork__check_config(struct perf_kwork *kwork,
1711				    struct perf_session *session)
1712{
1713	int ret;
1714	struct evsel *evsel;
1715	struct kwork_class *class;
1716
1717	static struct trace_kwork_handler report_ops = {
1718		.entry_event = report_entry_event,
1719		.exit_event  = report_exit_event,
1720	};
1721	static struct trace_kwork_handler latency_ops = {
1722		.raise_event = latency_raise_event,
1723		.entry_event = latency_entry_event,
1724	};
1725	static struct trace_kwork_handler timehist_ops = {
1726		.raise_event = timehist_raise_event,
1727		.entry_event = timehist_entry_event,
1728		.exit_event  = timehist_exit_event,
1729	};
1730	static struct trace_kwork_handler top_ops = {
1731		.entry_event        = timehist_entry_event,
1732		.exit_event         = top_exit_event,
1733		.sched_switch_event = top_sched_switch_event,
1734	};
1735
1736	switch (kwork->report) {
1737	case KWORK_REPORT_RUNTIME:
1738		kwork->tp_handler = &report_ops;
1739		break;
1740	case KWORK_REPORT_LATENCY:
1741		kwork->tp_handler = &latency_ops;
1742		break;
1743	case KWORK_REPORT_TIMEHIST:
1744		kwork->tp_handler = &timehist_ops;
1745		break;
1746	case KWORK_REPORT_TOP:
1747		kwork->tp_handler = &top_ops;
1748		break;
1749	default:
1750		pr_debug("Invalid report type %d\n", kwork->report);
1751		return -1;
1752	}
1753
1754	list_for_each_entry(class, &kwork->class_list, list)
1755		if ((class->class_init != NULL) &&
1756		    (class->class_init(class, session) != 0))
1757			return -1;
1758
1759	if (kwork->cpu_list != NULL) {
1760		ret = perf_session__cpu_bitmap(session,
1761					       kwork->cpu_list,
1762					       kwork->cpu_bitmap);
1763		if (ret < 0) {
1764			pr_err("Invalid cpu bitmap\n");
1765			return -1;
1766		}
1767	}
1768
1769	if (kwork->time_str != NULL) {
1770		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1771		if (ret != 0) {
1772			pr_err("Invalid time span\n");
1773			return -1;
1774		}
1775	}
1776
1777	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
1778		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1779			pr_debug("Samples do not have callchains\n");
1780			kwork->show_callchain = 0;
1781			symbol_conf.use_callchain = 0;
1782		}
1783	}
1784
1785	return 0;
1786}
1787
1788static int perf_kwork__read_events(struct perf_kwork *kwork)
1789{
1790	int ret = -1;
1791	struct perf_session *session = NULL;
1792
1793	struct perf_data data = {
1794		.path  = input_name,
1795		.mode  = PERF_DATA_MODE_READ,
1796		.force = kwork->force,
1797	};
1798
1799	session = perf_session__new(&data, &kwork->tool);
1800	if (IS_ERR(session)) {
1801		pr_debug("Error creating perf session\n");
1802		return PTR_ERR(session);
1803	}
1804
1805	symbol__init(&session->header.env);
1806
1807	if (perf_kwork__check_config(kwork, session) != 0)
1808		goto out_delete;
1809
1810	if (session->tevent.pevent &&
1811	    tep_set_function_resolver(session->tevent.pevent,
1812				      machine__resolve_kernel_addr,
1813				      &session->machines.host) < 0) {
1814		pr_err("Failed to set libtraceevent function resolver\n");
1815		goto out_delete;
1816	}
1817
1818	if (kwork->report == KWORK_REPORT_TIMEHIST)
1819		timehist_print_header();
1820
1821	ret = perf_session__process_events(session);
1822	if (ret) {
1823		pr_debug("Failed to process events, error %d\n", ret);
1824		goto out_delete;
1825	}
1826
1827	kwork->nr_events      = session->evlist->stats.nr_events[0];
1828	kwork->nr_lost_events = session->evlist->stats.total_lost;
1829	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1830
1831out_delete:
1832	perf_session__delete(session);
1833	return ret;
1834}
1835
1836static void process_skipped_events(struct perf_kwork *kwork,
1837				   struct kwork_work *work)
1838{
1839	int i;
1840	unsigned long long count;
1841
1842	for (i = 0; i < KWORK_TRACE_MAX; i++) {
1843		count = nr_list_entry(&work->atom_list[i]);
1844		kwork->nr_skipped_events[i] += count;
1845		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1846	}
1847}
1848
1849struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1850				       struct kwork_class *class,
1851				       struct kwork_work *key)
1852{
1853	struct kwork_work *work = NULL;
1854
1855	work = work_new(key);
1856	if (work == NULL)
1857		return NULL;
1858
1859	work_insert(&class->work_root, work, &kwork->cmp_id);
1860	return work;
1861}
1862
1863static void sig_handler(int sig)
1864{
1865	/*
1866	 * Simply capture termination signal so that
1867	 * the program can continue after pause returns
1868	 */
1869	pr_debug("Capture signal %d\n", sig);
1870}
1871
1872static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1873{
1874	int ret;
1875
1876	signal(SIGINT, sig_handler);
1877	signal(SIGTERM, sig_handler);
1878
1879	ret = perf_kwork__trace_prepare_bpf(kwork);
1880	if (ret)
1881		return -1;
1882
1883	printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
1884
1885	perf_kwork__trace_start();
1886
1887	/*
1888	 * a simple pause, wait here for stop signal
1889	 */
1890	pause();
1891
1892	perf_kwork__trace_finish();
1893
1894	perf_kwork__report_read_bpf(kwork);
1895
1896	perf_kwork__report_cleanup_bpf();
1897
1898	return 0;
1899}
1900
1901static int perf_kwork__report(struct perf_kwork *kwork)
1902{
1903	int ret;
1904	struct rb_node *next;
1905	struct kwork_work *work;
1906
1907	if (kwork->use_bpf)
1908		ret = perf_kwork__report_bpf(kwork);
1909	else
1910		ret = perf_kwork__read_events(kwork);
1911
1912	if (ret != 0)
1913		return -1;
1914
1915	perf_kwork__sort(kwork);
1916
1917	setup_pager();
1918
1919	ret = report_print_header(kwork);
1920	next = rb_first_cached(&kwork->sorted_work_root);
1921	while (next) {
1922		work = rb_entry(next, struct kwork_work, node);
1923		process_skipped_events(kwork, work);
1924
1925		if (work->nr_atoms != 0) {
1926			report_print_work(kwork, work);
1927			if (kwork->summary) {
1928				kwork->all_runtime += work->total_runtime;
1929				kwork->all_count += work->nr_atoms;
1930			}
1931		}
1932		next = rb_next(next);
1933	}
1934	print_separator(ret);
1935
1936	if (kwork->summary) {
1937		print_summary(kwork);
1938		print_separator(ret);
1939	}
1940
1941	print_bad_events(kwork);
1942	print_skipped_events(kwork);
1943	printf("\n");
1944
1945	return 0;
1946}
1947
1948typedef int (*tracepoint_handler)(struct perf_tool *tool,
1949				  struct evsel *evsel,
1950				  struct perf_sample *sample,
1951				  struct machine *machine);
1952
1953static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
1954						 union perf_event *event __maybe_unused,
1955						 struct perf_sample *sample,
1956						 struct evsel *evsel,
1957						 struct machine *machine)
1958{
1959	int err = 0;
1960
1961	if (evsel->handler != NULL) {
1962		tracepoint_handler f = evsel->handler;
1963
1964		err = f(tool, evsel, sample, machine);
1965	}
1966
1967	return err;
1968}
1969
1970static int perf_kwork__timehist(struct perf_kwork *kwork)
1971{
1972	/*
1973	 * event handlers for timehist option
1974	 */
1975	kwork->tool.comm	 = perf_event__process_comm;
1976	kwork->tool.exit	 = perf_event__process_exit;
1977	kwork->tool.fork	 = perf_event__process_fork;
1978	kwork->tool.attr	 = perf_event__process_attr;
1979	kwork->tool.tracing_data = perf_event__process_tracing_data;
1980	kwork->tool.build_id	 = perf_event__process_build_id;
1981	kwork->tool.ordered_events = true;
1982	kwork->tool.ordering_requires_timestamps = true;
1983	symbol_conf.use_callchain = kwork->show_callchain;
1984
1985	if (symbol__validate_sym_arguments()) {
1986		pr_err("Failed to validate sym arguments\n");
1987		return -1;
1988	}
1989
1990	setup_pager();
1991
1992	return perf_kwork__read_events(kwork);
1993}
1994
1995static void top_calc_total_runtime(struct perf_kwork *kwork)
1996{
1997	struct kwork_class *class;
1998	struct kwork_work *work;
1999	struct rb_node *next;
2000	struct kwork_top_stat *stat = &kwork->top_stat;
2001
2002	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2003	if (!class)
2004		return;
2005
2006	next = rb_first_cached(&class->work_root);
2007	while (next) {
2008		work = rb_entry(next, struct kwork_work, node);
2009		BUG_ON(work->cpu >= MAX_NR_CPUS);
2010		stat->cpus_runtime[work->cpu].total += work->total_runtime;
2011		stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
2012		next = rb_next(next);
2013	}
2014}
2015
2016static void top_calc_idle_time(struct perf_kwork *kwork,
2017				struct kwork_work *work)
2018{
2019	struct kwork_top_stat *stat = &kwork->top_stat;
2020
2021	if (work->id == 0) {
2022		stat->cpus_runtime[work->cpu].idle += work->total_runtime;
2023		stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
2024	}
2025}
2026
2027static void top_calc_irq_runtime(struct perf_kwork *kwork,
2028				 enum kwork_class_type type,
2029				 struct kwork_work *work)
2030{
2031	struct kwork_top_stat *stat = &kwork->top_stat;
2032
2033	if (type == KWORK_CLASS_IRQ) {
2034		stat->cpus_runtime[work->cpu].irq += work->total_runtime;
2035		stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
2036	} else if (type == KWORK_CLASS_SOFTIRQ) {
2037		stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
2038		stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime;
2039	}
2040}
2041
2042static void top_subtract_irq_runtime(struct perf_kwork *kwork,
2043				     struct kwork_work *work)
2044{
2045	struct kwork_class *class;
2046	struct kwork_work *data;
2047	unsigned int i;
2048	int irq_class_list[] = {KWORK_CLASS_IRQ, KWORK_CLASS_SOFTIRQ};
2049
2050	for (i = 0; i < ARRAY_SIZE(irq_class_list); i++) {
2051		class = get_kwork_class(kwork, irq_class_list[i]);
2052		if (!class)
2053			continue;
2054
2055		data = find_work_by_id(&class->work_root,
2056				       work->id, work->cpu);
2057		if (!data)
2058			continue;
2059
2060		if (work->total_runtime > data->total_runtime) {
2061			work->total_runtime -= data->total_runtime;
2062			top_calc_irq_runtime(kwork, irq_class_list[i], data);
2063		}
2064	}
2065}
2066
2067static void top_calc_cpu_usage(struct perf_kwork *kwork)
2068{
2069	struct kwork_class *class;
2070	struct kwork_work *work;
2071	struct rb_node *next;
2072	struct kwork_top_stat *stat = &kwork->top_stat;
2073
2074	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2075	if (!class)
2076		return;
2077
2078	next = rb_first_cached(&class->work_root);
2079	while (next) {
2080		work = rb_entry(next, struct kwork_work, node);
2081
2082		if (work->total_runtime == 0)
2083			goto next;
2084
2085		__set_bit(work->cpu, stat->all_cpus_bitmap);
2086
2087		top_subtract_irq_runtime(kwork, work);
2088
2089		work->cpu_usage = work->total_runtime * 10000 /
2090			stat->cpus_runtime[work->cpu].total;
2091
2092		top_calc_idle_time(kwork, work);
2093next:
2094		next = rb_next(next);
2095	}
2096}
2097
2098static void top_calc_load_runtime(struct perf_kwork *kwork,
2099				  struct kwork_work *work)
2100{
2101	struct kwork_top_stat *stat = &kwork->top_stat;
2102
2103	if (work->id != 0) {
2104		stat->cpus_runtime[work->cpu].load += work->total_runtime;
2105		stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
2106	}
2107}
2108
2109static void top_merge_tasks(struct perf_kwork *kwork)
2110{
2111	struct kwork_work *merged_work, *data;
2112	struct kwork_class *class;
2113	struct rb_node *node;
2114	int cpu;
2115	struct rb_root_cached merged_root = RB_ROOT_CACHED;
2116
2117	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2118	if (!class)
2119		return;
2120
2121	for (;;) {
2122		node = rb_first_cached(&class->work_root);
2123		if (!node)
2124			break;
2125
2126		rb_erase_cached(node, &class->work_root);
2127		data = rb_entry(node, struct kwork_work, node);
2128
2129		if (!profile_name_match(kwork, data))
2130			continue;
2131
2132		cpu = data->cpu;
2133		merged_work = find_work_by_id(&merged_root, data->id,
2134					      data->id == 0 ? cpu : -1);
2135		if (!merged_work) {
2136			work_insert(&merged_root, data, &kwork->cmp_id);
2137		} else {
2138			merged_work->total_runtime += data->total_runtime;
2139			merged_work->cpu_usage += data->cpu_usage;
2140		}
2141
2142		top_calc_load_runtime(kwork, data);
2143	}
2144
2145	work_sort(kwork, class, &merged_root);
2146}
2147
2148static void perf_kwork__top_report(struct perf_kwork *kwork)
2149{
2150	struct kwork_work *work;
2151	struct rb_node *next;
2152
2153	printf("\n");
2154
2155	top_print_cpu_usage(kwork);
2156	top_print_header(kwork);
2157	next = rb_first_cached(&kwork->sorted_work_root);
2158	while (next) {
2159		work = rb_entry(next, struct kwork_work, node);
2160		process_skipped_events(kwork, work);
2161
2162		if (work->total_runtime == 0)
2163			goto next;
2164
2165		top_print_work(kwork, work);
2166
2167next:
2168		next = rb_next(next);
2169	}
2170
2171	printf("\n");
2172}
2173
2174static int perf_kwork__top_bpf(struct perf_kwork *kwork)
2175{
2176	int ret;
2177
2178	signal(SIGINT, sig_handler);
2179	signal(SIGTERM, sig_handler);
2180
2181	ret = perf_kwork__top_prepare_bpf(kwork);
2182	if (ret)
2183		return -1;
2184
2185	printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
2186
2187	perf_kwork__top_start();
2188
2189	/*
2190	 * a simple pause, wait here for stop signal
2191	 */
2192	pause();
2193
2194	perf_kwork__top_finish();
2195
2196	perf_kwork__top_read_bpf(kwork);
2197
2198	perf_kwork__top_cleanup_bpf();
2199
2200	return 0;
2201
2202}
2203
2204static int perf_kwork__top(struct perf_kwork *kwork)
2205{
2206	struct __top_cpus_runtime *cpus_runtime;
2207	int ret = 0;
2208
2209	cpus_runtime = zalloc(sizeof(struct __top_cpus_runtime) * (MAX_NR_CPUS + 1));
2210	if (!cpus_runtime)
2211		return -1;
2212
2213	kwork->top_stat.cpus_runtime = cpus_runtime;
2214	bitmap_zero(kwork->top_stat.all_cpus_bitmap, MAX_NR_CPUS);
2215
2216	if (kwork->use_bpf)
2217		ret = perf_kwork__top_bpf(kwork);
2218	else
2219		ret = perf_kwork__read_events(kwork);
2220
2221	if (ret)
2222		goto out;
2223
2224	top_calc_total_runtime(kwork);
2225	top_calc_cpu_usage(kwork);
2226	top_merge_tasks(kwork);
2227
2228	setup_pager();
2229
2230	perf_kwork__top_report(kwork);
2231
2232out:
2233	free(kwork->top_stat.cpus_runtime);
2234	return ret;
2235}
2236
2237static void setup_event_list(struct perf_kwork *kwork,
2238			     const struct option *options,
2239			     const char * const usage_msg[])
2240{
2241	int i;
2242	struct kwork_class *class;
2243	char *tmp, *tok, *str;
2244
2245	/*
2246	 * set default events list if not specified
2247	 */
2248	if (kwork->event_list_str == NULL)
2249		kwork->event_list_str = "irq, softirq, workqueue";
2250
2251	str = strdup(kwork->event_list_str);
2252	for (tok = strtok_r(str, ", ", &tmp);
2253	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
2254		for (i = 0; i < KWORK_CLASS_MAX; i++) {
2255			class = kwork_class_supported_list[i];
2256			if (strcmp(tok, class->name) == 0) {
2257				list_add_tail(&class->list, &kwork->class_list);
2258				break;
2259			}
2260		}
2261		if (i == KWORK_CLASS_MAX) {
2262			usage_with_options_msg(usage_msg, options,
2263					       "Unknown --event key: `%s'", tok);
2264		}
2265	}
2266	free(str);
2267
2268	pr_debug("Config event list:");
2269	list_for_each_entry(class, &kwork->class_list, list)
2270		pr_debug(" %s", class->name);
2271	pr_debug("\n");
2272}
2273
2274static int perf_kwork__record(struct perf_kwork *kwork,
2275			      int argc, const char **argv)
2276{
2277	const char **rec_argv;
2278	unsigned int rec_argc, i, j;
2279	struct kwork_class *class;
2280
2281	const char *const record_args[] = {
2282		"record",
2283		"-a",
2284		"-R",
2285		"-m", "1024",
2286		"-c", "1",
2287	};
2288
2289	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
2290
2291	list_for_each_entry(class, &kwork->class_list, list)
2292		rec_argc += 2 * class->nr_tracepoints;
2293
2294	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2295	if (rec_argv == NULL)
2296		return -ENOMEM;
2297
2298	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2299		rec_argv[i] = strdup(record_args[i]);
2300
2301	list_for_each_entry(class, &kwork->class_list, list) {
2302		for (j = 0; j < class->nr_tracepoints; j++) {
2303			rec_argv[i++] = strdup("-e");
2304			rec_argv[i++] = strdup(class->tp_handlers[j].name);
2305		}
2306	}
2307
2308	for (j = 1; j < (unsigned int)argc; j++, i++)
2309		rec_argv[i] = argv[j];
2310
2311	BUG_ON(i != rec_argc);
2312
2313	pr_debug("record comm: ");
2314	for (j = 0; j < rec_argc; j++)
2315		pr_debug("%s ", rec_argv[j]);
2316	pr_debug("\n");
2317
2318	return cmd_record(i, rec_argv);
2319}
2320
2321int cmd_kwork(int argc, const char **argv)
2322{
2323	static struct perf_kwork kwork = {
2324		.class_list          = LIST_HEAD_INIT(kwork.class_list),
2325		.tool = {
2326			.mmap		= perf_event__process_mmap,
2327			.mmap2		= perf_event__process_mmap2,
2328			.sample		= perf_kwork__process_tracepoint_sample,
2329			.ordered_events = true,
2330		},
2331		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
2332		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
2333		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
2334		.sorted_work_root    = RB_ROOT_CACHED,
2335		.tp_handler          = NULL,
2336		.profile_name        = NULL,
2337		.cpu_list            = NULL,
2338		.time_str            = NULL,
2339		.force               = false,
2340		.event_list_str      = NULL,
2341		.summary             = false,
2342		.sort_order          = NULL,
2343		.show_callchain      = false,
2344		.max_stack           = 5,
2345		.timestart           = 0,
2346		.timeend             = 0,
2347		.nr_events           = 0,
2348		.nr_lost_chunks      = 0,
2349		.nr_lost_events      = 0,
2350		.all_runtime         = 0,
2351		.all_count           = 0,
2352		.nr_skipped_events   = { 0 },
2353	};
2354	static const char default_report_sort_order[] = "runtime, max, count";
2355	static const char default_latency_sort_order[] = "avg, max, count";
2356	static const char default_top_sort_order[] = "rate, runtime";
2357	const struct option kwork_options[] = {
2358	OPT_INCR('v', "verbose", &verbose,
2359		 "be more verbose (show symbol address, etc)"),
2360	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
2361		    "dump raw trace in ASCII"),
2362	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
2363		   "list of kwork to profile (irq, softirq, workqueue, sched, etc)"),
2364	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
2365	OPT_END()
2366	};
2367	const struct option report_options[] = {
2368	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2369		   "sort by key(s): runtime, max, count"),
2370	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2371		   "list of cpus to profile"),
2372	OPT_STRING('n', "name", &kwork.profile_name, "name",
2373		   "event name to profile"),
2374	OPT_STRING(0, "time", &kwork.time_str, "str",
2375		   "Time span for analysis (start,stop)"),
2376	OPT_STRING('i', "input", &input_name, "file",
2377		   "input file name"),
2378	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
2379		    "Show summary with statistics"),
2380#ifdef HAVE_BPF_SKEL
2381	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2382		    "Use BPF to measure kwork runtime"),
2383#endif
2384	OPT_PARENT(kwork_options)
2385	};
2386	const struct option latency_options[] = {
2387	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2388		   "sort by key(s): avg, max, count"),
2389	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2390		   "list of cpus to profile"),
2391	OPT_STRING('n', "name", &kwork.profile_name, "name",
2392		   "event name to profile"),
2393	OPT_STRING(0, "time", &kwork.time_str, "str",
2394		   "Time span for analysis (start,stop)"),
2395	OPT_STRING('i', "input", &input_name, "file",
2396		   "input file name"),
2397#ifdef HAVE_BPF_SKEL
2398	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2399		    "Use BPF to measure kwork latency"),
2400#endif
2401	OPT_PARENT(kwork_options)
2402	};
2403	const struct option timehist_options[] = {
2404	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
2405		   "file", "vmlinux pathname"),
2406	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
2407		   "file", "kallsyms pathname"),
2408	OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
2409		    "Display call chains if present"),
2410	OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
2411		   "Maximum number of functions to display backtrace."),
2412	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
2413		    "Look for files with symbols relative to this directory"),
2414	OPT_STRING(0, "time", &kwork.time_str, "str",
2415		   "Time span for analysis (start,stop)"),
2416	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2417		   "list of cpus to profile"),
2418	OPT_STRING('n', "name", &kwork.profile_name, "name",
2419		   "event name to profile"),
2420	OPT_STRING('i', "input", &input_name, "file",
2421		   "input file name"),
2422	OPT_PARENT(kwork_options)
2423	};
2424	const struct option top_options[] = {
2425	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2426		   "sort by key(s): rate, runtime, tid"),
2427	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2428		   "list of cpus to profile"),
2429	OPT_STRING('n', "name", &kwork.profile_name, "name",
2430		   "event name to profile"),
2431	OPT_STRING(0, "time", &kwork.time_str, "str",
2432		   "Time span for analysis (start,stop)"),
2433	OPT_STRING('i', "input", &input_name, "file",
2434		   "input file name"),
2435#ifdef HAVE_BPF_SKEL
2436	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2437		    "Use BPF to measure task cpu usage"),
2438#endif
2439	OPT_PARENT(kwork_options)
2440	};
2441	const char *kwork_usage[] = {
2442		NULL,
2443		NULL
2444	};
2445	const char * const report_usage[] = {
2446		"perf kwork report [<options>]",
2447		NULL
2448	};
2449	const char * const latency_usage[] = {
2450		"perf kwork latency [<options>]",
2451		NULL
2452	};
2453	const char * const timehist_usage[] = {
2454		"perf kwork timehist [<options>]",
2455		NULL
2456	};
2457	const char * const top_usage[] = {
2458		"perf kwork top [<options>]",
2459		NULL
2460	};
2461	const char *const kwork_subcommands[] = {
2462		"record", "report", "latency", "timehist", "top", NULL
2463	};
2464
 
 
 
 
 
2465	argc = parse_options_subcommand(argc, argv, kwork_options,
2466					kwork_subcommands, kwork_usage,
2467					PARSE_OPT_STOP_AT_NON_OPTION);
2468	if (!argc)
2469		usage_with_options(kwork_usage, kwork_options);
2470
2471	sort_dimension__add(&kwork, "id", &kwork.cmp_id);
2472
2473	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2474		setup_event_list(&kwork, kwork_options, kwork_usage);
2475		return perf_kwork__record(&kwork, argc, argv);
2476	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
2477		kwork.sort_order = default_report_sort_order;
2478		if (argc > 1) {
2479			argc = parse_options(argc, argv, report_options, report_usage, 0);
2480			if (argc)
2481				usage_with_options(report_usage, report_options);
2482		}
2483		kwork.report = KWORK_REPORT_RUNTIME;
2484		setup_sorting(&kwork, report_options, report_usage);
2485		setup_event_list(&kwork, kwork_options, kwork_usage);
2486		return perf_kwork__report(&kwork);
2487	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
2488		kwork.sort_order = default_latency_sort_order;
2489		if (argc > 1) {
2490			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
2491			if (argc)
2492				usage_with_options(latency_usage, latency_options);
2493		}
2494		kwork.report = KWORK_REPORT_LATENCY;
2495		setup_sorting(&kwork, latency_options, latency_usage);
2496		setup_event_list(&kwork, kwork_options, kwork_usage);
2497		return perf_kwork__report(&kwork);
2498	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
2499		if (argc > 1) {
2500			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
2501			if (argc)
2502				usage_with_options(timehist_usage, timehist_options);
2503		}
2504		kwork.report = KWORK_REPORT_TIMEHIST;
2505		setup_event_list(&kwork, kwork_options, kwork_usage);
2506		return perf_kwork__timehist(&kwork);
2507	} else if (strlen(argv[0]) > 2 && strstarts("top", argv[0])) {
2508		kwork.sort_order = default_top_sort_order;
2509		if (argc > 1) {
2510			argc = parse_options(argc, argv, top_options, top_usage, 0);
2511			if (argc)
2512				usage_with_options(top_usage, top_options);
2513		}
2514		kwork.report = KWORK_REPORT_TOP;
2515		if (!kwork.event_list_str)
2516			kwork.event_list_str = "sched, irq, softirq";
2517		setup_event_list(&kwork, kwork_options, kwork_usage);
2518		setup_sorting(&kwork, top_options, top_usage);
2519		return perf_kwork__top(&kwork);
2520	} else
2521		usage_with_options(kwork_usage, kwork_options);
 
 
 
2522
2523	return 0;
2524}