v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-kwork.c
   4 *
   5 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
   6 */
   7
   8#include "builtin.h"
   9
  10#include "util/data.h"
  11#include "util/evlist.h"
  12#include "util/evsel.h"
  13#include "util/header.h"
  14#include "util/kwork.h"
  15#include "util/debug.h"
  16#include "util/session.h"
  17#include "util/symbol.h"
  18#include "util/thread.h"
  19#include "util/string2.h"
  20#include "util/callchain.h"
  21#include "util/evsel_fprintf.h"
  22#include "util/util.h"
  23
  24#include <subcmd/pager.h>
  25#include <subcmd/parse-options.h>
  26#include <event-parse.h>
  27
  28#include <errno.h>
  29#include <inttypes.h>
  30#include <signal.h>
  31#include <linux/err.h>
  32#include <linux/time64.h>
  33#include <linux/zalloc.h>
  34
  35/*
  36 * report header elements width
  37 */
  38#define PRINT_CPU_WIDTH 4
  39#define PRINT_COUNT_WIDTH 9
  40#define PRINT_RUNTIME_WIDTH 10
  41#define PRINT_LATENCY_WIDTH 10
  42#define PRINT_TIMESTAMP_WIDTH 17
  43#define PRINT_KWORK_NAME_WIDTH 30
  44#define RPINT_DECIMAL_WIDTH 3
  45#define PRINT_BRACKETPAIR_WIDTH 2
  46#define PRINT_TIME_UNIT_SEC_WIDTH 2
  47#define PRINT_TIME_UNIT_MESC_WIDTH 3
  48#define PRINT_PID_WIDTH 7
  49#define PRINT_TASK_NAME_WIDTH 16
  50#define PRINT_CPU_USAGE_WIDTH 6
  51#define PRINT_CPU_USAGE_DECIMAL_WIDTH 2
  52#define PRINT_CPU_USAGE_HIST_WIDTH 30
  53#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  54#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  55#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
  56#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
  57
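/*
 * A sort dimension ties a --sort key to a comparator; the keys chosen on
 * the command line are chained into a list and applied in order by
 * work_cmp() to keep the rb-tree of works ordered.
 */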
  58struct sort_dimension {
  59	const char      *name;
  60	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
  61	struct          list_head list;
  62};
  63
  64static int id_cmp(struct kwork_work *l, struct kwork_work *r)
  65{
  66	if (l->cpu > r->cpu)
  67		return 1;
  68	if (l->cpu < r->cpu)
  69		return -1;
  70
  71	if (l->id > r->id)
  72		return 1;
  73	if (l->id < r->id)
  74		return -1;
  75
  76	return 0;
  77}
  78
  79static int count_cmp(struct kwork_work *l, struct kwork_work *r)
  80{
  81	if (l->nr_atoms > r->nr_atoms)
  82		return 1;
  83	if (l->nr_atoms < r->nr_atoms)
  84		return -1;
  85
  86	return 0;
  87}
  88
  89static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
  90{
  91	if (l->total_runtime > r->total_runtime)
  92		return 1;
  93	if (l->total_runtime < r->total_runtime)
  94		return -1;
  95
  96	return 0;
  97}
  98
  99static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
 100{
 101	if (l->max_runtime > r->max_runtime)
 102		return 1;
 103	if (l->max_runtime < r->max_runtime)
 104		return -1;
 105
 106	return 0;
 107}
 108
 109static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 110{
 111	u64 avgl, avgr;
 112
 113	if (!r->nr_atoms)
 114		return 1;
 115	if (!l->nr_atoms)
 116		return -1;
 117
 118	avgl = l->total_latency / l->nr_atoms;
 119	avgr = r->total_latency / r->nr_atoms;
 120
 121	if (avgl > avgr)
 122		return 1;
 123	if (avgl < avgr)
 124		return -1;
 125
 126	return 0;
 127}
 128
 129static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 130{
 131	if (l->max_latency > r->max_latency)
 132		return 1;
 133	if (l->max_latency < r->max_latency)
 134		return -1;
 135
 136	return 0;
 137}
 138
 139static int cpu_usage_cmp(struct kwork_work *l, struct kwork_work *r)
 140{
 141	if (l->cpu_usage > r->cpu_usage)
 142		return 1;
 143	if (l->cpu_usage < r->cpu_usage)
 144		return -1;
 145
 146	return 0;
 147}
 148
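/*
 * Reverse comparison by id (pid), falling back to reverse cpu order only
 * for the idle task (id == 0); this backs the "tid" sort key.
 */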
 149static int id_or_cpu_r_cmp(struct kwork_work *l, struct kwork_work *r)
 150{
 151	if (l->id < r->id)
 152		return 1;
 153	if (l->id > r->id)
 154		return -1;
 155
 156	if (l->id != 0)
 157		return 0;
 158
 159	if (l->cpu < r->cpu)
 160		return 1;
 161	if (l->cpu > r->cpu)
 162		return -1;
 163
 164	return 0;
 165}
 166
 167static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
 168			       const char *tok, struct list_head *list)
 169{
 170	size_t i;
 171	static struct sort_dimension max_sort_dimension = {
 172		.name = "max",
 173		.cmp  = max_runtime_cmp,
 174	};
 175	static struct sort_dimension id_sort_dimension = {
 176		.name = "id",
 177		.cmp  = id_cmp,
 178	};
 179	static struct sort_dimension runtime_sort_dimension = {
 180		.name = "runtime",
 181		.cmp  = runtime_cmp,
 182	};
 183	static struct sort_dimension count_sort_dimension = {
 184		.name = "count",
 185		.cmp  = count_cmp,
 186	};
 187	static struct sort_dimension avg_sort_dimension = {
 188		.name = "avg",
 189		.cmp  = avg_latency_cmp,
 190	};
 191	static struct sort_dimension rate_sort_dimension = {
 192		.name = "rate",
 193		.cmp  = cpu_usage_cmp,
 194	};
 195	static struct sort_dimension tid_sort_dimension = {
 196		.name = "tid",
 197		.cmp  = id_or_cpu_r_cmp,
 198	};
 199	struct sort_dimension *available_sorts[] = {
 200		&id_sort_dimension,
 201		&max_sort_dimension,
 202		&count_sort_dimension,
 203		&runtime_sort_dimension,
 204		&avg_sort_dimension,
 205		&rate_sort_dimension,
 206		&tid_sort_dimension,
 207	};
 208
 209	if (kwork->report == KWORK_REPORT_LATENCY)
 210		max_sort_dimension.cmp = max_latency_cmp;
 211
 212	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
 213		if (!strcmp(available_sorts[i]->name, tok)) {
 214			list_add_tail(&available_sorts[i]->list, list);
 215			return 0;
 216		}
 217	}
 218
 219	return -1;
 220}
 221
 222static void setup_sorting(struct perf_kwork *kwork,
 223			  const struct option *options,
 224			  const char * const usage_msg[])
 225{
 226	char *tmp, *tok, *str = strdup(kwork->sort_order);
 227
 228	for (tok = strtok_r(str, ", ", &tmp);
 229	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
 230		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
 231			usage_with_options_msg(usage_msg, options,
 232					       "Unknown --sort key: `%s'", tok);
 233	}
 234
 235	pr_debug("Sort order: %s\n", kwork->sort_order);
 236	free(str);
 237}
 238
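/*
 * Atoms record individual in-flight events; they are allocated from pages
 * of NR_ATOM_PER_PAGE slots, with a per-page bitmap tracking which slots
 * are in use so that freeing an atom only clears its bit.
 */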
 239static struct kwork_atom *atom_new(struct perf_kwork *kwork,
 240				   struct perf_sample *sample)
 241{
 242	unsigned long i;
 243	struct kwork_atom_page *page;
 244	struct kwork_atom *atom = NULL;
 245
 246	list_for_each_entry(page, &kwork->atom_page_list, list) {
 247		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
 248			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
 249			BUG_ON(i >= NR_ATOM_PER_PAGE);
 250			atom = &page->atoms[i];
 251			goto found_atom;
 252		}
 253	}
 254
 255	/*
 256	 * new page
 257	 */
 258	page = zalloc(sizeof(*page));
 259	if (page == NULL) {
 260		pr_err("Failed to zalloc kwork atom page\n");
 261		return NULL;
 262	}
 263
 264	i = 0;
 265	atom = &page->atoms[0];
 266	list_add_tail(&page->list, &kwork->atom_page_list);
 267
 268found_atom:
 269	__set_bit(i, page->bitmap);
 270	atom->time = sample->time;
 271	atom->prev = NULL;
 272	atom->page_addr = page;
 273	atom->bit_inpage = i;
 274	return atom;
 275}
 276
 277static void atom_free(struct kwork_atom *atom)
 278{
 279	if (atom->prev != NULL)
 280		atom_free(atom->prev);
 281
 282	__clear_bit(atom->bit_inpage,
 283		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 284}
 285
 286static void atom_del(struct kwork_atom *atom)
 287{
 288	list_del(&atom->list);
 289	atom_free(atom);
 290}
 291
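/*
 * Compare two works by walking the configured sort dimensions in order and
 * returning the first non-zero result.
 */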
 292static int work_cmp(struct list_head *list,
 293		    struct kwork_work *l, struct kwork_work *r)
 294{
 295	int ret = 0;
 296	struct sort_dimension *sort;
 297
 298	BUG_ON(list_empty(list));
 299
 300	list_for_each_entry(sort, list, list) {
 301		ret = sort->cmp(l, r);
 302		if (ret)
 303			return ret;
 304	}
 305
 306	return ret;
 307}
 308
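/*
 * Look up a work in the rb-tree using the comparison key list; on a hit,
 * backfill the stored name from the lookup key if it was still unknown.
 */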
 309static struct kwork_work *work_search(struct rb_root_cached *root,
 310				      struct kwork_work *key,
 311				      struct list_head *sort_list)
 312{
 313	int cmp;
 314	struct kwork_work *work;
 315	struct rb_node *node = root->rb_root.rb_node;
 316
 317	while (node) {
 318		work = container_of(node, struct kwork_work, node);
 319		cmp = work_cmp(sort_list, key, work);
 320		if (cmp > 0)
 321			node = node->rb_left;
 322		else if (cmp < 0)
 323			node = node->rb_right;
 324		else {
 325			if (work->name == NULL)
 326				work->name = key->name;
 327			return work;
 328		}
 329	}
 330	return NULL;
 331}
 332
 333static void work_insert(struct rb_root_cached *root,
 334			struct kwork_work *key, struct list_head *sort_list)
 335{
 336	int cmp;
 337	bool leftmost = true;
 338	struct kwork_work *cur;
 339	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
 340
 341	while (*new) {
 342		cur = container_of(*new, struct kwork_work, node);
 343		parent = *new;
 344		cmp = work_cmp(sort_list, key, cur);
 345
 346		if (cmp > 0)
 347			new = &((*new)->rb_left);
 348		else {
 349			new = &((*new)->rb_right);
 350			leftmost = false;
 351		}
 352	}
 353
 354	rb_link_node(&key->node, parent, new);
 355	rb_insert_color_cached(&key->node, root, leftmost);
 356}
 357
 358static struct kwork_work *work_new(struct kwork_work *key)
 359{
 360	int i;
 361	struct kwork_work *work = zalloc(sizeof(*work));
 362
 363	if (work == NULL) {
 364		pr_err("Failed to zalloc kwork work\n");
 365		return NULL;
 366	}
 367
 368	for (i = 0; i < KWORK_TRACE_MAX; i++)
 369		INIT_LIST_HEAD(&work->atom_list[i]);
 370
 371	work->id = key->id;
 372	work->cpu = key->cpu;
 373	work->name = key->name;
 374	work->class = key->class;
 375	return work;
 376}
 377
 378static struct kwork_work *work_findnew(struct rb_root_cached *root,
 379				       struct kwork_work *key,
 380				       struct list_head *sort_list)
 381{
 382	struct kwork_work *work = work_search(root, key, sort_list);
 383
 384	if (work != NULL)
 385		return work;
 386
 387	work = work_new(key);
 388	if (work)
 389		work_insert(root, work, sort_list);
 390
 391	return work;
 392}
 393
 394static void profile_update_timespan(struct perf_kwork *kwork,
 395				    struct perf_sample *sample)
 396{
 397	if (!kwork->summary)
 398		return;
 399
 400	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
 401		kwork->timestart = sample->time;
 402
 403	if (kwork->timeend < sample->time)
 404		kwork->timeend = sample->time;
 405}
 406
 407static bool profile_name_match(struct perf_kwork *kwork,
 408			       struct kwork_work *work)
 409{
 410	if (kwork->profile_name && work->name &&
 411	    (strcmp(work->name, kwork->profile_name) != 0)) {
 412		return false;
 413	}
 414
 415	return true;
 416}
 417
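/*
 * Filter a sample against the --cpu, --time and --name options before it
 * is accounted; matching samples also extend the observed time span.
 */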
 418static bool profile_event_match(struct perf_kwork *kwork,
 419				struct kwork_work *work,
 420				struct perf_sample *sample)
 421{
 422	int cpu = work->cpu;
 423	u64 time = sample->time;
 424	struct perf_time_interval *ptime = &kwork->ptime;
 425
 426	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
 427		return false;
 428
 429	if (((ptime->start != 0) && (ptime->start > time)) ||
 430	    ((ptime->end != 0) && (ptime->end < time)))
 431		return false;
 432
 433	/*
 434	 * report top needs to collect the runtime of all tasks to
 435	 * calculate the load of each core.
 436	 */
 437	if ((kwork->report != KWORK_REPORT_TOP) &&
 438	    !profile_name_match(kwork, work)) {
 439		return false;
 440	}
 441
 442	profile_update_timespan(kwork, sample);
 443	return true;
 444}
 445
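/*
 * Start a new atom of src_type on the work derived from this sample. If a
 * pending dst_type atom exists it is linked as ->prev (e.g. a raise
 * preceding an entry), and with overwrite an unpaired older src_type atom
 * is dropped and counted as skipped.
 */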
 446static int work_push_atom(struct perf_kwork *kwork,
 447			  struct kwork_class *class,
 448			  enum kwork_trace_type src_type,
 449			  enum kwork_trace_type dst_type,
 450			  struct evsel *evsel,
 451			  struct perf_sample *sample,
 452			  struct machine *machine,
 453			  struct kwork_work **ret_work,
 454			  bool overwrite)
 455{
 456	struct kwork_atom *atom, *dst_atom, *last_atom;
 457	struct kwork_work *work, key;
 458
 459	BUG_ON(class->work_init == NULL);
 460	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
 461
 462	atom = atom_new(kwork, sample);
 463	if (atom == NULL)
 464		return -1;
 465
 466	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 467	if (work == NULL) {
 468		atom_free(atom);
 469		return -1;
 470	}
 471
 472	if (!profile_event_match(kwork, work, sample)) {
 473		atom_free(atom);
 474		return 0;
 475	}
 476
 477	if (dst_type < KWORK_TRACE_MAX) {
 478		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
 479						   struct kwork_atom, list);
 480		if (dst_atom != NULL) {
 481			atom->prev = dst_atom;
 482			list_del(&dst_atom->list);
 483		}
 484	}
 485
 486	if (ret_work != NULL)
 487		*ret_work = work;
 488
 489	if (overwrite) {
 490		last_atom = list_last_entry_or_null(&work->atom_list[src_type],
 491						    struct kwork_atom, list);
 492		if (last_atom) {
 493			atom_del(last_atom);
 494
 495			kwork->nr_skipped_events[src_type]++;
 496			kwork->nr_skipped_events[KWORK_TRACE_MAX]++;
 497		}
 498	}
 499
 500	list_add_tail(&atom->list, &work->atom_list[src_type]);
 501
 502	return 0;
 503}
 504
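/*
 * Fetch the pending dst_type atom that this src_type event completes
 * (e.g. the entry matching an exit). If none is pending, a src_type atom
 * is queued instead so a later event can pair with it, and NULL is
 * returned.
 */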
 505static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
 506					struct kwork_class *class,
 507					enum kwork_trace_type src_type,
 508					enum kwork_trace_type dst_type,
 509					struct evsel *evsel,
 510					struct perf_sample *sample,
 511					struct machine *machine,
 512					struct kwork_work **ret_work)
 513{
 514	struct kwork_atom *atom, *src_atom;
 515	struct kwork_work *work, key;
 516
 517	BUG_ON(class->work_init == NULL);
 518	class->work_init(kwork, class, &key, src_type, evsel, sample, machine);
 519
 520	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 521	if (ret_work != NULL)
 522		*ret_work = work;
 523
 524	if (work == NULL)
 525		return NULL;
 526
 527	if (!profile_event_match(kwork, work, sample))
 528		return NULL;
 529
 530	atom = list_last_entry_or_null(&work->atom_list[dst_type],
 531				       struct kwork_atom, list);
 532	if (atom != NULL)
 533		return atom;
 534
 535	src_atom = atom_new(kwork, sample);
 536	if (src_atom != NULL)
 537		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
 538	else {
 539		if (ret_work != NULL)
 540			*ret_work = NULL;
 541	}
 542
 543	return NULL;
 544}
 545
 546static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
 547					  u64 id, int cpu)
 548{
 549	struct rb_node *next;
 550	struct kwork_work *work;
 551
 552	next = rb_first_cached(root);
 553	while (next) {
 554		work = rb_entry(next, struct kwork_work, node);
 555		if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
 556		    (cpu == -1 && work->id == id))
 557			return work;
 558
 559		next = rb_next(next);
 560	}
 561
 562	return NULL;
 563}
 564
 565static struct kwork_class *get_kwork_class(struct perf_kwork *kwork,
 566					   enum kwork_class_type type)
 567{
 568	struct kwork_class *class;
 569
 570	list_for_each_entry(class, &kwork->class_list, list) {
 571		if (class->type == type)
 572			return class;
 573	}
 574
 575	return NULL;
 576}
 577
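/*
 * Account one completed execution: add the entry->exit delta to the total
 * runtime, track the maximum runtime and its interval, and count the atom.
 */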
 578static void report_update_exit_event(struct kwork_work *work,
 579				     struct kwork_atom *atom,
 580				     struct perf_sample *sample)
 581{
 582	u64 delta;
 583	u64 exit_time = sample->time;
 584	u64 entry_time = atom->time;
 585
 586	if ((entry_time != 0) && (exit_time >= entry_time)) {
 587		delta = exit_time - entry_time;
 588		if ((delta > work->max_runtime) ||
 589		    (work->max_runtime == 0)) {
 590			work->max_runtime = delta;
 591			work->max_runtime_start = entry_time;
 592			work->max_runtime_end = exit_time;
 593		}
 594		work->total_runtime += delta;
 595		work->nr_atoms++;
 596	}
 597}
 598
 599static int report_entry_event(struct perf_kwork *kwork,
 600			      struct kwork_class *class,
 601			      struct evsel *evsel,
 602			      struct perf_sample *sample,
 603			      struct machine *machine)
 604{
 605	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 606			      KWORK_TRACE_MAX, evsel, sample,
 607			      machine, NULL, true);
 608}
 609
 610static int report_exit_event(struct perf_kwork *kwork,
 611			     struct kwork_class *class,
 612			     struct evsel *evsel,
 613			     struct perf_sample *sample,
 614			     struct machine *machine)
 615{
 616	struct kwork_atom *atom = NULL;
 617	struct kwork_work *work = NULL;
 618
 619	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 620			     KWORK_TRACE_ENTRY, evsel, sample,
 621			     machine, &work);
 622	if (work == NULL)
 623		return -1;
 624
 625	if (atom != NULL) {
 626		report_update_exit_event(work, atom, sample);
 627		atom_del(atom);
 628	}
 629
 630	return 0;
 631}
 632
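/*
 * Account one raise->entry delay: add the delta to the total latency,
 * track the maximum delay and its interval, and count the atom.
 */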
 633static void latency_update_entry_event(struct kwork_work *work,
 634				       struct kwork_atom *atom,
 635				       struct perf_sample *sample)
 636{
 637	u64 delta;
 638	u64 entry_time = sample->time;
 639	u64 raise_time = atom->time;
 640
 641	if ((raise_time != 0) && (entry_time >= raise_time)) {
 642		delta = entry_time - raise_time;
 643		if ((delta > work->max_latency) ||
 644		    (work->max_latency == 0)) {
 645			work->max_latency = delta;
 646			work->max_latency_start = raise_time;
 647			work->max_latency_end = entry_time;
 648		}
 649		work->total_latency += delta;
 650		work->nr_atoms++;
 651	}
 652}
 653
 654static int latency_raise_event(struct perf_kwork *kwork,
 655			       struct kwork_class *class,
 656			       struct evsel *evsel,
 657			       struct perf_sample *sample,
 658			       struct machine *machine)
 659{
 660	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 661			      KWORK_TRACE_MAX, evsel, sample,
 662			      machine, NULL, true);
 663}
 664
 665static int latency_entry_event(struct perf_kwork *kwork,
 666			       struct kwork_class *class,
 667			       struct evsel *evsel,
 668			       struct perf_sample *sample,
 669			       struct machine *machine)
 670{
 671	struct kwork_atom *atom = NULL;
 672	struct kwork_work *work = NULL;
 673
 674	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
 675			     KWORK_TRACE_RAISE, evsel, sample,
 676			     machine, &work);
 677	if (work == NULL)
 678		return -1;
 679
 680	if (atom != NULL) {
 681		latency_update_entry_event(work, atom, sample);
 682		atom_del(atom);
 683	}
 684
 685	return 0;
 686}
 687
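/*
 * Resolve the callchain of an entry sample into the thread-local cursor
 * and mark the generic softirq dispatch frames as ignored so they are
 * skipped when the callchain is printed.
 */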
 688static void timehist_save_callchain(struct perf_kwork *kwork,
 689				    struct perf_sample *sample,
 690				    struct evsel *evsel,
 691				    struct machine *machine)
 692{
 693	struct symbol *sym;
 694	struct thread *thread;
 695	struct callchain_cursor_node *node;
 696	struct callchain_cursor *cursor;
 697
 698	if (!kwork->show_callchain || sample->callchain == NULL)
 699		return;
 700
 701	/* want main thread for process - has maps */
 702	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
 703	if (thread == NULL) {
 704		pr_debug("Failed to get thread for pid %d\n", sample->pid);
 705		return;
 706	}
 707
 708	cursor = get_tls_callchain_cursor();
 709
 710	if (thread__resolve_callchain(thread, cursor, evsel, sample,
 711				      NULL, NULL, kwork->max_stack + 2) != 0) {
 712		pr_debug("Failed to resolve callchain, skipping\n");
 713		goto out_put;
 714	}
 715
 716	callchain_cursor_commit(cursor);
 717
 718	while (true) {
 719		node = callchain_cursor_current(cursor);
 720		if (node == NULL)
 721			break;
 722
 723		sym = node->ms.sym;
 724		if (sym) {
 725			if (!strcmp(sym->name, "__softirqentry_text_start") ||
 726			    !strcmp(sym->name, "__do_softirq"))
 727				sym->ignore = 1;
 728		}
 729
 730		callchain_cursor_advance(cursor);
 731	}
 732
 733out_put:
 734	thread__put(thread);
 735}
 736
 737static void timehist_print_event(struct perf_kwork *kwork,
 738				 struct kwork_work *work,
 739				 struct kwork_atom *atom,
 740				 struct perf_sample *sample,
 741				 struct addr_location *al)
 742{
 743	char entrytime[32], exittime[32];
 744	char kwork_name[PRINT_KWORK_NAME_WIDTH];
 745
 746	/*
 747	 * runtime start
 748	 */
 749	timestamp__scnprintf_usec(atom->time,
 750				  entrytime, sizeof(entrytime));
 751	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
 752
 753	/*
 754	 * runtime end
 755	 */
 756	timestamp__scnprintf_usec(sample->time,
 757				  exittime, sizeof(exittime));
 758	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
 759
 760	/*
 761	 * cpu
 762	 */
 763	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
 764
 765	/*
 766	 * kwork name
 767	 */
 768	if (work->class && work->class->work_name) {
 769		work->class->work_name(work, kwork_name,
 770				       PRINT_KWORK_NAME_WIDTH);
 771		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
 772	} else
 773		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
 774
 775	/*
  776	 * runtime
 777	 */
 778	printf(" %*.*f ",
 779	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
 780	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);
 781
 782	/*
 783	 * delaytime
 784	 */
 785	if (atom->prev != NULL)
 786		printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
 787		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
 788	else
 789		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
 790
 791	/*
 792	 * callchain
 793	 */
 794	if (kwork->show_callchain) {
 795		struct callchain_cursor *cursor = get_tls_callchain_cursor();
 796
 797		if (cursor == NULL)
 798			return;
 799
 800		printf(" ");
 801
 802		sample__fprintf_sym(sample, al, 0,
 803				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
 804				    EVSEL__PRINT_CALLCHAIN_ARROW |
 805				    EVSEL__PRINT_SKIP_IGNORED,
 806				    cursor, symbol_conf.bt_stop_list,
 807				    stdout);
 808	}
 809
 810	printf("\n");
 811}
 812
 813static int timehist_raise_event(struct perf_kwork *kwork,
 814				struct kwork_class *class,
 815				struct evsel *evsel,
 816				struct perf_sample *sample,
 817				struct machine *machine)
 818{
 819	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 820			      KWORK_TRACE_MAX, evsel, sample,
 821			      machine, NULL, true);
 822}
 823
 824static int timehist_entry_event(struct perf_kwork *kwork,
 825				struct kwork_class *class,
 826				struct evsel *evsel,
 827				struct perf_sample *sample,
 828				struct machine *machine)
 829{
 830	int ret;
 831	struct kwork_work *work = NULL;
 832
 833	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 834			     KWORK_TRACE_RAISE, evsel, sample,
 835			     machine, &work, true);
 836	if (ret)
 837		return ret;
 838
 839	if (work != NULL)
 840		timehist_save_callchain(kwork, sample, evsel, machine);
 841
 842	return 0;
 843}
 844
 845static int timehist_exit_event(struct perf_kwork *kwork,
 846			       struct kwork_class *class,
 847			       struct evsel *evsel,
 848			       struct perf_sample *sample,
 849			       struct machine *machine)
 850{
 851	struct kwork_atom *atom = NULL;
 852	struct kwork_work *work = NULL;
 853	struct addr_location al;
 854	int ret = 0;
 855
 856	addr_location__init(&al);
 857	if (machine__resolve(machine, &al, sample) < 0) {
 858		pr_debug("Problem processing event, skipping it\n");
 859		ret = -1;
 860		goto out;
 861	}
 862
 863	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 864			     KWORK_TRACE_ENTRY, evsel, sample,
 865			     machine, &work);
 866	if (work == NULL) {
 867		ret = -1;
 868		goto out;
 869	}
 870
 871	if (atom != NULL) {
 872		work->nr_atoms++;
 873		timehist_print_event(kwork, work, atom, sample, &al);
 874		atom_del(atom);
 875	}
 876
 877out:
 878	addr_location__exit(&al);
 879	return ret;
 880}
 881
 882static void top_update_runtime(struct kwork_work *work,
 883			       struct kwork_atom *atom,
 884			       struct perf_sample *sample)
 885{
 886	u64 delta;
 887	u64 exit_time = sample->time;
 888	u64 entry_time = atom->time;
 889
 890	if ((entry_time != 0) && (exit_time >= entry_time)) {
 891		delta = exit_time - entry_time;
 892		work->total_runtime += delta;
 893	}
 894}
 895
 896static int top_entry_event(struct perf_kwork *kwork,
 897			   struct kwork_class *class,
 898			   struct evsel *evsel,
 899			   struct perf_sample *sample,
 900			   struct machine *machine)
 901{
 902	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 903			      KWORK_TRACE_MAX, evsel, sample,
 904			      machine, NULL, true);
 905}
 906
 907static int top_exit_event(struct perf_kwork *kwork,
 908			  struct kwork_class *class,
 909			  struct evsel *evsel,
 910			  struct perf_sample *sample,
 911			  struct machine *machine)
 912{
 913	struct kwork_work *work, *sched_work;
 914	struct kwork_class *sched_class;
 915	struct kwork_atom *atom;
 916
 917	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 918			     KWORK_TRACE_ENTRY, evsel, sample,
 919			     machine, &work);
 920	if (!work)
 921		return -1;
 922
 923	if (atom) {
 924		sched_class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
 925		if (sched_class) {
 926			sched_work = find_work_by_id(&sched_class->work_root,
 927						     work->id, work->cpu);
 928			if (sched_work)
 929				top_update_runtime(work, atom, sample);
 930		}
 931		atom_del(atom);
 932	}
 933
 934	return 0;
 935}
 936
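/*
 * A sched_switch closes the runtime atom of the task being scheduled out
 * and immediately opens one for the task being scheduled in.
 */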
 937static int top_sched_switch_event(struct perf_kwork *kwork,
 938				  struct kwork_class *class,
 939				  struct evsel *evsel,
 940				  struct perf_sample *sample,
 941				  struct machine *machine)
 942{
 943	struct kwork_atom *atom;
 944	struct kwork_work *work;
 945
 946	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 947			     KWORK_TRACE_ENTRY, evsel, sample,
 948			     machine, &work);
 949	if (!work)
 950		return -1;
 951
 952	if (atom) {
 953		top_update_runtime(work, atom, sample);
 954		atom_del(atom);
 955	}
 956
 957	return top_entry_event(kwork, class, evsel, sample, machine);
 958}
 959
 960static struct kwork_class kwork_irq;
 961static int process_irq_handler_entry_event(const struct perf_tool *tool,
 962					   struct evsel *evsel,
 963					   struct perf_sample *sample,
 964					   struct machine *machine)
 965{
 966	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 967
 968	if (kwork->tp_handler->entry_event)
 969		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
 970						      evsel, sample, machine);
 971	return 0;
 972}
 973
 974static int process_irq_handler_exit_event(const struct perf_tool *tool,
 975					  struct evsel *evsel,
 976					  struct perf_sample *sample,
 977					  struct machine *machine)
 978{
 979	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 980
 981	if (kwork->tp_handler->exit_event)
 982		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
 983						     evsel, sample, machine);
 984	return 0;
 985}
 986
 987const struct evsel_str_handler irq_tp_handlers[] = {
 988	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
 989	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
 990};
 991
 992static int irq_class_init(struct kwork_class *class,
 993			  struct perf_session *session)
 994{
 995	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
 996		pr_err("Failed to set irq tracepoints handlers\n");
 997		return -1;
 998	}
 999
1000	class->work_root = RB_ROOT_CACHED;
1001	return 0;
1002}
1003
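/*
 * For "perf kwork top" the work is keyed by the current task's pid
 * (common_pid); otherwise it is keyed by irq number and named after the
 * handler.
 */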
1004static void irq_work_init(struct perf_kwork *kwork,
1005			  struct kwork_class *class,
1006			  struct kwork_work *work,
1007			  enum kwork_trace_type src_type __maybe_unused,
1008			  struct evsel *evsel,
1009			  struct perf_sample *sample,
1010			  struct machine *machine __maybe_unused)
1011{
1012	work->class = class;
1013	work->cpu = sample->cpu;
1014
1015	if (kwork->report == KWORK_REPORT_TOP) {
1016		work->id = evsel__intval_common(evsel, sample, "common_pid");
1017		work->name = NULL;
1018	} else {
1019		work->id = evsel__intval(evsel, sample, "irq");
1020		work->name = evsel__strval(evsel, sample, "name");
1021	}
1022}
1023
1024static void irq_work_name(struct kwork_work *work, char *buf, int len)
1025{
1026	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
1027}
1028
1029static struct kwork_class kwork_irq = {
1030	.name           = "irq",
1031	.type           = KWORK_CLASS_IRQ,
1032	.nr_tracepoints = 2,
1033	.tp_handlers    = irq_tp_handlers,
1034	.class_init     = irq_class_init,
1035	.work_init      = irq_work_init,
1036	.work_name      = irq_work_name,
1037};
1038
1039static struct kwork_class kwork_softirq;
1040static int process_softirq_raise_event(const struct perf_tool *tool,
1041				       struct evsel *evsel,
1042				       struct perf_sample *sample,
1043				       struct machine *machine)
1044{
1045	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1046
1047	if (kwork->tp_handler->raise_event)
1048		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
1049						      evsel, sample, machine);
1050
1051	return 0;
1052}
1053
1054static int process_softirq_entry_event(const struct perf_tool *tool,
1055				       struct evsel *evsel,
1056				       struct perf_sample *sample,
1057				       struct machine *machine)
1058{
1059	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1060
1061	if (kwork->tp_handler->entry_event)
1062		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
1063						      evsel, sample, machine);
1064
1065	return 0;
1066}
1067
1068static int process_softirq_exit_event(const struct perf_tool *tool,
1069				      struct evsel *evsel,
1070				      struct perf_sample *sample,
1071				      struct machine *machine)
1072{
1073	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1074
1075	if (kwork->tp_handler->exit_event)
1076		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
1077						     evsel, sample, machine);
1078
1079	return 0;
1080}
1081
1082const struct evsel_str_handler softirq_tp_handlers[] = {
1083	{ "irq:softirq_raise", process_softirq_raise_event, },
1084	{ "irq:softirq_entry", process_softirq_entry_event, },
1085	{ "irq:softirq_exit",  process_softirq_exit_event,  },
1086};
1087
1088static int softirq_class_init(struct kwork_class *class,
1089			      struct perf_session *session)
1090{
1091	if (perf_session__set_tracepoints_handlers(session,
1092						   softirq_tp_handlers)) {
1093		pr_err("Failed to set softirq tracepoints handlers\n");
1094		return -1;
1095	}
1096
1097	class->work_root = RB_ROOT_CACHED;
1098	return 0;
1099}
1100
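/*
 * Map a softirq vector number to its name by walking the symbolic values
 * of the "vec" field in the tracepoint's print format; returns a strdup'd
 * string or NULL.
 */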
1101static char *evsel__softirq_name(struct evsel *evsel, u64 num)
1102{
1103	char *name = NULL;
1104	bool found = false;
1105	struct tep_print_flag_sym *sym = NULL;
1106	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
1107
1108	if ((args == NULL) || (args->next == NULL))
1109		return NULL;
1110
1111	/* skip softirq field: "REC->vec" */
1112	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
1113		if ((eval_flag(sym->value) == (unsigned long long)num) &&
1114		    (strlen(sym->str) != 0)) {
1115			found = true;
1116			break;
1117		}
1118	}
1119
1120	if (!found)
1121		return NULL;
1122
1123	name = strdup(sym->str);
1124	if (name == NULL) {
1125		pr_err("Failed to copy symbol name\n");
1126		return NULL;
1127	}
1128	return name;
1129}
1130
1131static void softirq_work_init(struct perf_kwork *kwork,
1132			      struct kwork_class *class,
1133			      struct kwork_work *work,
1134			      enum kwork_trace_type src_type __maybe_unused,
1135			      struct evsel *evsel,
1136			      struct perf_sample *sample,
1137			      struct machine *machine __maybe_unused)
1138{
1139	u64 num;
1140
1141	work->class = class;
1142	work->cpu = sample->cpu;
1143
1144	if (kwork->report == KWORK_REPORT_TOP) {
1145		work->id = evsel__intval_common(evsel, sample, "common_pid");
1146		work->name = NULL;
1147	} else {
1148		num = evsel__intval(evsel, sample, "vec");
1149		work->id = num;
1150		work->name = evsel__softirq_name(evsel, num);
1151	}
1152}
1153
1154static void softirq_work_name(struct kwork_work *work, char *buf, int len)
1155{
1156	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
1157}
1158
1159static struct kwork_class kwork_softirq = {
1160	.name           = "softirq",
1161	.type           = KWORK_CLASS_SOFTIRQ,
1162	.nr_tracepoints = 3,
1163	.tp_handlers    = softirq_tp_handlers,
1164	.class_init     = softirq_class_init,
1165	.work_init      = softirq_work_init,
1166	.work_name      = softirq_work_name,
1167};
1168
1169static struct kwork_class kwork_workqueue;
1170static int process_workqueue_activate_work_event(const struct perf_tool *tool,
1171						 struct evsel *evsel,
1172						 struct perf_sample *sample,
1173						 struct machine *machine)
1174{
1175	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1176
1177	if (kwork->tp_handler->raise_event)
1178		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
1179						    evsel, sample, machine);
1180
1181	return 0;
1182}
1183
1184static int process_workqueue_execute_start_event(const struct perf_tool *tool,
1185						 struct evsel *evsel,
1186						 struct perf_sample *sample,
1187						 struct machine *machine)
1188{
1189	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1190
1191	if (kwork->tp_handler->entry_event)
1192		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
1193						    evsel, sample, machine);
1194
1195	return 0;
1196}
1197
1198static int process_workqueue_execute_end_event(const struct perf_tool *tool,
1199					       struct evsel *evsel,
1200					       struct perf_sample *sample,
1201					       struct machine *machine)
1202{
1203	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1204
1205	if (kwork->tp_handler->exit_event)
1206		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
1207						   evsel, sample, machine);
1208
1209	return 0;
1210}
1211
1212const struct evsel_str_handler workqueue_tp_handlers[] = {
1213	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
1214	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
1215	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
1216};
1217
1218static int workqueue_class_init(struct kwork_class *class,
1219				struct perf_session *session)
1220{
1221	if (perf_session__set_tracepoints_handlers(session,
1222						   workqueue_tp_handlers)) {
1223		pr_err("Failed to set workqueue tracepoints handlers\n");
1224		return -1;
1225	}
1226
1227	class->work_root = RB_ROOT_CACHED;
1228	return 0;
1229}
1230
1231static void workqueue_work_init(struct perf_kwork *kwork __maybe_unused,
1232				struct kwork_class *class,
1233				struct kwork_work *work,
1234				enum kwork_trace_type src_type __maybe_unused,
1235				struct evsel *evsel,
1236				struct perf_sample *sample,
1237				struct machine *machine)
1238{
1239	char *modp = NULL;
1240	unsigned long long function_addr = evsel__intval(evsel,
1241							 sample, "function");
1242
1243	work->class = class;
1244	work->cpu = sample->cpu;
1245	work->id = evsel__intval(evsel, sample, "work");
1246	work->name = function_addr == 0 ? NULL :
1247		machine__resolve_kernel_addr(machine, &function_addr, &modp);
1248}
1249
1250static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1251{
1252	if (work->name != NULL)
1253		snprintf(buf, len, "(w)%s", work->name);
1254	else
1255		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1256}
1257
1258static struct kwork_class kwork_workqueue = {
1259	.name           = "workqueue",
1260	.type           = KWORK_CLASS_WORKQUEUE,
1261	.nr_tracepoints = 3,
1262	.tp_handlers    = workqueue_tp_handlers,
1263	.class_init     = workqueue_class_init,
1264	.work_init      = workqueue_work_init,
1265	.work_name      = workqueue_work_name,
1266};
1267
1268static struct kwork_class kwork_sched;
1269static int process_sched_switch_event(const struct perf_tool *tool,
1270				      struct evsel *evsel,
1271				      struct perf_sample *sample,
1272				      struct machine *machine)
1273{
1274	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1275
1276	if (kwork->tp_handler->sched_switch_event)
1277		return kwork->tp_handler->sched_switch_event(kwork, &kwork_sched,
1278							     evsel, sample, machine);
1279	return 0;
1280}
1281
1282const struct evsel_str_handler sched_tp_handlers[] = {
1283	{ "sched:sched_switch",  process_sched_switch_event, },
1284};
1285
1286static int sched_class_init(struct kwork_class *class,
1287			    struct perf_session *session)
1288{
1289	if (perf_session__set_tracepoints_handlers(session,
1290						   sched_tp_handlers)) {
1291		pr_err("Failed to set sched tracepoints handlers\n");
1292		return -1;
1293	}
1294
1295	class->work_root = RB_ROOT_CACHED;
1296	return 0;
1297}
1298
1299static void sched_work_init(struct perf_kwork *kwork __maybe_unused,
1300			    struct kwork_class *class,
1301			    struct kwork_work *work,
1302			    enum kwork_trace_type src_type,
1303			    struct evsel *evsel,
1304			    struct perf_sample *sample,
1305			    struct machine *machine __maybe_unused)
1306{
1307	work->class = class;
1308	work->cpu = sample->cpu;
1309
1310	if (src_type == KWORK_TRACE_EXIT) {
1311		work->id = evsel__intval(evsel, sample, "prev_pid");
1312		work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
1313	} else if (src_type == KWORK_TRACE_ENTRY) {
1314		work->id = evsel__intval(evsel, sample, "next_pid");
1315		work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
1316	}
1317}
1318
1319static void sched_work_name(struct kwork_work *work, char *buf, int len)
1320{
1321	snprintf(buf, len, "%s", work->name);
1322}
1323
1324static struct kwork_class kwork_sched = {
1325	.name		= "sched",
1326	.type		= KWORK_CLASS_SCHED,
1327	.nr_tracepoints	= ARRAY_SIZE(sched_tp_handlers),
1328	.tp_handlers	= sched_tp_handlers,
1329	.class_init	= sched_class_init,
1330	.work_init	= sched_work_init,
1331	.work_name	= sched_work_name,
1332};
1333
1334static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
1335	[KWORK_CLASS_IRQ]       = &kwork_irq,
1336	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
1337	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
1338	[KWORK_CLASS_SCHED]     = &kwork_sched,
1339};
1340
1341static void print_separator(int len)
1342{
1343	printf(" %.*s\n", len, graph_dotted_line);
1344}
1345
1346static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1347{
1348	int ret = 0;
1349	char kwork_name[PRINT_KWORK_NAME_WIDTH];
1350	char max_runtime_start[32], max_runtime_end[32];
1351	char max_latency_start[32], max_latency_end[32];
1352
1353	printf(" ");
1354
1355	/*
1356	 * kwork name
1357	 */
1358	if (work->class && work->class->work_name) {
1359		work->class->work_name(work, kwork_name,
1360				       PRINT_KWORK_NAME_WIDTH);
1361		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
1362	} else {
1363		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
1364	}
1365
1366	/*
1367	 * cpu
1368	 */
1369	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1370
1371	/*
1372	 * total runtime
1373	 */
1374	if (kwork->report == KWORK_REPORT_RUNTIME) {
1375		ret += printf(" %*.*f ms |",
1376			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1377			      (double)work->total_runtime / NSEC_PER_MSEC);
1378	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1379		ret += printf(" %*.*f ms |",
1380			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1381			      (double)work->total_latency /
1382			      work->nr_atoms / NSEC_PER_MSEC);
1383	}
1384
1385	/*
1386	 * count
1387	 */
1388	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1389
1390	/*
1391	 * max runtime, max runtime start, max runtime end
1392	 */
1393	if (kwork->report == KWORK_REPORT_RUNTIME) {
1394		timestamp__scnprintf_usec(work->max_runtime_start,
1395					  max_runtime_start,
1396					  sizeof(max_runtime_start));
1397		timestamp__scnprintf_usec(work->max_runtime_end,
1398					  max_runtime_end,
1399					  sizeof(max_runtime_end));
1400		ret += printf(" %*.*f ms | %*s s | %*s s |",
1401			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1402			      (double)work->max_runtime / NSEC_PER_MSEC,
1403			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
1404			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
1405	}
1406	/*
1407	 * max delay, max delay start, max delay end
1408	 */
1409	else if (kwork->report == KWORK_REPORT_LATENCY) {
1410		timestamp__scnprintf_usec(work->max_latency_start,
1411					  max_latency_start,
1412					  sizeof(max_latency_start));
1413		timestamp__scnprintf_usec(work->max_latency_end,
1414					  max_latency_end,
1415					  sizeof(max_latency_end));
1416		ret += printf(" %*.*f ms | %*s s | %*s s |",
1417			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1418			      (double)work->max_latency / NSEC_PER_MSEC,
1419			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
1420			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
1421	}
1422
1423	printf("\n");
1424	return ret;
1425}
1426
1427static int report_print_header(struct perf_kwork *kwork)
1428{
1429	int ret;
1430
1431	printf("\n ");
1432	ret = printf(" %-*s | %-*s |",
1433		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
1434		     PRINT_CPU_WIDTH, "Cpu");
1435
1436	if (kwork->report == KWORK_REPORT_RUNTIME) {
1437		ret += printf(" %-*s |",
1438			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
1439	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1440		ret += printf(" %-*s |",
1441			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
1442	}
1443
1444	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
1445
1446	if (kwork->report == KWORK_REPORT_RUNTIME) {
1447		ret += printf(" %-*s | %-*s | %-*s |",
1448			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
1449			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
1450			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
1451	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1452		ret += printf(" %-*s | %-*s | %-*s |",
1453			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
1454			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
1455			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
1456	}
1457
1458	printf("\n");
1459	print_separator(ret);
1460	return ret;
1461}
1462
1463static void timehist_print_header(void)
1464{
1465	/*
1466	 * header row
1467	 */
1468	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1469	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
1470	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
1471	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
1472	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
1473	       PRINT_RUNTIME_WIDTH, "Runtime",
1474	       PRINT_RUNTIME_WIDTH, "Delaytime");
1475
1476	/*
1477	 * units row
1478	 */
1479	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1480	       PRINT_TIMESTAMP_WIDTH, "",
1481	       PRINT_TIMESTAMP_WIDTH, "",
1482	       PRINT_TIMEHIST_CPU_WIDTH, "",
1483	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
1484	       PRINT_RUNTIME_WIDTH, "(msec)",
1485	       PRINT_RUNTIME_WIDTH, "(msec)");
1486
1487	/*
1488	 * separator
1489	 */
1490	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
1491	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1492	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1493	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
1494	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
1495	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
1496	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
1497}
1498
1499static void print_summary(struct perf_kwork *kwork)
1500{
1501	u64 time = kwork->timeend - kwork->timestart;
1502
1503	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
1504	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
1505	       (double)kwork->all_runtime / NSEC_PER_MSEC,
1506	       time == 0 ? 0 : (double)kwork->all_runtime / time);
1507	printf("  Total time span (msec) : %9.3f\n",
1508	       (double)time / NSEC_PER_MSEC);
1509}
1510
1511static unsigned long long nr_list_entry(struct list_head *head)
1512{
1513	struct list_head *pos;
1514	unsigned long long n = 0;
1515
1516	list_for_each(pos, head)
1517		n++;
1518
1519	return n;
1520}
1521
1522static void print_skipped_events(struct perf_kwork *kwork)
1523{
1524	int i;
1525	const char *const kwork_event_str[] = {
1526		[KWORK_TRACE_RAISE] = "raise",
1527		[KWORK_TRACE_ENTRY] = "entry",
1528		[KWORK_TRACE_EXIT]  = "exit",
1529	};
1530
1531	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1532	    (kwork->nr_events != 0)) {
1533		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
1534		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1535		       (double)kwork->nr_events * 100.0,
1536		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1537
1538		for (i = 0; i < KWORK_TRACE_MAX; i++) {
1539			printf("%" PRIu64 " %s%s",
1540			       kwork->nr_skipped_events[i],
1541			       kwork_event_str[i],
1542			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
1543		}
1544	}
1545
1546	if (verbose > 0)
1547		printf("  INFO: use %lld atom pages\n",
1548		       nr_list_entry(&kwork->atom_page_list));
1549}
1550
1551static void print_bad_events(struct perf_kwork *kwork)
1552{
1553	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1554		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1555		       (double)kwork->nr_lost_events /
1556		       (double)kwork->nr_events * 100.0,
1557		       kwork->nr_lost_events, kwork->nr_events,
1558		       kwork->nr_lost_chunks);
1559	}
1560}
1561
1562const char *graph_load = "||||||||||||||||||||||||||||||||||||||||||||||||";
1563const char *graph_idle = "                                                ";
1564static void top_print_per_cpu_load(struct perf_kwork *kwork)
1565{
1566	int i, load_width;
1567	u64 total, load, load_ratio;
1568	struct kwork_top_stat *stat = &kwork->top_stat;
1569
1570	for (i = 0; i < MAX_NR_CPUS; i++) {
1571		total = stat->cpus_runtime[i].total;
1572		load = stat->cpus_runtime[i].load;
1573		if (test_bit(i, stat->all_cpus_bitmap) && total) {
1574			load_ratio = load * 10000 / total;
1575			load_width = PRINT_CPU_USAGE_HIST_WIDTH *
1576				load_ratio / 10000;
1577
1578			printf("%%Cpu%-*d[%.*s%.*s %*.*f%%]\n",
1579			       PRINT_CPU_WIDTH, i,
1580			       load_width, graph_load,
1581			       PRINT_CPU_USAGE_HIST_WIDTH - load_width,
1582			       graph_idle,
1583			       PRINT_CPU_USAGE_WIDTH,
1584			       PRINT_CPU_USAGE_DECIMAL_WIDTH,
1585			       (double)load_ratio / 100);
1586		}
1587	}
1588}
1589
1590static void top_print_cpu_usage(struct perf_kwork *kwork)
1591{
1592	struct kwork_top_stat *stat = &kwork->top_stat;
1593	u64 idle_time = stat->cpus_runtime[MAX_NR_CPUS].idle;
1594	u64 hardirq_time = stat->cpus_runtime[MAX_NR_CPUS].irq;
1595	u64 softirq_time = stat->cpus_runtime[MAX_NR_CPUS].softirq;
1596	int cpus_nr = bitmap_weight(stat->all_cpus_bitmap, MAX_NR_CPUS);
1597	u64 cpus_total_time = stat->cpus_runtime[MAX_NR_CPUS].total;
1598
1599	printf("Total  : %*.*f ms, %d cpus\n",
1600	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1601	       (double)cpus_total_time / NSEC_PER_MSEC,
1602	       cpus_nr);
1603
1604	printf("%%Cpu(s): %*.*f%% id, %*.*f%% hi, %*.*f%% si\n",
1605	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1606	       cpus_total_time ? (double)idle_time * 100 / cpus_total_time : 0,
1607
1608	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1609	       cpus_total_time ? (double)hardirq_time * 100 / cpus_total_time : 0,
1610
1611	       PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1612	       cpus_total_time ? (double)softirq_time * 100 / cpus_total_time : 0);
1613
1614	top_print_per_cpu_load(kwork);
1615}
1616
1617static void top_print_header(struct perf_kwork *kwork __maybe_unused)
1618{
1619	int ret;
1620
1621	printf("\n ");
1622	ret = printf(" %*s %s%*s%s %*s  %*s  %-*s",
1623		     PRINT_PID_WIDTH, "PID",
1624
1625		     kwork->use_bpf ? " " : "",
1626		     kwork->use_bpf ? PRINT_PID_WIDTH : 0,
1627		     kwork->use_bpf ? "SPID" : "",
1628		     kwork->use_bpf ? " " : "",
1629
1630		     PRINT_CPU_USAGE_WIDTH, "%CPU",
1631		     PRINT_RUNTIME_HEADER_WIDTH + RPINT_DECIMAL_WIDTH, "RUNTIME",
1632		     PRINT_TASK_NAME_WIDTH, "COMMAND");
1633	printf("\n ");
1634	print_separator(ret);
1635}
1636
1637static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
1638{
1639	int ret = 0;
1640
1641	printf(" ");
1642
1643	/*
1644	 * pid
1645	 */
1646	ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id);
1647
1648	/*
1649	 * tgid
1650	 */
1651	if (kwork->use_bpf)
1652		ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid);
1653
1654	/*
1655	 * cpu usage
1656	 */
1657	ret += printf(" %*.*f ",
1658		      PRINT_CPU_USAGE_WIDTH, PRINT_CPU_USAGE_DECIMAL_WIDTH,
1659		      (double)work->cpu_usage / 100);
1660
1661	/*
1662	 * total runtime
1663	 */
1664	ret += printf(" %*.*f ms ",
1665		      PRINT_RUNTIME_WIDTH + RPINT_DECIMAL_WIDTH, RPINT_DECIMAL_WIDTH,
1666		      (double)work->total_runtime / NSEC_PER_MSEC);
1667
1668	/*
1669	 * command
1670	 */
1671	if (kwork->use_bpf)
1672		ret += printf(" %s%s%s",
1673			      work->is_kthread ? "[" : "",
1674			      work->name,
1675			      work->is_kthread ? "]" : "");
1676	else
1677		ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
1678
1679	printf("\n");
1680	return ret;
1681}
1682
1683static void work_sort(struct perf_kwork *kwork,
1684		      struct kwork_class *class, struct rb_root_cached *root)
1685{
1686	struct rb_node *node;
1687	struct kwork_work *data;
1688
1689	pr_debug("Sorting %s ...\n", class->name);
1690	for (;;) {
1691		node = rb_first_cached(root);
1692		if (!node)
1693			break;
1694
1695		rb_erase_cached(node, root);
1696		data = rb_entry(node, struct kwork_work, node);
1697		work_insert(&kwork->sorted_work_root,
1698			       data, &kwork->sort_list);
1699	}
1700}
1701
1702static void perf_kwork__sort(struct perf_kwork *kwork)
1703{
1704	struct kwork_class *class;
1705
1706	list_for_each_entry(class, &kwork->class_list, list)
1707		work_sort(kwork, class, &class->work_root);
1708}
1709
1710static int perf_kwork__check_config(struct perf_kwork *kwork,
1711				    struct perf_session *session)
1712{
1713	int ret;
1714	struct evsel *evsel;
1715	struct kwork_class *class;
1716
1717	static struct trace_kwork_handler report_ops = {
1718		.entry_event = report_entry_event,
1719		.exit_event  = report_exit_event,
1720	};
1721	static struct trace_kwork_handler latency_ops = {
1722		.raise_event = latency_raise_event,
1723		.entry_event = latency_entry_event,
1724	};
1725	static struct trace_kwork_handler timehist_ops = {
1726		.raise_event = timehist_raise_event,
1727		.entry_event = timehist_entry_event,
1728		.exit_event  = timehist_exit_event,
1729	};
1730	static struct trace_kwork_handler top_ops = {
1731		.entry_event        = timehist_entry_event,
1732		.exit_event         = top_exit_event,
1733		.sched_switch_event = top_sched_switch_event,
1734	};
1735
1736	switch (kwork->report) {
1737	case KWORK_REPORT_RUNTIME:
1738		kwork->tp_handler = &report_ops;
1739		break;
1740	case KWORK_REPORT_LATENCY:
1741		kwork->tp_handler = &latency_ops;
1742		break;
1743	case KWORK_REPORT_TIMEHIST:
1744		kwork->tp_handler = &timehist_ops;
1745		break;
1746	case KWORK_REPORT_TOP:
1747		kwork->tp_handler = &top_ops;
1748		break;
1749	default:
1750		pr_debug("Invalid report type %d\n", kwork->report);
1751		return -1;
1752	}
1753
1754	list_for_each_entry(class, &kwork->class_list, list)
1755		if ((class->class_init != NULL) &&
1756		    (class->class_init(class, session) != 0))
1757			return -1;
1758
1759	if (kwork->cpu_list != NULL) {
1760		ret = perf_session__cpu_bitmap(session,
1761					       kwork->cpu_list,
1762					       kwork->cpu_bitmap);
1763		if (ret < 0) {
1764			pr_err("Invalid cpu bitmap\n");
1765			return -1;
1766		}
1767	}
1768
1769	if (kwork->time_str != NULL) {
1770		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1771		if (ret != 0) {
1772			pr_err("Invalid time span\n");
1773			return -1;
1774		}
1775	}
1776
1777	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
1778		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1779			pr_debug("Samples do not have callchains\n");
1780			kwork->show_callchain = 0;
1781			symbol_conf.use_callchain = 0;
1782		}
1783	}
1784
1785	return 0;
1786}
1787
1788static int perf_kwork__read_events(struct perf_kwork *kwork)
1789{
1790	int ret = -1;
1791	struct perf_session *session = NULL;
1792
1793	struct perf_data data = {
1794		.path  = input_name,
1795		.mode  = PERF_DATA_MODE_READ,
1796		.force = kwork->force,
1797	};
1798
1799	session = perf_session__new(&data, &kwork->tool);
1800	if (IS_ERR(session)) {
1801		pr_debug("Error creating perf session\n");
1802		return PTR_ERR(session);
1803	}
1804
1805	symbol__init(&session->header.env);
1806
1807	if (perf_kwork__check_config(kwork, session) != 0)
1808		goto out_delete;
1809
1810	if (session->tevent.pevent &&
1811	    tep_set_function_resolver(session->tevent.pevent,
1812				      machine__resolve_kernel_addr,
1813				      &session->machines.host) < 0) {
1814		pr_err("Failed to set libtraceevent function resolver\n");
1815		goto out_delete;
1816	}
1817
1818	if (kwork->report == KWORK_REPORT_TIMEHIST)
1819		timehist_print_header();
1820
1821	ret = perf_session__process_events(session);
1822	if (ret) {
1823		pr_debug("Failed to process events, error %d\n", ret);
1824		goto out_delete;
1825	}
1826
1827	kwork->nr_events      = session->evlist->stats.nr_events[0];
1828	kwork->nr_lost_events = session->evlist->stats.total_lost;
1829	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1830
1831out_delete:
1832	perf_session__delete(session);
1833	return ret;
1834}
1835
1836static void process_skipped_events(struct perf_kwork *kwork,
1837				   struct kwork_work *work)
1838{
1839	int i;
1840	unsigned long long count;
1841
1842	for (i = 0; i < KWORK_TRACE_MAX; i++) {
1843		count = nr_list_entry(&work->atom_list[i]);
1844		kwork->nr_skipped_events[i] += count;
1845		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1846	}
1847}
1848
1849struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1850				       struct kwork_class *class,
1851				       struct kwork_work *key)
1852{
1853	struct kwork_work *work = NULL;
1854
1855	work = work_new(key);
1856	if (work == NULL)
1857		return NULL;
1858
1859	work_insert(&class->work_root, work, &kwork->cmp_id);
1860	return work;
1861}
1862
1863static void sig_handler(int sig)
1864{
1865	/*
1866	 * Simply capture termination signal so that
1867	 * the program can continue after pause returns
1868	 */
1869	pr_debug("Capture signal %d\n", sig);
1870}
1871
1872static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1873{
1874	int ret;
1875
1876	signal(SIGINT, sig_handler);
1877	signal(SIGTERM, sig_handler);
1878
1879	ret = perf_kwork__trace_prepare_bpf(kwork);
1880	if (ret)
1881		return -1;
1882
1883	printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
1884
1885	perf_kwork__trace_start();
1886
1887	/*
1888	 * a simple pause, wait here for stop signal
1889	 */
1890	pause();
1891
1892	perf_kwork__trace_finish();
1893
1894	perf_kwork__report_read_bpf(kwork);
1895
1896	perf_kwork__report_cleanup_bpf();
1897
1898	return 0;
1899}
1900
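/*
 * Gather events (live via BPF or from perf.data), sort the works, and
 * print the report table followed by the optional summary and the
 * lost/skipped event diagnostics.
 */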
1901static int perf_kwork__report(struct perf_kwork *kwork)
1902{
1903	int ret;
1904	struct rb_node *next;
1905	struct kwork_work *work;
1906
1907	if (kwork->use_bpf)
1908		ret = perf_kwork__report_bpf(kwork);
1909	else
1910		ret = perf_kwork__read_events(kwork);
1911
1912	if (ret != 0)
1913		return -1;
1914
1915	perf_kwork__sort(kwork);
1916
1917	setup_pager();
1918
1919	ret = report_print_header(kwork);
1920	next = rb_first_cached(&kwork->sorted_work_root);
1921	while (next) {
1922		work = rb_entry(next, struct kwork_work, node);
1923		process_skipped_events(kwork, work);
1924
1925		if (work->nr_atoms != 0) {
1926			report_print_work(kwork, work);
1927			if (kwork->summary) {
1928				kwork->all_runtime += work->total_runtime;
1929				kwork->all_count += work->nr_atoms;
1930			}
1931		}
1932		next = rb_next(next);
1933	}
1934	print_separator(ret);
1935
1936	if (kwork->summary) {
1937		print_summary(kwork);
1938		print_separator(ret);
1939	}
1940
1941	print_bad_events(kwork);
1942	print_skipped_events(kwork);
1943	printf("\n");
1944
1945	return 0;
1946}
1947
1948typedef int (*tracepoint_handler)(const struct perf_tool *tool,
1949				  struct evsel *evsel,
1950				  struct perf_sample *sample,
1951				  struct machine *machine);
1952
1953static int perf_kwork__process_tracepoint_sample(const struct perf_tool *tool,
1954						 union perf_event *event __maybe_unused,
1955						 struct perf_sample *sample,
1956						 struct evsel *evsel,
1957						 struct machine *machine)
1958{
1959	int err = 0;
1960
1961	if (evsel->handler != NULL) {
1962		tracepoint_handler f = evsel->handler;
1963
1964		err = f(tool, evsel, sample, machine);
1965	}
1966
1967	return err;
1968}
1969
1970static int perf_kwork__timehist(struct perf_kwork *kwork)
1971{
1972	/*
1973	 * event handlers for timehist option
1974	 */
1975	kwork->tool.comm	 = perf_event__process_comm;
1976	kwork->tool.exit	 = perf_event__process_exit;
1977	kwork->tool.fork	 = perf_event__process_fork;
1978	kwork->tool.attr	 = perf_event__process_attr;
1979	kwork->tool.tracing_data = perf_event__process_tracing_data;
1980	kwork->tool.build_id	 = perf_event__process_build_id;
1981	kwork->tool.ordered_events = true;
1982	kwork->tool.ordering_requires_timestamps = true;
1983	symbol_conf.use_callchain = kwork->show_callchain;
1984
1985	if (symbol__validate_sym_arguments()) {
1986		pr_err("Failed to validate sym arguments\n");
1987		return -1;
1988	}
1989
1990	setup_pager();
1991
1992	return perf_kwork__read_events(kwork);
1993}
1994
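/*
 * Accumulate the sched runtime of every work into its CPU's total;
 * the MAX_NR_CPUS slot holds the sum over all CPUs.
 */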
1995static void top_calc_total_runtime(struct perf_kwork *kwork)
1996{
1997	struct kwork_class *class;
1998	struct kwork_work *work;
1999	struct rb_node *next;
2000	struct kwork_top_stat *stat = &kwork->top_stat;
2001
2002	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2003	if (!class)
2004		return;
2005
2006	next = rb_first_cached(&class->work_root);
2007	while (next) {
2008		work = rb_entry(next, struct kwork_work, node);
2009		BUG_ON(work->cpu >= MAX_NR_CPUS);
2010		stat->cpus_runtime[work->cpu].total += work->total_runtime;
2011		stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;
2012		next = rb_next(next);
2013	}
2014}
2015
2016static void top_calc_idle_time(struct perf_kwork *kwork,
2017				struct kwork_work *work)
2018{
2019	struct kwork_top_stat *stat = &kwork->top_stat;
2020
2021	if (work->id == 0) {
2022		stat->cpus_runtime[work->cpu].idle += work->total_runtime;
2023		stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;
2024	}
2025}
2026
2027static void top_calc_irq_runtime(struct perf_kwork *kwork,
2028				 enum kwork_class_type type,
2029				 struct kwork_work *work)
2030{
2031	struct kwork_top_stat *stat = &kwork->top_stat;
2032
2033	if (type == KWORK_CLASS_IRQ) {
2034		stat->cpus_runtime[work->cpu].irq += work->total_runtime;
2035		stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
2036	} else if (type == KWORK_CLASS_SOFTIRQ) {
2037		stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
2038		stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime;
2039	}
2040}
2041
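/*
 * Subtract hardirq/softirq time charged to this task from its
 * runtime and account it to the per-CPU irq/softirq counters
 * instead.
 */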
2042static void top_subtract_irq_runtime(struct perf_kwork *kwork,
2043				     struct kwork_work *work)
2044{
2045	struct kwork_class *class;
2046	struct kwork_work *data;
2047	unsigned int i;
2048	int irq_class_list[] = {KWORK_CLASS_IRQ, KWORK_CLASS_SOFTIRQ};
2049
2050	for (i = 0; i < ARRAY_SIZE(irq_class_list); i++) {
2051		class = get_kwork_class(kwork, irq_class_list[i]);
2052		if (!class)
2053			continue;
2054
2055		data = find_work_by_id(&class->work_root,
2056				       work->id, work->cpu);
2057		if (!data)
2058			continue;
2059
2060		if (work->total_runtime > data->total_runtime) {
2061			work->total_runtime -= data->total_runtime;
2062			top_calc_irq_runtime(kwork, irq_class_list[i], data);
2063		}
2064	}
2065}
2066
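/*
 * Compute each work's CPU usage (in units of 0.01%) relative to its
 * CPU's total runtime, after removing interrupt time, and collect
 * per-CPU idle time.
 */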
2067static void top_calc_cpu_usage(struct perf_kwork *kwork)
2068{
2069	struct kwork_class *class;
2070	struct kwork_work *work;
2071	struct rb_node *next;
2072	struct kwork_top_stat *stat = &kwork->top_stat;
2073
2074	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2075	if (!class)
2076		return;
2077
2078	next = rb_first_cached(&class->work_root);
2079	while (next) {
2080		work = rb_entry(next, struct kwork_work, node);
2081
2082		if (work->total_runtime == 0)
2083			goto next;
2084
2085		__set_bit(work->cpu, stat->all_cpus_bitmap);
2086
2087		top_subtract_irq_runtime(kwork, work);
2088
2089		work->cpu_usage = work->total_runtime * 10000 /
2090			stat->cpus_runtime[work->cpu].total;
2091
2092		top_calc_idle_time(kwork, work);
2093next:
2094		next = rb_next(next);
2095	}
2096}
2097
2098static void top_calc_load_runtime(struct perf_kwork *kwork,
2099				  struct kwork_work *work)
2100{
2101	struct kwork_top_stat *stat = &kwork->top_stat;
2102
2103	if (work->id != 0) {
2104		stat->cpus_runtime[work->cpu].load += work->total_runtime;
2105		stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
2106	}
2107}
2108
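/*
 * Merge per-CPU entries belonging to the same task id into a single
 * work, summing runtime and CPU usage; idle entries (id == 0) are
 * kept per CPU.  The merged tree is then sorted for output.
 */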
2109static void top_merge_tasks(struct perf_kwork *kwork)
2110{
2111	struct kwork_work *merged_work, *data;
2112	struct kwork_class *class;
2113	struct rb_node *node;
2114	int cpu;
2115	struct rb_root_cached merged_root = RB_ROOT_CACHED;
2116
2117	class = get_kwork_class(kwork, KWORK_CLASS_SCHED);
2118	if (!class)
2119		return;
2120
2121	for (;;) {
2122		node = rb_first_cached(&class->work_root);
2123		if (!node)
2124			break;
2125
2126		rb_erase_cached(node, &class->work_root);
2127		data = rb_entry(node, struct kwork_work, node);
2128
2129		if (!profile_name_match(kwork, data))
2130			continue;
2131
2132		cpu = data->cpu;
2133		merged_work = find_work_by_id(&merged_root, data->id,
2134					      data->id == 0 ? cpu : -1);
2135		if (!merged_work) {
2136			work_insert(&merged_root, data, &kwork->cmp_id);
2137		} else {
2138			merged_work->total_runtime += data->total_runtime;
2139			merged_work->cpu_usage += data->cpu_usage;
2140		}
2141
2142		top_calc_load_runtime(kwork, data);
2143	}
2144
2145	work_sort(kwork, class, &merged_root);
2146}
2147
2148static void perf_kwork__top_report(struct perf_kwork *kwork)
2149{
2150	struct kwork_work *work;
2151	struct rb_node *next;
2152
2153	printf("\n");
2154
2155	top_print_cpu_usage(kwork);
2156	top_print_header(kwork);
2157	next = rb_first_cached(&kwork->sorted_work_root);
2158	while (next) {
2159		work = rb_entry(next, struct kwork_work, node);
2160		process_skipped_events(kwork, work);
2161
2162		if (work->total_runtime == 0)
2163			goto next;
2164
2165		top_print_work(kwork, work);
2166
2167next:
2168		next = rb_next(next);
2169	}
2170
2171	printf("\n");
2172}
2173
2174static int perf_kwork__top_bpf(struct perf_kwork *kwork)
2175{
2176	int ret;
2177
2178	signal(SIGINT, sig_handler);
2179	signal(SIGTERM, sig_handler);
2180
2181	ret = perf_kwork__top_prepare_bpf(kwork);
2182	if (ret)
2183		return -1;
2184
2185	printf("Starting trace, Hit <Ctrl+C> to stop and report\n");
2186
2187	perf_kwork__top_start();
2188
2189	/*
2190	 * A simple pause: wait here for the stop signal
2191	 */
2192	pause();
2193
2194	perf_kwork__top_finish();
2195
2196	perf_kwork__top_read_bpf(kwork);
2197
2198	perf_kwork__top_cleanup_bpf();
2199
2200	return 0;
2201
2202}
2203
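/*
 * Top report: allocate per-CPU runtime slots (plus one for the
 * overall totals), gather data via BPF or from the perf data file,
 * compute CPU usage, merge per-task entries and print the report.
 */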
2204static int perf_kwork__top(struct perf_kwork *kwork)
2205{
2206	struct __top_cpus_runtime *cpus_runtime;
2207	int ret = 0;
2208
2209	cpus_runtime = zalloc(sizeof(struct __top_cpus_runtime) * (MAX_NR_CPUS + 1));
2210	if (!cpus_runtime)
2211		return -1;
2212
2213	kwork->top_stat.cpus_runtime = cpus_runtime;
2214	bitmap_zero(kwork->top_stat.all_cpus_bitmap, MAX_NR_CPUS);
2215
2216	if (kwork->use_bpf)
2217		ret = perf_kwork__top_bpf(kwork);
2218	else
2219		ret = perf_kwork__read_events(kwork);
2220
2221	if (ret)
2222		goto out;
2223
2224	top_calc_total_runtime(kwork);
2225	top_calc_cpu_usage(kwork);
2226	top_merge_tasks(kwork);
2227
2228	setup_pager();
2229
2230	perf_kwork__top_report(kwork);
2231
2232out:
2233	zfree(&kwork->top_stat.cpus_runtime);
2234	return ret;
2235}
2236
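/*
 * Parse the comma separated --kwork list (default
 * "irq, softirq, workqueue") and link each matching supported class
 * into kwork->class_list.
 */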
2237static void setup_event_list(struct perf_kwork *kwork,
2238			     const struct option *options,
2239			     const char * const usage_msg[])
2240{
2241	int i;
2242	struct kwork_class *class;
2243	char *tmp, *tok, *str;
2244
2245	/*
2246	 * set default events list if not specified
2247	 */
2248	if (kwork->event_list_str == NULL)
2249		kwork->event_list_str = "irq, softirq, workqueue";
2250
2251	str = strdup(kwork->event_list_str);
2252	for (tok = strtok_r(str, ", ", &tmp);
2253	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
2254		for (i = 0; i < KWORK_CLASS_MAX; i++) {
2255			class = kwork_class_supported_list[i];
2256			if (strcmp(tok, class->name) == 0) {
2257				list_add_tail(&class->list, &kwork->class_list);
2258				break;
2259			}
2260		}
2261		if (i == KWORK_CLASS_MAX) {
2262			usage_with_options_msg(usage_msg, options,
2263					       "Unknown --event key: `%s'", tok);
2264		}
2265	}
2266	free(str);
2267
2268	pr_debug("Config event list:");
2269	list_for_each_entry(class, &kwork->class_list, list)
2270		pr_debug(" %s", class->name);
2271	pr_debug("\n");
2272}
2273
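/*
 * Build a "perf record" command line: the fixed arguments, one
 * "-e <tracepoint>" pair per tracepoint of each selected class,
 * then the user's extra arguments, and hand it over to cmd_record().
 */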
2274static int perf_kwork__record(struct perf_kwork *kwork,
2275			      int argc, const char **argv)
2276{
2277	const char **rec_argv;
2278	unsigned int rec_argc, i, j;
2279	struct kwork_class *class;
2280
2281	const char *const record_args[] = {
2282		"record",
2283		"-a",
2284		"-R",
2285		"-m", "1024",
2286		"-c", "1",
2287	};
2288
2289	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
2290
2291	list_for_each_entry(class, &kwork->class_list, list)
2292		rec_argc += 2 * class->nr_tracepoints;
2293
2294	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2295	if (rec_argv == NULL)
2296		return -ENOMEM;
2297
2298	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2299		rec_argv[i] = strdup(record_args[i]);
2300
2301	list_for_each_entry(class, &kwork->class_list, list) {
2302		for (j = 0; j < class->nr_tracepoints; j++) {
2303			rec_argv[i++] = strdup("-e");
2304			rec_argv[i++] = strdup(class->tp_handlers[j].name);
2305		}
2306	}
2307
2308	for (j = 1; j < (unsigned int)argc; j++, i++)
2309		rec_argv[i] = argv[j];
2310
2311	BUG_ON(i != rec_argc);
2312
2313	pr_debug("record comm: ");
2314	for (j = 0; j < rec_argc; j++)
2315		pr_debug("%s ", rec_argv[j]);
2316	pr_debug("\n");
2317
2318	return cmd_record(i, rec_argv);
2319}
2320
2321int cmd_kwork(int argc, const char **argv)
2322{
2323	static struct perf_kwork kwork = {
2324		.class_list          = LIST_HEAD_INIT(kwork.class_list),
2325		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
2326		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
2327		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
2328		.sorted_work_root    = RB_ROOT_CACHED,
2329		.tp_handler          = NULL,
2330		.profile_name        = NULL,
2331		.cpu_list            = NULL,
2332		.time_str            = NULL,
2333		.force               = false,
2334		.event_list_str      = NULL,
2335		.summary             = false,
2336		.sort_order          = NULL,
2337		.show_callchain      = false,
2338		.max_stack           = 5,
2339		.timestart           = 0,
2340		.timeend             = 0,
2341		.nr_events           = 0,
2342		.nr_lost_chunks      = 0,
2343		.nr_lost_events      = 0,
2344		.all_runtime         = 0,
2345		.all_count           = 0,
2346		.nr_skipped_events   = { 0 },
2347	};
2348	static const char default_report_sort_order[] = "runtime, max, count";
2349	static const char default_latency_sort_order[] = "avg, max, count";
2350	static const char default_top_sort_order[] = "rate, runtime";
2351	const struct option kwork_options[] = {
2352	OPT_INCR('v', "verbose", &verbose,
2353		 "be more verbose (show symbol address, etc)"),
2354	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
2355		    "dump raw trace in ASCII"),
2356	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
2357		   "list of kwork to profile (irq, softirq, workqueue, sched, etc)"),
2358	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
2359	OPT_END()
2360	};
2361	const struct option report_options[] = {
2362	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2363		   "sort by key(s): runtime, max, count"),
2364	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2365		   "list of cpus to profile"),
2366	OPT_STRING('n', "name", &kwork.profile_name, "name",
2367		   "event name to profile"),
2368	OPT_STRING(0, "time", &kwork.time_str, "str",
2369		   "Time span for analysis (start,stop)"),
2370	OPT_STRING('i', "input", &input_name, "file",
2371		   "input file name"),
2372	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
2373		    "Show summary with statistics"),
2374#ifdef HAVE_BPF_SKEL
2375	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2376		    "Use BPF to measure kwork runtime"),
2377#endif
2378	OPT_PARENT(kwork_options)
2379	};
2380	const struct option latency_options[] = {
2381	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2382		   "sort by key(s): avg, max, count"),
2383	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2384		   "list of cpus to profile"),
2385	OPT_STRING('n', "name", &kwork.profile_name, "name",
2386		   "event name to profile"),
2387	OPT_STRING(0, "time", &kwork.time_str, "str",
2388		   "Time span for analysis (start,stop)"),
2389	OPT_STRING('i', "input", &input_name, "file",
2390		   "input file name"),
2391#ifdef HAVE_BPF_SKEL
2392	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2393		    "Use BPF to measure kwork latency"),
2394#endif
2395	OPT_PARENT(kwork_options)
2396	};
2397	const struct option timehist_options[] = {
2398	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
2399		   "file", "vmlinux pathname"),
2400	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
2401		   "file", "kallsyms pathname"),
2402	OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
2403		    "Display call chains if present"),
2404	OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
2405		   "Maximum number of functions to display backtrace."),
2406	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
2407		    "Look for files with symbols relative to this directory"),
2408	OPT_STRING(0, "time", &kwork.time_str, "str",
2409		   "Time span for analysis (start,stop)"),
2410	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2411		   "list of cpus to profile"),
2412	OPT_STRING('n', "name", &kwork.profile_name, "name",
2413		   "event name to profile"),
2414	OPT_STRING('i', "input", &input_name, "file",
2415		   "input file name"),
2416	OPT_PARENT(kwork_options)
2417	};
2418	const struct option top_options[] = {
2419	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
2420		   "sort by key(s): rate, runtime, tid"),
2421	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
2422		   "list of cpus to profile"),
2423	OPT_STRING('n', "name", &kwork.profile_name, "name",
2424		   "event name to profile"),
2425	OPT_STRING(0, "time", &kwork.time_str, "str",
2426		   "Time span for analysis (start,stop)"),
2427	OPT_STRING('i', "input", &input_name, "file",
2428		   "input file name"),
2429#ifdef HAVE_BPF_SKEL
2430	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
2431		    "Use BPF to measure task cpu usage"),
2432#endif
2433	OPT_PARENT(kwork_options)
2434	};
2435	const char *kwork_usage[] = {
2436		NULL,
2437		NULL
2438	};
2439	const char * const report_usage[] = {
2440		"perf kwork report [<options>]",
2441		NULL
2442	};
2443	const char * const latency_usage[] = {
2444		"perf kwork latency [<options>]",
2445		NULL
2446	};
2447	const char * const timehist_usage[] = {
2448		"perf kwork timehist [<options>]",
2449		NULL
2450	};
2451	const char * const top_usage[] = {
2452		"perf kwork top [<options>]",
2453		NULL
2454	};
2455	const char *const kwork_subcommands[] = {
2456		"record", "report", "latency", "timehist", "top", NULL
2457	};
2458
2459	perf_tool__init(&kwork.tool, /*ordered_events=*/true);
2460	kwork.tool.mmap	  = perf_event__process_mmap;
2461	kwork.tool.mmap2  = perf_event__process_mmap2;
2462	kwork.tool.sample = perf_kwork__process_tracepoint_sample;
2463
2464	argc = parse_options_subcommand(argc, argv, kwork_options,
2465					kwork_subcommands, kwork_usage,
2466					PARSE_OPT_STOP_AT_NON_OPTION);
2467	if (!argc)
2468		usage_with_options(kwork_usage, kwork_options);
2469
2470	sort_dimension__add(&kwork, "id", &kwork.cmp_id);
2471
2472	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2473		setup_event_list(&kwork, kwork_options, kwork_usage);
2474		return perf_kwork__record(&kwork, argc, argv);
2475	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
2476		kwork.sort_order = default_report_sort_order;
2477		if (argc > 1) {
2478			argc = parse_options(argc, argv, report_options, report_usage, 0);
2479			if (argc)
2480				usage_with_options(report_usage, report_options);
2481		}
2482		kwork.report = KWORK_REPORT_RUNTIME;
2483		setup_sorting(&kwork, report_options, report_usage);
2484		setup_event_list(&kwork, kwork_options, kwork_usage);
2485		return perf_kwork__report(&kwork);
2486	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
2487		kwork.sort_order = default_latency_sort_order;
2488		if (argc > 1) {
2489			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
2490			if (argc)
2491				usage_with_options(latency_usage, latency_options);
2492		}
2493		kwork.report = KWORK_REPORT_LATENCY;
2494		setup_sorting(&kwork, latency_options, latency_usage);
2495		setup_event_list(&kwork, kwork_options, kwork_usage);
2496		return perf_kwork__report(&kwork);
2497	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
2498		if (argc > 1) {
2499			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
2500			if (argc)
2501				usage_with_options(timehist_usage, timehist_options);
2502		}
2503		kwork.report = KWORK_REPORT_TIMEHIST;
2504		setup_event_list(&kwork, kwork_options, kwork_usage);
2505		return perf_kwork__timehist(&kwork);
2506	} else if (strlen(argv[0]) > 2 && strstarts("top", argv[0])) {
2507		kwork.sort_order = default_top_sort_order;
2508		if (argc > 1) {
2509			argc = parse_options(argc, argv, top_options, top_usage, 0);
2510			if (argc)
2511				usage_with_options(top_usage, top_options);
2512		}
2513		kwork.report = KWORK_REPORT_TOP;
2514		if (!kwork.event_list_str)
2515			kwork.event_list_str = "sched, irq, softirq";
2516		setup_event_list(&kwork, kwork_options, kwork_usage);
2517		setup_sorting(&kwork, top_options, top_usage);
2518		return perf_kwork__top(&kwork);
2519	} else
2520		usage_with_options(kwork_usage, kwork_options);
2521
2522	/* free usage string allocated by parse_options_subcommand */
2523	free((void *)kwork_usage[0]);
2524
2525	return 0;
2526}
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * builtin-kwork.c
   4 *
   5 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
   6 */
   7
   8#include "builtin.h"
   9#include "perf.h"
  10
  11#include "util/data.h"
  12#include "util/evlist.h"
  13#include "util/evsel.h"
  14#include "util/header.h"
  15#include "util/kwork.h"
  16#include "util/debug.h"
  17#include "util/session.h"
  18#include "util/symbol.h"
  19#include "util/thread.h"
  20#include "util/string2.h"
  21#include "util/callchain.h"
  22#include "util/evsel_fprintf.h"
 
  23
  24#include <subcmd/pager.h>
  25#include <subcmd/parse-options.h>
  26#include <traceevent/event-parse.h>
  27
  28#include <errno.h>
  29#include <inttypes.h>
  30#include <signal.h>
  31#include <linux/err.h>
  32#include <linux/time64.h>
  33#include <linux/zalloc.h>
  34
  35/*
  36 * report header elements width
  37 */
  38#define PRINT_CPU_WIDTH 4
  39#define PRINT_COUNT_WIDTH 9
  40#define PRINT_RUNTIME_WIDTH 10
  41#define PRINT_LATENCY_WIDTH 10
  42#define PRINT_TIMESTAMP_WIDTH 17
  43#define PRINT_KWORK_NAME_WIDTH 30
  44#define RPINT_DECIMAL_WIDTH 3
  45#define PRINT_BRACKETPAIR_WIDTH 2
  46#define PRINT_TIME_UNIT_SEC_WIDTH 2
  47#define PRINT_TIME_UNIT_MESC_WIDTH 3
 
 
 
 
 
  48#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  49#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MESC_WIDTH)
  50#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
  51#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)
  52
  53struct sort_dimension {
  54	const char      *name;
  55	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
  56	struct          list_head list;
  57};
  58
  59static int id_cmp(struct kwork_work *l, struct kwork_work *r)
  60{
  61	if (l->cpu > r->cpu)
  62		return 1;
  63	if (l->cpu < r->cpu)
  64		return -1;
  65
  66	if (l->id > r->id)
  67		return 1;
  68	if (l->id < r->id)
  69		return -1;
  70
  71	return 0;
  72}
  73
  74static int count_cmp(struct kwork_work *l, struct kwork_work *r)
  75{
  76	if (l->nr_atoms > r->nr_atoms)
  77		return 1;
  78	if (l->nr_atoms < r->nr_atoms)
  79		return -1;
  80
  81	return 0;
  82}
  83
  84static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
  85{
  86	if (l->total_runtime > r->total_runtime)
  87		return 1;
  88	if (l->total_runtime < r->total_runtime)
  89		return -1;
  90
  91	return 0;
  92}
  93
  94static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
  95{
  96	if (l->max_runtime > r->max_runtime)
  97		return 1;
  98	if (l->max_runtime < r->max_runtime)
  99		return -1;
 100
 101	return 0;
 102}
 103
 104static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 105{
 106	u64 avgl, avgr;
 107
 108	if (!r->nr_atoms)
 109		return 1;
 110	if (!l->nr_atoms)
 111		return -1;
 112
 113	avgl = l->total_latency / l->nr_atoms;
 114	avgr = r->total_latency / r->nr_atoms;
 115
 116	if (avgl > avgr)
 117		return 1;
 118	if (avgl < avgr)
 119		return -1;
 120
 121	return 0;
 122}
 123
 124static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
 125{
 126	if (l->max_latency > r->max_latency)
 127		return 1;
 128	if (l->max_latency < r->max_latency)
 129		return -1;
 130
 131	return 0;
 132}
 133
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 134static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
 135			       const char *tok, struct list_head *list)
 136{
 137	size_t i;
 138	static struct sort_dimension max_sort_dimension = {
 139		.name = "max",
 140		.cmp  = max_runtime_cmp,
 141	};
 142	static struct sort_dimension id_sort_dimension = {
 143		.name = "id",
 144		.cmp  = id_cmp,
 145	};
 146	static struct sort_dimension runtime_sort_dimension = {
 147		.name = "runtime",
 148		.cmp  = runtime_cmp,
 149	};
 150	static struct sort_dimension count_sort_dimension = {
 151		.name = "count",
 152		.cmp  = count_cmp,
 153	};
 154	static struct sort_dimension avg_sort_dimension = {
 155		.name = "avg",
 156		.cmp  = avg_latency_cmp,
 157	};
 
 
 
 
 
 
 
 
 158	struct sort_dimension *available_sorts[] = {
 159		&id_sort_dimension,
 160		&max_sort_dimension,
 161		&count_sort_dimension,
 162		&runtime_sort_dimension,
 163		&avg_sort_dimension,
 
 
 164	};
 165
 166	if (kwork->report == KWORK_REPORT_LATENCY)
 167		max_sort_dimension.cmp = max_latency_cmp;
 168
 169	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
 170		if (!strcmp(available_sorts[i]->name, tok)) {
 171			list_add_tail(&available_sorts[i]->list, list);
 172			return 0;
 173		}
 174	}
 175
 176	return -1;
 177}
 178
 179static void setup_sorting(struct perf_kwork *kwork,
 180			  const struct option *options,
 181			  const char * const usage_msg[])
 182{
 183	char *tmp, *tok, *str = strdup(kwork->sort_order);
 184
 185	for (tok = strtok_r(str, ", ", &tmp);
 186	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
 187		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
 188			usage_with_options_msg(usage_msg, options,
 189					       "Unknown --sort key: `%s'", tok);
 190	}
 191
 192	pr_debug("Sort order: %s\n", kwork->sort_order);
 193	free(str);
 194}
 195
 196static struct kwork_atom *atom_new(struct perf_kwork *kwork,
 197				   struct perf_sample *sample)
 198{
 199	unsigned long i;
 200	struct kwork_atom_page *page;
 201	struct kwork_atom *atom = NULL;
 202
 203	list_for_each_entry(page, &kwork->atom_page_list, list) {
 204		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
 205			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
 206			BUG_ON(i >= NR_ATOM_PER_PAGE);
 207			atom = &page->atoms[i];
 208			goto found_atom;
 209		}
 210	}
 211
 212	/*
 213	 * new page
 214	 */
 215	page = zalloc(sizeof(*page));
 216	if (page == NULL) {
 217		pr_err("Failed to zalloc kwork atom page\n");
 218		return NULL;
 219	}
 220
 221	i = 0;
 222	atom = &page->atoms[0];
 223	list_add_tail(&page->list, &kwork->atom_page_list);
 224
 225found_atom:
 226	__set_bit(i, page->bitmap);
 227	atom->time = sample->time;
 228	atom->prev = NULL;
 229	atom->page_addr = page;
 230	atom->bit_inpage = i;
 231	return atom;
 232}
 233
 234static void atom_free(struct kwork_atom *atom)
 235{
 236	if (atom->prev != NULL)
 237		atom_free(atom->prev);
 238
 239	__clear_bit(atom->bit_inpage,
 240		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
 241}
 242
 243static void atom_del(struct kwork_atom *atom)
 244{
 245	list_del(&atom->list);
 246	atom_free(atom);
 247}
 248
 249static int work_cmp(struct list_head *list,
 250		    struct kwork_work *l, struct kwork_work *r)
 251{
 252	int ret = 0;
 253	struct sort_dimension *sort;
 254
 255	BUG_ON(list_empty(list));
 256
 257	list_for_each_entry(sort, list, list) {
 258		ret = sort->cmp(l, r);
 259		if (ret)
 260			return ret;
 261	}
 262
 263	return ret;
 264}
 265
 266static struct kwork_work *work_search(struct rb_root_cached *root,
 267				      struct kwork_work *key,
 268				      struct list_head *sort_list)
 269{
 270	int cmp;
 271	struct kwork_work *work;
 272	struct rb_node *node = root->rb_root.rb_node;
 273
 274	while (node) {
 275		work = container_of(node, struct kwork_work, node);
 276		cmp = work_cmp(sort_list, key, work);
 277		if (cmp > 0)
 278			node = node->rb_left;
 279		else if (cmp < 0)
 280			node = node->rb_right;
 281		else {
 282			if (work->name == NULL)
 283				work->name = key->name;
 284			return work;
 285		}
 286	}
 287	return NULL;
 288}
 289
 290static void work_insert(struct rb_root_cached *root,
 291			struct kwork_work *key, struct list_head *sort_list)
 292{
 293	int cmp;
 294	bool leftmost = true;
 295	struct kwork_work *cur;
 296	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;
 297
 298	while (*new) {
 299		cur = container_of(*new, struct kwork_work, node);
 300		parent = *new;
 301		cmp = work_cmp(sort_list, key, cur);
 302
 303		if (cmp > 0)
 304			new = &((*new)->rb_left);
 305		else {
 306			new = &((*new)->rb_right);
 307			leftmost = false;
 308		}
 309	}
 310
 311	rb_link_node(&key->node, parent, new);
 312	rb_insert_color_cached(&key->node, root, leftmost);
 313}
 314
 315static struct kwork_work *work_new(struct kwork_work *key)
 316{
 317	int i;
 318	struct kwork_work *work = zalloc(sizeof(*work));
 319
 320	if (work == NULL) {
 321		pr_err("Failed to zalloc kwork work\n");
 322		return NULL;
 323	}
 324
 325	for (i = 0; i < KWORK_TRACE_MAX; i++)
 326		INIT_LIST_HEAD(&work->atom_list[i]);
 327
 328	work->id = key->id;
 329	work->cpu = key->cpu;
 330	work->name = key->name;
 331	work->class = key->class;
 332	return work;
 333}
 334
 335static struct kwork_work *work_findnew(struct rb_root_cached *root,
 336				       struct kwork_work *key,
 337				       struct list_head *sort_list)
 338{
 339	struct kwork_work *work = work_search(root, key, sort_list);
 340
 341	if (work != NULL)
 342		return work;
 343
 344	work = work_new(key);
 345	if (work)
 346		work_insert(root, work, sort_list);
 347
 348	return work;
 349}
 350
 351static void profile_update_timespan(struct perf_kwork *kwork,
 352				    struct perf_sample *sample)
 353{
 354	if (!kwork->summary)
 355		return;
 356
 357	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
 358		kwork->timestart = sample->time;
 359
 360	if (kwork->timeend < sample->time)
 361		kwork->timeend = sample->time;
 362}
 363
 
 
 
 
 
 
 
 
 
 
 
 364static bool profile_event_match(struct perf_kwork *kwork,
 365				struct kwork_work *work,
 366				struct perf_sample *sample)
 367{
 368	int cpu = work->cpu;
 369	u64 time = sample->time;
 370	struct perf_time_interval *ptime = &kwork->ptime;
 371
 372	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
 373		return false;
 374
 375	if (((ptime->start != 0) && (ptime->start > time)) ||
 376	    ((ptime->end != 0) && (ptime->end < time)))
 377		return false;
 378
 379	if ((kwork->profile_name != NULL) &&
 380	    (work->name != NULL) &&
 381	    (strcmp(work->name, kwork->profile_name) != 0))
 
 
 
 382		return false;
 
 383
 384	profile_update_timespan(kwork, sample);
 385	return true;
 386}
 387
 388static int work_push_atom(struct perf_kwork *kwork,
 389			  struct kwork_class *class,
 390			  enum kwork_trace_type src_type,
 391			  enum kwork_trace_type dst_type,
 392			  struct evsel *evsel,
 393			  struct perf_sample *sample,
 394			  struct machine *machine,
 395			  struct kwork_work **ret_work)
 
 396{
 397	struct kwork_atom *atom, *dst_atom;
 398	struct kwork_work *work, key;
 399
 400	BUG_ON(class->work_init == NULL);
 401	class->work_init(class, &key, evsel, sample, machine);
 402
 403	atom = atom_new(kwork, sample);
 404	if (atom == NULL)
 405		return -1;
 406
 407	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 408	if (work == NULL) {
 409		free(atom);
 410		return -1;
 411	}
 412
 413	if (!profile_event_match(kwork, work, sample))
 
 414		return 0;
 
 415
 416	if (dst_type < KWORK_TRACE_MAX) {
 417		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
 418						   struct kwork_atom, list);
 419		if (dst_atom != NULL) {
 420			atom->prev = dst_atom;
 421			list_del(&dst_atom->list);
 422		}
 423	}
 424
 425	if (ret_work != NULL)
 426		*ret_work = work;
 427
 
 
 
 
 
 
 
 
 
 
 
 428	list_add_tail(&atom->list, &work->atom_list[src_type]);
 429
 430	return 0;
 431}
 432
 433static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
 434					struct kwork_class *class,
 435					enum kwork_trace_type src_type,
 436					enum kwork_trace_type dst_type,
 437					struct evsel *evsel,
 438					struct perf_sample *sample,
 439					struct machine *machine,
 440					struct kwork_work **ret_work)
 441{
 442	struct kwork_atom *atom, *src_atom;
 443	struct kwork_work *work, key;
 444
 445	BUG_ON(class->work_init == NULL);
 446	class->work_init(class, &key, evsel, sample, machine);
 447
 448	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
 449	if (ret_work != NULL)
 450		*ret_work = work;
 451
 452	if (work == NULL)
 453		return NULL;
 454
 455	if (!profile_event_match(kwork, work, sample))
 456		return NULL;
 457
 458	atom = list_last_entry_or_null(&work->atom_list[dst_type],
 459				       struct kwork_atom, list);
 460	if (atom != NULL)
 461		return atom;
 462
 463	src_atom = atom_new(kwork, sample);
 464	if (src_atom != NULL)
 465		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
 466	else {
 467		if (ret_work != NULL)
 468			*ret_work = NULL;
 469	}
 470
 471	return NULL;
 472}
 473
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 474static void report_update_exit_event(struct kwork_work *work,
 475				     struct kwork_atom *atom,
 476				     struct perf_sample *sample)
 477{
 478	u64 delta;
 479	u64 exit_time = sample->time;
 480	u64 entry_time = atom->time;
 481
 482	if ((entry_time != 0) && (exit_time >= entry_time)) {
 483		delta = exit_time - entry_time;
 484		if ((delta > work->max_runtime) ||
 485		    (work->max_runtime == 0)) {
 486			work->max_runtime = delta;
 487			work->max_runtime_start = entry_time;
 488			work->max_runtime_end = exit_time;
 489		}
 490		work->total_runtime += delta;
 491		work->nr_atoms++;
 492	}
 493}
 494
 495static int report_entry_event(struct perf_kwork *kwork,
 496			      struct kwork_class *class,
 497			      struct evsel *evsel,
 498			      struct perf_sample *sample,
 499			      struct machine *machine)
 500{
 501	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 502			      KWORK_TRACE_MAX, evsel, sample,
 503			      machine, NULL);
 504}
 505
 506static int report_exit_event(struct perf_kwork *kwork,
 507			     struct kwork_class *class,
 508			     struct evsel *evsel,
 509			     struct perf_sample *sample,
 510			     struct machine *machine)
 511{
 512	struct kwork_atom *atom = NULL;
 513	struct kwork_work *work = NULL;
 514
 515	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 516			     KWORK_TRACE_ENTRY, evsel, sample,
 517			     machine, &work);
 518	if (work == NULL)
 519		return -1;
 520
 521	if (atom != NULL) {
 522		report_update_exit_event(work, atom, sample);
 523		atom_del(atom);
 524	}
 525
 526	return 0;
 527}
 528
 529static void latency_update_entry_event(struct kwork_work *work,
 530				       struct kwork_atom *atom,
 531				       struct perf_sample *sample)
 532{
 533	u64 delta;
 534	u64 entry_time = sample->time;
 535	u64 raise_time = atom->time;
 536
 537	if ((raise_time != 0) && (entry_time >= raise_time)) {
 538		delta = entry_time - raise_time;
 539		if ((delta > work->max_latency) ||
 540		    (work->max_latency == 0)) {
 541			work->max_latency = delta;
 542			work->max_latency_start = raise_time;
 543			work->max_latency_end = entry_time;
 544		}
 545		work->total_latency += delta;
 546		work->nr_atoms++;
 547	}
 548}
 549
 550static int latency_raise_event(struct perf_kwork *kwork,
 551			       struct kwork_class *class,
 552			       struct evsel *evsel,
 553			       struct perf_sample *sample,
 554			       struct machine *machine)
 555{
 556	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 557			      KWORK_TRACE_MAX, evsel, sample,
 558			      machine, NULL);
 559}
 560
 561static int latency_entry_event(struct perf_kwork *kwork,
 562			       struct kwork_class *class,
 563			       struct evsel *evsel,
 564			       struct perf_sample *sample,
 565			       struct machine *machine)
 566{
 567	struct kwork_atom *atom = NULL;
 568	struct kwork_work *work = NULL;
 569
 570	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
 571			     KWORK_TRACE_RAISE, evsel, sample,
 572			     machine, &work);
 573	if (work == NULL)
 574		return -1;
 575
 576	if (atom != NULL) {
 577		latency_update_entry_event(work, atom, sample);
 578		atom_del(atom);
 579	}
 580
 581	return 0;
 582}
 583
 584static void timehist_save_callchain(struct perf_kwork *kwork,
 585				    struct perf_sample *sample,
 586				    struct evsel *evsel,
 587				    struct machine *machine)
 588{
 589	struct symbol *sym;
 590	struct thread *thread;
 591	struct callchain_cursor_node *node;
 592	struct callchain_cursor *cursor = &callchain_cursor;
 593
 594	if (!kwork->show_callchain || sample->callchain == NULL)
 595		return;
 596
 597	/* want main thread for process - has maps */
 598	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
 599	if (thread == NULL) {
 600		pr_debug("Failed to get thread for pid %d\n", sample->pid);
 601		return;
 602	}
 603
 
 
 604	if (thread__resolve_callchain(thread, cursor, evsel, sample,
 605				      NULL, NULL, kwork->max_stack + 2) != 0) {
 606		pr_debug("Failed to resolve callchain, skipping\n");
 607		goto out_put;
 608	}
 609
 610	callchain_cursor_commit(cursor);
 611
 612	while (true) {
 613		node = callchain_cursor_current(cursor);
 614		if (node == NULL)
 615			break;
 616
 617		sym = node->ms.sym;
 618		if (sym) {
 619			if (!strcmp(sym->name, "__softirqentry_text_start") ||
 620			    !strcmp(sym->name, "__do_softirq"))
 621				sym->ignore = 1;
 622		}
 623
 624		callchain_cursor_advance(cursor);
 625	}
 626
 627out_put:
 628	thread__put(thread);
 629}
 630
 631static void timehist_print_event(struct perf_kwork *kwork,
 632				 struct kwork_work *work,
 633				 struct kwork_atom *atom,
 634				 struct perf_sample *sample,
 635				 struct addr_location *al)
 636{
 637	char entrytime[32], exittime[32];
 638	char kwork_name[PRINT_KWORK_NAME_WIDTH];
 639
 640	/*
 641	 * runtime start
 642	 */
 643	timestamp__scnprintf_usec(atom->time,
 644				  entrytime, sizeof(entrytime));
 645	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);
 646
 647	/*
 648	 * runtime end
 649	 */
 650	timestamp__scnprintf_usec(sample->time,
 651				  exittime, sizeof(exittime));
 652	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);
 653
 654	/*
 655	 * cpu
 656	 */
 657	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
 658
 659	/*
 660	 * kwork name
 661	 */
 662	if (work->class && work->class->work_name) {
 663		work->class->work_name(work, kwork_name,
 664				       PRINT_KWORK_NAME_WIDTH);
 665		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
 666	} else
 667		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");
 668
 669	/*
 670	 *runtime
 671	 */
 672	printf(" %*.*f ",
 673	       PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
 674	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);
 675
 676	/*
 677	 * delaytime
 678	 */
 679	if (atom->prev != NULL)
 680		printf(" %*.*f ", PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
 681		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
 682	else
 683		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");
 684
 685	/*
 686	 * callchain
 687	 */
 688	if (kwork->show_callchain) {
 
 
 
 
 
 689		printf(" ");
 
 690		sample__fprintf_sym(sample, al, 0,
 691				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
 692				    EVSEL__PRINT_CALLCHAIN_ARROW |
 693				    EVSEL__PRINT_SKIP_IGNORED,
 694				    &callchain_cursor, symbol_conf.bt_stop_list,
 695				    stdout);
 696	}
 697
 698	printf("\n");
 699}
 700
 701static int timehist_raise_event(struct perf_kwork *kwork,
 702				struct kwork_class *class,
 703				struct evsel *evsel,
 704				struct perf_sample *sample,
 705				struct machine *machine)
 706{
 707	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
 708			      KWORK_TRACE_MAX, evsel, sample,
 709			      machine, NULL);
 710}
 711
 712static int timehist_entry_event(struct perf_kwork *kwork,
 713				struct kwork_class *class,
 714				struct evsel *evsel,
 715				struct perf_sample *sample,
 716				struct machine *machine)
 717{
 718	int ret;
 719	struct kwork_work *work = NULL;
 720
 721	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
 722			     KWORK_TRACE_RAISE, evsel, sample,
 723			     machine, &work);
 724	if (ret)
 725		return ret;
 726
 727	if (work != NULL)
 728		timehist_save_callchain(kwork, sample, evsel, machine);
 729
 730	return 0;
 731}
 732
 733static int timehist_exit_event(struct perf_kwork *kwork,
 734			       struct kwork_class *class,
 735			       struct evsel *evsel,
 736			       struct perf_sample *sample,
 737			       struct machine *machine)
 738{
 739	struct kwork_atom *atom = NULL;
 740	struct kwork_work *work = NULL;
 741	struct addr_location al;
 
 742
 
 743	if (machine__resolve(machine, &al, sample) < 0) {
 744		pr_debug("Problem processing event, skipping it\n");
 745		return -1;
 
 746	}
 747
 748	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
 749			     KWORK_TRACE_ENTRY, evsel, sample,
 750			     machine, &work);
 751	if (work == NULL)
 752		return -1;
 
 
 753
 754	if (atom != NULL) {
 755		work->nr_atoms++;
 756		timehist_print_event(kwork, work, atom, sample, &al);
 757		atom_del(atom);
 758	}
 759
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 760	return 0;
 761}
 762
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 763static struct kwork_class kwork_irq;
 764static int process_irq_handler_entry_event(struct perf_tool *tool,
 765					   struct evsel *evsel,
 766					   struct perf_sample *sample,
 767					   struct machine *machine)
 768{
 769	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 770
 771	if (kwork->tp_handler->entry_event)
 772		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
 773						      evsel, sample, machine);
 774	return 0;
 775}
 776
 777static int process_irq_handler_exit_event(struct perf_tool *tool,
 778					  struct evsel *evsel,
 779					  struct perf_sample *sample,
 780					  struct machine *machine)
 781{
 782	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 783
 784	if (kwork->tp_handler->exit_event)
 785		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
 786						     evsel, sample, machine);
 787	return 0;
 788}
 789
 790const struct evsel_str_handler irq_tp_handlers[] = {
 791	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
 792	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
 793};
 794
 795static int irq_class_init(struct kwork_class *class,
 796			  struct perf_session *session)
 797{
 798	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
 799		pr_err("Failed to set irq tracepoints handlers\n");
 800		return -1;
 801	}
 802
 803	class->work_root = RB_ROOT_CACHED;
 804	return 0;
 805}
 806
 807static void irq_work_init(struct kwork_class *class,
 
 808			  struct kwork_work *work,
 
 809			  struct evsel *evsel,
 810			  struct perf_sample *sample,
 811			  struct machine *machine __maybe_unused)
 812{
 813	work->class = class;
 814	work->cpu = sample->cpu;
 815	work->id = evsel__intval(evsel, sample, "irq");
 816	work->name = evsel__strval(evsel, sample, "name");
 
 
 
 
 
 
 817}
 818
 819static void irq_work_name(struct kwork_work *work, char *buf, int len)
 820{
 821	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
 822}
 823
 824static struct kwork_class kwork_irq = {
 825	.name           = "irq",
 826	.type           = KWORK_CLASS_IRQ,
 827	.nr_tracepoints = 2,
 828	.tp_handlers    = irq_tp_handlers,
 829	.class_init     = irq_class_init,
 830	.work_init      = irq_work_init,
 831	.work_name      = irq_work_name,
 832};
 833
 834static struct kwork_class kwork_softirq;
 835static int process_softirq_raise_event(struct perf_tool *tool,
 836				       struct evsel *evsel,
 837				       struct perf_sample *sample,
 838				       struct machine *machine)
 839{
 840	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 841
 842	if (kwork->tp_handler->raise_event)
 843		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
 844						      evsel, sample, machine);
 845
 846	return 0;
 847}
 848
 849static int process_softirq_entry_event(struct perf_tool *tool,
 850				       struct evsel *evsel,
 851				       struct perf_sample *sample,
 852				       struct machine *machine)
 853{
 854	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 855
 856	if (kwork->tp_handler->entry_event)
 857		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
 858						      evsel, sample, machine);
 859
 860	return 0;
 861}
 862
 863static int process_softirq_exit_event(struct perf_tool *tool,
 864				      struct evsel *evsel,
 865				      struct perf_sample *sample,
 866				      struct machine *machine)
 867{
 868	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 869
 870	if (kwork->tp_handler->exit_event)
 871		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
 872						     evsel, sample, machine);
 873
 874	return 0;
 875}
 876
 877const struct evsel_str_handler softirq_tp_handlers[] = {
 878	{ "irq:softirq_raise", process_softirq_raise_event, },
 879	{ "irq:softirq_entry", process_softirq_entry_event, },
 880	{ "irq:softirq_exit",  process_softirq_exit_event,  },
 881};
 882
 883static int softirq_class_init(struct kwork_class *class,
 884			      struct perf_session *session)
 885{
 886	if (perf_session__set_tracepoints_handlers(session,
 887						   softirq_tp_handlers)) {
 888		pr_err("Failed to set softirq tracepoints handlers\n");
 889		return -1;
 890	}
 891
 892	class->work_root = RB_ROOT_CACHED;
 893	return 0;
 894}
 895
 896static char *evsel__softirq_name(struct evsel *evsel, u64 num)
 897{
 898	char *name = NULL;
 899	bool found = false;
 900	struct tep_print_flag_sym *sym = NULL;
 901	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;
 902
 903	if ((args == NULL) || (args->next == NULL))
 904		return NULL;
 905
 906	/* skip softirq field: "REC->vec" */
 907	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
 908		if ((eval_flag(sym->value) == (unsigned long long)num) &&
 909		    (strlen(sym->str) != 0)) {
 910			found = true;
 911			break;
 912		}
 913	}
 914
 915	if (!found)
 916		return NULL;
 917
 918	name = strdup(sym->str);
 919	if (name == NULL) {
 920		pr_err("Failed to copy symbol name\n");
 921		return NULL;
 922	}
 923	return name;
 924}
 925
 926static void softirq_work_init(struct kwork_class *class,
 
 927			      struct kwork_work *work,
 
 928			      struct evsel *evsel,
 929			      struct perf_sample *sample,
 930			      struct machine *machine __maybe_unused)
 931{
 932	u64 num = evsel__intval(evsel, sample, "vec");
 933
 934	work->id = num;
 935	work->class = class;
 936	work->cpu = sample->cpu;
 937	work->name = evsel__softirq_name(evsel, num);
 
 
 
 
 
 
 
 
 938}
 939
 940static void softirq_work_name(struct kwork_work *work, char *buf, int len)
 941{
 942	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
 943}
 944
 945static struct kwork_class kwork_softirq = {
 946	.name           = "softirq",
 947	.type           = KWORK_CLASS_SOFTIRQ,
 948	.nr_tracepoints = 3,
 949	.tp_handlers    = softirq_tp_handlers,
 950	.class_init     = softirq_class_init,
 951	.work_init      = softirq_work_init,
 952	.work_name      = softirq_work_name,
 953};
 954
 955static struct kwork_class kwork_workqueue;
 956static int process_workqueue_activate_work_event(struct perf_tool *tool,
 957						 struct evsel *evsel,
 958						 struct perf_sample *sample,
 959						 struct machine *machine)
 960{
 961	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 962
 963	if (kwork->tp_handler->raise_event)
 964		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
 965						    evsel, sample, machine);
 966
 967	return 0;
 968}
 969
 970static int process_workqueue_execute_start_event(struct perf_tool *tool,
 971						 struct evsel *evsel,
 972						 struct perf_sample *sample,
 973						 struct machine *machine)
 974{
 975	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 976
 977	if (kwork->tp_handler->entry_event)
 978		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
 979						    evsel, sample, machine);
 980
 981	return 0;
 982}
 983
 984static int process_workqueue_execute_end_event(struct perf_tool *tool,
 985					       struct evsel *evsel,
 986					       struct perf_sample *sample,
 987					       struct machine *machine)
 988{
 989	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
 990
 991	if (kwork->tp_handler->exit_event)
 992		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
 993						   evsel, sample, machine);
 994
 995	return 0;
 996}
 997
 998const struct evsel_str_handler workqueue_tp_handlers[] = {
 999	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
1000	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
1001	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
1002};
1003
1004static int workqueue_class_init(struct kwork_class *class,
1005				struct perf_session *session)
1006{
1007	if (perf_session__set_tracepoints_handlers(session,
1008						   workqueue_tp_handlers)) {
1009		pr_err("Failed to set workqueue tracepoints handlers\n");
1010		return -1;
1011	}
1012
1013	class->work_root = RB_ROOT_CACHED;
1014	return 0;
1015}
1016
1017static void workqueue_work_init(struct kwork_class *class,
 
1018				struct kwork_work *work,
 
1019				struct evsel *evsel,
1020				struct perf_sample *sample,
1021				struct machine *machine)
1022{
1023	char *modp = NULL;
1024	unsigned long long function_addr = evsel__intval(evsel,
1025							 sample, "function");
1026
1027	work->class = class;
1028	work->cpu = sample->cpu;
1029	work->id = evsel__intval(evsel, sample, "work");
1030	work->name = function_addr == 0 ? NULL :
1031		machine__resolve_kernel_addr(machine, &function_addr, &modp);
1032}
1033
1034static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
1035{
1036	if (work->name != NULL)
1037		snprintf(buf, len, "(w)%s", work->name);
1038	else
1039		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
1040}
1041
1042static struct kwork_class kwork_workqueue = {
1043	.name           = "workqueue",
1044	.type           = KWORK_CLASS_WORKQUEUE,
1045	.nr_tracepoints = 3,
1046	.tp_handlers    = workqueue_tp_handlers,
1047	.class_init     = workqueue_class_init,
1048	.work_init      = workqueue_work_init,
1049	.work_name      = workqueue_work_name,
1050};
1051
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1052static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
1053	[KWORK_CLASS_IRQ]       = &kwork_irq,
1054	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
1055	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
 
1056};
1057
1058static void print_separator(int len)
1059{
1060	printf(" %.*s\n", len, graph_dotted_line);
1061}
1062
1063static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1064{
1065	int ret = 0;
1066	char kwork_name[PRINT_KWORK_NAME_WIDTH];
1067	char max_runtime_start[32], max_runtime_end[32];
1068	char max_latency_start[32], max_latency_end[32];
1069
1070	printf(" ");
1071
1072	/*
1073	 * kwork name
1074	 */
1075	if (work->class && work->class->work_name) {
1076		work->class->work_name(work, kwork_name,
1077				       PRINT_KWORK_NAME_WIDTH);
1078		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
1079	} else {
1080		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
1081	}
1082
1083	/*
1084	 * cpu
1085	 */
1086	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
1087
1088	/*
1089	 * total runtime
1090	 */
1091	if (kwork->report == KWORK_REPORT_RUNTIME) {
1092		ret += printf(" %*.*f ms |",
1093			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1094			      (double)work->total_runtime / NSEC_PER_MSEC);
1095	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1096		ret += printf(" %*.*f ms |",
1097			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1098			      (double)work->total_latency /
1099			      work->nr_atoms / NSEC_PER_MSEC);
1100	}
1101
1102	/*
1103	 * count
1104	 */
1105	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
1106
1107	/*
1108	 * max runtime, max runtime start, max runtime end
1109	 */
1110	if (kwork->report == KWORK_REPORT_RUNTIME) {
1111		timestamp__scnprintf_usec(work->max_runtime_start,
1112					  max_runtime_start,
1113					  sizeof(max_runtime_start));
1114		timestamp__scnprintf_usec(work->max_runtime_end,
1115					  max_runtime_end,
1116					  sizeof(max_runtime_end));
1117		ret += printf(" %*.*f ms | %*s s | %*s s |",
1118			      PRINT_RUNTIME_WIDTH, RPINT_DECIMAL_WIDTH,
1119			      (double)work->max_runtime / NSEC_PER_MSEC,
1120			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
1121			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
1122	}
1123	/*
1124	 * max delay, max delay start, max delay end
1125	 */
1126	else if (kwork->report == KWORK_REPORT_LATENCY) {
1127		timestamp__scnprintf_usec(work->max_latency_start,
1128					  max_latency_start,
1129					  sizeof(max_latency_start));
1130		timestamp__scnprintf_usec(work->max_latency_end,
1131					  max_latency_end,
1132					  sizeof(max_latency_end));
1133		ret += printf(" %*.*f ms | %*s s | %*s s |",
1134			      PRINT_LATENCY_WIDTH, RPINT_DECIMAL_WIDTH,
1135			      (double)work->max_latency / NSEC_PER_MSEC,
1136			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
1137			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
1138	}
1139
1140	printf("\n");
1141	return ret;
1142}
1143
1144static int report_print_header(struct perf_kwork *kwork)
1145{
1146	int ret;
1147
1148	printf("\n ");
1149	ret = printf(" %-*s | %-*s |",
1150		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
1151		     PRINT_CPU_WIDTH, "Cpu");
1152
1153	if (kwork->report == KWORK_REPORT_RUNTIME) {
1154		ret += printf(" %-*s |",
1155			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
1156	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1157		ret += printf(" %-*s |",
1158			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
1159	}
1160
1161	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");
1162
1163	if (kwork->report == KWORK_REPORT_RUNTIME) {
1164		ret += printf(" %-*s | %-*s | %-*s |",
1165			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
1166			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
1167			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
1168	} else if (kwork->report == KWORK_REPORT_LATENCY) {
1169		ret += printf(" %-*s | %-*s | %-*s |",
1170			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
1171			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
1172			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
1173	}
1174
1175	printf("\n");
1176	print_separator(ret);
1177	return ret;
1178}
1179
1180static void timehist_print_header(void)
1181{
1182	/*
1183	 * header row
1184	 */
1185	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1186	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
1187	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
1188	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
1189	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
1190	       PRINT_RUNTIME_WIDTH, "Runtime",
1191	       PRINT_RUNTIME_WIDTH, "Delaytime");
1192
1193	/*
1194	 * units row
1195	 */
1196	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
1197	       PRINT_TIMESTAMP_WIDTH, "",
1198	       PRINT_TIMESTAMP_WIDTH, "",
1199	       PRINT_TIMEHIST_CPU_WIDTH, "",
1200	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
1201	       PRINT_RUNTIME_WIDTH, "(msec)",
1202	       PRINT_RUNTIME_WIDTH, "(msec)");
1203
1204	/*
1205	 * separator
1206	 */
1207	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
1208	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1209	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
1210	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
1211	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
1212	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
1213	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
1214}
1215
1216static void print_summary(struct perf_kwork *kwork)
1217{
1218	u64 time = kwork->timeend - kwork->timestart;
1219
1220	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
1221	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
1222	       (double)kwork->all_runtime / NSEC_PER_MSEC,
1223	       time == 0 ? 0 : (double)kwork->all_runtime / time);
1224	printf("  Total time span (msec) : %9.3f\n",
1225	       (double)time / NSEC_PER_MSEC);
1226}
1227
1228static unsigned long long nr_list_entry(struct list_head *head)
1229{
1230	struct list_head *pos;
1231	unsigned long long n = 0;
1232
1233	list_for_each(pos, head)
1234		n++;
1235
1236	return n;
1237}
1238
1239static void print_skipped_events(struct perf_kwork *kwork)
1240{
1241	int i;
1242	const char *const kwork_event_str[] = {
1243		[KWORK_TRACE_RAISE] = "raise",
1244		[KWORK_TRACE_ENTRY] = "entry",
1245		[KWORK_TRACE_EXIT]  = "exit",
1246	};
1247
1248	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1249	    (kwork->nr_events != 0)) {
1250		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
1251		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1252		       (double)kwork->nr_events * 100.0,
1253		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1254
1255		for (i = 0; i < KWORK_TRACE_MAX; i++) {
1256			printf("%" PRIu64 " %s%s",
1257			       kwork->nr_skipped_events[i],
1258			       kwork_event_str[i],
1259			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
1260		}
1261	}
1262
1263	if (verbose > 0)
1264		printf("  INFO: use %lld atom pages\n",
1265		       nr_list_entry(&kwork->atom_page_list));
1266}
1267
1268static void print_bad_events(struct perf_kwork *kwork)
1269{
1270	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1271		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1272		       (double)kwork->nr_lost_events /
1273		       (double)kwork->nr_events * 100.0,
1274		       kwork->nr_lost_events, kwork->nr_events,
1275		       kwork->nr_lost_chunks);
1276	}
1277}
1278
1279static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1280{
1281	struct rb_node *node;
1282	struct kwork_work *data;
1283	struct rb_root_cached *root = &class->work_root;
1284
1285	pr_debug("Sorting %s ...\n", class->name);
1286	for (;;) {
1287		node = rb_first_cached(root);
1288		if (!node)
1289			break;
1290
1291		rb_erase_cached(node, root);
1292		data = rb_entry(node, struct kwork_work, node);
1293		work_insert(&kwork->sorted_work_root,
1294			       data, &kwork->sort_list);
1295	}
1296}
1297
1298static void perf_kwork__sort(struct perf_kwork *kwork)
1299{
1300	struct kwork_class *class;
1301
1302	list_for_each_entry(class, &kwork->class_list, list)
1303		work_sort(kwork, class);
1304}
1305
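/*
 * Select the tracepoint handler set for the requested report type,
 * initialize each kwork class, and apply the --cpu and --time filters.
 * Callchain printing is disabled if the samples carry no callchains.
 */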
1306static int perf_kwork__check_config(struct perf_kwork *kwork,
1307				    struct perf_session *session)
1308{
1309	int ret;
1310	struct evsel *evsel;
1311	struct kwork_class *class;
1312
1313	static struct trace_kwork_handler report_ops = {
1314		.entry_event = report_entry_event,
1315		.exit_event  = report_exit_event,
1316	};
1317	static struct trace_kwork_handler latency_ops = {
1318		.raise_event = latency_raise_event,
1319		.entry_event = latency_entry_event,
1320	};
1321	static struct trace_kwork_handler timehist_ops = {
1322		.raise_event = timehist_raise_event,
1323		.entry_event = timehist_entry_event,
1324		.exit_event  = timehist_exit_event,
1325	};
1326
1327	switch (kwork->report) {
1328	case KWORK_REPORT_RUNTIME:
1329		kwork->tp_handler = &report_ops;
1330		break;
1331	case KWORK_REPORT_LATENCY:
1332		kwork->tp_handler = &latency_ops;
1333		break;
1334	case KWORK_REPORT_TIMEHIST:
1335		kwork->tp_handler = &timehist_ops;
1336		break;
1337	default:
1338		pr_debug("Invalid report type %d\n", kwork->report);
1339		return -1;
1340	}
1341
1342	list_for_each_entry(class, &kwork->class_list, list)
1343		if ((class->class_init != NULL) &&
1344		    (class->class_init(class, session) != 0))
1345			return -1;
1346
1347	if (kwork->cpu_list != NULL) {
1348		ret = perf_session__cpu_bitmap(session,
1349					       kwork->cpu_list,
1350					       kwork->cpu_bitmap);
1351		if (ret < 0) {
1352			pr_err("Invalid cpu bitmap\n");
1353			return -1;
1354		}
1355	}
1356
1357	if (kwork->time_str != NULL) {
1358		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1359		if (ret != 0) {
1360			pr_err("Invalid time span\n");
1361			return -1;
1362		}
1363	}
1364
1365	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
1366		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1367			pr_debug("Samples do not have callchains\n");
1368			kwork->show_callchain = 0;
1369			symbol_conf.use_callchain = 0;
1370		}
1371	}
1372
1373	return 0;
1374}
1375
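/*
 * Open the perf.data session, validate the configuration, and process
 * all recorded events; event and lost-event counters are saved for the
 * statistics printed at the end of the report.
 */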
1376static int perf_kwork__read_events(struct perf_kwork *kwork)
1377{
1378	int ret = -1;
1379	struct perf_session *session = NULL;
1380
1381	struct perf_data data = {
1382		.path  = input_name,
1383		.mode  = PERF_DATA_MODE_READ,
1384		.force = kwork->force,
1385	};
1386
1387	session = perf_session__new(&data, &kwork->tool);
1388	if (IS_ERR(session)) {
1389		pr_debug("Error creating perf session\n");
1390		return PTR_ERR(session);
1391	}
1392
1393	symbol__init(&session->header.env);
1394
1395	if (perf_kwork__check_config(kwork, session) != 0)
1396		goto out_delete;
1397
1398	if (session->tevent.pevent &&
1399	    tep_set_function_resolver(session->tevent.pevent,
1400				      machine__resolve_kernel_addr,
1401				      &session->machines.host) < 0) {
1402		pr_err("Failed to set libtraceevent function resolver\n");
1403		goto out_delete;
1404	}
1405
1406	if (kwork->report == KWORK_REPORT_TIMEHIST)
1407		timehist_print_header();
1408
1409	ret = perf_session__process_events(session);
1410	if (ret) {
1411		pr_debug("Failed to process events, error %d\n", ret);
1412		goto out_delete;
1413	}
1414
1415	kwork->nr_events      = session->evlist->stats.nr_events[0];
1416	kwork->nr_lost_events = session->evlist->stats.total_lost;
1417	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1418
1419out_delete:
1420	perf_session__delete(session);
1421	return ret;
1422}
1423
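/* Count atoms left on each work's atom_list as skipped events. */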
1424static void process_skipped_events(struct perf_kwork *kwork,
1425				   struct kwork_work *work)
1426{
1427	int i;
1428	unsigned long long count;
1429
1430	for (i = 0; i < KWORK_TRACE_MAX; i++) {
1431		count = nr_list_entry(&work->atom_list[i]);
1432		kwork->nr_skipped_events[i] += count;
1433		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1434	}
1435}
1436
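/*
 * Allocate a new work item from @key and insert it into the class
 * rb-tree, using kwork->cmp_id for ordering.
 */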
1437struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1438				       struct kwork_class *class,
1439				       struct kwork_work *key)
1440{
1441	struct kwork_work *work = NULL;
1442
1443	work = work_new(key);
1444	if (work == NULL)
1445		return NULL;
1446
1447	work_insert(&class->work_root, work, &kwork->cmp_id);
1448	return work;
1449}
1450
1451static void sig_handler(int sig)
1452{
1453	/*
1454	 * Simply capture the termination signal so that
1455	 * the program can continue after pause() returns
1456	 */
1457	pr_debug("Captured signal %d\n", sig);
1458}
1459
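/*
 * BPF-based report path: install signal handlers, start tracing, wait
 * until SIGINT/SIGTERM arrives, then read the collected data back and
 * clean up.
 */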
1460static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1461{
1462	int ret;
1463
1464	signal(SIGINT, sig_handler);
1465	signal(SIGTERM, sig_handler);
1466
1467	ret = perf_kwork__trace_prepare_bpf(kwork);
1468	if (ret)
1469		return -1;
1470
1471	printf("Starting trace, hit <Ctrl+C> to stop and report\n");
1472
1473	perf_kwork__trace_start();
1474
1475	/*
1476	 * A simple pause(): wait here for the stop signal
1477	 */
1478	pause();
1479
1480	perf_kwork__trace_finish();
1481
1482	perf_kwork__report_read_bpf(kwork);
1483
1484	perf_kwork__report_cleanup_bpf();
1485
1486	return 0;
1487}
1488
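/*
 * Common report entry point: gather data (from BPF or from perf.data),
 * sort the work items, and print the per-work report plus the optional
 * summary and the lost/skipped event statistics.
 */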
1489static int perf_kwork__report(struct perf_kwork *kwork)
1490{
1491	int ret;
1492	struct rb_node *next;
1493	struct kwork_work *work;
1494
1495	if (kwork->use_bpf)
1496		ret = perf_kwork__report_bpf(kwork);
1497	else
1498		ret = perf_kwork__read_events(kwork);
1499
1500	if (ret != 0)
1501		return -1;
1502
1503	perf_kwork__sort(kwork);
1504
1505	setup_pager();
1506
1507	ret = report_print_header(kwork);
1508	next = rb_first_cached(&kwork->sorted_work_root);
1509	while (next) {
1510		work = rb_entry(next, struct kwork_work, node);
1511		process_skipped_events(kwork, work);
1512
1513		if (work->nr_atoms != 0) {
1514			report_print_work(kwork, work);
1515			if (kwork->summary) {
1516				kwork->all_runtime += work->total_runtime;
1517				kwork->all_count += work->nr_atoms;
1518			}
1519		}
1520		next = rb_next(next);
1521	}
1522	print_separator(ret);
1523
1524	if (kwork->summary) {
1525		print_summary(kwork);
1526		print_separator(ret);
1527	}
1528
1529	print_bad_events(kwork);
1530	print_skipped_events(kwork);
1531	printf("\n");
1532
1533	return 0;
1534}
1535
1536typedef int (*tracepoint_handler)(struct perf_tool *tool,
1537				  struct evsel *evsel,
1538				  struct perf_sample *sample,
1539				  struct machine *machine);
1540
1541static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
1542						 union perf_event *event __maybe_unused,
1543						 struct perf_sample *sample,
1544						 struct evsel *evsel,
1545						 struct machine *machine)
1546{
1547	int err = 0;
1548
1549	if (evsel->handler != NULL) {
1550		tracepoint_handler f = evsel->handler;
1551
1552		err = f(tool, evsel, sample, machine);
1553	}
1554
1555	return err;
1556}
1557
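/*
 * Timehist mode wires up the side-band event handlers (comm/exit/fork/
 * attr/...) and requests ordered, timestamped processing so task names
 * and callchains can be resolved while replaying the trace.
 */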
1558static int perf_kwork__timehist(struct perf_kwork *kwork)
1559{
1560	/*
1561	 * event handlers for timehist option
1562	 */
1563	kwork->tool.comm	 = perf_event__process_comm;
1564	kwork->tool.exit	 = perf_event__process_exit;
1565	kwork->tool.fork	 = perf_event__process_fork;
1566	kwork->tool.attr	 = perf_event__process_attr;
1567	kwork->tool.tracing_data = perf_event__process_tracing_data;
1568	kwork->tool.build_id	 = perf_event__process_build_id;
1569	kwork->tool.ordered_events = true;
1570	kwork->tool.ordering_requires_timestamps = true;
1571	symbol_conf.use_callchain = kwork->show_callchain;
1572
1573	if (symbol__validate_sym_arguments()) {
1574		pr_err("Failed to validate sym arguments\n");
1575		return -1;
1576	}
1577
1578	setup_pager();
1579
1580	return perf_kwork__read_events(kwork);
1581}
1582
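/*
 * Parse the --kwork class list (e.g. "irq,softirq,workqueue") into
 * kwork->class_list; if nothing was specified, all supported classes
 * are profiled.
 */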
1583static void setup_event_list(struct perf_kwork *kwork,
1584			     const struct option *options,
1585			     const char * const usage_msg[])
1586{
1587	int i;
1588	struct kwork_class *class;
1589	char *tmp, *tok, *str;
1590
1591	if (kwork->event_list_str == NULL)
1592		goto null_event_list_str;
1593
1594	str = strdup(kwork->event_list_str);
1595	for (tok = strtok_r(str, ", ", &tmp);
1596	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
1597		for (i = 0; i < KWORK_CLASS_MAX; i++) {
1598			class = kwork_class_supported_list[i];
1599			if (strcmp(tok, class->name) == 0) {
1600				list_add_tail(&class->list, &kwork->class_list);
1601				break;
1602			}
1603		}
1604		if (i == KWORK_CLASS_MAX) {
1605			usage_with_options_msg(usage_msg, options,
1606					       "Unknown --event key: `%s'", tok);
1607		}
1608	}
1609	free(str);
1610
1611null_event_list_str:
1612	/*
1613	 * configure all kwork events if none were specified
1614	 */
1615	if (list_empty(&kwork->class_list)) {
1616		for (i = 0; i < KWORK_CLASS_MAX; i++) {
1617			list_add_tail(&kwork_class_supported_list[i]->list,
1618				      &kwork->class_list);
1619		}
1620	}
1621
1622	pr_debug("Config event list:");
1623	list_for_each_entry(class, &kwork->class_list, list)
1624		pr_debug(" %s", class->name);
1625	pr_debug("\n");
1626}
1627
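/*
 * Build and run a 'perf record' command line: the fixed arguments
 * below, a "-e <tracepoint>" pair for every tracepoint of each
 * selected class, then the user-supplied arguments.
 */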
1628static int perf_kwork__record(struct perf_kwork *kwork,
1629			      int argc, const char **argv)
1630{
1631	const char **rec_argv;
1632	unsigned int rec_argc, i, j;
1633	struct kwork_class *class;
1634
1635	const char *const record_args[] = {
1636		"record",
1637		"-a",
1638		"-R",
1639		"-m", "1024",
1640		"-c", "1",
1641	};
1642
1643	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1644
1645	list_for_each_entry(class, &kwork->class_list, list)
1646		rec_argc += 2 * class->nr_tracepoints;
1647
1648	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1649	if (rec_argv == NULL)
1650		return -ENOMEM;
1651
1652	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1653		rec_argv[i] = strdup(record_args[i]);
1654
1655	list_for_each_entry(class, &kwork->class_list, list) {
1656		for (j = 0; j < class->nr_tracepoints; j++) {
1657			rec_argv[i++] = strdup("-e");
1658			rec_argv[i++] = strdup(class->tp_handlers[j].name);
1659		}
1660	}
1661
1662	for (j = 1; j < (unsigned int)argc; j++, i++)
1663		rec_argv[i] = argv[j];
1664
1665	BUG_ON(i != rec_argc);
1666
1667	pr_debug("record command: ");
1668	for (j = 0; j < rec_argc; j++)
1669		pr_debug("%s ", rec_argv[j]);
1670	pr_debug("\n");
1671
1672	return cmd_record(i, rec_argv);
1673}
1674
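/*
 * Main entry point: parse the subcommand (record, report, latency or
 * timehist), set up the default sort order where applicable, and
 * dispatch.
 *
 * A typical workflow might look like this (illustrative only; see the
 * perf-kwork documentation for the full option list):
 *
 *   perf kwork record -- sleep 1
 *   perf kwork report -S
 *   perf kwork latency
 *   perf kwork timehist -g
 */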
1675int cmd_kwork(int argc, const char **argv)
1676{
1677	static struct perf_kwork kwork = {
1678		.class_list          = LIST_HEAD_INIT(kwork.class_list),
1679		.tool = {
1680			.mmap    = perf_event__process_mmap,
1681			.mmap2   = perf_event__process_mmap2,
1682			.sample  = perf_kwork__process_tracepoint_sample,
1683		},
1684		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
1685		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
1686		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
1687		.sorted_work_root    = RB_ROOT_CACHED,
1688		.tp_handler          = NULL,
1689		.profile_name        = NULL,
1690		.cpu_list            = NULL,
1691		.time_str            = NULL,
1692		.force               = false,
1693		.event_list_str      = NULL,
1694		.summary             = false,
1695		.sort_order          = NULL,
1696		.show_callchain      = false,
1697		.max_stack           = 5,
1698		.timestart           = 0,
1699		.timeend             = 0,
1700		.nr_events           = 0,
1701		.nr_lost_chunks      = 0,
1702		.nr_lost_events      = 0,
1703		.all_runtime         = 0,
1704		.all_count           = 0,
1705		.nr_skipped_events   = { 0 },
1706	};
1707	static const char default_report_sort_order[] = "runtime, max, count";
1708	static const char default_latency_sort_order[] = "avg, max, count";
1709	const struct option kwork_options[] = {
1710	OPT_INCR('v', "verbose", &verbose,
1711		 "be more verbose (show symbol address, etc)"),
1712	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1713		    "dump raw trace in ASCII"),
1714	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
1715		   "list of kwork to profile (irq, softirq, workqueue, etc)"),
1716	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
1717	OPT_END()
1718	};
1719	const struct option report_options[] = {
1720	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1721		   "sort by key(s): runtime, max, count"),
1722	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1723		   "list of cpus to profile"),
1724	OPT_STRING('n', "name", &kwork.profile_name, "name",
1725		   "event name to profile"),
1726	OPT_STRING(0, "time", &kwork.time_str, "str",
1727		   "Time span for analysis (start,stop)"),
1728	OPT_STRING('i', "input", &input_name, "file",
1729		   "input file name"),
1730	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
1731		    "Show summary with statistics"),
1732#ifdef HAVE_BPF_SKEL
1733	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1734		    "Use BPF to measure kwork runtime"),
1735#endif
1736	OPT_PARENT(kwork_options)
1737	};
1738	const struct option latency_options[] = {
1739	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1740		   "sort by key(s): avg, max, count"),
1741	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1742		   "list of cpus to profile"),
1743	OPT_STRING('n', "name", &kwork.profile_name, "name",
1744		   "event name to profile"),
1745	OPT_STRING(0, "time", &kwork.time_str, "str",
1746		   "Time span for analysis (start,stop)"),
1747	OPT_STRING('i', "input", &input_name, "file",
1748		   "input file name"),
1749#ifdef HAVE_BPF_SKEL
1750	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1751		    "Use BPF to measure kwork latency"),
1752#endif
1753	OPT_PARENT(kwork_options)
1754	};
1755	const struct option timehist_options[] = {
1756	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
1757		   "file", "vmlinux pathname"),
1758	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
1759		   "file", "kallsyms pathname"),
1760	OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
1761		    "Display call chains if present"),
1762	OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
1763		   "Maximum number of functions to display in backtrace."),
1764	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
1765		    "Look for files with symbols relative to this directory"),
1766	OPT_STRING(0, "time", &kwork.time_str, "str",
1767		   "Time span for analysis (start,stop)"),
1768	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1769		   "list of cpus to profile"),
1770	OPT_STRING('n', "name", &kwork.profile_name, "name",
1771		   "event name to profile"),
1772	OPT_STRING('i', "input", &input_name, "file",
1773		   "input file name"),
1774	OPT_PARENT(kwork_options)
1775	};
1776	const char *kwork_usage[] = {
1777		NULL,
1778		NULL
1779	};
1780	const char * const report_usage[] = {
1781		"perf kwork report [<options>]",
1782		NULL
1783	};
1784	const char * const latency_usage[] = {
1785		"perf kwork latency [<options>]",
1786		NULL
1787	};
1788	const char * const timehist_usage[] = {
1789		"perf kwork timehist [<options>]",
1790		NULL
1791	};
1792	const char *const kwork_subcommands[] = {
1793		"record", "report", "latency", "timehist", NULL
1794	};
1795
1796	argc = parse_options_subcommand(argc, argv, kwork_options,
1797					kwork_subcommands, kwork_usage,
1798					PARSE_OPT_STOP_AT_NON_OPTION);
1799	if (!argc)
1800		usage_with_options(kwork_usage, kwork_options);
1801
1802	setup_event_list(&kwork, kwork_options, kwork_usage);
1803	sort_dimension__add(&kwork, "id", &kwork.cmp_id);
1804
1805	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
1806		return perf_kwork__record(&kwork, argc, argv);
1807	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
1808		kwork.sort_order = default_report_sort_order;
1809		if (argc > 1) {
1810			argc = parse_options(argc, argv, report_options, report_usage, 0);
1811			if (argc)
1812				usage_with_options(report_usage, report_options);
1813		}
1814		kwork.report = KWORK_REPORT_RUNTIME;
1815		setup_sorting(&kwork, report_options, report_usage);
1816		return perf_kwork__report(&kwork);
1817	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
1818		kwork.sort_order = default_latency_sort_order;
1819		if (argc > 1) {
1820			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1821			if (argc)
1822				usage_with_options(latency_usage, latency_options);
1823		}
1824		kwork.report = KWORK_REPORT_LATENCY;
1825		setup_sorting(&kwork, latency_options, latency_usage);
1826		return perf_kwork__report(&kwork);
1827	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
1828		if (argc > 1) {
1829			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
1830			if (argc)
1831				usage_with_options(timehist_usage, timehist_options);
1832		}
1833		kwork.report = KWORK_REPORT_TIMEHIST;
1834		return perf_kwork__timehist(&kwork);
1835	} else
1836		usage_with_options(kwork_usage, kwork_options);
1837
1838	return 0;
1839}