// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-kwork.c
 *
 * Copyright (c) 2022  Huawei Inc,  Yang Jihong <yangjihong1@huawei.com>
 */

#include "builtin.h"
#include "perf.h"

#include "util/data.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/header.h"
#include "util/kwork.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/string2.h"
#include "util/callchain.h"
#include "util/evsel_fprintf.h"

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include <traceevent/event-parse.h>

#include <errno.h>
#include <inttypes.h>
#include <signal.h>
#include <linux/err.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

/*
 * report header elements width
 */
#define PRINT_CPU_WIDTH 4
#define PRINT_COUNT_WIDTH 9
#define PRINT_RUNTIME_WIDTH 10
#define PRINT_LATENCY_WIDTH 10
#define PRINT_TIMESTAMP_WIDTH 17
#define PRINT_KWORK_NAME_WIDTH 30
#define PRINT_DECIMAL_WIDTH 3
#define PRINT_BRACKETPAIR_WIDTH 2
#define PRINT_TIME_UNIT_SEC_WIDTH 2
#define PRINT_TIME_UNIT_MSEC_WIDTH 3
#define PRINT_RUNTIME_HEADER_WIDTH (PRINT_RUNTIME_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
#define PRINT_LATENCY_HEADER_WIDTH (PRINT_LATENCY_WIDTH + PRINT_TIME_UNIT_MSEC_WIDTH)
#define PRINT_TIMEHIST_CPU_WIDTH (PRINT_CPU_WIDTH + PRINT_BRACKETPAIR_WIDTH)
#define PRINT_TIMESTAMP_HEADER_WIDTH (PRINT_TIMESTAMP_WIDTH + PRINT_TIME_UNIT_SEC_WIDTH)

struct sort_dimension {
	const char      *name;
	int             (*cmp)(struct kwork_work *l, struct kwork_work *r);
	struct          list_head list;
};

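/*
 * Sort dimensions follow the usual three-way convention: return > 0 if
 * the left work is greater, < 0 if it is smaller, and 0 on a tie so
 * that the next key on the sort list can break it.
 */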
static int id_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->cpu > r->cpu)
		return 1;
	if (l->cpu < r->cpu)
		return -1;

	if (l->id > r->id)
		return 1;
	if (l->id < r->id)
		return -1;

	return 0;
}

static int count_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->nr_atoms > r->nr_atoms)
		return 1;
	if (l->nr_atoms < r->nr_atoms)
		return -1;

	return 0;
}

static int runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->total_runtime > r->total_runtime)
		return 1;
	if (l->total_runtime < r->total_runtime)
		return -1;

	return 0;
}

static int max_runtime_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->max_runtime > r->max_runtime)
		return 1;
	if (l->max_runtime < r->max_runtime)
		return -1;

	return 0;
}

static int avg_latency_cmp(struct kwork_work *l, struct kwork_work *r)
{
	u64 avgl, avgr;

	if (!r->nr_atoms)
		return 1;
	if (!l->nr_atoms)
		return -1;

	avgl = l->total_latency / l->nr_atoms;
	avgr = r->total_latency / r->nr_atoms;

	if (avgl > avgr)
		return 1;
	if (avgl < avgr)
		return -1;

	return 0;
}

static int max_latency_cmp(struct kwork_work *l, struct kwork_work *r)
{
	if (l->max_latency > r->max_latency)
		return 1;
	if (l->max_latency < r->max_latency)
		return -1;

	return 0;
}

static int sort_dimension__add(struct perf_kwork *kwork,
			       const char *tok, struct list_head *list)
{
	size_t i;
	static struct sort_dimension max_sort_dimension = {
		.name = "max",
		.cmp  = max_runtime_cmp,
	};
	static struct sort_dimension id_sort_dimension = {
		.name = "id",
		.cmp  = id_cmp,
	};
	static struct sort_dimension runtime_sort_dimension = {
		.name = "runtime",
		.cmp  = runtime_cmp,
	};
	static struct sort_dimension count_sort_dimension = {
		.name = "count",
		.cmp  = count_cmp,
	};
	static struct sort_dimension avg_sort_dimension = {
		.name = "avg",
		.cmp  = avg_latency_cmp,
	};
	struct sort_dimension *available_sorts[] = {
		&id_sort_dimension,
		&max_sort_dimension,
		&count_sort_dimension,
		&runtime_sort_dimension,
		&avg_sort_dimension,
	};

	if (kwork->report == KWORK_REPORT_LATENCY)
		max_sort_dimension.cmp = max_latency_cmp;

	for (i = 0; i < ARRAY_SIZE(available_sorts); i++) {
		if (!strcmp(available_sorts[i]->name, tok)) {
			list_add_tail(&available_sorts[i]->list, list);
			return 0;
		}
	}

	return -1;
}

static void setup_sorting(struct perf_kwork *kwork,
			  const struct option *options,
			  const char * const usage_msg[])
{
	char *tmp, *tok, *str = strdup(kwork->sort_order);

	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
			usage_with_options_msg(usage_msg, options,
					       "Unknown --sort key: `%s'", tok);
	}

	pr_debug("Sort order: %s\n", kwork->sort_order);
	free(str);
}

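/*
 * Atoms are carved out of pages holding NR_ATOM_PER_PAGE entries each;
 * a per-page bitmap tracks which slots are live. This amortizes the
 * allocation cost: a fresh page is only zalloc'd once the existing
 * pages are full.
 */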
static struct kwork_atom *atom_new(struct perf_kwork *kwork,
				   struct perf_sample *sample)
{
	unsigned long i;
	struct kwork_atom_page *page;
	struct kwork_atom *atom = NULL;

	list_for_each_entry(page, &kwork->atom_page_list, list) {
		if (!bitmap_full(page->bitmap, NR_ATOM_PER_PAGE)) {
			i = find_first_zero_bit(page->bitmap, NR_ATOM_PER_PAGE);
			BUG_ON(i >= NR_ATOM_PER_PAGE);
			atom = &page->atoms[i];
			goto found_atom;
		}
	}

	/*
	 * new page
	 */
	page = zalloc(sizeof(*page));
	if (page == NULL) {
		pr_err("Failed to zalloc kwork atom page\n");
		return NULL;
	}

	i = 0;
	atom = &page->atoms[0];
	list_add_tail(&page->list, &kwork->atom_page_list);

found_atom:
	__set_bit(i, page->bitmap);
	atom->time = sample->time;
	atom->prev = NULL;
	atom->page_addr = page;
	atom->bit_inpage = i;
	return atom;
}

static void atom_free(struct kwork_atom *atom)
{
	if (atom->prev != NULL)
		atom_free(atom->prev);

	__clear_bit(atom->bit_inpage,
		    ((struct kwork_atom_page *)atom->page_addr)->bitmap);
}

static void atom_del(struct kwork_atom *atom)
{
	list_del(&atom->list);
	atom_free(atom);
}

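/*
 * Compare two works by each dimension on the sort list in turn;
 * the first dimension that differs decides the order.
 */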
static int work_cmp(struct list_head *list,
		    struct kwork_work *l, struct kwork_work *r)
{
	int ret = 0;
	struct sort_dimension *sort;

	BUG_ON(list_empty(list));

	list_for_each_entry(sort, list, list) {
		ret = sort->cmp(l, r);
		if (ret)
			return ret;
	}

	return ret;
}

static struct kwork_work *work_search(struct rb_root_cached *root,
				      struct kwork_work *key,
				      struct list_head *sort_list)
{
	int cmp;
	struct kwork_work *work;
	struct rb_node *node = root->rb_root.rb_node;

	while (node) {
		work = container_of(node, struct kwork_work, node);
		cmp = work_cmp(sort_list, key, work);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else {
			if (work->name == NULL)
				work->name = key->name;
			return work;
		}
	}
	return NULL;
}

static void work_insert(struct rb_root_cached *root,
			struct kwork_work *key, struct list_head *sort_list)
{
	int cmp;
	bool leftmost = true;
	struct kwork_work *cur;
	struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL;

	while (*new) {
		cur = container_of(*new, struct kwork_work, node);
		parent = *new;
		cmp = work_cmp(sort_list, key, cur);

		if (cmp > 0)
			new = &((*new)->rb_left);
		else {
			new = &((*new)->rb_right);
			leftmost = false;
		}
	}

	rb_link_node(&key->node, parent, new);
	rb_insert_color_cached(&key->node, root, leftmost);
}

static struct kwork_work *work_new(struct kwork_work *key)
{
	int i;
	struct kwork_work *work = zalloc(sizeof(*work));

	if (work == NULL) {
		pr_err("Failed to zalloc kwork work\n");
		return NULL;
	}

	for (i = 0; i < KWORK_TRACE_MAX; i++)
		INIT_LIST_HEAD(&work->atom_list[i]);

	work->id = key->id;
	work->cpu = key->cpu;
	work->name = key->name;
	work->class = key->class;
	return work;
}

static struct kwork_work *work_findnew(struct rb_root_cached *root,
				       struct kwork_work *key,
				       struct list_head *sort_list)
{
	struct kwork_work *work = work_search(root, key, sort_list);

	if (work != NULL)
		return work;

	work = work_new(key);
	if (work)
		work_insert(root, work, sort_list);

	return work;
}

static void profile_update_timespan(struct perf_kwork *kwork,
				    struct perf_sample *sample)
{
	if (!kwork->summary)
		return;

	if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
		kwork->timestart = sample->time;

	if (kwork->timeend < sample->time)
		kwork->timeend = sample->time;
}

static bool profile_event_match(struct perf_kwork *kwork,
				struct kwork_work *work,
				struct perf_sample *sample)
{
	int cpu = work->cpu;
	u64 time = sample->time;
	struct perf_time_interval *ptime = &kwork->ptime;

	if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
		return false;

	if (((ptime->start != 0) && (ptime->start > time)) ||
	    ((ptime->end != 0) && (ptime->end < time)))
		return false;

	if ((kwork->profile_name != NULL) &&
	    (work->name != NULL) &&
	    (strcmp(work->name, kwork->profile_name) != 0))
		return false;

	profile_update_timespan(kwork, sample);
	return true;
}

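/*
 * Record a src_type event for a work item: allocate an atom stamped
 * with the sample time and queue it on the work's src_type list. If a
 * pending dst_type atom exists (e.g. the raise preceding an entry), it
 * is unlinked and chained via atom->prev so the pair can be reported
 * together later.
 */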
static int work_push_atom(struct perf_kwork *kwork,
			  struct kwork_class *class,
			  enum kwork_trace_type src_type,
			  enum kwork_trace_type dst_type,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine,
			  struct kwork_work **ret_work)
{
	struct kwork_atom *atom, *dst_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	atom = atom_new(kwork, sample);
	if (atom == NULL)
		return -1;

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (work == NULL) {
		atom_free(atom);	/* page-backed: release the bitmap slot, not free() */
		return -1;
	}

	if (!profile_event_match(kwork, work, sample)) {
		atom_free(atom);	/* otherwise the atom's page slot would leak */
		return 0;
	}

	if (dst_type < KWORK_TRACE_MAX) {
		dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
						   struct kwork_atom, list);
		if (dst_atom != NULL) {
			atom->prev = dst_atom;
			list_del(&dst_atom->list);
		}
	}

	if (ret_work != NULL)
		*ret_work = work;

	list_add_tail(&atom->list, &work->atom_list[src_type]);

	return 0;
}

static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
					struct kwork_class *class,
					enum kwork_trace_type src_type,
					enum kwork_trace_type dst_type,
					struct evsel *evsel,
					struct perf_sample *sample,
					struct machine *machine,
					struct kwork_work **ret_work)
{
	struct kwork_atom *atom, *src_atom;
	struct kwork_work *work, key;

	BUG_ON(class->work_init == NULL);
	class->work_init(class, &key, evsel, sample, machine);

	work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
	if (ret_work != NULL)
		*ret_work = work;

	if (work == NULL)
		return NULL;

	if (!profile_event_match(kwork, work, sample))
		return NULL;

	atom = list_last_entry_or_null(&work->atom_list[dst_type],
				       struct kwork_atom, list);
	if (atom != NULL)
		return atom;

	src_atom = atom_new(kwork, sample);
	if (src_atom != NULL)
		list_add_tail(&src_atom->list, &work->atom_list[src_type]);
	else {
		if (ret_work != NULL)
			*ret_work = NULL;
	}

	return NULL;
}

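/*
 * Runtime accounting: an exit sample closes the atom pushed at entry;
 * the delta updates the per-work total and, when it is the largest seen
 * so far, the max-runtime window.
 */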
static void report_update_exit_event(struct kwork_work *work,
				     struct kwork_atom *atom,
				     struct perf_sample *sample)
{
	u64 delta;
	u64 exit_time = sample->time;
	u64 entry_time = atom->time;

	if ((entry_time != 0) && (exit_time >= entry_time)) {
		delta = exit_time - entry_time;
		if ((delta > work->max_runtime) ||
		    (work->max_runtime == 0)) {
			work->max_runtime = delta;
			work->max_runtime_start = entry_time;
			work->max_runtime_end = exit_time;
		}
		work->total_runtime += delta;
		work->nr_atoms++;
	}
}

static int report_entry_event(struct perf_kwork *kwork,
			      struct kwork_class *class,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct machine *machine)
{
	return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
			      KWORK_TRACE_MAX, evsel, sample,
			      machine, NULL);
}

static int report_exit_event(struct perf_kwork *kwork,
			     struct kwork_class *class,
			     struct evsel *evsel,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;

	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
			     KWORK_TRACE_ENTRY, evsel, sample,
			     machine, &work);
	if (work == NULL)
		return -1;

	if (atom != NULL) {
		report_update_exit_event(work, atom, sample);
		atom_del(atom);
	}

	return 0;
}

static void latency_update_entry_event(struct kwork_work *work,
				       struct kwork_atom *atom,
				       struct perf_sample *sample)
{
	u64 delta;
	u64 entry_time = sample->time;
	u64 raise_time = atom->time;

	if ((raise_time != 0) && (entry_time >= raise_time)) {
		delta = entry_time - raise_time;
		if ((delta > work->max_latency) ||
		    (work->max_latency == 0)) {
			work->max_latency = delta;
			work->max_latency_start = raise_time;
			work->max_latency_end = entry_time;
		}
		work->total_latency += delta;
		work->nr_atoms++;
	}
}

static int latency_raise_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
			      KWORK_TRACE_MAX, evsel, sample,
			      machine, NULL);
}

static int latency_entry_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;

	atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
			     KWORK_TRACE_RAISE, evsel, sample,
			     machine, &work);
	if (work == NULL)
		return -1;

	if (atom != NULL) {
		latency_update_entry_event(work, atom, sample);
		atom_del(atom);
	}

	return 0;
}

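/*
 * Resolve and cache the callchain of the entry sample so it can be
 * printed together with the exit event. Generic softirq dispatch
 * frames are marked as ignored to keep the printed chain focused on
 * the actual handler.
 */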
static void timehist_save_callchain(struct perf_kwork *kwork,
				    struct perf_sample *sample,
				    struct evsel *evsel,
				    struct machine *machine)
{
	struct symbol *sym;
	struct thread *thread;
	struct callchain_cursor_node *node;
	struct callchain_cursor *cursor = &callchain_cursor;

	if (!kwork->show_callchain || sample->callchain == NULL)
		return;

	/* want main thread for process - has maps */
	thread = machine__findnew_thread(machine, sample->pid, sample->pid);
	if (thread == NULL) {
		pr_debug("Failed to get thread for pid %d\n", sample->pid);
		return;
	}

	if (thread__resolve_callchain(thread, cursor, evsel, sample,
				      NULL, NULL, kwork->max_stack + 2) != 0) {
		pr_debug("Failed to resolve callchain, skipping\n");
		goto out_put;
	}

	callchain_cursor_commit(cursor);

	while (true) {
		node = callchain_cursor_current(cursor);
		if (node == NULL)
			break;

		sym = node->ms.sym;
		if (sym) {
			if (!strcmp(sym->name, "__softirqentry_text_start") ||
			    !strcmp(sym->name, "__do_softirq"))
				sym->ignore = 1;
		}

		callchain_cursor_advance(cursor);
	}

out_put:
	thread__put(thread);
}

static void timehist_print_event(struct perf_kwork *kwork,
				 struct kwork_work *work,
				 struct kwork_atom *atom,
				 struct perf_sample *sample,
				 struct addr_location *al)
{
	char entrytime[32], exittime[32];
	char kwork_name[PRINT_KWORK_NAME_WIDTH];

	/*
	 * runtime start
	 */
	timestamp__scnprintf_usec(atom->time,
				  entrytime, sizeof(entrytime));
	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, entrytime);

	/*
	 * runtime end
	 */
	timestamp__scnprintf_usec(sample->time,
				  exittime, sizeof(exittime));
	printf(" %*s ", PRINT_TIMESTAMP_WIDTH, exittime);

	/*
	 * cpu
	 */
	printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);

	/*
	 * kwork name
	 */
	if (work->class && work->class->work_name) {
		work->class->work_name(work, kwork_name,
				       PRINT_KWORK_NAME_WIDTH);
		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, kwork_name);
	} else
		printf(" %-*s ", PRINT_KWORK_NAME_WIDTH, "");

	/*
	 * runtime
	 */
	printf(" %*.*f ",
	       PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
	       (double)(sample->time - atom->time) / NSEC_PER_MSEC);

	/*
	 * delaytime
	 */
	if (atom->prev != NULL)
		printf(" %*.*f ", PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
		       (double)(atom->time - atom->prev->time) / NSEC_PER_MSEC);
	else
		printf(" %*s ", PRINT_LATENCY_WIDTH, " ");

	/*
	 * callchain
	 */
	if (kwork->show_callchain) {
		printf(" ");
		sample__fprintf_sym(sample, al, 0,
				    EVSEL__PRINT_SYM | EVSEL__PRINT_ONELINE |
				    EVSEL__PRINT_CALLCHAIN_ARROW |
				    EVSEL__PRINT_SKIP_IGNORED,
				    &callchain_cursor, symbol_conf.bt_stop_list,
				    stdout);
	}

	printf("\n");
}

static int timehist_raise_event(struct perf_kwork *kwork,
				struct kwork_class *class,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
			      KWORK_TRACE_MAX, evsel, sample,
			      machine, NULL);
}

static int timehist_entry_event(struct perf_kwork *kwork,
				struct kwork_class *class,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret;
	struct kwork_work *work = NULL;

	ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
			     KWORK_TRACE_RAISE, evsel, sample,
			     machine, &work);
	if (ret)
		return ret;

	if (work != NULL)
		timehist_save_callchain(kwork, sample, evsel, machine);

	return 0;
}

static int timehist_exit_event(struct perf_kwork *kwork,
			       struct kwork_class *class,
			       struct evsel *evsel,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	struct kwork_atom *atom = NULL;
	struct kwork_work *work = NULL;
	struct addr_location al;

	if (machine__resolve(machine, &al, sample) < 0) {
		pr_debug("Problem processing event, skipping it\n");
		return -1;
	}

	atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
			     KWORK_TRACE_ENTRY, evsel, sample,
			     machine, &work);
	if (work == NULL)
		return -1;

	if (atom != NULL) {
		work->nr_atoms++;
		timehist_print_event(kwork, work, atom, sample, &al);
		atom_del(atom);
	}

	return 0;
}

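/*
 * Each kwork class wires its tracepoints to the raise/entry/exit
 * callbacks of the tp_handler selected for the current report. Hardirqs
 * have no raise tracepoint, so only entry and exit are tracked for the
 * irq class.
 */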
static struct kwork_class kwork_irq;
static int process_irq_handler_entry_event(struct perf_tool *tool,
					   struct evsel *evsel,
					   struct perf_sample *sample,
					   struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_irq,
						      evsel, sample, machine);
	return 0;
}

static int process_irq_handler_exit_event(struct perf_tool *tool,
					  struct evsel *evsel,
					  struct perf_sample *sample,
					  struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_irq,
						     evsel, sample, machine);
	return 0;
}

const struct evsel_str_handler irq_tp_handlers[] = {
	{ "irq:irq_handler_entry", process_irq_handler_entry_event, },
	{ "irq:irq_handler_exit",  process_irq_handler_exit_event,  },
};

static int irq_class_init(struct kwork_class *class,
			  struct perf_session *session)
{
	if (perf_session__set_tracepoints_handlers(session, irq_tp_handlers)) {
		pr_err("Failed to set irq tracepoints handlers\n");
		return -1;
	}

	class->work_root = RB_ROOT_CACHED;
	return 0;
}

static void irq_work_init(struct kwork_class *class,
			  struct kwork_work *work,
			  struct evsel *evsel,
			  struct perf_sample *sample,
			  struct machine *machine __maybe_unused)
{
	work->class = class;
	work->cpu = sample->cpu;
	work->id = evsel__intval(evsel, sample, "irq");
	work->name = evsel__strval(evsel, sample, "name");
}

static void irq_work_name(struct kwork_work *work, char *buf, int len)
{
	snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
}

static struct kwork_class kwork_irq = {
	.name           = "irq",
	.type           = KWORK_CLASS_IRQ,
	.nr_tracepoints = 2,
	.tp_handlers    = irq_tp_handlers,
	.class_init     = irq_class_init,
	.work_init      = irq_work_init,
	.work_name      = irq_work_name,
};

static struct kwork_class kwork_softirq;
static int process_softirq_raise_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->raise_event)
		return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
						      evsel, sample, machine);

	return 0;
}

static int process_softirq_entry_event(struct perf_tool *tool,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
						      evsel, sample, machine);

	return 0;
}

static int process_softirq_exit_event(struct perf_tool *tool,
				      struct evsel *evsel,
				      struct perf_sample *sample,
				      struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
						     evsel, sample, machine);

	return 0;
}

const struct evsel_str_handler softirq_tp_handlers[] = {
	{ "irq:softirq_raise", process_softirq_raise_event, },
	{ "irq:softirq_entry", process_softirq_entry_event, },
	{ "irq:softirq_exit",  process_softirq_exit_event,  },
};

static int softirq_class_init(struct kwork_class *class,
			      struct perf_session *session)
{
	if (perf_session__set_tracepoints_handlers(session,
						   softirq_tp_handlers)) {
		pr_err("Failed to set softirq tracepoints handlers\n");
		return -1;
	}

	class->work_root = RB_ROOT_CACHED;
	return 0;
}

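/*
 * Map a softirq vector number to its name by walking the symbolic
 * values of the tracepoint print format (the __print_symbolic() table
 * behind "REC->vec"), e.g. 9 -> "RCU".
 */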
static char *evsel__softirq_name(struct evsel *evsel, u64 num)
{
	char *name = NULL;
	bool found = false;
	struct tep_print_flag_sym *sym = NULL;
	struct tep_print_arg *args = evsel->tp_format->print_fmt.args;

	if ((args == NULL) || (args->next == NULL))
		return NULL;

	/* skip softirq field: "REC->vec" */
	for (sym = args->next->symbol.symbols; sym != NULL; sym = sym->next) {
		if ((eval_flag(sym->value) == (unsigned long long)num) &&
		    (strlen(sym->str) != 0)) {
			found = true;
			break;
		}
	}

	if (!found)
		return NULL;

	name = strdup(sym->str);
	if (name == NULL) {
		pr_err("Failed to copy symbol name\n");
		return NULL;
	}
	return name;
}

static void softirq_work_init(struct kwork_class *class,
			      struct kwork_work *work,
			      struct evsel *evsel,
			      struct perf_sample *sample,
			      struct machine *machine __maybe_unused)
{
	u64 num = evsel__intval(evsel, sample, "vec");

	work->id = num;
	work->class = class;
	work->cpu = sample->cpu;
	work->name = evsel__softirq_name(evsel, num);
}

static void softirq_work_name(struct kwork_work *work, char *buf, int len)
{
	snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
}

static struct kwork_class kwork_softirq = {
	.name           = "softirq",
	.type           = KWORK_CLASS_SOFTIRQ,
	.nr_tracepoints = 3,
	.tp_handlers    = softirq_tp_handlers,
	.class_init     = softirq_class_init,
	.work_init      = softirq_work_init,
	.work_name      = softirq_work_name,
};

static struct kwork_class kwork_workqueue;
static int process_workqueue_activate_work_event(struct perf_tool *tool,
						 struct evsel *evsel,
						 struct perf_sample *sample,
						 struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->raise_event)
		return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
						    evsel, sample, machine);

	return 0;
}

static int process_workqueue_execute_start_event(struct perf_tool *tool,
						 struct evsel *evsel,
						 struct perf_sample *sample,
						 struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->entry_event)
		return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
						    evsel, sample, machine);

	return 0;
}

static int process_workqueue_execute_end_event(struct perf_tool *tool,
					       struct evsel *evsel,
					       struct perf_sample *sample,
					       struct machine *machine)
{
	struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

	if (kwork->tp_handler->exit_event)
		return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
						   evsel, sample, machine);

	return 0;
}

const struct evsel_str_handler workqueue_tp_handlers[] = {
	{ "workqueue:workqueue_activate_work", process_workqueue_activate_work_event, },
	{ "workqueue:workqueue_execute_start", process_workqueue_execute_start_event, },
	{ "workqueue:workqueue_execute_end",   process_workqueue_execute_end_event,   },
};

static int workqueue_class_init(struct kwork_class *class,
				struct perf_session *session)
{
	if (perf_session__set_tracepoints_handlers(session,
						   workqueue_tp_handlers)) {
		pr_err("Failed to set workqueue tracepoints handlers\n");
		return -1;
	}

	class->work_root = RB_ROOT_CACHED;
	return 0;
}

static void workqueue_work_init(struct kwork_class *class,
				struct kwork_work *work,
				struct evsel *evsel,
				struct perf_sample *sample,
				struct machine *machine)
{
	char *modp = NULL;
	unsigned long long function_addr = evsel__intval(evsel,
							 sample, "function");

	work->class = class;
	work->cpu = sample->cpu;
	work->id = evsel__intval(evsel, sample, "work");
	work->name = function_addr == 0 ? NULL :
		machine__resolve_kernel_addr(machine, &function_addr, &modp);
}

static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
{
	if (work->name != NULL)
		snprintf(buf, len, "(w)%s", work->name);
	else
		snprintf(buf, len, "(w)0x%" PRIx64, work->id);
}

static struct kwork_class kwork_workqueue = {
	.name           = "workqueue",
	.type           = KWORK_CLASS_WORKQUEUE,
	.nr_tracepoints = 3,
	.tp_handlers    = workqueue_tp_handlers,
	.class_init     = workqueue_class_init,
	.work_init      = workqueue_work_init,
	.work_name      = workqueue_work_name,
};

static struct kwork_class *kwork_class_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]       = &kwork_irq,
	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue,
};

static void print_separator(int len)
{
	printf(" %.*s\n", len, graph_dotted_line);
}

static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
{
	int ret = 0;
	char kwork_name[PRINT_KWORK_NAME_WIDTH];
	char max_runtime_start[32], max_runtime_end[32];
	char max_latency_start[32], max_latency_end[32];

	printf(" ");

	/*
	 * kwork name
	 */
	if (work->class && work->class->work_name) {
		work->class->work_name(work, kwork_name,
				       PRINT_KWORK_NAME_WIDTH);
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, kwork_name);
	} else {
		ret += printf(" %-*s |", PRINT_KWORK_NAME_WIDTH, "");
	}

	/*
	 * cpu
	 */
	ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);

	/*
	 * total runtime
	 */
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %*.*f ms |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->total_runtime / NSEC_PER_MSEC);
	} else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
		ret += printf(" %*.*f ms |",
			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->total_latency /
			      work->nr_atoms / NSEC_PER_MSEC);
	}

	/*
	 * count
	 */
	ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);

	/*
	 * max runtime, max runtime start, max runtime end
	 */
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		timestamp__scnprintf_usec(work->max_runtime_start,
					  max_runtime_start,
					  sizeof(max_runtime_start));
		timestamp__scnprintf_usec(work->max_runtime_end,
					  max_runtime_end,
					  sizeof(max_runtime_end));
		ret += printf(" %*.*f ms | %*s s | %*s s |",
			      PRINT_RUNTIME_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->max_runtime / NSEC_PER_MSEC,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_start,
			      PRINT_TIMESTAMP_WIDTH, max_runtime_end);
	}
	/*
	 * max delay, max delay start, max delay end
	 */
	else if (kwork->report == KWORK_REPORT_LATENCY) {
		timestamp__scnprintf_usec(work->max_latency_start,
					  max_latency_start,
					  sizeof(max_latency_start));
		timestamp__scnprintf_usec(work->max_latency_end,
					  max_latency_end,
					  sizeof(max_latency_end));
		ret += printf(" %*.*f ms | %*s s | %*s s |",
			      PRINT_LATENCY_WIDTH, PRINT_DECIMAL_WIDTH,
			      (double)work->max_latency / NSEC_PER_MSEC,
			      PRINT_TIMESTAMP_WIDTH, max_latency_start,
			      PRINT_TIMESTAMP_WIDTH, max_latency_end);
	}

	printf("\n");
	return ret;
}

static int report_print_header(struct perf_kwork *kwork)
{
	int ret;

	printf("\n ");
	ret = printf(" %-*s | %-*s |",
		     PRINT_KWORK_NAME_WIDTH, "Kwork Name",
		     PRINT_CPU_WIDTH, "Cpu");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Total Runtime");
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		ret += printf(" %-*s |",
			      PRINT_LATENCY_HEADER_WIDTH, "Avg delay");
	}

	ret += printf(" %-*s |", PRINT_COUNT_WIDTH, "Count");

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		ret += printf(" %-*s | %-*s | %-*s |",
			      PRINT_RUNTIME_HEADER_WIDTH, "Max runtime",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime start",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max runtime end");
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		ret += printf(" %-*s | %-*s | %-*s |",
			      PRINT_LATENCY_HEADER_WIDTH, "Max delay",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay start",
			      PRINT_TIMESTAMP_HEADER_WIDTH, "Max delay end");
	}

	printf("\n");
	print_separator(ret);
	return ret;
}

static void timehist_print_header(void)
{
	/*
	 * header row
	 */
	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
	       PRINT_TIMESTAMP_WIDTH, "Runtime start",
	       PRINT_TIMESTAMP_WIDTH, "Runtime end",
	       PRINT_TIMEHIST_CPU_WIDTH, "Cpu",
	       PRINT_KWORK_NAME_WIDTH, "Kwork name",
	       PRINT_RUNTIME_WIDTH, "Runtime",
	       PRINT_RUNTIME_WIDTH, "Delaytime");

	/*
	 * units row
	 */
	printf(" %-*s  %-*s  %-*s  %-*s  %-*s  %-*s\n",
	       PRINT_TIMESTAMP_WIDTH, "",
	       PRINT_TIMESTAMP_WIDTH, "",
	       PRINT_TIMEHIST_CPU_WIDTH, "",
	       PRINT_KWORK_NAME_WIDTH, "(TYPE)NAME:NUM",
	       PRINT_RUNTIME_WIDTH, "(msec)",
	       PRINT_RUNTIME_WIDTH, "(msec)");

	/*
	 * separator
	 */
	printf(" %.*s  %.*s  %.*s  %.*s  %.*s  %.*s\n",
	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
	       PRINT_TIMESTAMP_WIDTH, graph_dotted_line,
	       PRINT_TIMEHIST_CPU_WIDTH, graph_dotted_line,
	       PRINT_KWORK_NAME_WIDTH, graph_dotted_line,
	       PRINT_RUNTIME_WIDTH, graph_dotted_line,
	       PRINT_RUNTIME_WIDTH, graph_dotted_line);
}

static void print_summary(struct perf_kwork *kwork)
{
	u64 time = kwork->timeend - kwork->timestart;

	printf("  Total count            : %9" PRIu64 "\n", kwork->all_count);
	printf("  Total runtime   (msec) : %9.3f (%.3f%% load average)\n",
	       (double)kwork->all_runtime / NSEC_PER_MSEC,
	       time == 0 ? 0 : (double)kwork->all_runtime / time);
	printf("  Total time span (msec) : %9.3f\n",
	       (double)time / NSEC_PER_MSEC);
}

static unsigned long long nr_list_entry(struct list_head *head)
{
	struct list_head *pos;
	unsigned long long n = 0;

	list_for_each(pos, head)
		n++;

	return n;
}

static void print_skipped_events(struct perf_kwork *kwork)
{
	int i;
	const char *const kwork_event_str[] = {
		[KWORK_TRACE_RAISE] = "raise",
		[KWORK_TRACE_ENTRY] = "entry",
		[KWORK_TRACE_EXIT]  = "exit",
	};

	if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
	    (kwork->nr_events != 0)) {
		printf("  INFO: %.3f%% skipped events (%" PRIu64 " including ",
		       (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_skipped_events[KWORK_TRACE_MAX]);

		for (i = 0; i < KWORK_TRACE_MAX; i++) {
			printf("%" PRIu64 " %s%s",
			       kwork->nr_skipped_events[i],
			       kwork_event_str[i],
			       (i == KWORK_TRACE_MAX - 1) ? ")\n" : ", ");
		}
	}

	if (verbose > 0)
		printf("  INFO: use %lld atom pages\n",
		       nr_list_entry(&kwork->atom_page_list));
}

static void print_bad_events(struct perf_kwork *kwork)
{
	if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
		printf("  INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
		       (double)kwork->nr_lost_events /
		       (double)kwork->nr_events * 100.0,
		       kwork->nr_lost_events, kwork->nr_events,
		       kwork->nr_lost_chunks);
	}
}

static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
{
	struct rb_node *node;
	struct kwork_work *data;
	struct rb_root_cached *root = &class->work_root;

	pr_debug("Sorting %s ...\n", class->name);
	for (;;) {
		node = rb_first_cached(root);
		if (!node)
			break;

		rb_erase_cached(node, root);
		data = rb_entry(node, struct kwork_work, node);
		work_insert(&kwork->sorted_work_root,
			       data, &kwork->sort_list);
	}
}

static void perf_kwork__sort(struct perf_kwork *kwork)
{
	struct kwork_class *class;

	list_for_each_entry(class, &kwork->class_list, list)
		work_sort(kwork, class);
}

static int perf_kwork__check_config(struct perf_kwork *kwork,
				    struct perf_session *session)
{
	int ret;
	struct evsel *evsel;
	struct kwork_class *class;

	static struct trace_kwork_handler report_ops = {
		.entry_event = report_entry_event,
		.exit_event  = report_exit_event,
	};
	static struct trace_kwork_handler latency_ops = {
		.raise_event = latency_raise_event,
		.entry_event = latency_entry_event,
	};
	static struct trace_kwork_handler timehist_ops = {
		.raise_event = timehist_raise_event,
		.entry_event = timehist_entry_event,
		.exit_event  = timehist_exit_event,
	};

	switch (kwork->report) {
	case KWORK_REPORT_RUNTIME:
		kwork->tp_handler = &report_ops;
		break;
	case KWORK_REPORT_LATENCY:
		kwork->tp_handler = &latency_ops;
		break;
	case KWORK_REPORT_TIMEHIST:
		kwork->tp_handler = &timehist_ops;
		break;
	default:
		pr_debug("Invalid report type %d\n", kwork->report);
		return -1;
	}

	list_for_each_entry(class, &kwork->class_list, list)
		if ((class->class_init != NULL) &&
		    (class->class_init(class, session) != 0))
			return -1;

	if (kwork->cpu_list != NULL) {
		ret = perf_session__cpu_bitmap(session,
					       kwork->cpu_list,
					       kwork->cpu_bitmap);
		if (ret < 0) {
			pr_err("Invalid cpu bitmap\n");
			return -1;
		}
	}

	if (kwork->time_str != NULL) {
		ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
		if (ret != 0) {
			pr_err("Invalid time span\n");
			return -1;
		}
	}

	list_for_each_entry(evsel, &session->evlist->core.entries, core.node) {
		if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
			pr_debug("Samples do not have callchains\n");
			kwork->show_callchain = 0;
			symbol_conf.use_callchain = 0;
		}
	}

	return 0;
}

static int perf_kwork__read_events(struct perf_kwork *kwork)
{
	int ret = -1;
	struct perf_session *session = NULL;

	struct perf_data data = {
		.path  = input_name,
		.mode  = PERF_DATA_MODE_READ,
		.force = kwork->force,
	};

	session = perf_session__new(&data, &kwork->tool);
	if (IS_ERR(session)) {
		pr_debug("Error creating perf session\n");
		return PTR_ERR(session);
	}

	symbol__init(&session->header.env);

	if (perf_kwork__check_config(kwork, session) != 0)
		goto out_delete;

	if (session->tevent.pevent &&
	    tep_set_function_resolver(session->tevent.pevent,
				      machine__resolve_kernel_addr,
				      &session->machines.host) < 0) {
		pr_err("Failed to set libtraceevent function resolver\n");
		goto out_delete;
	}

	if (kwork->report == KWORK_REPORT_TIMEHIST)
		timehist_print_header();

	ret = perf_session__process_events(session);
	if (ret) {
		pr_debug("Failed to process events, error %d\n", ret);
		goto out_delete;
	}

	kwork->nr_events      = session->evlist->stats.nr_events[0];
	kwork->nr_lost_events = session->evlist->stats.total_lost;
	kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];

out_delete:
	perf_session__delete(session);
	return ret;
}

static void process_skipped_events(struct perf_kwork *kwork,
				   struct kwork_work *work)
{
	int i;
	unsigned long long count;

	for (i = 0; i < KWORK_TRACE_MAX; i++) {
		count = nr_list_entry(&work->atom_list[i]);
		kwork->nr_skipped_events[i] += count;
		kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
	}
}

struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
				       struct kwork_class *class,
				       struct kwork_work *key)
{
	struct kwork_work *work = NULL;

	work = work_new(key);
	if (work == NULL)
		return NULL;

	work_insert(&class->work_root, work, &kwork->cmp_id);
	return work;
}

static void sig_handler(int sig)
{
	/*
	 * Simply capture the termination signal so that
	 * the program can continue after pause() returns
	 */
	pr_debug("Capture signal %d\n", sig);
}

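/*
 * BPF mode bypasses perf.data entirely: tracing is started in the
 * kernel, the tool pauses until SIGINT/SIGTERM arrives, and the
 * aggregated results are then read back before the skeleton is cleaned
 * up. The perf_kwork__trace_*() and perf_kwork__report_*_bpf() helpers
 * are declared in util/kwork.h and implemented by the BPF side of the
 * tool (built only with HAVE_BPF_SKEL).
 */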
static int perf_kwork__report_bpf(struct perf_kwork *kwork)
{
	int ret;

	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	ret = perf_kwork__trace_prepare_bpf(kwork);
	if (ret)
		return -1;

	printf("Starting trace, hit <Ctrl+C> to stop and report\n");

	perf_kwork__trace_start();

	/*
	 * a simple pause, wait here for the stop signal
	 */
	pause();

	perf_kwork__trace_finish();

	perf_kwork__report_read_bpf(kwork);

	perf_kwork__report_cleanup_bpf();

	return 0;
}

static int perf_kwork__report(struct perf_kwork *kwork)
{
	int ret;
	struct rb_node *next;
	struct kwork_work *work;

	if (kwork->use_bpf)
		ret = perf_kwork__report_bpf(kwork);
	else
		ret = perf_kwork__read_events(kwork);

	if (ret != 0)
		return -1;

	perf_kwork__sort(kwork);

	setup_pager();

	ret = report_print_header(kwork);
	next = rb_first_cached(&kwork->sorted_work_root);
	while (next) {
		work = rb_entry(next, struct kwork_work, node);
		process_skipped_events(kwork, work);

		if (work->nr_atoms != 0) {
			report_print_work(kwork, work);
			if (kwork->summary) {
				kwork->all_runtime += work->total_runtime;
				kwork->all_count += work->nr_atoms;
			}
		}
		next = rb_next(next);
	}
	print_separator(ret);

	if (kwork->summary) {
		print_summary(kwork);
		print_separator(ret);
	}

	print_bad_events(kwork);
	print_skipped_events(kwork);
	printf("\n");

	return 0;
}

typedef int (*tracepoint_handler)(struct perf_tool *tool,
				  struct evsel *evsel,
				  struct perf_sample *sample,
				  struct machine *machine);

static int perf_kwork__process_tracepoint_sample(struct perf_tool *tool,
						 union perf_event *event __maybe_unused,
						 struct perf_sample *sample,
						 struct evsel *evsel,
						 struct machine *machine)
{
	int err = 0;

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;

		err = f(tool, evsel, sample, machine);
	}

	return err;
}

static int perf_kwork__timehist(struct perf_kwork *kwork)
{
	/*
	 * event handlers for timehist option
	 */
	kwork->tool.comm	 = perf_event__process_comm;
	kwork->tool.exit	 = perf_event__process_exit;
	kwork->tool.fork	 = perf_event__process_fork;
	kwork->tool.attr	 = perf_event__process_attr;
	kwork->tool.tracing_data = perf_event__process_tracing_data;
	kwork->tool.build_id	 = perf_event__process_build_id;
	kwork->tool.ordered_events = true;
	kwork->tool.ordering_requires_timestamps = true;
	symbol_conf.use_callchain = kwork->show_callchain;

	if (symbol__validate_sym_arguments()) {
		pr_err("Failed to validate sym arguments\n");
		return -1;
	}

	setup_pager();

	return perf_kwork__read_events(kwork);
}

static void setup_event_list(struct perf_kwork *kwork,
			     const struct option *options,
			     const char * const usage_msg[])
{
	int i;
	struct kwork_class *class;
	char *tmp, *tok, *str;

	if (kwork->event_list_str == NULL)
		goto null_event_list_str;

	str = strdup(kwork->event_list_str);
	for (tok = strtok_r(str, ", ", &tmp);
	     tok; tok = strtok_r(NULL, ", ", &tmp)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			class = kwork_class_supported_list[i];
			if (strcmp(tok, class->name) == 0) {
				list_add_tail(&class->list, &kwork->class_list);
				break;
			}
		}
		if (i == KWORK_CLASS_MAX) {
			usage_with_options_msg(usage_msg, options,
					       "Unknown --event key: `%s'", tok);
		}
	}
	free(str);

null_event_list_str:
	/*
	 * configure all kwork events if none are specified
	 */
	if (list_empty(&kwork->class_list)) {
		for (i = 0; i < KWORK_CLASS_MAX; i++) {
			list_add_tail(&kwork_class_supported_list[i]->list,
				      &kwork->class_list);
		}
	}

	pr_debug("Config event list:");
	list_for_each_entry(class, &kwork->class_list, list)
		pr_debug(" %s", class->name);
	pr_debug("\n");
}

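/*
 * 'perf kwork record' is a thin wrapper around 'perf record': it builds
 * a record command line with a -e option for every tracepoint of each
 * selected class, appends the user's remaining arguments, and hands off
 * to cmd_record().
 */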
static int perf_kwork__record(struct perf_kwork *kwork,
			      int argc, const char **argv)
{
	const char **rec_argv;
	unsigned int rec_argc, i, j;
	struct kwork_class *class;

	const char *const record_args[] = {
		"record",
		"-a",
		"-R",
		"-m", "1024",
		"-c", "1",
	};

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;

	list_for_each_entry(class, &kwork->class_list, list)
		rec_argc += 2 * class->nr_tracepoints;

	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	list_for_each_entry(class, &kwork->class_list, list) {
		for (j = 0; j < class->nr_tracepoints; j++) {
			rec_argv[i++] = strdup("-e");
			rec_argv[i++] = strdup(class->tp_handlers[j].name);
		}
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	pr_debug("record comm: ");
	for (j = 0; j < rec_argc; j++)
		pr_debug("%s ", rec_argv[j]);
	pr_debug("\n");

	return cmd_record(i, rec_argv);
}

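/*
 * Typical usage (subcommands and flags as defined below):
 *
 *   perf kwork record -- sleep 1     # trace irq, softirq and workqueue
 *   perf kwork report -S             # runtime report plus summary
 *   perf kwork latency -s avg,max    # raise-to-entry latency report
 *   perf kwork timehist -g           # per-event timeline with callchains
 */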
int cmd_kwork(int argc, const char **argv)
{
	static struct perf_kwork kwork = {
		.class_list          = LIST_HEAD_INIT(kwork.class_list),
		.tool = {
			.mmap    = perf_event__process_mmap,
			.mmap2   = perf_event__process_mmap2,
			.sample  = perf_kwork__process_tracepoint_sample,
		},
		.atom_page_list      = LIST_HEAD_INIT(kwork.atom_page_list),
		.sort_list           = LIST_HEAD_INIT(kwork.sort_list),
		.cmp_id              = LIST_HEAD_INIT(kwork.cmp_id),
		.sorted_work_root    = RB_ROOT_CACHED,
		.tp_handler          = NULL,
		.profile_name        = NULL,
		.cpu_list            = NULL,
		.time_str            = NULL,
		.force               = false,
		.event_list_str      = NULL,
		.summary             = false,
		.sort_order          = NULL,
		.show_callchain      = false,
		.max_stack           = 5,
		.timestart           = 0,
		.timeend             = 0,
		.nr_events           = 0,
		.nr_lost_chunks      = 0,
		.nr_lost_events      = 0,
		.all_runtime         = 0,
		.all_count           = 0,
		.nr_skipped_events   = { 0 },
	};
	static const char default_report_sort_order[] = "runtime, max, count";
	static const char default_latency_sort_order[] = "avg, max, count";
	const struct option kwork_options[] = {
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
	OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
		   "list of kwork to profile (irq, softirq, workqueue, etc)"),
	OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
	OPT_END()
	};
	const struct option report_options[] = {
	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
		   "sort by key(s): runtime, max, count"),
	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_STRING('n', "name", &kwork.profile_name, "name",
		   "event name to profile"),
	OPT_STRING(0, "time", &kwork.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_BOOLEAN('S', "with-summary", &kwork.summary,
		    "Show summary with statistics"),
#ifdef HAVE_BPF_SKEL
	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
		    "Use BPF to measure kwork runtime"),
#endif
	OPT_PARENT(kwork_options)
	};
	const struct option latency_options[] = {
	OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
		   "sort by key(s): avg, max, count"),
	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_STRING('n', "name", &kwork.profile_name, "name",
		   "event name to profile"),
	OPT_STRING(0, "time", &kwork.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
#ifdef HAVE_BPF_SKEL
	OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
		    "Use BPF to measure kwork latency"),
#endif
	OPT_PARENT(kwork_options)
	};
	const struct option timehist_options[] = {
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
		    "Display call chains if present"),
	OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
		   "Maximum number of functions to display in backtrace."),
	OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory",
		    "Look for files with symbols relative to this directory"),
	OPT_STRING(0, "time", &kwork.time_str, "str",
		   "Time span for analysis (start,stop)"),
	OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
		   "list of cpus to profile"),
	OPT_STRING('n', "name", &kwork.profile_name, "name",
		   "event name to profile"),
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_PARENT(kwork_options)
	};
	const char *kwork_usage[] = {
		NULL,
		NULL
	};
	const char * const report_usage[] = {
		"perf kwork report [<options>]",
		NULL
	};
	const char * const latency_usage[] = {
		"perf kwork latency [<options>]",
		NULL
	};
	const char * const timehist_usage[] = {
		"perf kwork timehist [<options>]",
		NULL
	};
	const char *const kwork_subcommands[] = {
		"record", "report", "latency", "timehist", NULL
	};

	argc = parse_options_subcommand(argc, argv, kwork_options,
					kwork_subcommands, kwork_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kwork_usage, kwork_options);

	setup_event_list(&kwork, kwork_options, kwork_usage);
	sort_dimension__add(&kwork, "id", &kwork.cmp_id);

	if (strlen(argv[0]) > 2 && strstarts("record", argv[0]))
		return perf_kwork__record(&kwork, argc, argv);
	else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
		kwork.sort_order = default_report_sort_order;
		if (argc > 1) {
			argc = parse_options(argc, argv, report_options, report_usage, 0);
			if (argc)
				usage_with_options(report_usage, report_options);
		}
		kwork.report = KWORK_REPORT_RUNTIME;
		setup_sorting(&kwork, report_options, report_usage);
		return perf_kwork__report(&kwork);
	} else if (strlen(argv[0]) > 2 && strstarts("latency", argv[0])) {
		kwork.sort_order = default_latency_sort_order;
		if (argc > 1) {
			argc = parse_options(argc, argv, latency_options, latency_usage, 0);
			if (argc)
				usage_with_options(latency_usage, latency_options);
		}
		kwork.report = KWORK_REPORT_LATENCY;
		setup_sorting(&kwork, latency_options, latency_usage);
		return perf_kwork__report(&kwork);
	} else if (strlen(argv[0]) > 2 && strstarts("timehist", argv[0])) {
		if (argc > 1) {
			argc = parse_options(argc, argv, timehist_options, timehist_usage, 0);
			if (argc)
				usage_with_options(timehist_usage, timehist_options);
		}
		kwork.report = KWORK_REPORT_TIMEHIST;
		return perf_kwork__timehist(&kwork);
	} else
		usage_with_options(kwork_usage, kwork_options);

	return 0;
}