tools/perf/builtin-lock.c (v3.5.6)
   1#include "builtin.h"
   2#include "perf.h"
   3
   4#include "util/util.h"
   5#include "util/cache.h"
   6#include "util/symbol.h"
   7#include "util/thread.h"
   8#include "util/header.h"
   9
  10#include "util/parse-options.h"
  11#include "util/trace-event.h"
  12
  13#include "util/debug.h"
  14#include "util/session.h"
  15#include "util/tool.h"
  16
  17#include <sys/types.h>
  18#include <sys/prctl.h>
  19#include <semaphore.h>
  20#include <pthread.h>
  21#include <math.h>
  22#include <limits.h>
  23
  24#include <linux/list.h>
  25#include <linux/hash.h>
  26
  27static struct perf_session *session;
  28
  29/* based on kernel/lockdep.c */
  30#define LOCKHASH_BITS		12
  31#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)
  32
  33static struct list_head lockhash_table[LOCKHASH_SIZE];
  34
  35#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
  36#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
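/*
 * With LOCKHASH_BITS = 12, hash_long() spreads a lock address over
 * LOCKHASH_SIZE = 4096 buckets, so lockhashentry(addr) yields the head
 * of the bucket list that addr hashes into.
 */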
  37
  38struct lock_stat {
  39	struct list_head	hash_entry;
  40	struct rb_node		rb;		/* used for sorting */
  41
   42	/*
   43	 * FIXME: raw_field_value() returns unsigned long long,
   44	 * so the address of lockdep_map should be treated as 64bit.
   45	 * Is there a better solution?
   46	 */
  47	void			*addr;		/* address of lockdep_map, used as ID */
  48	char			*name;		/* for strcpy(), we cannot use const */
  49
  50	unsigned int		nr_acquire;
  51	unsigned int		nr_acquired;
  52	unsigned int		nr_contended;
  53	unsigned int		nr_release;
  54
  55	unsigned int		nr_readlock;
  56	unsigned int		nr_trylock;
  57	/* these times are in nano sec. */
  58	u64			wait_time_total;
  59	u64			wait_time_min;
  60	u64			wait_time_max;
  61
  62	int			discard; /* flag of blacklist */
  63};
  64
   65/*
   66 * States of lock_seq_stat
   67 *
   68 * UNINITIALIZED is required for detecting the first acquire event.
   69 * By the nature of lock events, there is no guarantee
   70 * that the first event seen for a lock is acquire;
   71 * it can be acquired, contended or release.
   72 */
  73#define SEQ_STATE_UNINITIALIZED      0	       /* initial state */
  74#define SEQ_STATE_RELEASED	1
  75#define SEQ_STATE_ACQUIRING	2
  76#define SEQ_STATE_ACQUIRED	3
  77#define SEQ_STATE_READ_ACQUIRED	4
  78#define SEQ_STATE_CONTENDED	5
  79
  80/*
  81 * MAX_LOCK_DEPTH
  82 * Imported from include/linux/sched.h.
  83 * Should this be synchronized?
  84 */
  85#define MAX_LOCK_DEPTH 48
  86
   87/*
   88 * struct lock_seq_stat:
   89 * Holds the state of one lock sequence, e.g.:
   90 * 1) acquire -> acquired -> release
   91 * 2) acquire -> contended -> acquired -> release
   92 * 3) acquire (with read or try) -> release
   93 * 4) Are there other patterns?
   94 */
  95struct lock_seq_stat {
  96	struct list_head        list;
  97	int			state;
  98	u64			prev_event_time;
  99	void                    *addr;
 100
 101	int                     read_count;
 102};
 103
 104struct thread_stat {
 105	struct rb_node		rb;
 106
 107	u32                     tid;
 108	struct list_head        seq_list;
 109};
 110
 111static struct rb_root		thread_stats;
 112
 113static struct thread_stat *thread_stat_find(u32 tid)
 114{
 115	struct rb_node *node;
 116	struct thread_stat *st;
 117
 118	node = thread_stats.rb_node;
 119	while (node) {
 120		st = container_of(node, struct thread_stat, rb);
 121		if (st->tid == tid)
 122			return st;
 123		else if (tid < st->tid)
 124			node = node->rb_left;
 125		else
 126			node = node->rb_right;
 127	}
 128
 129	return NULL;
 130}
 131
 132static void thread_stat_insert(struct thread_stat *new)
 133{
 134	struct rb_node **rb = &thread_stats.rb_node;
 135	struct rb_node *parent = NULL;
 136	struct thread_stat *p;
 137
 138	while (*rb) {
 139		p = container_of(*rb, struct thread_stat, rb);
 140		parent = *rb;
 141
 142		if (new->tid < p->tid)
 143			rb = &(*rb)->rb_left;
 144		else if (new->tid > p->tid)
 145			rb = &(*rb)->rb_right;
 146		else
 147			BUG_ON("inserting invalid thread_stat\n");
 148	}
 149
 150	rb_link_node(&new->rb, parent, rb);
 151	rb_insert_color(&new->rb, &thread_stats);
 152}
 153
 154static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
 155{
 156	struct thread_stat *st;
 157
 158	st = thread_stat_find(tid);
 159	if (st)
 160		return st;
 161
 162	st = zalloc(sizeof(struct thread_stat));
 163	if (!st)
 164		die("memory allocation failed\n");
 165
 166	st->tid = tid;
 167	INIT_LIST_HEAD(&st->seq_list);
 168
 169	thread_stat_insert(st);
 170
 171	return st;
 172}
 173
 174static struct thread_stat *thread_stat_findnew_first(u32 tid);
 175static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
 176	thread_stat_findnew_first;
 177
 178static struct thread_stat *thread_stat_findnew_first(u32 tid)
 179{
 180	struct thread_stat *st;
 181
 182	st = zalloc(sizeof(struct thread_stat));
 183	if (!st)
 184		die("memory allocation failed\n");
 185	st->tid = tid;
 186	INIT_LIST_HEAD(&st->seq_list);
 187
 188	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
 189	rb_insert_color(&st->rb, &thread_stats);
 190
 191	thread_stat_findnew = thread_stat_findnew_after_first;
 192	return st;
 193}
 194
  195/* build a simple key function: returns whether one is bigger than two */
 196#define SINGLE_KEY(member)						\
 197	static int lock_stat_key_ ## member(struct lock_stat *one,	\
 198					 struct lock_stat *two)		\
 199	{								\
 200		return one->member > two->member;			\
 201	}
 202
 203SINGLE_KEY(nr_acquired)
 204SINGLE_KEY(nr_contended)
 205SINGLE_KEY(wait_time_total)
 206SINGLE_KEY(wait_time_max)
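/*
 * For illustration, SINGLE_KEY(nr_acquired) above expands to:
 *
 *	static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *					 struct lock_stat *two)
 *	{
 *		return one->nr_acquired > two->nr_acquired;
 *	}
 */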
 207
 208static int lock_stat_key_wait_time_min(struct lock_stat *one,
 209					struct lock_stat *two)
 210{
 211	u64 s1 = one->wait_time_min;
 212	u64 s2 = two->wait_time_min;
 213	if (s1 == ULLONG_MAX)
 214		s1 = 0;
 215	if (s2 == ULLONG_MAX)
 216		s2 = 0;
 217	return s1 > s2;
 218}
 219
 220struct lock_key {
  221	/*
  222	 * name: the value specified by the user;
  223	 * this should be simpler than the raw member name,
  224	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
  225	 */
 226	const char		*name;
 227	int			(*key)(struct lock_stat*, struct lock_stat*);
 228};
 229
 230static const char		*sort_key = "acquired";
 231
 232static int			(*compare)(struct lock_stat *, struct lock_stat *);
 233
 234static struct rb_root		result;	/* place to store sorted data */
 235
 236#define DEF_KEY_LOCK(name, fn_suffix)	\
 237	{ #name, lock_stat_key_ ## fn_suffix }
 238struct lock_key keys[] = {
 239	DEF_KEY_LOCK(acquired, nr_acquired),
 240	DEF_KEY_LOCK(contended, nr_contended),
 241	DEF_KEY_LOCK(wait_total, wait_time_total),
 242	DEF_KEY_LOCK(wait_min, wait_time_min),
 243	DEF_KEY_LOCK(wait_max, wait_time_max),
 244
  245	/* more complicated extra comparisons should go here */
 246
 247	{ NULL, NULL }
 248};
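/*
 * Each DEF_KEY_LOCK(name, fn_suffix) entry above pairs the user-visible
 * key string with its comparator; e.g. DEF_KEY_LOCK(acquired, nr_acquired)
 * expands to { "acquired", lock_stat_key_nr_acquired }.
 */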
 249
 250static void select_key(void)
 251{
 252	int i;
 253
 254	for (i = 0; keys[i].name; i++) {
 255		if (!strcmp(keys[i].name, sort_key)) {
 256			compare = keys[i].key;
 257			return;
 258		}
 259	}
 260
 261	die("Unknown compare key:%s\n", sort_key);
 262}
 263
 264static void insert_to_result(struct lock_stat *st,
 265			     int (*bigger)(struct lock_stat *, struct lock_stat *))
 266{
 267	struct rb_node **rb = &result.rb_node;
 268	struct rb_node *parent = NULL;
 269	struct lock_stat *p;
 270
 271	while (*rb) {
 272		p = container_of(*rb, struct lock_stat, rb);
 273		parent = *rb;
 274
 275		if (bigger(st, p))
 276			rb = &(*rb)->rb_left;
 277		else
 278			rb = &(*rb)->rb_right;
 279	}
 280
 281	rb_link_node(&st->rb, parent, rb);
 282	rb_insert_color(&st->rb, &result);
 283}
 284
  285/* returns the leftmost element of result and erases it */
 286static struct lock_stat *pop_from_result(void)
 287{
 288	struct rb_node *node = result.rb_node;
 289
 290	if (!node)
 291		return NULL;
 292
 293	while (node->rb_left)
 294		node = node->rb_left;
 295
 296	rb_erase(node, &result);
 297	return container_of(node, struct lock_stat, rb);
 298}
 299
 300static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
 301{
 302	struct list_head *entry = lockhashentry(addr);
 303	struct lock_stat *ret, *new;
 304
 305	list_for_each_entry(ret, entry, hash_entry) {
 306		if (ret->addr == addr)
 307			return ret;
 308	}
 309
 310	new = zalloc(sizeof(struct lock_stat));
 311	if (!new)
 312		goto alloc_failed;
 313
 314	new->addr = addr;
 315	new->name = zalloc(sizeof(char) * strlen(name) + 1);
 316	if (!new->name)
 317		goto alloc_failed;
 318	strcpy(new->name, name);
 319
 320	new->wait_time_min = ULLONG_MAX;
 321
 322	list_add(&new->hash_entry, entry);
 323	return new;
 324
 325alloc_failed:
 326	die("memory allocation failed\n");
 327}
 328
 329static const char *input_name;
 330
 331struct raw_event_sample {
 332	u32			size;
 333	char			data[0];
 334};
 335
 336struct trace_acquire_event {
 337	void			*addr;
 338	const char		*name;
 339	int			flag;
 340};
 341
 342struct trace_acquired_event {
 343	void			*addr;
 344	const char		*name;
 345};
 346
 347struct trace_contended_event {
 348	void			*addr;
 349	const char		*name;
 350};
 351
 352struct trace_release_event {
 353	void			*addr;
 354	const char		*name;
 355};
 356
 357struct trace_lock_handler {
 358	void (*acquire_event)(struct trace_acquire_event *,
 359			      struct event_format *,
 360			      int cpu,
 361			      u64 timestamp,
 362			      struct thread *thread);
 363
 364	void (*acquired_event)(struct trace_acquired_event *,
 365			       struct event_format *,
 366			       int cpu,
 367			       u64 timestamp,
 368			       struct thread *thread);
 369
 370	void (*contended_event)(struct trace_contended_event *,
 371				struct event_format *,
 372				int cpu,
 373				u64 timestamp,
 374				struct thread *thread);
 375
 376	void (*release_event)(struct trace_release_event *,
 377			      struct event_format *,
 378			      int cpu,
 379			      u64 timestamp,
 380			      struct thread *thread);
 381};
 382
 383static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
 384{
 385	struct lock_seq_stat *seq;
 386
 387	list_for_each_entry(seq, &ts->seq_list, list) {
 388		if (seq->addr == addr)
 389			return seq;
 390	}
 391
 392	seq = zalloc(sizeof(struct lock_seq_stat));
 393	if (!seq)
 394		die("Not enough memory\n");
 395	seq->state = SEQ_STATE_UNINITIALIZED;
 396	seq->addr = addr;
 397
 398	list_add(&seq->list, &ts->seq_list);
 399	return seq;
 400}
 401
 402enum broken_state {
 403	BROKEN_ACQUIRE,
 404	BROKEN_ACQUIRED,
 405	BROKEN_CONTENDED,
 406	BROKEN_RELEASE,
 407	BROKEN_MAX,
 408};
 409
 410static int bad_hist[BROKEN_MAX];
 411
 412enum acquire_flags {
 413	TRY_LOCK = 1,
 414	READ_LOCK = 2,
 415};
 416
 417static void
 418report_lock_acquire_event(struct trace_acquire_event *acquire_event,
 419			struct event_format *__event __used,
 420			int cpu __used,
 421			u64 timestamp __used,
 422			struct thread *thread __used)
 423{
 424	struct lock_stat *ls;
 425	struct thread_stat *ts;
 426	struct lock_seq_stat *seq;
 427
 428	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
 429	if (ls->discard)
 430		return;
 431
 432	ts = thread_stat_findnew(thread->pid);
 433	seq = get_seq(ts, acquire_event->addr);
 434
 435	switch (seq->state) {
 436	case SEQ_STATE_UNINITIALIZED:
 437	case SEQ_STATE_RELEASED:
 438		if (!acquire_event->flag) {
 439			seq->state = SEQ_STATE_ACQUIRING;
 440		} else {
 441			if (acquire_event->flag & TRY_LOCK)
 442				ls->nr_trylock++;
 443			if (acquire_event->flag & READ_LOCK)
 444				ls->nr_readlock++;
 445			seq->state = SEQ_STATE_READ_ACQUIRED;
 446			seq->read_count = 1;
 447			ls->nr_acquired++;
 448		}
 449		break;
 450	case SEQ_STATE_READ_ACQUIRED:
 451		if (acquire_event->flag & READ_LOCK) {
 452			seq->read_count++;
 453			ls->nr_acquired++;
 454			goto end;
 455		} else {
 456			goto broken;
 457		}
 458		break;
 459	case SEQ_STATE_ACQUIRED:
 460	case SEQ_STATE_ACQUIRING:
 461	case SEQ_STATE_CONTENDED:
 462broken:
 463		/* broken lock sequence, discard it */
 464		ls->discard = 1;
 465		bad_hist[BROKEN_ACQUIRE]++;
 466		list_del(&seq->list);
 467		free(seq);
 468		goto end;
 469		break;
 470	default:
 471		BUG_ON("Unknown state of lock sequence found!\n");
 472		break;
 473	}
 474
 475	ls->nr_acquire++;
 476	seq->prev_event_time = timestamp;
 477end:
 478	return;
 479}
 480
 481static void
 482report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 483			 struct event_format *__event __used,
 484			 int cpu __used,
 485			 u64 timestamp __used,
 486			 struct thread *thread __used)
 487{
 488	struct lock_stat *ls;
 489	struct thread_stat *ts;
 490	struct lock_seq_stat *seq;
 491	u64 contended_term;
 492
 493	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
 494	if (ls->discard)
 495		return;
 496
 497	ts = thread_stat_findnew(thread->pid);
 498	seq = get_seq(ts, acquired_event->addr);
 499
 500	switch (seq->state) {
 501	case SEQ_STATE_UNINITIALIZED:
 502		/* orphan event, do nothing */
 503		return;
 504	case SEQ_STATE_ACQUIRING:
 505		break;
 506	case SEQ_STATE_CONTENDED:
 507		contended_term = timestamp - seq->prev_event_time;
 508		ls->wait_time_total += contended_term;
 509		if (contended_term < ls->wait_time_min)
 510			ls->wait_time_min = contended_term;
 511		if (ls->wait_time_max < contended_term)
 512			ls->wait_time_max = contended_term;
 513		break;
 514	case SEQ_STATE_RELEASED:
 515	case SEQ_STATE_ACQUIRED:
 516	case SEQ_STATE_READ_ACQUIRED:
 517		/* broken lock sequence, discard it */
 518		ls->discard = 1;
 519		bad_hist[BROKEN_ACQUIRED]++;
 520		list_del(&seq->list);
 521		free(seq);
 522		goto end;
 523		break;
 524
 525	default:
 526		BUG_ON("Unknown state of lock sequence found!\n");
 527		break;
 528	}
 529
 530	seq->state = SEQ_STATE_ACQUIRED;
 531	ls->nr_acquired++;
 532	seq->prev_event_time = timestamp;
 533end:
 534	return;
 535}
 536
 537static void
 538report_lock_contended_event(struct trace_contended_event *contended_event,
 539			  struct event_format *__event __used,
 540			  int cpu __used,
 541			  u64 timestamp __used,
 542			  struct thread *thread __used)
 543{
 544	struct lock_stat *ls;
 545	struct thread_stat *ts;
 546	struct lock_seq_stat *seq;
 547
 548	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
 549	if (ls->discard)
 550		return;
 551
 552	ts = thread_stat_findnew(thread->pid);
 553	seq = get_seq(ts, contended_event->addr);
 554
 555	switch (seq->state) {
 556	case SEQ_STATE_UNINITIALIZED:
 557		/* orphan event, do nothing */
 558		return;
 559	case SEQ_STATE_ACQUIRING:
 560		break;
 561	case SEQ_STATE_RELEASED:
 562	case SEQ_STATE_ACQUIRED:
 563	case SEQ_STATE_READ_ACQUIRED:
 564	case SEQ_STATE_CONTENDED:
 565		/* broken lock sequence, discard it */
 566		ls->discard = 1;
 567		bad_hist[BROKEN_CONTENDED]++;
 568		list_del(&seq->list);
 569		free(seq);
 570		goto end;
 571		break;
 572	default:
 573		BUG_ON("Unknown state of lock sequence found!\n");
 574		break;
 575	}
 576
 577	seq->state = SEQ_STATE_CONTENDED;
 578	ls->nr_contended++;
 579	seq->prev_event_time = timestamp;
 580end:
 581	return;
 582}
 583
 584static void
 585report_lock_release_event(struct trace_release_event *release_event,
 586			struct event_format *__event __used,
 587			int cpu __used,
 588			u64 timestamp __used,
 589			struct thread *thread __used)
 590{
 591	struct lock_stat *ls;
 592	struct thread_stat *ts;
 593	struct lock_seq_stat *seq;
 594
 595	ls = lock_stat_findnew(release_event->addr, release_event->name);
 596	if (ls->discard)
 597		return;
 598
 599	ts = thread_stat_findnew(thread->pid);
 600	seq = get_seq(ts, release_event->addr);
 601
 602	switch (seq->state) {
 603	case SEQ_STATE_UNINITIALIZED:
 604		goto end;
 605		break;
 606	case SEQ_STATE_ACQUIRED:
 607		break;
 608	case SEQ_STATE_READ_ACQUIRED:
 609		seq->read_count--;
 610		BUG_ON(seq->read_count < 0);
  611		if (seq->read_count) {
 612			ls->nr_release++;
 613			goto end;
 614		}
 615		break;
 616	case SEQ_STATE_ACQUIRING:
 617	case SEQ_STATE_CONTENDED:
 618	case SEQ_STATE_RELEASED:
 619		/* broken lock sequence, discard it */
 620		ls->discard = 1;
 621		bad_hist[BROKEN_RELEASE]++;
 622		goto free_seq;
 623		break;
 624	default:
 625		BUG_ON("Unknown state of lock sequence found!\n");
 626		break;
 627	}
 628
 629	ls->nr_release++;
 630free_seq:
 631	list_del(&seq->list);
 632	free(seq);
 633end:
 634	return;
 635}
 636
  637/* lock-oriented handlers */
  638/* TODO: CPU-oriented and thread-oriented handlers */
 639static struct trace_lock_handler report_lock_ops  = {
 640	.acquire_event		= report_lock_acquire_event,
 641	.acquired_event		= report_lock_acquired_event,
 642	.contended_event	= report_lock_contended_event,
 643	.release_event		= report_lock_release_event,
 644};
 645
 646static struct trace_lock_handler *trace_handler;
 647
 648static void
 649process_lock_acquire_event(void *data,
 650			   struct event_format *event __used,
 651			   int cpu __used,
 652			   u64 timestamp __used,
 653			   struct thread *thread __used)
 654{
 655	struct trace_acquire_event acquire_event;
 656	u64 tmp;		/* this is required for casting... */
 657
 658	tmp = raw_field_value(event, "lockdep_addr", data);
 659	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
 660	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
 661	acquire_event.flag = (int)raw_field_value(event, "flag", data);
 662
 663	if (trace_handler->acquire_event)
 664		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
 665}
 666
 667static void
 668process_lock_acquired_event(void *data,
 669			    struct event_format *event __used,
 670			    int cpu __used,
 671			    u64 timestamp __used,
 672			    struct thread *thread __used)
 673{
 674	struct trace_acquired_event acquired_event;
 675	u64 tmp;		/* this is required for casting... */
 676
 677	tmp = raw_field_value(event, "lockdep_addr", data);
 678	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
 679	acquired_event.name = (char *)raw_field_ptr(event, "name", data);
 680
  681	if (trace_handler->acquired_event)
 682		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
 683}
 684
 685static void
 686process_lock_contended_event(void *data,
 687			     struct event_format *event __used,
 688			     int cpu __used,
 689			     u64 timestamp __used,
 690			     struct thread *thread __used)
 691{
 692	struct trace_contended_event contended_event;
 693	u64 tmp;		/* this is required for casting... */
 694
 695	tmp = raw_field_value(event, "lockdep_addr", data);
 696	memcpy(&contended_event.addr, &tmp, sizeof(void *));
 697	contended_event.name = (char *)raw_field_ptr(event, "name", data);
 698
  699	if (trace_handler->contended_event)
 700		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
 701}
 702
 703static void
 704process_lock_release_event(void *data,
 705			   struct event_format *event __used,
 706			   int cpu __used,
 707			   u64 timestamp __used,
 708			   struct thread *thread __used)
 709{
 710	struct trace_release_event release_event;
 711	u64 tmp;		/* this is required for casting... */
 712
 713	tmp = raw_field_value(event, "lockdep_addr", data);
 714	memcpy(&release_event.addr, &tmp, sizeof(void *));
 715	release_event.name = (char *)raw_field_ptr(event, "name", data);
 716
  717	if (trace_handler->release_event)
 718		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
 719}
 720
 721static void
 722process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
 723{
 724	struct event_format *event;
 725	int type;
 726
 727	type = trace_parse_common_type(data);
 728	event = trace_find_event(type);
 729
 730	if (!strcmp(event->name, "lock_acquire"))
 731		process_lock_acquire_event(data, event, cpu, timestamp, thread);
 732	if (!strcmp(event->name, "lock_acquired"))
 733		process_lock_acquired_event(data, event, cpu, timestamp, thread);
 734	if (!strcmp(event->name, "lock_contended"))
 735		process_lock_contended_event(data, event, cpu, timestamp, thread);
 736	if (!strcmp(event->name, "lock_release"))
 737		process_lock_release_event(data, event, cpu, timestamp, thread);
 738}
 739
 740static void print_bad_events(int bad, int total)
 741{
  742	/* Output for debugging; this has to be removed */
 743	int i;
 744	const char *name[4] =
 745		{ "acquire", "acquired", "contended", "release" };
 746
 747	pr_info("\n=== output for debug===\n\n");
 748	pr_info("bad: %d, total: %d\n", bad, total);
 749	pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
 750	pr_info("histogram of events caused bad sequence\n");
 751	for (i = 0; i < BROKEN_MAX; i++)
 752		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
 753}
 754
  755/* TODO: various ways to print, coloring, nano or milli sec */
 756static void print_result(void)
 757{
 758	struct lock_stat *st;
 759	char cut_name[20];
 760	int bad, total;
 761
 762	pr_info("%20s ", "Name");
 763	pr_info("%10s ", "acquired");
 764	pr_info("%10s ", "contended");
 765
 766	pr_info("%15s ", "total wait (ns)");
 767	pr_info("%15s ", "max wait (ns)");
 768	pr_info("%15s ", "min wait (ns)");
 769
 770	pr_info("\n\n");
 771
 772	bad = total = 0;
 773	while ((st = pop_from_result())) {
 774		total++;
 775		if (st->discard) {
 776			bad++;
 777			continue;
 778		}
 779		bzero(cut_name, 20);
 780
 781		if (strlen(st->name) < 16) {
 782			/* output raw name */
 783			pr_info("%20s ", st->name);
 784		} else {
 785			strncpy(cut_name, st->name, 16);
 786			cut_name[16] = '.';
 787			cut_name[17] = '.';
 788			cut_name[18] = '.';
 789			cut_name[19] = '\0';
  790			/* truncate the name to keep the output layout */
 791			pr_info("%20s ", cut_name);
 792		}
 793
 794		pr_info("%10u ", st->nr_acquired);
 795		pr_info("%10u ", st->nr_contended);
 796
 797		pr_info("%15" PRIu64 " ", st->wait_time_total);
 798		pr_info("%15" PRIu64 " ", st->wait_time_max);
 799		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
 800		       0 : st->wait_time_min);
 801		pr_info("\n");
 802	}
 803
 804	print_bad_events(bad, total);
 805}
 806
 807static bool info_threads, info_map;
 808
 809static void dump_threads(void)
 810{
 811	struct thread_stat *st;
 812	struct rb_node *node;
 813	struct thread *t;
 814
 815	pr_info("%10s: comm\n", "Thread ID");
 816
 817	node = rb_first(&thread_stats);
 818	while (node) {
 819		st = container_of(node, struct thread_stat, rb);
 820		t = perf_session__findnew(session, st->tid);
 821		pr_info("%10d: %s\n", st->tid, t->comm);
 822		node = rb_next(node);
  823	}
 824}
 825
 826static void dump_map(void)
 827{
 828	unsigned int i;
 829	struct lock_stat *st;
 830
 831	pr_info("Address of instance: name of class\n");
 832	for (i = 0; i < LOCKHASH_SIZE; i++) {
 833		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
 834			pr_info(" %p: %s\n", st->addr, st->name);
 835		}
 836	}
 837}
 838
 839static void dump_info(void)
 840{
 841	if (info_threads)
 842		dump_threads();
 843	else if (info_map)
 844		dump_map();
 845	else
 846		die("Unknown type of information\n");
 847}
 848
 849static int process_sample_event(struct perf_tool *tool __used,
 850				union perf_event *event,
 851				struct perf_sample *sample,
 852				struct perf_evsel *evsel __used,
 853				struct machine *machine)
 854{
 855	struct thread *thread = machine__findnew_thread(machine, sample->tid);
 856
 857	if (thread == NULL) {
 858		pr_debug("problem processing %d event, skipping it.\n",
 859			event->header.type);
 860		return -1;
 861	}
 862
 863	process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);
 864
 865	return 0;
 866}
 867
 868static struct perf_tool eops = {
 869	.sample			= process_sample_event,
 870	.comm			= perf_event__process_comm,
 871	.ordered_samples	= true,
 872};
 873
 874static int read_events(void)
 875{
 876	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
 877	if (!session)
 878		die("Initializing perf session failed\n");
 879
 880	return perf_session__process_events(session, &eops);
 881}
 882
 883static void sort_result(void)
 884{
 885	unsigned int i;
 886	struct lock_stat *st;
 887
 888	for (i = 0; i < LOCKHASH_SIZE; i++) {
 889		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
 890			insert_to_result(st, compare);
 891		}
 892	}
 893}
 894
 895static void __cmd_report(void)
 896{
 897	setup_pager();
 898	select_key();
 899	read_events();
 900	sort_result();
 901	print_result();
 902}
 903
 904static const char * const report_usage[] = {
 905	"perf lock report [<options>]",
 906	NULL
 907};
 908
 909static const struct option report_options[] = {
 910	OPT_STRING('k', "key", &sort_key, "acquired",
 911		    "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
 912	/* TODO: type */
 913	OPT_END()
 914};
 915
 916static const char * const info_usage[] = {
 917	"perf lock info [<options>]",
 918	NULL
 919};
 920
 921static const struct option info_options[] = {
 922	OPT_BOOLEAN('t', "threads", &info_threads,
 923		    "dump thread list in perf.data"),
 924	OPT_BOOLEAN('m', "map", &info_map,
 925		    "map of lock instances (address:name table)"),
 926	OPT_END()
 927};
 928
 929static const char * const lock_usage[] = {
 930	"perf lock [<options>] {record|report|script|info}",
 931	NULL
 932};
 933
 934static const struct option lock_options[] = {
 935	OPT_STRING('i', "input", &input_name, "file", "input file name"),
 936	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
 937	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
 938	OPT_END()
 939};
 940
 941static const char *record_args[] = {
 942	"record",
 943	"-R",
 944	"-f",
 945	"-m", "1024",
 946	"-c", "1",
 947	"-e", "lock:lock_acquire",
 948	"-e", "lock:lock_acquired",
 949	"-e", "lock:lock_contended",
 950	"-e", "lock:lock_release",
 951};
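/*
 * The argv assembled from record_args in __cmd_record() below is
 * roughly equivalent to running:
 *
 *	perf record -R -f -m 1024 -c 1 \
 *		-e lock:lock_acquire -e lock:lock_acquired \
 *		-e lock:lock_contended -e lock:lock_release [user args]
 */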
 952
 953static int __cmd_record(int argc, const char **argv)
 954{
 955	unsigned int rec_argc, i, j;
 956	const char **rec_argv;
 957
 958	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
 959	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 960
 961	if (rec_argv == NULL)
 962		return -ENOMEM;
 963
 964	for (i = 0; i < ARRAY_SIZE(record_args); i++)
 965		rec_argv[i] = strdup(record_args[i]);
 966
 967	for (j = 1; j < (unsigned int)argc; j++, i++)
 968		rec_argv[i] = argv[j];
 969
 970	BUG_ON(i != rec_argc);
 971
 972	return cmd_record(i, rec_argv, NULL);
 973}
 974
 975int cmd_lock(int argc, const char **argv, const char *prefix __used)
 976{
 977	unsigned int i;
 978
 979	symbol__init();
 980	for (i = 0; i < LOCKHASH_SIZE; i++)
 981		INIT_LIST_HEAD(lockhash_table + i);
 982
 983	argc = parse_options(argc, argv, lock_options, lock_usage,
 984			     PARSE_OPT_STOP_AT_NON_OPTION);
 985	if (!argc)
 986		usage_with_options(lock_usage, lock_options);
 987
 988	if (!strncmp(argv[0], "rec", 3)) {
 989		return __cmd_record(argc, argv);
 990	} else if (!strncmp(argv[0], "report", 6)) {
 991		trace_handler = &report_lock_ops;
 992		if (argc) {
 993			argc = parse_options(argc, argv,
 994					     report_options, report_usage, 0);
 995			if (argc)
 996				usage_with_options(report_usage, report_options);
 997		}
 998		__cmd_report();
 999	} else if (!strcmp(argv[0], "script")) {
1000		/* Aliased to 'perf script' */
1001		return cmd_script(argc, argv, prefix);
1002	} else if (!strcmp(argv[0], "info")) {
1003		if (argc) {
1004			argc = parse_options(argc, argv,
1005					     info_options, info_usage, 0);
1006			if (argc)
1007				usage_with_options(info_usage, info_options);
1008		}
1009		/* recycling report_lock_ops */
1010		trace_handler = &report_lock_ops;
1011		setup_pager();
1012		read_events();
1013		dump_info();
1014	} else {
1015		usage_with_options(lock_usage, lock_options);
1016	}
1017
1018	return 0;
1019}
tools/perf/builtin-lock.c (v6.13.7)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <errno.h>
   3#include <inttypes.h>
   4#include "builtin.h"
   5#include "perf.h"
   6
   7#include "util/evlist.h" // for struct evsel_str_handler
   8#include "util/evsel.h"
   9#include "util/symbol.h"
  10#include "util/thread.h"
  11#include "util/header.h"
  12#include "util/target.h"
  13#include "util/cgroup.h"
  14#include "util/callchain.h"
  15#include "util/lock-contention.h"
  16#include "util/bpf_skel/lock_data.h"
  17
  18#include <subcmd/pager.h>
  19#include <subcmd/parse-options.h>
  20#include "util/trace-event.h"
  21#include "util/tracepoint.h"
  22
  23#include "util/debug.h"
  24#include "util/session.h"
  25#include "util/tool.h"
  26#include "util/data.h"
  27#include "util/string2.h"
  28#include "util/map.h"
  29#include "util/util.h"
  30
  31#include <stdio.h>
  32#include <sys/types.h>
  33#include <sys/prctl.h>
  34#include <semaphore.h>
  35#include <math.h>
  36#include <limits.h>
  37#include <ctype.h>
  38
  39#include <linux/list.h>
  40#include <linux/hash.h>
  41#include <linux/kernel.h>
  42#include <linux/zalloc.h>
  43#include <linux/err.h>
  44#include <linux/stringify.h>
  45
  46static struct perf_session *session;
  47static struct target target;
  48
  49/* based on kernel/lockdep.c */
  50#define LOCKHASH_BITS		12
  51#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)
  52
  53static struct hlist_head *lockhash_table;
  54
  55#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
  56#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
  57
  58static struct rb_root		thread_stats;
  59
  60static bool combine_locks;
  61static bool show_thread_stats;
  62static bool show_lock_addrs;
  63static bool show_lock_owner;
  64static bool show_lock_cgroups;
  65static bool use_bpf;
  66static unsigned long bpf_map_entries = MAX_ENTRIES;
  67static int max_stack_depth = CONTENTION_STACK_DEPTH;
  68static int stack_skip = CONTENTION_STACK_SKIP;
  69static int print_nr_entries = INT_MAX / 2;
  70static LIST_HEAD(callstack_filters);
  71static const char *output_name = NULL;
  72static FILE *lock_output;
  73
  74struct callstack_filter {
  75	struct list_head list;
  76	char name[];
  77};
  78
  79static struct lock_filter filters;
  80
  81static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
  82
  83static bool needs_callstack(void)
  84{
  85	return !list_empty(&callstack_filters);
  86}
  87
  88static struct thread_stat *thread_stat_find(u32 tid)
  89{
  90	struct rb_node *node;
  91	struct thread_stat *st;
  92
  93	node = thread_stats.rb_node;
  94	while (node) {
  95		st = container_of(node, struct thread_stat, rb);
  96		if (st->tid == tid)
  97			return st;
  98		else if (tid < st->tid)
  99			node = node->rb_left;
 100		else
 101			node = node->rb_right;
 102	}
 103
 104	return NULL;
 105}
 106
 107static void thread_stat_insert(struct thread_stat *new)
 108{
 109	struct rb_node **rb = &thread_stats.rb_node;
 110	struct rb_node *parent = NULL;
 111	struct thread_stat *p;
 112
 113	while (*rb) {
 114		p = container_of(*rb, struct thread_stat, rb);
 115		parent = *rb;
 116
 117		if (new->tid < p->tid)
 118			rb = &(*rb)->rb_left;
 119		else if (new->tid > p->tid)
 120			rb = &(*rb)->rb_right;
 121		else
 122			BUG_ON("inserting invalid thread_stat\n");
 123	}
 124
 125	rb_link_node(&new->rb, parent, rb);
 126	rb_insert_color(&new->rb, &thread_stats);
 127}
 128
 129static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
 130{
 131	struct thread_stat *st;
 132
 133	st = thread_stat_find(tid);
 134	if (st)
 135		return st;
 136
 137	st = zalloc(sizeof(struct thread_stat));
 138	if (!st) {
 139		pr_err("memory allocation failed\n");
 140		return NULL;
 141	}
 142
 143	st->tid = tid;
 144	INIT_LIST_HEAD(&st->seq_list);
 145
 146	thread_stat_insert(st);
 147
 148	return st;
 149}
 150
 151static struct thread_stat *thread_stat_findnew_first(u32 tid);
 152static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
 153	thread_stat_findnew_first;
 154
 155static struct thread_stat *thread_stat_findnew_first(u32 tid)
 156{
 157	struct thread_stat *st;
 158
 159	st = zalloc(sizeof(struct thread_stat));
 160	if (!st) {
 161		pr_err("memory allocation failed\n");
 162		return NULL;
 163	}
 164	st->tid = tid;
 165	INIT_LIST_HEAD(&st->seq_list);
 166
 167	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
 168	rb_insert_color(&st->rb, &thread_stats);
 169
 170	thread_stat_findnew = thread_stat_findnew_after_first;
 171	return st;
 172}
 173
 174/* build simple key function one is bigger than two */
 175#define SINGLE_KEY(member)						\
 176	static int lock_stat_key_ ## member(struct lock_stat *one,	\
 177					 struct lock_stat *two)		\
 178	{								\
 179		return one->member > two->member;			\
 180	}
 181
 182SINGLE_KEY(nr_acquired)
 183SINGLE_KEY(nr_contended)
 184SINGLE_KEY(avg_wait_time)
 185SINGLE_KEY(wait_time_total)
 186SINGLE_KEY(wait_time_max)
 187
 188static int lock_stat_key_wait_time_min(struct lock_stat *one,
 189					struct lock_stat *two)
 190{
 191	u64 s1 = one->wait_time_min;
 192	u64 s2 = two->wait_time_min;
 193	if (s1 == ULLONG_MAX)
 194		s1 = 0;
 195	if (s2 == ULLONG_MAX)
 196		s2 = 0;
 197	return s1 > s2;
 198}
 199
 200struct lock_key {
  201	/*
  202	 * name: the value specified by the user;
  203	 * this should be simpler than the raw member name,
  204	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
  205	 */
 206	const char		*name;
 207	/* header: the string printed on the header line */
 208	const char		*header;
 209	/* len: the printing width of the field */
 210	int			len;
 211	/* key: a pointer to function to compare two lock stats for sorting */
 212	int			(*key)(struct lock_stat*, struct lock_stat*);
 213	/* print: a pointer to function to print a given lock stats */
 214	void			(*print)(struct lock_key*, struct lock_stat*);
 215	/* list: list entry to link this */
 216	struct list_head	list;
 217};
 218
 219static void lock_stat_key_print_time(unsigned long long nsec, int len)
 220{
 221	static const struct {
 222		float base;
 223		const char *unit;
 224	} table[] = {
 225		{ 1e9 * 3600, "h " },
 226		{ 1e9 * 60, "m " },
 227		{ 1e9, "s " },
 228		{ 1e6, "ms" },
 229		{ 1e3, "us" },
 230		{ 0, NULL },
 231	};
 232
 233	/* for CSV output */
 234	if (len == 0) {
 235		fprintf(lock_output, "%llu", nsec);
 236		return;
 237	}
 238
 239	for (int i = 0; table[i].unit; i++) {
 240		if (nsec < table[i].base)
 241			continue;
 242
 243		fprintf(lock_output, "%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
 244		return;
 245	}
 246
 247	fprintf(lock_output, "%*llu %s", len - 3, nsec, "ns");
 248}
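/*
 * The first row whose base does not exceed nsec wins. For example,
 * nsec = 1500000 skips the h/m/s rows, matches the 1e6 "ms" row, and
 * prints as "1.50 ms"; values below 1000 fall through to the final
 * "ns" format.
 */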
 249
 250#define PRINT_KEY(member)						\
 251static void lock_stat_key_print_ ## member(struct lock_key *key,	\
 252					   struct lock_stat *ls)	\
 253{									\
 254	fprintf(lock_output, "%*llu", key->len, (unsigned long long)ls->member);\
 255}
 256
 257#define PRINT_TIME(member)						\
 258static void lock_stat_key_print_ ## member(struct lock_key *key,	\
 259					   struct lock_stat *ls)	\
 260{									\
 261	lock_stat_key_print_time((unsigned long long)ls->member, key->len);	\
 262}
 263
 264PRINT_KEY(nr_acquired)
 265PRINT_KEY(nr_contended)
 266PRINT_TIME(avg_wait_time)
 267PRINT_TIME(wait_time_total)
 268PRINT_TIME(wait_time_max)
 269
 270static void lock_stat_key_print_wait_time_min(struct lock_key *key,
 271					      struct lock_stat *ls)
 272{
 273	u64 wait_time = ls->wait_time_min;
 274
 275	if (wait_time == ULLONG_MAX)
 276		wait_time = 0;
 277
 278	lock_stat_key_print_time(wait_time, key->len);
 279}
 280
 281
 282static const char		*sort_key = "acquired";
 283
 284static int			(*compare)(struct lock_stat *, struct lock_stat *);
 285
 286static struct rb_root		sorted; /* place to store intermediate data */
 287static struct rb_root		result;	/* place to store sorted data */
 288
 289static LIST_HEAD(lock_keys);
 290static const char		*output_fields;
 291
 292#define DEF_KEY_LOCK(name, header, fn_suffix, len)			\
 293	{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
 294static struct lock_key report_keys[] = {
 295	DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
 296	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
 297	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
 298	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
 299	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
 300	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
 301
  302	/* more complicated extra comparisons should go here */
 303	{ }
 304};
 305
 306static struct lock_key contention_keys[] = {
 307	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
 308	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
 309	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
 310	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
 311	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
 312
  313	/* more complicated extra comparisons should go here */
 314	{ }
 315};
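/*
 * Here DEF_KEY_LOCK also carries the column header and width, e.g.
 * DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10) expands to
 * { "acquired", "acquired", 10, lock_stat_key_nr_acquired,
 *   lock_stat_key_print_nr_acquired, {} }.
 */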
 316
 317static int select_key(bool contention)
 318{
 319	int i;
 320	struct lock_key *keys = report_keys;
 321
 322	if (contention)
 323		keys = contention_keys;
 324
 325	for (i = 0; keys[i].name; i++) {
 326		if (!strcmp(keys[i].name, sort_key)) {
 327			compare = keys[i].key;
 328
 329			/* selected key should be in the output fields */
 330			if (list_empty(&keys[i].list))
 331				list_add_tail(&keys[i].list, &lock_keys);
 332
 333			return 0;
 334		}
 335	}
 336
 337	pr_err("Unknown compare key: %s\n", sort_key);
 338	return -1;
 339}
 340
 341static int add_output_field(bool contention, char *name)
 342{
 343	int i;
 344	struct lock_key *keys = report_keys;
 345
 346	if (contention)
 347		keys = contention_keys;
 348
 349	for (i = 0; keys[i].name; i++) {
 350		if (strcmp(keys[i].name, name))
 351			continue;
 352
 353		/* prevent double link */
 354		if (list_empty(&keys[i].list))
 355			list_add_tail(&keys[i].list, &lock_keys);
 356
 357		return 0;
 358	}
 359
 360	pr_err("Unknown output field: %s\n", name);
 361	return -1;
 362}
 363
 364static int setup_output_field(bool contention, const char *str)
 365{
 366	char *tok, *tmp, *orig;
 367	int i, ret = 0;
 368	struct lock_key *keys = report_keys;
 369
 370	if (contention)
 371		keys = contention_keys;
 372
 373	/* no output field given: use all of them */
 374	if (str == NULL) {
 375		for (i = 0; keys[i].name; i++)
 376			list_add_tail(&keys[i].list, &lock_keys);
 377		return 0;
 378	}
 379
 380	for (i = 0; keys[i].name; i++)
 381		INIT_LIST_HEAD(&keys[i].list);
 382
 383	orig = tmp = strdup(str);
 384	if (orig == NULL)
 385		return -ENOMEM;
 386
  387	while ((tok = strsep(&tmp, ",")) != NULL) {
 388		ret = add_output_field(contention, tok);
 389		if (ret < 0)
 390			break;
 391	}
 392	free(orig);
 393
 394	return ret;
 395}
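/*
 * For example, setup_output_field(false, "acquired,wait_total") resets
 * the list node of every report key and links only "acquired" and
 * "wait_total" onto lock_keys, in that order; passing a NULL string
 * links all of the keys.
 */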
 396
 397static void combine_lock_stats(struct lock_stat *st)
 398{
 399	struct rb_node **rb = &sorted.rb_node;
 400	struct rb_node *parent = NULL;
 401	struct lock_stat *p;
 402	int ret;
 403
 404	while (*rb) {
 405		p = container_of(*rb, struct lock_stat, rb);
 406		parent = *rb;
 407
 408		if (st->name && p->name)
 409			ret = strcmp(st->name, p->name);
 410		else
 411			ret = !!st->name - !!p->name;
 412
 413		if (ret == 0) {
 414			p->nr_acquired += st->nr_acquired;
 415			p->nr_contended += st->nr_contended;
 416			p->wait_time_total += st->wait_time_total;
 417
 418			if (p->nr_contended)
 419				p->avg_wait_time = p->wait_time_total / p->nr_contended;
 420
 421			if (p->wait_time_min > st->wait_time_min)
 422				p->wait_time_min = st->wait_time_min;
 423			if (p->wait_time_max < st->wait_time_max)
 424				p->wait_time_max = st->wait_time_max;
 425
 426			p->broken |= st->broken;
 427			st->combined = 1;
 428			return;
 429		}
 430
 431		if (ret < 0)
 432			rb = &(*rb)->rb_left;
 433		else
 434			rb = &(*rb)->rb_right;
 435	}
 436
 437	rb_link_node(&st->rb, parent, rb);
 438	rb_insert_color(&st->rb, &sorted);
 439}
 440
 441static void insert_to_result(struct lock_stat *st,
 442			     int (*bigger)(struct lock_stat *, struct lock_stat *))
 443{
 444	struct rb_node **rb = &result.rb_node;
 445	struct rb_node *parent = NULL;
 446	struct lock_stat *p;
 447
 448	if (combine_locks && st->combined)
 449		return;
 450
 451	while (*rb) {
 452		p = container_of(*rb, struct lock_stat, rb);
 453		parent = *rb;
 454
 455		if (bigger(st, p))
 456			rb = &(*rb)->rb_left;
 457		else
 458			rb = &(*rb)->rb_right;
 459	}
 460
 461	rb_link_node(&st->rb, parent, rb);
 462	rb_insert_color(&st->rb, &result);
 463}
 464
  465/* returns the leftmost element of result and erases it */
 466static struct lock_stat *pop_from_result(void)
 467{
 468	struct rb_node *node = result.rb_node;
 469
 470	if (!node)
 471		return NULL;
 472
 473	while (node->rb_left)
 474		node = node->rb_left;
 475
 476	rb_erase(node, &result);
 477	return container_of(node, struct lock_stat, rb);
 478}
 479
 480struct lock_stat *lock_stat_find(u64 addr)
 481{
 482	struct hlist_head *entry = lockhashentry(addr);
 483	struct lock_stat *ret;
 484
 485	hlist_for_each_entry(ret, entry, hash_entry) {
 486		if (ret->addr == addr)
 487			return ret;
 488	}
 489	return NULL;
 490}
 491
 492struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
 493{
 494	struct hlist_head *entry = lockhashentry(addr);
 495	struct lock_stat *ret, *new;
 496
 497	hlist_for_each_entry(ret, entry, hash_entry) {
 498		if (ret->addr == addr)
 499			return ret;
 500	}
 501
 502	new = zalloc(sizeof(struct lock_stat));
 503	if (!new)
 504		goto alloc_failed;
 505
 506	new->addr = addr;
 507	new->name = strdup(name);
 508	if (!new->name) {
 509		free(new);
 510		goto alloc_failed;
 511	}
 512
 513	new->flags = flags;
 514	new->wait_time_min = ULLONG_MAX;
 515
 516	hlist_add_head(&new->hash_entry, entry);
 517	return new;
 518
 519alloc_failed:
 520	pr_err("memory allocation failed\n");
 521	return NULL;
 522}
 523
 524bool match_callstack_filter(struct machine *machine, u64 *callstack)
 525{
 526	struct map *kmap;
 527	struct symbol *sym;
 528	u64 ip;
 529	const char *arch = perf_env__arch(machine->env);
 530
 531	if (list_empty(&callstack_filters))
 532		return true;
 533
 534	for (int i = 0; i < max_stack_depth; i++) {
 535		struct callstack_filter *filter;
 536
  537		/*
  538		 * On powerpc, the callchain saved by the kernel always includes
  539		 * the NIP (next instruction pointer), LR (link register), and
  540		 * the contents of the LR save area of the second stack frame as
  541		 * its first three entries. In certain scenarios it is possible
  542		 * to have invalid kernel instruction addresses in either LR or
  543		 * the second stack frame's LR; in that case, the kernel stores
  544		 * that address as zero.
  545		 *
  546		 * The check below keeps looking into the callstack in case the
  547		 * first or second callstack entry has a zero address on
  548		 * powerpc.
  549		 */
 550		if (!callstack || (!callstack[i] && (strcmp(arch, "powerpc") ||
 551						(i != 1 && i != 2))))
 552			break;
 553
 554		ip = callstack[i];
 555		sym = machine__find_kernel_symbol(machine, ip, &kmap);
 556		if (sym == NULL)
 557			continue;
 558
 559		list_for_each_entry(filter, &callstack_filters, list) {
 560			if (strstr(sym->name, filter->name))
 561				return true;
 562		}
 563	}
 564	return false;
 565}
 566
 567struct trace_lock_handler {
 568	/* it's used on CONFIG_LOCKDEP */
 569	int (*acquire_event)(struct evsel *evsel,
 570			     struct perf_sample *sample);
 571
 572	/* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
 573	int (*acquired_event)(struct evsel *evsel,
 574			      struct perf_sample *sample);
 575
 576	/* it's used on CONFIG_LOCKDEP && CONFIG_LOCK_STAT */
 577	int (*contended_event)(struct evsel *evsel,
 578			       struct perf_sample *sample);
 579
 580	/* it's used on CONFIG_LOCKDEP */
 581	int (*release_event)(struct evsel *evsel,
 582			     struct perf_sample *sample);
 583
 584	/* it's used when CONFIG_LOCKDEP is off */
 585	int (*contention_begin_event)(struct evsel *evsel,
 586				      struct perf_sample *sample);
 587
 588	/* it's used when CONFIG_LOCKDEP is off */
 589	int (*contention_end_event)(struct evsel *evsel,
 590				    struct perf_sample *sample);
 591};
 592
 593static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
 594{
 595	struct lock_seq_stat *seq;
 596
 597	list_for_each_entry(seq, &ts->seq_list, list) {
 598		if (seq->addr == addr)
 599			return seq;
 600	}
 601
 602	seq = zalloc(sizeof(struct lock_seq_stat));
 603	if (!seq) {
 604		pr_err("memory allocation failed\n");
 605		return NULL;
 606	}
 607	seq->state = SEQ_STATE_UNINITIALIZED;
 608	seq->addr = addr;
 609
 610	list_add(&seq->list, &ts->seq_list);
 611	return seq;
 612}
 613
 614enum broken_state {
 615	BROKEN_ACQUIRE,
 616	BROKEN_ACQUIRED,
 617	BROKEN_CONTENDED,
 618	BROKEN_RELEASE,
 619	BROKEN_MAX,
 620};
 621
 622static int bad_hist[BROKEN_MAX];
 623
 624enum acquire_flags {
 625	TRY_LOCK = 1,
 626	READ_LOCK = 2,
 627};
 628
 629static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
 630{
 631	switch (aggr_mode) {
 632	case LOCK_AGGR_ADDR:
 633		*key = addr;
 634		break;
 635	case LOCK_AGGR_TASK:
 636		*key = tid;
 637		break;
 638	case LOCK_AGGR_CALLER:
 639	case LOCK_AGGR_CGROUP:
 640	default:
 641		pr_err("Invalid aggregation mode: %d\n", aggr_mode);
 642		return -EINVAL;
 643	}
 644	return 0;
 645}
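/*
 * In the simple aggregation modes the stat key is either the lock
 * address (LOCK_AGGR_ADDR) or the thread id (LOCK_AGGR_TASK);
 * caller- and cgroup-based aggregation cannot be derived from these
 * two values alone and is rejected here.
 */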
 646
 647static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);
 648
 649static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
 650				 struct perf_sample *sample)
 651{
 652	if (aggr_mode == LOCK_AGGR_CALLER) {
 653		*key = callchain_id(evsel, sample);
 654		return 0;
 655	}
 656	return get_key_by_aggr_mode_simple(key, addr, sample->tid);
 657}
 658
 659static int report_lock_acquire_event(struct evsel *evsel,
 660				     struct perf_sample *sample)
 661{
 662	struct lock_stat *ls;
 663	struct thread_stat *ts;
 664	struct lock_seq_stat *seq;
 665	const char *name = evsel__strval(evsel, sample, "name");
 666	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 667	int flag = evsel__intval(evsel, sample, "flags");
 668	u64 key;
 669	int ret;
 670
 671	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 672	if (ret < 0)
 673		return ret;
 674
 675	ls = lock_stat_findnew(key, name, 0);
 676	if (!ls)
 677		return -ENOMEM;
 678
 679	ts = thread_stat_findnew(sample->tid);
 680	if (!ts)
 681		return -ENOMEM;
 682
 683	seq = get_seq(ts, addr);
 684	if (!seq)
 685		return -ENOMEM;
 686
 687	switch (seq->state) {
 688	case SEQ_STATE_UNINITIALIZED:
 689	case SEQ_STATE_RELEASED:
 690		if (!flag) {
 691			seq->state = SEQ_STATE_ACQUIRING;
 692		} else {
 693			if (flag & TRY_LOCK)
 694				ls->nr_trylock++;
 695			if (flag & READ_LOCK)
 696				ls->nr_readlock++;
 697			seq->state = SEQ_STATE_READ_ACQUIRED;
 698			seq->read_count = 1;
 699			ls->nr_acquired++;
 700		}
 701		break;
 702	case SEQ_STATE_READ_ACQUIRED:
 703		if (flag & READ_LOCK) {
 704			seq->read_count++;
 705			ls->nr_acquired++;
 706			goto end;
 707		} else {
 708			goto broken;
 709		}
 710		break;
 711	case SEQ_STATE_ACQUIRED:
 712	case SEQ_STATE_ACQUIRING:
 713	case SEQ_STATE_CONTENDED:
 714broken:
 715		/* broken lock sequence */
 716		if (!ls->broken) {
 717			ls->broken = 1;
 718			bad_hist[BROKEN_ACQUIRE]++;
 719		}
 720		list_del_init(&seq->list);
 721		free(seq);
 722		goto end;
 723	default:
 724		BUG_ON("Unknown state of lock sequence found!\n");
 725		break;
 726	}
 727
 728	ls->nr_acquire++;
 729	seq->prev_event_time = sample->time;
 730end:
 731	return 0;
 732}
 733
 734static int report_lock_acquired_event(struct evsel *evsel,
 735				      struct perf_sample *sample)
 736{
 737	struct lock_stat *ls;
 738	struct thread_stat *ts;
 739	struct lock_seq_stat *seq;
 740	u64 contended_term;
 741	const char *name = evsel__strval(evsel, sample, "name");
 742	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 743	u64 key;
 744	int ret;
 745
 746	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 747	if (ret < 0)
 748		return ret;
 749
 750	ls = lock_stat_findnew(key, name, 0);
 751	if (!ls)
 752		return -ENOMEM;
 753
 754	ts = thread_stat_findnew(sample->tid);
 755	if (!ts)
 756		return -ENOMEM;
 757
 758	seq = get_seq(ts, addr);
 759	if (!seq)
 760		return -ENOMEM;
 761
 762	switch (seq->state) {
 763	case SEQ_STATE_UNINITIALIZED:
 764		/* orphan event, do nothing */
 765		return 0;
 766	case SEQ_STATE_ACQUIRING:
 767		break;
 768	case SEQ_STATE_CONTENDED:
 769		contended_term = sample->time - seq->prev_event_time;
 770		ls->wait_time_total += contended_term;
 771		if (contended_term < ls->wait_time_min)
 772			ls->wait_time_min = contended_term;
 773		if (ls->wait_time_max < contended_term)
 774			ls->wait_time_max = contended_term;
 775		break;
 776	case SEQ_STATE_RELEASED:
 777	case SEQ_STATE_ACQUIRED:
 778	case SEQ_STATE_READ_ACQUIRED:
 779		/* broken lock sequence */
 780		if (!ls->broken) {
 781			ls->broken = 1;
 782			bad_hist[BROKEN_ACQUIRED]++;
 783		}
 784		list_del_init(&seq->list);
 785		free(seq);
 786		goto end;
 787	default:
 788		BUG_ON("Unknown state of lock sequence found!\n");
 789		break;
 790	}
 791
 792	seq->state = SEQ_STATE_ACQUIRED;
 793	ls->nr_acquired++;
 794	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
 795	seq->prev_event_time = sample->time;
 796end:
 797	return 0;
 798}
 799
 800static int report_lock_contended_event(struct evsel *evsel,
 801				       struct perf_sample *sample)
 802{
 803	struct lock_stat *ls;
 804	struct thread_stat *ts;
 805	struct lock_seq_stat *seq;
 806	const char *name = evsel__strval(evsel, sample, "name");
 807	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 808	u64 key;
 809	int ret;
 810
 811	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 812	if (ret < 0)
 813		return ret;
 814
 815	ls = lock_stat_findnew(key, name, 0);
 816	if (!ls)
 817		return -ENOMEM;
 818
 819	ts = thread_stat_findnew(sample->tid);
 820	if (!ts)
 821		return -ENOMEM;
 822
 823	seq = get_seq(ts, addr);
 824	if (!seq)
 825		return -ENOMEM;
 826
 827	switch (seq->state) {
 828	case SEQ_STATE_UNINITIALIZED:
 829		/* orphan event, do nothing */
 830		return 0;
 831	case SEQ_STATE_ACQUIRING:
 832		break;
 833	case SEQ_STATE_RELEASED:
 834	case SEQ_STATE_ACQUIRED:
 835	case SEQ_STATE_READ_ACQUIRED:
 836	case SEQ_STATE_CONTENDED:
 837		/* broken lock sequence */
 838		if (!ls->broken) {
 839			ls->broken = 1;
 840			bad_hist[BROKEN_CONTENDED]++;
 841		}
 842		list_del_init(&seq->list);
 843		free(seq);
 844		goto end;
 845	default:
 846		BUG_ON("Unknown state of lock sequence found!\n");
 847		break;
 848	}
 849
 850	seq->state = SEQ_STATE_CONTENDED;
 851	ls->nr_contended++;
 852	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
 853	seq->prev_event_time = sample->time;
 854end:
 855	return 0;
 856}
 857
 858static int report_lock_release_event(struct evsel *evsel,
 859				     struct perf_sample *sample)
 860{
 861	struct lock_stat *ls;
 862	struct thread_stat *ts;
 863	struct lock_seq_stat *seq;
 864	const char *name = evsel__strval(evsel, sample, "name");
 865	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 866	u64 key;
 867	int ret;
 868
 869	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 870	if (ret < 0)
 871		return ret;
 872
 873	ls = lock_stat_findnew(key, name, 0);
 874	if (!ls)
 875		return -ENOMEM;
 876
 877	ts = thread_stat_findnew(sample->tid);
 878	if (!ts)
 879		return -ENOMEM;
 880
 881	seq = get_seq(ts, addr);
 882	if (!seq)
 883		return -ENOMEM;
 884
 885	switch (seq->state) {
 886	case SEQ_STATE_UNINITIALIZED:
 887		goto end;
 888	case SEQ_STATE_ACQUIRED:
 889		break;
 890	case SEQ_STATE_READ_ACQUIRED:
 891		seq->read_count--;
 892		BUG_ON(seq->read_count < 0);
 893		if (seq->read_count) {
 894			ls->nr_release++;
 895			goto end;
 896		}
 897		break;
 898	case SEQ_STATE_ACQUIRING:
 899	case SEQ_STATE_CONTENDED:
 900	case SEQ_STATE_RELEASED:
 901		/* broken lock sequence */
 902		if (!ls->broken) {
 903			ls->broken = 1;
 904			bad_hist[BROKEN_RELEASE]++;
 905		}
 906		goto free_seq;
 907	default:
 908		BUG_ON("Unknown state of lock sequence found!\n");
 909		break;
 910	}
 911
 912	ls->nr_release++;
 913free_seq:
 914	list_del_init(&seq->list);
 915	free(seq);
 916end:
 917	return 0;
 918}
 919
 920static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
 921				  char *buf, int size)
 922{
 923	u64 offset;
 924
 925	if (map == NULL || sym == NULL) {
 926		buf[0] = '\0';
 927		return 0;
 928	}
 929
 930	offset = map__map_ip(map, ip) - sym->start;
 931
 932	if (offset)
 933		return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
 934	else
 935		return strlcpy(buf, sym->name, size);
 936}
 937static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
 938				  char *buf, int size)
 939{
 940	struct thread *thread;
 941	struct callchain_cursor *cursor;
 942	struct machine *machine = &session->machines.host;
 943	struct symbol *sym;
 944	int skip = 0;
 945	int ret;
 946
 947	/* lock names will be replaced with task names later */
 948	if (show_thread_stats)
 949		return -1;
 950
 951	thread = machine__findnew_thread(machine, -1, sample->pid);
 952	if (thread == NULL)
 953		return -1;
 954
 955	cursor = get_tls_callchain_cursor();
 956
 957	/* use caller function name from the callchain */
 958	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
 959					NULL, NULL, max_stack_depth);
 960	if (ret != 0) {
 961		thread__put(thread);
 962		return -1;
 963	}
 964
 965	callchain_cursor_commit(cursor);
 966	thread__put(thread);
 967
 968	while (true) {
 969		struct callchain_cursor_node *node;
 970
 971		node = callchain_cursor_current(cursor);
 972		if (node == NULL)
 973			break;
 974
 975		/* skip first few entries - for lock functions */
 976		if (++skip <= stack_skip)
 977			goto next;
 978
 979		sym = node->ms.sym;
 980		if (sym && !machine__is_lock_function(machine, node->ip)) {
 981			get_symbol_name_offset(node->ms.map, sym, node->ip,
 982					       buf, size);
 983			return 0;
 984		}
 985
 986next:
 987		callchain_cursor_advance(cursor);
 988	}
 989	return -1;
 990}
 991
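/*
 * Build a hash over the callchain IPs, skipping the first stack_skip
 * entries and any known lock functions, to identify a unique caller when
 * aggregating by caller.
 */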
 992static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
 993{
 994	struct callchain_cursor *cursor;
 995	struct machine *machine = &session->machines.host;
 996	struct thread *thread;
 997	u64 hash = 0;
 998	int skip = 0;
 999	int ret;
1000
1001	thread = machine__findnew_thread(machine, -1, sample->pid);
1002	if (thread == NULL)
1003		return -1;
1004
1005	cursor = get_tls_callchain_cursor();
1006	/* use caller function name from the callchain */
1007	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
1008					NULL, NULL, max_stack_depth);
1009	thread__put(thread);
1010
1011	if (ret != 0)
1012		return -1;
1013
1014	callchain_cursor_commit(cursor);
1015
1016	while (true) {
1017		struct callchain_cursor_node *node;
1018
1019		node = callchain_cursor_current(cursor);
1020		if (node == NULL)
1021			break;
1022
1023		/* skip first few entries - for lock functions */
1024		if (++skip <= stack_skip)
1025			goto next;
1026
1027		if (node->ms.sym && machine__is_lock_function(machine, node->ip))
1028			goto next;
1029
1030		hash ^= hash_long((unsigned long)node->ip, 64);
1031
1032next:
1033		callchain_cursor_advance(cursor);
1034	}
1035	return hash;
1036}
1037
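/*
 * Copy up to max_stack instruction pointers from the sample's callchain,
 * dropping PERF_CONTEXT_* marker entries.
 */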
1038static u64 *get_callstack(struct perf_sample *sample, int max_stack)
1039{
1040	u64 *callstack;
1041	u64 i;
1042	int c;
1043
1044	callstack = calloc(max_stack, sizeof(*callstack));
1045	if (callstack == NULL)
1046		return NULL;
1047
1048	for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
1049		u64 ip = sample->callchain->ips[i];
1050
1051		if (ip >= PERF_CONTEXT_MAX)
1052			continue;
1053
1054		callstack[c++] = ip;
1055	}
1056	return callstack;
1057}
1058
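/*
 * Handle lock:contention_begin.  On the first event the kernel map is
 * loaded so that symbol filters can be resolved to addresses; the lock is
 * then matched against the type/address/callstack filters before the
 * per-thread sequence state advances to SEQ_STATE_CONTENDED.
 */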
1059static int report_lock_contention_begin_event(struct evsel *evsel,
1060					      struct perf_sample *sample)
1061{
1062	struct lock_stat *ls;
1063	struct thread_stat *ts;
1064	struct lock_seq_stat *seq;
1065	u64 addr = evsel__intval(evsel, sample, "lock_addr");
1066	unsigned int flags = evsel__intval(evsel, sample, "flags");
1067	u64 key;
1068	int i, ret;
1069	static bool kmap_loaded;
1070	struct machine *machine = &session->machines.host;
1071	struct map *kmap;
1072	struct symbol *sym;
1073
1074	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1075	if (ret < 0)
1076		return ret;
1077
1078	if (!kmap_loaded) {
1079		unsigned long *addrs;
1080
1081		/* make sure it loads the kernel map to find lock symbols */
1082		map__load(machine__kernel_map(machine));
1083		kmap_loaded = true;
1084
1085		/* convert (kernel) symbols to addresses */
1086		for (i = 0; i < filters.nr_syms; i++) {
1087			sym = machine__find_kernel_symbol_by_name(machine,
1088								  filters.syms[i],
1089								  &kmap);
1090			if (sym == NULL) {
1091				pr_warning("ignore unknown symbol: %s\n",
1092					   filters.syms[i]);
1093				continue;
1094			}
1095
1096			addrs = realloc(filters.addrs,
1097					(filters.nr_addrs + 1) * sizeof(*addrs));
1098			if (addrs == NULL) {
1099				pr_warning("memory allocation failure\n");
1100				return -ENOMEM;
1101			}
1102
1103			addrs[filters.nr_addrs++] = map__unmap_ip(kmap, sym->start);
1104			filters.addrs = addrs;
1105		}
1106	}
1107
1108	ls = lock_stat_find(key);
1109	if (!ls) {
1110		char buf[128];
1111		const char *name = "";
1112
1113		switch (aggr_mode) {
1114		case LOCK_AGGR_ADDR:
1115			sym = machine__find_kernel_symbol(machine, key, &kmap);
1116			if (sym)
1117				name = sym->name;
1118			break;
1119		case LOCK_AGGR_CALLER:
1120			name = buf;
1121			if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
1122				name = "Unknown";
1123			break;
1124		case LOCK_AGGR_CGROUP:
1125		case LOCK_AGGR_TASK:
1126		default:
1127			break;
1128		}
1129
1130		ls = lock_stat_findnew(key, name, flags);
1131		if (!ls)
1132			return -ENOMEM;
1133	}
1134
1135	if (filters.nr_types) {
1136		bool found = false;
1137
1138		for (i = 0; i < filters.nr_types; i++) {
1139			if (flags == filters.types[i]) {
1140				found = true;
1141				break;
1142			}
1143		}
1144
1145		if (!found)
1146			return 0;
1147	}
1148
1149	if (filters.nr_addrs) {
1150		bool found = false;
1151
1152		for (i = 0; i < filters.nr_addrs; i++) {
1153			if (addr == filters.addrs[i]) {
1154				found = true;
1155				break;
1156			}
1157		}
1158
1159		if (!found)
1160			return 0;
1161	}
1162
1163	if (needs_callstack()) {
1164		u64 *callstack = get_callstack(sample, max_stack_depth);
1165		if (callstack == NULL)
1166			return -ENOMEM;
1167
1168		if (!match_callstack_filter(machine, callstack)) {
1169			free(callstack);
1170			return 0;
1171		}
1172
1173		if (ls->callstack == NULL)
1174			ls->callstack = callstack;
1175		else
1176			free(callstack);
1177	}
1178
1179	ts = thread_stat_findnew(sample->tid);
1180	if (!ts)
1181		return -ENOMEM;
1182
1183	seq = get_seq(ts, addr);
1184	if (!seq)
1185		return -ENOMEM;
1186
1187	switch (seq->state) {
1188	case SEQ_STATE_UNINITIALIZED:
1189	case SEQ_STATE_ACQUIRED:
1190		break;
1191	case SEQ_STATE_CONTENDED:
1192		/*
1193		 * A nested contention begin can occur with mutex optimistic
1194		 * spinning; in that case keep the original contention begin
1195		 * event and ignore the second one.
1196		 */
1197		goto end;
1198	case SEQ_STATE_ACQUIRING:
1199	case SEQ_STATE_READ_ACQUIRED:
1200	case SEQ_STATE_RELEASED:
1201		/* broken lock sequence */
1202		if (!ls->broken) {
1203			ls->broken = 1;
1204			bad_hist[BROKEN_CONTENDED]++;
1205		}
1206		list_del_init(&seq->list);
1207		free(seq);
1208		goto end;
1209	default:
1210		BUG_ON("Unknown state of lock sequence found!\n");
1211		break;
1212	}
1213
1214	if (seq->state != SEQ_STATE_CONTENDED) {
1215		seq->state = SEQ_STATE_CONTENDED;
1216		seq->prev_event_time = sample->time;
1217		ls->nr_contended++;
1218	}
1219end:
1220	return 0;
1221}
1222
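/*
 * Handle lock:contention_end.  The wait time is the delta from the
 * matching contention begin; total/min/max are updated and the sequence
 * moves to SEQ_STATE_ACQUIRED.
 */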
1223static int report_lock_contention_end_event(struct evsel *evsel,
1224					    struct perf_sample *sample)
1225{
1226	struct lock_stat *ls;
1227	struct thread_stat *ts;
1228	struct lock_seq_stat *seq;
1229	u64 contended_term;
1230	u64 addr = evsel__intval(evsel, sample, "lock_addr");
1231	u64 key;
1232	int ret;
1233
1234	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1235	if (ret < 0)
1236		return ret;
1237
1238	ls = lock_stat_find(key);
1239	if (!ls)
1240		return 0;
1241
1242	ts = thread_stat_find(sample->tid);
1243	if (!ts)
1244		return 0;
1245
1246	seq = get_seq(ts, addr);
1247	if (!seq)
1248		return -ENOMEM;
1249
1250	switch (seq->state) {
1251	case SEQ_STATE_UNINITIALIZED:
1252		goto end;
1253	case SEQ_STATE_CONTENDED:
1254		contended_term = sample->time - seq->prev_event_time;
1255		ls->wait_time_total += contended_term;
1256		if (contended_term < ls->wait_time_min)
1257			ls->wait_time_min = contended_term;
1258		if (ls->wait_time_max < contended_term)
1259			ls->wait_time_max = contended_term;
1260		break;
1261	case SEQ_STATE_ACQUIRING:
1262	case SEQ_STATE_ACQUIRED:
1263	case SEQ_STATE_READ_ACQUIRED:
1264	case SEQ_STATE_RELEASED:
1265		/* broken lock sequence */
1266		if (!ls->broken) {
1267			ls->broken = 1;
1268			bad_hist[BROKEN_ACQUIRED]++;
1269		}
1270		list_del_init(&seq->list);
1271		free(seq);
1272		goto end;
1273	default:
1274		BUG_ON("Unknown state of lock sequence found!\n");
1275		break;
1276	}
1277
1278	seq->state = SEQ_STATE_ACQUIRED;
1279	ls->nr_acquired++;
1280	ls->avg_wait_time = ls->wait_time_total / ls->nr_acquired;
1281end:
1282	return 0;
1283}
1284
1285/* lock oriented handlers */
1286/* TODO: handlers for CPU oriented, thread oriented */
1287static struct trace_lock_handler report_lock_ops = {
1288	.acquire_event		= report_lock_acquire_event,
1289	.acquired_event		= report_lock_acquired_event,
1290	.contended_event	= report_lock_contended_event,
1291	.release_event		= report_lock_release_event,
1292	.contention_begin_event	= report_lock_contention_begin_event,
1293	.contention_end_event	= report_lock_contention_end_event,
1294};
1295
1296static struct trace_lock_handler contention_lock_ops = {
1297	.contention_begin_event	= report_lock_contention_begin_event,
1298	.contention_end_event	= report_lock_contention_end_event,
1299};
1300
1301
1302static struct trace_lock_handler *trace_handler;
1303
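/*
 * Thin dispatch wrappers: each tracepoint sample is routed to the
 * corresponding callback of the currently selected trace_handler, if one
 * is registered.
 */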
1304static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
1305{
1306	if (trace_handler->acquire_event)
1307		return trace_handler->acquire_event(evsel, sample);
1308	return 0;
1309}
1310
1311static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
1312{
1313	if (trace_handler->acquired_event)
1314		return trace_handler->acquired_event(evsel, sample);
1315	return 0;
1316}
1317
1318static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
1319{
1320	if (trace_handler->contended_event)
1321		return trace_handler->contended_event(evsel, sample);
1322	return 0;
1323}
1324
1325static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
1326{
1327	if (trace_handler->release_event)
1328		return trace_handler->release_event(evsel, sample);
1329	return 0;
1330}
1331
1332static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
1333{
1334	if (trace_handler->contention_begin_event)
1335		return trace_handler->contention_begin_event(evsel, sample);
1336	return 0;
1337}
1338
1339static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
1340{
1341	if (trace_handler->contention_end_event)
1342		return trace_handler->contention_end_event(evsel, sample);
1343	return 0;
1344}
1345
1346static void print_bad_events(int bad, int total)
1347{
1348	/* Output for debug, this has to be removed */
1349	int i;
1350	int broken = 0;
1351	const char *name[4] =
1352		{ "acquire", "acquired", "contended", "release" };
1353
1354	for (i = 0; i < BROKEN_MAX; i++)
1355		broken += bad_hist[i];
1356
1357	if (quiet || total == 0 || (broken == 0 && verbose <= 0))
1358		return;
1359
1360	fprintf(lock_output, "\n=== output for debug ===\n\n");
1361	fprintf(lock_output, "bad: %d, total: %d\n", bad, total);
1362	fprintf(lock_output, "bad rate: %.2f %%\n", (double)bad / (double)total * 100);
1363	fprintf(lock_output, "histogram of events that caused bad sequences\n");
1364	for (i = 0; i < BROKEN_MAX; i++)
1365		fprintf(lock_output, " %10s: %d\n", name[i], bad_hist[i]);
1366}
1367
1368/* TODO: various way to print, coloring, nano or milli sec */
1369static void print_result(void)
1370{
1371	struct lock_stat *st;
1372	struct lock_key *key;
1373	char cut_name[20];
1374	int bad, total, printed;
1375
1376	if (!quiet) {
1377		fprintf(lock_output, "%20s ", "Name");
1378		list_for_each_entry(key, &lock_keys, list)
1379			fprintf(lock_output, "%*s ", key->len, key->header);
1380		fprintf(lock_output, "\n\n");
1381	}
1382
1383	bad = total = printed = 0;
1384	while ((st = pop_from_result())) {
1385		total++;
1386		if (st->broken)
1387			bad++;
1388		if (!st->nr_acquired)
1389			continue;
1390
1391		bzero(cut_name, 20);
1392
1393		if (strlen(st->name) < 20) {
1394			/* output raw name */
1395			const char *name = st->name;
1396
1397			if (show_thread_stats) {
1398				struct thread *t;
1399
1400				/* st->addr contains tid of thread */
1401				t = perf_session__findnew(session, st->addr);
1402				name = thread__comm_str(t);
1403			}
1404
1405			fprintf(lock_output, "%20s ", name);
1406		} else {
1407			strncpy(cut_name, st->name, 16);
1408			cut_name[16] = '.';
1409			cut_name[17] = '.';
1410			cut_name[18] = '.';
1411			cut_name[19] = '\0';
1412			/* cut off name for saving output style */
1413			fprintf(lock_output, "%20s ", cut_name);
1414		}
1415
1416		list_for_each_entry(key, &lock_keys, list) {
1417			key->print(key, st);
1418			fprintf(lock_output, " ");
1419		}
1420		fprintf(lock_output, "\n");
1421
1422		if (++printed >= print_nr_entries)
1423			break;
1424	}
1425
1426	print_bad_events(bad, total);
1427}
1428
1429static bool info_threads, info_map;
1430
1431static void dump_threads(void)
1432{
1433	struct thread_stat *st;
1434	struct rb_node *node;
1435	struct thread *t;
1436
1437	fprintf(lock_output, "%10s: comm\n", "Thread ID");
1438
1439	node = rb_first(&thread_stats);
1440	while (node) {
1441		st = container_of(node, struct thread_stat, rb);
1442		t = perf_session__findnew(session, st->tid);
1443		fprintf(lock_output, "%10d: %s\n", st->tid, thread__comm_str(t));
1444		node = rb_next(node);
1445		thread__put(t);
1446	}
1447}
1448
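/*
 * Sort order for the 'info --map' dump: by name when both entries have
 * one, otherwise by presence of a name, with the address as tie breaker.
 */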
1449static int compare_maps(struct lock_stat *a, struct lock_stat *b)
1450{
1451	int ret;
1452
1453	if (a->name && b->name)
1454		ret = strcmp(a->name, b->name);
1455	else
1456		ret = !!a->name - !!b->name;
1457
1458	if (!ret)
1459		return a->addr < b->addr;
1460	else
1461		return ret < 0;
1462}
1463
1464static void dump_map(void)
1465{
1466	unsigned int i;
1467	struct lock_stat *st;
1468
1469	fprintf(lock_output, "Address of instance: name of class\n");
1470	for (i = 0; i < LOCKHASH_SIZE; i++) {
1471		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1472			insert_to_result(st, compare_maps);
1473		}
1474	}
1475
1476	while ((st = pop_from_result()))
1477		fprintf(lock_output, " %#llx: %s\n", (unsigned long long)st->addr, st->name);
1478}
1479
1480static void dump_info(void)
1481{
1482	if (info_threads)
1483		dump_threads();
1484
1485	if (info_map) {
1486		if (info_threads)
1487			fputc('\n', lock_output);
1488		dump_map();
1489	}
1490}
1491
1492static const struct evsel_str_handler lock_tracepoints[] = {
1493	{ "lock:lock_acquire",	 evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
1494	{ "lock:lock_acquired",	 evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1495	{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1496	{ "lock:lock_release",	 evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
1497};
1498
1499static const struct evsel_str_handler contention_tracepoints[] = {
1500	{ "lock:contention_begin", evsel__process_contention_begin, },
1501	{ "lock:contention_end",   evsel__process_contention_end,   },
1502};
1503
1504static int process_event_update(const struct perf_tool *tool,
1505				union perf_event *event,
1506				struct evlist **pevlist)
1507{
1508	int ret;
1509
1510	ret = perf_event__process_event_update(tool, event, pevlist);
1511	if (ret < 0)
1512		return ret;
1513
1514	/* this can return -EEXIST since we call it for each evsel */
1515	perf_session__set_tracepoints_handlers(session, lock_tracepoints);
1516	perf_session__set_tracepoints_handlers(session, contention_tracepoints);
1517	return 0;
1518}
1519
1520typedef int (*tracepoint_handler)(struct evsel *evsel,
1521				  struct perf_sample *sample);
1522
1523static int process_sample_event(const struct perf_tool *tool __maybe_unused,
1524				union perf_event *event,
1525				struct perf_sample *sample,
1526				struct evsel *evsel,
1527				struct machine *machine)
1528{
1529	int err = 0;
1530	struct thread *thread = machine__findnew_thread(machine, sample->pid,
1531							sample->tid);
1532
1533	if (thread == NULL) {
1534		pr_debug("problem processing %d event, skipping it.\n",
1535			event->header.type);
1536		return -1;
1537	}
1538
1539	if (evsel->handler != NULL) {
1540		tracepoint_handler f = evsel->handler;
1541		err = f(evsel, sample);
1542	}
1543
1544	thread__put(thread);
1545
1546	return err;
1547}
1548
1549static void combine_result(void)
1550{
1551	unsigned int i;
1552	struct lock_stat *st;
1553
1554	if (!combine_locks)
1555		return;
1556
1557	for (i = 0; i < LOCKHASH_SIZE; i++) {
1558		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1559			combine_lock_stats(st);
1560		}
1561	}
1562}
1563
1564static void sort_result(void)
1565{
1566	unsigned int i;
1567	struct lock_stat *st;
1568
1569	for (i = 0; i < LOCKHASH_SIZE; i++) {
1570		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1571			insert_to_result(st, compare);
1572		}
1573	}
1574}
1575
1576static const struct {
1577	unsigned int flags;
1578	const char *str;
1579	const char *name;
1580} lock_type_table[] = {
1581	{ 0,				"semaphore",	"semaphore" },
1582	{ LCB_F_SPIN,			"spinlock",	"spinlock" },
1583	{ LCB_F_SPIN | LCB_F_READ,	"rwlock:R",	"rwlock" },
1584	{ LCB_F_SPIN | LCB_F_WRITE,	"rwlock:W",	"rwlock" },
1585	{ LCB_F_READ,			"rwsem:R",	"rwsem" },
1586	{ LCB_F_WRITE,			"rwsem:W",	"rwsem" },
1587	{ LCB_F_RT,			"rt-mutex",	"rt-mutex" },
1588	{ LCB_F_RT | LCB_F_READ,	"rwlock-rt:R",	"rwlock-rt" },
1589	{ LCB_F_RT | LCB_F_WRITE,	"rwlock-rt:W",	"rwlock-rt" },
1590	{ LCB_F_PERCPU | LCB_F_READ,	"pcpu-sem:R",	"percpu-rwsem" },
1591	{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W",	"percpu-rwsem" },
1592	{ LCB_F_MUTEX,			"mutex",	"mutex" },
1593	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex",	"mutex" },
1594	/* alias for optimistic spinning only */
1595	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex:spin",	"mutex-spin" },
1596};
1597
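/*
 * Map LCB_F_* contention flags to a short string ("rwsem:R") and to a
 * generic type name ("rwsem").  Flags above LCB_F_MAX_FLAGS are masked
 * off before the lookup.
 */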
1598static const char *get_type_str(unsigned int flags)
1599{
1600	flags &= LCB_F_MAX_FLAGS - 1;
1601
1602	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1603		if (lock_type_table[i].flags == flags)
1604			return lock_type_table[i].str;
1605	}
1606	return "unknown";
1607}
1608
1609static const char *get_type_name(unsigned int flags)
1610{
1611	flags &= LCB_F_MAX_FLAGS - 1;
1612
1613	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1614		if (lock_type_table[i].flags == flags)
1615			return lock_type_table[i].name;
1616	}
1617	return "unknown";
1618}
1619
1620static void lock_filter_finish(void)
1621{
1622	zfree(&filters.types);
1623	filters.nr_types = 0;
1624
1625	zfree(&filters.addrs);
1626	filters.nr_addrs = 0;
1627
1628	for (int i = 0; i < filters.nr_syms; i++)
1629		free(filters.syms[i]);
1630
1631	zfree(&filters.syms);
1632	filters.nr_syms = 0;
1633
1634	zfree(&filters.cgrps);
1635	filters.nr_cgrps = 0;
1636}
1637
1638static void sort_contention_result(void)
1639{
1640	sort_result();
1641}
1642
1643static void print_header_stdio(void)
1644{
1645	struct lock_key *key;
1646
1647	list_for_each_entry(key, &lock_keys, list)
1648		fprintf(lock_output, "%*s ", key->len, key->header);
1649
1650	switch (aggr_mode) {
1651	case LOCK_AGGR_TASK:
1652		fprintf(lock_output, "  %10s   %s\n\n", "pid",
1653			show_lock_owner ? "owner" : "comm");
1654		break;
1655	case LOCK_AGGR_CALLER:
1656		fprintf(lock_output, "  %10s   %s\n\n", "type", "caller");
1657		break;
1658	case LOCK_AGGR_ADDR:
1659		fprintf(lock_output, "  %16s   %s\n\n", "address", "symbol");
1660		break;
1661	case LOCK_AGGR_CGROUP:
1662		fprintf(lock_output, "  %s\n\n", "cgroup");
1663		break;
1664	default:
1665		break;
1666	}
1667}
1668
1669static void print_header_csv(const char *sep)
1670{
1671	struct lock_key *key;
1672
1673	fprintf(lock_output, "# output: ");
1674	list_for_each_entry(key, &lock_keys, list)
1675		fprintf(lock_output, "%s%s ", key->header, sep);
1676
1677	switch (aggr_mode) {
1678	case LOCK_AGGR_TASK:
1679		fprintf(lock_output, "%s%s %s\n", "pid", sep,
1680			show_lock_owner ? "owner" : "comm");
1681		break;
1682	case LOCK_AGGR_CALLER:
1683		fprintf(lock_output, "%s%s %s", "type", sep, "caller");
1684		if (verbose > 0)
1685			fprintf(lock_output, "%s %s", sep, "stacktrace");
1686		fprintf(lock_output, "\n");
1687		break;
1688	case LOCK_AGGR_ADDR:
1689		fprintf(lock_output, "%s%s %s%s %s\n", "address", sep, "symbol", sep, "type");
1690		break;
1691	case LOCK_AGGR_CGROUP:
1692		fprintf(lock_output, "%s\n", "cgroup");
1693		break;
1694	default:
1695		break;
1696	}
1697}
1698
1699static void print_header(void)
1700{
1701	if (!quiet) {
1702		if (symbol_conf.field_sep)
1703			print_header_csv(symbol_conf.field_sep);
1704		else
1705			print_header_stdio();
1706	}
1707}
1708
1709static void print_lock_stat_stdio(struct lock_contention *con, struct lock_stat *st)
1710{
1711	struct lock_key *key;
1712	struct thread *t;
1713	int pid;
1714
1715	list_for_each_entry(key, &lock_keys, list) {
1716		key->print(key, st);
1717		fprintf(lock_output, " ");
1718	}
1719
1720	switch (aggr_mode) {
1721	case LOCK_AGGR_CALLER:
1722		fprintf(lock_output, "  %10s   %s\n", get_type_str(st->flags), st->name);
1723		break;
1724	case LOCK_AGGR_TASK:
1725		pid = st->addr;
1726		t = perf_session__findnew(session, pid);
1727		fprintf(lock_output, "  %10d   %s\n",
1728			pid, pid == -1 ? "Unknown" : thread__comm_str(t));
1729		break;
1730	case LOCK_AGGR_ADDR:
1731		fprintf(lock_output, "  %016llx   %s (%s)\n", (unsigned long long)st->addr,
1732			st->name, get_type_name(st->flags));
1733		break;
1734	case LOCK_AGGR_CGROUP:
1735		fprintf(lock_output, "  %s\n", st->name);
1736		break;
1737	default:
1738		break;
1739	}
1740
1741	if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
1742		struct map *kmap;
1743		struct symbol *sym;
1744		char buf[128];
1745		u64 ip;
1746
1747		for (int i = 0; i < max_stack_depth; i++) {
1748			if (!st->callstack || !st->callstack[i])
1749				break;
1750
1751			ip = st->callstack[i];
1752			sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
1753			get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
1754			fprintf(lock_output, "\t\t\t%#lx  %s\n", (unsigned long)ip, buf);
1755		}
1756	}
1757}
1758
1759static void print_lock_stat_csv(struct lock_contention *con, struct lock_stat *st,
1760				const char *sep)
1761{
1762	struct lock_key *key;
1763	struct thread *t;
1764	int pid;
1765
1766	list_for_each_entry(key, &lock_keys, list) {
1767		key->print(key, st);
1768		fprintf(lock_output, "%s ", sep);
1769	}
1770
1771	switch (aggr_mode) {
1772	case LOCK_AGGR_CALLER:
1773		fprintf(lock_output, "%s%s %s", get_type_str(st->flags), sep, st->name);
1774		if (verbose <= 0)
1775			fprintf(lock_output, "\n");
1776		break;
1777	case LOCK_AGGR_TASK:
1778		pid = st->addr;
1779		t = perf_session__findnew(session, pid);
1780		fprintf(lock_output, "%d%s %s\n", pid, sep,
1781			pid == -1 ? "Unknown" : thread__comm_str(t));
1782		break;
1783	case LOCK_AGGR_ADDR:
1784		fprintf(lock_output, "%llx%s %s%s %s\n", (unsigned long long)st->addr, sep,
1785			st->name, sep, get_type_name(st->flags));
1786		break;
1787	case LOCK_AGGR_CGROUP:
1788		fprintf(lock_output, "%s\n", st->name);
1789		break;
1790	default:
1791		break;
1792	}
1793
1794	if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
1795		struct map *kmap;
1796		struct symbol *sym;
1797		char buf[128];
1798		u64 ip;
1799
1800		for (int i = 0; i < max_stack_depth; i++) {
1801			if (!st->callstack || !st->callstack[i])
1802				break;
1803
1804			ip = st->callstack[i];
1805			sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
1806			get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
1807			fprintf(lock_output, "%s %#lx %s", i ? ":" : sep, (unsigned long) ip, buf);
1808		}
1809		fprintf(lock_output, "\n");
1810	}
1811}
1812
1813static void print_lock_stat(struct lock_contention *con, struct lock_stat *st)
1814{
1815	if (symbol_conf.field_sep)
1816		print_lock_stat_csv(con, st, symbol_conf.field_sep);
1817	else
1818		print_lock_stat_stdio(con, st);
1819}
1820
1821static void print_footer_stdio(int total, int bad, struct lock_contention_fails *fails)
1822{
1823	/* Output for debug, this has to be removed */
1824	int broken = fails->task + fails->stack + fails->time + fails->data;
1825
1826	if (!use_bpf)
1827		print_bad_events(bad, total);
1828
1829	if (quiet || total == 0 || (broken == 0 && verbose <= 0))
1830		return;
1831
1832	total += broken;
1833	fprintf(lock_output, "\n=== output for debug ===\n\n");
1834	fprintf(lock_output, "bad: %d, total: %d\n", broken, total);
1835	fprintf(lock_output, "bad rate: %.2f %%\n", 100.0 * broken / total);
1836
1837	fprintf(lock_output, "histogram of failure reasons\n");
1838	fprintf(lock_output, " %10s: %d\n", "task", fails->task);
1839	fprintf(lock_output, " %10s: %d\n", "stack", fails->stack);
1840	fprintf(lock_output, " %10s: %d\n", "time", fails->time);
1841	fprintf(lock_output, " %10s: %d\n", "data", fails->data);
1842}
1843
1844static void print_footer_csv(int total, int bad, struct lock_contention_fails *fails,
1845			     const char *sep)
1846{
1847	/* Output for debug, this has to be removed */
1848	if (use_bpf)
1849		bad = fails->task + fails->stack + fails->time + fails->data;
1850
1851	if (quiet || total == 0 || (bad == 0 && verbose <= 0))
1852		return;
1853
1854	total += bad;
1855	fprintf(lock_output, "# debug: total=%d%s bad=%d", total, sep, bad);
1856
1857	if (use_bpf) {
1858		fprintf(lock_output, "%s bad_%s=%d", sep, "task", fails->task);
1859		fprintf(lock_output, "%s bad_%s=%d", sep, "stack", fails->stack);
1860		fprintf(lock_output, "%s bad_%s=%d", sep, "time", fails->time);
1861		fprintf(lock_output, "%s bad_%s=%d", sep, "data", fails->data);
1862	} else {
1863		int i;
1864		const char *name[4] = { "acquire", "acquired", "contended", "release" };
1865
1866		for (i = 0; i < BROKEN_MAX; i++)
1867			fprintf(lock_output, "%s bad_%s=%d", sep, name[i], bad_hist[i]);
1868	}
1869	fprintf(lock_output, "\n");
1870}
1871
1872static void print_footer(int total, int bad, struct lock_contention_fails *fails)
1873{
1874	if (symbol_conf.field_sep)
1875		print_footer_csv(total, bad, fails, symbol_conf.field_sep);
1876	else
1877		print_footer_stdio(total, bad, fails);
1878}
1879
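/*
 * Emit the sorted contention result.  Entries without any accumulated
 * wait time are skipped, and entries hidden by -E/--entries still feed
 * the total/bad statistics printed in the footer.
 */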
1880static void print_contention_result(struct lock_contention *con)
1881{
1882	struct lock_stat *st;
1883	int bad, total, printed;
1884
1885	if (!quiet)
1886		print_header();
1887
1888	bad = total = printed = 0;
1889
1890	while ((st = pop_from_result())) {
1891		total += use_bpf ? st->nr_contended : 1;
1892		if (st->broken)
1893			bad++;
1894
1895		if (!st->wait_time_total)
1896			continue;
1897
1898		print_lock_stat(con, st);
1899
1900		if (++printed >= print_nr_entries)
1901			break;
1902	}
1903
1904	if (print_nr_entries) {
1905		/* update the total/bad stats */
1906		while ((st = pop_from_result())) {
1907			total += use_bpf ? st->nr_contended : 1;
1908			if (st->broken)
1909				bad++;
1910		}
1911	}
1912	/* some entries are collected but hidden by the callstack filter */
1913	total += con->nr_filtered;
1914
1915	print_footer(total, bad, &con->fails);
1916}
1917
1918static bool force;
1919
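/*
 * Replay a recorded perf.data file through the lock tracepoint handlers;
 * also used by 'perf lock info' via display_info.
 */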
1920static int __cmd_report(bool display_info)
1921{
1922	int err = -EINVAL;
1923	struct perf_tool eops;
1924	struct perf_data data = {
1925		.path  = input_name,
1926		.mode  = PERF_DATA_MODE_READ,
1927		.force = force,
1928	};
1929
1930	perf_tool__init(&eops, /*ordered_events=*/true);
1931	eops.attr		 = perf_event__process_attr;
1932	eops.event_update	 = process_event_update;
1933	eops.sample		 = process_sample_event;
1934	eops.comm		 = perf_event__process_comm;
1935	eops.mmap		 = perf_event__process_mmap;
1936	eops.namespaces		 = perf_event__process_namespaces;
1937	eops.tracing_data	 = perf_event__process_tracing_data;
1938	session = perf_session__new(&data, &eops);
1939	if (IS_ERR(session)) {
1940		pr_err("Initializing perf session failed\n");
1941		return PTR_ERR(session);
1942	}
1943
1944	symbol_conf.allow_aliases = true;
1945	symbol__init(&session->header.env);
1946
1947	if (!data.is_pipe) {
1948		if (!perf_session__has_traces(session, "lock record"))
1949			goto out_delete;
1950
1951		if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
1952			pr_err("Initializing perf session tracepoint handlers failed\n");
1953			goto out_delete;
1954		}
1955
1956		if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
1957			pr_err("Initializing perf session tracepoint handlers failed\n");
1958			goto out_delete;
1959		}
1960	}
1961
1962	if (setup_output_field(false, output_fields))
1963		goto out_delete;
1964
1965	if (select_key(false))
1966		goto out_delete;
1967
1968	if (show_thread_stats)
1969		aggr_mode = LOCK_AGGR_TASK;
1970
1971	err = perf_session__process_events(session);
1972	if (err)
1973		goto out_delete;
1974
1975	setup_pager();
1976	if (display_info) /* used for info subcommand */
1977		dump_info();
1978	else {
1979		combine_result();
1980		sort_result();
1981		print_result();
1982	}
1983
1984out_delete:
1985	perf_session__delete(session);
1986	return err;
1987}
1988
1989static void sighandler(int sig __maybe_unused)
1990{
1991}
1992
1993static int check_lock_contention_options(const struct option *options,
1994					 const char * const *usage)
1996{
1997	if (show_thread_stats && show_lock_addrs) {
1998		pr_err("Cannot use thread and addr mode together\n");
1999		parse_options_usage(usage, options, "threads", 0);
2000		parse_options_usage(NULL, options, "lock-addr", 0);
2001		return -1;
2002	}
2003
2004	if (show_lock_owner && !use_bpf) {
2005		pr_err("Lock owners are available only with BPF\n");
2006		parse_options_usage(usage, options, "lock-owner", 0);
2007		parse_options_usage(NULL, options, "use-bpf", 0);
2008		return -1;
2009	}
2010
2011	if (show_lock_owner && show_lock_addrs) {
2012		pr_err("Cannot use owner and addr mode together\n");
2013		parse_options_usage(usage, options, "lock-owner", 0);
2014		parse_options_usage(NULL, options, "lock-addr", 0);
2015		return -1;
2016	}
2017
2018	if (show_lock_cgroups && !use_bpf) {
2019		pr_err("Cgroups are available only with BPF\n");
2020		parse_options_usage(usage, options, "lock-cgroup", 0);
2021		parse_options_usage(NULL, options, "use-bpf", 0);
2022		return -1;
2023	}
2024
2025	if (show_lock_cgroups && show_lock_addrs) {
2026		pr_err("Cannot use cgroup and addr mode together\n");
2027		parse_options_usage(usage, options, "lock-cgroup", 0);
2028		parse_options_usage(NULL, options, "lock-addr", 0);
2029		return -1;
2030	}
2031
2032	if (show_lock_cgroups && show_thread_stats) {
2033		pr_err("Cannot use cgroup and thread mode together\n");
2034		parse_options_usage(usage, options, "lock-cgroup", 0);
2035		parse_options_usage(NULL, options, "threads", 0);
2036		return -1;
2037	}
2038
2039	if (symbol_conf.field_sep) {
2040		if (strstr(symbol_conf.field_sep, ":") || /* part of type flags */
2041		    strstr(symbol_conf.field_sep, "+") || /* part of caller offset */
2042		    strstr(symbol_conf.field_sep, ".")) { /* can be in a symbol name */
2043			pr_err("Cannot use the separator that is already used\n");
2044			parse_options_usage(usage, options, "x", 1);
2045			return -1;
2046		}
2047	}
2048
2049	if (show_lock_owner)
2050		show_thread_stats = true;
2051
2052	return 0;
2053}
2054
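/*
 * 'perf lock contention': either process a recorded perf.data file or,
 * with -b/--use-bpf, collect the statistics directly with a BPF program
 * until a signal arrives.
 */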
2055static int __cmd_contention(int argc, const char **argv)
2056{
2057	int err = -EINVAL;
2058	struct perf_tool eops;
2059	struct perf_data data = {
2060		.path  = input_name,
2061		.mode  = PERF_DATA_MODE_READ,
2062		.force = force,
2063	};
2064	struct lock_contention con = {
2065		.target = &target,
2066		.map_nr_entries = bpf_map_entries,
2067		.max_stack = max_stack_depth,
2068		.stack_skip = stack_skip,
2069		.filters = &filters,
2070		.save_callstack = needs_callstack(),
2071		.owner = show_lock_owner,
2072		.cgroups = RB_ROOT,
2073	};
2074
2075	lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
2076	if (!lockhash_table)
2077		return -ENOMEM;
2078
2079	con.result = &lockhash_table[0];
2080
2081	perf_tool__init(&eops, /*ordered_events=*/true);
2082	eops.attr		 = perf_event__process_attr;
2083	eops.event_update	 = process_event_update;
2084	eops.sample		 = process_sample_event;
2085	eops.comm		 = perf_event__process_comm;
2086	eops.mmap		 = perf_event__process_mmap;
2087	eops.tracing_data	 = perf_event__process_tracing_data;
2088
2089	session = perf_session__new(use_bpf ? NULL : &data, &eops);
2090	if (IS_ERR(session)) {
2091		pr_err("Initializing perf session failed\n");
2092		err = PTR_ERR(session);
2093		session = NULL;
2094		goto out_delete;
2095	}
2096
2097	con.machine = &session->machines.host;
2098
2099	con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
2100		show_lock_addrs ? LOCK_AGGR_ADDR :
2101		show_lock_cgroups ? LOCK_AGGR_CGROUP : LOCK_AGGR_CALLER;
2102
2103	if (con.aggr_mode == LOCK_AGGR_CALLER)
2104		con.save_callstack = true;
2105
2106	symbol_conf.allow_aliases = true;
2107	symbol__init(&session->header.env);
2108
2109	if (use_bpf) {
2110		err = target__validate(&target);
2111		if (err) {
2112			char errbuf[512];
2113
2114			target__strerror(&target, err, errbuf, 512);
2115			pr_err("%s\n", errbuf);
2116			goto out_delete;
2117		}
2118
2119		signal(SIGINT, sighandler);
2120		signal(SIGCHLD, sighandler);
2121		signal(SIGTERM, sighandler);
2122
2123		con.evlist = evlist__new();
2124		if (con.evlist == NULL) {
2125			err = -ENOMEM;
2126			goto out_delete;
2127		}
2128
2129		err = evlist__create_maps(con.evlist, &target);
2130		if (err < 0)
2131			goto out_delete;
2132
2133		if (argc) {
2134			err = evlist__prepare_workload(con.evlist, &target,
2135						       argv, false, NULL);
2136			if (err < 0)
2137				goto out_delete;
2138		}
2139
2140		if (lock_contention_prepare(&con) < 0) {
2141			pr_err("lock contention BPF setup failed\n");
2142			goto out_delete;
2143		}
2144	} else if (!data.is_pipe) {
2145		if (!perf_session__has_traces(session, "lock record"))
2146			goto out_delete;
2147
2148		if (!evlist__find_evsel_by_str(session->evlist,
2149					       "lock:contention_begin")) {
2150			pr_err("lock contention evsel not found\n");
2151			goto out_delete;
2152		}
2153
2154		if (perf_session__set_tracepoints_handlers(session,
2155						contention_tracepoints)) {
2156			pr_err("Initializing perf session tracepoint handlers failed\n");
2157			goto out_delete;
2158		}
2159	}
2160
2161	if (setup_output_field(true, output_fields))
2162		goto out_delete;
2163
2164	if (select_key(true))
2165		goto out_delete;
2166
2167	if (symbol_conf.field_sep) {
2168		int i;
2169		struct lock_key *keys = contention_keys;
2170
2171		/* do not align output in CSV format */
2172		for (i = 0; keys[i].name; i++)
2173			keys[i].len = 0;
2174	}
2175
2176	if (use_bpf) {
2177		lock_contention_start();
2178		if (argc)
2179			evlist__start_workload(con.evlist);
2180
2181		/* wait for signal */
2182		pause();
2183
2184		lock_contention_stop();
2185		lock_contention_read(&con);
2186	} else {
2187		err = perf_session__process_events(session);
2188		if (err)
2189			goto out_delete;
2190	}
2191
2192	setup_pager();
2193
2194	sort_contention_result();
2195	print_contention_result(&con);
2196
2197out_delete:
2198	lock_filter_finish();
2199	evlist__delete(con.evlist);
2200	lock_contention_finish(&con);
2201	perf_session__delete(session);
2202	zfree(&lockhash_table);
2203	return err;
2204}
2205
2206
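/*
 * Build the argument vector for 'perf record': prefer the lockdep
 * tracepoints when available, otherwise fall back to the contention
 * tracepoints plus frame-pointer call graphs.
 */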
2207static int __cmd_record(int argc, const char **argv)
2208{
2209	const char *record_args[] = {
2210		"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
2211	};
2212	const char *callgraph_args[] = {
2213		"--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
2214	};
2215	unsigned int rec_argc, i, j, ret;
2216	unsigned int nr_tracepoints;
2217	unsigned int nr_callgraph_args = 0;
2218	const char **rec_argv;
2219	bool has_lock_stat = true;
2220
2221	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
2222		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
2223			pr_debug("tracepoint %s is not enabled. "
2224				 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
2225				 lock_tracepoints[i].name);
2226			has_lock_stat = false;
2227			break;
2228		}
2229	}
2230
2231	if (has_lock_stat)
2232		goto setup_args;
2233
2234	for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
2235		if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
2236			pr_err("tracepoint %s is not enabled.\n",
2237			       contention_tracepoints[i].name);
2238			return 1;
2239		}
2240	}
2241
2242	nr_callgraph_args = ARRAY_SIZE(callgraph_args);
2243
2244setup_args:
2245	rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;
2246
2247	if (has_lock_stat)
2248		nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
2249	else
2250		nr_tracepoints = ARRAY_SIZE(contention_tracepoints);
2251
2252	/* factor of 2 is for -e in front of each tracepoint */
2253	rec_argc += 2 * nr_tracepoints;
2254
2255	rec_argv = calloc(rec_argc + 1, sizeof(char *));
2256	if (!rec_argv)
2257		return -ENOMEM;
2258
2259	for (i = 0; i < ARRAY_SIZE(record_args); i++)
2260		rec_argv[i] = record_args[i];
2261
2262	for (j = 0; j < nr_tracepoints; j++) {
2263		rec_argv[i++] = "-e";
2264		rec_argv[i++] = has_lock_stat
2265			? lock_tracepoints[j].name
2266			: contention_tracepoints[j].name;
2267	}
2268
2269	for (j = 0; j < nr_callgraph_args; j++, i++)
2270		rec_argv[i] = callgraph_args[j];
2271
2272	for (j = 1; j < (unsigned int)argc; j++, i++)
2273		rec_argv[i] = argv[j];
2274
2275	BUG_ON(i != rec_argc);
2276
2277	ret = cmd_record(i, rec_argv);
2278	free(rec_argv);
2279	return ret;
2280}
2281
2282static int parse_map_entry(const struct option *opt, const char *str,
2283			    int unset __maybe_unused)
2284{
2285	unsigned long *len = (unsigned long *)opt->value;
2286	unsigned long val;
2287	char *endptr;
2288
2289	errno = 0;
2290	val = strtoul(str, &endptr, 0);
2291	if (*endptr != '\0' || errno != 0) {
2292		pr_err("invalid BPF map length: %s\n", str);
2293		return -1;
2294	}
2295
2296	*len = val;
2297	return 0;
2298}
2299
2300static int parse_max_stack(const struct option *opt, const char *str,
2301			   int unset __maybe_unused)
2302{
2303	unsigned long *len = (unsigned long *)opt->value;
2304	long val;
2305	char *endptr;
2306
2307	errno = 0;
2308	val = strtol(str, &endptr, 0);
2309	if (*endptr != '\0' || errno != 0) {
2310		pr_err("invalid max stack depth: %s\n", str);
2311		return -1;
2312	}
2313
2314	if (val < 0 || val > sysctl__max_stack()) {
2315		pr_err("invalid max stack depth: %ld\n", val);
2316		return -1;
2317	}
2318
2319	*len = val;
2320	return 0;
2321}
2322
2323static bool add_lock_type(unsigned int flags)
2324{
2325	unsigned int *tmp;
2326
2327	tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types));
2328	if (tmp == NULL)
2329		return false;
2330
2331	tmp[filters.nr_types++] = flags;
2332	filters.types = tmp;
2333	return true;
2334}
2335
2336static int parse_lock_type(const struct option *opt __maybe_unused, const char *str,
2337			   int unset __maybe_unused)
2338{
2339	char *s, *tmp, *tok;
2340
2341	s = strdup(str);
2342	if (s == NULL)
2343		return -1;
2344
2345	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2346		bool found = false;
2347
2348		/* `tok` is `str` in `lock_type_table` if it contains ':'. */
2349		if (strchr(tok, ':')) {
2350			for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
2351				if (!strcmp(lock_type_table[i].str, tok) &&
2352				    add_lock_type(lock_type_table[i].flags)) {
2353					found = true;
2354					break;
2355				}
2356			}
2357
2358			if (!found) {
2359				pr_err("Unknown lock flags name: %s\n", tok);
2360				free(s);
2361				return -1;
2362			}
2363
2364			continue;
2365		}
2366
2367		/*
2368		 * Otherwise `tok` is `name` in `lock_type_table`.
2369		 * Single lock name could contain multiple flags.
2370		 */
2371		for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
2372			if (!strcmp(lock_type_table[i].name, tok)) {
2373				if (add_lock_type(lock_type_table[i].flags)) {
2374					found = true;
2375				} else {
2376					free(s);
2377					return -1;
2378				}
2379			}
2380		}
2381
2382		if (!found) {
2383			pr_err("Unknown lock name: %s\n", tok);
2384			free(s);
2385			return -1;
2386		}
2387
2388	}
2389
2390	free(s);
2391	return 0;
2392}
2393
2394static bool add_lock_addr(unsigned long addr)
2395{
2396	unsigned long *tmp;
2397
2398	tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs));
2399	if (tmp == NULL) {
2400		pr_err("Memory allocation failure\n");
2401		return false;
2402	}
2403
2404	tmp[filters.nr_addrs++] = addr;
2405	filters.addrs = tmp;
2406	return true;
2407}
2408
2409static bool add_lock_sym(char *name)
2410{
2411	char **tmp;
2412	char *sym = strdup(name);
2413
2414	if (sym == NULL) {
2415		pr_err("Memory allocation failure\n");
2416		return false;
2417	}
2418
2419	tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms));
2420	if (tmp == NULL) {
2421		pr_err("Memory allocation failure\n");
2422		free(sym);
2423		return false;
2424	}
2425
2426	tmp[filters.nr_syms++] = sym;
2427	filters.syms = tmp;
2428	return true;
2429}
2430
2431static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
2432			   int unset __maybe_unused)
2433{
2434	char *s, *tmp, *tok;
2435	int ret = 0;
2436	u64 addr;
2437
2438	s = strdup(str);
2439	if (s == NULL)
2440		return -1;
2441
2442	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2443		char *end;
2444
2445		addr = strtoul(tok, &end, 16);
2446		if (*end == '\0') {
2447			if (!add_lock_addr(addr)) {
2448				ret = -1;
2449				break;
2450			}
2451			continue;
2452		}
2453
2454		/*
2455		 * At this moment, we don't have kernel symbols.  Save the symbols
2456		 * in a separate list and resolve them to addresses later.
2457		 */
2458		if (!add_lock_sym(tok)) {
2459			ret = -1;
2460			break;
2461		}
2462	}
2463
2464	free(s);
2465	return ret;
2466}
2467
2468static int parse_call_stack(const struct option *opt __maybe_unused, const char *str,
2469			   int unset __maybe_unused)
2470{
2471	char *s, *tmp, *tok;
2472	int ret = 0;
2473
2474	s = strdup(str);
2475	if (s == NULL)
2476		return -1;
2477
2478	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2479		struct callstack_filter *entry;
2480
2481		entry = malloc(sizeof(*entry) + strlen(tok) + 1);
2482		if (entry == NULL) {
2483			pr_err("Memory allocation failure\n");
2484			free(s);
2485			return -1;
2486		}
2487
2488		strcpy(entry->name, tok);
2489		list_add_tail(&entry->list, &callstack_filters);
2490	}
2491
2492	free(s);
2493	return ret;
2494}
2495
2496static int parse_output(const struct option *opt __maybe_unused, const char *str,
2497			int unset __maybe_unused)
2498{
2499	const char **name = (const char **)opt->value;
2500
2501	if (str == NULL)
2502		return -1;
2503
2504	lock_output = fopen(str, "w");
2505	if (lock_output == NULL) {
2506		pr_err("Cannot open %s\n", str);
2507		return -1;
2508	}
2509
2510	*name = str;
2511	return 0;
2512}
2513
2514static bool add_lock_cgroup(char *name)
2515{
2516	u64 *tmp;
2517	struct cgroup *cgrp;
2518
2519	cgrp = cgroup__new(name, /*do_open=*/false);
2520	if (cgrp == NULL) {
2521		pr_err("Failed to create cgroup: %s\n", name);
2522		return false;
2523	}
2524
2525	if (read_cgroup_id(cgrp) < 0) {
2526		pr_err("Failed to read cgroup id for %s\n", name);
2527		cgroup__put(cgrp);
2528		return false;
2529	}
2530
2531	tmp = realloc(filters.cgrps, (filters.nr_cgrps + 1) * sizeof(*filters.cgrps));
2532	if (tmp == NULL) {
2533		pr_err("Memory allocation failure\n");
2534		return false;
2535	}
2536
2537	tmp[filters.nr_cgrps++] = cgrp->id;
2538	filters.cgrps = tmp;
2539	cgroup__put(cgrp);
2540	return true;
2541}
2542
2543static int parse_cgroup_filter(const struct option *opt __maybe_unused, const char *str,
2544			       int unset __maybe_unused)
2545{
2546	char *s, *tmp, *tok;
2547	int ret = 0;
2548
2549	s = strdup(str);
2550	if (s == NULL)
2551		return -1;
2552
2553	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2554		if (!add_lock_cgroup(tok)) {
2555			ret = -1;
2556			break;
2557		}
2558	}
2559
2560	free(s);
2561	return ret;
2562}
2563
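/*
 * Entry point: dispatch to the record/report/script/info/contention
 * subcommands after the shared option parsing.
 */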
2564int cmd_lock(int argc, const char **argv)
2565{
2566	const struct option lock_options[] = {
2567	OPT_STRING('i', "input", &input_name, "file", "input file name"),
2568	OPT_CALLBACK(0, "output", &output_name, "file", "output file name", parse_output),
2569	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
2570	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
2571	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
2572	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2573		   "file", "vmlinux pathname"),
2574	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
2575		   "file", "kallsyms pathname"),
2576	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
2577	OPT_END()
2578	};
2579
2580	const struct option info_options[] = {
2581	OPT_BOOLEAN('t', "threads", &info_threads,
2582		    "dump the thread list in perf.data"),
2583	OPT_BOOLEAN('m', "map", &info_map,
2584		    "dump the map of lock instances (address:name table)"),
2585	OPT_PARENT(lock_options)
2586	};
2587
2588	const struct option report_options[] = {
2589	OPT_STRING('k', "key", &sort_key, "acquired",
2590		    "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2591	OPT_STRING('F', "field", &output_fields, NULL,
2592		    "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2593	/* TODO: type */
2594	OPT_BOOLEAN('c', "combine-locks", &combine_locks,
2595		    "combine locks in the same class"),
2596	OPT_BOOLEAN('t', "threads", &show_thread_stats,
2597		    "show per-thread lock stats"),
2598	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2599	OPT_PARENT(lock_options)
2600	};
2601
2602	struct option contention_options[] = {
2603	OPT_STRING('k', "key", &sort_key, "wait_total",
2604		    "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
2605	OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
2606		    "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
2607	OPT_BOOLEAN('t', "threads", &show_thread_stats,
2608		    "show per-thread lock stats"),
2609	OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
2610	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
2611		    "System-wide collection from all CPUs"),
2612	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
2613		    "List of cpus to monitor"),
2614	OPT_STRING('p', "pid", &target.pid, "pid",
2615		   "Trace on existing process id"),
2616	OPT_STRING(0, "tid", &target.tid, "tid",
2617		   "Trace on existing thread id (exclusive to --pid)"),
2618	OPT_CALLBACK('M', "map-nr-entries", &bpf_map_entries, "num",
2619		     "Max number of BPF map entries", parse_map_entry),
2620	OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
2621		     "Set the maximum stack depth when collecting lock contention, "
2622		     "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
2623	OPT_INTEGER(0, "stack-skip", &stack_skip,
2624		    "Set the number of stack depth to skip when finding a lock caller, "
2625		    "Default: " __stringify(CONTENTION_STACK_SKIP)),
2626	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2627	OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"),
2628	OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS",
2629		     "Filter specific type of locks", parse_lock_type),
2630	OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES",
2631		     "Filter specific address/symbol of locks", parse_lock_addr),
2632	OPT_CALLBACK('S', "callstack-filter", NULL, "NAMES",
2633		     "Filter specific function in the callstack", parse_call_stack),
2634	OPT_BOOLEAN('o', "lock-owner", &show_lock_owner, "show lock owners instead of waiters"),
2635	OPT_STRING_NOEMPTY('x', "field-separator", &symbol_conf.field_sep, "separator",
2636		   "print result in CSV format with custom separator"),
2637	OPT_BOOLEAN(0, "lock-cgroup", &show_lock_cgroups, "show lock stats by cgroup"),
2638	OPT_CALLBACK('G', "cgroup-filter", NULL, "CGROUPS",
2639		     "Filter specific cgroups", parse_cgroup_filter),
2640	OPT_PARENT(lock_options)
2641	};
2642
2643	const char * const info_usage[] = {
2644		"perf lock info [<options>]",
2645		NULL
2646	};
2647	const char *const lock_subcommands[] = { "record", "report", "script",
2648						 "info", "contention", NULL };
2649	const char *lock_usage[] = {
2650		NULL,
2651		NULL
2652	};
2653	const char * const report_usage[] = {
2654		"perf lock report [<options>]",
2655		NULL
2656	};
2657	const char * const contention_usage[] = {
2658		"perf lock contention [<options>]",
2659		NULL
2660	};
2661	unsigned int i;
2662	int rc = 0;
2663
2664	lockhash_table = calloc(LOCKHASH_SIZE, sizeof(*lockhash_table));
2665	if (!lockhash_table)
2666		return -ENOMEM;
2667
2668	for (i = 0; i < LOCKHASH_SIZE; i++)
2669		INIT_HLIST_HEAD(lockhash_table + i);
2670
2671	lock_output = stderr;
2672	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
2673					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2674	if (!argc)
2675		usage_with_options(lock_usage, lock_options);
2676
2677	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2678		return __cmd_record(argc, argv);
2679	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
2680		trace_handler = &report_lock_ops;
2681		if (argc) {
2682			argc = parse_options(argc, argv,
2683					     report_options, report_usage, 0);
2684			if (argc)
2685				usage_with_options(report_usage, report_options);
2686		}
2687		rc = __cmd_report(false);
2688	} else if (!strcmp(argv[0], "script")) {
2689		/* Aliased to 'perf script' */
2690		rc = cmd_script(argc, argv);
2691	} else if (!strcmp(argv[0], "info")) {
2692		if (argc) {
2693			argc = parse_options(argc, argv,
2694					     info_options, info_usage, 0);
2695			if (argc)
2696				usage_with_options(info_usage, info_options);
2697		}
2698
2699		/* If neither threads nor map requested, display both */
2700		if (!info_threads && !info_map) {
2701			info_threads = true;
2702			info_map = true;
2703		}
2704
2705		/* recycling report_lock_ops */
2706		trace_handler = &report_lock_ops;
2707		rc = __cmd_report(true);
2708	} else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
2709		trace_handler = &contention_lock_ops;
2710		sort_key = "wait_total";
2711		output_fields = "contended,wait_total,wait_max,avg_wait";
2712
2713#ifndef HAVE_BPF_SKEL
2714		set_option_nobuild(contention_options, 'b', "use-bpf",
2715				   "no BUILD_BPF_SKEL=1", false);
2716#endif
2717		if (argc) {
2718			argc = parse_options(argc, argv, contention_options,
2719					     contention_usage, 0);
2720		}
2721
2722		if (check_lock_contention_options(contention_options,
2723						  contention_usage) < 0)
2724			return -1;
2725
2726		rc = __cmd_contention(argc, argv);
2727	} else {
2728		usage_with_options(lock_usage, lock_options);
2729	}
2730
2731	/* free usage string allocated by parse_options_subcommand */
2732	free((void *)lock_usage[0]);
2733
2734	zfree(&lockhash_table);
2735	return rc;
2736}