v3.5.6 (tools/perf/builtin-lock.c)
   1#include "builtin.h"
   2#include "perf.h"
   3
   4#include "util/util.h"
   5#include "util/cache.h"
   6#include "util/symbol.h"
   7#include "util/thread.h"
   8#include "util/header.h"
   9
  10#include "util/parse-options.h"
  11#include "util/trace-event.h"
  12
  13#include "util/debug.h"
  14#include "util/session.h"
  15#include "util/tool.h"
  16
  17#include <sys/types.h>
  18#include <sys/prctl.h>
  19#include <semaphore.h>
  20#include <pthread.h>
  21#include <math.h>
  22#include <limits.h>
  23
  24#include <linux/list.h>
  25#include <linux/hash.h>
  26
  27static struct perf_session *session;
  28
  29/* based on kernel/lockdep.c */
  30#define LOCKHASH_BITS		12
  31#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)
  32
  33static struct list_head lockhash_table[LOCKHASH_SIZE];
  34
  35#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
  36#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
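/*
 * Lock instances are kept in an open hash table keyed by the address of
 * their lockdep_map, mirroring the class hash table scheme in
 * kernel/lockdep.c that this code is based on.
 */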
  37
  38struct lock_stat {
  39	struct list_head	hash_entry;
  40	struct rb_node		rb;		/* used for sorting */
  41
  42	/*
  43	 * FIXME: raw_field_value() returns unsigned long long,
   44	 * so the address of lockdep_map should be treated as 64-bit.
   45	 * Is there a better solution?
  46	 */
  47	void			*addr;		/* address of lockdep_map, used as ID */
  48	char			*name;		/* for strcpy(), we cannot use const */
  49
  50	unsigned int		nr_acquire;
  51	unsigned int		nr_acquired;
  52	unsigned int		nr_contended;
  53	unsigned int		nr_release;
  54
  55	unsigned int		nr_readlock;
  56	unsigned int		nr_trylock;
  57	/* these times are in nano sec. */
  58	u64			wait_time_total;
  59	u64			wait_time_min;
  60	u64			wait_time_max;
  61
  62	int			discard; /* flag of blacklist */
  63};
  64
  65/*
  66 * States of lock_seq_stat
  67 *
   68 * UNINITIALIZED is required for detecting the first acquire event.
   69 * Because of the nature of lock events, there is no guarantee
   70 * that the first event for a lock is acquire;
   71 * it can be acquired, contended or release.
  72 */
   73#define SEQ_STATE_UNINITIALIZED	0	/* initial state */
  74#define SEQ_STATE_RELEASED	1
  75#define SEQ_STATE_ACQUIRING	2
  76#define SEQ_STATE_ACQUIRED	3
  77#define SEQ_STATE_READ_ACQUIRED	4
  78#define SEQ_STATE_CONTENDED	5
  79
  80/*
  81 * MAX_LOCK_DEPTH
  82 * Imported from include/linux/sched.h.
  83 * Should this be synchronized?
  84 */
  85#define MAX_LOCK_DEPTH 48
  86
  87/*
  88 * struct lock_seq_stat:
   89 * Holds the state of one lock sequence:
  90 * 1) acquire -> acquired -> release
  91 * 2) acquire -> contended -> acquired -> release
  92 * 3) acquire (with read or try) -> release
  93 * 4) Are there other patterns?
  94 */
  95struct lock_seq_stat {
  96	struct list_head        list;
  97	int			state;
  98	u64			prev_event_time;
  99	void                    *addr;
 100
 101	int                     read_count;
 102};
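/*
 * Each thread keeps one lock_seq_stat per lock address it has touched
 * (see struct thread_stat below). The state field walks through the
 * SEQ_STATE_* machine as acquire/acquired/contended/release events
 * arrive, and read_count tracks the nesting depth of read acquisitions.
 */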
 103
 104struct thread_stat {
 105	struct rb_node		rb;
 106
 107	u32                     tid;
 108	struct list_head        seq_list;
 109};
 110
 111static struct rb_root		thread_stats;
 112
 113static struct thread_stat *thread_stat_find(u32 tid)
 114{
 115	struct rb_node *node;
 116	struct thread_stat *st;
 117
 118	node = thread_stats.rb_node;
 119	while (node) {
 120		st = container_of(node, struct thread_stat, rb);
 121		if (st->tid == tid)
 122			return st;
 123		else if (tid < st->tid)
 124			node = node->rb_left;
 125		else
 126			node = node->rb_right;
 127	}
 128
 129	return NULL;
 130}
 131
 132static void thread_stat_insert(struct thread_stat *new)
 133{
 134	struct rb_node **rb = &thread_stats.rb_node;
 135	struct rb_node *parent = NULL;
 136	struct thread_stat *p;
 137
 138	while (*rb) {
 139		p = container_of(*rb, struct thread_stat, rb);
 140		parent = *rb;
 141
 142		if (new->tid < p->tid)
 143			rb = &(*rb)->rb_left;
 144		else if (new->tid > p->tid)
 145			rb = &(*rb)->rb_right;
 146		else
 147			BUG_ON("inserting invalid thread_stat\n");
 148	}
 149
 150	rb_link_node(&new->rb, parent, rb);
 151	rb_insert_color(&new->rb, &thread_stats);
 152}
 153
 154static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
 155{
 156	struct thread_stat *st;
 157
 158	st = thread_stat_find(tid);
 159	if (st)
 160		return st;
 161
 162	st = zalloc(sizeof(struct thread_stat));
 163	if (!st)
 164		die("memory allocation failed\n");
 165
 166	st->tid = tid;
 167	INIT_LIST_HEAD(&st->seq_list);
 168
 169	thread_stat_insert(st);
 170
 171	return st;
 172}
 173
 174static struct thread_stat *thread_stat_findnew_first(u32 tid);
 175static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
 176	thread_stat_findnew_first;
 177
 178static struct thread_stat *thread_stat_findnew_first(u32 tid)
 179{
 180	struct thread_stat *st;
 181
 182	st = zalloc(sizeof(struct thread_stat));
 183	if (!st)
 184		die("memory allocation failed\n");
 185	st->tid = tid;
 186	INIT_LIST_HEAD(&st->seq_list);
 187
 188	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
 189	rb_insert_color(&st->rb, &thread_stats);
 190
 191	thread_stat_findnew = thread_stat_findnew_after_first;
 192	return st;
 193}
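/*
 * The thread_stat_findnew pointer starts out at the _first variant,
 * which can link the very first node directly at the rb-tree root
 * without searching; it then redirects the pointer to the _after_first
 * variant for every later call.
 */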
 194
  195/* build a simple key function: returns whether "one" is bigger than "two" */
 196#define SINGLE_KEY(member)						\
 197	static int lock_stat_key_ ## member(struct lock_stat *one,	\
 198					 struct lock_stat *two)		\
 199	{								\
 200		return one->member > two->member;			\
 201	}
 202
 203SINGLE_KEY(nr_acquired)
 204SINGLE_KEY(nr_contended)
 205SINGLE_KEY(wait_time_total)
 206SINGLE_KEY(wait_time_max)
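/*
 * For reference, SINGLE_KEY(nr_acquired) above expands to:
 *
 *	static int lock_stat_key_nr_acquired(struct lock_stat *one,
 *					 struct lock_stat *two)
 *	{
 *		return one->nr_acquired > two->nr_acquired;
 *	}
 */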
 207
 208static int lock_stat_key_wait_time_min(struct lock_stat *one,
 209					struct lock_stat *two)
 210{
 211	u64 s1 = one->wait_time_min;
 212	u64 s2 = two->wait_time_min;
 213	if (s1 == ULLONG_MAX)
 214		s1 = 0;
 215	if (s2 == ULLONG_MAX)
 216		s2 = 0;
 217	return s1 > s2;
 218}
 219
 220struct lock_key {
 221	/*
  222	 * name: the value specified by the user;
  223	 * this should be simpler than the raw name of the member,
 224	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
 225	 */
 226	const char		*name;
 227	int			(*key)(struct lock_stat*, struct lock_stat*);
 228};
 229
 230static const char		*sort_key = "acquired";
 231
 232static int			(*compare)(struct lock_stat *, struct lock_stat *);
 233
 234static struct rb_root		result;	/* place to store sorted data */
 235
 236#define DEF_KEY_LOCK(name, fn_suffix)	\
 237	{ #name, lock_stat_key_ ## fn_suffix }
 238struct lock_key keys[] = {
 239	DEF_KEY_LOCK(acquired, nr_acquired),
 240	DEF_KEY_LOCK(contended, nr_contended),
 241	DEF_KEY_LOCK(wait_total, wait_time_total),
 242	DEF_KEY_LOCK(wait_min, wait_time_min),
 243	DEF_KEY_LOCK(wait_max, wait_time_max),
 244
  245	/* more complicated extra comparisons should go here */
 246
 247	{ NULL, NULL }
 248};
 249
 250static void select_key(void)
 251{
 252	int i;
 253
 254	for (i = 0; keys[i].name; i++) {
 255		if (!strcmp(keys[i].name, sort_key)) {
 256			compare = keys[i].key;
 257			return;
 258		}
 259	}
 260
  261	die("Unknown compare key: %s\n", sort_key);
 262}
 263
 264static void insert_to_result(struct lock_stat *st,
 265			     int (*bigger)(struct lock_stat *, struct lock_stat *))
 266{
 267	struct rb_node **rb = &result.rb_node;
 268	struct rb_node *parent = NULL;
 269	struct lock_stat *p;
 270
 271	while (*rb) {
 272		p = container_of(*rb, struct lock_stat, rb);
 273		parent = *rb;
 274
 275		if (bigger(st, p))
 276			rb = &(*rb)->rb_left;
 277		else
 278			rb = &(*rb)->rb_right;
 279	}
 280
 281	rb_link_node(&st->rb, parent, rb);
 282	rb_insert_color(&st->rb, &result);
 283}
 284
 285/* returns left most element of result, and erase it */
 286static struct lock_stat *pop_from_result(void)
 287{
 288	struct rb_node *node = result.rb_node;
 289
 290	if (!node)
 291		return NULL;
 292
 293	while (node->rb_left)
 294		node = node->rb_left;
 295
 296	rb_erase(node, &result);
 297	return container_of(node, struct lock_stat, rb);
 298}
 299
 300static struct lock_stat *lock_stat_findnew(void *addr, const char *name)
 301{
 302	struct list_head *entry = lockhashentry(addr);
 303	struct lock_stat *ret, *new;
 304
 305	list_for_each_entry(ret, entry, hash_entry) {
 306		if (ret->addr == addr)
 307			return ret;
 308	}
 309
 310	new = zalloc(sizeof(struct lock_stat));
 311	if (!new)
 312		goto alloc_failed;
 313
 314	new->addr = addr;
  315	new->name = zalloc(strlen(name) + 1);
 316	if (!new->name)
 317		goto alloc_failed;
 318	strcpy(new->name, name);
 319
 320	new->wait_time_min = ULLONG_MAX;
 321
 322	list_add(&new->hash_entry, entry);
 323	return new;
 324
 325alloc_failed:
 326	die("memory allocation failed\n");
 327}
 328
 329static const char *input_name;
 330
 331struct raw_event_sample {
 332	u32			size;
 333	char			data[0];
 334};
 335
 336struct trace_acquire_event {
 337	void			*addr;
 338	const char		*name;
 339	int			flag;
 340};
 341
 342struct trace_acquired_event {
 343	void			*addr;
 344	const char		*name;
 345};
 346
 347struct trace_contended_event {
 348	void			*addr;
 349	const char		*name;
 350};
 351
 352struct trace_release_event {
 353	void			*addr;
 354	const char		*name;
 355};
 356
 357struct trace_lock_handler {
 358	void (*acquire_event)(struct trace_acquire_event *,
 359			      struct event_format *,
 360			      int cpu,
 361			      u64 timestamp,
 362			      struct thread *thread);
 363
 364	void (*acquired_event)(struct trace_acquired_event *,
 365			       struct event_format *,
 366			       int cpu,
 367			       u64 timestamp,
 368			       struct thread *thread);
 369
 370	void (*contended_event)(struct trace_contended_event *,
 371				struct event_format *,
 372				int cpu,
 373				u64 timestamp,
 374				struct thread *thread);
 375
 376	void (*release_event)(struct trace_release_event *,
 377			      struct event_format *,
 378			      int cpu,
 379			      u64 timestamp,
 380			      struct thread *thread);
 381};
 382
 383static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr)
 384{
 385	struct lock_seq_stat *seq;
 386
 387	list_for_each_entry(seq, &ts->seq_list, list) {
 388		if (seq->addr == addr)
 389			return seq;
 390	}
 391
 392	seq = zalloc(sizeof(struct lock_seq_stat));
 393	if (!seq)
 394		die("Not enough memory\n");
 395	seq->state = SEQ_STATE_UNINITIALIZED;
 396	seq->addr = addr;
 397
 398	list_add(&seq->list, &ts->seq_list);
 399	return seq;
 400}
 401
 402enum broken_state {
 403	BROKEN_ACQUIRE,
 404	BROKEN_ACQUIRED,
 405	BROKEN_CONTENDED,
 406	BROKEN_RELEASE,
 407	BROKEN_MAX,
 408};
 409
 410static int bad_hist[BROKEN_MAX];
 411
 412enum acquire_flags {
 413	TRY_LOCK = 1,
 414	READ_LOCK = 2,
 415};
 416
 417static void
 418report_lock_acquire_event(struct trace_acquire_event *acquire_event,
 419			struct event_format *__event __used,
 420			int cpu __used,
 421			u64 timestamp __used,
 422			struct thread *thread __used)
 423{
 424	struct lock_stat *ls;
 425	struct thread_stat *ts;
 426	struct lock_seq_stat *seq;
 427
 428	ls = lock_stat_findnew(acquire_event->addr, acquire_event->name);
 429	if (ls->discard)
 430		return;
 431
 432	ts = thread_stat_findnew(thread->pid);
 433	seq = get_seq(ts, acquire_event->addr);
 434
 435	switch (seq->state) {
 436	case SEQ_STATE_UNINITIALIZED:
 437	case SEQ_STATE_RELEASED:
 438		if (!acquire_event->flag) {
 439			seq->state = SEQ_STATE_ACQUIRING;
 440		} else {
 441			if (acquire_event->flag & TRY_LOCK)
 442				ls->nr_trylock++;
 443			if (acquire_event->flag & READ_LOCK)
 444				ls->nr_readlock++;
 445			seq->state = SEQ_STATE_READ_ACQUIRED;
 446			seq->read_count = 1;
 447			ls->nr_acquired++;
 448		}
 449		break;
 450	case SEQ_STATE_READ_ACQUIRED:
 451		if (acquire_event->flag & READ_LOCK) {
 452			seq->read_count++;
 453			ls->nr_acquired++;
 454			goto end;
 455		} else {
 456			goto broken;
 457		}
 458		break;
 459	case SEQ_STATE_ACQUIRED:
 460	case SEQ_STATE_ACQUIRING:
 461	case SEQ_STATE_CONTENDED:
 462broken:
 463		/* broken lock sequence, discard it */
 464		ls->discard = 1;
 465		bad_hist[BROKEN_ACQUIRE]++;
 466		list_del(&seq->list);
 467		free(seq);
 468		goto end;
 469		break;
 470	default:
 471		BUG_ON("Unknown state of lock sequence found!\n");
 472		break;
 473	}
 474
 475	ls->nr_acquire++;
 476	seq->prev_event_time = timestamp;
 477end:
 478	return;
 479}
 480
 481static void
 482report_lock_acquired_event(struct trace_acquired_event *acquired_event,
 483			 struct event_format *__event __used,
 484			 int cpu __used,
 485			 u64 timestamp __used,
 486			 struct thread *thread __used)
 487{
 488	struct lock_stat *ls;
 489	struct thread_stat *ts;
 490	struct lock_seq_stat *seq;
 491	u64 contended_term;
 492
 493	ls = lock_stat_findnew(acquired_event->addr, acquired_event->name);
 494	if (ls->discard)
 495		return;
 496
 497	ts = thread_stat_findnew(thread->pid);
 498	seq = get_seq(ts, acquired_event->addr);
 499
 500	switch (seq->state) {
 501	case SEQ_STATE_UNINITIALIZED:
 502		/* orphan event, do nothing */
 503		return;
 504	case SEQ_STATE_ACQUIRING:
 505		break;
 506	case SEQ_STATE_CONTENDED:
 507		contended_term = timestamp - seq->prev_event_time;
 508		ls->wait_time_total += contended_term;
 509		if (contended_term < ls->wait_time_min)
 510			ls->wait_time_min = contended_term;
 511		if (ls->wait_time_max < contended_term)
 512			ls->wait_time_max = contended_term;
 513		break;
 514	case SEQ_STATE_RELEASED:
 515	case SEQ_STATE_ACQUIRED:
 516	case SEQ_STATE_READ_ACQUIRED:
 517		/* broken lock sequence, discard it */
 518		ls->discard = 1;
 519		bad_hist[BROKEN_ACQUIRED]++;
 520		list_del(&seq->list);
 521		free(seq);
 522		goto end;
 523		break;
 524
 525	default:
 526		BUG_ON("Unknown state of lock sequence found!\n");
 527		break;
 528	}
 529
 530	seq->state = SEQ_STATE_ACQUIRED;
 531	ls->nr_acquired++;
 532	seq->prev_event_time = timestamp;
 533end:
 534	return;
 535}
 536
 537static void
 538report_lock_contended_event(struct trace_contended_event *contended_event,
 539			  struct event_format *__event __used,
 540			  int cpu __used,
 541			  u64 timestamp __used,
 542			  struct thread *thread __used)
 543{
 544	struct lock_stat *ls;
 545	struct thread_stat *ts;
 546	struct lock_seq_stat *seq;
 547
 548	ls = lock_stat_findnew(contended_event->addr, contended_event->name);
 549	if (ls->discard)
 550		return;
 551
 552	ts = thread_stat_findnew(thread->pid);
 553	seq = get_seq(ts, contended_event->addr);
 554
 555	switch (seq->state) {
 556	case SEQ_STATE_UNINITIALIZED:
 557		/* orphan event, do nothing */
 558		return;
 559	case SEQ_STATE_ACQUIRING:
 560		break;
 561	case SEQ_STATE_RELEASED:
 562	case SEQ_STATE_ACQUIRED:
 563	case SEQ_STATE_READ_ACQUIRED:
 564	case SEQ_STATE_CONTENDED:
 565		/* broken lock sequence, discard it */
 566		ls->discard = 1;
 567		bad_hist[BROKEN_CONTENDED]++;
 568		list_del(&seq->list);
 569		free(seq);
 570		goto end;
 571		break;
 572	default:
 573		BUG_ON("Unknown state of lock sequence found!\n");
 574		break;
 575	}
 576
 577	seq->state = SEQ_STATE_CONTENDED;
 578	ls->nr_contended++;
 579	seq->prev_event_time = timestamp;
 580end:
 581	return;
 582}
 583
 584static void
 585report_lock_release_event(struct trace_release_event *release_event,
 586			struct event_format *__event __used,
 587			int cpu __used,
 588			u64 timestamp __used,
 589			struct thread *thread __used)
 590{
 591	struct lock_stat *ls;
 592	struct thread_stat *ts;
 593	struct lock_seq_stat *seq;
 594
 595	ls = lock_stat_findnew(release_event->addr, release_event->name);
 596	if (ls->discard)
 597		return;
 598
 599	ts = thread_stat_findnew(thread->pid);
 600	seq = get_seq(ts, release_event->addr);
 601
 602	switch (seq->state) {
 603	case SEQ_STATE_UNINITIALIZED:
 604		goto end;
 605		break;
 606	case SEQ_STATE_ACQUIRED:
 607		break;
 608	case SEQ_STATE_READ_ACQUIRED:
 609		seq->read_count--;
 610		BUG_ON(seq->read_count < 0);
 611		if (!seq->read_count) {
 612			ls->nr_release++;
 613			goto end;
 614		}
 615		break;
 616	case SEQ_STATE_ACQUIRING:
 617	case SEQ_STATE_CONTENDED:
 618	case SEQ_STATE_RELEASED:
 619		/* broken lock sequence, discard it */
 620		ls->discard = 1;
 621		bad_hist[BROKEN_RELEASE]++;
 622		goto free_seq;
 623		break;
 624	default:
 625		BUG_ON("Unknown state of lock sequence found!\n");
 626		break;
 627	}
 628
 629	ls->nr_release++;
 630free_seq:
 631	list_del(&seq->list);
 632	free(seq);
 633end:
 634	return;
 635}
 636
 637/* lock oriented handlers */
 638/* TODO: handlers for CPU oriented, thread oriented */
 639static struct trace_lock_handler report_lock_ops  = {
 640	.acquire_event		= report_lock_acquire_event,
 641	.acquired_event		= report_lock_acquired_event,
 642	.contended_event	= report_lock_contended_event,
 643	.release_event		= report_lock_release_event,
 644};
 645
 646static struct trace_lock_handler *trace_handler;
 647
 648static void
 649process_lock_acquire_event(void *data,
 650			   struct event_format *event __used,
 651			   int cpu __used,
 652			   u64 timestamp __used,
 653			   struct thread *thread __used)
 654{
 655	struct trace_acquire_event acquire_event;
 656	u64 tmp;		/* this is required for casting... */
 657
 658	tmp = raw_field_value(event, "lockdep_addr", data);
 659	memcpy(&acquire_event.addr, &tmp, sizeof(void *));
 660	acquire_event.name = (char *)raw_field_ptr(event, "name", data);
  661	acquire_event.flag = (int)raw_field_value(event, "flags", data);
 662
 663	if (trace_handler->acquire_event)
 664		trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread);
 665}
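/*
 * The u64 temporary plus memcpy() above is how each handler narrows the
 * unsigned long long returned by raw_field_value() into the void *addr
 * used as the lock ID; only sizeof(void *) bytes are copied, which is
 * exactly the concern noted in the FIXME on struct lock_stat.
 */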
 666
 667static void
 668process_lock_acquired_event(void *data,
 669			    struct event_format *event __used,
 670			    int cpu __used,
 671			    u64 timestamp __used,
 672			    struct thread *thread __used)
 673{
 674	struct trace_acquired_event acquired_event;
 675	u64 tmp;		/* this is required for casting... */
 676
 677	tmp = raw_field_value(event, "lockdep_addr", data);
 678	memcpy(&acquired_event.addr, &tmp, sizeof(void *));
 679	acquired_event.name = (char *)raw_field_ptr(event, "name", data);
 680
  681	if (trace_handler->acquired_event)
 682		trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread);
 683}
 684
 685static void
 686process_lock_contended_event(void *data,
 687			     struct event_format *event __used,
 688			     int cpu __used,
 689			     u64 timestamp __used,
 690			     struct thread *thread __used)
 691{
 692	struct trace_contended_event contended_event;
 693	u64 tmp;		/* this is required for casting... */
 694
 695	tmp = raw_field_value(event, "lockdep_addr", data);
 696	memcpy(&contended_event.addr, &tmp, sizeof(void *));
 697	contended_event.name = (char *)raw_field_ptr(event, "name", data);
 698
  699	if (trace_handler->contended_event)
 700		trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread);
 701}
 702
 703static void
 704process_lock_release_event(void *data,
 705			   struct event_format *event __used,
 706			   int cpu __used,
 707			   u64 timestamp __used,
 708			   struct thread *thread __used)
 709{
 710	struct trace_release_event release_event;
 711	u64 tmp;		/* this is required for casting... */
 712
 713	tmp = raw_field_value(event, "lockdep_addr", data);
 714	memcpy(&release_event.addr, &tmp, sizeof(void *));
 715	release_event.name = (char *)raw_field_ptr(event, "name", data);
 716
  717	if (trace_handler->release_event)
 718		trace_handler->release_event(&release_event, event, cpu, timestamp, thread);
 719}
 720
 721static void
 722process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread)
 723{
 724	struct event_format *event;
 725	int type;
 726
 727	type = trace_parse_common_type(data);
 728	event = trace_find_event(type);
 729
 730	if (!strcmp(event->name, "lock_acquire"))
 731		process_lock_acquire_event(data, event, cpu, timestamp, thread);
 732	if (!strcmp(event->name, "lock_acquired"))
 733		process_lock_acquired_event(data, event, cpu, timestamp, thread);
 734	if (!strcmp(event->name, "lock_contended"))
 735		process_lock_contended_event(data, event, cpu, timestamp, thread);
 736	if (!strcmp(event->name, "lock_release"))
 737		process_lock_release_event(data, event, cpu, timestamp, thread);
 738}
 739
 740static void print_bad_events(int bad, int total)
 741{
  742	/* Output for debugging; this has to be removed */
 743	int i;
 744	const char *name[4] =
 745		{ "acquire", "acquired", "contended", "release" };
 746
  747	pr_info("\n=== output for debug ===\n\n");
  748	pr_info("bad: %d, total: %d\n", bad, total);
  749	pr_info("bad rate: %f %%\n", (double)bad / (double)total * 100);
  750	pr_info("histogram of events that caused bad sequences\n");
 751	for (i = 0; i < BROKEN_MAX; i++)
 752		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
 753}
 754
  755/* TODO: various ways to print, coloring, nano or milli sec */
 756static void print_result(void)
 757{
 758	struct lock_stat *st;
 759	char cut_name[20];
 760	int bad, total;
 761
 762	pr_info("%20s ", "Name");
 763	pr_info("%10s ", "acquired");
 764	pr_info("%10s ", "contended");
 765
 766	pr_info("%15s ", "total wait (ns)");
 767	pr_info("%15s ", "max wait (ns)");
 768	pr_info("%15s ", "min wait (ns)");
 769
 770	pr_info("\n\n");
 771
 772	bad = total = 0;
 773	while ((st = pop_from_result())) {
 774		total++;
 775		if (st->discard) {
 776			bad++;
 777			continue;
 778		}
 779		bzero(cut_name, 20);
 780
 781		if (strlen(st->name) < 16) {
 782			/* output raw name */
 783			pr_info("%20s ", st->name);
 784		} else {
 785			strncpy(cut_name, st->name, 16);
 786			cut_name[16] = '.';
 787			cut_name[17] = '.';
 788			cut_name[18] = '.';
 789			cut_name[19] = '\0';
 790			/* cut off name for saving output style */
 791			pr_info("%20s ", cut_name);
 792		}
 793
 794		pr_info("%10u ", st->nr_acquired);
 795		pr_info("%10u ", st->nr_contended);
 796
 797		pr_info("%15" PRIu64 " ", st->wait_time_total);
 798		pr_info("%15" PRIu64 " ", st->wait_time_max);
 799		pr_info("%15" PRIu64 " ", st->wait_time_min == ULLONG_MAX ?
 800		       0 : st->wait_time_min);
 801		pr_info("\n");
 802	}
 803
 804	print_bad_events(bad, total);
 805}
 806
 807static bool info_threads, info_map;
 808
 809static void dump_threads(void)
 810{
 811	struct thread_stat *st;
 812	struct rb_node *node;
 813	struct thread *t;
 814
 815	pr_info("%10s: comm\n", "Thread ID");
 816
 817	node = rb_first(&thread_stats);
 818	while (node) {
 819		st = container_of(node, struct thread_stat, rb);
 820		t = perf_session__findnew(session, st->tid);
 821		pr_info("%10d: %s\n", st->tid, t->comm);
 822		node = rb_next(node);
  823	}
 824}
 825
 826static void dump_map(void)
 827{
 828	unsigned int i;
 829	struct lock_stat *st;
 830
 831	pr_info("Address of instance: name of class\n");
 832	for (i = 0; i < LOCKHASH_SIZE; i++) {
 833		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
 834			pr_info(" %p: %s\n", st->addr, st->name);
 835		}
 836	}
 837}
 838
 839static void dump_info(void)
 840{
 841	if (info_threads)
 842		dump_threads();
 843	else if (info_map)
 844		dump_map();
 845	else
 846		die("Unknown type of information\n");
 847}
 848
 849static int process_sample_event(struct perf_tool *tool __used,
 850				union perf_event *event,
 851				struct perf_sample *sample,
 852				struct perf_evsel *evsel __used,
 853				struct machine *machine)
 854{
 855	struct thread *thread = machine__findnew_thread(machine, sample->tid);
 856
 857	if (thread == NULL) {
 858		pr_debug("problem processing %d event, skipping it.\n",
 859			event->header.type);
 860		return -1;
 861	}
 862
 863	process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);
 864
 865	return 0;
 866}
 867
 868static struct perf_tool eops = {
 869	.sample			= process_sample_event,
 870	.comm			= perf_event__process_comm,
 871	.ordered_samples	= true,
 872};
 873
 874static int read_events(void)
 875{
 876	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
 877	if (!session)
 878		die("Initializing perf session failed\n");
 879
 880	return perf_session__process_events(session, &eops);
 881}
 882
 883static void sort_result(void)
 884{
 885	unsigned int i;
 886	struct lock_stat *st;
 887
 888	for (i = 0; i < LOCKHASH_SIZE; i++) {
 889		list_for_each_entry(st, &lockhash_table[i], hash_entry) {
 890			insert_to_result(st, compare);
 891		}
 892	}
 893}
 894
 895static void __cmd_report(void)
 896{
 897	setup_pager();
 898	select_key();
 899	read_events();
 900	sort_result();
 901	print_result();
 902}
 903
 904static const char * const report_usage[] = {
 905	"perf lock report [<options>]",
 906	NULL
 907};
 908
 909static const struct option report_options[] = {
 910	OPT_STRING('k', "key", &sort_key, "acquired",
 911		    "key for sorting (acquired / contended / wait_total / wait_max / wait_min)"),
 912	/* TODO: type */
 913	OPT_END()
 914};
 915
 916static const char * const info_usage[] = {
 917	"perf lock info [<options>]",
 918	NULL
 919};
 920
 921static const struct option info_options[] = {
 922	OPT_BOOLEAN('t', "threads", &info_threads,
 923		    "dump thread list in perf.data"),
 924	OPT_BOOLEAN('m', "map", &info_map,
 925		    "map of lock instances (address:name table)"),
 926	OPT_END()
 927};
 928
 929static const char * const lock_usage[] = {
 930	"perf lock [<options>] {record|report|script|info}",
 931	NULL
 932};
 933
 934static const struct option lock_options[] = {
 935	OPT_STRING('i', "input", &input_name, "file", "input file name"),
 936	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
 937	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
 938	OPT_END()
 939};
 940
 941static const char *record_args[] = {
 942	"record",
 943	"-R",
 944	"-f",
 945	"-m", "1024",
 946	"-c", "1",
 947	"-e", "lock:lock_acquire",
 948	"-e", "lock:lock_acquired",
 949	"-e", "lock:lock_contended",
 950	"-e", "lock:lock_release",
 951};
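/*
 * __cmd_record() below prepends these arguments to the user's command
 * line and re-enters cmd_record(), so the tool is driven like this
 * (the workload name is only an example):
 *
 *	# perf lock record -- ./my_workload
 *	# perf lock report -k wait_total
 */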
 952
 953static int __cmd_record(int argc, const char **argv)
 954{
 955	unsigned int rec_argc, i, j;
 956	const char **rec_argv;
 957
 958	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
 959	rec_argv = calloc(rec_argc + 1, sizeof(char *));
 960
 961	if (rec_argv == NULL)
 962		return -ENOMEM;
 963
 964	for (i = 0; i < ARRAY_SIZE(record_args); i++)
 965		rec_argv[i] = strdup(record_args[i]);
 966
 967	for (j = 1; j < (unsigned int)argc; j++, i++)
 968		rec_argv[i] = argv[j];
 969
 970	BUG_ON(i != rec_argc);
 971
 972	return cmd_record(i, rec_argv, NULL);
 973}
 974
 975int cmd_lock(int argc, const char **argv, const char *prefix __used)
 976{
 977	unsigned int i;
 978
 979	symbol__init();
 980	for (i = 0; i < LOCKHASH_SIZE; i++)
 981		INIT_LIST_HEAD(lockhash_table + i);
 982
 983	argc = parse_options(argc, argv, lock_options, lock_usage,
 984			     PARSE_OPT_STOP_AT_NON_OPTION);
 985	if (!argc)
 986		usage_with_options(lock_usage, lock_options);
 987
 988	if (!strncmp(argv[0], "rec", 3)) {
 989		return __cmd_record(argc, argv);
 990	} else if (!strncmp(argv[0], "report", 6)) {
 991		trace_handler = &report_lock_ops;
 992		if (argc) {
 993			argc = parse_options(argc, argv,
 994					     report_options, report_usage, 0);
 995			if (argc)
 996				usage_with_options(report_usage, report_options);
 997		}
 998		__cmd_report();
 999	} else if (!strcmp(argv[0], "script")) {
1000		/* Aliased to 'perf script' */
1001		return cmd_script(argc, argv, prefix);
1002	} else if (!strcmp(argv[0], "info")) {
1003		if (argc) {
1004			argc = parse_options(argc, argv,
1005					     info_options, info_usage, 0);
1006			if (argc)
1007				usage_with_options(info_usage, info_options);
1008		}
1009		/* recycling report_lock_ops */
1010		trace_handler = &report_lock_ops;
1011		setup_pager();
1012		read_events();
1013		dump_info();
1014	} else {
1015		usage_with_options(lock_usage, lock_options);
1016	}
1017
1018	return 0;
1019}
v6.2 (tools/perf/builtin-lock.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#include <errno.h>
   3#include <inttypes.h>
   4#include "builtin.h"
   5#include "perf.h"
   6
   7#include "util/evlist.h" // for struct evsel_str_handler
   8#include "util/evsel.h"
   9#include "util/symbol.h"
  10#include "util/thread.h"
  11#include "util/header.h"
  12#include "util/target.h"
  13#include "util/callchain.h"
  14#include "util/lock-contention.h"
  15#include "util/bpf_skel/lock_data.h"
  16
  17#include <subcmd/pager.h>
  18#include <subcmd/parse-options.h>
  19#include "util/trace-event.h"
  20#include "util/tracepoint.h"
  21
  22#include "util/debug.h"
  23#include "util/session.h"
  24#include "util/tool.h"
  25#include "util/data.h"
  26#include "util/string2.h"
  27#include "util/map.h"
  28#include "util/util.h"
  29
  30#include <sys/types.h>
  31#include <sys/prctl.h>
  32#include <semaphore.h>
  33#include <math.h>
  34#include <limits.h>
  35#include <ctype.h>
  36
  37#include <linux/list.h>
  38#include <linux/hash.h>
  39#include <linux/kernel.h>
  40#include <linux/zalloc.h>
  41#include <linux/err.h>
  42#include <linux/stringify.h>
  43
  44static struct perf_session *session;
  45static struct target target;
  46
  47/* based on kernel/lockdep.c */
  48#define LOCKHASH_BITS		12
  49#define LOCKHASH_SIZE		(1UL << LOCKHASH_BITS)
  50
  51static struct hlist_head lockhash_table[LOCKHASH_SIZE];
  52
  53#define __lockhashfn(key)	hash_long((unsigned long)key, LOCKHASH_BITS)
  54#define lockhashentry(key)	(lockhash_table + __lockhashfn((key)))
  55
  56static struct rb_root		thread_stats;
  57
  58static bool combine_locks;
  59static bool show_thread_stats;
  60static bool show_lock_addrs;
  61static bool use_bpf;
  62static unsigned long bpf_map_entries = 10240;
  63static int max_stack_depth = CONTENTION_STACK_DEPTH;
  64static int stack_skip = CONTENTION_STACK_SKIP;
  65static int print_nr_entries = INT_MAX / 2;
  66
  67static struct lock_filter filters;
  68
  69static enum lock_aggr_mode aggr_mode = LOCK_AGGR_ADDR;
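/*
 * Results can be aggregated per lock address (the LOCK_AGGR_ADDR
 * default above), per task, or per calling function; see
 * get_key_by_aggr_mode() below for how the key is derived in each mode.
 */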
  70
  71static struct thread_stat *thread_stat_find(u32 tid)
  72{
  73	struct rb_node *node;
  74	struct thread_stat *st;
  75
  76	node = thread_stats.rb_node;
  77	while (node) {
  78		st = container_of(node, struct thread_stat, rb);
  79		if (st->tid == tid)
  80			return st;
  81		else if (tid < st->tid)
  82			node = node->rb_left;
  83		else
  84			node = node->rb_right;
  85	}
  86
  87	return NULL;
  88}
  89
  90static void thread_stat_insert(struct thread_stat *new)
  91{
  92	struct rb_node **rb = &thread_stats.rb_node;
  93	struct rb_node *parent = NULL;
  94	struct thread_stat *p;
  95
  96	while (*rb) {
  97		p = container_of(*rb, struct thread_stat, rb);
  98		parent = *rb;
  99
 100		if (new->tid < p->tid)
 101			rb = &(*rb)->rb_left;
 102		else if (new->tid > p->tid)
 103			rb = &(*rb)->rb_right;
 104		else
 105			BUG_ON("inserting invalid thread_stat\n");
 106	}
 107
 108	rb_link_node(&new->rb, parent, rb);
 109	rb_insert_color(&new->rb, &thread_stats);
 110}
 111
 112static struct thread_stat *thread_stat_findnew_after_first(u32 tid)
 113{
 114	struct thread_stat *st;
 115
 116	st = thread_stat_find(tid);
 117	if (st)
 118		return st;
 119
 120	st = zalloc(sizeof(struct thread_stat));
 121	if (!st) {
 122		pr_err("memory allocation failed\n");
 123		return NULL;
 124	}
 125
 126	st->tid = tid;
 127	INIT_LIST_HEAD(&st->seq_list);
 128
 129	thread_stat_insert(st);
 130
 131	return st;
 132}
 133
 134static struct thread_stat *thread_stat_findnew_first(u32 tid);
 135static struct thread_stat *(*thread_stat_findnew)(u32 tid) =
 136	thread_stat_findnew_first;
 137
 138static struct thread_stat *thread_stat_findnew_first(u32 tid)
 139{
 140	struct thread_stat *st;
 141
 142	st = zalloc(sizeof(struct thread_stat));
 143	if (!st) {
 144		pr_err("memory allocation failed\n");
 145		return NULL;
 146	}
 147	st->tid = tid;
 148	INIT_LIST_HEAD(&st->seq_list);
 149
 150	rb_link_node(&st->rb, NULL, &thread_stats.rb_node);
 151	rb_insert_color(&st->rb, &thread_stats);
 152
 153	thread_stat_findnew = thread_stat_findnew_after_first;
 154	return st;
 155}
 156
  157/* build a simple key function: returns whether "one" is bigger than "two" */
 158#define SINGLE_KEY(member)						\
 159	static int lock_stat_key_ ## member(struct lock_stat *one,	\
 160					 struct lock_stat *two)		\
 161	{								\
 162		return one->member > two->member;			\
 163	}
 164
 165SINGLE_KEY(nr_acquired)
 166SINGLE_KEY(nr_contended)
 167SINGLE_KEY(avg_wait_time)
 168SINGLE_KEY(wait_time_total)
 169SINGLE_KEY(wait_time_max)
 170
 171static int lock_stat_key_wait_time_min(struct lock_stat *one,
 172					struct lock_stat *two)
 173{
 174	u64 s1 = one->wait_time_min;
 175	u64 s2 = two->wait_time_min;
 176	if (s1 == ULLONG_MAX)
 177		s1 = 0;
 178	if (s2 == ULLONG_MAX)
 179		s2 = 0;
 180	return s1 > s2;
 181}
 182
 183struct lock_key {
 184	/*
  185	 * name: the value specified by the user;
  186	 * this should be simpler than the raw name of the member,
 187	 * e.g. nr_acquired -> acquired, wait_time_total -> wait_total
 188	 */
 189	const char		*name;
 190	/* header: the string printed on the header line */
 191	const char		*header;
 192	/* len: the printing width of the field */
 193	int			len;
 194	/* key: a pointer to function to compare two lock stats for sorting */
 195	int			(*key)(struct lock_stat*, struct lock_stat*);
 196	/* print: a pointer to function to print a given lock stats */
 197	void			(*print)(struct lock_key*, struct lock_stat*);
 198	/* list: list entry to link this */
 199	struct list_head	list;
 200};
 201
 202static void lock_stat_key_print_time(unsigned long long nsec, int len)
 203{
 204	static const struct {
 205		float base;
 206		const char *unit;
 207	} table[] = {
 208		{ 1e9 * 3600, "h " },
 209		{ 1e9 * 60, "m " },
 210		{ 1e9, "s " },
 211		{ 1e6, "ms" },
 212		{ 1e3, "us" },
 213		{ 0, NULL },
 214	};
 215
 216	for (int i = 0; table[i].unit; i++) {
 217		if (nsec < table[i].base)
 218			continue;
 219
 220		pr_info("%*.2f %s", len - 3, nsec / table[i].base, table[i].unit);
 221		return;
 222	}
 223
 224	pr_info("%*llu %s", len - 3, nsec, "ns");
 225}
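/*
 * The table above is scanned from the largest base down, so a wait time
 * is printed in the largest unit it reaches: e.g. 1500000 nsec comes
 * out as "1.50 ms", and anything below 1 usec falls through to the
 * plain nanosecond format.
 */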
 226
 227#define PRINT_KEY(member)						\
 228static void lock_stat_key_print_ ## member(struct lock_key *key,	\
 229					   struct lock_stat *ls)	\
 230{									\
 231	pr_info("%*llu", key->len, (unsigned long long)ls->member);	\
 232}
 233
 234#define PRINT_TIME(member)						\
 235static void lock_stat_key_print_ ## member(struct lock_key *key,	\
 236					   struct lock_stat *ls)	\
 237{									\
 238	lock_stat_key_print_time((unsigned long long)ls->member, key->len);	\
 239}
 240
 241PRINT_KEY(nr_acquired)
 242PRINT_KEY(nr_contended)
 243PRINT_TIME(avg_wait_time)
 244PRINT_TIME(wait_time_total)
 245PRINT_TIME(wait_time_max)
 246
 247static void lock_stat_key_print_wait_time_min(struct lock_key *key,
 248					      struct lock_stat *ls)
 249{
 250	u64 wait_time = ls->wait_time_min;
 251
 252	if (wait_time == ULLONG_MAX)
 253		wait_time = 0;
 254
 255	lock_stat_key_print_time(wait_time, key->len);
 256}
 257
 258
 259static const char		*sort_key = "acquired";
 260
 261static int			(*compare)(struct lock_stat *, struct lock_stat *);
 262
 263static struct rb_root		sorted; /* place to store intermediate data */
 264static struct rb_root		result;	/* place to store sorted data */
 265
 266static LIST_HEAD(lock_keys);
 267static const char		*output_fields;
 268
 269#define DEF_KEY_LOCK(name, header, fn_suffix, len)			\
 270	{ #name, header, len, lock_stat_key_ ## fn_suffix, lock_stat_key_print_ ## fn_suffix, {} }
 271static struct lock_key report_keys[] = {
 272	DEF_KEY_LOCK(acquired, "acquired", nr_acquired, 10),
 273	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
 274	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
 275	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
 276	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
 277	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
 278
  279	/* more complicated extra comparisons should go here */
 280	{ }
 281};
 282
 283static struct lock_key contention_keys[] = {
 284	DEF_KEY_LOCK(contended, "contended", nr_contended, 10),
 285	DEF_KEY_LOCK(wait_total, "total wait", wait_time_total, 12),
 286	DEF_KEY_LOCK(wait_max, "max wait", wait_time_max, 12),
 287	DEF_KEY_LOCK(wait_min, "min wait", wait_time_min, 12),
 288	DEF_KEY_LOCK(avg_wait, "avg wait", avg_wait_time, 12),
 289
  290	/* more complicated extra comparisons should go here */
 291	{ }
 292};
 293
 294static int select_key(bool contention)
 295{
 296	int i;
 297	struct lock_key *keys = report_keys;
 298
 299	if (contention)
 300		keys = contention_keys;
 301
 302	for (i = 0; keys[i].name; i++) {
 303		if (!strcmp(keys[i].name, sort_key)) {
 304			compare = keys[i].key;
 305
 306			/* selected key should be in the output fields */
 307			if (list_empty(&keys[i].list))
 308				list_add_tail(&keys[i].list, &lock_keys);
 309
 310			return 0;
 311		}
 312	}
 313
 314	pr_err("Unknown compare key: %s\n", sort_key);
 315	return -1;
 316}
 317
 318static int add_output_field(bool contention, char *name)
 319{
 320	int i;
 321	struct lock_key *keys = report_keys;
 322
 323	if (contention)
 324		keys = contention_keys;
 325
 326	for (i = 0; keys[i].name; i++) {
 327		if (strcmp(keys[i].name, name))
 328			continue;
 329
 330		/* prevent double link */
 331		if (list_empty(&keys[i].list))
 332			list_add_tail(&keys[i].list, &lock_keys);
 333
 334		return 0;
 335	}
 336
 337	pr_err("Unknown output field: %s\n", name);
 338	return -1;
 339}
 340
 341static int setup_output_field(bool contention, const char *str)
 342{
 343	char *tok, *tmp, *orig;
 344	int i, ret = 0;
 345	struct lock_key *keys = report_keys;
 346
 347	if (contention)
 348		keys = contention_keys;
 349
 350	/* no output field given: use all of them */
 351	if (str == NULL) {
 352		for (i = 0; keys[i].name; i++)
 353			list_add_tail(&keys[i].list, &lock_keys);
 354		return 0;
 355	}
 356
 357	for (i = 0; keys[i].name; i++)
 358		INIT_LIST_HEAD(&keys[i].list);
 359
 360	orig = tmp = strdup(str);
 361	if (orig == NULL)
 362		return -ENOMEM;
 363
  364	while ((tok = strsep(&tmp, ",")) != NULL) {
 365		ret = add_output_field(contention, tok);
 366		if (ret < 0)
 367			break;
 368	}
 369	free(orig);
 370
 371	return ret;
 372}
 373
 374static void combine_lock_stats(struct lock_stat *st)
 375{
 376	struct rb_node **rb = &sorted.rb_node;
 377	struct rb_node *parent = NULL;
 378	struct lock_stat *p;
 379	int ret;
 380
 381	while (*rb) {
 382		p = container_of(*rb, struct lock_stat, rb);
 383		parent = *rb;
 384
 385		if (st->name && p->name)
 386			ret = strcmp(st->name, p->name);
 387		else
 388			ret = !!st->name - !!p->name;
 389
 390		if (ret == 0) {
 391			p->nr_acquired += st->nr_acquired;
 392			p->nr_contended += st->nr_contended;
 393			p->wait_time_total += st->wait_time_total;
 394
 395			if (p->nr_contended)
 396				p->avg_wait_time = p->wait_time_total / p->nr_contended;
 397
 398			if (p->wait_time_min > st->wait_time_min)
 399				p->wait_time_min = st->wait_time_min;
 400			if (p->wait_time_max < st->wait_time_max)
 401				p->wait_time_max = st->wait_time_max;
 402
 403			p->broken |= st->broken;
 404			st->combined = 1;
 405			return;
 406		}
 407
 408		if (ret < 0)
 409			rb = &(*rb)->rb_left;
 410		else
 411			rb = &(*rb)->rb_right;
 412	}
 413
 414	rb_link_node(&st->rb, parent, rb);
 415	rb_insert_color(&st->rb, &sorted);
 416}
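/*
 * When lock combining is requested (the combine_locks flag set from the
 * command line), statistics of locks sharing a name are merged into the
 * node already in the "sorted" tree and the duplicate is marked
 * combined, so insert_to_result() below can skip it.
 */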
 417
 418static void insert_to_result(struct lock_stat *st,
 419			     int (*bigger)(struct lock_stat *, struct lock_stat *))
 420{
 421	struct rb_node **rb = &result.rb_node;
 422	struct rb_node *parent = NULL;
 423	struct lock_stat *p;
 424
 425	if (combine_locks && st->combined)
 426		return;
 427
 428	while (*rb) {
 429		p = container_of(*rb, struct lock_stat, rb);
 430		parent = *rb;
 431
 432		if (bigger(st, p))
 433			rb = &(*rb)->rb_left;
 434		else
 435			rb = &(*rb)->rb_right;
 436	}
 437
 438	rb_link_node(&st->rb, parent, rb);
 439	rb_insert_color(&st->rb, &result);
 440}
 441
 442/* returns left most element of result, and erase it */
 443static struct lock_stat *pop_from_result(void)
 444{
 445	struct rb_node *node = result.rb_node;
 446
 447	if (!node)
 448		return NULL;
 449
 450	while (node->rb_left)
 451		node = node->rb_left;
 452
 453	rb_erase(node, &result);
 454	return container_of(node, struct lock_stat, rb);
 455}
 456
 457static struct lock_stat *lock_stat_find(u64 addr)
 458{
 459	struct hlist_head *entry = lockhashentry(addr);
 460	struct lock_stat *ret;
 461
 462	hlist_for_each_entry(ret, entry, hash_entry) {
 463		if (ret->addr == addr)
 464			return ret;
 465	}
 466	return NULL;
 467}
 468
 469static struct lock_stat *lock_stat_findnew(u64 addr, const char *name, int flags)
 470{
 471	struct hlist_head *entry = lockhashentry(addr);
 472	struct lock_stat *ret, *new;
 473
 474	hlist_for_each_entry(ret, entry, hash_entry) {
 475		if (ret->addr == addr)
 476			return ret;
 477	}
 478
 479	new = zalloc(sizeof(struct lock_stat));
 480	if (!new)
 481		goto alloc_failed;
 482
 483	new->addr = addr;
 484	new->name = strdup(name);
 485	if (!new->name) {
 486		free(new);
 487		goto alloc_failed;
 488	}
 489
 490	new->flags = flags;
 491	new->wait_time_min = ULLONG_MAX;
 492
 493	hlist_add_head(&new->hash_entry, entry);
 494	return new;
 495
 496alloc_failed:
 497	pr_err("memory allocation failed\n");
 498	return NULL;
 499}
 500
 501struct trace_lock_handler {
  502	/* it's used when CONFIG_LOCKDEP is enabled */
 503	int (*acquire_event)(struct evsel *evsel,
 504			     struct perf_sample *sample);
 505
  506	/* it's used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
 507	int (*acquired_event)(struct evsel *evsel,
 508			      struct perf_sample *sample);
 509
  510	/* it's used when CONFIG_LOCKDEP && CONFIG_LOCK_STAT are enabled */
 511	int (*contended_event)(struct evsel *evsel,
 512			       struct perf_sample *sample);
 513
  514	/* it's used when CONFIG_LOCKDEP is enabled */
 515	int (*release_event)(struct evsel *evsel,
 516			     struct perf_sample *sample);
 517
 518	/* it's used when CONFIG_LOCKDEP is off */
 519	int (*contention_begin_event)(struct evsel *evsel,
 520				      struct perf_sample *sample);
 521
 522	/* it's used when CONFIG_LOCKDEP is off */
 523	int (*contention_end_event)(struct evsel *evsel,
 524				    struct perf_sample *sample);
 525};
 526
 527static struct lock_seq_stat *get_seq(struct thread_stat *ts, u64 addr)
 528{
 529	struct lock_seq_stat *seq;
 530
 531	list_for_each_entry(seq, &ts->seq_list, list) {
 532		if (seq->addr == addr)
 533			return seq;
 534	}
 535
 536	seq = zalloc(sizeof(struct lock_seq_stat));
 537	if (!seq) {
 538		pr_err("memory allocation failed\n");
 539		return NULL;
 540	}
 541	seq->state = SEQ_STATE_UNINITIALIZED;
 542	seq->addr = addr;
 543
 544	list_add(&seq->list, &ts->seq_list);
 545	return seq;
 546}
 547
 548enum broken_state {
 549	BROKEN_ACQUIRE,
 550	BROKEN_ACQUIRED,
 551	BROKEN_CONTENDED,
 552	BROKEN_RELEASE,
 553	BROKEN_MAX,
 554};
 555
 556static int bad_hist[BROKEN_MAX];
 557
 558enum acquire_flags {
 559	TRY_LOCK = 1,
 560	READ_LOCK = 2,
 561};
 562
 563static int get_key_by_aggr_mode_simple(u64 *key, u64 addr, u32 tid)
 564{
 565	switch (aggr_mode) {
 566	case LOCK_AGGR_ADDR:
 567		*key = addr;
 568		break;
 569	case LOCK_AGGR_TASK:
 570		*key = tid;
 571		break;
 572	case LOCK_AGGR_CALLER:
 573	default:
 574		pr_err("Invalid aggregation mode: %d\n", aggr_mode);
 575		return -EINVAL;
 576	}
 577	return 0;
 578}
 579
 580static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample);
 581
 582static int get_key_by_aggr_mode(u64 *key, u64 addr, struct evsel *evsel,
 583				 struct perf_sample *sample)
 584{
 585	if (aggr_mode == LOCK_AGGR_CALLER) {
 586		*key = callchain_id(evsel, sample);
 587		return 0;
 588	}
 589	return get_key_by_aggr_mode_simple(key, addr, sample->tid);
 590}
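/*
 * In LOCK_AGGR_CALLER mode the key is a hash of the sampled callchain
 * (callchain_id() below), so contention is accounted per call site
 * rather than per lock instance or per task.
 */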
 591
 592static int report_lock_acquire_event(struct evsel *evsel,
 593				     struct perf_sample *sample)
 594{
 595	struct lock_stat *ls;
 596	struct thread_stat *ts;
 597	struct lock_seq_stat *seq;
 598	const char *name = evsel__strval(evsel, sample, "name");
 599	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 600	int flag = evsel__intval(evsel, sample, "flags");
 601	u64 key;
 602	int ret;
 603
 604	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 605	if (ret < 0)
 606		return ret;
 607
 608	ls = lock_stat_findnew(key, name, 0);
 609	if (!ls)
 610		return -ENOMEM;
 611
 612	ts = thread_stat_findnew(sample->tid);
 613	if (!ts)
 614		return -ENOMEM;
 615
 616	seq = get_seq(ts, addr);
 617	if (!seq)
 618		return -ENOMEM;
 619
 620	switch (seq->state) {
 621	case SEQ_STATE_UNINITIALIZED:
 622	case SEQ_STATE_RELEASED:
 623		if (!flag) {
 624			seq->state = SEQ_STATE_ACQUIRING;
 625		} else {
 626			if (flag & TRY_LOCK)
 627				ls->nr_trylock++;
 628			if (flag & READ_LOCK)
 629				ls->nr_readlock++;
 630			seq->state = SEQ_STATE_READ_ACQUIRED;
 631			seq->read_count = 1;
 632			ls->nr_acquired++;
 633		}
 634		break;
 635	case SEQ_STATE_READ_ACQUIRED:
 636		if (flag & READ_LOCK) {
 637			seq->read_count++;
 638			ls->nr_acquired++;
 639			goto end;
 640		} else {
 641			goto broken;
 642		}
 643		break;
 644	case SEQ_STATE_ACQUIRED:
 645	case SEQ_STATE_ACQUIRING:
 646	case SEQ_STATE_CONTENDED:
 647broken:
 648		/* broken lock sequence */
 649		if (!ls->broken) {
 650			ls->broken = 1;
 651			bad_hist[BROKEN_ACQUIRE]++;
 652		}
 653		list_del_init(&seq->list);
 654		free(seq);
 655		goto end;
 656	default:
 657		BUG_ON("Unknown state of lock sequence found!\n");
 658		break;
 659	}
 660
 661	ls->nr_acquire++;
 662	seq->prev_event_time = sample->time;
 663end:
 664	return 0;
 665}
 666
 667static int report_lock_acquired_event(struct evsel *evsel,
 668				      struct perf_sample *sample)
 669{
 670	struct lock_stat *ls;
 671	struct thread_stat *ts;
 672	struct lock_seq_stat *seq;
 673	u64 contended_term;
 674	const char *name = evsel__strval(evsel, sample, "name");
 675	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 676	u64 key;
 677	int ret;
 678
 679	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 680	if (ret < 0)
 681		return ret;
 682
 683	ls = lock_stat_findnew(key, name, 0);
 684	if (!ls)
 685		return -ENOMEM;
 686
 687	ts = thread_stat_findnew(sample->tid);
 688	if (!ts)
 689		return -ENOMEM;
 690
 691	seq = get_seq(ts, addr);
 692	if (!seq)
 693		return -ENOMEM;
 694
 695	switch (seq->state) {
 696	case SEQ_STATE_UNINITIALIZED:
 697		/* orphan event, do nothing */
 698		return 0;
 699	case SEQ_STATE_ACQUIRING:
 700		break;
 701	case SEQ_STATE_CONTENDED:
 702		contended_term = sample->time - seq->prev_event_time;
 703		ls->wait_time_total += contended_term;
 704		if (contended_term < ls->wait_time_min)
 705			ls->wait_time_min = contended_term;
 706		if (ls->wait_time_max < contended_term)
 707			ls->wait_time_max = contended_term;
 708		break;
 709	case SEQ_STATE_RELEASED:
 710	case SEQ_STATE_ACQUIRED:
 711	case SEQ_STATE_READ_ACQUIRED:
 712		/* broken lock sequence */
 713		if (!ls->broken) {
 714			ls->broken = 1;
 715			bad_hist[BROKEN_ACQUIRED]++;
 716		}
 717		list_del_init(&seq->list);
 718		free(seq);
 719		goto end;
 720	default:
 721		BUG_ON("Unknown state of lock sequence found!\n");
 722		break;
 723	}
 724
 725	seq->state = SEQ_STATE_ACQUIRED;
 726	ls->nr_acquired++;
 727	ls->avg_wait_time = ls->nr_contended ? ls->wait_time_total/ls->nr_contended : 0;
 728	seq->prev_event_time = sample->time;
 729end:
 730	return 0;
 731}
 732
 733static int report_lock_contended_event(struct evsel *evsel,
 734				       struct perf_sample *sample)
 735{
 736	struct lock_stat *ls;
 737	struct thread_stat *ts;
 738	struct lock_seq_stat *seq;
 739	const char *name = evsel__strval(evsel, sample, "name");
 740	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 741	u64 key;
 742	int ret;
 743
 744	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 745	if (ret < 0)
 746		return ret;
 747
 748	ls = lock_stat_findnew(key, name, 0);
 749	if (!ls)
 750		return -ENOMEM;
 751
 752	ts = thread_stat_findnew(sample->tid);
 753	if (!ts)
 754		return -ENOMEM;
 755
 756	seq = get_seq(ts, addr);
 757	if (!seq)
 758		return -ENOMEM;
 759
 760	switch (seq->state) {
 761	case SEQ_STATE_UNINITIALIZED:
 762		/* orphan event, do nothing */
 763		return 0;
 764	case SEQ_STATE_ACQUIRING:
 765		break;
 766	case SEQ_STATE_RELEASED:
 767	case SEQ_STATE_ACQUIRED:
 768	case SEQ_STATE_READ_ACQUIRED:
 769	case SEQ_STATE_CONTENDED:
 770		/* broken lock sequence */
 771		if (!ls->broken) {
 772			ls->broken = 1;
 773			bad_hist[BROKEN_CONTENDED]++;
 774		}
 775		list_del_init(&seq->list);
 776		free(seq);
 777		goto end;
 778	default:
 779		BUG_ON("Unknown state of lock sequence found!\n");
 780		break;
 781	}
 782
 783	seq->state = SEQ_STATE_CONTENDED;
 784	ls->nr_contended++;
 785	ls->avg_wait_time = ls->wait_time_total/ls->nr_contended;
 786	seq->prev_event_time = sample->time;
 787end:
 788	return 0;
 789}
 790
 791static int report_lock_release_event(struct evsel *evsel,
 792				     struct perf_sample *sample)
 793{
 794	struct lock_stat *ls;
 795	struct thread_stat *ts;
 796	struct lock_seq_stat *seq;
 797	const char *name = evsel__strval(evsel, sample, "name");
 798	u64 addr = evsel__intval(evsel, sample, "lockdep_addr");
 799	u64 key;
 800	int ret;
 801
 802	ret = get_key_by_aggr_mode_simple(&key, addr, sample->tid);
 803	if (ret < 0)
 804		return ret;
 805
 806	ls = lock_stat_findnew(key, name, 0);
 807	if (!ls)
 808		return -ENOMEM;
 809
 810	ts = thread_stat_findnew(sample->tid);
 811	if (!ts)
 812		return -ENOMEM;
 813
 814	seq = get_seq(ts, addr);
 815	if (!seq)
 816		return -ENOMEM;
 817
 818	switch (seq->state) {
 819	case SEQ_STATE_UNINITIALIZED:
 820		goto end;
 821	case SEQ_STATE_ACQUIRED:
 822		break;
 823	case SEQ_STATE_READ_ACQUIRED:
 824		seq->read_count--;
 825		BUG_ON(seq->read_count < 0);
 826		if (seq->read_count) {
 827			ls->nr_release++;
 828			goto end;
 829		}
 830		break;
 831	case SEQ_STATE_ACQUIRING:
 832	case SEQ_STATE_CONTENDED:
 833	case SEQ_STATE_RELEASED:
 834		/* broken lock sequence */
 835		if (!ls->broken) {
 836			ls->broken = 1;
 837			bad_hist[BROKEN_RELEASE]++;
 838		}
 839		goto free_seq;
 840	default:
 841		BUG_ON("Unknown state of lock sequence found!\n");
 842		break;
 843	}
 844
 845	ls->nr_release++;
 846free_seq:
 847	list_del_init(&seq->list);
 848	free(seq);
 849end:
 850	return 0;
 851}
 852
 853static int get_symbol_name_offset(struct map *map, struct symbol *sym, u64 ip,
 854				  char *buf, int size)
 855{
 856	u64 offset;
 857
 858	if (map == NULL || sym == NULL) {
 859		buf[0] = '\0';
 860		return 0;
 861	}
 862
 863	offset = map->map_ip(map, ip) - sym->start;
 864
 865	if (offset)
 866		return scnprintf(buf, size, "%s+%#lx", sym->name, offset);
 867	else
 868		return strlcpy(buf, sym->name, size);
 869}
 870static int lock_contention_caller(struct evsel *evsel, struct perf_sample *sample,
 871				  char *buf, int size)
 872{
 873	struct thread *thread;
 874	struct callchain_cursor *cursor = &callchain_cursor;
 875	struct machine *machine = &session->machines.host;
 876	struct symbol *sym;
 877	int skip = 0;
 878	int ret;
 879
  880	/* lock names will be replaced with task names later */
 881	if (show_thread_stats)
 882		return -1;
 883
 884	thread = machine__findnew_thread(machine, -1, sample->pid);
 885	if (thread == NULL)
 886		return -1;
 887
 888	/* use caller function name from the callchain */
 889	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
 890					NULL, NULL, max_stack_depth);
 891	if (ret != 0) {
 892		thread__put(thread);
 893		return -1;
 894	}
 895
 896	callchain_cursor_commit(cursor);
 897	thread__put(thread);
 898
 899	while (true) {
 900		struct callchain_cursor_node *node;
 901
 902		node = callchain_cursor_current(cursor);
 903		if (node == NULL)
 904			break;
 905
 906		/* skip first few entries - for lock functions */
 907		if (++skip <= stack_skip)
 908			goto next;
 909
 910		sym = node->ms.sym;
 911		if (sym && !machine__is_lock_function(machine, node->ip)) {
 912			get_symbol_name_offset(node->ms.map, sym, node->ip,
 913					       buf, size);
 914			return 0;
 915		}
 916
 917next:
 918		callchain_cursor_advance(cursor);
 919	}
 920	return -1;
 921}
 922
 923static u64 callchain_id(struct evsel *evsel, struct perf_sample *sample)
 924{
 925	struct callchain_cursor *cursor = &callchain_cursor;
 926	struct machine *machine = &session->machines.host;
 927	struct thread *thread;
 928	u64 hash = 0;
 929	int skip = 0;
 930	int ret;
 931
 932	thread = machine__findnew_thread(machine, -1, sample->pid);
 933	if (thread == NULL)
 934		return -1;
 935
 936	/* use caller function name from the callchain */
 937	ret = thread__resolve_callchain(thread, cursor, evsel, sample,
 938					NULL, NULL, max_stack_depth);
 939	thread__put(thread);
 940
 941	if (ret != 0)
 942		return -1;
 943
 944	callchain_cursor_commit(cursor);
 945
 946	while (true) {
 947		struct callchain_cursor_node *node;
 948
 949		node = callchain_cursor_current(cursor);
 950		if (node == NULL)
 951			break;
 952
 953		/* skip first few entries - for lock functions */
 954		if (++skip <= stack_skip)
 955			goto next;
 956
 957		if (node->ms.sym && machine__is_lock_function(machine, node->ip))
 958			goto next;
 959
 960		hash ^= hash_long((unsigned long)node->ip, 64);
 961
 962next:
 963		callchain_cursor_advance(cursor);
 964	}
 965	return hash;
 966}
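/*
 * The id is built by XOR-ing hash_long() of every instruction pointer
 * past the skipped lock-internal entries, so two samples contending
 * from the same call path map to the same lock_stat.
 */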
 967
 968static u64 *get_callstack(struct perf_sample *sample, int max_stack)
 969{
 970	u64 *callstack;
 971	u64 i;
 972	int c;
 973
 974	callstack = calloc(max_stack, sizeof(*callstack));
 975	if (callstack == NULL)
 976		return NULL;
 977
 978	for (i = 0, c = 0; i < sample->callchain->nr && c < max_stack; i++) {
 979		u64 ip = sample->callchain->ips[i];
 980
 981		if (ip >= PERF_CONTEXT_MAX)
 982			continue;
 983
 984		callstack[c++] = ip;
 985	}
 986	return callstack;
 987}
 988
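/*
 * Handle lock:contention_begin: resolve symbol filters on first use,
 * find or create the lock_stat for the aggregation key, apply the
 * type and address filters, and move the per-thread lock sequence to
 * SEQ_STATE_CONTENDED.
 */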
 989static int report_lock_contention_begin_event(struct evsel *evsel,
 990					      struct perf_sample *sample)
 991{
 992	struct lock_stat *ls;
 993	struct thread_stat *ts;
 994	struct lock_seq_stat *seq;
 995	u64 addr = evsel__intval(evsel, sample, "lock_addr");
 996	unsigned int flags = evsel__intval(evsel, sample, "flags");
 997	u64 key;
 998	int i, ret;
 999	static bool kmap_loaded;
1000	struct machine *machine = &session->machines.host;
1001	struct map *kmap;
1002	struct symbol *sym;
1003
1004	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1005	if (ret < 0)
1006		return ret;
1007
1008	if (!kmap_loaded) {
1009		unsigned long *addrs;
1010
1011		/* make sure it loads the kernel map to find lock symbols */
1012		map__load(machine__kernel_map(machine));
1013		kmap_loaded = true;
1014
1015		/* convert (kernel) symbols to addresses */
1016		for (i = 0; i < filters.nr_syms; i++) {
1017			sym = machine__find_kernel_symbol_by_name(machine,
1018								  filters.syms[i],
1019								  &kmap);
1020			if (sym == NULL) {
1021				pr_warning("ignore unknown symbol: %s\n",
1022					   filters.syms[i]);
1023				continue;
1024			}
1025
1026			addrs = realloc(filters.addrs,
1027					(filters.nr_addrs + 1) * sizeof(*addrs));
1028			if (addrs == NULL) {
1029				pr_warning("memory allocation failure\n");
1030				return -ENOMEM;
1031			}
1032
1033			addrs[filters.nr_addrs++] = kmap->unmap_ip(kmap, sym->start);
1034			filters.addrs = addrs;
1035		}
1036	}
1037
1038	ls = lock_stat_find(key);
1039	if (!ls) {
1040		char buf[128];
1041		const char *name = "";
1042
1043		switch (aggr_mode) {
1044		case LOCK_AGGR_ADDR:
1045			sym = machine__find_kernel_symbol(machine, key, &kmap);
1046			if (sym)
1047				name = sym->name;
1048			break;
1049		case LOCK_AGGR_CALLER:
1050			name = buf;
1051			if (lock_contention_caller(evsel, sample, buf, sizeof(buf)) < 0)
1052				name = "Unknown";
1053			break;
1054		case LOCK_AGGR_TASK:
1055		default:
1056			break;
1057		}
1058
1059		ls = lock_stat_findnew(key, name, flags);
1060		if (!ls)
1061			return -ENOMEM;
1062
1063		if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
1064			ls->callstack = get_callstack(sample, max_stack_depth);
1065			if (ls->callstack == NULL)
1066				return -ENOMEM;
1067		}
1068	}
1069
1070	if (filters.nr_types) {
1071		bool found = false;
1072
1073		for (i = 0; i < filters.nr_types; i++) {
1074			if (flags == filters.types[i]) {
1075				found = true;
1076				break;
1077			}
1078		}
1079
1080		if (!found)
1081			return 0;
1082	}
1083
1084	if (filters.nr_addrs) {
1085		bool found = false;
1086
1087		for (i = 0; i < filters.nr_addrs; i++) {
1088			if (addr == filters.addrs[i]) {
1089				found = true;
1090				break;
1091			}
1092		}
1093
1094		if (!found)
1095			return 0;
1096	}
1097
1098	ts = thread_stat_findnew(sample->tid);
1099	if (!ts)
1100		return -ENOMEM;
1101
1102	seq = get_seq(ts, addr);
1103	if (!seq)
1104		return -ENOMEM;
1105
1106	switch (seq->state) {
1107	case SEQ_STATE_UNINITIALIZED:
1108	case SEQ_STATE_ACQUIRED:
1109		break;
1110	case SEQ_STATE_CONTENDED:
1111		/*
1112		 * Nested contention_begin events can happen with mutex
1113		 * spinning; keep the original contention begin event and
1114		 * ignore the second one.
1115		 */
1116		goto end;
1117	case SEQ_STATE_ACQUIRING:
1118	case SEQ_STATE_READ_ACQUIRED:
1119	case SEQ_STATE_RELEASED:
1120		/* broken lock sequence */
1121		if (!ls->broken) {
1122			ls->broken = 1;
1123			bad_hist[BROKEN_CONTENDED]++;
1124		}
1125		list_del_init(&seq->list);
1126		free(seq);
1127		goto end;
1128	default:
1129		BUG_ON("Unknown state of lock sequence found!\n");
1130		break;
1131	}
1132
1133	if (seq->state != SEQ_STATE_CONTENDED) {
1134		seq->state = SEQ_STATE_CONTENDED;
1135		seq->prev_event_time = sample->time;
1136		ls->nr_contended++;
1137	}
1138end:
1139	return 0;
1140}
1141
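/*
 * Handle lock:contention_end: account the time since contention_begin
 * into the wait time statistics and mark the sequence ACQUIRED.
 */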
1142static int report_lock_contention_end_event(struct evsel *evsel,
1143					    struct perf_sample *sample)
1144{
1145	struct lock_stat *ls;
1146	struct thread_stat *ts;
1147	struct lock_seq_stat *seq;
1148	u64 contended_term;
1149	u64 addr = evsel__intval(evsel, sample, "lock_addr");
1150	u64 key;
1151	int ret;
1152
1153	ret = get_key_by_aggr_mode(&key, addr, evsel, sample);
1154	if (ret < 0)
1155		return ret;
1156
1157	ls = lock_stat_find(key);
1158	if (!ls)
1159		return 0;
1160
1161	ts = thread_stat_find(sample->tid);
1162	if (!ts)
1163		return 0;
1164
1165	seq = get_seq(ts, addr);
1166	if (!seq)
1167		return -ENOMEM;
1168
1169	switch (seq->state) {
1170	case SEQ_STATE_UNINITIALIZED:
1171		goto end;
1172	case SEQ_STATE_CONTENDED:
1173		contended_term = sample->time - seq->prev_event_time;
1174		ls->wait_time_total += contended_term;
1175		if (contended_term < ls->wait_time_min)
1176			ls->wait_time_min = contended_term;
1177		if (ls->wait_time_max < contended_term)
1178			ls->wait_time_max = contended_term;
1179		break;
1180	case SEQ_STATE_ACQUIRING:
1181	case SEQ_STATE_ACQUIRED:
1182	case SEQ_STATE_READ_ACQUIRED:
1183	case SEQ_STATE_RELEASED:
1184		/* broken lock sequence */
1185		if (!ls->broken) {
1186			ls->broken = 1;
1187			bad_hist[BROKEN_ACQUIRED]++;
1188		}
1189		list_del_init(&seq->list);
1190		free(seq);
1191		goto end;
1192	default:
1193		BUG_ON("Unknown state of lock sequence found!\n");
1194		break;
1195	}
1196
1197	seq->state = SEQ_STATE_ACQUIRED;
1198	ls->nr_acquired++;
1199	ls->avg_wait_time = ls->wait_time_total / ls->nr_acquired;
1200end:
1201	return 0;
1202}
1203
1204/* lock-oriented handlers */
1205/* TODO: handlers for CPU-oriented, thread-oriented stats */
1206static struct trace_lock_handler report_lock_ops  = {
1207	.acquire_event		= report_lock_acquire_event,
1208	.acquired_event		= report_lock_acquired_event,
1209	.contended_event	= report_lock_contended_event,
1210	.release_event		= report_lock_release_event,
1211	.contention_begin_event	= report_lock_contention_begin_event,
1212	.contention_end_event	= report_lock_contention_end_event,
1213};
1214
1215static struct trace_lock_handler contention_lock_ops  = {
1216	.contention_begin_event	= report_lock_contention_begin_event,
1217	.contention_end_event	= report_lock_contention_end_event,
1218};
1219
1220
1221static struct trace_lock_handler *trace_handler;
1222
1223static int evsel__process_lock_acquire(struct evsel *evsel, struct perf_sample *sample)
1224{
1225	if (trace_handler->acquire_event)
1226		return trace_handler->acquire_event(evsel, sample);
1227	return 0;
1228}
1229
1230static int evsel__process_lock_acquired(struct evsel *evsel, struct perf_sample *sample)
1231{
1232	if (trace_handler->acquired_event)
1233		return trace_handler->acquired_event(evsel, sample);
1234	return 0;
1235}
1236
1237static int evsel__process_lock_contended(struct evsel *evsel, struct perf_sample *sample)
1238{
1239	if (trace_handler->contended_event)
1240		return trace_handler->contended_event(evsel, sample);
1241	return 0;
1242}
1243
1244static int evsel__process_lock_release(struct evsel *evsel, struct perf_sample *sample)
1245{
1246	if (trace_handler->release_event)
1247		return trace_handler->release_event(evsel, sample);
1248	return 0;
1249}
1250
1251static int evsel__process_contention_begin(struct evsel *evsel, struct perf_sample *sample)
1252{
1253	if (trace_handler->contention_begin_event)
1254		return trace_handler->contention_begin_event(evsel, sample);
1255	return 0;
1256}
1257
1258static int evsel__process_contention_end(struct evsel *evsel, struct perf_sample *sample)
1259{
1260	if (trace_handler->contention_end_event)
1261		return trace_handler->contention_end_event(evsel, sample);
1262	return 0;
1263}
1264
1265static void print_bad_events(int bad, int total)
1266{
1267	/* Output for debugging; this has to be removed eventually */
1268	int i;
1269	int broken = 0;
1270	const char *name[4] =
1271		{ "acquire", "acquired", "contended", "release" };
1272
1273	for (i = 0; i < BROKEN_MAX; i++)
1274		broken += bad_hist[i];
1275
1276	if (quiet || (broken == 0 && verbose <= 0))
1277		return;
1278
1279	pr_info("\n=== output for debug ===\n\n");
1280	pr_info("bad: %d, total: %d\n", bad, total);
1281	pr_info("bad rate: %.2f %%\n", (double)bad / (double)total * 100);
1282	pr_info("histogram of events that caused bad sequences\n");
1283	for (i = 0; i < BROKEN_MAX; i++)
1284		pr_info(" %10s: %d\n", name[i], bad_hist[i]);
1285}
1286
1287/* TODO: various ways to print, coloring, nano or milli sec */
1288static void print_result(void)
1289{
1290	struct lock_stat *st;
1291	struct lock_key *key;
1292	char cut_name[20];
1293	int bad, total, printed;
1294
1295	if (!quiet) {
1296		pr_info("%20s ", "Name");
1297		list_for_each_entry(key, &lock_keys, list)
1298			pr_info("%*s ", key->len, key->header);
1299		pr_info("\n\n");
1300	}
1301
1302	bad = total = printed = 0;
1303	while ((st = pop_from_result())) {
1304		total++;
1305		if (st->broken)
1306			bad++;
1307		if (!st->nr_acquired)
1308			continue;
1309
1310		bzero(cut_name, 20);
1311
1312		if (strlen(st->name) < 20) {
1313			/* output raw name */
1314			const char *name = st->name;
1315
1316			if (show_thread_stats) {
1317				struct thread *t;
1318
1319				/* st->addr contains tid of thread */
1320				t = perf_session__findnew(session, st->addr);
1321				name = thread__comm_str(t);
1322			}
1323
1324			pr_info("%20s ", name);
1325		} else {
1326			strncpy(cut_name, st->name, 16);
1327			cut_name[16] = '.';
1328			cut_name[17] = '.';
1329			cut_name[18] = '.';
1330			cut_name[19] = '\0';
1331			/* truncate the name to keep the output aligned */
1332			pr_info("%20s ", cut_name);
1333		}
1334
1335		list_for_each_entry(key, &lock_keys, list) {
1336			key->print(key, st);
1337			pr_info(" ");
1338		}
1339		pr_info("\n");
1340
1341		if (++printed >= print_nr_entries)
1342			break;
1343	}
1344
1345	print_bad_events(bad, total);
1346}
1347
1348static bool info_threads, info_map;
1349
1350static void dump_threads(void)
1351{
1352	struct thread_stat *st;
1353	struct rb_node *node;
1354	struct thread *t;
1355
1356	pr_info("%10s: comm\n", "Thread ID");
1357
1358	node = rb_first(&thread_stats);
1359	while (node) {
1360		st = container_of(node, struct thread_stat, rb);
1361		t = perf_session__findnew(session, st->tid);
1362		pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
1363		node = rb_next(node);
1364		thread__put(t);
1365	}
1366}
1367
1368static int compare_maps(struct lock_stat *a, struct lock_stat *b)
1369{
1370	int ret;
1371
1372	if (a->name && b->name)
1373		ret = strcmp(a->name, b->name);
1374	else
1375		ret = !!a->name - !!b->name;
1376
1377	if (!ret)
1378		return a->addr < b->addr;
1379	else
1380		return ret < 0;
1381}
1382
1383static void dump_map(void)
1384{
1385	unsigned int i;
1386	struct lock_stat *st;
1387
1388	pr_info("Address of instance: name of class\n");
1389	for (i = 0; i < LOCKHASH_SIZE; i++) {
1390		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1391			insert_to_result(st, compare_maps);
1392		}
1393	}
1394
1395	while ((st = pop_from_result()))
1396		pr_info(" %#llx: %s\n", (unsigned long long)st->addr, st->name);
1397}
1398
1399static int dump_info(void)
1400{
1401	int rc = 0;
1402
1403	if (info_threads)
1404		dump_threads();
1405	else if (info_map)
1406		dump_map();
1407	else {
1408		rc = -1;
1409		pr_err("Unknown type of information\n");
1410	}
1411
1412	return rc;
1413}
1414
1415static const struct evsel_str_handler lock_tracepoints[] = {
1416	{ "lock:lock_acquire",	 evsel__process_lock_acquire,   }, /* CONFIG_LOCKDEP */
1417	{ "lock:lock_acquired",	 evsel__process_lock_acquired,  }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1418	{ "lock:lock_contended", evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
1419	{ "lock:lock_release",	 evsel__process_lock_release,   }, /* CONFIG_LOCKDEP */
1420};
1421
1422static const struct evsel_str_handler contention_tracepoints[] = {
1423	{ "lock:contention_begin", evsel__process_contention_begin, },
1424	{ "lock:contention_end",   evsel__process_contention_end,   },
1425};
1426
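/*
 * With pipe input the tracepoint handlers cannot be installed up
 * front (see the !data.is_pipe checks below), so hook them whenever
 * an event update arrives.
 */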
1427static int process_event_update(struct perf_tool *tool,
1428				union perf_event *event,
1429				struct evlist **pevlist)
1430{
1431	int ret;
1432
1433	ret = perf_event__process_event_update(tool, event, pevlist);
1434	if (ret < 0)
1435		return ret;
1436
1437	/* this can return -EEXIST since we call it for each evsel */
1438	perf_session__set_tracepoints_handlers(session, lock_tracepoints);
1439	perf_session__set_tracepoints_handlers(session, contention_tracepoints);
1440	return 0;
1441}
1442
1443typedef int (*tracepoint_handler)(struct evsel *evsel,
1444				  struct perf_sample *sample);
1445
1446static int process_sample_event(struct perf_tool *tool __maybe_unused,
1447				union perf_event *event,
1448				struct perf_sample *sample,
1449				struct evsel *evsel,
1450				struct machine *machine)
1451{
1452	int err = 0;
1453	struct thread *thread = machine__findnew_thread(machine, sample->pid,
1454							sample->tid);
1455
1456	if (thread == NULL) {
1457		pr_debug("problem processing %d event, skipping it.\n",
1458			event->header.type);
1459		return -1;
1460	}
1461
1462	if (evsel->handler != NULL) {
1463		tracepoint_handler f = evsel->handler;
1464		err = f(evsel, sample);
1465	}
1466
1467	thread__put(thread);
1468
1469	return err;
1470}
1471
1472static void combine_result(void)
1473{
1474	unsigned int i;
1475	struct lock_stat *st;
1476
1477	if (!combine_locks)
1478		return;
1479
1480	for (i = 0; i < LOCKHASH_SIZE; i++) {
1481		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1482			combine_lock_stats(st);
1483		}
1484	}
1485}
1486
1487static void sort_result(void)
1488{
1489	unsigned int i;
1490	struct lock_stat *st;
1491
1492	for (i = 0; i < LOCKHASH_SIZE; i++) {
1493		hlist_for_each_entry(st, &lockhash_table[i], hash_entry) {
1494			insert_to_result(st, compare);
1495		}
1496	}
1497}
1498
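/*
 * Translate LCB_F_* flag combinations to lock type names and back;
 * get_type_str() is used for printing and get_type_flag() for the
 * -Y/--type-filter option.
 */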
1499static const struct {
1500	unsigned int flags;
1501	const char *name;
1502} lock_type_table[] = {
1503	{ 0,				"semaphore" },
1504	{ LCB_F_SPIN,			"spinlock" },
1505	{ LCB_F_SPIN | LCB_F_READ,	"rwlock:R" },
1506	{ LCB_F_SPIN | LCB_F_WRITE,	"rwlock:W"},
1507	{ LCB_F_READ,			"rwsem:R" },
1508	{ LCB_F_WRITE,			"rwsem:W" },
1509	{ LCB_F_RT,			"rtmutex" },
1510	{ LCB_F_RT | LCB_F_READ,	"rwlock-rt:R" },
1511	{ LCB_F_RT | LCB_F_WRITE,	"rwlock-rt:W"},
1512	{ LCB_F_PERCPU | LCB_F_READ,	"pcpu-sem:R" },
1513	{ LCB_F_PERCPU | LCB_F_WRITE,	"pcpu-sem:W" },
1514	{ LCB_F_MUTEX,			"mutex" },
1515	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex" },
1516	/* alias for get_type_flag() */
1517	{ LCB_F_MUTEX | LCB_F_SPIN,	"mutex-spin" },
1518};
1519
1520static const char *get_type_str(unsigned int flags)
1521{
1522	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1523		if (lock_type_table[i].flags == flags)
1524			return lock_type_table[i].name;
1525	}
1526	return "unknown";
1527}
1528
1529static unsigned int get_type_flag(const char *str)
1530{
1531	for (unsigned int i = 0; i < ARRAY_SIZE(lock_type_table); i++) {
1532		if (!strcmp(lock_type_table[i].name, str))
1533			return lock_type_table[i].flags;
1534	}
1535	return UINT_MAX;
1536}
1537
1538static void lock_filter_finish(void)
1539{
1540	zfree(&filters.types);
1541	filters.nr_types = 0;
1542
1543	zfree(&filters.addrs);
1544	filters.nr_addrs = 0;
1545
1546	for (int i = 0; i < filters.nr_syms; i++)
1547		free(filters.syms[i]);
1548
1549	zfree(&filters.syms);
1550	filters.nr_syms = 0;
1551}
1552
1553static void sort_contention_result(void)
1554{
1555	sort_result();
1556}
1557
1558static void print_contention_result(struct lock_contention *con)
1559{
1560	struct lock_stat *st;
1561	struct lock_key *key;
1562	int bad, total, printed;
1563
1564	if (!quiet) {
1565		list_for_each_entry(key, &lock_keys, list)
1566			pr_info("%*s ", key->len, key->header);
1567
1568		switch (aggr_mode) {
1569		case LOCK_AGGR_TASK:
1570			pr_info("  %10s   %s\n\n", "pid", "comm");
1571			break;
1572		case LOCK_AGGR_CALLER:
1573			pr_info("  %10s   %s\n\n", "type", "caller");
1574			break;
1575		case LOCK_AGGR_ADDR:
1576			pr_info("  %16s   %s\n\n", "address", "symbol");
1577			break;
1578		default:
1579			break;
1580		}
1581	}
1582
1583	bad = total = printed = 0;
1584	if (use_bpf)
1585		bad = bad_hist[BROKEN_CONTENDED];
1586
1587	while ((st = pop_from_result())) {
1588		struct thread *t;
1589		int pid;
1590
1591		total += use_bpf ? st->nr_contended : 1;
1592		if (st->broken)
1593			bad++;
1594
1595		if (!st->wait_time_total)
1596			continue;
1597
1598		list_for_each_entry(key, &lock_keys, list) {
1599			key->print(key, st);
1600			pr_info(" ");
1601		}
1602
1603		switch (aggr_mode) {
1604		case LOCK_AGGR_CALLER:
1605			pr_info("  %10s   %s\n", get_type_str(st->flags), st->name);
1606			break;
1607		case LOCK_AGGR_TASK:
1608			pid = st->addr;
1609			t = perf_session__findnew(session, pid);
1610			pr_info("  %10d   %s\n", pid, thread__comm_str(t));
1611			break;
1612		case LOCK_AGGR_ADDR:
1613			pr_info("  %016llx   %s\n", (unsigned long long)st->addr,
1614				st->name ? : "");
1615			break;
1616		default:
1617			break;
1618		}
1619
1620		if (aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
1621			struct map *kmap;
1622			struct symbol *sym;
1623			char buf[128];
1624			u64 ip;
1625
1626			for (int i = 0; i < max_stack_depth; i++) {
1627				if (!st->callstack || !st->callstack[i])
1628					break;
1629
1630				ip = st->callstack[i];
1631				sym = machine__find_kernel_symbol(con->machine, ip, &kmap);
1632				get_symbol_name_offset(kmap, sym, ip, buf, sizeof(buf));
1633				pr_info("\t\t\t%#lx  %s\n", (unsigned long)ip, buf);
1634			}
1635		}
1636
1637		if (++printed >= print_nr_entries)
1638			break;
1639	}
1640
1641	print_bad_events(bad, total);
1642}
1643
1644static bool force;
1645
1646static int __cmd_report(bool display_info)
1647{
1648	int err = -EINVAL;
1649	struct perf_tool eops = {
1650		.attr		 = perf_event__process_attr,
1651		.event_update	 = process_event_update,
1652		.sample		 = process_sample_event,
1653		.comm		 = perf_event__process_comm,
1654		.mmap		 = perf_event__process_mmap,
1655		.namespaces	 = perf_event__process_namespaces,
1656		.tracing_data	 = perf_event__process_tracing_data,
1657		.ordered_events	 = true,
1658	};
1659	struct perf_data data = {
1660		.path  = input_name,
1661		.mode  = PERF_DATA_MODE_READ,
1662		.force = force,
1663	};
1664
1665	session = perf_session__new(&data, &eops);
1666	if (IS_ERR(session)) {
1667		pr_err("Initializing perf session failed\n");
1668		return PTR_ERR(session);
1669	}
1670
1671	/* for lock function check */
1672	symbol_conf.sort_by_name = true;
1673	symbol_conf.allow_aliases = true;
1674	symbol__init(&session->header.env);
1675
1676	if (!data.is_pipe) {
1677		if (!perf_session__has_traces(session, "lock record"))
1678			goto out_delete;
1679
1680		if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
1681			pr_err("Initializing perf session tracepoint handlers failed\n");
1682			goto out_delete;
1683		}
1684
1685		if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
1686			pr_err("Initializing perf session tracepoint handlers failed\n");
1687			goto out_delete;
1688		}
1689	}
1690
1691	if (setup_output_field(false, output_fields))
1692		goto out_delete;
1693
1694	if (select_key(false))
1695		goto out_delete;
1696
1697	if (show_thread_stats)
1698		aggr_mode = LOCK_AGGR_TASK;
1699
1700	err = perf_session__process_events(session);
1701	if (err)
1702		goto out_delete;
1703
1704	setup_pager();
1705	if (display_info) /* used for info subcommand */
1706		err = dump_info();
1707	else {
1708		combine_result();
1709		sort_result();
1710		print_result();
1711	}
1712
1713out_delete:
1714	perf_session__delete(session);
1715	return err;
1716}
1717
1718static void sighandler(int sig __maybe_unused)
1719{
1720}
1721
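/*
 * 'perf lock contention' body: collect contention events either live
 * via BPF (-b/--use-bpf) or from a recorded perf.data file, then sort
 * and print the aggregated result.
 */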
1722static int __cmd_contention(int argc, const char **argv)
1723{
1724	int err = -EINVAL;
1725	struct perf_tool eops = {
1726		.attr		 = perf_event__process_attr,
1727		.event_update	 = process_event_update,
1728		.sample		 = process_sample_event,
1729		.comm		 = perf_event__process_comm,
1730		.mmap		 = perf_event__process_mmap,
1731		.tracing_data	 = perf_event__process_tracing_data,
1732		.ordered_events	 = true,
1733	};
1734	struct perf_data data = {
1735		.path  = input_name,
1736		.mode  = PERF_DATA_MODE_READ,
1737		.force = force,
1738	};
1739	struct lock_contention con = {
1740		.target = &target,
1741		.result = &lockhash_table[0],
1742		.map_nr_entries = bpf_map_entries,
1743		.max_stack = max_stack_depth,
1744		.stack_skip = stack_skip,
1745		.filters = &filters,
1746	};
1747
1748	session = perf_session__new(use_bpf ? NULL : &data, &eops);
1749	if (IS_ERR(session)) {
1750		pr_err("Initializing perf session failed\n");
1751		return PTR_ERR(session);
1752	}
1753
1754	con.machine = &session->machines.host;
1755
1756	con.aggr_mode = aggr_mode = show_thread_stats ? LOCK_AGGR_TASK :
1757		show_lock_addrs ? LOCK_AGGR_ADDR : LOCK_AGGR_CALLER;
1758
1759	/* for lock function check */
1760	symbol_conf.sort_by_name = true;
1761	symbol_conf.allow_aliases = true;
1762	symbol__init(&session->header.env);
1763
1764	if (use_bpf) {
1765		err = target__validate(&target);
1766		if (err) {
1767			char errbuf[512];
1768
1769			target__strerror(&target, err, errbuf, 512);
1770			pr_err("%s\n", errbuf);
1771			goto out_delete;
1772		}
1773
1774		signal(SIGINT, sighandler);
1775		signal(SIGCHLD, sighandler);
1776		signal(SIGTERM, sighandler);
1777
1778		con.evlist = evlist__new();
1779		if (con.evlist == NULL) {
1780			err = -ENOMEM;
1781			goto out_delete;
1782		}
1783
1784		err = evlist__create_maps(con.evlist, &target);
1785		if (err < 0)
1786			goto out_delete;
1787
1788		if (argc) {
1789			err = evlist__prepare_workload(con.evlist, &target,
1790						       argv, false, NULL);
1791			if (err < 0)
1792				goto out_delete;
1793		}
1794
1795		if (lock_contention_prepare(&con) < 0) {
1796			pr_err("lock contention BPF setup failed\n");
1797			goto out_delete;
1798		}
1799	} else if (!data.is_pipe) {
1800		if (!perf_session__has_traces(session, "lock record"))
1801			goto out_delete;
1802
1803		if (!evlist__find_evsel_by_str(session->evlist,
1804					       "lock:contention_begin")) {
1805			pr_err("lock contention evsel not found\n");
1806			goto out_delete;
1807		}
1808
1809		if (perf_session__set_tracepoints_handlers(session,
1810						contention_tracepoints)) {
1811			pr_err("Initializing perf session tracepoint handlers failed\n");
1812			goto out_delete;
1813		}
1814	}
1815
1816	if (setup_output_field(true, output_fields))
1817		goto out_delete;
1818
1819	if (select_key(true))
1820		goto out_delete;
1821
1822	if (use_bpf) {
1823		lock_contention_start();
1824		if (argc)
1825			evlist__start_workload(con.evlist);
1826
1827		/* wait for signal */
1828		pause();
1829
1830		lock_contention_stop();
1831		lock_contention_read(&con);
1832
1833		/* abuse bad hist stats for lost entries */
1834		bad_hist[BROKEN_CONTENDED] = con.lost;
1835	} else {
1836		err = perf_session__process_events(session);
1837		if (err)
1838			goto out_delete;
1839	}
1840
1841	setup_pager();
1842
1843	sort_contention_result();
1844	print_contention_result(&con);
1845
1846out_delete:
1847	lock_filter_finish();
1848	evlist__delete(con.evlist);
1849	lock_contention_finish();
1850	perf_session__delete(session);
1851	return err;
1852}
1853
1854
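/*
 * Build an argv for 'perf record': prefer the lockdep/lock_stat
 * tracepoints and fall back to the contention tracepoints (plus
 * callgraph arguments) when they are not available.
 */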
1855static int __cmd_record(int argc, const char **argv)
1856{
1857	const char *record_args[] = {
1858		"record", "-R", "-m", "1024", "-c", "1", "--synth", "task",
1859	};
1860	const char *callgraph_args[] = {
1861		"--call-graph", "fp," __stringify(CONTENTION_STACK_DEPTH),
1862	};
1863	unsigned int rec_argc, i, j, ret;
1864	unsigned int nr_tracepoints;
1865	unsigned int nr_callgraph_args = 0;
1866	const char **rec_argv;
1867	bool has_lock_stat = true;
1868
1869	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
1870		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
1871			pr_debug("tracepoint %s is not enabled. "
1872				 "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n",
1873				 lock_tracepoints[i].name);
1874			has_lock_stat = false;
1875			break;
1876		}
1877	}
1878
1879	if (has_lock_stat)
1880		goto setup_args;
1881
1882	for (i = 0; i < ARRAY_SIZE(contention_tracepoints); i++) {
1883		if (!is_valid_tracepoint(contention_tracepoints[i].name)) {
1884			pr_err("tracepoint %s is not enabled.\n",
1885			       contention_tracepoints[i].name);
1886			return 1;
1887		}
1888	}
1889
1890	nr_callgraph_args = ARRAY_SIZE(callgraph_args);
1891
1892setup_args:
1893	rec_argc = ARRAY_SIZE(record_args) + nr_callgraph_args + argc - 1;
1894
1895	if (has_lock_stat)
1896		nr_tracepoints = ARRAY_SIZE(lock_tracepoints);
1897	else
1898		nr_tracepoints = ARRAY_SIZE(contention_tracepoints);
1899
1900	/* factor of 2 is for -e in front of each tracepoint */
1901	rec_argc += 2 * nr_tracepoints;
1902
1903	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1904	if (!rec_argv)
1905		return -ENOMEM;
1906
1907	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1908		rec_argv[i] = strdup(record_args[i]);
1909
1910	for (j = 0; j < nr_tracepoints; j++) {
1911		const char *ev_name;
1912
1913		if (has_lock_stat)
1914			ev_name = strdup(lock_tracepoints[j].name);
1915		else
1916			ev_name = strdup(contention_tracepoints[j].name);
1917
1918		if (!ev_name)
1919			return -ENOMEM;
1920
1921		rec_argv[i++] = "-e";
1922		rec_argv[i++] = ev_name;
1923	}
1924
1925	for (j = 0; j < nr_callgraph_args; j++, i++)
1926		rec_argv[i] = callgraph_args[j];
1927
1928	for (j = 1; j < (unsigned int)argc; j++, i++)
1929		rec_argv[i] = argv[j];
1930
1931	BUG_ON(i != rec_argc);
1932
1933	ret = cmd_record(i, rec_argv);
1934	free(rec_argv);
1935	return ret;
1936}
1937
1938static int parse_map_entry(const struct option *opt, const char *str,
1939			    int unset __maybe_unused)
1940{
1941	unsigned long *len = (unsigned long *)opt->value;
1942	unsigned long val;
1943	char *endptr;
1944
1945	errno = 0;
1946	val = strtoul(str, &endptr, 0);
1947	if (*endptr != '\0' || errno != 0) {
1948		pr_err("invalid BPF map length: %s\n", str);
1949		return -1;
1950	}
1951
1952	*len = val;
1953	return 0;
1954}
1955
1956static int parse_max_stack(const struct option *opt, const char *str,
1957			   int unset __maybe_unused)
1958{
1959	unsigned long *len = (unsigned long *)opt->value;
1960	long val;
1961	char *endptr;
1962
1963	errno = 0;
1964	val = strtol(str, &endptr, 0);
1965	if (*endptr != '\0' || errno != 0) {
1966		pr_err("invalid max stack depth: %s\n", str);
1967		return -1;
1968	}
1969
1970	if (val < 0 || val > sysctl__max_stack()) {
1971		pr_err("invalid max stack depth: %ld\n", val);
1972		return -1;
1973	}
1974
1975	*len = val;
1976	return 0;
1977}
1978
1979static bool add_lock_type(unsigned int flags)
1980{
1981	unsigned int *tmp;
1982
1983	tmp = realloc(filters.types, (filters.nr_types + 1) * sizeof(*filters.types));
1984	if (tmp == NULL)
1985		return false;
1986
1987	tmp[filters.nr_types++] = flags;
1988	filters.types = tmp;
1989	return true;
1990}
1991
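/*
 * Parse the -Y/--type-filter list, e.g. "rwsem,mutex".  A name without
 * an :R/:W suffix selects both directions ("rwsem" adds rwsem:R and
 * rwsem:W), and "mutex" also pulls in the mutex-spin alias.
 */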
1992static int parse_lock_type(const struct option *opt __maybe_unused, const char *str,
1993			   int unset __maybe_unused)
1994{
1995	char *s, *tmp, *tok;
1996	int ret = 0;
1997
1998	s = strdup(str);
1999	if (s == NULL)
2000		return -1;
2001
2002	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2003		unsigned int flags = get_type_flag(tok);
2004
2005		if (flags == UINT_MAX) {
2006			char buf[32];
2007
2008			if (strchr(tok, ':'))
2009				continue;
2010
2011			/* try :R and :W suffixes for rwlock, rwsem, ... */
2012			scnprintf(buf, sizeof(buf), "%s:R", tok);
2013			flags = get_type_flag(buf);
2014			if (flags != UINT_MAX) {
2015				if (!add_lock_type(flags)) {
2016					ret = -1;
2017					break;
2018				}
2019			}
2020
2021			scnprintf(buf, sizeof(buf), "%s:W", tok);
2022			flags = get_type_flag(buf);
2023			if (flags != UINT_MAX) {
2024				if (!add_lock_type(flags)) {
2025					ret = -1;
2026					break;
2027				}
2028			}
2029			continue;
2030		}
2031
2032		if (!add_lock_type(flags)) {
2033			ret = -1;
2034			break;
2035		}
2036
2037		if (!strcmp(tok, "mutex")) {
2038			flags = get_type_flag("mutex-spin");
2039			if (flags != UINT_MAX) {
2040				if (!add_lock_type(flags)) {
2041					ret = -1;
2042					break;
2043				}
2044			}
2045		}
2046	}
2047
2048	free(s);
2049	return ret;
2050}
2051
2052static bool add_lock_addr(unsigned long addr)
2053{
2054	unsigned long *tmp;
2055
2056	tmp = realloc(filters.addrs, (filters.nr_addrs + 1) * sizeof(*filters.addrs));
2057	if (tmp == NULL) {
2058		pr_err("Memory allocation failure\n");
2059		return false;
2060	}
2061
2062	tmp[filters.nr_addrs++] = addr;
2063	filters.addrs = tmp;
2064	return true;
2065}
2066
2067static bool add_lock_sym(char *name)
2068{
2069	char **tmp;
2070	char *sym = strdup(name);
2071
2072	if (sym == NULL) {
2073		pr_err("Memory allocation failure\n");
2074		return false;
2075	}
2076
2077	tmp = realloc(filters.syms, (filters.nr_syms + 1) * sizeof(*filters.syms));
2078	if (tmp == NULL) {
2079		pr_err("Memory allocation failure\n");
2080		free(sym);
2081		return false;
2082	}
2083
2084	tmp[filters.nr_syms++] = sym;
2085	filters.syms = tmp;
2086	return true;
2087}
2088
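/*
 * Parse the -L/--lock-filter list: a hex token is taken as a lock
 * address directly, anything else is saved as a symbol name and
 * resolved against kernel symbols on the first contention event.
 */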
2089static int parse_lock_addr(const struct option *opt __maybe_unused, const char *str,
2090			   int unset __maybe_unused)
2091{
2092	char *s, *tmp, *tok;
2093	int ret = 0;
2094	u64 addr;
2095
2096	s = strdup(str);
2097	if (s == NULL)
2098		return -1;
2099
2100	for (tok = strtok_r(s, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) {
2101		char *end;
2102
2103		addr = strtoul(tok, &end, 16);
2104		if (*end == '\0') {
2105			if (!add_lock_addr(addr)) {
2106				ret = -1;
2107				break;
2108			}
2109			continue;
2110		}
2111
2112		/*
2113		 * At this moment, we don't have kernel symbols.  Save the symbols
2114		 * in a separate list and resolve them to addresses later.
2115		 */
2116		if (!add_lock_sym(tok)) {
2117			ret = -1;
2118			break;
2119		}
2120	}
2121
2122	free(s);
2123	return ret;
2124}
2125
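/*
 * Note: "record", "report" and "contention" are matched by prefix
 * (3 characters or more), while "script" and "info" must be spelled
 * out in full.  Typical usage:
 *
 *   perf lock record -- <workload>     # record lock events
 *   perf lock report -k wait_total     # sort report by total wait
 *   perf lock contention -a -b         # system-wide, live BPF mode
 */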
2126int cmd_lock(int argc, const char **argv)
2127{
2128	const struct option lock_options[] = {
2129	OPT_STRING('i', "input", &input_name, "file", "input file name"),
2130	OPT_INCR('v', "verbose", &verbose, "be more verbose (show symbol address, etc)"),
2131	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, "dump raw trace in ASCII"),
2132	OPT_BOOLEAN('f', "force", &force, "don't complain, do it"),
2133	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
2134		   "file", "vmlinux pathname"),
2135	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
2136		   "file", "kallsyms pathname"),
2137	OPT_BOOLEAN('q', "quiet", &quiet, "Do not show any warnings or messages"),
2138	OPT_END()
2139	};
2140
2141	const struct option info_options[] = {
2142	OPT_BOOLEAN('t', "threads", &info_threads,
2143		    "dump thread list in perf.data"),
2144	OPT_BOOLEAN('m', "map", &info_map,
2145		    "map of lock instances (address:name table)"),
2146	OPT_PARENT(lock_options)
2147	};
2148
2149	const struct option report_options[] = {
2150	OPT_STRING('k', "key", &sort_key, "acquired",
2151		    "key for sorting (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2152	OPT_STRING('F', "field", &output_fields, NULL,
2153		    "output fields (acquired / contended / avg_wait / wait_total / wait_max / wait_min)"),
2154	/* TODO: type */
2155	OPT_BOOLEAN('c', "combine-locks", &combine_locks,
2156		    "combine locks in the same class"),
2157	OPT_BOOLEAN('t', "threads", &show_thread_stats,
2158		    "show per-thread lock stats"),
2159	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2160	OPT_PARENT(lock_options)
2161	};
2162
2163	struct option contention_options[] = {
2164	OPT_STRING('k', "key", &sort_key, "wait_total",
2165		    "key for sorting (contended / wait_total / wait_max / wait_min / avg_wait)"),
2166	OPT_STRING('F', "field", &output_fields, "contended,wait_total,wait_max,avg_wait",
2167		    "output fields (contended / wait_total / wait_max / wait_min / avg_wait)"),
2168	OPT_BOOLEAN('t', "threads", &show_thread_stats,
2169		    "show per-thread lock stats"),
2170	OPT_BOOLEAN('b', "use-bpf", &use_bpf, "use BPF program to collect lock contention stats"),
2171	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
2172		    "System-wide collection from all CPUs"),
2173	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
2174		    "List of cpus to monitor"),
2175	OPT_STRING('p', "pid", &target.pid, "pid",
2176		   "Trace on existing process id"),
2177	OPT_STRING(0, "tid", &target.tid, "tid",
2178		   "Trace on existing thread id (exclusive to --pid)"),
2179	OPT_CALLBACK(0, "map-nr-entries", &bpf_map_entries, "num",
2180		     "Max number of BPF map entries", parse_map_entry),
2181	OPT_CALLBACK(0, "max-stack", &max_stack_depth, "num",
2182		     "Set the maximum stack depth when collecting lock contention, "
2183		     "Default: " __stringify(CONTENTION_STACK_DEPTH), parse_max_stack),
2184	OPT_INTEGER(0, "stack-skip", &stack_skip,
2185		    "Set the number of stack entries to skip when finding a lock caller, "
2186		    "Default: " __stringify(CONTENTION_STACK_SKIP)),
2187	OPT_INTEGER('E', "entries", &print_nr_entries, "display this many functions"),
2188	OPT_BOOLEAN('l', "lock-addr", &show_lock_addrs, "show lock stats by address"),
2189	OPT_CALLBACK('Y', "type-filter", NULL, "FLAGS",
2190		     "Filter specific type of locks", parse_lock_type),
2191	OPT_CALLBACK('L', "lock-filter", NULL, "ADDRS/NAMES",
2192		     "Filter specific address/symbol of locks", parse_lock_addr),
2193	OPT_PARENT(lock_options)
2194	};
2195
2196	const char * const info_usage[] = {
2197		"perf lock info [<options>]",
2198		NULL
2199	};
2200	const char *const lock_subcommands[] = { "record", "report", "script",
2201						 "info", "contention", NULL };
2202	const char *lock_usage[] = {
2203		NULL,
2204		NULL
2205	};
2206	const char * const report_usage[] = {
2207		"perf lock report [<options>]",
2208		NULL
2209	};
2210	const char * const contention_usage[] = {
2211		"perf lock contention [<options>]",
2212		NULL
2213	};
2214	unsigned int i;
2215	int rc = 0;
2216
2217	for (i = 0; i < LOCKHASH_SIZE; i++)
2218		INIT_HLIST_HEAD(lockhash_table + i);
2219
2220	argc = parse_options_subcommand(argc, argv, lock_options, lock_subcommands,
2221					lock_usage, PARSE_OPT_STOP_AT_NON_OPTION);
2222	if (!argc)
2223		usage_with_options(lock_usage, lock_options);
2224
2225	if (strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
2226		return __cmd_record(argc, argv);
2227	} else if (strlen(argv[0]) > 2 && strstarts("report", argv[0])) {
2228		trace_handler = &report_lock_ops;
2229		if (argc) {
2230			argc = parse_options(argc, argv,
2231					     report_options, report_usage, 0);
2232			if (argc)
2233				usage_with_options(report_usage, report_options);
2234		}
2235		rc = __cmd_report(false);
2236	} else if (!strcmp(argv[0], "script")) {
2237		/* Aliased to 'perf script' */
2238		return cmd_script(argc, argv);
2239	} else if (!strcmp(argv[0], "info")) {
2240		if (argc) {
2241			argc = parse_options(argc, argv,
2242					     info_options, info_usage, 0);
2243			if (argc)
2244				usage_with_options(info_usage, info_options);
2245		}
2246		/* recycling report_lock_ops */
2247		trace_handler = &report_lock_ops;
2248		rc = __cmd_report(true);
2249	} else if (strlen(argv[0]) > 2 && strstarts("contention", argv[0])) {
2250		trace_handler = &contention_lock_ops;
2251		sort_key = "wait_total";
2252		output_fields = "contended,wait_total,wait_max,avg_wait";
2253
2254#ifndef HAVE_BPF_SKEL
2255		set_option_nobuild(contention_options, 'b', "use-bpf",
2256				   "no BUILD_BPF_SKEL=1", false);
2257#endif
2258		if (argc) {
2259			argc = parse_options(argc, argv, contention_options,
2260					     contention_usage, 0);
2261		}
2262
2263		if (show_thread_stats && show_lock_addrs) {
2264			pr_err("Cannot use thread and addr mode together\n");
2265			parse_options_usage(contention_usage, contention_options,
2266					    "threads", 0);
2267			parse_options_usage(NULL, contention_options,
2268					    "lock-addr", 0);
2269			return -1;
2270		}
2271
2272		rc = __cmd_contention(argc, argv);
2273	} else {
2274		usage_with_options(lock_usage, lock_options);
2275	}
2276
2277	return rc;
2278}