Linux v4.6: tools/perf/builtin-kmem.c
   1#include "builtin.h"
   2#include "perf.h"
   3
   4#include "util/evlist.h"
   5#include "util/evsel.h"
   6#include "util/util.h"
   7#include "util/cache.h"
   8#include "util/symbol.h"
   9#include "util/thread.h"
  10#include "util/header.h"
  11#include "util/session.h"
  12#include "util/tool.h"
  13#include "util/callchain.h"
  14
  15#include <subcmd/parse-options.h>
  16#include "util/trace-event.h"
  17#include "util/data.h"
  18#include "util/cpumap.h"
  19
  20#include "util/debug.h"
  21
  22#include <linux/rbtree.h>
  23#include <linux/string.h>
  24#include <locale.h>
  25#include <regex.h>
  26
  27static int	kmem_slab;
  28static int	kmem_page;
  29
  30static long	kmem_page_size;
  31static enum {
  32	KMEM_SLAB,
  33	KMEM_PAGE,
  34} kmem_default = KMEM_SLAB;  /* for backward compatibility */
  35
  36struct alloc_stat;
  37typedef int (*sort_fn_t)(void *, void *);
  38
  39static int			alloc_flag;
  40static int			caller_flag;
  41
  42static int			alloc_lines = -1;
  43static int			caller_lines = -1;
  44
  45static bool			raw_ip;
  46
  47struct alloc_stat {
  48	u64	call_site;
  49	u64	ptr;
  50	u64	bytes_req;
  51	u64	bytes_alloc;
  52	u32	hit;
  53	u32	pingpong;
  54
  55	short	alloc_cpu;
  56
  57	struct rb_node node;
  58};
  59
  60static struct rb_root root_alloc_stat;
  61static struct rb_root root_alloc_sorted;
  62static struct rb_root root_caller_stat;
  63static struct rb_root root_caller_sorted;
  64
  65static unsigned long total_requested, total_allocated;
  66static unsigned long nr_allocs, nr_cross_allocs;
  67
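/*
 * Record a slab allocation: insert_alloc_stat() keys on the returned object
 * pointer (root_alloc_stat) and insert_caller_stat() below keys on the call
 * site (root_caller_stat); repeated hits accumulate bytes_req/bytes_alloc.
 */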
  68static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
  69			     int bytes_req, int bytes_alloc, int cpu)
  70{
  71	struct rb_node **node = &root_alloc_stat.rb_node;
  72	struct rb_node *parent = NULL;
  73	struct alloc_stat *data = NULL;
  74
  75	while (*node) {
  76		parent = *node;
  77		data = rb_entry(*node, struct alloc_stat, node);
  78
  79		if (ptr > data->ptr)
  80			node = &(*node)->rb_right;
  81		else if (ptr < data->ptr)
  82			node = &(*node)->rb_left;
  83		else
  84			break;
  85	}
  86
  87	if (data && data->ptr == ptr) {
  88		data->hit++;
  89		data->bytes_req += bytes_req;
  90		data->bytes_alloc += bytes_alloc;
  91	} else {
  92		data = malloc(sizeof(*data));
  93		if (!data) {
  94			pr_err("%s: malloc failed\n", __func__);
  95			return -1;
  96		}
  97		data->ptr = ptr;
  98		data->pingpong = 0;
  99		data->hit = 1;
 100		data->bytes_req = bytes_req;
 101		data->bytes_alloc = bytes_alloc;
 102
 103		rb_link_node(&data->node, parent, node);
 104		rb_insert_color(&data->node, &root_alloc_stat);
 105	}
 106	data->call_site = call_site;
 107	data->alloc_cpu = cpu;
 108	return 0;
 109}
 110
 111static int insert_caller_stat(unsigned long call_site,
 112			      int bytes_req, int bytes_alloc)
 113{
 114	struct rb_node **node = &root_caller_stat.rb_node;
 115	struct rb_node *parent = NULL;
 116	struct alloc_stat *data = NULL;
 117
 118	while (*node) {
 119		parent = *node;
 120		data = rb_entry(*node, struct alloc_stat, node);
 121
 122		if (call_site > data->call_site)
 123			node = &(*node)->rb_right;
 124		else if (call_site < data->call_site)
 125			node = &(*node)->rb_left;
 126		else
 127			break;
 128	}
 129
 130	if (data && data->call_site == call_site) {
 131		data->hit++;
 132		data->bytes_req += bytes_req;
 133		data->bytes_alloc += bytes_alloc;
 134	} else {
 135		data = malloc(sizeof(*data));
 136		if (!data) {
 137			pr_err("%s: malloc failed\n", __func__);
 138			return -1;
 139		}
 140		data->call_site = call_site;
 141		data->pingpong = 0;
 142		data->hit = 1;
 143		data->bytes_req = bytes_req;
 144		data->bytes_alloc = bytes_alloc;
 145
 146		rb_link_node(&data->node, parent, node);
 147		rb_insert_color(&data->node, &root_caller_stat);
 148	}
 149
 150	return 0;
 151}
 152
 153static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
 154					   struct perf_sample *sample)
 155{
 156	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
 157		      call_site = perf_evsel__intval(evsel, sample, "call_site");
 158	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
 159	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
 160
 161	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
 162	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
 163		return -1;
 164
 165	total_requested += bytes_req;
 166	total_allocated += bytes_alloc;
 167
 168	nr_allocs++;
 169	return 0;
 170}
 171
 172static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
 173						struct perf_sample *sample)
 174{
 175	int ret = perf_evsel__process_alloc_event(evsel, sample);
 176
 177	if (!ret) {
 178		int node1 = cpu__get_node(sample->cpu),
 179		    node2 = perf_evsel__intval(evsel, sample, "node");
 180
 181		if (node1 != node2)
 182			nr_cross_allocs++;
 183	}
 184
 185	return ret;
 186}
 187
 188static int ptr_cmp(void *, void *);
 189static int slab_callsite_cmp(void *, void *);
 190
 191static struct alloc_stat *search_alloc_stat(unsigned long ptr,
 192					    unsigned long call_site,
 193					    struct rb_root *root,
 194					    sort_fn_t sort_fn)
 195{
 196	struct rb_node *node = root->rb_node;
 197	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
 198
 199	while (node) {
 200		struct alloc_stat *data;
 201		int cmp;
 202
 203		data = rb_entry(node, struct alloc_stat, node);
 204
 205		cmp = sort_fn(&key, data);
 206		if (cmp < 0)
 207			node = node->rb_left;
 208		else if (cmp > 0)
 209			node = node->rb_right;
 210		else
 211			return data;
 212	}
 213	return NULL;
 214}
 215
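/*
 * A kfree/kmem_cache_free sample: look up the allocation by pointer; a free
 * issued on a different CPU than the allocation counts as a ping-pong for
 * both the pointer entry and its call-site entry.
 */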
 216static int perf_evsel__process_free_event(struct perf_evsel *evsel,
 217					  struct perf_sample *sample)
 218{
 219	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
 220	struct alloc_stat *s_alloc, *s_caller;
 221
 222	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
 223	if (!s_alloc)
 224		return 0;
 225
 226	if ((short)sample->cpu != s_alloc->alloc_cpu) {
 227		s_alloc->pingpong++;
 228
 229		s_caller = search_alloc_stat(0, s_alloc->call_site,
 230					     &root_caller_stat,
 231					     slab_callsite_cmp);
 232		if (!s_caller)
 233			return -1;
 234		s_caller->pingpong++;
 235	}
 236	s_alloc->alloc_cpu = -1;
 237
 238	return 0;
 239}
 240
 241static u64 total_page_alloc_bytes;
 242static u64 total_page_free_bytes;
 243static u64 total_page_nomatch_bytes;
 244static u64 total_page_fail_bytes;
 245static unsigned long nr_page_allocs;
 246static unsigned long nr_page_frees;
 247static unsigned long nr_page_fails;
 248static unsigned long nr_page_nomatch;
 249
 250static bool use_pfn;
 251static bool live_page;
 252static struct perf_session *kmem_session;
 253
 254#define MAX_MIGRATE_TYPES  6
 255#define MAX_PAGE_ORDER     11
 256
 257static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
 258
 259struct page_stat {
 260	struct rb_node 	node;
 261	u64 		page;
 262	u64 		callsite;
 263	int 		order;
 264	unsigned 	gfp_flags;
 265	unsigned 	migrate_type;
 266	u64		alloc_bytes;
 267	u64 		free_bytes;
 268	int 		nr_alloc;
 269	int 		nr_free;
 270};
 271
 272static struct rb_root page_live_tree;
 273static struct rb_root page_alloc_tree;
 274static struct rb_root page_alloc_sorted;
 275static struct rb_root page_caller_tree;
 276static struct rb_root page_caller_sorted;
 277
 278struct alloc_func {
 279	u64 start;
 280	u64 end;
 281	char *name;
 282};
 283
 284static int nr_alloc_funcs;
 285static struct alloc_func *alloc_func_list;
 286
 287static int funcmp(const void *a, const void *b)
 288{
 289	const struct alloc_func *fa = a;
 290	const struct alloc_func *fb = b;
 291
 292	if (fa->start > fb->start)
 293		return 1;
 294	else
 295		return -1;
 296}
 297
 298static int callcmp(const void *a, const void *b)
 299{
 300	const struct alloc_func *fa = a;
 301	const struct alloc_func *fb = b;
 302
 303	if (fb->start <= fa->start && fa->end < fb->end)
 304		return 0;
 305
 306	if (fa->start > fb->start)
 307		return 1;
 308	else
 309		return -1;
 310}
 311
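/*
 * Collect kernel symbols matching the allocator-entry regex into a sorted
 * table, so callchain walking can skip over allocator frames.
 */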
 312static int build_alloc_func_list(void)
 313{
 314	int ret;
 315	struct map *kernel_map;
 316	struct symbol *sym;
 317	struct rb_node *node;
 318	struct alloc_func *func;
 319	struct machine *machine = &kmem_session->machines.host;
 320	regex_t alloc_func_regex;
 321	const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
 322
 323	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
 324	if (ret) {
 325		char err[BUFSIZ];
 326
 327		regerror(ret, &alloc_func_regex, err, sizeof(err));
 328		pr_err("Invalid regex: %s\n%s", pattern, err);
 329		return -EINVAL;
 330	}
 331
 332	kernel_map = machine__kernel_map(machine);
 333	if (map__load(kernel_map, NULL) < 0) {
 334		pr_err("cannot load kernel map\n");
 335		return -ENOENT;
 336	}
 337
 338	map__for_each_symbol(kernel_map, sym, node) {
 339		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
 340			continue;
 341
 342		func = realloc(alloc_func_list,
 343			       (nr_alloc_funcs + 1) * sizeof(*func));
 344		if (func == NULL)
 345			return -ENOMEM;
 346
 347		pr_debug("alloc func: %s\n", sym->name);
 348		func[nr_alloc_funcs].start = sym->start;
 349		func[nr_alloc_funcs].end   = sym->end;
 350		func[nr_alloc_funcs].name  = sym->name;
 351
 352		alloc_func_list = func;
 353		nr_alloc_funcs++;
 354	}
 355
 356	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
 357
 358	regfree(&alloc_func_regex);
 359	return 0;
 360}
 361
 362/*
 363 * Find first non-memory allocation function from callchain.
 364 * The allocation functions are in the 'alloc_func_list'.
 365 */
 366static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
 367{
 368	struct addr_location al;
 369	struct machine *machine = &kmem_session->machines.host;
 370	struct callchain_cursor_node *node;
 371
 372	if (alloc_func_list == NULL) {
 373		if (build_alloc_func_list() < 0)
 374			goto out;
 375	}
 376
 377	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
 378	sample__resolve_callchain(sample, NULL, evsel, &al, 16);
 379
 380	callchain_cursor_commit(&callchain_cursor);
 381	while (true) {
 382		struct alloc_func key, *caller;
 383		u64 addr;
 384
 385		node = callchain_cursor_current(&callchain_cursor);
 386		if (node == NULL)
 387			break;
 388
 389		key.start = key.end = node->ip;
 390		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
 391				 sizeof(key), callcmp);
 392		if (!caller) {
 393			/* found */
 394			if (node->map)
 395				addr = map__unmap_ip(node->map, node->ip);
 396			else
 397				addr = node->ip;
 398
 399			return addr;
 400		} else
 401			pr_debug3("skipping alloc function: %s\n", caller->name);
 402
 403		callchain_cursor_advance(&callchain_cursor);
 404	}
 405
 406out:
 407	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
 408	return sample->ip;
 409}
 410
 411struct sort_dimension {
 412	const char		name[20];
 413	sort_fn_t		cmp;
 414	struct list_head	list;
 415};
 416
 417static LIST_HEAD(page_alloc_sort_input);
 418static LIST_HEAD(page_caller_sort_input);
 419
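/*
 * Lookup helpers for the page trees: the 'find' variants only search, the
 * 'findnew' variants also insert.  The live tree is keyed by page/pfn; the
 * alloc and caller trees compare entries with the configured sort keys.
 */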
 420static struct page_stat *
 421__page_stat__findnew_page(struct page_stat *pstat, bool create)
 422{
 423	struct rb_node **node = &page_live_tree.rb_node;
 424	struct rb_node *parent = NULL;
 425	struct page_stat *data;
 426
 427	while (*node) {
 428		s64 cmp;
 429
 430		parent = *node;
 431		data = rb_entry(*node, struct page_stat, node);
 432
 433		cmp = data->page - pstat->page;
 434		if (cmp < 0)
 435			node = &parent->rb_left;
 436		else if (cmp > 0)
 437			node = &parent->rb_right;
 438		else
 439			return data;
 440	}
 441
 442	if (!create)
 443		return NULL;
 444
 445	data = zalloc(sizeof(*data));
 446	if (data != NULL) {
 447		data->page = pstat->page;
 448		data->order = pstat->order;
 449		data->gfp_flags = pstat->gfp_flags;
 450		data->migrate_type = pstat->migrate_type;
 451
 452		rb_link_node(&data->node, parent, node);
 453		rb_insert_color(&data->node, &page_live_tree);
 454	}
 455
 456	return data;
 457}
 458
 459static struct page_stat *page_stat__find_page(struct page_stat *pstat)
 460{
 461	return __page_stat__findnew_page(pstat, false);
 462}
 463
 464static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
 465{
 466	return __page_stat__findnew_page(pstat, true);
 467}
 468
 469static struct page_stat *
 470__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
 471{
 472	struct rb_node **node = &page_alloc_tree.rb_node;
 473	struct rb_node *parent = NULL;
 474	struct page_stat *data;
 475	struct sort_dimension *sort;
 476
 477	while (*node) {
 478		int cmp = 0;
 479
 480		parent = *node;
 481		data = rb_entry(*node, struct page_stat, node);
 482
 483		list_for_each_entry(sort, &page_alloc_sort_input, list) {
 484			cmp = sort->cmp(pstat, data);
 485			if (cmp)
 486				break;
 487		}
 488
 489		if (cmp < 0)
 490			node = &parent->rb_left;
 491		else if (cmp > 0)
 492			node = &parent->rb_right;
 493		else
 494			return data;
 495	}
 496
 497	if (!create)
 498		return NULL;
 499
 500	data = zalloc(sizeof(*data));
 501	if (data != NULL) {
 502		data->page = pstat->page;
 503		data->order = pstat->order;
 504		data->gfp_flags = pstat->gfp_flags;
 505		data->migrate_type = pstat->migrate_type;
 506
 507		rb_link_node(&data->node, parent, node);
 508		rb_insert_color(&data->node, &page_alloc_tree);
 509	}
 510
 511	return data;
 512}
 513
 514static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
 515{
 516	return __page_stat__findnew_alloc(pstat, false);
 517}
 518
 519static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
 520{
 521	return __page_stat__findnew_alloc(pstat, true);
 522}
 523
 524static struct page_stat *
 525__page_stat__findnew_caller(struct page_stat *pstat, bool create)
 526{
 527	struct rb_node **node = &page_caller_tree.rb_node;
 528	struct rb_node *parent = NULL;
 529	struct page_stat *data;
 530	struct sort_dimension *sort;
 531
 532	while (*node) {
 533		int cmp = 0;
 534
 535		parent = *node;
 536		data = rb_entry(*node, struct page_stat, node);
 537
 538		list_for_each_entry(sort, &page_caller_sort_input, list) {
 539			cmp = sort->cmp(pstat, data);
 540			if (cmp)
 541				break;
 542		}
 543
 544		if (cmp < 0)
 545			node = &parent->rb_left;
 546		else if (cmp > 0)
 547			node = &parent->rb_right;
 548		else
 549			return data;
 550	}
 551
 552	if (!create)
 553		return NULL;
 554
 555	data = zalloc(sizeof(*data));
 556	if (data != NULL) {
 557		data->callsite = pstat->callsite;
 558		data->order = pstat->order;
 559		data->gfp_flags = pstat->gfp_flags;
 560		data->migrate_type = pstat->migrate_type;
 561
 562		rb_link_node(&data->node, parent, node);
 563		rb_insert_color(&data->node, &page_caller_tree);
 564	}
 565
 566	return data;
 567}
 568
 569static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
 570{
 571	return __page_stat__findnew_caller(pstat, false);
 572}
 573
 574static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
 575{
 576	return __page_stat__findnew_caller(pstat, true);
 577}
 578
 579static bool valid_page(u64 pfn_or_page)
 580{
 581	if (use_pfn && pfn_or_page == -1UL)
 582		return false;
 583	if (!use_pfn && pfn_or_page == 0)
 584		return false;
 585	return true;
 586}
 587
 588struct gfp_flag {
 589	unsigned int flags;
 590	char *compact_str;
 591	char *human_readable;
 592};
 593
 594static struct gfp_flag *gfps;
 595static int nr_gfps;
 596
 597static int gfpcmp(const void *a, const void *b)
 598{
 599	const struct gfp_flag *fa = a;
 600	const struct gfp_flag *fb = b;
 601
 602	return fa->flags - fb->flags;
 603}
 604
 605/* see include/trace/events/mmflags.h */
 606static const struct {
 607	const char *original;
 608	const char *compact;
 609} gfp_compact_table[] = {
 610	{ "GFP_TRANSHUGE",		"THP" },
 611	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
 612	{ "GFP_HIGHUSER",		"HU" },
 613	{ "GFP_USER",			"U" },
 614	{ "GFP_TEMPORARY",		"TMP" },
 615	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
 616	{ "GFP_KERNEL",			"K" },
 617	{ "GFP_NOFS",			"NF" },
 618	{ "GFP_ATOMIC",			"A" },
 619	{ "GFP_NOIO",			"NI" },
 620	{ "GFP_NOWAIT",			"NW" },
 621	{ "GFP_DMA",			"D" },
 622	{ "__GFP_HIGHMEM",		"HM" },
 623	{ "GFP_DMA32",			"D32" },
 624	{ "__GFP_HIGH",			"H" },
 625	{ "__GFP_ATOMIC",		"_A" },
 626	{ "__GFP_IO",			"I" },
 627	{ "__GFP_FS",			"F" },
 628	{ "__GFP_COLD",			"CO" },
 629	{ "__GFP_NOWARN",		"NWR" },
 630	{ "__GFP_REPEAT",		"R" },
 631	{ "__GFP_NOFAIL",		"NF" },
 632	{ "__GFP_NORETRY",		"NR" },
 633	{ "__GFP_COMP",			"C" },
 634	{ "__GFP_ZERO",			"Z" },
 635	{ "__GFP_NOMEMALLOC",		"NMA" },
 636	{ "__GFP_MEMALLOC",		"MA" },
 637	{ "__GFP_HARDWALL",		"HW" },
 638	{ "__GFP_THISNODE",		"TN" },
 639	{ "__GFP_RECLAIMABLE",		"RC" },
 640	{ "__GFP_MOVABLE",		"M" },
 641	{ "__GFP_ACCOUNT",		"AC" },
 642	{ "__GFP_NOTRACK",		"NT" },
 643	{ "__GFP_WRITE",		"WR" },
 644	{ "__GFP_RECLAIM",		"R" },
 645	{ "__GFP_DIRECT_RECLAIM",	"DR" },
 646	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
 647	{ "__GFP_OTHER_NODE",		"ON" },
 648};
 649
 650static size_t max_gfp_len;
 651
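/*
 * Rewrite a '|'-separated GFP flag string using the short codes from
 * gfp_compact_table, remembering the longest result for column alignment.
 */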
 652static char *compact_gfp_flags(char *gfp_flags)
 653{
 654	char *orig_flags = strdup(gfp_flags);
 655	char *new_flags = NULL;
 656	char *str, *pos = NULL;
 657	size_t len = 0;
 658
 659	if (orig_flags == NULL)
 660		return NULL;
 661
 662	str = strtok_r(orig_flags, "|", &pos);
 663	while (str) {
 664		size_t i;
 665		char *new;
 666		const char *cpt;
 667
 668		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
 669			if (strcmp(gfp_compact_table[i].original, str))
 670				continue;
 671
 672			cpt = gfp_compact_table[i].compact;
 673			new = realloc(new_flags, len + strlen(cpt) + 2);
 674			if (new == NULL) {
 675				free(new_flags);
 676				return NULL;
 677			}
 678
 679			new_flags = new;
 680
 681			if (!len) {
 682				strcpy(new_flags, cpt);
 683			} else {
 684				strcat(new_flags, "|");
 685				strcat(new_flags, cpt);
 686				len++;
 687			}
 688
 689			len += strlen(cpt);
 690		}
 691
 692		str = strtok_r(NULL, "|", &pos);
 693	}
 694
 695	if (max_gfp_len < len)
 696		max_gfp_len = len;
 697
 698	free(orig_flags);
 699	return new_flags;
 700}
 701
 702static char *compact_gfp_string(unsigned long gfp_flags)
 703{
 704	struct gfp_flag key = {
 705		.flags = gfp_flags,
 706	};
 707	struct gfp_flag *gfp;
 708
 709	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 710	if (gfp)
 711		return gfp->compact_str;
 712
 713	return NULL;
 714}
 715
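/*
 * On the first sample with a given gfp_flags value, pretty-print the raw
 * tracepoint record to recover the flag names and cache both the full and
 * the compacted strings in the sorted gfps[] array.
 */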
 716static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
 717			   unsigned int gfp_flags)
 718{
 719	struct pevent_record record = {
 720		.cpu = sample->cpu,
 721		.data = sample->raw_data,
 722		.size = sample->raw_size,
 723	};
 724	struct trace_seq seq;
 725	char *str, *pos = NULL;
 726
 727	if (nr_gfps) {
 728		struct gfp_flag key = {
 729			.flags = gfp_flags,
 730		};
 731
 732		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
 733			return 0;
 734	}
 735
 736	trace_seq_init(&seq);
 737	pevent_event_info(&seq, evsel->tp_format, &record);
 738
 739	str = strtok_r(seq.buffer, " ", &pos);
 740	while (str) {
 741		if (!strncmp(str, "gfp_flags=", 10)) {
 742			struct gfp_flag *new;
 743
 744			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
 745			if (new == NULL)
 746				return -ENOMEM;
 747
 748			gfps = new;
 749			new += nr_gfps++;
 750
 751			new->flags = gfp_flags;
 752			new->human_readable = strdup(str + 10);
 753			new->compact_str = compact_gfp_flags(str + 10);
 754			if (!new->human_readable || !new->compact_str)
 755				return -ENOMEM;
 756
 757			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 758		}
 759
 760		str = strtok_r(NULL, " ", &pos);
 761	}
 762
 763	trace_seq_destroy(&seq);
 764	return 0;
 765}
 766
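/*
 * Account an mm_page_alloc sample: bytes = page_size << order, charged to
 * the live-page tree, the allocation tree (when not in --live mode) and the
 * call-site tree, plus the order/migrate-type matrix.
 */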
 767static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
 768						struct perf_sample *sample)
 769{
 770	u64 page;
 771	unsigned int order = perf_evsel__intval(evsel, sample, "order");
 772	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
 773	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
 774						       "migratetype");
 775	u64 bytes = kmem_page_size << order;
 776	u64 callsite;
 777	struct page_stat *pstat;
 778	struct page_stat this = {
 779		.order = order,
 780		.gfp_flags = gfp_flags,
 781		.migrate_type = migrate_type,
 782	};
 783
 784	if (use_pfn)
 785		page = perf_evsel__intval(evsel, sample, "pfn");
 786	else
 787		page = perf_evsel__intval(evsel, sample, "page");
 788
 789	nr_page_allocs++;
 790	total_page_alloc_bytes += bytes;
 791
 792	if (!valid_page(page)) {
 793		nr_page_fails++;
 794		total_page_fail_bytes += bytes;
 795
 796		return 0;
 797	}
 798
 799	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
 800		return -1;
 801
 802	callsite = find_callsite(evsel, sample);
 803
 804	/*
 805	 * This is to find the current page (with correct gfp flags and
 806	 * migrate type) at free event.
 807	 */
 808	this.page = page;
 809	pstat = page_stat__findnew_page(&this);
 810	if (pstat == NULL)
 811		return -ENOMEM;
 812
 813	pstat->nr_alloc++;
 814	pstat->alloc_bytes += bytes;
 815	pstat->callsite = callsite;
 816
 817	if (!live_page) {
 818		pstat = page_stat__findnew_alloc(&this);
 819		if (pstat == NULL)
 820			return -ENOMEM;
 821
 822		pstat->nr_alloc++;
 823		pstat->alloc_bytes += bytes;
 824		pstat->callsite = callsite;
 825	}
 826
 827	this.callsite = callsite;
 828	pstat = page_stat__findnew_caller(&this);
 829	if (pstat == NULL)
 830		return -ENOMEM;
 831
 832	pstat->nr_alloc++;
 833	pstat->alloc_bytes += bytes;
 834
 835	order_stats[order][migrate_type]++;
 836
 837	return 0;
 838}
 839
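/*
 * Account an mm_page_free sample: find the matching allocation in the live
 * tree to recover its gfp flags, migrate type and call site; frees with no
 * matching allocation are counted separately as 'nomatch'.
 */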
 840static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
 841						struct perf_sample *sample)
 842{
 843	u64 page;
 844	unsigned int order = perf_evsel__intval(evsel, sample, "order");
 845	u64 bytes = kmem_page_size << order;
 846	struct page_stat *pstat;
 847	struct page_stat this = {
 848		.order = order,
 849	};
 850
 851	if (use_pfn)
 852		page = perf_evsel__intval(evsel, sample, "pfn");
 853	else
 854		page = perf_evsel__intval(evsel, sample, "page");
 855
 856	nr_page_frees++;
 857	total_page_free_bytes += bytes;
 858
 859	this.page = page;
 860	pstat = page_stat__find_page(&this);
 861	if (pstat == NULL) {
 862		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
 863			  page, order);
 864
 865		nr_page_nomatch++;
 866		total_page_nomatch_bytes += bytes;
 867
 868		return 0;
 869	}
 870
 871	this.gfp_flags = pstat->gfp_flags;
 872	this.migrate_type = pstat->migrate_type;
 873	this.callsite = pstat->callsite;
 874
 875	rb_erase(&pstat->node, &page_live_tree);
 876	free(pstat);
 877
 878	if (live_page) {
 879		order_stats[this.order][this.migrate_type]--;
 880	} else {
 881		pstat = page_stat__find_alloc(&this);
 882		if (pstat == NULL)
 883			return -ENOMEM;
 884
 885		pstat->nr_free++;
 886		pstat->free_bytes += bytes;
 887	}
 888
 889	pstat = page_stat__find_caller(&this);
 890	if (pstat == NULL)
 891		return -ENOENT;
 892
 893	pstat->nr_free++;
 894	pstat->free_bytes += bytes;
 895
 896	if (live_page) {
 897		pstat->nr_alloc--;
 898		pstat->alloc_bytes -= bytes;
 899
 900		if (pstat->nr_alloc == 0) {
 901			rb_erase(&pstat->node, &page_caller_tree);
 902			free(pstat);
 903		}
 904	}
 905
 906	return 0;
 907}
 908
 909typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
 910				  struct perf_sample *sample);
 911
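/* Dispatch each sample to the handler registered for its tracepoint. */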
 912static int process_sample_event(struct perf_tool *tool __maybe_unused,
 913				union perf_event *event,
 914				struct perf_sample *sample,
 915				struct perf_evsel *evsel,
 916				struct machine *machine)
 917{
 918	int err = 0;
 919	struct thread *thread = machine__findnew_thread(machine, sample->pid,
 920							sample->tid);
 921
 922	if (thread == NULL) {
 923		pr_debug("problem processing %d event, skipping it.\n",
 924			 event->header.type);
 925		return -1;
 926	}
 927
 928	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
 929
 930	if (evsel->handler != NULL) {
 931		tracepoint_handler f = evsel->handler;
 932		err = f(evsel, sample);
 933	}
 934
 935	thread__put(thread);
 936
 937	return err;
 938}
 939
 940static struct perf_tool perf_kmem = {
 941	.sample		 = process_sample_event,
 942	.comm		 = perf_event__process_comm,
 943	.mmap		 = perf_event__process_mmap,
 944	.mmap2		 = perf_event__process_mmap2,
 945	.ordered_events	 = true,
 946};
 947
 948static double fragmentation(unsigned long n_req, unsigned long n_alloc)
 949{
 950	if (n_alloc == 0)
 951		return 0.0;
 952	else
 953		return 100.0 - (100.0 * n_req / n_alloc);
 954}
 955
 956static void __print_slab_result(struct rb_root *root,
 957				struct perf_session *session,
 958				int n_lines, int is_caller)
 959{
 960	struct rb_node *next;
 961	struct machine *machine = &session->machines.host;
 962
 963	printf("%.105s\n", graph_dotted_line);
 964	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
 965	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
 966	printf("%.105s\n", graph_dotted_line);
 967
 968	next = rb_first(root);
 969
 970	while (next && n_lines--) {
 971		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
 972						   node);
 973		struct symbol *sym = NULL;
 974		struct map *map;
 975		char buf[BUFSIZ];
 976		u64 addr;
 977
 978		if (is_caller) {
 979			addr = data->call_site;
 980			if (!raw_ip)
 981				sym = machine__find_kernel_function(machine, addr, &map, NULL);
 982		} else
 983			addr = data->ptr;
 984
 985		if (sym != NULL)
 986			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
 987				 addr - map->unmap_ip(map, sym->start));
 988		else
 989			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
 990		printf(" %-34s |", buf);
 991
 992		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
 993		       (unsigned long long)data->bytes_alloc,
 994		       (unsigned long)data->bytes_alloc / data->hit,
 995		       (unsigned long long)data->bytes_req,
 996		       (unsigned long)data->bytes_req / data->hit,
 997		       (unsigned long)data->hit,
 998		       (unsigned long)data->pingpong,
 999		       fragmentation(data->bytes_req, data->bytes_alloc));
1000
1001		next = rb_next(next);
1002	}
1003
1004	if (n_lines == -1)
1005		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");
1006
1007	printf("%.105s\n", graph_dotted_line);
1008}
1009
1010static const char * const migrate_type_str[] = {
1011	"UNMOVABL",
1012	"RECLAIM",
1013	"MOVABLE",
1014	"RESERVED",
1015	"CMA/ISLT",
1016	"UNKNOWN",
1017};
1018
1019static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1020{
1021	struct rb_node *next = rb_first(&page_alloc_sorted);
1022	struct machine *machine = &session->machines.host;
1023	const char *format;
1024	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1025
1026	printf("\n%.105s\n", graph_dotted_line);
1027	printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1028	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
1029	       gfp_len, "GFP flags");
1030	printf("%.105s\n", graph_dotted_line);
1031
1032	if (use_pfn)
1033		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1034	else
1035		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1036
1037	while (next && n_lines--) {
1038		struct page_stat *data;
1039		struct symbol *sym;
1040		struct map *map;
1041		char buf[32];
1042		char *caller = buf;
1043
1044		data = rb_entry(next, struct page_stat, node);
1045		sym = machine__find_kernel_function(machine, data->callsite,
1046						    &map, NULL);
1047		if (sym && sym->name)
1048			caller = sym->name;
1049		else
1050			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1051
1052		printf(format, (unsigned long long)data->page,
1053		       (unsigned long long)data->alloc_bytes / 1024,
1054		       data->nr_alloc, data->order,
1055		       migrate_type_str[data->migrate_type],
1056		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1057
1058		next = rb_next(next);
1059	}
1060
1061	if (n_lines == -1) {
1062		printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
1063		       gfp_len, "...");
1064	}
1065
1066	printf("%.105s\n", graph_dotted_line);
1067}
1068
1069static void __print_page_caller_result(struct perf_session *session, int n_lines)
1070{
1071	struct rb_node *next = rb_first(&page_caller_sorted);
1072	struct machine *machine = &session->machines.host;
1073	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1074
1075	printf("\n%.105s\n", graph_dotted_line);
1076	printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1077	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
1078	printf("%.105s\n", graph_dotted_line);
1079
1080	while (next && n_lines--) {
1081		struct page_stat *data;
1082		struct symbol *sym;
1083		struct map *map;
1084		char buf[32];
1085		char *caller = buf;
1086
1087		data = rb_entry(next, struct page_stat, node);
1088		sym = machine__find_kernel_function(machine, data->callsite,
1089						    &map, NULL);
1090		if (sym && sym->name)
1091			caller = sym->name;
1092		else
1093			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1094
1095		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
1096		       (unsigned long long)data->alloc_bytes / 1024,
1097		       data->nr_alloc, data->order,
1098		       migrate_type_str[data->migrate_type],
1099		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1100
1101		next = rb_next(next);
1102	}
1103
1104	if (n_lines == -1) {
1105		printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
1106		       gfp_len, "...");
1107	}
1108
1109	printf("%.105s\n", graph_dotted_line);
1110}
1111
1112static void print_gfp_flags(void)
1113{
1114	int i;
1115
1116	printf("#\n");
1117	printf("# GFP flags\n");
1118	printf("# ---------\n");
1119	for (i = 0; i < nr_gfps; i++) {
1120		printf("# %08x: %*s: %s\n", gfps[i].flags,
1121		       (int) max_gfp_len, gfps[i].compact_str,
1122		       gfps[i].human_readable);
1123	}
1124}
1125
1126static void print_slab_summary(void)
1127{
1128	printf("\nSUMMARY (SLAB allocator)");
1129	printf("\n========================\n");
1130	printf("Total bytes requested: %'lu\n", total_requested);
1131	printf("Total bytes allocated: %'lu\n", total_allocated);
1132	printf("Total bytes wasted on internal fragmentation: %'lu\n",
1133	       total_allocated - total_requested);
1134	printf("Internal fragmentation: %f%%\n",
1135	       fragmentation(total_requested, total_allocated));
1136	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
1137}
1138
1139static void print_page_summary(void)
1140{
1141	int o, m;
1142	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
1143	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
1144
1145	printf("\nSUMMARY (page allocator)");
1146	printf("\n========================\n");
1147	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
1148	       nr_page_allocs, total_page_alloc_bytes / 1024);
1149	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
1150	       nr_page_frees, total_page_free_bytes / 1024);
1151	printf("\n");
1152
1153	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
1154	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
1155	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
1156	       nr_page_allocs - nr_alloc_freed,
1157	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
1158	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
1159	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
1160	printf("\n");
1161
1162	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
1163	       nr_page_fails, total_page_fail_bytes / 1024);
1164	printf("\n");
1165
1166	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
1167	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
1168	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
1169	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
1170	       graph_dotted_line, graph_dotted_line);
1171
1172	for (o = 0; o < MAX_PAGE_ORDER; o++) {
1173		printf("%5d", o);
1174		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
1175			if (order_stats[o][m])
1176				printf("  %'12d", order_stats[o][m]);
1177			else
1178				printf("  %12c", '.');
1179		}
1180		printf("\n");
1181	}
1182}
1183
1184static void print_slab_result(struct perf_session *session)
1185{
1186	if (caller_flag)
1187		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
1188	if (alloc_flag)
1189		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
1190	print_slab_summary();
1191}
1192
1193static void print_page_result(struct perf_session *session)
1194{
1195	if (caller_flag || alloc_flag)
1196		print_gfp_flags();
1197	if (caller_flag)
1198		__print_page_caller_result(session, caller_lines);
1199	if (alloc_flag)
1200		__print_page_alloc_result(session, alloc_lines);
1201	print_page_summary();
1202}
1203
1204static void print_result(struct perf_session *session)
1205{
1206	if (kmem_slab)
1207		print_slab_result(session);
1208	if (kmem_page)
1209		print_page_result(session);
1210}
1211
1212static LIST_HEAD(slab_caller_sort);
1213static LIST_HEAD(slab_alloc_sort);
1214static LIST_HEAD(page_caller_sort);
1215static LIST_HEAD(page_alloc_sort);
1216
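/*
 * sort_result() below drains the stat trees into the *_sorted trees,
 * re-inserting each node according to the user-selected sort keys.
 */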
1217static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
1218			     struct list_head *sort_list)
1219{
1220	struct rb_node **new = &(root->rb_node);
1221	struct rb_node *parent = NULL;
1222	struct sort_dimension *sort;
1223
1224	while (*new) {
1225		struct alloc_stat *this;
1226		int cmp = 0;
1227
1228		this = rb_entry(*new, struct alloc_stat, node);
1229		parent = *new;
1230
1231		list_for_each_entry(sort, sort_list, list) {
1232			cmp = sort->cmp(data, this);
1233			if (cmp)
1234				break;
1235		}
1236
1237		if (cmp > 0)
1238			new = &((*new)->rb_left);
1239		else
1240			new = &((*new)->rb_right);
1241	}
1242
1243	rb_link_node(&data->node, parent, new);
1244	rb_insert_color(&data->node, root);
1245}
1246
1247static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1248			       struct list_head *sort_list)
1249{
1250	struct rb_node *node;
1251	struct alloc_stat *data;
1252
1253	for (;;) {
1254		node = rb_first(root);
1255		if (!node)
1256			break;
1257
1258		rb_erase(node, root);
1259		data = rb_entry(node, struct alloc_stat, node);
1260		sort_slab_insert(root_sorted, data, sort_list);
1261	}
1262}
1263
1264static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1265			     struct list_head *sort_list)
1266{
1267	struct rb_node **new = &root->rb_node;
1268	struct rb_node *parent = NULL;
1269	struct sort_dimension *sort;
1270
1271	while (*new) {
1272		struct page_stat *this;
1273		int cmp = 0;
1274
1275		this = rb_entry(*new, struct page_stat, node);
1276		parent = *new;
1277
1278		list_for_each_entry(sort, sort_list, list) {
1279			cmp = sort->cmp(data, this);
1280			if (cmp)
1281				break;
1282		}
1283
1284		if (cmp > 0)
1285			new = &parent->rb_left;
1286		else
1287			new = &parent->rb_right;
1288	}
1289
1290	rb_link_node(&data->node, parent, new);
1291	rb_insert_color(&data->node, root);
1292}
1293
1294static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1295			       struct list_head *sort_list)
1296{
1297	struct rb_node *node;
1298	struct page_stat *data;
1299
1300	for (;;) {
1301		node = rb_first(root);
1302		if (!node)
1303			break;
1304
1305		rb_erase(node, root);
1306		data = rb_entry(node, struct page_stat, node);
1307		sort_page_insert(root_sorted, data, sort_list);
1308	}
1309}
1310
1311static void sort_result(void)
1312{
1313	if (kmem_slab) {
1314		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
1315				   &slab_alloc_sort);
1316		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
1317				   &slab_caller_sort);
1318	}
1319	if (kmem_page) {
1320		if (live_page)
1321			__sort_page_result(&page_live_tree, &page_alloc_sorted,
1322					   &page_alloc_sort);
1323		else
1324			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
1325					   &page_alloc_sort);
1326
1327		__sort_page_result(&page_caller_tree, &page_caller_sorted,
1328				   &page_caller_sort);
1329	}
1330}
1331
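/*
 * Set up the tracepoint handlers, detect whether mm_page_alloc reports a
 * 'pfn' field, process the recorded events and print the sorted result.
 */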
1332static int __cmd_kmem(struct perf_session *session)
1333{
1334	int err = -EINVAL;
1335	struct perf_evsel *evsel;
1336	const struct perf_evsel_str_handler kmem_tracepoints[] = {
1337		/* slab allocator */
1338		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
1339    		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
1340		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
1341    		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
1342		{ "kmem:kfree",			perf_evsel__process_free_event, },
1343    		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
1344		/* page allocator */
1345		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
1346		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
1347	};
1348
1349	if (!perf_session__has_traces(session, "kmem record"))
1350		goto out;
1351
1352	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1353		pr_err("Initializing perf session tracepoint handlers failed\n");
1354		goto out;
1355	}
1356
1357	evlist__for_each(session->evlist, evsel) {
1358		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
1359		    perf_evsel__field(evsel, "pfn")) {
1360			use_pfn = true;
1361			break;
1362		}
1363	}
1364
1365	setup_pager();
1366	err = perf_session__process_events(session);
1367	if (err != 0) {
1368		pr_err("error during process events: %d\n", err);
1369		goto out;
1370	}
1371	sort_result();
1372	print_result(session);
1373out:
1374	return err;
1375}
1376
1377/* slab sort keys */
1378static int ptr_cmp(void *a, void *b)
1379{
1380	struct alloc_stat *l = a;
1381	struct alloc_stat *r = b;
1382
1383	if (l->ptr < r->ptr)
1384		return -1;
1385	else if (l->ptr > r->ptr)
1386		return 1;
1387	return 0;
1388}
1389
1390static struct sort_dimension ptr_sort_dimension = {
1391	.name	= "ptr",
1392	.cmp	= ptr_cmp,
1393};
1394
1395static int slab_callsite_cmp(void *a, void *b)
1396{
1397	struct alloc_stat *l = a;
1398	struct alloc_stat *r = b;
1399
1400	if (l->call_site < r->call_site)
1401		return -1;
1402	else if (l->call_site > r->call_site)
1403		return 1;
1404	return 0;
1405}
1406
1407static struct sort_dimension callsite_sort_dimension = {
1408	.name	= "callsite",
1409	.cmp	= slab_callsite_cmp,
1410};
1411
1412static int hit_cmp(void *a, void *b)
1413{
1414	struct alloc_stat *l = a;
1415	struct alloc_stat *r = b;
1416
1417	if (l->hit < r->hit)
1418		return -1;
1419	else if (l->hit > r->hit)
1420		return 1;
1421	return 0;
1422}
1423
1424static struct sort_dimension hit_sort_dimension = {
1425	.name	= "hit",
1426	.cmp	= hit_cmp,
1427};
1428
1429static int bytes_cmp(void *a, void *b)
1430{
1431	struct alloc_stat *l = a;
1432	struct alloc_stat *r = b;
1433
1434	if (l->bytes_alloc < r->bytes_alloc)
1435		return -1;
1436	else if (l->bytes_alloc > r->bytes_alloc)
1437		return 1;
1438	return 0;
1439}
1440
1441static struct sort_dimension bytes_sort_dimension = {
1442	.name	= "bytes",
1443	.cmp	= bytes_cmp,
1444};
1445
1446static int frag_cmp(void *a, void *b)
1447{
1448	double x, y;
1449	struct alloc_stat *l = a;
1450	struct alloc_stat *r = b;
1451
1452	x = fragmentation(l->bytes_req, l->bytes_alloc);
1453	y = fragmentation(r->bytes_req, r->bytes_alloc);
1454
1455	if (x < y)
1456		return -1;
1457	else if (x > y)
1458		return 1;
1459	return 0;
1460}
1461
1462static struct sort_dimension frag_sort_dimension = {
1463	.name	= "frag",
1464	.cmp	= frag_cmp,
1465};
1466
1467static int pingpong_cmp(void *a, void *b)
1468{
1469	struct alloc_stat *l = a;
1470	struct alloc_stat *r = b;
1471
1472	if (l->pingpong < r->pingpong)
1473		return -1;
1474	else if (l->pingpong > r->pingpong)
1475		return 1;
1476	return 0;
1477}
1478
1479static struct sort_dimension pingpong_sort_dimension = {
1480	.name	= "pingpong",
1481	.cmp	= pingpong_cmp,
1482};
1483
1484/* page sort keys */
1485static int page_cmp(void *a, void *b)
1486{
1487	struct page_stat *l = a;
1488	struct page_stat *r = b;
1489
1490	if (l->page < r->page)
1491		return -1;
1492	else if (l->page > r->page)
1493		return 1;
1494	return 0;
1495}
1496
1497static struct sort_dimension page_sort_dimension = {
1498	.name	= "page",
1499	.cmp	= page_cmp,
1500};
1501
1502static int page_callsite_cmp(void *a, void *b)
1503{
1504	struct page_stat *l = a;
1505	struct page_stat *r = b;
1506
1507	if (l->callsite < r->callsite)
1508		return -1;
1509	else if (l->callsite > r->callsite)
1510		return 1;
1511	return 0;
1512}
1513
1514static struct sort_dimension page_callsite_sort_dimension = {
1515	.name	= "callsite",
1516	.cmp	= page_callsite_cmp,
1517};
1518
1519static int page_hit_cmp(void *a, void *b)
1520{
1521	struct page_stat *l = a;
1522	struct page_stat *r = b;
1523
1524	if (l->nr_alloc < r->nr_alloc)
1525		return -1;
1526	else if (l->nr_alloc > r->nr_alloc)
1527		return 1;
1528	return 0;
1529}
1530
1531static struct sort_dimension page_hit_sort_dimension = {
1532	.name	= "hit",
1533	.cmp	= page_hit_cmp,
1534};
1535
1536static int page_bytes_cmp(void *a, void *b)
1537{
1538	struct page_stat *l = a;
1539	struct page_stat *r = b;
1540
1541	if (l->alloc_bytes < r->alloc_bytes)
1542		return -1;
1543	else if (l->alloc_bytes > r->alloc_bytes)
1544		return 1;
1545	return 0;
1546}
1547
1548static struct sort_dimension page_bytes_sort_dimension = {
1549	.name	= "bytes",
1550	.cmp	= page_bytes_cmp,
1551};
1552
1553static int page_order_cmp(void *a, void *b)
1554{
1555	struct page_stat *l = a;
1556	struct page_stat *r = b;
1557
1558	if (l->order < r->order)
1559		return -1;
1560	else if (l->order > r->order)
1561		return 1;
1562	return 0;
1563}
1564
1565static struct sort_dimension page_order_sort_dimension = {
1566	.name	= "order",
1567	.cmp	= page_order_cmp,
1568};
1569
1570static int migrate_type_cmp(void *a, void *b)
1571{
1572	struct page_stat *l = a;
1573	struct page_stat *r = b;
1574
1575	/* for internal use to find free'd page */
1576	if (l->migrate_type == -1U)
1577		return 0;
1578
1579	if (l->migrate_type < r->migrate_type)
1580		return -1;
1581	else if (l->migrate_type > r->migrate_type)
1582		return 1;
1583	return 0;
1584}
1585
1586static struct sort_dimension migrate_type_sort_dimension = {
1587	.name	= "migtype",
1588	.cmp	= migrate_type_cmp,
1589};
1590
1591static int gfp_flags_cmp(void *a, void *b)
1592{
1593	struct page_stat *l = a;
1594	struct page_stat *r = b;
1595
1596	/* for internal use to find free'd page */
1597	if (l->gfp_flags == -1U)
1598		return 0;
1599
1600	if (l->gfp_flags < r->gfp_flags)
1601		return -1;
1602	else if (l->gfp_flags > r->gfp_flags)
1603		return 1;
1604	return 0;
1605}
1606
1607static struct sort_dimension gfp_flags_sort_dimension = {
1608	.name	= "gfp",
1609	.cmp	= gfp_flags_cmp,
1610};
1611
1612static struct sort_dimension *slab_sorts[] = {
1613	&ptr_sort_dimension,
1614	&callsite_sort_dimension,
1615	&hit_sort_dimension,
1616	&bytes_sort_dimension,
1617	&frag_sort_dimension,
1618	&pingpong_sort_dimension,
1619};
1620
1621static struct sort_dimension *page_sorts[] = {
1622	&page_sort_dimension,
1623	&page_callsite_sort_dimension,
1624	&page_hit_sort_dimension,
1625	&page_bytes_sort_dimension,
1626	&page_order_sort_dimension,
1627	&migrate_type_sort_dimension,
1628	&gfp_flags_sort_dimension,
1629};
1630
1631static int slab_sort_dimension__add(const char *tok, struct list_head *list)
1632{
1633	struct sort_dimension *sort;
1634	int i;
1635
1636	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1637		if (!strcmp(slab_sorts[i]->name, tok)) {
1638			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
1639			if (!sort) {
1640				pr_err("%s: memdup failed\n", __func__);
1641				return -1;
1642			}
1643			list_add_tail(&sort->list, list);
1644			return 0;
1645		}
1646	}
1647
1648	return -1;
1649}
1650
1651static int page_sort_dimension__add(const char *tok, struct list_head *list)
1652{
1653	struct sort_dimension *sort;
1654	int i;
1655
1656	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1657		if (!strcmp(page_sorts[i]->name, tok)) {
1658			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1659			if (!sort) {
1660				pr_err("%s: memdup failed\n", __func__);
1661				return -1;
1662			}
1663			list_add_tail(&sort->list, list);
1664			return 0;
1665		}
1666	}
1667
1668	return -1;
1669}
1670
1671static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
1672{
1673	char *tok;
1674	char *str = strdup(arg);
1675	char *pos = str;
1676
1677	if (!str) {
1678		pr_err("%s: strdup failed\n", __func__);
1679		return -1;
1680	}
1681
1682	while (true) {
1683		tok = strsep(&pos, ",");
1684		if (!tok)
1685			break;
1686		if (slab_sort_dimension__add(tok, sort_list) < 0) {
1687			error("Unknown slab --sort key: '%s'", tok);
1688			free(str);
1689			return -1;
1690		}
1691	}
1692
1693	free(str);
1694	return 0;
1695}
1696
1697static int setup_page_sorting(struct list_head *sort_list, const char *arg)
1698{
1699	char *tok;
1700	char *str = strdup(arg);
1701	char *pos = str;
1702
1703	if (!str) {
1704		pr_err("%s: strdup failed\n", __func__);
1705		return -1;
1706	}
1707
1708	while (true) {
1709		tok = strsep(&pos, ",");
1710		if (!tok)
1711			break;
1712		if (page_sort_dimension__add(tok, sort_list) < 0) {
1713			error("Unknown page --sort key: '%s'", tok);
1714			free(str);
1715			return -1;
1716		}
1717	}
1718
1719	free(str);
1720	return 0;
1721}
1722
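/*
 * Apply a --sort string to the page or slab lists (whichever of --page/--slab
 * was given last, or the configured default), and to the caller or alloc list
 * depending on which of --caller/--alloc came last.
 */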
1723static int parse_sort_opt(const struct option *opt __maybe_unused,
1724			  const char *arg, int unset __maybe_unused)
1725{
1726	if (!arg)
1727		return -1;
1728
1729	if (kmem_page > kmem_slab ||
1730	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
1731		if (caller_flag > alloc_flag)
1732			return setup_page_sorting(&page_caller_sort, arg);
1733		else
1734			return setup_page_sorting(&page_alloc_sort, arg);
1735	} else {
1736		if (caller_flag > alloc_flag)
1737			return setup_slab_sorting(&slab_caller_sort, arg);
1738		else
1739			return setup_slab_sorting(&slab_alloc_sort, arg);
1740	}
1741
1742	return 0;
1743}
1744
1745static int parse_caller_opt(const struct option *opt __maybe_unused,
1746			    const char *arg __maybe_unused,
1747			    int unset __maybe_unused)
1748{
1749	caller_flag = (alloc_flag + 1);
1750	return 0;
1751}
1752
1753static int parse_alloc_opt(const struct option *opt __maybe_unused,
1754			   const char *arg __maybe_unused,
1755			   int unset __maybe_unused)
1756{
1757	alloc_flag = (caller_flag + 1);
1758	return 0;
1759}
1760
1761static int parse_slab_opt(const struct option *opt __maybe_unused,
1762			  const char *arg __maybe_unused,
1763			  int unset __maybe_unused)
1764{
1765	kmem_slab = (kmem_page + 1);
1766	return 0;
1767}
1768
1769static int parse_page_opt(const struct option *opt __maybe_unused,
1770			  const char *arg __maybe_unused,
1771			  int unset __maybe_unused)
1772{
1773	kmem_page = (kmem_slab + 1);
1774	return 0;
1775}
1776
1777static int parse_line_opt(const struct option *opt __maybe_unused,
1778			  const char *arg, int unset __maybe_unused)
1779{
1780	int lines;
1781
1782	if (!arg)
1783		return -1;
1784
1785	lines = strtoul(arg, NULL, 10);
1786
1787	if (caller_flag > alloc_flag)
1788		caller_lines = lines;
1789	else
1790		alloc_lines = lines;
1791
1792	return 0;
1793}
1794
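/*
 * Build a 'perf record' command line with the slab and/or page tracepoints
 * (plus -g so page allocations carry callchains) and hand it to cmd_record().
 */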
1795static int __cmd_record(int argc, const char **argv)
1796{
1797	const char * const record_args[] = {
1798	"record", "-a", "-R", "-c", "1",
1799	};
1800	const char * const slab_events[] = {
1801	"-e", "kmem:kmalloc",
1802	"-e", "kmem:kmalloc_node",
1803	"-e", "kmem:kfree",
1804	"-e", "kmem:kmem_cache_alloc",
1805	"-e", "kmem:kmem_cache_alloc_node",
1806	"-e", "kmem:kmem_cache_free",
1807	};
1808	const char * const page_events[] = {
1809	"-e", "kmem:mm_page_alloc",
1810	"-e", "kmem:mm_page_free",
1811	};
1812	unsigned int rec_argc, i, j;
1813	const char **rec_argv;
1814
1815	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1816	if (kmem_slab)
1817		rec_argc += ARRAY_SIZE(slab_events);
1818	if (kmem_page)
1819		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
1820
1821	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1822
1823	if (rec_argv == NULL)
1824		return -ENOMEM;
1825
1826	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1827		rec_argv[i] = strdup(record_args[i]);
1828
1829	if (kmem_slab) {
1830		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
1831			rec_argv[i] = strdup(slab_events[j]);
1832	}
1833	if (kmem_page) {
1834		rec_argv[i++] = strdup("-g");
1835
1836		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
1837			rec_argv[i] = strdup(page_events[j]);
1838	}
1839
1840	for (j = 1; j < (unsigned int)argc; j++, i++)
1841		rec_argv[i] = argv[j];
1842
1843	return cmd_record(i, rec_argv, NULL);
1844}
1845
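/* Handle the 'kmem.default' config key: 'slab' or 'page'. */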
1846static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
1847{
1848	if (!strcmp(var, "kmem.default")) {
1849		if (!strcmp(value, "slab"))
1850			kmem_default = KMEM_SLAB;
1851		else if (!strcmp(value, "page"))
1852			kmem_default = KMEM_PAGE;
1853		else
1854			pr_err("invalid default value ('slab' or 'page' required): %s\n",
1855			       value);
1856		return 0;
1857	}
1858
1859	return 0;
1860}
1861
1862int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
1863{
1864	const char * const default_slab_sort = "frag,hit,bytes";
1865	const char * const default_page_sort = "bytes,hit";
1866	struct perf_data_file file = {
1867		.mode = PERF_DATA_MODE_READ,
1868	};
1869	const struct option kmem_options[] = {
1870	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1871	OPT_INCR('v', "verbose", &verbose,
1872		    "be more verbose (show symbol address, etc)"),
1873	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1874			   "show per-callsite statistics", parse_caller_opt),
1875	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1876			   "show per-allocation statistics", parse_alloc_opt),
1877	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
1878		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1879		     "page, order, migtype, gfp", parse_sort_opt),
1880	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1881	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1882	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
1883	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1884			   parse_slab_opt),
1885	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1886			   parse_page_opt),
1887	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
1888	OPT_END()
1889	};
1890	const char *const kmem_subcommands[] = { "record", "stat", NULL };
1891	const char *kmem_usage[] = {
1892		NULL,
1893		NULL
1894	};
1895	struct perf_session *session;
1896	int ret = -1;
1897	const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
1898
1899	perf_config(kmem_config, NULL);
1900	argc = parse_options_subcommand(argc, argv, kmem_options,
1901					kmem_subcommands, kmem_usage, 0);
1902
1903	if (!argc)
1904		usage_with_options(kmem_usage, kmem_options);
1905
1906	if (kmem_slab == 0 && kmem_page == 0) {
1907		if (kmem_default == KMEM_SLAB)
1908			kmem_slab = 1;
1909		else
1910			kmem_page = 1;
1911	}
1912
1913	if (!strncmp(argv[0], "rec", 3)) {
1914		symbol__init(NULL);
1915		return __cmd_record(argc, argv);
1916	}
1917
1918	file.path = input_name;
1919
1920	kmem_session = session = perf_session__new(&file, false, &perf_kmem);
1921	if (session == NULL)
1922		return -1;
1923
1924	if (kmem_slab) {
1925		if (!perf_evlist__find_tracepoint_by_name(session->evlist,
1926							  "kmem:kmalloc")) {
1927			pr_err(errmsg, "slab", "slab");
1928			goto out_delete;
1929		}
1930	}
1931
1932	if (kmem_page) {
1933		struct perf_evsel *evsel;
1934
1935		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
1936							     "kmem:mm_page_alloc");
1937		if (evsel == NULL) {
1938			pr_err(errmsg, "page", "page");
1939			goto out_delete;
1940		}
1941
1942		kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
1943		symbol_conf.use_callchain = true;
1944	}
1945
1946	symbol__init(&session->header.env);
1947
1948	if (!strcmp(argv[0], "stat")) {
1949		setlocale(LC_ALL, "");
1950
1951		if (cpu__setup_cpunode_map())
1952			goto out_delete;
1953
1954		if (list_empty(&slab_caller_sort))
1955			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
1956		if (list_empty(&slab_alloc_sort))
1957			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
1958		if (list_empty(&page_caller_sort))
1959			setup_page_sorting(&page_caller_sort, default_page_sort);
1960		if (list_empty(&page_alloc_sort))
1961			setup_page_sorting(&page_alloc_sort, default_page_sort);
1962
1963		if (kmem_page) {
1964			setup_page_sorting(&page_alloc_sort_input,
1965					   "page,order,migtype,gfp");
1966			setup_page_sorting(&page_caller_sort_input,
1967					   "callsite,order,migtype,gfp");
1968		}
1969		ret = __cmd_kmem(session);
1970	} else
1971		usage_with_options(kmem_usage, kmem_options);
1972
1973out_delete:
1974	perf_session__delete(session);
1975
1976	return ret;
1977}
1978
Linux v4.10.11: tools/perf/builtin-kmem.c
   1#include "builtin.h"
   2#include "perf.h"
   3
   4#include "util/evlist.h"
   5#include "util/evsel.h"
   6#include "util/util.h"
   7#include "util/config.h"
   8#include "util/symbol.h"
   9#include "util/thread.h"
  10#include "util/header.h"
  11#include "util/session.h"
  12#include "util/tool.h"
  13#include "util/callchain.h"
  14#include "util/time-utils.h"
  15
  16#include <subcmd/parse-options.h>
  17#include "util/trace-event.h"
  18#include "util/data.h"
  19#include "util/cpumap.h"
  20
  21#include "util/debug.h"
  22
  23#include <linux/rbtree.h>
  24#include <linux/string.h>
  25#include <locale.h>
  26#include <regex.h>
  27
  28static int	kmem_slab;
  29static int	kmem_page;
  30
  31static long	kmem_page_size;
  32static enum {
  33	KMEM_SLAB,
  34	KMEM_PAGE,
  35} kmem_default = KMEM_SLAB;  /* for backward compatibility */
  36
  37struct alloc_stat;
  38typedef int (*sort_fn_t)(void *, void *);
  39
  40static int			alloc_flag;
  41static int			caller_flag;
  42
  43static int			alloc_lines = -1;
  44static int			caller_lines = -1;
  45
  46static bool			raw_ip;
  47
  48struct alloc_stat {
  49	u64	call_site;
  50	u64	ptr;
  51	u64	bytes_req;
  52	u64	bytes_alloc;
  53	u64	last_alloc;
  54	u32	hit;
  55	u32	pingpong;
  56
  57	short	alloc_cpu;
  58
  59	struct rb_node node;
  60};
  61
  62static struct rb_root root_alloc_stat;
  63static struct rb_root root_alloc_sorted;
  64static struct rb_root root_caller_stat;
  65static struct rb_root root_caller_sorted;
  66
  67static unsigned long total_requested, total_allocated, total_freed;
  68static unsigned long nr_allocs, nr_cross_allocs;
  69
  70/* filters for controlling start and stop of time of analysis */
  71static struct perf_time_interval ptime;
  72const char *time_str;
  73
  74static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
  75			     int bytes_req, int bytes_alloc, int cpu)
  76{
  77	struct rb_node **node = &root_alloc_stat.rb_node;
  78	struct rb_node *parent = NULL;
  79	struct alloc_stat *data = NULL;
  80
  81	while (*node) {
  82		parent = *node;
  83		data = rb_entry(*node, struct alloc_stat, node);
  84
  85		if (ptr > data->ptr)
  86			node = &(*node)->rb_right;
  87		else if (ptr < data->ptr)
  88			node = &(*node)->rb_left;
  89		else
  90			break;
  91	}
  92
  93	if (data && data->ptr == ptr) {
  94		data->hit++;
  95		data->bytes_req += bytes_req;
  96		data->bytes_alloc += bytes_alloc;
  97	} else {
  98		data = malloc(sizeof(*data));
  99		if (!data) {
 100			pr_err("%s: malloc failed\n", __func__);
 101			return -1;
 102		}
 103		data->ptr = ptr;
 104		data->pingpong = 0;
 105		data->hit = 1;
 106		data->bytes_req = bytes_req;
 107		data->bytes_alloc = bytes_alloc;
 108
 109		rb_link_node(&data->node, parent, node);
 110		rb_insert_color(&data->node, &root_alloc_stat);
 111	}
 112	data->call_site = call_site;
 113	data->alloc_cpu = cpu;
 114	data->last_alloc = bytes_alloc;
 115
 116	return 0;
 117}
 118
 119static int insert_caller_stat(unsigned long call_site,
 120			      int bytes_req, int bytes_alloc)
 121{
 122	struct rb_node **node = &root_caller_stat.rb_node;
 123	struct rb_node *parent = NULL;
 124	struct alloc_stat *data = NULL;
 125
 126	while (*node) {
 127		parent = *node;
 128		data = rb_entry(*node, struct alloc_stat, node);
 129
 130		if (call_site > data->call_site)
 131			node = &(*node)->rb_right;
 132		else if (call_site < data->call_site)
 133			node = &(*node)->rb_left;
 134		else
 135			break;
 136	}
 137
 138	if (data && data->call_site == call_site) {
 139		data->hit++;
 140		data->bytes_req += bytes_req;
 141		data->bytes_alloc += bytes_alloc;
 142	} else {
 143		data = malloc(sizeof(*data));
 144		if (!data) {
 145			pr_err("%s: malloc failed\n", __func__);
 146			return -1;
 147		}
 148		data->call_site = call_site;
 149		data->pingpong = 0;
 150		data->hit = 1;
 151		data->bytes_req = bytes_req;
 152		data->bytes_alloc = bytes_alloc;
 153
 154		rb_link_node(&data->node, parent, node);
 155		rb_insert_color(&data->node, &root_caller_stat);
 156	}
 157
 158	return 0;
 159}
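/*
 * Slab allocations are accounted twice: per object pointer in
 * root_alloc_stat (keyed by 'ptr') and per call site in root_caller_stat
 * (keyed by 'call_site'), so the --alloc and --caller views can both be
 * produced from the same pass over the trace.
 */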
 160
 161static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
 162					   struct perf_sample *sample)
 163{
 164	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
 165		      call_site = perf_evsel__intval(evsel, sample, "call_site");
 166	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
 167	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
 168
 169	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
 170	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
 171		return -1;
 172
 173	total_requested += bytes_req;
 174	total_allocated += bytes_alloc;
 175
 176	nr_allocs++;
 177	return 0;
 178}
 179
 180static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
 181						struct perf_sample *sample)
 182{
 183	int ret = perf_evsel__process_alloc_event(evsel, sample);
 184
 185	if (!ret) {
 186		int node1 = cpu__get_node(sample->cpu),
 187		    node2 = perf_evsel__intval(evsel, sample, "node");
 188
 189		if (node1 != node2)
 190			nr_cross_allocs++;
 191	}
 192
 193	return ret;
 194}
 195
 196static int ptr_cmp(void *, void *);
 197static int slab_callsite_cmp(void *, void *);
 198
 199static struct alloc_stat *search_alloc_stat(unsigned long ptr,
 200					    unsigned long call_site,
 201					    struct rb_root *root,
 202					    sort_fn_t sort_fn)
 203{
 204	struct rb_node *node = root->rb_node;
 205	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
 206
 207	while (node) {
 208		struct alloc_stat *data;
 209		int cmp;
 210
 211		data = rb_entry(node, struct alloc_stat, node);
 212
 213		cmp = sort_fn(&key, data);
 214		if (cmp < 0)
 215			node = node->rb_left;
 216		else if (cmp > 0)
 217			node = node->rb_right;
 218		else
 219			return data;
 220	}
 221	return NULL;
 222}
 223
 224static int perf_evsel__process_free_event(struct perf_evsel *evsel,
 225					  struct perf_sample *sample)
 226{
 227	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
 228	struct alloc_stat *s_alloc, *s_caller;
 229
 230	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
 231	if (!s_alloc)
 232		return 0;
 233
 234	total_freed += s_alloc->last_alloc;
 235
 236	if ((short)sample->cpu != s_alloc->alloc_cpu) {
 237		s_alloc->pingpong++;
 238
 239		s_caller = search_alloc_stat(0, s_alloc->call_site,
 240					     &root_caller_stat,
 241					     slab_callsite_cmp);
 242		if (!s_caller)
 243			return -1;
 244		s_caller->pingpong++;
 245	}
 246	s_alloc->alloc_cpu = -1;
 247
 248	return 0;
 249}
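/*
 * A "ping-pong" is an object allocated on one CPU but freed on another
 * (sample->cpu != alloc_cpu above); both the object and its call site have
 * their pingpong counters bumped, which feeds the Ping-pong column of the
 * slab report.
 */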
 250
 251static u64 total_page_alloc_bytes;
 252static u64 total_page_free_bytes;
 253static u64 total_page_nomatch_bytes;
 254static u64 total_page_fail_bytes;
 255static unsigned long nr_page_allocs;
 256static unsigned long nr_page_frees;
 257static unsigned long nr_page_fails;
 258static unsigned long nr_page_nomatch;
 259
 260static bool use_pfn;
 261static bool live_page;
 262static struct perf_session *kmem_session;
 263
 264#define MAX_MIGRATE_TYPES  6
 265#define MAX_PAGE_ORDER     11
 266
 267static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
 268
 269struct page_stat {
 270	struct rb_node 	node;
 271	u64 		page;
 272	u64 		callsite;
 273	int 		order;
 274	unsigned 	gfp_flags;
 275	unsigned 	migrate_type;
 276	u64		alloc_bytes;
 277	u64 		free_bytes;
 278	int 		nr_alloc;
 279	int 		nr_free;
 280};
 281
 282static struct rb_root page_live_tree;
 283static struct rb_root page_alloc_tree;
 284static struct rb_root page_alloc_sorted;
 285static struct rb_root page_caller_tree;
 286static struct rb_root page_caller_sorted;
 287
 288struct alloc_func {
 289	u64 start;
 290	u64 end;
 291	char *name;
 292};
 293
 294static int nr_alloc_funcs;
 295static struct alloc_func *alloc_func_list;
 296
 297static int funcmp(const void *a, const void *b)
 298{
 299	const struct alloc_func *fa = a;
 300	const struct alloc_func *fb = b;
 301
 302	if (fa->start > fb->start)
 303		return 1;
 304	else
 305		return -1;
 306}
 307
 308static int callcmp(const void *a, const void *b)
 309{
 310	const struct alloc_func *fa = a;
 311	const struct alloc_func *fb = b;
 312
 313	if (fb->start <= fa->start && fa->end < fb->end)
 314		return 0;
 315
 316	if (fa->start > fb->start)
 317		return 1;
 318	else
 319		return -1;
 320}
 321
 322static int build_alloc_func_list(void)
 323{
 324	int ret;
 325	struct map *kernel_map;
 326	struct symbol *sym;
 327	struct rb_node *node;
 328	struct alloc_func *func;
 329	struct machine *machine = &kmem_session->machines.host;
 330	regex_t alloc_func_regex;
 331	const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
 332
 333	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
 334	if (ret) {
 335		char err[BUFSIZ];
 336
 337		regerror(ret, &alloc_func_regex, err, sizeof(err));
 338		pr_err("Invalid regex: %s\n%s", pattern, err);
 339		return -EINVAL;
 340	}
 341
 342	kernel_map = machine__kernel_map(machine);
 343	if (map__load(kernel_map) < 0) {
 344		pr_err("cannot load kernel map\n");
 345		return -ENOENT;
 346	}
 347
 348	map__for_each_symbol(kernel_map, sym, node) {
 349		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
 350			continue;
 351
 352		func = realloc(alloc_func_list,
 353			       (nr_alloc_funcs + 1) * sizeof(*func));
 354		if (func == NULL)
 355			return -ENOMEM;
 356
 357		pr_debug("alloc func: %s\n", sym->name);
 358		func[nr_alloc_funcs].start = sym->start;
 359		func[nr_alloc_funcs].end   = sym->end;
 360		func[nr_alloc_funcs].name  = sym->name;
 361
 362		alloc_func_list = func;
 363		nr_alloc_funcs++;
 364	}
 365
 366	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
 367
 368	regfree(&alloc_func_regex);
 369	return 0;
 370}
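/*
 * The regex above matches the page-allocator entry points themselves, e.g.
 * __alloc_pages_nodemask, __get_free_pages and get_zeroed_page, so that
 * find_callsite() below can skip them while walking the callchain.
 */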
 371
 372/*
 373 * Find the first non-allocation function in the callchain.
 374 * The allocation functions are listed in 'alloc_func_list'.
 375 */
 376static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
 377{
 378	struct addr_location al;
 379	struct machine *machine = &kmem_session->machines.host;
 380	struct callchain_cursor_node *node;
 381
 382	if (alloc_func_list == NULL) {
 383		if (build_alloc_func_list() < 0)
 384			goto out;
 385	}
 386
 387	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
 388	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
 389
 390	callchain_cursor_commit(&callchain_cursor);
 391	while (true) {
 392		struct alloc_func key, *caller;
 393		u64 addr;
 394
 395		node = callchain_cursor_current(&callchain_cursor);
 396		if (node == NULL)
 397			break;
 398
 399		key.start = key.end = node->ip;
 400		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
 401				 sizeof(key), callcmp);
 402		if (!caller) {
 403			/* found */
 404			if (node->map)
 405				addr = map__unmap_ip(node->map, node->ip);
 406			else
 407				addr = node->ip;
 408
 409			return addr;
 410		} else
 411			pr_debug3("skipping alloc function: %s\n", caller->name);
 412
 413		callchain_cursor_advance(&callchain_cursor);
 414	}
 415
 416out:
 417	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
 418	return sample->ip;
 419}
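/*
 * Illustrative example: for a callchain like
 *   __alloc_pages_nodemask <- alloc_pages_current <- pte_alloc_one <- ...
 * the first two frames match alloc_func_list and are skipped, so the page
 * is attributed to pte_alloc_one.
 */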
 420
 421struct sort_dimension {
 422	const char		name[20];
 423	sort_fn_t		cmp;
 424	struct list_head	list;
 425};
 426
 427static LIST_HEAD(page_alloc_sort_input);
 428static LIST_HEAD(page_caller_sort_input);
 429
 430static struct page_stat *
 431__page_stat__findnew_page(struct page_stat *pstat, bool create)
 432{
 433	struct rb_node **node = &page_live_tree.rb_node;
 434	struct rb_node *parent = NULL;
 435	struct page_stat *data;
 436
 437	while (*node) {
 438		s64 cmp;
 439
 440		parent = *node;
 441		data = rb_entry(*node, struct page_stat, node);
 442
 443		cmp = data->page - pstat->page;
 444		if (cmp < 0)
 445			node = &parent->rb_left;
 446		else if (cmp > 0)
 447			node = &parent->rb_right;
 448		else
 449			return data;
 450	}
 451
 452	if (!create)
 453		return NULL;
 454
 455	data = zalloc(sizeof(*data));
 456	if (data != NULL) {
 457		data->page = pstat->page;
 458		data->order = pstat->order;
 459		data->gfp_flags = pstat->gfp_flags;
 460		data->migrate_type = pstat->migrate_type;
 461
 462		rb_link_node(&data->node, parent, node);
 463		rb_insert_color(&data->node, &page_live_tree);
 464	}
 465
 466	return data;
 467}
 468
 469static struct page_stat *page_stat__find_page(struct page_stat *pstat)
 470{
 471	return __page_stat__findnew_page(pstat, false);
 472}
 473
 474static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
 475{
 476	return __page_stat__findnew_page(pstat, true);
 477}
 478
 479static struct page_stat *
 480__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
 481{
 482	struct rb_node **node = &page_alloc_tree.rb_node;
 483	struct rb_node *parent = NULL;
 484	struct page_stat *data;
 485	struct sort_dimension *sort;
 486
 487	while (*node) {
 488		int cmp = 0;
 489
 490		parent = *node;
 491		data = rb_entry(*node, struct page_stat, node);
 492
 493		list_for_each_entry(sort, &page_alloc_sort_input, list) {
 494			cmp = sort->cmp(pstat, data);
 495			if (cmp)
 496				break;
 497		}
 498
 499		if (cmp < 0)
 500			node = &parent->rb_left;
 501		else if (cmp > 0)
 502			node = &parent->rb_right;
 503		else
 504			return data;
 505	}
 506
 507	if (!create)
 508		return NULL;
 509
 510	data = zalloc(sizeof(*data));
 511	if (data != NULL) {
 512		data->page = pstat->page;
 513		data->order = pstat->order;
 514		data->gfp_flags = pstat->gfp_flags;
 515		data->migrate_type = pstat->migrate_type;
 516
 517		rb_link_node(&data->node, parent, node);
 518		rb_insert_color(&data->node, &page_alloc_tree);
 519	}
 520
 521	return data;
 522}
 523
 524static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
 525{
 526	return __page_stat__findnew_alloc(pstat, false);
 527}
 528
 529static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
 530{
 531	return __page_stat__findnew_alloc(pstat, true);
 532}
 533
 534static struct page_stat *
 535__page_stat__findnew_caller(struct page_stat *pstat, bool create)
 536{
 537	struct rb_node **node = &page_caller_tree.rb_node;
 538	struct rb_node *parent = NULL;
 539	struct page_stat *data;
 540	struct sort_dimension *sort;
 541
 542	while (*node) {
 543		int cmp = 0;
 544
 545		parent = *node;
 546		data = rb_entry(*node, struct page_stat, node);
 547
 548		list_for_each_entry(sort, &page_caller_sort_input, list) {
 549			cmp = sort->cmp(pstat, data);
 550			if (cmp)
 551				break;
 552		}
 553
 554		if (cmp < 0)
 555			node = &parent->rb_left;
 556		else if (cmp > 0)
 557			node = &parent->rb_right;
 558		else
 559			return data;
 560	}
 561
 562	if (!create)
 563		return NULL;
 564
 565	data = zalloc(sizeof(*data));
 566	if (data != NULL) {
 567		data->callsite = pstat->callsite;
 568		data->order = pstat->order;
 569		data->gfp_flags = pstat->gfp_flags;
 570		data->migrate_type = pstat->migrate_type;
 571
 572		rb_link_node(&data->node, parent, node);
 573		rb_insert_color(&data->node, &page_caller_tree);
 574	}
 575
 576	return data;
 577}
 578
 579static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
 580{
 581	return __page_stat__findnew_caller(pstat, false);
 582}
 583
 584static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
 585{
 586	return __page_stat__findnew_caller(pstat, true);
 587}
 588
 589static bool valid_page(u64 pfn_or_page)
 590{
 591	if (use_pfn && pfn_or_page == -1UL)
 592		return false;
 593	if (!use_pfn && pfn_or_page == 0)
 594		return false;
 595	return true;
 596}
 597
 598struct gfp_flag {
 599	unsigned int flags;
 600	char *compact_str;
 601	char *human_readable;
 602};
 603
 604static struct gfp_flag *gfps;
 605static int nr_gfps;
 606
 607static int gfpcmp(const void *a, const void *b)
 608{
 609	const struct gfp_flag *fa = a;
 610	const struct gfp_flag *fb = b;
 611
 612	return fa->flags - fb->flags;
 613}
 614
 615/* see include/trace/events/mmflags.h */
 616static const struct {
 617	const char *original;
 618	const char *compact;
 619} gfp_compact_table[] = {
 620	{ "GFP_TRANSHUGE",		"THP" },
 621	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
 622	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
 623	{ "GFP_HIGHUSER",		"HU" },
 624	{ "GFP_USER",			"U" },
 625	{ "GFP_TEMPORARY",		"TMP" },
 626	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
 627	{ "GFP_KERNEL",			"K" },
 628	{ "GFP_NOFS",			"NF" },
 629	{ "GFP_ATOMIC",			"A" },
 630	{ "GFP_NOIO",			"NI" },
 631	{ "GFP_NOWAIT",			"NW" },
 632	{ "GFP_DMA",			"D" },
 633	{ "__GFP_HIGHMEM",		"HM" },
 634	{ "GFP_DMA32",			"D32" },
 635	{ "__GFP_HIGH",			"H" },
 636	{ "__GFP_ATOMIC",		"_A" },
 637	{ "__GFP_IO",			"I" },
 638	{ "__GFP_FS",			"F" },
 639	{ "__GFP_COLD",			"CO" },
 640	{ "__GFP_NOWARN",		"NWR" },
 641	{ "__GFP_REPEAT",		"R" },
 642	{ "__GFP_NOFAIL",		"NF" },
 643	{ "__GFP_NORETRY",		"NR" },
 644	{ "__GFP_COMP",			"C" },
 645	{ "__GFP_ZERO",			"Z" },
 646	{ "__GFP_NOMEMALLOC",		"NMA" },
 647	{ "__GFP_MEMALLOC",		"MA" },
 648	{ "__GFP_HARDWALL",		"HW" },
 649	{ "__GFP_THISNODE",		"TN" },
 650	{ "__GFP_RECLAIMABLE",		"RC" },
 651	{ "__GFP_MOVABLE",		"M" },
 652	{ "__GFP_ACCOUNT",		"AC" },
 653	{ "__GFP_NOTRACK",		"NT" },
 654	{ "__GFP_WRITE",		"WR" },
 655	{ "__GFP_RECLAIM",		"R" },
 656	{ "__GFP_DIRECT_RECLAIM",	"DR" },
 657	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
 658};
 659
 660static size_t max_gfp_len;
 661
 662static char *compact_gfp_flags(char *gfp_flags)
 663{
 664	char *orig_flags = strdup(gfp_flags);
 665	char *new_flags = NULL;
 666	char *str, *pos = NULL;
 667	size_t len = 0;
 668
 669	if (orig_flags == NULL)
 670		return NULL;
 671
 672	str = strtok_r(orig_flags, "|", &pos);
 673	while (str) {
 674		size_t i;
 675		char *new;
 676		const char *cpt;
 677
 678		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
 679			if (strcmp(gfp_compact_table[i].original, str))
 680				continue;
 681
 682			cpt = gfp_compact_table[i].compact;
 683			new = realloc(new_flags, len + strlen(cpt) + 2);
 684			if (new == NULL) {
 685				free(new_flags);
 686				return NULL;
 687			}
 688
 689			new_flags = new;
 690
 691			if (!len) {
 692				strcpy(new_flags, cpt);
 693			} else {
 694				strcat(new_flags, "|");
 695				strcat(new_flags, cpt);
 696				len++;
 697			}
 698
 699			len += strlen(cpt);
 700		}
 701
 702		str = strtok_r(NULL, "|", &pos);
 703	}
 704
 705	if (max_gfp_len < len)
 706		max_gfp_len = len;
 707
 708	free(orig_flags);
 709	return new_flags;
 710}
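/*
 * Example, using the table above: "GFP_KERNEL|__GFP_ZERO" is compacted to
 * "K|Z" and "GFP_HIGHUSER_MOVABLE" to "HUM", keeping the GFP column narrow.
 */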
 711
 712static char *compact_gfp_string(unsigned long gfp_flags)
 713{
 714	struct gfp_flag key = {
 715		.flags = gfp_flags,
 716	};
 717	struct gfp_flag *gfp;
 718
 719	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 720	if (gfp)
 721		return gfp->compact_str;
 722
 723	return NULL;
 724}
 725
 726static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
 727			   unsigned int gfp_flags)
 728{
 729	struct pevent_record record = {
 730		.cpu = sample->cpu,
 731		.data = sample->raw_data,
 732		.size = sample->raw_size,
 733	};
 734	struct trace_seq seq;
 735	char *str, *pos = NULL;
 736
 737	if (nr_gfps) {
 738		struct gfp_flag key = {
 739			.flags = gfp_flags,
 740		};
 741
 742		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
 743			return 0;
 744	}
 745
 746	trace_seq_init(&seq);
 747	pevent_event_info(&seq, evsel->tp_format, &record);
 748
 749	str = strtok_r(seq.buffer, " ", &pos);
 750	while (str) {
 751		if (!strncmp(str, "gfp_flags=", 10)) {
 752			struct gfp_flag *new;
 753
 754			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
 755			if (new == NULL)
 756				return -ENOMEM;
 757
 758			gfps = new;
 759			new += nr_gfps++;
 760
 761			new->flags = gfp_flags;
 762			new->human_readable = strdup(str + 10);
 763			new->compact_str = compact_gfp_flags(str + 10);
 764			if (!new->human_readable || !new->compact_str)
 765				return -ENOMEM;
 766
 767			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 768		}
 769
 770		str = strtok_r(NULL, " ", &pos);
 771	}
 772
 773	trace_seq_destroy(&seq);
 774	return 0;
 775}
 776
 777static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
 778						struct perf_sample *sample)
 779{
 780	u64 page;
 781	unsigned int order = perf_evsel__intval(evsel, sample, "order");
 782	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
 783	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
 784						       "migratetype");
 785	u64 bytes = kmem_page_size << order;
 786	u64 callsite;
 787	struct page_stat *pstat;
 788	struct page_stat this = {
 789		.order = order,
 790		.gfp_flags = gfp_flags,
 791		.migrate_type = migrate_type,
 792	};
 793
 794	if (use_pfn)
 795		page = perf_evsel__intval(evsel, sample, "pfn");
 796	else
 797		page = perf_evsel__intval(evsel, sample, "page");
 798
 799	nr_page_allocs++;
 800	total_page_alloc_bytes += bytes;
 801
 802	if (!valid_page(page)) {
 803		nr_page_fails++;
 804		total_page_fail_bytes += bytes;
 805
 806		return 0;
 807	}
 808
 809	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
 810		return -1;
 811
 812	callsite = find_callsite(evsel, sample);
 813
 814	/*
 815	 * This is used to find the current page (with the correct gfp flags
 816	 * and migrate type) at the free event.
 817	 */
 818	this.page = page;
 819	pstat = page_stat__findnew_page(&this);
 820	if (pstat == NULL)
 821		return -ENOMEM;
 822
 823	pstat->nr_alloc++;
 824	pstat->alloc_bytes += bytes;
 825	pstat->callsite = callsite;
 826
 827	if (!live_page) {
 828		pstat = page_stat__findnew_alloc(&this);
 829		if (pstat == NULL)
 830			return -ENOMEM;
 831
 832		pstat->nr_alloc++;
 833		pstat->alloc_bytes += bytes;
 834		pstat->callsite = callsite;
 835	}
 836
 837	this.callsite = callsite;
 838	pstat = page_stat__findnew_caller(&this);
 839	if (pstat == NULL)
 840		return -ENOMEM;
 841
 842	pstat->nr_alloc++;
 843	pstat->alloc_bytes += bytes;
 844
 845	order_stats[order][migrate_type]++;
 846
 847	return 0;
 848}
 849
 850static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
 851						struct perf_sample *sample)
 852{
 853	u64 page;
 854	unsigned int order = perf_evsel__intval(evsel, sample, "order");
 855	u64 bytes = kmem_page_size << order;
 856	struct page_stat *pstat;
 857	struct page_stat this = {
 858		.order = order,
 859	};
 860
 861	if (use_pfn)
 862		page = perf_evsel__intval(evsel, sample, "pfn");
 863	else
 864		page = perf_evsel__intval(evsel, sample, "page");
 865
 866	nr_page_frees++;
 867	total_page_free_bytes += bytes;
 868
 869	this.page = page;
 870	pstat = page_stat__find_page(&this);
 871	if (pstat == NULL) {
 872		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
 873			  page, order);
 874
 875		nr_page_nomatch++;
 876		total_page_nomatch_bytes += bytes;
 877
 878		return 0;
 879	}
 880
 881	this.gfp_flags = pstat->gfp_flags;
 882	this.migrate_type = pstat->migrate_type;
 883	this.callsite = pstat->callsite;
 884
 885	rb_erase(&pstat->node, &page_live_tree);
 886	free(pstat);
 887
 888	if (live_page) {
 889		order_stats[this.order][this.migrate_type]--;
 890	} else {
 891		pstat = page_stat__find_alloc(&this);
 892		if (pstat == NULL)
 893			return -ENOMEM;
 894
 895		pstat->nr_free++;
 896		pstat->free_bytes += bytes;
 897	}
 898
 899	pstat = page_stat__find_caller(&this);
 900	if (pstat == NULL)
 901		return -ENOENT;
 902
 903	pstat->nr_free++;
 904	pstat->free_bytes += bytes;
 905
 906	if (live_page) {
 907		pstat->nr_alloc--;
 908		pstat->alloc_bytes -= bytes;
 909
 910		if (pstat->nr_alloc == 0) {
 911			rb_erase(&pstat->node, &page_caller_tree);
 912			free(pstat);
 913		}
 914	}
 915
 916	return 0;
 917}
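/*
 * With --live, a matched free is subtracted from the caller stats so only
 * pages still outstanding at the end of the trace remain; without it, the
 * free is added to the corresponding page_alloc_tree entry and both the
 * alloc and free totals are reported.
 */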
 918
 919static bool perf_kmem__skip_sample(struct perf_sample *sample)
 920{
 921	/* skip sample based on time? */
 922	if (perf_time__skip_sample(&ptime, sample->time))
 923		return true;
 924
 925	return false;
 926}
 927
 928typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
 929				  struct perf_sample *sample);
 930
 931static int process_sample_event(struct perf_tool *tool __maybe_unused,
 932				union perf_event *event,
 933				struct perf_sample *sample,
 934				struct perf_evsel *evsel,
 935				struct machine *machine)
 936{
 937	int err = 0;
 938	struct thread *thread = machine__findnew_thread(machine, sample->pid,
 939							sample->tid);
 940
 941	if (thread == NULL) {
 942		pr_debug("problem processing %d event, skipping it.\n",
 943			 event->header.type);
 944		return -1;
 945	}
 946
 947	if (perf_kmem__skip_sample(sample))
 948		return 0;
 949
 950	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
 951
 952	if (evsel->handler != NULL) {
 953		tracepoint_handler f = evsel->handler;
 954		err = f(evsel, sample);
 955	}
 956
 957	thread__put(thread);
 958
 959	return err;
 960}
 961
 962static struct perf_tool perf_kmem = {
 963	.sample		 = process_sample_event,
 964	.comm		 = perf_event__process_comm,
 965	.mmap		 = perf_event__process_mmap,
 966	.mmap2		 = perf_event__process_mmap2,
 967	.ordered_events	 = true,
 968};
 969
 970static double fragmentation(unsigned long n_req, unsigned long n_alloc)
 971{
 972	if (n_alloc == 0)
 973		return 0.0;
 974	else
 975		return 100.0 - (100.0 * n_req / n_alloc);
 976}
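/*
 * Worked example: bytes_req = 100 and bytes_alloc = 128 (a 100-byte kmalloc
 * served from the kmalloc-128 cache) gives
 * 100.0 - 100.0 * 100 / 128 = 21.875% internal fragmentation.
 */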
 977
 978static void __print_slab_result(struct rb_root *root,
 979				struct perf_session *session,
 980				int n_lines, int is_caller)
 981{
 982	struct rb_node *next;
 983	struct machine *machine = &session->machines.host;
 984
 985	printf("%.105s\n", graph_dotted_line);
 986	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
 987	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
 988	printf("%.105s\n", graph_dotted_line);
 989
 990	next = rb_first(root);
 991
 992	while (next && n_lines--) {
 993		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
 994						   node);
 995		struct symbol *sym = NULL;
 996		struct map *map;
 997		char buf[BUFSIZ];
 998		u64 addr;
 999
1000		if (is_caller) {
1001			addr = data->call_site;
1002			if (!raw_ip)
1003				sym = machine__find_kernel_function(machine, addr, &map);
1004		} else
1005			addr = data->ptr;
1006
1007		if (sym != NULL)
1008			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
1009				 addr - map->unmap_ip(map, sym->start));
1010		else
1011			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
1012		printf(" %-34s |", buf);
1013
1014		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
1015		       (unsigned long long)data->bytes_alloc,
1016		       (unsigned long)data->bytes_alloc / data->hit,
1017		       (unsigned long long)data->bytes_req,
1018		       (unsigned long)data->bytes_req / data->hit,
1019		       (unsigned long)data->hit,
1020		       (unsigned long)data->pingpong,
1021		       fragmentation(data->bytes_req, data->bytes_alloc));
1022
1023		next = rb_next(next);
1024	}
1025
1026	if (n_lines == -1)
1027		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");
1028
1029	printf("%.105s\n", graph_dotted_line);
1030}
1031
1032static const char * const migrate_type_str[] = {
1033	"UNMOVABL",
1034	"RECLAIM",
1035	"MOVABLE",
1036	"RESERVED",
1037	"CMA/ISLT",
1038	"UNKNOWN",
1039};
1040
1041static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1042{
1043	struct rb_node *next = rb_first(&page_alloc_sorted);
1044	struct machine *machine = &session->machines.host;
1045	const char *format;
1046	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1047
1048	printf("\n%.105s\n", graph_dotted_line);
1049	printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1050	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
1051	       gfp_len, "GFP flags");
1052	printf("%.105s\n", graph_dotted_line);
1053
1054	if (use_pfn)
1055		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1056	else
1057		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1058
1059	while (next && n_lines--) {
1060		struct page_stat *data;
1061		struct symbol *sym;
1062		struct map *map;
1063		char buf[32];
1064		char *caller = buf;
1065
1066		data = rb_entry(next, struct page_stat, node);
1067		sym = machine__find_kernel_function(machine, data->callsite, &map);
1068		if (sym && sym->name)
1069			caller = sym->name;
1070		else
1071			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1072
1073		printf(format, (unsigned long long)data->page,
1074		       (unsigned long long)data->alloc_bytes / 1024,
1075		       data->nr_alloc, data->order,
1076		       migrate_type_str[data->migrate_type],
1077		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1078
1079		next = rb_next(next);
1080	}
1081
1082	if (n_lines == -1) {
1083		printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
1084		       gfp_len, "...");
1085	}
1086
1087	printf("%.105s\n", graph_dotted_line);
1088}
1089
1090static void __print_page_caller_result(struct perf_session *session, int n_lines)
1091{
1092	struct rb_node *next = rb_first(&page_caller_sorted);
1093	struct machine *machine = &session->machines.host;
1094	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1095
1096	printf("\n%.105s\n", graph_dotted_line);
1097	printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1098	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
1099	printf("%.105s\n", graph_dotted_line);
1100
1101	while (next && n_lines--) {
1102		struct page_stat *data;
1103		struct symbol *sym;
1104		struct map *map;
1105		char buf[32];
1106		char *caller = buf;
1107
1108		data = rb_entry(next, struct page_stat, node);
1109		sym = machine__find_kernel_function(machine, data->callsite, &map);
1110		if (sym && sym->name)
1111			caller = sym->name;
1112		else
1113			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1114
1115		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
1116		       (unsigned long long)data->alloc_bytes / 1024,
1117		       data->nr_alloc, data->order,
1118		       migrate_type_str[data->migrate_type],
1119		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1120
1121		next = rb_next(next);
1122	}
1123
1124	if (n_lines == -1) {
1125		printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
1126		       gfp_len, "...");
1127	}
1128
1129	printf("%.105s\n", graph_dotted_line);
1130}
1131
1132static void print_gfp_flags(void)
1133{
1134	int i;
1135
1136	printf("#\n");
1137	printf("# GFP flags\n");
1138	printf("# ---------\n");
1139	for (i = 0; i < nr_gfps; i++) {
1140		printf("# %08x: %*s: %s\n", gfps[i].flags,
1141		       (int) max_gfp_len, gfps[i].compact_str,
1142		       gfps[i].human_readable);
1143	}
1144}
1145
1146static void print_slab_summary(void)
1147{
1148	printf("\nSUMMARY (SLAB allocator)");
1149	printf("\n========================\n");
1150	printf("Total bytes requested: %'lu\n", total_requested);
1151	printf("Total bytes allocated: %'lu\n", total_allocated);
1152	printf("Total bytes freed:     %'lu\n", total_freed);
1153	if (total_allocated > total_freed) {
1154		printf("Net total bytes allocated: %'lu\n",
1155		total_allocated - total_freed);
1156	}
1157	printf("Total bytes wasted on internal fragmentation: %'lu\n",
1158	       total_allocated - total_requested);
1159	printf("Internal fragmentation: %f%%\n",
1160	       fragmentation(total_requested, total_allocated));
1161	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
1162}
1163
1164static void print_page_summary(void)
1165{
1166	int o, m;
1167	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
1168	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
1169
1170	printf("\nSUMMARY (page allocator)");
1171	printf("\n========================\n");
1172	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
1173	       nr_page_allocs, total_page_alloc_bytes / 1024);
1174	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
1175	       nr_page_frees, total_page_free_bytes / 1024);
1176	printf("\n");
1177
1178	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
1179	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
1180	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
1181	       nr_page_allocs - nr_alloc_freed,
1182	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
1183	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
1184	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
1185	printf("\n");
1186
1187	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
1188	       nr_page_fails, total_page_fail_bytes / 1024);
1189	printf("\n");
1190
1191	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
1192	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
1193	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
1194	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
1195	       graph_dotted_line, graph_dotted_line);
1196
1197	for (o = 0; o < MAX_PAGE_ORDER; o++) {
1198		printf("%5d", o);
1199		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
1200			if (order_stats[o][m])
1201				printf("  %'12d", order_stats[o][m]);
1202			else
1203				printf("  %12c", '.');
1204		}
1205		printf("\n");
1206	}
1207}
1208
1209static void print_slab_result(struct perf_session *session)
1210{
1211	if (caller_flag)
1212		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
1213	if (alloc_flag)
1214		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
1215	print_slab_summary();
1216}
1217
1218static void print_page_result(struct perf_session *session)
1219{
1220	if (caller_flag || alloc_flag)
1221		print_gfp_flags();
1222	if (caller_flag)
1223		__print_page_caller_result(session, caller_lines);
1224	if (alloc_flag)
1225		__print_page_alloc_result(session, alloc_lines);
1226	print_page_summary();
1227}
1228
1229static void print_result(struct perf_session *session)
1230{
1231	if (kmem_slab)
1232		print_slab_result(session);
1233	if (kmem_page)
1234		print_page_result(session);
1235}
1236
1237static LIST_HEAD(slab_caller_sort);
1238static LIST_HEAD(slab_alloc_sort);
1239static LIST_HEAD(page_caller_sort);
1240static LIST_HEAD(page_alloc_sort);
1241
1242static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
1243			     struct list_head *sort_list)
1244{
1245	struct rb_node **new = &(root->rb_node);
1246	struct rb_node *parent = NULL;
1247	struct sort_dimension *sort;
1248
1249	while (*new) {
1250		struct alloc_stat *this;
1251		int cmp = 0;
1252
1253		this = rb_entry(*new, struct alloc_stat, node);
1254		parent = *new;
1255
1256		list_for_each_entry(sort, sort_list, list) {
1257			cmp = sort->cmp(data, this);
1258			if (cmp)
1259				break;
1260		}
1261
1262		if (cmp > 0)
1263			new = &((*new)->rb_left);
1264		else
1265			new = &((*new)->rb_right);
1266	}
1267
1268	rb_link_node(&data->node, parent, new);
1269	rb_insert_color(&data->node, root);
1270}
1271
1272static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1273			       struct list_head *sort_list)
1274{
1275	struct rb_node *node;
1276	struct alloc_stat *data;
1277
1278	for (;;) {
1279		node = rb_first(root);
1280		if (!node)
1281			break;
1282
1283		rb_erase(node, root);
1284		data = rb_entry(node, struct alloc_stat, node);
1285		sort_slab_insert(root_sorted, data, sort_list);
1286	}
1287}
1288
1289static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1290			     struct list_head *sort_list)
1291{
1292	struct rb_node **new = &root->rb_node;
1293	struct rb_node *parent = NULL;
1294	struct sort_dimension *sort;
1295
1296	while (*new) {
1297		struct page_stat *this;
1298		int cmp = 0;
1299
1300		this = rb_entry(*new, struct page_stat, node);
1301		parent = *new;
1302
1303		list_for_each_entry(sort, sort_list, list) {
1304			cmp = sort->cmp(data, this);
1305			if (cmp)
1306				break;
1307		}
1308
1309		if (cmp > 0)
1310			new = &parent->rb_left;
1311		else
1312			new = &parent->rb_right;
1313	}
1314
1315	rb_link_node(&data->node, parent, new);
1316	rb_insert_color(&data->node, root);
1317}
1318
1319static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1320			       struct list_head *sort_list)
1321{
1322	struct rb_node *node;
1323	struct page_stat *data;
1324
1325	for (;;) {
1326		node = rb_first(root);
1327		if (!node)
1328			break;
1329
1330		rb_erase(node, root);
1331		data = rb_entry(node, struct page_stat, node);
1332		sort_page_insert(root_sorted, data, sort_list);
1333	}
1334}
1335
1336static void sort_result(void)
1337{
1338	if (kmem_slab) {
1339		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
1340				   &slab_alloc_sort);
1341		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
1342				   &slab_caller_sort);
1343	}
1344	if (kmem_page) {
1345		if (live_page)
1346			__sort_page_result(&page_live_tree, &page_alloc_sorted,
1347					   &page_alloc_sort);
1348		else
1349			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
1350					   &page_alloc_sort);
1351
1352		__sort_page_result(&page_caller_tree, &page_caller_sorted,
1353				   &page_caller_sort);
1354	}
1355}
1356
1357static int __cmd_kmem(struct perf_session *session)
1358{
1359	int err = -EINVAL;
1360	struct perf_evsel *evsel;
1361	const struct perf_evsel_str_handler kmem_tracepoints[] = {
1362		/* slab allocator */
1363		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
1364		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
1365		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
1366		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
1367		{ "kmem:kfree",			perf_evsel__process_free_event, },
1368		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
1369		/* page allocator */
1370		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
1371		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
1372	};
1373
1374	if (!perf_session__has_traces(session, "kmem record"))
1375		goto out;
1376
1377	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1378		pr_err("Initializing perf session tracepoint handlers failed\n");
1379		goto out;
1380	}
1381
1382	evlist__for_each_entry(session->evlist, evsel) {
1383		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
1384		    perf_evsel__field(evsel, "pfn")) {
1385			use_pfn = true;
1386			break;
1387		}
1388	}
1389
1390	setup_pager();
1391	err = perf_session__process_events(session);
1392	if (err != 0) {
1393		pr_err("error during process events: %d\n", err);
1394		goto out;
1395	}
1396	sort_result();
1397	print_result(session);
1398out:
1399	return err;
1400}
1401
1402/* slab sort keys */
1403static int ptr_cmp(void *a, void *b)
1404{
1405	struct alloc_stat *l = a;
1406	struct alloc_stat *r = b;
1407
1408	if (l->ptr < r->ptr)
1409		return -1;
1410	else if (l->ptr > r->ptr)
1411		return 1;
1412	return 0;
1413}
1414
1415static struct sort_dimension ptr_sort_dimension = {
1416	.name	= "ptr",
1417	.cmp	= ptr_cmp,
1418};
1419
1420static int slab_callsite_cmp(void *a, void *b)
1421{
1422	struct alloc_stat *l = a;
1423	struct alloc_stat *r = b;
1424
1425	if (l->call_site < r->call_site)
1426		return -1;
1427	else if (l->call_site > r->call_site)
1428		return 1;
1429	return 0;
1430}
1431
1432static struct sort_dimension callsite_sort_dimension = {
1433	.name	= "callsite",
1434	.cmp	= slab_callsite_cmp,
1435};
1436
1437static int hit_cmp(void *a, void *b)
1438{
1439	struct alloc_stat *l = a;
1440	struct alloc_stat *r = b;
1441
1442	if (l->hit < r->hit)
1443		return -1;
1444	else if (l->hit > r->hit)
1445		return 1;
1446	return 0;
1447}
1448
1449static struct sort_dimension hit_sort_dimension = {
1450	.name	= "hit",
1451	.cmp	= hit_cmp,
1452};
1453
1454static int bytes_cmp(void *a, void *b)
1455{
1456	struct alloc_stat *l = a;
1457	struct alloc_stat *r = b;
1458
1459	if (l->bytes_alloc < r->bytes_alloc)
1460		return -1;
1461	else if (l->bytes_alloc > r->bytes_alloc)
1462		return 1;
1463	return 0;
1464}
1465
1466static struct sort_dimension bytes_sort_dimension = {
1467	.name	= "bytes",
1468	.cmp	= bytes_cmp,
1469};
1470
1471static int frag_cmp(void *a, void *b)
1472{
1473	double x, y;
1474	struct alloc_stat *l = a;
1475	struct alloc_stat *r = b;
1476
1477	x = fragmentation(l->bytes_req, l->bytes_alloc);
1478	y = fragmentation(r->bytes_req, r->bytes_alloc);
1479
1480	if (x < y)
1481		return -1;
1482	else if (x > y)
1483		return 1;
1484	return 0;
1485}
1486
1487static struct sort_dimension frag_sort_dimension = {
1488	.name	= "frag",
1489	.cmp	= frag_cmp,
1490};
1491
1492static int pingpong_cmp(void *a, void *b)
1493{
1494	struct alloc_stat *l = a;
1495	struct alloc_stat *r = b;
1496
1497	if (l->pingpong < r->pingpong)
1498		return -1;
1499	else if (l->pingpong > r->pingpong)
1500		return 1;
1501	return 0;
1502}
1503
1504static struct sort_dimension pingpong_sort_dimension = {
1505	.name	= "pingpong",
1506	.cmp	= pingpong_cmp,
1507};
1508
1509/* page sort keys */
1510static int page_cmp(void *a, void *b)
1511{
1512	struct page_stat *l = a;
1513	struct page_stat *r = b;
1514
1515	if (l->page < r->page)
1516		return -1;
1517	else if (l->page > r->page)
1518		return 1;
1519	return 0;
1520}
1521
1522static struct sort_dimension page_sort_dimension = {
1523	.name	= "page",
1524	.cmp	= page_cmp,
1525};
1526
1527static int page_callsite_cmp(void *a, void *b)
1528{
1529	struct page_stat *l = a;
1530	struct page_stat *r = b;
1531
1532	if (l->callsite < r->callsite)
1533		return -1;
1534	else if (l->callsite > r->callsite)
1535		return 1;
1536	return 0;
1537}
1538
1539static struct sort_dimension page_callsite_sort_dimension = {
1540	.name	= "callsite",
1541	.cmp	= page_callsite_cmp,
1542};
1543
1544static int page_hit_cmp(void *a, void *b)
1545{
1546	struct page_stat *l = a;
1547	struct page_stat *r = b;
1548
1549	if (l->nr_alloc < r->nr_alloc)
1550		return -1;
1551	else if (l->nr_alloc > r->nr_alloc)
1552		return 1;
1553	return 0;
1554}
1555
1556static struct sort_dimension page_hit_sort_dimension = {
1557	.name	= "hit",
1558	.cmp	= page_hit_cmp,
1559};
1560
1561static int page_bytes_cmp(void *a, void *b)
1562{
1563	struct page_stat *l = a;
1564	struct page_stat *r = b;
1565
1566	if (l->alloc_bytes < r->alloc_bytes)
1567		return -1;
1568	else if (l->alloc_bytes > r->alloc_bytes)
1569		return 1;
1570	return 0;
1571}
1572
1573static struct sort_dimension page_bytes_sort_dimension = {
1574	.name	= "bytes",
1575	.cmp	= page_bytes_cmp,
1576};
1577
1578static int page_order_cmp(void *a, void *b)
1579{
1580	struct page_stat *l = a;
1581	struct page_stat *r = b;
1582
1583	if (l->order < r->order)
1584		return -1;
1585	else if (l->order > r->order)
1586		return 1;
1587	return 0;
1588}
1589
1590static struct sort_dimension page_order_sort_dimension = {
1591	.name	= "order",
1592	.cmp	= page_order_cmp,
1593};
1594
1595static int migrate_type_cmp(void *a, void *b)
1596{
1597	struct page_stat *l = a;
1598	struct page_stat *r = b;
1599
1600	/* for internal use to find free'd page */
1601	if (l->migrate_type == -1U)
1602		return 0;
1603
1604	if (l->migrate_type < r->migrate_type)
1605		return -1;
1606	else if (l->migrate_type > r->migrate_type)
1607		return 1;
1608	return 0;
1609}
1610
1611static struct sort_dimension migrate_type_sort_dimension = {
1612	.name	= "migtype",
1613	.cmp	= migrate_type_cmp,
1614};
1615
1616static int gfp_flags_cmp(void *a, void *b)
1617{
1618	struct page_stat *l = a;
1619	struct page_stat *r = b;
1620
1621	/* for internal use to find free'd page */
1622	if (l->gfp_flags == -1U)
1623		return 0;
1624
1625	if (l->gfp_flags < r->gfp_flags)
1626		return -1;
1627	else if (l->gfp_flags > r->gfp_flags)
1628		return 1;
1629	return 0;
1630}
1631
1632static struct sort_dimension gfp_flags_sort_dimension = {
1633	.name	= "gfp",
1634	.cmp	= gfp_flags_cmp,
1635};
1636
1637static struct sort_dimension *slab_sorts[] = {
1638	&ptr_sort_dimension,
1639	&callsite_sort_dimension,
1640	&hit_sort_dimension,
1641	&bytes_sort_dimension,
1642	&frag_sort_dimension,
1643	&pingpong_sort_dimension,
1644};
1645
1646static struct sort_dimension *page_sorts[] = {
1647	&page_sort_dimension,
1648	&page_callsite_sort_dimension,
1649	&page_hit_sort_dimension,
1650	&page_bytes_sort_dimension,
1651	&page_order_sort_dimension,
1652	&migrate_type_sort_dimension,
1653	&gfp_flags_sort_dimension,
1654};
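/*
 * Illustrative use: "perf kmem stat --page --caller --sort hit,bytes" looks
 * "hit" and "bytes" up in page_sorts[] above and orders the caller table by
 * allocation count, then allocated bytes, largest first.
 */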
1655
1656static int slab_sort_dimension__add(const char *tok, struct list_head *list)
1657{
1658	struct sort_dimension *sort;
1659	int i;
1660
1661	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1662		if (!strcmp(slab_sorts[i]->name, tok)) {
1663			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
1664			if (!sort) {
1665				pr_err("%s: memdup failed\n", __func__);
1666				return -1;
1667			}
1668			list_add_tail(&sort->list, list);
1669			return 0;
1670		}
1671	}
1672
1673	return -1;
1674}
1675
1676static int page_sort_dimension__add(const char *tok, struct list_head *list)
1677{
1678	struct sort_dimension *sort;
1679	int i;
1680
1681	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1682		if (!strcmp(page_sorts[i]->name, tok)) {
1683			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1684			if (!sort) {
1685				pr_err("%s: memdup failed\n", __func__);
1686				return -1;
1687			}
1688			list_add_tail(&sort->list, list);
1689			return 0;
1690		}
1691	}
1692
1693	return -1;
1694}
1695
1696static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
1697{
1698	char *tok;
1699	char *str = strdup(arg);
1700	char *pos = str;
1701
1702	if (!str) {
1703		pr_err("%s: strdup failed\n", __func__);
1704		return -1;
1705	}
1706
1707	while (true) {
1708		tok = strsep(&pos, ",");
1709		if (!tok)
1710			break;
1711		if (slab_sort_dimension__add(tok, sort_list) < 0) {
1712			error("Unknown slab --sort key: '%s'", tok);
1713			free(str);
1714			return -1;
1715		}
1716	}
1717
1718	free(str);
1719	return 0;
1720}
1721
1722static int setup_page_sorting(struct list_head *sort_list, const char *arg)
1723{
1724	char *tok;
1725	char *str = strdup(arg);
1726	char *pos = str;
1727
1728	if (!str) {
1729		pr_err("%s: strdup failed\n", __func__);
1730		return -1;
1731	}
1732
1733	while (true) {
1734		tok = strsep(&pos, ",");
1735		if (!tok)
1736			break;
1737		if (page_sort_dimension__add(tok, sort_list) < 0) {
1738			error("Unknown page --sort key: '%s'", tok);
1739			free(str);
1740			return -1;
1741		}
1742	}
1743
1744	free(str);
1745	return 0;
1746}
1747
1748static int parse_sort_opt(const struct option *opt __maybe_unused,
1749			  const char *arg, int unset __maybe_unused)
1750{
1751	if (!arg)
1752		return -1;
1753
1754	if (kmem_page > kmem_slab ||
1755	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
1756		if (caller_flag > alloc_flag)
1757			return setup_page_sorting(&page_caller_sort, arg);
1758		else
1759			return setup_page_sorting(&page_alloc_sort, arg);
1760	} else {
1761		if (caller_flag > alloc_flag)
1762			return setup_slab_sorting(&slab_caller_sort, arg);
1763		else
1764			return setup_slab_sorting(&slab_alloc_sort, arg);
1765	}
1766
1767	return 0;
1768}
1769
1770static int parse_caller_opt(const struct option *opt __maybe_unused,
1771			    const char *arg __maybe_unused,
1772			    int unset __maybe_unused)
1773{
1774	caller_flag = (alloc_flag + 1);
1775	return 0;
1776}
1777
1778static int parse_alloc_opt(const struct option *opt __maybe_unused,
1779			   const char *arg __maybe_unused,
1780			   int unset __maybe_unused)
1781{
1782	alloc_flag = (caller_flag + 1);
1783	return 0;
1784}
1785
1786static int parse_slab_opt(const struct option *opt __maybe_unused,
1787			  const char *arg __maybe_unused,
1788			  int unset __maybe_unused)
1789{
1790	kmem_slab = (kmem_page + 1);
1791	return 0;
1792}
1793
1794static int parse_page_opt(const struct option *opt __maybe_unused,
1795			  const char *arg __maybe_unused,
1796			  int unset __maybe_unused)
1797{
1798	kmem_page = (kmem_slab + 1);
1799	return 0;
1800}
1801
1802static int parse_line_opt(const struct option *opt __maybe_unused,
1803			  const char *arg, int unset __maybe_unused)
1804{
1805	int lines;
1806
1807	if (!arg)
1808		return -1;
1809
1810	lines = strtoul(arg, NULL, 10);
1811
1812	if (caller_flag > alloc_flag)
1813		caller_lines = lines;
1814	else
1815		alloc_lines = lines;
1816
1817	return 0;
1818}
1819
1820static int __cmd_record(int argc, const char **argv)
1821{
1822	const char * const record_args[] = {
1823	"record", "-a", "-R", "-c", "1",
1824	};
1825	const char * const slab_events[] = {
1826	"-e", "kmem:kmalloc",
1827	"-e", "kmem:kmalloc_node",
1828	"-e", "kmem:kfree",
1829	"-e", "kmem:kmem_cache_alloc",
1830	"-e", "kmem:kmem_cache_alloc_node",
1831	"-e", "kmem:kmem_cache_free",
1832	};
1833	const char * const page_events[] = {
1834	"-e", "kmem:mm_page_alloc",
1835	"-e", "kmem:mm_page_free",
1836	};
1837	unsigned int rec_argc, i, j;
1838	const char **rec_argv;
1839
1840	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1841	if (kmem_slab)
1842		rec_argc += ARRAY_SIZE(slab_events);
1843	if (kmem_page)
1844		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
1845
1846	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1847
1848	if (rec_argv == NULL)
1849		return -ENOMEM;
1850
1851	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1852		rec_argv[i] = strdup(record_args[i]);
1853
1854	if (kmem_slab) {
1855		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
1856			rec_argv[i] = strdup(slab_events[j]);
1857	}
1858	if (kmem_page) {
1859		rec_argv[i++] = strdup("-g");
1860
1861		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
1862			rec_argv[i] = strdup(page_events[j]);
1863	}
1864
1865	for (j = 1; j < (unsigned int)argc; j++, i++)
1866		rec_argv[i] = argv[j];
1867
1868	return cmd_record(i, rec_argv, NULL);
1869}
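/*
 * Illustrative expansion: in the default slab mode, "perf kmem record sleep 1"
 * builds an argv equivalent to
 *   perf record -a -R -c 1 -e kmem:kmalloc ... -e kmem:kmem_cache_free sleep 1
 * and hands it to cmd_record().
 */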
1870
1871static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
1872{
1873	if (!strcmp(var, "kmem.default")) {
1874		if (!strcmp(value, "slab"))
1875			kmem_default = KMEM_SLAB;
1876		else if (!strcmp(value, "page"))
1877			kmem_default = KMEM_PAGE;
1878		else
1879			pr_err("invalid default value ('slab' or 'page' required): %s\n",
1880			       value);
1881		return 0;
1882	}
1883
1884	return 0;
1885}
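/*
 * Illustrative ~/.perfconfig snippet that makes the page allocator the
 * default analysis mode:
 *
 *   [kmem]
 *           default = page
 */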
1886
1887int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
1888{
1889	const char * const default_slab_sort = "frag,hit,bytes";
1890	const char * const default_page_sort = "bytes,hit";
1891	struct perf_data_file file = {
1892		.mode = PERF_DATA_MODE_READ,
1893	};
1894	const struct option kmem_options[] = {
1895	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1896	OPT_INCR('v', "verbose", &verbose,
1897		    "be more verbose (show symbol address, etc)"),
1898	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1899			   "show per-callsite statistics", parse_caller_opt),
1900	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1901			   "show per-allocation statistics", parse_alloc_opt),
1902	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
1903		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1904		     "page, order, migtype, gfp", parse_sort_opt),
1905	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1906	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1907	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
1908	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1909			   parse_slab_opt),
1910	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1911			   parse_page_opt),
1912	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
1913	OPT_STRING(0, "time", &time_str, "str",
1914		   "Time span of interest (start,stop)"),
1915	OPT_END()
1916	};
1917	const char *const kmem_subcommands[] = { "record", "stat", NULL };
1918	const char *kmem_usage[] = {
1919		NULL,
1920		NULL
1921	};
1922	struct perf_session *session;
1923	int ret = -1;
1924	const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
1925
1926	perf_config(kmem_config, NULL);
1927	argc = parse_options_subcommand(argc, argv, kmem_options,
1928					kmem_subcommands, kmem_usage, 0);
1929
1930	if (!argc)
1931		usage_with_options(kmem_usage, kmem_options);
1932
1933	if (kmem_slab == 0 && kmem_page == 0) {
1934		if (kmem_default == KMEM_SLAB)
1935			kmem_slab = 1;
1936		else
1937			kmem_page = 1;
1938	}
1939
1940	if (!strncmp(argv[0], "rec", 3)) {
1941		symbol__init(NULL);
1942		return __cmd_record(argc, argv);
1943	}
1944
1945	file.path = input_name;
1946
1947	kmem_session = session = perf_session__new(&file, false, &perf_kmem);
1948	if (session == NULL)
1949		return -1;
1950
1951	if (kmem_slab) {
1952		if (!perf_evlist__find_tracepoint_by_name(session->evlist,
1953							  "kmem:kmalloc")) {
1954			pr_err(errmsg, "slab", "slab");
1955			goto out_delete;
1956		}
1957	}
1958
1959	if (kmem_page) {
1960		struct perf_evsel *evsel;
1961
1962		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
1963							     "kmem:mm_page_alloc");
1964		if (evsel == NULL) {
1965			pr_err(errmsg, "page", "page");
1966			goto out_delete;
1967		}
1968
1969		kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
1970		symbol_conf.use_callchain = true;
1971	}
1972
1973	symbol__init(&session->header.env);
1974
1975	if (perf_time__parse_str(&ptime, time_str) != 0) {
1976		pr_err("Invalid time string\n");
1977		return -EINVAL;
1978	}
1979
1980	if (!strcmp(argv[0], "stat")) {
1981		setlocale(LC_ALL, "");
1982
1983		if (cpu__setup_cpunode_map())
1984			goto out_delete;
1985
1986		if (list_empty(&slab_caller_sort))
1987			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
1988		if (list_empty(&slab_alloc_sort))
1989			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
1990		if (list_empty(&page_caller_sort))
1991			setup_page_sorting(&page_caller_sort, default_page_sort);
1992		if (list_empty(&page_alloc_sort))
1993			setup_page_sorting(&page_alloc_sort, default_page_sort);
1994
1995		if (kmem_page) {
1996			setup_page_sorting(&page_alloc_sort_input,
1997					   "page,order,migtype,gfp");
1998			setup_page_sorting(&page_caller_sort_input,
1999					   "callsite,order,migtype,gfp");
2000		}
2001		ret = __cmd_kmem(session);
2002	} else
2003		usage_with_options(kmem_usage, kmem_options);
2004
2005out_delete:
2006	perf_session__delete(session);
2007
2008	return ret;
2009}
2010