v4.10.11
 
   1#include "builtin.h"
   2#include "perf.h"
   3
   4#include "util/evlist.h"
   5#include "util/evsel.h"
   6#include "util/util.h"
   7#include "util/config.h"
   8#include "util/symbol.h"
   9#include "util/thread.h"
  10#include "util/header.h"
  11#include "util/session.h"
  12#include "util/tool.h"
  13#include "util/callchain.h"
  14#include "util/time-utils.h"
  15
  16#include <subcmd/parse-options.h>
  17#include "util/trace-event.h"
  18#include "util/data.h"
  19#include "util/cpumap.h"
  20
  21#include "util/debug.h"
  22
  23#include <linux/rbtree.h>
  24#include <linux/string.h>
  25#include <locale.h>
  26#include <regex.h>
  27
  28static int	kmem_slab;
  29static int	kmem_page;
  30
  31static long	kmem_page_size;
  32static enum {
  33	KMEM_SLAB,
  34	KMEM_PAGE,
  35} kmem_default = KMEM_SLAB;  /* for backward compatibility */
  36
  37struct alloc_stat;
  38typedef int (*sort_fn_t)(void *, void *);
  39
  40static int			alloc_flag;
  41static int			caller_flag;
  42
  43static int			alloc_lines = -1;
  44static int			caller_lines = -1;
  45
  46static bool			raw_ip;
  47
  48struct alloc_stat {
  49	u64	call_site;
  50	u64	ptr;
  51	u64	bytes_req;
  52	u64	bytes_alloc;
  53	u64	last_alloc;
  54	u32	hit;
  55	u32	pingpong;
  56
  57	short	alloc_cpu;
  58
  59	struct rb_node node;
  60};
  61
  62static struct rb_root root_alloc_stat;
  63static struct rb_root root_alloc_sorted;
  64static struct rb_root root_caller_stat;
  65static struct rb_root root_caller_sorted;
  66
  67static unsigned long total_requested, total_allocated, total_freed;
  68static unsigned long nr_allocs, nr_cross_allocs;
  69
  70/* filters for controlling start and stop of time of analysis */
  71static struct perf_time_interval ptime;
  72const char *time_str;
  73
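/*
 * insert_alloc_stat() and insert_caller_stat() below use the classic
 * open-coded rbtree insertion: walk down from the root remembering the
 * parent and the link slot, then, if no matching node was found, splice
 * a new one in with rb_link_node() + rb_insert_color().
 */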
  74static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
  75			     int bytes_req, int bytes_alloc, int cpu)
  76{
  77	struct rb_node **node = &root_alloc_stat.rb_node;
  78	struct rb_node *parent = NULL;
  79	struct alloc_stat *data = NULL;
  80
  81	while (*node) {
  82		parent = *node;
  83		data = rb_entry(*node, struct alloc_stat, node);
  84
  85		if (ptr > data->ptr)
  86			node = &(*node)->rb_right;
  87		else if (ptr < data->ptr)
  88			node = &(*node)->rb_left;
  89		else
  90			break;
  91	}
  92
  93	if (data && data->ptr == ptr) {
  94		data->hit++;
  95		data->bytes_req += bytes_req;
  96		data->bytes_alloc += bytes_alloc;
  97	} else {
  98		data = malloc(sizeof(*data));
  99		if (!data) {
 100			pr_err("%s: malloc failed\n", __func__);
 101			return -1;
 102		}
 103		data->ptr = ptr;
 104		data->pingpong = 0;
 105		data->hit = 1;
 106		data->bytes_req = bytes_req;
 107		data->bytes_alloc = bytes_alloc;
 108
 109		rb_link_node(&data->node, parent, node);
 110		rb_insert_color(&data->node, &root_alloc_stat);
 111	}
 112	data->call_site = call_site;
 113	data->alloc_cpu = cpu;
 114	data->last_alloc = bytes_alloc;
 115
 116	return 0;
 117}
 118
 119static int insert_caller_stat(unsigned long call_site,
 120			      int bytes_req, int bytes_alloc)
 121{
 122	struct rb_node **node = &root_caller_stat.rb_node;
 123	struct rb_node *parent = NULL;
 124	struct alloc_stat *data = NULL;
 125
 126	while (*node) {
 127		parent = *node;
 128		data = rb_entry(*node, struct alloc_stat, node);
 129
 130		if (call_site > data->call_site)
 131			node = &(*node)->rb_right;
 132		else if (call_site < data->call_site)
 133			node = &(*node)->rb_left;
 134		else
 135			break;
 136	}
 137
 138	if (data && data->call_site == call_site) {
 139		data->hit++;
 140		data->bytes_req += bytes_req;
 141		data->bytes_alloc += bytes_alloc;
 142	} else {
 143		data = malloc(sizeof(*data));
 144		if (!data) {
 145			pr_err("%s: malloc failed\n", __func__);
 146			return -1;
 147		}
 148		data->call_site = call_site;
 149		data->pingpong = 0;
 150		data->hit = 1;
 151		data->bytes_req = bytes_req;
 152		data->bytes_alloc = bytes_alloc;
 153
 154		rb_link_node(&data->node, parent, node);
 155		rb_insert_color(&data->node, &root_caller_stat);
 156	}
 157
 158	return 0;
 159}
 160
 161static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
 162					   struct perf_sample *sample)
 163{
 164	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
 165		      call_site = perf_evsel__intval(evsel, sample, "call_site");
 166	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
 167	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
 168
 169	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
 170	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
 171		return -1;
 172
 173	total_requested += bytes_req;
 174	total_allocated += bytes_alloc;
 175
 176	nr_allocs++;
 177	return 0;
 178}
 179
 180static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
 181						struct perf_sample *sample)
 182{
 183	int ret = perf_evsel__process_alloc_event(evsel, sample);
 184
 185	if (!ret) {
 186		int node1 = cpu__get_node(sample->cpu),
 187		    node2 = perf_evsel__intval(evsel, sample, "node");
 188
 189		if (node1 != node2)
 190			nr_cross_allocs++;
 191	}
 192
 193	return ret;
 194}
 195
 196static int ptr_cmp(void *, void *);
 197static int slab_callsite_cmp(void *, void *);
 198
 199static struct alloc_stat *search_alloc_stat(unsigned long ptr,
 200					    unsigned long call_site,
 201					    struct rb_root *root,
 202					    sort_fn_t sort_fn)
 203{
 204	struct rb_node *node = root->rb_node;
 205	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
 206
 207	while (node) {
 208		struct alloc_stat *data;
 209		int cmp;
 210
 211		data = rb_entry(node, struct alloc_stat, node);
 212
 213		cmp = sort_fn(&key, data);
 214		if (cmp < 0)
 215			node = node->rb_left;
 216		else if (cmp > 0)
 217			node = node->rb_right;
 218		else
 219			return data;
 220	}
 221	return NULL;
 222}
 223
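/*
 * A "pingpong" is an object freed on a different CPU than the one that
 * allocated it; the free handler below bumps the counter both on the
 * pointer's entry and on its allocation callsite's entry.
 */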
 224static int perf_evsel__process_free_event(struct perf_evsel *evsel,
 225					  struct perf_sample *sample)
 226{
 227	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
 228	struct alloc_stat *s_alloc, *s_caller;
 229
 230	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
 231	if (!s_alloc)
 232		return 0;
 233
 234	total_freed += s_alloc->last_alloc;
 235
 236	if ((short)sample->cpu != s_alloc->alloc_cpu) {
 237		s_alloc->pingpong++;
 238
 239		s_caller = search_alloc_stat(0, s_alloc->call_site,
 240					     &root_caller_stat,
 241					     slab_callsite_cmp);
 242		if (!s_caller)
 243			return -1;
 244		s_caller->pingpong++;
 245	}
 246	s_alloc->alloc_cpu = -1;
 247
 248	return 0;
 249}
 250
 251static u64 total_page_alloc_bytes;
 252static u64 total_page_free_bytes;
 253static u64 total_page_nomatch_bytes;
 254static u64 total_page_fail_bytes;
 255static unsigned long nr_page_allocs;
 256static unsigned long nr_page_frees;
 257static unsigned long nr_page_fails;
 258static unsigned long nr_page_nomatch;
 259
 260static bool use_pfn;
 261static bool live_page;
 262static struct perf_session *kmem_session;
 263
 264#define MAX_MIGRATE_TYPES  6
 265#define MAX_PAGE_ORDER     11
 266
 267static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
 268
 269struct page_stat {
 270	struct rb_node 	node;
 271	u64 		page;
 272	u64 		callsite;
 273	int 		order;
 274	unsigned 	gfp_flags;
 275	unsigned 	migrate_type;
 276	u64		alloc_bytes;
 277	u64 		free_bytes;
 278	int 		nr_alloc;
 279	int 		nr_free;
 280};
 281
 282static struct rb_root page_live_tree;
 283static struct rb_root page_alloc_tree;
 284static struct rb_root page_alloc_sorted;
 285static struct rb_root page_caller_tree;
 286static struct rb_root page_caller_sorted;
 287
 288struct alloc_func {
 289	u64 start;
 290	u64 end;
 291	char *name;
 292};
 293
 294static int nr_alloc_funcs;
 295static struct alloc_func *alloc_func_list;
 296
 297static int funcmp(const void *a, const void *b)
 298{
 299	const struct alloc_func *fa = a;
 300	const struct alloc_func *fb = b;
 301
 302	if (fa->start > fb->start)
 303		return 1;
 304	else
 305		return -1;
 306}
 307
 308static int callcmp(const void *a, const void *b)
 309{
 310	const struct alloc_func *fa = a;
 311	const struct alloc_func *fb = b;
 312
 313	if (fb->start <= fa->start && fa->end < fb->end)
 314		return 0;
 315
 316	if (fa->start > fb->start)
 317		return 1;
 318	else
 319		return -1;
 320}
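
/*
 * Editor's standalone sketch (not part of builtin-kmem.c): the
 * qsort()/bsearch() pairing that build_alloc_func_list() and
 * find_callsite() below build on funcmp()/callcmp() above -- one
 * comparator orders entries by ->start, the other treats the key as a
 * single address inside a [start, end) range.  The addresses and symbol
 * names are made up for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long start, end; const char *name; };

static int start_cmp(const void *a, const void *b)	/* like funcmp() */
{
	const struct range *fa = a, *fb = b;

	return fa->start > fb->start ? 1 : -1;
}

static int point_cmp(const void *a, const void *b)	/* like callcmp() */
{
	const struct range *key = a, *elem = b;

	if (elem->start <= key->start && key->start < elem->end)
		return 0;	/* the address falls inside this range */
	return key->start > elem->start ? 1 : -1;
}

int main(void)
{
	struct range funcs[] = {
		{ 0x2000, 0x20c0, "get_zeroed_page" },
		{ 0x1000, 0x1080, "__alloc_pages" },
	};
	struct range key = { 0x2010, 0x2010, NULL };
	struct range *hit;

	/* sort by ->start first, exactly as build_alloc_func_list() does */
	qsort(funcs, 2, sizeof(*funcs), start_cmp);
	hit = bsearch(&key, funcs, 2, sizeof(key), point_cmp);
	printf("0x2010 -> %s\n", hit ? hit->name : "no alloc function");
	return 0;
}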
 321
 322static int build_alloc_func_list(void)
 323{
 324	int ret;
 325	struct map *kernel_map;
 326	struct symbol *sym;
 327	struct rb_node *node;
 328	struct alloc_func *func;
 329	struct machine *machine = &kmem_session->machines.host;
 330	regex_t alloc_func_regex;
 331	const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
 332
 333	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
 334	if (ret) {
 335		char err[BUFSIZ];
 336
 337		regerror(ret, &alloc_func_regex, err, sizeof(err));
 338		pr_err("Invalid regex: %s\n%s", pattern, err);
 339		return -EINVAL;
 340	}
 341
 342	kernel_map = machine__kernel_map(machine);
 343	if (map__load(kernel_map) < 0) {
 344		pr_err("cannot load kernel map\n");
 345		return -ENOENT;
 346	}
 347
 348	map__for_each_symbol(kernel_map, sym, node) {
 349		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
 350			continue;
 351
 352		func = realloc(alloc_func_list,
 353			       (nr_alloc_funcs + 1) * sizeof(*func));
 354		if (func == NULL)
 355			return -ENOMEM;
 356
 357		pr_debug("alloc func: %s\n", sym->name);
 358		func[nr_alloc_funcs].start = sym->start;
 359		func[nr_alloc_funcs].end   = sym->end;
 360		func[nr_alloc_funcs].name  = sym->name;
 361
 362		alloc_func_list = func;
 363		nr_alloc_funcs++;
 364	}
 365
 366	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
 367
 368	regfree(&alloc_func_regex);
 369	return 0;
 370}
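
/*
 * Editor's standalone sketch (not from this file): the same POSIX
 * regcomp()/regexec() check that build_alloc_func_list() applies to
 * kernel symbols, run over a few sample names.  Compiled alone it
 * prints which names the pattern classifies as allocation functions.
 */
#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
	const char *syms[] = { "__alloc_pages_nodemask", "get_zeroed_page",
			       "kfree", "__get_free_pages" };
	regex_t re;
	int i;

	if (regcomp(&re, pattern, REG_EXTENDED))
		return 1;
	for (i = 0; i < 4; i++)
		printf("%-24s %s\n", syms[i],
		       regexec(&re, syms[i], 0, NULL, 0) ? "skipped"
							 : "alloc function");
	regfree(&re);
	return 0;
}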
 371
 372/*
 373 * Find first non-memory allocation function from callchain.
 374 * The allocation functions are in the 'alloc_func_list'.
 375 */
 376static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
 377{
 378	struct addr_location al;
 379	struct machine *machine = &kmem_session->machines.host;
 380	struct callchain_cursor_node *node;
 381
 382	if (alloc_func_list == NULL) {
 383		if (build_alloc_func_list() < 0)
 384			goto out;
 385	}
 386
 387	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
 388	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
 389
 390	callchain_cursor_commit(&callchain_cursor);
 391	while (true) {
 392		struct alloc_func key, *caller;
 393		u64 addr;
 394
 395		node = callchain_cursor_current(&callchain_cursor);
 396		if (node == NULL)
 397			break;
 398
 399		key.start = key.end = node->ip;
 400		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
 401				 sizeof(key), callcmp);
 402		if (!caller) {
 403			/* found */
 404			if (node->map)
 405				addr = map__unmap_ip(node->map, node->ip);
 406			else
 407				addr = node->ip;
 408
 409			return addr;
 410		} else
 411			pr_debug3("skipping alloc function: %s\n", caller->name);
 412
 413		callchain_cursor_advance(&callchain_cursor);
 414	}
 415
 416out:
 417	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
 418	return sample->ip;
 419}
 420
 421struct sort_dimension {
 422	const char		name[20];
 423	sort_fn_t		cmp;
 424	struct list_head	list;
 425};
 426
 427static LIST_HEAD(page_alloc_sort_input);
 428static LIST_HEAD(page_caller_sort_input);
 429
 430static struct page_stat *
 431__page_stat__findnew_page(struct page_stat *pstat, bool create)
 432{
 433	struct rb_node **node = &page_live_tree.rb_node;
 434	struct rb_node *parent = NULL;
 435	struct page_stat *data;
 436
 437	while (*node) {
 438		s64 cmp;
 439
 440		parent = *node;
 441		data = rb_entry(*node, struct page_stat, node);
 442
 443		cmp = data->page - pstat->page;
 444		if (cmp < 0)
 445			node = &parent->rb_left;
 446		else if (cmp > 0)
 447			node = &parent->rb_right;
 448		else
 449			return data;
 450	}
 451
 452	if (!create)
 453		return NULL;
 454
 455	data = zalloc(sizeof(*data));
 456	if (data != NULL) {
 457		data->page = pstat->page;
 458		data->order = pstat->order;
 459		data->gfp_flags = pstat->gfp_flags;
 460		data->migrate_type = pstat->migrate_type;
 461
 462		rb_link_node(&data->node, parent, node);
 463		rb_insert_color(&data->node, &page_live_tree);
 464	}
 465
 466	return data;
 467}
 468
 469static struct page_stat *page_stat__find_page(struct page_stat *pstat)
 470{
 471	return __page_stat__findnew_page(pstat, false);
 472}
 473
 474static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
 475{
 476	return __page_stat__findnew_page(pstat, true);
 477}
 478
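/*
 * Unlike the live tree above (keyed by page only), the alloc and caller
 * trees below are keyed by the configured sort dimensions: the first
 * comparator in the *_sort_input list that returns non-zero decides the
 * direction of descent, so lookups effectively use a compound key such
 * as the default "page,order,migtype,gfp".
 */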
 479static struct page_stat *
 480__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
 481{
 482	struct rb_node **node = &page_alloc_tree.rb_node;
 483	struct rb_node *parent = NULL;
 484	struct page_stat *data;
 485	struct sort_dimension *sort;
 486
 487	while (*node) {
 488		int cmp = 0;
 489
 490		parent = *node;
 491		data = rb_entry(*node, struct page_stat, node);
 492
 493		list_for_each_entry(sort, &page_alloc_sort_input, list) {
 494			cmp = sort->cmp(pstat, data);
 495			if (cmp)
 496				break;
 497		}
 498
 499		if (cmp < 0)
 500			node = &parent->rb_left;
 501		else if (cmp > 0)
 502			node = &parent->rb_right;
 503		else
 504			return data;
 505	}
 506
 507	if (!create)
 508		return NULL;
 509
 510	data = zalloc(sizeof(*data));
 511	if (data != NULL) {
 512		data->page = pstat->page;
 513		data->order = pstat->order;
 514		data->gfp_flags = pstat->gfp_flags;
 515		data->migrate_type = pstat->migrate_type;
 516
 517		rb_link_node(&data->node, parent, node);
 518		rb_insert_color(&data->node, &page_alloc_tree);
 519	}
 520
 521	return data;
 522}
 523
 524static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
 525{
 526	return __page_stat__findnew_alloc(pstat, false);
 527}
 528
 529static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
 530{
 531	return __page_stat__findnew_alloc(pstat, true);
 532}
 533
 534static struct page_stat *
 535__page_stat__findnew_caller(struct page_stat *pstat, bool create)
 536{
 537	struct rb_node **node = &page_caller_tree.rb_node;
 538	struct rb_node *parent = NULL;
 539	struct page_stat *data;
 540	struct sort_dimension *sort;
 541
 542	while (*node) {
 543		int cmp = 0;
 544
 545		parent = *node;
 546		data = rb_entry(*node, struct page_stat, node);
 547
 548		list_for_each_entry(sort, &page_caller_sort_input, list) {
 549			cmp = sort->cmp(pstat, data);
 550			if (cmp)
 551				break;
 552		}
 553
 554		if (cmp < 0)
 555			node = &parent->rb_left;
 556		else if (cmp > 0)
 557			node = &parent->rb_right;
 558		else
 559			return data;
 560	}
 561
 562	if (!create)
 563		return NULL;
 564
 565	data = zalloc(sizeof(*data));
 566	if (data != NULL) {
 567		data->callsite = pstat->callsite;
 568		data->order = pstat->order;
 569		data->gfp_flags = pstat->gfp_flags;
 570		data->migrate_type = pstat->migrate_type;
 571
 572		rb_link_node(&data->node, parent, node);
 573		rb_insert_color(&data->node, &page_caller_tree);
 574	}
 575
 576	return data;
 577}
 578
 579static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
 580{
 581	return __page_stat__findnew_caller(pstat, false);
 582}
 583
 584static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
 585{
 586	return __page_stat__findnew_caller(pstat, true);
 587}
 588
 589static bool valid_page(u64 pfn_or_page)
 590{
 591	if (use_pfn && pfn_or_page == -1UL)
 592		return false;
 593	if (!use_pfn && pfn_or_page == 0)
 594		return false;
 595	return true;
 596}
 597
 598struct gfp_flag {
 599	unsigned int flags;
 600	char *compact_str;
 601	char *human_readable;
 602};
 603
 604static struct gfp_flag *gfps;
 605static int nr_gfps;
 606
 607static int gfpcmp(const void *a, const void *b)
 608{
 609	const struct gfp_flag *fa = a;
 610	const struct gfp_flag *fb = b;
 611
 612	return fa->flags - fb->flags;
 613}
 614
 615/* see include/trace/events/mmflags.h */
 616static const struct {
 617	const char *original;
 618	const char *compact;
 619} gfp_compact_table[] = {
 620	{ "GFP_TRANSHUGE",		"THP" },
 621	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
 622	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
 623	{ "GFP_HIGHUSER",		"HU" },
 624	{ "GFP_USER",			"U" },
 625	{ "GFP_TEMPORARY",		"TMP" },
 626	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
 627	{ "GFP_KERNEL",			"K" },
 628	{ "GFP_NOFS",			"NF" },
 629	{ "GFP_ATOMIC",			"A" },
 630	{ "GFP_NOIO",			"NI" },
 631	{ "GFP_NOWAIT",			"NW" },
 632	{ "GFP_DMA",			"D" },
 633	{ "__GFP_HIGHMEM",		"HM" },
 634	{ "GFP_DMA32",			"D32" },
 635	{ "__GFP_HIGH",			"H" },
 636	{ "__GFP_ATOMIC",		"_A" },
 637	{ "__GFP_IO",			"I" },
 638	{ "__GFP_FS",			"F" },
 639	{ "__GFP_COLD",			"CO" },
 640	{ "__GFP_NOWARN",		"NWR" },
 641	{ "__GFP_REPEAT",		"R" },
 642	{ "__GFP_NOFAIL",		"NF" },
 643	{ "__GFP_NORETRY",		"NR" },
 644	{ "__GFP_COMP",			"C" },
 645	{ "__GFP_ZERO",			"Z" },
 646	{ "__GFP_NOMEMALLOC",		"NMA" },
 647	{ "__GFP_MEMALLOC",		"MA" },
 648	{ "__GFP_HARDWALL",		"HW" },
 649	{ "__GFP_THISNODE",		"TN" },
 650	{ "__GFP_RECLAIMABLE",		"RC" },
 651	{ "__GFP_MOVABLE",		"M" },
 652	{ "__GFP_ACCOUNT",		"AC" },
 653	{ "__GFP_NOTRACK",		"NT" },
 654	{ "__GFP_WRITE",		"WR" },
 655	{ "__GFP_RECLAIM",		"R" },
 656	{ "__GFP_DIRECT_RECLAIM",	"DR" },
 657	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
 658};
 659
 660static size_t max_gfp_len;
 661
 662static char *compact_gfp_flags(char *gfp_flags)
 663{
 664	char *orig_flags = strdup(gfp_flags);
 665	char *new_flags = NULL;
 666	char *str, *pos = NULL;
 667	size_t len = 0;
 668
 669	if (orig_flags == NULL)
 670		return NULL;
 671
 672	str = strtok_r(orig_flags, "|", &pos);
 673	while (str) {
 674		size_t i;
 675		char *new;
 676		const char *cpt;
 677
 678		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
 679			if (strcmp(gfp_compact_table[i].original, str))
 680				continue;
 681
 682			cpt = gfp_compact_table[i].compact;
 683			new = realloc(new_flags, len + strlen(cpt) + 2);
 684			if (new == NULL) {
 685				free(new_flags);
 686				return NULL;
 687			}
 688
 689			new_flags = new;
 690
 691			if (!len) {
 692				strcpy(new_flags, cpt);
 693			} else {
 694				strcat(new_flags, "|");
 695				strcat(new_flags, cpt);
 696				len++;
 697			}
 698
 699			len += strlen(cpt);
 700		}
 701
 702		str = strtok_r(NULL, "|", &pos);
 703	}
 704
 705	if (max_gfp_len < len)
 706		max_gfp_len = len;
 707
 708	free(orig_flags);
 709	return new_flags;
 710}
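
/*
 * Example: with the table above, compact_gfp_flags() rewrites the
 * tracepoint string "GFP_KERNEL|__GFP_ZERO" as "K|Z", growing the
 * result one matched token at a time via realloc().
 */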
 711
 712static char *compact_gfp_string(unsigned long gfp_flags)
 713{
 714	struct gfp_flag key = {
 715		.flags = gfp_flags,
 716	};
 717	struct gfp_flag *gfp;
 718
 719	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 720	if (gfp)
 721		return gfp->compact_str;
 722
 723	return NULL;
 724}
 725
 726static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
 727			   unsigned int gfp_flags)
 728{
 729	struct pevent_record record = {
 730		.cpu = sample->cpu,
 731		.data = sample->raw_data,
 732		.size = sample->raw_size,
 733	};
 734	struct trace_seq seq;
 735	char *str, *pos = NULL;
 736
 737	if (nr_gfps) {
 738		struct gfp_flag key = {
 739			.flags = gfp_flags,
 740		};
 741
 742		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
 743			return 0;
 744	}
 745
 746	trace_seq_init(&seq);
 747	pevent_event_info(&seq, evsel->tp_format, &record);
 748
 749	str = strtok_r(seq.buffer, " ", &pos);
 750	while (str) {
 751		if (!strncmp(str, "gfp_flags=", 10)) {
 752			struct gfp_flag *new;
 753
 754			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
 755			if (new == NULL)
 756				return -ENOMEM;
 757
 758			gfps = new;
 759			new += nr_gfps++;
 760
 761			new->flags = gfp_flags;
 762			new->human_readable = strdup(str + 10);
 763			new->compact_str = compact_gfp_flags(str + 10);
 764			if (!new->human_readable || !new->compact_str)
 765				return -ENOMEM;
 766
 767			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 768		}
 769
 770		str = strtok_r(NULL, " ", &pos);
 771	}
 772
 773	trace_seq_destroy(&seq);
 774	return 0;
 775}
 776
 777static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
 778						struct perf_sample *sample)
 779{
 780	u64 page;
 781	unsigned int order = perf_evsel__intval(evsel, sample, "order");
 782	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
 783	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
 784						       "migratetype");
 785	u64 bytes = kmem_page_size << order;
 786	u64 callsite;
 787	struct page_stat *pstat;
 788	struct page_stat this = {
 789		.order = order,
 790		.gfp_flags = gfp_flags,
 791		.migrate_type = migrate_type,
 792	};
 793
 794	if (use_pfn)
 795		page = perf_evsel__intval(evsel, sample, "pfn");
 796	else
 797		page = perf_evsel__intval(evsel, sample, "page");
 798
 799	nr_page_allocs++;
 800	total_page_alloc_bytes += bytes;
 801
 802	if (!valid_page(page)) {
 803		nr_page_fails++;
 804		total_page_fail_bytes += bytes;
 805
 806		return 0;
 807	}
 808
 809	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
 810		return -1;
 811
 812	callsite = find_callsite(evsel, sample);
 813
 814	/*
 815	 * This is to find the current page (with correct gfp flags and
 816	 * migrate type) at free event.
 817	 */
 818	this.page = page;
 819	pstat = page_stat__findnew_page(&this);
 820	if (pstat == NULL)
 821		return -ENOMEM;
 822
 823	pstat->nr_alloc++;
 824	pstat->alloc_bytes += bytes;
 825	pstat->callsite = callsite;
 826
 827	if (!live_page) {
 828		pstat = page_stat__findnew_alloc(&this);
 829		if (pstat == NULL)
 830			return -ENOMEM;
 831
 832		pstat->nr_alloc++;
 833		pstat->alloc_bytes += bytes;
 834		pstat->callsite = callsite;
 835	}
 836
 837	this.callsite = callsite;
 838	pstat = page_stat__findnew_caller(&this);
 839	if (pstat == NULL)
 840		return -ENOMEM;
 841
 842	pstat->nr_alloc++;
 843	pstat->alloc_bytes += bytes;
 844
 845	order_stats[order][migrate_type]++;
 846
 847	return 0;
 848}
 849
 850static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
 851						struct perf_sample *sample)
 852{
 853	u64 page;
 854	unsigned int order = perf_evsel__intval(evsel, sample, "order");
 855	u64 bytes = kmem_page_size << order;
 856	struct page_stat *pstat;
 857	struct page_stat this = {
 858		.order = order,
 859	};
 860
 861	if (use_pfn)
 862		page = perf_evsel__intval(evsel, sample, "pfn");
 863	else
 864		page = perf_evsel__intval(evsel, sample, "page");
 865
 866	nr_page_frees++;
 867	total_page_free_bytes += bytes;
 868
 869	this.page = page;
 870	pstat = page_stat__find_page(&this);
 871	if (pstat == NULL) {
 872		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
 873			  page, order);
 874
 875		nr_page_nomatch++;
 876		total_page_nomatch_bytes += bytes;
 877
 878		return 0;
 879	}
 880
 881	this.gfp_flags = pstat->gfp_flags;
 882	this.migrate_type = pstat->migrate_type;
 883	this.callsite = pstat->callsite;
 884
 885	rb_erase(&pstat->node, &page_live_tree);
 886	free(pstat);
 887
 888	if (live_page) {
 889		order_stats[this.order][this.migrate_type]--;
 890	} else {
 891		pstat = page_stat__find_alloc(&this);
 892		if (pstat == NULL)
 893			return -ENOMEM;
 894
 895		pstat->nr_free++;
 896		pstat->free_bytes += bytes;
 897	}
 898
 899	pstat = page_stat__find_caller(&this);
 900	if (pstat == NULL)
 901		return -ENOENT;
 902
 903	pstat->nr_free++;
 904	pstat->free_bytes += bytes;
 905
 906	if (live_page) {
 907		pstat->nr_alloc--;
 908		pstat->alloc_bytes -= bytes;
 909
 910		if (pstat->nr_alloc == 0) {
 911			rb_erase(&pstat->node, &page_caller_tree);
 912			free(pstat);
 913		}
 914	}
 915
 916	return 0;
 917}
 918
 919static bool perf_kmem__skip_sample(struct perf_sample *sample)
 920{
 921	/* skip sample based on time? */
 922	if (perf_time__skip_sample(&ptime, sample->time))
 923		return true;
 924
 925	return false;
 926}
 927
 928typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
 929				  struct perf_sample *sample);
 930
 931static int process_sample_event(struct perf_tool *tool __maybe_unused,
 932				union perf_event *event,
 933				struct perf_sample *sample,
 934				struct perf_evsel *evsel,
 935				struct machine *machine)
 936{
 937	int err = 0;
 938	struct thread *thread = machine__findnew_thread(machine, sample->pid,
 939							sample->tid);
 940
 941	if (thread == NULL) {
 942		pr_debug("problem processing %d event, skipping it.\n",
 943			 event->header.type);
 944		return -1;
 945	}
 946
 947	if (perf_kmem__skip_sample(sample))
 948		return 0;
 949
 950	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
 951
 952	if (evsel->handler != NULL) {
 953		tracepoint_handler f = evsel->handler;
 954		err = f(evsel, sample);
 955	}
 956
 957	thread__put(thread);
 958
 959	return err;
 960}
 961
 962static struct perf_tool perf_kmem = {
 963	.sample		 = process_sample_event,
 964	.comm		 = perf_event__process_comm,
 965	.mmap		 = perf_event__process_mmap,
 966	.mmap2		 = perf_event__process_mmap2,
 967	.ordered_events	 = true,
 968};
 969
 970static double fragmentation(unsigned long n_req, unsigned long n_alloc)
 971{
 972	if (n_alloc == 0)
 973		return 0.0;
 974	else
 975		return 100.0 - (100.0 * n_req / n_alloc);
 976}
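
/*
 * Worked example: fragmentation(24, 32) == 100.0 - (100.0 * 24 / 32)
 * == 25.0, i.e. a quarter of the allocated bytes were never requested.
 */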
 977
 978static void __print_slab_result(struct rb_root *root,
 979				struct perf_session *session,
 980				int n_lines, int is_caller)
 981{
 982	struct rb_node *next;
 983	struct machine *machine = &session->machines.host;
 984
 985	printf("%.105s\n", graph_dotted_line);
 986	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
 987	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
 988	printf("%.105s\n", graph_dotted_line);
 989
 990	next = rb_first(root);
 991
 992	while (next && n_lines--) {
 993		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
 994						   node);
 995		struct symbol *sym = NULL;
 996		struct map *map;
 997		char buf[BUFSIZ];
 998		u64 addr;
 999
1000		if (is_caller) {
1001			addr = data->call_site;
1002			if (!raw_ip)
1003				sym = machine__find_kernel_function(machine, addr, &map);
1004		} else
1005			addr = data->ptr;
1006
1007		if (sym != NULL)
1008			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
1009				 addr - map->unmap_ip(map, sym->start));
1010		else
1011			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
1012		printf(" %-34s |", buf);
1013
1014		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
1015		       (unsigned long long)data->bytes_alloc,
1016		       (unsigned long)data->bytes_alloc / data->hit,
1017		       (unsigned long long)data->bytes_req,
1018		       (unsigned long)data->bytes_req / data->hit,
1019		       (unsigned long)data->hit,
1020		       (unsigned long)data->pingpong,
1021		       fragmentation(data->bytes_req, data->bytes_alloc));
1022
1023		next = rb_next(next);
1024	}
1025
1026	if (n_lines == -1)
1027		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");
1028
1029	printf("%.105s\n", graph_dotted_line);
1030}
1031
1032static const char * const migrate_type_str[] = {
1033	"UNMOVABL",
1034	"RECLAIM",
1035	"MOVABLE",
1036	"RESERVED",
1037	"CMA/ISLT",
1038	"UNKNOWN",
1039};
1040
1041static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1042{
1043	struct rb_node *next = rb_first(&page_alloc_sorted);
1044	struct machine *machine = &session->machines.host;
1045	const char *format;
1046	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1047
1048	printf("\n%.105s\n", graph_dotted_line);
1049	printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1050	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
1051	       gfp_len, "GFP flags");
1052	printf("%.105s\n", graph_dotted_line);
1053
1054	if (use_pfn)
1055		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1056	else
1057		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1058
1059	while (next && n_lines--) {
1060		struct page_stat *data;
1061		struct symbol *sym;
1062		struct map *map;
1063		char buf[32];
1064		char *caller = buf;
1065
1066		data = rb_entry(next, struct page_stat, node);
1067		sym = machine__find_kernel_function(machine, data->callsite, &map);
1068		if (sym && sym->name)
1069			caller = sym->name;
1070		else
1071			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1072
1073		printf(format, (unsigned long long)data->page,
1074		       (unsigned long long)data->alloc_bytes / 1024,
1075		       data->nr_alloc, data->order,
1076		       migrate_type_str[data->migrate_type],
1077		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1078
1079		next = rb_next(next);
1080	}
1081
1082	if (n_lines == -1) {
1083		printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
1084		       gfp_len, "...");
1085	}
1086
1087	printf("%.105s\n", graph_dotted_line);
1088}
1089
1090static void __print_page_caller_result(struct perf_session *session, int n_lines)
1091{
1092	struct rb_node *next = rb_first(&page_caller_sorted);
1093	struct machine *machine = &session->machines.host;
1094	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1095
1096	printf("\n%.105s\n", graph_dotted_line);
1097	printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1098	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
1099	printf("%.105s\n", graph_dotted_line);
1100
1101	while (next && n_lines--) {
1102		struct page_stat *data;
1103		struct symbol *sym;
1104		struct map *map;
1105		char buf[32];
1106		char *caller = buf;
1107
1108		data = rb_entry(next, struct page_stat, node);
1109		sym = machine__find_kernel_function(machine, data->callsite, &map);
1110		if (sym && sym->name)
1111			caller = sym->name;
1112		else
1113			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1114
1115		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
1116		       (unsigned long long)data->alloc_bytes / 1024,
1117		       data->nr_alloc, data->order,
1118		       migrate_type_str[data->migrate_type],
1119		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1120
1121		next = rb_next(next);
1122	}
1123
1124	if (n_lines == -1) {
1125		printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
1126		       gfp_len, "...");
1127	}
1128
1129	printf("%.105s\n", graph_dotted_line);
1130}
1131
1132static void print_gfp_flags(void)
1133{
1134	int i;
1135
1136	printf("#\n");
1137	printf("# GFP flags\n");
1138	printf("# ---------\n");
1139	for (i = 0; i < nr_gfps; i++) {
1140		printf("# %08x: %*s: %s\n", gfps[i].flags,
1141		       (int) max_gfp_len, gfps[i].compact_str,
1142		       gfps[i].human_readable);
1143	}
1144}
1145
1146static void print_slab_summary(void)
1147{
1148	printf("\nSUMMARY (SLAB allocator)");
1149	printf("\n========================\n");
1150	printf("Total bytes requested: %'lu\n", total_requested);
1151	printf("Total bytes allocated: %'lu\n", total_allocated);
1152	printf("Total bytes freed:     %'lu\n", total_freed);
1153	if (total_allocated > total_freed) {
1154		printf("Net total bytes allocated: %'lu\n",
1155		total_allocated - total_freed);
1156	}
1157	printf("Total bytes wasted on internal fragmentation: %'lu\n",
1158	       total_allocated - total_requested);
1159	printf("Internal fragmentation: %f%%\n",
1160	       fragmentation(total_requested, total_allocated));
1161	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
1162}
1163
1164static void print_page_summary(void)
1165{
1166	int o, m;
1167	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
1168	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
1169
1170	printf("\nSUMMARY (page allocator)");
1171	printf("\n========================\n");
1172	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
1173	       nr_page_allocs, total_page_alloc_bytes / 1024);
1174	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
1175	       nr_page_frees, total_page_free_bytes / 1024);
1176	printf("\n");
1177
1178	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
1179	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
1180	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
1181	       nr_page_allocs - nr_alloc_freed,
1182	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
1183	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
1184	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
1185	printf("\n");
1186
1187	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
1188	       nr_page_fails, total_page_fail_bytes / 1024);
1189	printf("\n");
1190
1191	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
1192	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
1193	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
1194	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
1195	       graph_dotted_line, graph_dotted_line);
1196
1197	for (o = 0; o < MAX_PAGE_ORDER; o++) {
1198		printf("%5d", o);
1199		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
1200			if (order_stats[o][m])
1201				printf("  %'12d", order_stats[o][m]);
1202			else
1203				printf("  %12c", '.');
1204		}
1205		printf("\n");
1206	}
1207}
1208
1209static void print_slab_result(struct perf_session *session)
1210{
1211	if (caller_flag)
1212		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
1213	if (alloc_flag)
1214		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
1215	print_slab_summary();
1216}
1217
1218static void print_page_result(struct perf_session *session)
1219{
1220	if (caller_flag || alloc_flag)
1221		print_gfp_flags();
1222	if (caller_flag)
1223		__print_page_caller_result(session, caller_lines);
1224	if (alloc_flag)
1225		__print_page_alloc_result(session, alloc_lines);
1226	print_page_summary();
1227}
1228
1229static void print_result(struct perf_session *session)
1230{
1231	if (kmem_slab)
1232		print_slab_result(session);
1233	if (kmem_page)
1234		print_page_result(session);
1235}
1236
1237static LIST_HEAD(slab_caller_sort);
1238static LIST_HEAD(slab_alloc_sort);
1239static LIST_HEAD(page_caller_sort);
1240static LIST_HEAD(page_alloc_sort);
1241
1242static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
1243			     struct list_head *sort_list)
1244{
1245	struct rb_node **new = &(root->rb_node);
1246	struct rb_node *parent = NULL;
1247	struct sort_dimension *sort;
1248
1249	while (*new) {
1250		struct alloc_stat *this;
1251		int cmp = 0;
1252
1253		this = rb_entry(*new, struct alloc_stat, node);
1254		parent = *new;
1255
1256		list_for_each_entry(sort, sort_list, list) {
1257			cmp = sort->cmp(data, this);
1258			if (cmp)
1259				break;
1260		}
1261
1262		if (cmp > 0)
1263			new = &((*new)->rb_left);
1264		else
1265			new = &((*new)->rb_right);
1266	}
1267
1268	rb_link_node(&data->node, parent, new);
1269	rb_insert_color(&data->node, root);
1270}
1271
1272static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1273			       struct list_head *sort_list)
1274{
1275	struct rb_node *node;
1276	struct alloc_stat *data;
1277
1278	for (;;) {
1279		node = rb_first(root);
1280		if (!node)
1281			break;
1282
1283		rb_erase(node, root);
1284		data = rb_entry(node, struct alloc_stat, node);
1285		sort_slab_insert(root_sorted, data, sort_list);
1286	}
1287}
1288
1289static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1290			     struct list_head *sort_list)
1291{
1292	struct rb_node **new = &root->rb_node;
1293	struct rb_node *parent = NULL;
1294	struct sort_dimension *sort;
1295
1296	while (*new) {
1297		struct page_stat *this;
1298		int cmp = 0;
1299
1300		this = rb_entry(*new, struct page_stat, node);
1301		parent = *new;
1302
1303		list_for_each_entry(sort, sort_list, list) {
1304			cmp = sort->cmp(data, this);
1305			if (cmp)
1306				break;
1307		}
1308
1309		if (cmp > 0)
1310			new = &parent->rb_left;
1311		else
1312			new = &parent->rb_right;
1313	}
1314
1315	rb_link_node(&data->node, parent, new);
1316	rb_insert_color(&data->node, root);
1317}
1318
1319static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1320			       struct list_head *sort_list)
1321{
1322	struct rb_node *node;
1323	struct page_stat *data;
1324
1325	for (;;) {
1326		node = rb_first(root);
1327		if (!node)
1328			break;
1329
1330		rb_erase(node, root);
1331		data = rb_entry(node, struct page_stat, node);
1332		sort_page_insert(root_sorted, data, sort_list);
1333	}
1334}
1335
1336static void sort_result(void)
1337{
1338	if (kmem_slab) {
1339		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
1340				   &slab_alloc_sort);
1341		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
1342				   &slab_caller_sort);
1343	}
1344	if (kmem_page) {
1345		if (live_page)
1346			__sort_page_result(&page_live_tree, &page_alloc_sorted,
1347					   &page_alloc_sort);
1348		else
1349			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
1350					   &page_alloc_sort);
1351
1352		__sort_page_result(&page_caller_tree, &page_caller_sorted,
1353				   &page_caller_sort);
1354	}
1355}
1356
1357static int __cmd_kmem(struct perf_session *session)
1358{
1359	int err = -EINVAL;
1360	struct perf_evsel *evsel;
1361	const struct perf_evsel_str_handler kmem_tracepoints[] = {
1362		/* slab allocator */
1363		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
1364		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
1365		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
1366		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
1367		{ "kmem:kfree",			perf_evsel__process_free_event, },
1368		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
1369		/* page allocator */
1370		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
1371		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
1372	};
1373
1374	if (!perf_session__has_traces(session, "kmem record"))
1375		goto out;
1376
1377	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1378		pr_err("Initializing perf session tracepoint handlers failed\n");
1379		goto out;
1380	}
1381
1382	evlist__for_each_entry(session->evlist, evsel) {
1383		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
1384		    perf_evsel__field(evsel, "pfn")) {
1385			use_pfn = true;
1386			break;
1387		}
1388	}
1389
1390	setup_pager();
1391	err = perf_session__process_events(session);
1392	if (err != 0) {
1393		pr_err("error during process events: %d\n", err);
1394		goto out;
1395	}
1396	sort_result();
1397	print_result(session);
1398out:
1399	return err;
1400}
1401
1402/* slab sort keys */
1403static int ptr_cmp(void *a, void *b)
1404{
1405	struct alloc_stat *l = a;
1406	struct alloc_stat *r = b;
1407
1408	if (l->ptr < r->ptr)
1409		return -1;
1410	else if (l->ptr > r->ptr)
1411		return 1;
1412	return 0;
1413}
1414
1415static struct sort_dimension ptr_sort_dimension = {
1416	.name	= "ptr",
1417	.cmp	= ptr_cmp,
1418};
1419
1420static int slab_callsite_cmp(void *a, void *b)
1421{
1422	struct alloc_stat *l = a;
1423	struct alloc_stat *r = b;
1424
1425	if (l->call_site < r->call_site)
1426		return -1;
1427	else if (l->call_site > r->call_site)
1428		return 1;
1429	return 0;
1430}
1431
1432static struct sort_dimension callsite_sort_dimension = {
1433	.name	= "callsite",
1434	.cmp	= slab_callsite_cmp,
1435};
1436
1437static int hit_cmp(void *a, void *b)
1438{
1439	struct alloc_stat *l = a;
1440	struct alloc_stat *r = b;
1441
1442	if (l->hit < r->hit)
1443		return -1;
1444	else if (l->hit > r->hit)
1445		return 1;
1446	return 0;
1447}
1448
1449static struct sort_dimension hit_sort_dimension = {
1450	.name	= "hit",
1451	.cmp	= hit_cmp,
1452};
1453
1454static int bytes_cmp(void *a, void *b)
1455{
1456	struct alloc_stat *l = a;
1457	struct alloc_stat *r = b;
1458
1459	if (l->bytes_alloc < r->bytes_alloc)
1460		return -1;
1461	else if (l->bytes_alloc > r->bytes_alloc)
1462		return 1;
1463	return 0;
1464}
1465
1466static struct sort_dimension bytes_sort_dimension = {
1467	.name	= "bytes",
1468	.cmp	= bytes_cmp,
1469};
1470
1471static int frag_cmp(void *a, void *b)
1472{
1473	double x, y;
1474	struct alloc_stat *l = a;
1475	struct alloc_stat *r = b;
1476
1477	x = fragmentation(l->bytes_req, l->bytes_alloc);
1478	y = fragmentation(r->bytes_req, r->bytes_alloc);
1479
1480	if (x < y)
1481		return -1;
1482	else if (x > y)
1483		return 1;
1484	return 0;
1485}
1486
1487static struct sort_dimension frag_sort_dimension = {
1488	.name	= "frag",
1489	.cmp	= frag_cmp,
1490};
1491
1492static int pingpong_cmp(void *a, void *b)
1493{
1494	struct alloc_stat *l = a;
1495	struct alloc_stat *r = b;
1496
1497	if (l->pingpong < r->pingpong)
1498		return -1;
1499	else if (l->pingpong > r->pingpong)
1500		return 1;
1501	return 0;
1502}
1503
1504static struct sort_dimension pingpong_sort_dimension = {
1505	.name	= "pingpong",
1506	.cmp	= pingpong_cmp,
1507};
1508
1509/* page sort keys */
1510static int page_cmp(void *a, void *b)
1511{
1512	struct page_stat *l = a;
1513	struct page_stat *r = b;
1514
1515	if (l->page < r->page)
1516		return -1;
1517	else if (l->page > r->page)
1518		return 1;
1519	return 0;
1520}
1521
1522static struct sort_dimension page_sort_dimension = {
1523	.name	= "page",
1524	.cmp	= page_cmp,
1525};
1526
1527static int page_callsite_cmp(void *a, void *b)
1528{
1529	struct page_stat *l = a;
1530	struct page_stat *r = b;
1531
1532	if (l->callsite < r->callsite)
1533		return -1;
1534	else if (l->callsite > r->callsite)
1535		return 1;
1536	return 0;
1537}
1538
1539static struct sort_dimension page_callsite_sort_dimension = {
1540	.name	= "callsite",
1541	.cmp	= page_callsite_cmp,
1542};
1543
1544static int page_hit_cmp(void *a, void *b)
1545{
1546	struct page_stat *l = a;
1547	struct page_stat *r = b;
1548
1549	if (l->nr_alloc < r->nr_alloc)
1550		return -1;
1551	else if (l->nr_alloc > r->nr_alloc)
1552		return 1;
1553	return 0;
1554}
1555
1556static struct sort_dimension page_hit_sort_dimension = {
1557	.name	= "hit",
1558	.cmp	= page_hit_cmp,
1559};
1560
1561static int page_bytes_cmp(void *a, void *b)
1562{
1563	struct page_stat *l = a;
1564	struct page_stat *r = b;
1565
1566	if (l->alloc_bytes < r->alloc_bytes)
1567		return -1;
1568	else if (l->alloc_bytes > r->alloc_bytes)
1569		return 1;
1570	return 0;
1571}
1572
1573static struct sort_dimension page_bytes_sort_dimension = {
1574	.name	= "bytes",
1575	.cmp	= page_bytes_cmp,
1576};
1577
1578static int page_order_cmp(void *a, void *b)
1579{
1580	struct page_stat *l = a;
1581	struct page_stat *r = b;
1582
1583	if (l->order < r->order)
1584		return -1;
1585	else if (l->order > r->order)
1586		return 1;
1587	return 0;
1588}
1589
1590static struct sort_dimension page_order_sort_dimension = {
1591	.name	= "order",
1592	.cmp	= page_order_cmp,
1593};
1594
1595static int migrate_type_cmp(void *a, void *b)
1596{
1597	struct page_stat *l = a;
1598	struct page_stat *r = b;
1599
1600	/* for internal use to find free'd page */
1601	if (l->migrate_type == -1U)
1602		return 0;
1603
1604	if (l->migrate_type < r->migrate_type)
1605		return -1;
1606	else if (l->migrate_type > r->migrate_type)
1607		return 1;
1608	return 0;
1609}
1610
1611static struct sort_dimension migrate_type_sort_dimension = {
1612	.name	= "migtype",
1613	.cmp	= migrate_type_cmp,
1614};
1615
1616static int gfp_flags_cmp(void *a, void *b)
1617{
1618	struct page_stat *l = a;
1619	struct page_stat *r = b;
1620
1621	/* for internal use to find free'd page */
1622	if (l->gfp_flags == -1U)
1623		return 0;
1624
1625	if (l->gfp_flags < r->gfp_flags)
1626		return -1;
1627	else if (l->gfp_flags > r->gfp_flags)
1628		return 1;
1629	return 0;
1630}
1631
1632static struct sort_dimension gfp_flags_sort_dimension = {
1633	.name	= "gfp",
1634	.cmp	= gfp_flags_cmp,
1635};
1636
1637static struct sort_dimension *slab_sorts[] = {
1638	&ptr_sort_dimension,
1639	&callsite_sort_dimension,
1640	&hit_sort_dimension,
1641	&bytes_sort_dimension,
1642	&frag_sort_dimension,
1643	&pingpong_sort_dimension,
1644};
1645
1646static struct sort_dimension *page_sorts[] = {
1647	&page_sort_dimension,
1648	&page_callsite_sort_dimension,
1649	&page_hit_sort_dimension,
1650	&page_bytes_sort_dimension,
1651	&page_order_sort_dimension,
1652	&migrate_type_sort_dimension,
1653	&gfp_flags_sort_dimension,
1654};
1655
1656static int slab_sort_dimension__add(const char *tok, struct list_head *list)
1657{
1658	struct sort_dimension *sort;
1659	int i;
1660
1661	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1662		if (!strcmp(slab_sorts[i]->name, tok)) {
1663			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
1664			if (!sort) {
1665				pr_err("%s: memdup failed\n", __func__);
1666				return -1;
1667			}
1668			list_add_tail(&sort->list, list);
1669			return 0;
1670		}
1671	}
1672
1673	return -1;
1674}
1675
1676static int page_sort_dimension__add(const char *tok, struct list_head *list)
1677{
1678	struct sort_dimension *sort;
1679	int i;
1680
1681	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1682		if (!strcmp(page_sorts[i]->name, tok)) {
1683			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1684			if (!sort) {
1685				pr_err("%s: memdup failed\n", __func__);
1686				return -1;
1687			}
1688			list_add_tail(&sort->list, list);
1689			return 0;
1690		}
1691	}
1692
1693	return -1;
1694}
1695
1696static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
1697{
1698	char *tok;
1699	char *str = strdup(arg);
1700	char *pos = str;
1701
1702	if (!str) {
1703		pr_err("%s: strdup failed\n", __func__);
1704		return -1;
1705	}
1706
1707	while (true) {
1708		tok = strsep(&pos, ",");
1709		if (!tok)
1710			break;
1711		if (slab_sort_dimension__add(tok, sort_list) < 0) {
1712			error("Unknown slab --sort key: '%s'", tok);
1713			free(str);
1714			return -1;
1715		}
1716	}
1717
1718	free(str);
1719	return 0;
1720}
1721
1722static int setup_page_sorting(struct list_head *sort_list, const char *arg)
1723{
1724	char *tok;
1725	char *str = strdup(arg);
1726	char *pos = str;
1727
1728	if (!str) {
1729		pr_err("%s: strdup failed\n", __func__);
1730		return -1;
1731	}
1732
1733	while (true) {
1734		tok = strsep(&pos, ",");
1735		if (!tok)
1736			break;
1737		if (page_sort_dimension__add(tok, sort_list) < 0) {
1738			error("Unknown page --sort key: '%s'", tok);
1739			free(str);
1740			return -1;
1741		}
1742	}
1743
1744	free(str);
1745	return 0;
1746}
1747
1748static int parse_sort_opt(const struct option *opt __maybe_unused,
1749			  const char *arg, int unset __maybe_unused)
1750{
1751	if (!arg)
1752		return -1;
1753
1754	if (kmem_page > kmem_slab ||
1755	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
1756		if (caller_flag > alloc_flag)
1757			return setup_page_sorting(&page_caller_sort, arg);
1758		else
1759			return setup_page_sorting(&page_alloc_sort, arg);
1760	} else {
1761		if (caller_flag > alloc_flag)
1762			return setup_slab_sorting(&slab_caller_sort, arg);
1763		else
1764			return setup_slab_sorting(&slab_alloc_sort, arg);
1765	}
1766
1767	return 0;
1768}
1769
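/*
 * The callbacks below encode "which option came last" by setting one
 * flag to the other + 1; parse_sort_opt() above and parse_line_opt()
 * below then use plain "a > b" tests to apply --sort or --line to the
 * view (and allocator) named most recently on the command line.
 */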
1770static int parse_caller_opt(const struct option *opt __maybe_unused,
1771			    const char *arg __maybe_unused,
1772			    int unset __maybe_unused)
1773{
1774	caller_flag = (alloc_flag + 1);
1775	return 0;
1776}
1777
1778static int parse_alloc_opt(const struct option *opt __maybe_unused,
1779			   const char *arg __maybe_unused,
1780			   int unset __maybe_unused)
1781{
1782	alloc_flag = (caller_flag + 1);
1783	return 0;
1784}
1785
1786static int parse_slab_opt(const struct option *opt __maybe_unused,
1787			  const char *arg __maybe_unused,
1788			  int unset __maybe_unused)
1789{
1790	kmem_slab = (kmem_page + 1);
1791	return 0;
1792}
1793
1794static int parse_page_opt(const struct option *opt __maybe_unused,
1795			  const char *arg __maybe_unused,
1796			  int unset __maybe_unused)
1797{
1798	kmem_page = (kmem_slab + 1);
1799	return 0;
1800}
1801
1802static int parse_line_opt(const struct option *opt __maybe_unused,
1803			  const char *arg, int unset __maybe_unused)
1804{
1805	int lines;
1806
1807	if (!arg)
1808		return -1;
1809
1810	lines = strtoul(arg, NULL, 10);
1811
1812	if (caller_flag > alloc_flag)
1813		caller_lines = lines;
1814	else
1815		alloc_lines = lines;
1816
1817	return 0;
1818}
1819
1820static int __cmd_record(int argc, const char **argv)
1821{
1822	const char * const record_args[] = {
1823	"record", "-a", "-R", "-c", "1",
1824	};
1825	const char * const slab_events[] = {
1826	"-e", "kmem:kmalloc",
1827	"-e", "kmem:kmalloc_node",
1828	"-e", "kmem:kfree",
1829	"-e", "kmem:kmem_cache_alloc",
1830	"-e", "kmem:kmem_cache_alloc_node",
1831	"-e", "kmem:kmem_cache_free",
1832	};
1833	const char * const page_events[] = {
1834	"-e", "kmem:mm_page_alloc",
1835	"-e", "kmem:mm_page_free",
1836	};
1837	unsigned int rec_argc, i, j;
1838	const char **rec_argv;
1839
1840	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1841	if (kmem_slab)
1842		rec_argc += ARRAY_SIZE(slab_events);
1843	if (kmem_page)
1844		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
1845
1846	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1847
1848	if (rec_argv == NULL)
1849		return -ENOMEM;
1850
1851	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1852		rec_argv[i] = strdup(record_args[i]);
1853
1854	if (kmem_slab) {
1855		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
1856			rec_argv[i] = strdup(slab_events[j]);
1857	}
1858	if (kmem_page) {
1859		rec_argv[i++] = strdup("-g");
1860
1861		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
1862			rec_argv[i] = strdup(page_events[j]);
1863	}
1864
1865	for (j = 1; j < (unsigned int)argc; j++, i++)
1866		rec_argv[i] = argv[j];
1867
1868	return cmd_record(i, rec_argv, NULL);
1869}
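
/*
 * Example (approximate): "perf kmem --page record sleep 1" builds and
 * runs something like
 *
 *	perf record -a -R -c 1 -g \
 *		-e kmem:mm_page_alloc -e kmem:mm_page_free sleep 1
 *
 * with "-g" added only for the page allocator, so that callsites can be
 * resolved from the callchain.
 */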
1870
1871static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
1872{
1873	if (!strcmp(var, "kmem.default")) {
1874		if (!strcmp(value, "slab"))
1875			kmem_default = KMEM_SLAB;
1876		else if (!strcmp(value, "page"))
1877			kmem_default = KMEM_PAGE;
1878		else
1879			pr_err("invalid default value ('slab' or 'page' required): %s\n",
1880			       value);
1881		return 0;
1882	}
1883
1884	return 0;
1885}
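
/*
 * Example: a perf config file (e.g. ~/.perfconfig) containing
 *
 *	[kmem]
 *		default = page
 *
 * makes "perf kmem stat" analyze the page allocator when neither --slab
 * nor --page is given on the command line.
 */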
1886
1887int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
1888{
1889	const char * const default_slab_sort = "frag,hit,bytes";
1890	const char * const default_page_sort = "bytes,hit";
1891	struct perf_data_file file = {
1892		.mode = PERF_DATA_MODE_READ,
1893	};
1894	const struct option kmem_options[] = {
1895	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1896	OPT_INCR('v', "verbose", &verbose,
1897		    "be more verbose (show symbol address, etc)"),
1898	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1899			   "show per-callsite statistics", parse_caller_opt),
1900	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1901			   "show per-allocation statistics", parse_alloc_opt),
1902	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
1903		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1904		     "page, order, migtype, gfp", parse_sort_opt),
1905	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1906	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1907	OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
1908	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1909			   parse_slab_opt),
1910	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1911			   parse_page_opt),
1912	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
1913	OPT_STRING(0, "time", &time_str, "str",
1914		   "Time span of interest (start,stop)"),
1915	OPT_END()
1916	};
1917	const char *const kmem_subcommands[] = { "record", "stat", NULL };
1918	const char *kmem_usage[] = {
1919		NULL,
1920		NULL
1921	};
1922	struct perf_session *session;
1923	int ret = -1;
1924	const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
1925
1926	perf_config(kmem_config, NULL);
1927	argc = parse_options_subcommand(argc, argv, kmem_options,
1928					kmem_subcommands, kmem_usage, 0);
1929
1930	if (!argc)
1931		usage_with_options(kmem_usage, kmem_options);
1932
1933	if (kmem_slab == 0 && kmem_page == 0) {
1934		if (kmem_default == KMEM_SLAB)
1935			kmem_slab = 1;
1936		else
1937			kmem_page = 1;
1938	}
1939
1940	if (!strncmp(argv[0], "rec", 3)) {
1941		symbol__init(NULL);
1942		return __cmd_record(argc, argv);
1943	}
1944
1945	file.path = input_name;
1946
1947	kmem_session = session = perf_session__new(&file, false, &perf_kmem);
1948	if (session == NULL)
1949		return -1;
1950
1951	if (kmem_slab) {
1952		if (!perf_evlist__find_tracepoint_by_name(session->evlist,
1953							  "kmem:kmalloc")) {
1954			pr_err(errmsg, "slab", "slab");
1955			goto out_delete;
1956		}
1957	}
1958
1959	if (kmem_page) {
1960		struct perf_evsel *evsel;
1961
1962		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
1963							     "kmem:mm_page_alloc");
1964		if (evsel == NULL) {
1965			pr_err(errmsg, "page", "page");
1966			goto out_delete;
1967		}
1968
1969		kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
1970		symbol_conf.use_callchain = true;
1971	}
1972
1973	symbol__init(&session->header.env);
1974
1975	if (perf_time__parse_str(&ptime, time_str) != 0) {
1976		pr_err("Invalid time string\n");
1977		return -EINVAL;
1978	}
1979
1980	if (!strcmp(argv[0], "stat")) {
1981		setlocale(LC_ALL, "");
1982
1983		if (cpu__setup_cpunode_map())
1984			goto out_delete;
1985
1986		if (list_empty(&slab_caller_sort))
1987			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
1988		if (list_empty(&slab_alloc_sort))
1989			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
1990		if (list_empty(&page_caller_sort))
1991			setup_page_sorting(&page_caller_sort, default_page_sort);
1992		if (list_empty(&page_alloc_sort))
1993			setup_page_sorting(&page_alloc_sort, default_page_sort);
1994
1995		if (kmem_page) {
1996			setup_page_sorting(&page_alloc_sort_input,
1997					   "page,order,migtype,gfp");
1998			setup_page_sorting(&page_caller_sort_input,
1999					   "callsite,order,migtype,gfp");
2000		}
2001		ret = __cmd_kmem(session);
2002	} else
2003		usage_with_options(kmem_usage, kmem_options);
2004
2005out_delete:
2006	perf_session__delete(session);
2007
2008	return ret;
2009}
2010
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2#include "builtin.h"
   3#include "perf.h"
   4
   5#include "util/dso.h"
   6#include "util/evlist.h"
   7#include "util/evsel.h"
   8#include "util/config.h"
   9#include "util/map.h"
  10#include "util/symbol.h"
  11#include "util/thread.h"
  12#include "util/header.h"
  13#include "util/session.h"
  14#include "util/tool.h"
  15#include "util/callchain.h"
  16#include "util/time-utils.h"
  17#include <linux/err.h>
  18
  19#include <subcmd/pager.h>
  20#include <subcmd/parse-options.h>
  21#include "util/trace-event.h"
  22#include "util/data.h"
  23#include "util/cpumap.h"
  24
  25#include "util/debug.h"
  26#include "util/string2.h"
  27
  28#include <linux/kernel.h>
  29#include <linux/rbtree.h>
  30#include <linux/string.h>
  31#include <linux/zalloc.h>
  32#include <errno.h>
  33#include <inttypes.h>
  34#include <locale.h>
  35#include <regex.h>
  36
  37#include <linux/ctype.h>
  38
  39static int	kmem_slab;
  40static int	kmem_page;
  41
  42static long	kmem_page_size;
  43static enum {
  44	KMEM_SLAB,
  45	KMEM_PAGE,
  46} kmem_default = KMEM_SLAB;  /* for backward compatibility */
  47
  48struct alloc_stat;
  49typedef int (*sort_fn_t)(void *, void *);
  50
  51static int			alloc_flag;
  52static int			caller_flag;
  53
  54static int			alloc_lines = -1;
  55static int			caller_lines = -1;
  56
  57static bool			raw_ip;
  58
  59struct alloc_stat {
  60	u64	call_site;
  61	u64	ptr;
  62	u64	bytes_req;
  63	u64	bytes_alloc;
  64	u64	last_alloc;
  65	u32	hit;
  66	u32	pingpong;
  67
  68	short	alloc_cpu;
  69
  70	struct rb_node node;
  71};
  72
  73static struct rb_root root_alloc_stat;
  74static struct rb_root root_alloc_sorted;
  75static struct rb_root root_caller_stat;
  76static struct rb_root root_caller_sorted;
  77
  78static unsigned long total_requested, total_allocated, total_freed;
  79static unsigned long nr_allocs, nr_cross_allocs;
  80
  81/* filters for controlling start and stop of time of analysis */
  82static struct perf_time_interval ptime;
  83const char *time_str;
  84
  85static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
  86			     int bytes_req, int bytes_alloc, int cpu)
  87{
  88	struct rb_node **node = &root_alloc_stat.rb_node;
  89	struct rb_node *parent = NULL;
  90	struct alloc_stat *data = NULL;
  91
  92	while (*node) {
  93		parent = *node;
  94		data = rb_entry(*node, struct alloc_stat, node);
  95
  96		if (ptr > data->ptr)
  97			node = &(*node)->rb_right;
  98		else if (ptr < data->ptr)
  99			node = &(*node)->rb_left;
 100		else
 101			break;
 102	}
 103
 104	if (data && data->ptr == ptr) {
 105		data->hit++;
 106		data->bytes_req += bytes_req;
 107		data->bytes_alloc += bytes_alloc;
 108	} else {
 109		data = malloc(sizeof(*data));
 110		if (!data) {
 111			pr_err("%s: malloc failed\n", __func__);
 112			return -1;
 113		}
 114		data->ptr = ptr;
 115		data->pingpong = 0;
 116		data->hit = 1;
 117		data->bytes_req = bytes_req;
 118		data->bytes_alloc = bytes_alloc;
 119
 120		rb_link_node(&data->node, parent, node);
 121		rb_insert_color(&data->node, &root_alloc_stat);
 122	}
 123	data->call_site = call_site;
 124	data->alloc_cpu = cpu;
 125	data->last_alloc = bytes_alloc;
 126
 127	return 0;
 128}
 129
 130static int insert_caller_stat(unsigned long call_site,
 131			      int bytes_req, int bytes_alloc)
 132{
 133	struct rb_node **node = &root_caller_stat.rb_node;
 134	struct rb_node *parent = NULL;
 135	struct alloc_stat *data = NULL;
 136
 137	while (*node) {
 138		parent = *node;
 139		data = rb_entry(*node, struct alloc_stat, node);
 140
 141		if (call_site > data->call_site)
 142			node = &(*node)->rb_right;
 143		else if (call_site < data->call_site)
 144			node = &(*node)->rb_left;
 145		else
 146			break;
 147	}
 148
 149	if (data && data->call_site == call_site) {
 150		data->hit++;
 151		data->bytes_req += bytes_req;
 152		data->bytes_alloc += bytes_alloc;
 153	} else {
 154		data = malloc(sizeof(*data));
 155		if (!data) {
 156			pr_err("%s: malloc failed\n", __func__);
 157			return -1;
 158		}
 159		data->call_site = call_site;
 160		data->pingpong = 0;
 161		data->hit = 1;
 162		data->bytes_req = bytes_req;
 163		data->bytes_alloc = bytes_alloc;
 164
 165		rb_link_node(&data->node, parent, node);
 166		rb_insert_color(&data->node, &root_caller_stat);
 167	}
 168
 169	return 0;
 170}
 171
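/*
 * Slab tracepoint handler: each kmalloc/kmem_cache_alloc sample feeds
 * both rbtrees -- the per-pointer tree behind --alloc and the
 * per-callsite tree behind --caller -- plus the global byte counters
 * used by print_slab_summary().  The field names ("ptr", "call_site",
 * "bytes_req", "bytes_alloc") are those of the kmem tracepoints.
 */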
 172	static int evsel__process_alloc_event(struct evsel *evsel, struct perf_sample *sample)
 173{
 174	unsigned long ptr = evsel__intval(evsel, sample, "ptr"),
 175		      call_site = evsel__intval(evsel, sample, "call_site");
 176	int bytes_req = evsel__intval(evsel, sample, "bytes_req"),
 177	    bytes_alloc = evsel__intval(evsel, sample, "bytes_alloc");
 178
 179	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
 180	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
 181		return -1;
 182
 183	total_requested += bytes_req;
 184	total_allocated += bytes_alloc;
 185
 186	nr_allocs++;
 187	return 0;
 188}
 189
 190	static int evsel__process_alloc_node_event(struct evsel *evsel, struct perf_sample *sample)
 191{
 192	int ret = evsel__process_alloc_event(evsel, sample);
 193
 194	if (!ret) {
 195		int node1 = cpu__get_node(sample->cpu),
 196		    node2 = evsel__intval(evsel, sample, "node");
 197
 198		if (node1 != node2)
 199			nr_cross_allocs++;
 200	}
 201
 202	return ret;
 203}
 204
 205static int ptr_cmp(void *, void *);
 206static int slab_callsite_cmp(void *, void *);
 207
 208static struct alloc_stat *search_alloc_stat(unsigned long ptr,
 209					    unsigned long call_site,
 210					    struct rb_root *root,
 211					    sort_fn_t sort_fn)
 212{
 213	struct rb_node *node = root->rb_node;
 214	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
 215
 216	while (node) {
 217		struct alloc_stat *data;
 218		int cmp;
 219
 220		data = rb_entry(node, struct alloc_stat, node);
 221
 222		cmp = sort_fn(&key, data);
 223		if (cmp < 0)
 224			node = node->rb_left;
 225		else if (cmp > 0)
 226			node = node->rb_right;
 227		else
 228			return data;
 229	}
 230	return NULL;
 231}
 232
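/*
 * Slab free handler: a free arriving on a different CPU than the
 * allocation counts as a "pingpong" against both the pointer and its
 * callsite; alloc_cpu is reset to -1 afterwards so a recycled pointer
 * is not counted twice.
 */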
 233	static int evsel__process_free_event(struct evsel *evsel, struct perf_sample *sample)
 234{
 235	unsigned long ptr = evsel__intval(evsel, sample, "ptr");
 236	struct alloc_stat *s_alloc, *s_caller;
 237
 238	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
 239	if (!s_alloc)
 240		return 0;
 241
 242	total_freed += s_alloc->last_alloc;
 243
 244	if ((short)sample->cpu != s_alloc->alloc_cpu) {
 245		s_alloc->pingpong++;
 246
 247		s_caller = search_alloc_stat(0, s_alloc->call_site,
 248					     &root_caller_stat,
 249					     slab_callsite_cmp);
 250		if (!s_caller)
 251			return -1;
 252		s_caller->pingpong++;
 253	}
 254	s_alloc->alloc_cpu = -1;
 255
 256	return 0;
 257}
 258
 259static u64 total_page_alloc_bytes;
 260static u64 total_page_free_bytes;
 261static u64 total_page_nomatch_bytes;
 262static u64 total_page_fail_bytes;
 263static unsigned long nr_page_allocs;
 264static unsigned long nr_page_frees;
 265static unsigned long nr_page_fails;
 266static unsigned long nr_page_nomatch;
 267
 268static bool use_pfn;
 269static bool live_page;
 270static struct perf_session *kmem_session;
 271
 272#define MAX_MIGRATE_TYPES  6
 273#define MAX_PAGE_ORDER     11
 274
 275static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
 276
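/*
 * page_stat nodes are keyed differently per tree: by pfn/page alone in
 * the live tree, and by the tuples set up in cmd_kmem() --
 * (page,order,migtype,gfp) for allocations, (callsite,order,migtype,gfp)
 * for callers.
 */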
 277struct page_stat {
 278	struct rb_node 	node;
 279	u64 		page;
 280	u64 		callsite;
 281	int 		order;
 282	unsigned 	gfp_flags;
 283	unsigned 	migrate_type;
 284	u64		alloc_bytes;
 285	u64 		free_bytes;
 286	int 		nr_alloc;
 287	int 		nr_free;
 288};
 289
 290static struct rb_root page_live_tree;
 291static struct rb_root page_alloc_tree;
 292static struct rb_root page_alloc_sorted;
 293static struct rb_root page_caller_tree;
 294static struct rb_root page_caller_sorted;
 295
 296struct alloc_func {
 297	u64 start;
 298	u64 end;
 299	char *name;
 300};
 301
 302static int nr_alloc_funcs;
 303static struct alloc_func *alloc_func_list;
 304
 305static int funcmp(const void *a, const void *b)
 306{
 307	const struct alloc_func *fa = a;
 308	const struct alloc_func *fb = b;
 309
 310	if (fa->start > fb->start)
 311		return 1;
 312	else
 313		return -1;
 314}
 315
 316static int callcmp(const void *a, const void *b)
 317{
 318	const struct alloc_func *fa = a;
 319	const struct alloc_func *fb = b;
 320
 321	if (fb->start <= fa->start && fa->end < fb->end)
 322		return 0;
 323
 324	if (fa->start > fb->start)
 325		return 1;
 326	else
 327		return -1;
 328}
 329
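/*
 * Build a sorted array of allocator entry points -- kernel symbols
 * matching the regex below, e.g. __alloc_pages, get_free_pages,
 * get_zeroed_page -- so find_callsite() can bsearch it per sample.
 */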
 330static int build_alloc_func_list(void)
 331{
 332	int ret;
 333	struct map *kernel_map;
 334	struct symbol *sym;
 335	struct rb_node *node;
 336	struct alloc_func *func;
 337	struct machine *machine = &kmem_session->machines.host;
 338	regex_t alloc_func_regex;
 339	static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
 340
 341	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
 342	if (ret) {
 343		char err[BUFSIZ];
 344
 345		regerror(ret, &alloc_func_regex, err, sizeof(err));
 346		pr_err("Invalid regex: %s\n%s", pattern, err);
 347		return -EINVAL;
 348	}
 349
 350	kernel_map = machine__kernel_map(machine);
 351	if (map__load(kernel_map) < 0) {
 352		pr_err("cannot load kernel map\n");
 353		return -ENOENT;
 354	}
 355
 356	map__for_each_symbol(kernel_map, sym, node) {
 357		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
 358			continue;
 359
 360		func = realloc(alloc_func_list,
 361			       (nr_alloc_funcs + 1) * sizeof(*func));
 362		if (func == NULL)
 363			return -ENOMEM;
 364
 365		pr_debug("alloc func: %s\n", sym->name);
 366		func[nr_alloc_funcs].start = sym->start;
 367		func[nr_alloc_funcs].end   = sym->end;
 368		func[nr_alloc_funcs].name  = sym->name;
 369
 370		alloc_func_list = func;
 371		nr_alloc_funcs++;
 372	}
 373
 374	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
 375
 376	regfree(&alloc_func_regex);
 377	return 0;
 378}
 379
 380	/*
 381	 * Find the first non-allocation function in the callchain.
 382	 * The allocation functions are listed in 'alloc_func_list'.
 383	 */
 384static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
 385{
 386	struct addr_location al;
 387	struct machine *machine = &kmem_session->machines.host;
 388	struct callchain_cursor_node *node;
 389
 390	if (alloc_func_list == NULL) {
 391		if (build_alloc_func_list() < 0)
 392			goto out;
 393	}
 394
 395	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
 396	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
 397
 398	callchain_cursor_commit(&callchain_cursor);
 399	while (true) {
 400		struct alloc_func key, *caller;
 401		u64 addr;
 402
 403		node = callchain_cursor_current(&callchain_cursor);
 404		if (node == NULL)
 405			break;
 406
 407		key.start = key.end = node->ip;
 408		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
 409				 sizeof(key), callcmp);
 410		if (!caller) {
 411			/* found */
 412			if (node->ms.map)
 413				addr = map__unmap_ip(node->ms.map, node->ip);
 414			else
 415				addr = node->ip;
 416
 417			return addr;
 418		} else
 419			pr_debug3("skipping alloc function: %s\n", caller->name);
 420
 421		callchain_cursor_advance(&callchain_cursor);
 422	}
 423
 424out:
 425	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
 426	return sample->ip;
 427}
 428
 429struct sort_dimension {
 430	const char		name[20];
 431	sort_fn_t		cmp;
 432	struct list_head	list;
 433};
 434
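/*
 * Sort keys chain: the *_sort_input lists double as the rbtree compare
 * order, so a key string like "page,order,migtype,gfp" means two stats
 * merge only when all four fields are equal.
 */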
 435static LIST_HEAD(page_alloc_sort_input);
 436static LIST_HEAD(page_caller_sort_input);
 437
 438static struct page_stat *
 439__page_stat__findnew_page(struct page_stat *pstat, bool create)
 440{
 441	struct rb_node **node = &page_live_tree.rb_node;
 442	struct rb_node *parent = NULL;
 443	struct page_stat *data;
 444
 445	while (*node) {
 446		s64 cmp;
 447
 448		parent = *node;
 449		data = rb_entry(*node, struct page_stat, node);
 450
 451		cmp = data->page - pstat->page;
 452		if (cmp < 0)
 453			node = &parent->rb_left;
 454		else if (cmp > 0)
 455			node = &parent->rb_right;
 456		else
 457			return data;
 458	}
 459
 460	if (!create)
 461		return NULL;
 462
 463	data = zalloc(sizeof(*data));
 464	if (data != NULL) {
 465		data->page = pstat->page;
 466		data->order = pstat->order;
 467		data->gfp_flags = pstat->gfp_flags;
 468		data->migrate_type = pstat->migrate_type;
 469
 470		rb_link_node(&data->node, parent, node);
 471		rb_insert_color(&data->node, &page_live_tree);
 472	}
 473
 474	return data;
 475}
 476
 477static struct page_stat *page_stat__find_page(struct page_stat *pstat)
 478{
 479	return __page_stat__findnew_page(pstat, false);
 480}
 481
 482static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
 483{
 484	return __page_stat__findnew_page(pstat, true);
 485}
 486
 487static struct page_stat *
 488__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
 489{
 490	struct rb_node **node = &page_alloc_tree.rb_node;
 491	struct rb_node *parent = NULL;
 492	struct page_stat *data;
 493	struct sort_dimension *sort;
 494
 495	while (*node) {
 496		int cmp = 0;
 497
 498		parent = *node;
 499		data = rb_entry(*node, struct page_stat, node);
 500
 501		list_for_each_entry(sort, &page_alloc_sort_input, list) {
 502			cmp = sort->cmp(pstat, data);
 503			if (cmp)
 504				break;
 505		}
 506
 507		if (cmp < 0)
 508			node = &parent->rb_left;
 509		else if (cmp > 0)
 510			node = &parent->rb_right;
 511		else
 512			return data;
 513	}
 514
 515	if (!create)
 516		return NULL;
 517
 518	data = zalloc(sizeof(*data));
 519	if (data != NULL) {
 520		data->page = pstat->page;
 521		data->order = pstat->order;
 522		data->gfp_flags = pstat->gfp_flags;
 523		data->migrate_type = pstat->migrate_type;
 524
 525		rb_link_node(&data->node, parent, node);
 526		rb_insert_color(&data->node, &page_alloc_tree);
 527	}
 528
 529	return data;
 530}
 531
 532static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
 533{
 534	return __page_stat__findnew_alloc(pstat, false);
 535}
 536
 537static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
 538{
 539	return __page_stat__findnew_alloc(pstat, true);
 540}
 541
 542static struct page_stat *
 543__page_stat__findnew_caller(struct page_stat *pstat, bool create)
 544{
 545	struct rb_node **node = &page_caller_tree.rb_node;
 546	struct rb_node *parent = NULL;
 547	struct page_stat *data;
 548	struct sort_dimension *sort;
 549
 550	while (*node) {
 551		int cmp = 0;
 552
 553		parent = *node;
 554		data = rb_entry(*node, struct page_stat, node);
 555
 556		list_for_each_entry(sort, &page_caller_sort_input, list) {
 557			cmp = sort->cmp(pstat, data);
 558			if (cmp)
 559				break;
 560		}
 561
 562		if (cmp < 0)
 563			node = &parent->rb_left;
 564		else if (cmp > 0)
 565			node = &parent->rb_right;
 566		else
 567			return data;
 568	}
 569
 570	if (!create)
 571		return NULL;
 572
 573	data = zalloc(sizeof(*data));
 574	if (data != NULL) {
 575		data->callsite = pstat->callsite;
 576		data->order = pstat->order;
 577		data->gfp_flags = pstat->gfp_flags;
 578		data->migrate_type = pstat->migrate_type;
 579
 580		rb_link_node(&data->node, parent, node);
 581		rb_insert_color(&data->node, &page_caller_tree);
 582	}
 583
 584	return data;
 585}
 586
 587static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
 588{
 589	return __page_stat__findnew_caller(pstat, false);
 590}
 591
 592static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
 593{
 594	return __page_stat__findnew_caller(pstat, true);
 595}
 596
 597static bool valid_page(u64 pfn_or_page)
 598{
 599	if (use_pfn && pfn_or_page == -1UL)
 600		return false;
 601	if (!use_pfn && pfn_or_page == 0)
 602		return false;
 603	return true;
 604}
 605
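/*
 * GFP flags are printed in compacted form to keep the columns narrow;
 * e.g. a trace string of "GFP_KERNEL|__GFP_ZERO" becomes "K|Z" (see
 * gfp_compact_table below; print_gfp_flags() emits the legend).
 */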
 606struct gfp_flag {
 607	unsigned int flags;
 608	char *compact_str;
 609	char *human_readable;
 610};
 611
 612static struct gfp_flag *gfps;
 613static int nr_gfps;
 614
 615static int gfpcmp(const void *a, const void *b)
 616{
 617	const struct gfp_flag *fa = a;
 618	const struct gfp_flag *fb = b;
 619
 620	return fa->flags - fb->flags;
 621}
 622
 623/* see include/trace/events/mmflags.h */
 624static const struct {
 625	const char *original;
 626	const char *compact;
 627} gfp_compact_table[] = {
 628	{ "GFP_TRANSHUGE",		"THP" },
 629	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
 630	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
 631	{ "GFP_HIGHUSER",		"HU" },
 632	{ "GFP_USER",			"U" },
 633	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
 634	{ "GFP_KERNEL",			"K" },
 635	{ "GFP_NOFS",			"NF" },
 636	{ "GFP_ATOMIC",			"A" },
 637	{ "GFP_NOIO",			"NI" },
 638	{ "GFP_NOWAIT",			"NW" },
 639	{ "GFP_DMA",			"D" },
 640	{ "__GFP_HIGHMEM",		"HM" },
 641	{ "GFP_DMA32",			"D32" },
 642	{ "__GFP_HIGH",			"H" },
 643	{ "__GFP_ATOMIC",		"_A" },
 644	{ "__GFP_IO",			"I" },
 645	{ "__GFP_FS",			"F" },
 646	{ "__GFP_NOWARN",		"NWR" },
 647	{ "__GFP_RETRY_MAYFAIL",	"R" },
 648	{ "__GFP_NOFAIL",		"NF" },
 649	{ "__GFP_NORETRY",		"NR" },
 650	{ "__GFP_COMP",			"C" },
 651	{ "__GFP_ZERO",			"Z" },
 652	{ "__GFP_NOMEMALLOC",		"NMA" },
 653	{ "__GFP_MEMALLOC",		"MA" },
 654	{ "__GFP_HARDWALL",		"HW" },
 655	{ "__GFP_THISNODE",		"TN" },
 656	{ "__GFP_RECLAIMABLE",		"RC" },
 657	{ "__GFP_MOVABLE",		"M" },
 658	{ "__GFP_ACCOUNT",		"AC" },
 659	{ "__GFP_WRITE",		"WR" },
 660	{ "__GFP_RECLAIM",		"R" },
 661	{ "__GFP_DIRECT_RECLAIM",	"DR" },
 662	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
 663};
 664
 665static size_t max_gfp_len;
 666
 667static char *compact_gfp_flags(char *gfp_flags)
 668{
 669	char *orig_flags = strdup(gfp_flags);
 670	char *new_flags = NULL;
 671	char *str, *pos = NULL;
 672	size_t len = 0;
 673
 674	if (orig_flags == NULL)
 675		return NULL;
 676
 677	str = strtok_r(orig_flags, "|", &pos);
 678	while (str) {
 679		size_t i;
 680		char *new;
 681		const char *cpt;
 682
 683		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
 684			if (strcmp(gfp_compact_table[i].original, str))
 685				continue;
 686
 687			cpt = gfp_compact_table[i].compact;
 688			new = realloc(new_flags, len + strlen(cpt) + 2);
 689			if (new == NULL) {
 690				free(new_flags);
 691				free(orig_flags);
 692				return NULL;
 693			}
 694
 695			new_flags = new;
 696
 697			if (!len) {
 698				strcpy(new_flags, cpt);
 699			} else {
 700				strcat(new_flags, "|");
 701				strcat(new_flags, cpt);
 702				len++;
 703			}
 704
 705			len += strlen(cpt);
 706		}
 707
 708		str = strtok_r(NULL, "|", &pos);
 709	}
 710
 711	if (max_gfp_len < len)
 712		max_gfp_len = len;
 713
 714	free(orig_flags);
 715	return new_flags;
 716}
 717
 718static char *compact_gfp_string(unsigned long gfp_flags)
 719{
 720	struct gfp_flag key = {
 721		.flags = gfp_flags,
 722	};
 723	struct gfp_flag *gfp;
 724
 725	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 726	if (gfp)
 727		return gfp->compact_str;
 728
 729	return NULL;
 730}
 731
 732static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
 733			   unsigned int gfp_flags)
 734{
 735	struct tep_record record = {
 736		.cpu = sample->cpu,
 737		.data = sample->raw_data,
 738		.size = sample->raw_size,
 739	};
 740	struct trace_seq seq;
 741	char *str, *pos = NULL;
 742
 743	if (nr_gfps) {
 744		struct gfp_flag key = {
 745			.flags = gfp_flags,
 746		};
 747
 748		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
 749			return 0;
 750	}
 751
 752	trace_seq_init(&seq);
 753	tep_print_event(evsel->tp_format->tep,
 754			&seq, &record, "%s", TEP_PRINT_INFO);
 755
 756	str = strtok_r(seq.buffer, " ", &pos);
 757	while (str) {
 758		if (!strncmp(str, "gfp_flags=", 10)) {
 759			struct gfp_flag *new;
 760
 761			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
 762			if (new == NULL)
 763				return -ENOMEM;
 764
 765			gfps = new;
 766			new += nr_gfps++;
 767
 768			new->flags = gfp_flags;
 769			new->human_readable = strdup(str + 10);
 770			new->compact_str = compact_gfp_flags(str + 10);
 771			if (!new->human_readable || !new->compact_str)
 772				return -ENOMEM;
 773
 774			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
 775		}
 776
 777		str = strtok_r(NULL, " ", &pos);
 778	}
 779
 780	trace_seq_destroy(&seq);
 781	return 0;
 782}
 783
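/*
 * mm_page_alloc handler, with bytes = kmem_page_size << order.  Each
 * sample updates the live tree (so the matching free can recover the
 * gfp flags and migrate type), the cumulative alloc tree (skipped with
 * --live), and the per-callsite tree via find_callsite().
 */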
 784	static int evsel__process_page_alloc_event(struct evsel *evsel, struct perf_sample *sample)
 785{
 786	u64 page;
 787	unsigned int order = evsel__intval(evsel, sample, "order");
 788	unsigned int gfp_flags = evsel__intval(evsel, sample, "gfp_flags");
 789	unsigned int migrate_type = evsel__intval(evsel, sample,
 790						       "migratetype");
 791	u64 bytes = kmem_page_size << order;
 792	u64 callsite;
 793	struct page_stat *pstat;
 794	struct page_stat this = {
 795		.order = order,
 796		.gfp_flags = gfp_flags,
 797		.migrate_type = migrate_type,
 798	};
 799
 800	if (use_pfn)
 801		page = evsel__intval(evsel, sample, "pfn");
 802	else
 803		page = evsel__intval(evsel, sample, "page");
 804
 805	nr_page_allocs++;
 806	total_page_alloc_bytes += bytes;
 807
 808	if (!valid_page(page)) {
 809		nr_page_fails++;
 810		total_page_fail_bytes += bytes;
 811
 812		return 0;
 813	}
 814
 815	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
 816		return -1;
 817
 818	callsite = find_callsite(evsel, sample);
 819
 820	/*
 821	 * Record the page in the live tree so that the matching free
 822	 * event can recover its gfp flags and migrate type.
 823	 */
 824	this.page = page;
 825	pstat = page_stat__findnew_page(&this);
 826	if (pstat == NULL)
 827		return -ENOMEM;
 828
 829	pstat->nr_alloc++;
 830	pstat->alloc_bytes += bytes;
 831	pstat->callsite = callsite;
 832
 833	if (!live_page) {
 834		pstat = page_stat__findnew_alloc(&this);
 835		if (pstat == NULL)
 836			return -ENOMEM;
 837
 838		pstat->nr_alloc++;
 839		pstat->alloc_bytes += bytes;
 840		pstat->callsite = callsite;
 841	}
 842
 843	this.callsite = callsite;
 844	pstat = page_stat__findnew_caller(&this);
 845	if (pstat == NULL)
 846		return -ENOMEM;
 847
 848	pstat->nr_alloc++;
 849	pstat->alloc_bytes += bytes;
 850
 851	order_stats[order][migrate_type]++;
 852
 853	return 0;
 854}
 855
 856	static int evsel__process_page_free_event(struct evsel *evsel, struct perf_sample *sample)
 857{
 858	u64 page;
 859	unsigned int order = evsel__intval(evsel, sample, "order");
 860	u64 bytes = kmem_page_size << order;
 861	struct page_stat *pstat;
 862	struct page_stat this = {
 863		.order = order,
 864	};
 865
 866	if (use_pfn)
 867		page = evsel__intval(evsel, sample, "pfn");
 868	else
 869		page = evsel__intval(evsel, sample, "page");
 870
 871	nr_page_frees++;
 872	total_page_free_bytes += bytes;
 873
 874	this.page = page;
 875	pstat = page_stat__find_page(&this);
 876	if (pstat == NULL) {
 877		pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
 878			  page, order);
 879
 880		nr_page_nomatch++;
 881		total_page_nomatch_bytes += bytes;
 882
 883		return 0;
 884	}
 885
 886	this.gfp_flags = pstat->gfp_flags;
 887	this.migrate_type = pstat->migrate_type;
 888	this.callsite = pstat->callsite;
 889
 890	rb_erase(&pstat->node, &page_live_tree);
 891	free(pstat);
 892
 893	if (live_page) {
 894		order_stats[this.order][this.migrate_type]--;
 895	} else {
 896		pstat = page_stat__find_alloc(&this);
 897		if (pstat == NULL)
 898			return -ENOMEM;
 899
 900		pstat->nr_free++;
 901		pstat->free_bytes += bytes;
 902	}
 903
 904	pstat = page_stat__find_caller(&this);
 905	if (pstat == NULL)
 906		return -ENOENT;
 907
 908	pstat->nr_free++;
 909	pstat->free_bytes += bytes;
 910
 911	if (live_page) {
 912		pstat->nr_alloc--;
 913		pstat->alloc_bytes -= bytes;
 914
 915		if (pstat->nr_alloc == 0) {
 916			rb_erase(&pstat->node, &page_caller_tree);
 917			free(pstat);
 918		}
 919	}
 920
 921	return 0;
 922}
 923
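/*
 * --time filtering: samples outside the requested window are dropped
 * before any tracepoint handler runs, e.g. "--time 10.0,20.0" keeps
 * only samples between those two timestamps.
 */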
 924static bool perf_kmem__skip_sample(struct perf_sample *sample)
 925{
 926	/* skip sample based on time? */
 927	if (perf_time__skip_sample(&ptime, sample->time))
 928		return true;
 929
 930	return false;
 931}
 932
 933typedef int (*tracepoint_handler)(struct evsel *evsel,
 934				  struct perf_sample *sample);
 935
 936static int process_sample_event(struct perf_tool *tool __maybe_unused,
 937				union perf_event *event,
 938				struct perf_sample *sample,
 939				struct evsel *evsel,
 940				struct machine *machine)
 941{
 942	int err = 0;
 943	struct thread *thread = machine__findnew_thread(machine, sample->pid,
 944							sample->tid);
 945
 946	if (thread == NULL) {
 947		pr_debug("problem processing %d event, skipping it.\n",
 948			 event->header.type);
 949		return -1;
 950	}
 951
 952	if (perf_kmem__skip_sample(sample))
 953		return 0;
 954
 955	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
 956
 957	if (evsel->handler != NULL) {
 958		tracepoint_handler f = evsel->handler;
 959		err = f(evsel, sample);
 960	}
 961
 962	thread__put(thread);
 963
 964	return err;
 965}
 966
 967static struct perf_tool perf_kmem = {
 968	.sample		 = process_sample_event,
 969	.comm		 = perf_event__process_comm,
 970	.mmap		 = perf_event__process_mmap,
 971	.mmap2		 = perf_event__process_mmap2,
 972	.namespaces	 = perf_event__process_namespaces,
 973	.ordered_events	 = true,
 974};
 975
 976static double fragmentation(unsigned long n_req, unsigned long n_alloc)
 977{
 978	if (n_alloc == 0)
 979		return 0.0;
 980	else
 981		return 100.0 - (100.0 * n_req / n_alloc);
 982}
 983
 984static void __print_slab_result(struct rb_root *root,
 985				struct perf_session *session,
 986				int n_lines, int is_caller)
 987{
 988	struct rb_node *next;
 989	struct machine *machine = &session->machines.host;
 990
 991	printf("%.105s\n", graph_dotted_line);
 992	printf(" %-34s |",  is_caller ? "Callsite": "Alloc Ptr");
 993	printf(" Total_alloc/Per | Total_req/Per   | Hit      | Ping-pong | Frag\n");
 994	printf("%.105s\n", graph_dotted_line);
 995
 996	next = rb_first(root);
 997
 998	while (next && n_lines--) {
 999		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
1000						   node);
1001		struct symbol *sym = NULL;
1002		struct map *map;
1003		char buf[BUFSIZ];
1004		u64 addr;
1005
1006		if (is_caller) {
1007			addr = data->call_site;
1008			if (!raw_ip)
1009				sym = machine__find_kernel_symbol(machine, addr, &map);
1010		} else
1011			addr = data->ptr;
1012
1013		if (sym != NULL)
1014			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
1015				 addr - map->unmap_ip(map, sym->start));
1016		else
1017			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
1018		printf(" %-34s |", buf);
1019
1020		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
1021		       (unsigned long long)data->bytes_alloc,
1022		       (unsigned long)data->bytes_alloc / data->hit,
1023		       (unsigned long long)data->bytes_req,
1024		       (unsigned long)data->bytes_req / data->hit,
1025		       (unsigned long)data->hit,
1026		       (unsigned long)data->pingpong,
1027		       fragmentation(data->bytes_req, data->bytes_alloc));
1028
1029		next = rb_next(next);
1030	}
1031
1032	if (n_lines == -1)
1033		printf(" ...                                | ...             | ...             | ...      | ...       | ...   \n");
1034
1035	printf("%.105s\n", graph_dotted_line);
1036}
1037
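/*
 * Printable names for the MIGRATE_* values reported in the
 * tracepoint's migratetype field.
 */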
1038static const char * const migrate_type_str[] = {
1039	"UNMOVABL",
1040	"RECLAIM",
1041	"MOVABLE",
1042	"RESERVED",
1043	"CMA/ISLT",
1044	"UNKNOWN",
1045};
1046
1047static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1048{
1049	struct rb_node *next = rb_first(&page_alloc_sorted);
1050	struct machine *machine = &session->machines.host;
1051	const char *format;
1052	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1053
1054	printf("\n%.105s\n", graph_dotted_line);
1055	printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1056	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
1057	       gfp_len, "GFP flags");
1058	printf("%.105s\n", graph_dotted_line);
1059
1060	if (use_pfn)
1061		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1062	else
1063		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1064
1065	while (next && n_lines--) {
1066		struct page_stat *data;
1067		struct symbol *sym;
1068		struct map *map;
1069		char buf[32];
1070		char *caller = buf;
1071
1072		data = rb_entry(next, struct page_stat, node);
1073		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1074		if (sym)
1075			caller = sym->name;
1076		else
1077			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1078
1079		printf(format, (unsigned long long)data->page,
1080		       (unsigned long long)data->alloc_bytes / 1024,
1081		       data->nr_alloc, data->order,
1082		       migrate_type_str[data->migrate_type],
1083		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1084
1085		next = rb_next(next);
1086	}
1087
1088	if (n_lines == -1) {
1089		printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
1090		       gfp_len, "...");
1091	}
1092
1093	printf("%.105s\n", graph_dotted_line);
1094}
1095
1096static void __print_page_caller_result(struct perf_session *session, int n_lines)
1097{
1098	struct rb_node *next = rb_first(&page_caller_sorted);
1099	struct machine *machine = &session->machines.host;
1100	int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1101
1102	printf("\n%.105s\n", graph_dotted_line);
1103	printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
1104	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
1105	printf("%.105s\n", graph_dotted_line);
1106
1107	while (next && n_lines--) {
1108		struct page_stat *data;
1109		struct symbol *sym;
1110		struct map *map;
1111		char buf[32];
1112		char *caller = buf;
1113
1114		data = rb_entry(next, struct page_stat, node);
1115		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
1116		if (sym)
1117			caller = sym->name;
1118		else
1119			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1120
1121		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
1122		       (unsigned long long)data->alloc_bytes / 1024,
1123		       data->nr_alloc, data->order,
1124		       migrate_type_str[data->migrate_type],
1125		       gfp_len, compact_gfp_string(data->gfp_flags), caller);
1126
1127		next = rb_next(next);
1128	}
1129
1130	if (n_lines == -1) {
1131		printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
1132		       gfp_len, "...");
1133	}
1134
1135	printf("%.105s\n", graph_dotted_line);
1136}
1137
1138static void print_gfp_flags(void)
1139{
1140	int i;
1141
1142	printf("#\n");
1143	printf("# GFP flags\n");
1144	printf("# ---------\n");
1145	for (i = 0; i < nr_gfps; i++) {
1146		printf("# %08x: %*s: %s\n", gfps[i].flags,
1147		       (int) max_gfp_len, gfps[i].compact_str,
1148		       gfps[i].human_readable);
1149	}
1150}
1151
1152static void print_slab_summary(void)
1153{
1154	printf("\nSUMMARY (SLAB allocator)");
1155	printf("\n========================\n");
1156	printf("Total bytes requested: %'lu\n", total_requested);
1157	printf("Total bytes allocated: %'lu\n", total_allocated);
1158	printf("Total bytes freed:     %'lu\n", total_freed);
1159	if (total_allocated > total_freed) {
1160		printf("Net total bytes allocated: %'lu\n",
1161		total_allocated - total_freed);
1162	}
1163	printf("Total bytes wasted on internal fragmentation: %'lu\n",
1164	       total_allocated - total_requested);
1165	printf("Internal fragmentation: %f%%\n",
1166	       fragmentation(total_requested, total_allocated));
1167	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
1168}
1169
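/*
 * Derived counters: "alloc+freed" are frees that matched a recorded
 * allocation (nr_page_frees - nr_page_nomatch); "alloc-only" pages were
 * never seen freed; "free-only" frees had no matching alloc, typically
 * pages allocated before recording started.
 */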
1170static void print_page_summary(void)
1171{
1172	int o, m;
1173	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
1174	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
1175
1176	printf("\nSUMMARY (page allocator)");
1177	printf("\n========================\n");
1178	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation requests",
1179	       nr_page_allocs, total_page_alloc_bytes / 1024);
1180	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free requests",
1181	       nr_page_frees, total_page_free_bytes / 1024);
1182	printf("\n");
1183
1184	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
1185	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
1186	printf("%-30s: %'16"PRIu64"   [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
1187	       nr_page_allocs - nr_alloc_freed,
1188	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
1189	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total free-only requests",
1190	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
1191	printf("\n");
1192
1193	printf("%-30s: %'16lu   [ %'16"PRIu64" KB ]\n", "Total allocation failures",
1194	       nr_page_fails, total_page_fail_bytes / 1024);
1195	printf("\n");
1196
1197	printf("%5s  %12s  %12s  %12s  %12s  %12s\n", "Order",  "Unmovable",
1198	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
1199	printf("%.5s  %.12s  %.12s  %.12s  %.12s  %.12s\n", graph_dotted_line,
1200	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
1201	       graph_dotted_line, graph_dotted_line);
1202
1203	for (o = 0; o < MAX_PAGE_ORDER; o++) {
1204		printf("%5d", o);
1205		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
1206			if (order_stats[o][m])
1207				printf("  %'12d", order_stats[o][m]);
1208			else
1209				printf("  %12c", '.');
1210		}
1211		printf("\n");
1212	}
1213}
1214
1215static void print_slab_result(struct perf_session *session)
1216{
1217	if (caller_flag)
1218		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
1219	if (alloc_flag)
1220		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
1221	print_slab_summary();
1222}
1223
1224static void print_page_result(struct perf_session *session)
1225{
1226	if (caller_flag || alloc_flag)
1227		print_gfp_flags();
1228	if (caller_flag)
1229		__print_page_caller_result(session, caller_lines);
1230	if (alloc_flag)
1231		__print_page_alloc_result(session, alloc_lines);
1232	print_page_summary();
1233}
1234
1235static void print_result(struct perf_session *session)
1236{
1237	if (kmem_slab)
1238		print_slab_result(session);
1239	if (kmem_page)
1240		print_page_result(session);
1241}
1242
1243static LIST_HEAD(slab_caller_sort);
1244static LIST_HEAD(slab_alloc_sort);
1245static LIST_HEAD(page_caller_sort);
1246static LIST_HEAD(page_alloc_sort);
1247
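/*
 * Final sorting pass: entries are drained from the accumulation tree
 * and re-inserted into a result tree ordered by the user's --sort keys,
 * first key most significant.
 */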
1248static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
1249			     struct list_head *sort_list)
1250{
1251	struct rb_node **new = &(root->rb_node);
1252	struct rb_node *parent = NULL;
1253	struct sort_dimension *sort;
1254
1255	while (*new) {
1256		struct alloc_stat *this;
1257		int cmp = 0;
1258
1259		this = rb_entry(*new, struct alloc_stat, node);
1260		parent = *new;
1261
1262		list_for_each_entry(sort, sort_list, list) {
1263			cmp = sort->cmp(data, this);
1264			if (cmp)
1265				break;
1266		}
1267
1268		if (cmp > 0)
1269			new = &((*new)->rb_left);
1270		else
1271			new = &((*new)->rb_right);
1272	}
1273
1274	rb_link_node(&data->node, parent, new);
1275	rb_insert_color(&data->node, root);
1276}
1277
1278static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1279			       struct list_head *sort_list)
1280{
1281	struct rb_node *node;
1282	struct alloc_stat *data;
1283
1284	for (;;) {
1285		node = rb_first(root);
1286		if (!node)
1287			break;
1288
1289		rb_erase(node, root);
1290		data = rb_entry(node, struct alloc_stat, node);
1291		sort_slab_insert(root_sorted, data, sort_list);
1292	}
1293}
1294
1295static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1296			     struct list_head *sort_list)
1297{
1298	struct rb_node **new = &root->rb_node;
1299	struct rb_node *parent = NULL;
1300	struct sort_dimension *sort;
1301
1302	while (*new) {
1303		struct page_stat *this;
1304		int cmp = 0;
1305
1306		this = rb_entry(*new, struct page_stat, node);
1307		parent = *new;
1308
1309		list_for_each_entry(sort, sort_list, list) {
1310			cmp = sort->cmp(data, this);
1311			if (cmp)
1312				break;
1313		}
1314
1315		if (cmp > 0)
1316			new = &parent->rb_left;
1317		else
1318			new = &parent->rb_right;
1319	}
1320
1321	rb_link_node(&data->node, parent, new);
1322	rb_insert_color(&data->node, root);
1323}
1324
1325static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1326			       struct list_head *sort_list)
1327{
1328	struct rb_node *node;
1329	struct page_stat *data;
1330
1331	for (;;) {
1332		node = rb_first(root);
1333		if (!node)
1334			break;
1335
1336		rb_erase(node, root);
1337		data = rb_entry(node, struct page_stat, node);
1338		sort_page_insert(root_sorted, data, sort_list);
1339	}
1340}
1341
1342static void sort_result(void)
1343{
1344	if (kmem_slab) {
1345		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
1346				   &slab_alloc_sort);
1347		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
1348				   &slab_caller_sort);
1349	}
1350	if (kmem_page) {
1351		if (live_page)
1352			__sort_page_result(&page_live_tree, &page_alloc_sorted,
1353					   &page_alloc_sort);
1354		else
1355			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
1356					   &page_alloc_sort);
1357
1358		__sort_page_result(&page_caller_tree, &page_caller_sorted,
1359				   &page_caller_sort);
1360	}
1361}
1362
1363static int __cmd_kmem(struct perf_session *session)
1364{
1365	int err = -EINVAL;
1366	struct evsel *evsel;
1367	const struct evsel_str_handler kmem_tracepoints[] = {
1368		/* slab allocator */
1369		{ "kmem:kmalloc",		evsel__process_alloc_event, },
1370		{ "kmem:kmem_cache_alloc",	evsel__process_alloc_event, },
1371		{ "kmem:kmalloc_node",		evsel__process_alloc_node_event, },
1372		{ "kmem:kmem_cache_alloc_node", evsel__process_alloc_node_event, },
1373		{ "kmem:kfree",			evsel__process_free_event, },
1374		{ "kmem:kmem_cache_free",	evsel__process_free_event, },
1375		/* page allocator */
1376		{ "kmem:mm_page_alloc",		evsel__process_page_alloc_event, },
1377		{ "kmem:mm_page_free",		evsel__process_page_free_event, },
1378	};
1379
1380	if (!perf_session__has_traces(session, "kmem record"))
1381		goto out;
1382
1383	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1384		pr_err("Initializing perf session tracepoint handlers failed\n");
1385		goto out;
1386	}
1387
1388	evlist__for_each_entry(session->evlist, evsel) {
1389		if (!strcmp(evsel__name(evsel), "kmem:mm_page_alloc") &&
1390		    evsel__field(evsel, "pfn")) {
1391			use_pfn = true;
1392			break;
1393		}
1394	}
1395
1396	setup_pager();
1397	err = perf_session__process_events(session);
1398	if (err != 0) {
1399		pr_err("error during process events: %d\n", err);
1400		goto out;
1401	}
1402	sort_result();
1403	print_result(session);
1404out:
1405	return err;
1406}
1407
1408/* slab sort keys */
1409static int ptr_cmp(void *a, void *b)
1410{
1411	struct alloc_stat *l = a;
1412	struct alloc_stat *r = b;
1413
1414	if (l->ptr < r->ptr)
1415		return -1;
1416	else if (l->ptr > r->ptr)
1417		return 1;
1418	return 0;
1419}
1420
1421static struct sort_dimension ptr_sort_dimension = {
1422	.name	= "ptr",
1423	.cmp	= ptr_cmp,
1424};
1425
1426static int slab_callsite_cmp(void *a, void *b)
1427{
1428	struct alloc_stat *l = a;
1429	struct alloc_stat *r = b;
1430
1431	if (l->call_site < r->call_site)
1432		return -1;
1433	else if (l->call_site > r->call_site)
1434		return 1;
1435	return 0;
1436}
1437
1438static struct sort_dimension callsite_sort_dimension = {
1439	.name	= "callsite",
1440	.cmp	= slab_callsite_cmp,
1441};
1442
1443static int hit_cmp(void *a, void *b)
1444{
1445	struct alloc_stat *l = a;
1446	struct alloc_stat *r = b;
1447
1448	if (l->hit < r->hit)
1449		return -1;
1450	else if (l->hit > r->hit)
1451		return 1;
1452	return 0;
1453}
1454
1455static struct sort_dimension hit_sort_dimension = {
1456	.name	= "hit",
1457	.cmp	= hit_cmp,
1458};
1459
1460static int bytes_cmp(void *a, void *b)
1461{
1462	struct alloc_stat *l = a;
1463	struct alloc_stat *r = b;
1464
1465	if (l->bytes_alloc < r->bytes_alloc)
1466		return -1;
1467	else if (l->bytes_alloc > r->bytes_alloc)
1468		return 1;
1469	return 0;
1470}
1471
1472static struct sort_dimension bytes_sort_dimension = {
1473	.name	= "bytes",
1474	.cmp	= bytes_cmp,
1475};
1476
1477static int frag_cmp(void *a, void *b)
1478{
1479	double x, y;
1480	struct alloc_stat *l = a;
1481	struct alloc_stat *r = b;
1482
1483	x = fragmentation(l->bytes_req, l->bytes_alloc);
1484	y = fragmentation(r->bytes_req, r->bytes_alloc);
1485
1486	if (x < y)
1487		return -1;
1488	else if (x > y)
1489		return 1;
1490	return 0;
1491}
1492
1493static struct sort_dimension frag_sort_dimension = {
1494	.name	= "frag",
1495	.cmp	= frag_cmp,
1496};
1497
1498static int pingpong_cmp(void *a, void *b)
1499{
1500	struct alloc_stat *l = a;
1501	struct alloc_stat *r = b;
1502
1503	if (l->pingpong < r->pingpong)
1504		return -1;
1505	else if (l->pingpong > r->pingpong)
1506		return 1;
1507	return 0;
1508}
1509
1510static struct sort_dimension pingpong_sort_dimension = {
1511	.name	= "pingpong",
1512	.cmp	= pingpong_cmp,
1513};
1514
1515/* page sort keys */
1516static int page_cmp(void *a, void *b)
1517{
1518	struct page_stat *l = a;
1519	struct page_stat *r = b;
1520
1521	if (l->page < r->page)
1522		return -1;
1523	else if (l->page > r->page)
1524		return 1;
1525	return 0;
1526}
1527
1528static struct sort_dimension page_sort_dimension = {
1529	.name	= "page",
1530	.cmp	= page_cmp,
1531};
1532
1533static int page_callsite_cmp(void *a, void *b)
1534{
1535	struct page_stat *l = a;
1536	struct page_stat *r = b;
1537
1538	if (l->callsite < r->callsite)
1539		return -1;
1540	else if (l->callsite > r->callsite)
1541		return 1;
1542	return 0;
1543}
1544
1545static struct sort_dimension page_callsite_sort_dimension = {
1546	.name	= "callsite",
1547	.cmp	= page_callsite_cmp,
1548};
1549
1550static int page_hit_cmp(void *a, void *b)
1551{
1552	struct page_stat *l = a;
1553	struct page_stat *r = b;
1554
1555	if (l->nr_alloc < r->nr_alloc)
1556		return -1;
1557	else if (l->nr_alloc > r->nr_alloc)
1558		return 1;
1559	return 0;
1560}
1561
1562static struct sort_dimension page_hit_sort_dimension = {
1563	.name	= "hit",
1564	.cmp	= page_hit_cmp,
1565};
1566
1567static int page_bytes_cmp(void *a, void *b)
1568{
1569	struct page_stat *l = a;
1570	struct page_stat *r = b;
1571
1572	if (l->alloc_bytes < r->alloc_bytes)
1573		return -1;
1574	else if (l->alloc_bytes > r->alloc_bytes)
1575		return 1;
1576	return 0;
1577}
1578
1579static struct sort_dimension page_bytes_sort_dimension = {
1580	.name	= "bytes",
1581	.cmp	= page_bytes_cmp,
1582};
1583
1584static int page_order_cmp(void *a, void *b)
1585{
1586	struct page_stat *l = a;
1587	struct page_stat *r = b;
1588
1589	if (l->order < r->order)
1590		return -1;
1591	else if (l->order > r->order)
1592		return 1;
1593	return 0;
1594}
1595
1596static struct sort_dimension page_order_sort_dimension = {
1597	.name	= "order",
1598	.cmp	= page_order_cmp,
1599};
1600
1601static int migrate_type_cmp(void *a, void *b)
1602{
1603	struct page_stat *l = a;
1604	struct page_stat *r = b;
1605
1606	/* for internal use to find free'd page */
1607	if (l->migrate_type == -1U)
1608		return 0;
1609
1610	if (l->migrate_type < r->migrate_type)
1611		return -1;
1612	else if (l->migrate_type > r->migrate_type)
1613		return 1;
1614	return 0;
1615}
1616
1617static struct sort_dimension migrate_type_sort_dimension = {
1618	.name	= "migtype",
1619	.cmp	= migrate_type_cmp,
1620};
1621
1622static int gfp_flags_cmp(void *a, void *b)
1623{
1624	struct page_stat *l = a;
1625	struct page_stat *r = b;
1626
1627	/* for internal use to find free'd page */
1628	if (l->gfp_flags == -1U)
1629		return 0;
1630
1631	if (l->gfp_flags < r->gfp_flags)
1632		return -1;
1633	else if (l->gfp_flags > r->gfp_flags)
1634		return 1;
1635	return 0;
1636}
1637
1638static struct sort_dimension gfp_flags_sort_dimension = {
1639	.name	= "gfp",
1640	.cmp	= gfp_flags_cmp,
1641};
1642
1643static struct sort_dimension *slab_sorts[] = {
1644	&ptr_sort_dimension,
1645	&callsite_sort_dimension,
1646	&hit_sort_dimension,
1647	&bytes_sort_dimension,
1648	&frag_sort_dimension,
1649	&pingpong_sort_dimension,
1650};
1651
1652static struct sort_dimension *page_sorts[] = {
1653	&page_sort_dimension,
1654	&page_callsite_sort_dimension,
1655	&page_hit_sort_dimension,
1656	&page_bytes_sort_dimension,
1657	&page_order_sort_dimension,
1658	&migrate_type_sort_dimension,
1659	&gfp_flags_sort_dimension,
1660};
1661
1662static int slab_sort_dimension__add(const char *tok, struct list_head *list)
1663{
1664	struct sort_dimension *sort;
1665	int i;
1666
1667	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1668		if (!strcmp(slab_sorts[i]->name, tok)) {
1669			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
1670			if (!sort) {
1671				pr_err("%s: memdup failed\n", __func__);
1672				return -1;
1673			}
1674			list_add_tail(&sort->list, list);
1675			return 0;
1676		}
1677	}
1678
1679	return -1;
1680}
1681
1682static int page_sort_dimension__add(const char *tok, struct list_head *list)
1683{
1684	struct sort_dimension *sort;
1685	int i;
1686
1687	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1688		if (!strcmp(page_sorts[i]->name, tok)) {
1689			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1690			if (!sort) {
1691				pr_err("%s: memdup failed\n", __func__);
1692				return -1;
1693			}
1694			list_add_tail(&sort->list, list);
1695			return 0;
1696		}
1697	}
1698
1699	return -1;
1700}
1701
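/*
 * Parse a comma-separated --sort string into a list of sort_dimensions;
 * e.g. the slab default "frag,hit,bytes" orders by fragmentation, then
 * hit count, then allocated bytes.
 */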
1702static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
1703{
1704	char *tok;
1705	char *str = strdup(arg);
1706	char *pos = str;
1707
1708	if (!str) {
1709		pr_err("%s: strdup failed\n", __func__);
1710		return -1;
1711	}
1712
1713	while (true) {
1714		tok = strsep(&pos, ",");
1715		if (!tok)
1716			break;
1717		if (slab_sort_dimension__add(tok, sort_list) < 0) {
1718			pr_err("Unknown slab --sort key: '%s'", tok);
1719			free(str);
1720			return -1;
1721		}
1722	}
1723
1724	free(str);
1725	return 0;
1726}
1727
1728static int setup_page_sorting(struct list_head *sort_list, const char *arg)
1729{
1730	char *tok;
1731	char *str = strdup(arg);
1732	char *pos = str;
1733
1734	if (!str) {
1735		pr_err("%s: strdup failed\n", __func__);
1736		return -1;
1737	}
1738
1739	while (true) {
1740		tok = strsep(&pos, ",");
1741		if (!tok)
1742			break;
1743		if (page_sort_dimension__add(tok, sort_list) < 0) {
1744			pr_err("Unknown page --sort key: '%s'", tok);
1745			free(str);
1746			return -1;
1747		}
1748	}
1749
1750	free(str);
1751	return 0;
1752}
1753
1754static int parse_sort_opt(const struct option *opt __maybe_unused,
1755			  const char *arg, int unset __maybe_unused)
1756{
1757	if (!arg)
1758		return -1;
1759
1760	if (kmem_page > kmem_slab ||
1761	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
1762		if (caller_flag > alloc_flag)
1763			return setup_page_sorting(&page_caller_sort, arg);
1764		else
1765			return setup_page_sorting(&page_alloc_sort, arg);
1766	} else {
1767		if (caller_flag > alloc_flag)
1768			return setup_slab_sorting(&slab_caller_sort, arg);
1769		else
1770			return setup_slab_sorting(&slab_alloc_sort, arg);
1771	}
1772
1773	return 0;
1774}
1775
1776static int parse_caller_opt(const struct option *opt __maybe_unused,
1777			    const char *arg __maybe_unused,
1778			    int unset __maybe_unused)
1779{
1780	caller_flag = (alloc_flag + 1);
1781	return 0;
1782}
1783
1784static int parse_alloc_opt(const struct option *opt __maybe_unused,
1785			   const char *arg __maybe_unused,
1786			   int unset __maybe_unused)
1787{
1788	alloc_flag = (caller_flag + 1);
1789	return 0;
1790}
1791
1792static int parse_slab_opt(const struct option *opt __maybe_unused,
1793			  const char *arg __maybe_unused,
1794			  int unset __maybe_unused)
1795{
1796	kmem_slab = (kmem_page + 1);
1797	return 0;
1798}
1799
1800static int parse_page_opt(const struct option *opt __maybe_unused,
1801			  const char *arg __maybe_unused,
1802			  int unset __maybe_unused)
1803{
1804	kmem_page = (kmem_slab + 1);
1805	return 0;
1806}
1807
1808static int parse_line_opt(const struct option *opt __maybe_unused,
1809			  const char *arg, int unset __maybe_unused)
1810{
1811	int lines;
1812
1813	if (!arg)
1814		return -1;
1815
1816	lines = strtoul(arg, NULL, 10);
1817
1818	if (caller_flag > alloc_flag)
1819		caller_lines = lines;
1820	else
1821		alloc_lines = lines;
1822
1823	return 0;
1824}
1825
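/*
 * "perf kmem record" is a thin wrapper: it rebuilds an argv of
 * "perf record -a -R -c 1" plus the kmem:* tracepoints for the chosen
 * allocator(s), adding -g in page mode since callchains are needed to
 * resolve callsites, then hands it to cmd_record().
 */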
1826static int __cmd_record(int argc, const char **argv)
1827{
1828	const char * const record_args[] = {
1829	"record", "-a", "-R", "-c", "1",
1830	};
1831	const char * const slab_events[] = {
1832	"-e", "kmem:kmalloc",
1833	"-e", "kmem:kmalloc_node",
1834	"-e", "kmem:kfree",
1835	"-e", "kmem:kmem_cache_alloc",
1836	"-e", "kmem:kmem_cache_alloc_node",
1837	"-e", "kmem:kmem_cache_free",
1838	};
1839	const char * const page_events[] = {
1840	"-e", "kmem:mm_page_alloc",
1841	"-e", "kmem:mm_page_free",
1842	};
1843	unsigned int rec_argc, i, j;
1844	const char **rec_argv;
1845
1846	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1847	if (kmem_slab)
1848		rec_argc += ARRAY_SIZE(slab_events);
1849	if (kmem_page)
1850		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
1851
1852	rec_argv = calloc(rec_argc + 1, sizeof(char *));
1853
1854	if (rec_argv == NULL)
1855		return -ENOMEM;
1856
1857	for (i = 0; i < ARRAY_SIZE(record_args); i++)
1858		rec_argv[i] = strdup(record_args[i]);
1859
1860	if (kmem_slab) {
1861		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
1862			rec_argv[i] = strdup(slab_events[j]);
1863	}
1864	if (kmem_page) {
1865		rec_argv[i++] = strdup("-g");
1866
1867		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
1868			rec_argv[i] = strdup(page_events[j]);
1869	}
1870
1871	for (j = 1; j < (unsigned int)argc; j++, i++)
1872		rec_argv[i] = argv[j];
1873
1874	return cmd_record(i, rec_argv);
1875}
1876
1877static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
1878{
1879	if (!strcmp(var, "kmem.default")) {
1880		if (!strcmp(value, "slab"))
1881			kmem_default = KMEM_SLAB;
1882		else if (!strcmp(value, "page"))
1883			kmem_default = KMEM_PAGE;
1884		else
1885			pr_err("invalid default value ('slab' or 'page' required): %s\n",
1886			       value);
1887		return 0;
1888	}
1889
1890	return 0;
1891}
1892
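/*
 * The default analyzed allocator can also be set via perfconfig, e.g.:
 *
 *   [kmem]
 *           default = page
 *
 * making a bare "perf kmem stat" behave like --page.
 */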
1893int cmd_kmem(int argc, const char **argv)
1894{
1895	const char * const default_slab_sort = "frag,hit,bytes";
1896	const char * const default_page_sort = "bytes,hit";
1897	struct perf_data data = {
1898		.mode = PERF_DATA_MODE_READ,
1899	};
1900	const struct option kmem_options[] = {
1901	OPT_STRING('i', "input", &input_name, "file", "input file name"),
1902	OPT_INCR('v', "verbose", &verbose,
1903		    "be more verbose (show symbol address, etc)"),
1904	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1905			   "show per-callsite statistics", parse_caller_opt),
1906	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1907			   "show per-allocation statistics", parse_alloc_opt),
1908	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
1909		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1910		     "page, order, migtype, gfp", parse_sort_opt),
1911	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1912	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1913	OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
1914	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1915			   parse_slab_opt),
1916	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1917			   parse_page_opt),
1918	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
1919	OPT_STRING(0, "time", &time_str, "str",
1920		   "Time span of interest (start,stop)"),
1921	OPT_END()
1922	};
1923	const char *const kmem_subcommands[] = { "record", "stat", NULL };
1924	const char *kmem_usage[] = {
1925		NULL,
1926		NULL
1927	};
1928	struct perf_session *session;
1929	static const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
1930	int ret = perf_config(kmem_config, NULL);
1931
1932	if (ret)
1933		return ret;
1934
1935	argc = parse_options_subcommand(argc, argv, kmem_options,
1936					kmem_subcommands, kmem_usage,
1937					PARSE_OPT_STOP_AT_NON_OPTION);
1938
1939	if (!argc)
1940		usage_with_options(kmem_usage, kmem_options);
1941
1942	if (kmem_slab == 0 && kmem_page == 0) {
1943		if (kmem_default == KMEM_SLAB)
1944			kmem_slab = 1;
1945		else
1946			kmem_page = 1;
1947	}
1948
1949	if (!strncmp(argv[0], "rec", 3)) {
1950		symbol__init(NULL);
1951		return __cmd_record(argc, argv);
1952	}
1953
1954	data.path = input_name;
1955
1956	kmem_session = session = perf_session__new(&data, false, &perf_kmem);
1957	if (IS_ERR(session))
1958		return PTR_ERR(session);
1959
1960	ret = -1;
1961
1962	if (kmem_slab) {
1963		if (!evlist__find_tracepoint_by_name(session->evlist, "kmem:kmalloc")) {
1964			pr_err(errmsg, "slab", "slab");
1965			goto out_delete;
1966		}
1967	}
1968
1969	if (kmem_page) {
1970		struct evsel *evsel = evlist__find_tracepoint_by_name(session->evlist, "kmem:mm_page_alloc");
1971
1972		if (evsel == NULL) {
1973			pr_err(errmsg, "page", "page");
1974			goto out_delete;
1975		}
1976
1977		kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
1978		symbol_conf.use_callchain = true;
1979	}
1980
1981	symbol__init(&session->header.env);
1982
1983	if (perf_time__parse_str(&ptime, time_str) != 0) {
1984		pr_err("Invalid time string\n");
1985		ret = -EINVAL;
1986		goto out_delete;
1987	}
1988
1989	if (!strcmp(argv[0], "stat")) {
1990		setlocale(LC_ALL, "");
1991
1992		if (cpu__setup_cpunode_map())
1993			goto out_delete;
1994
1995		if (list_empty(&slab_caller_sort))
1996			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
1997		if (list_empty(&slab_alloc_sort))
1998			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
1999		if (list_empty(&page_caller_sort))
2000			setup_page_sorting(&page_caller_sort, default_page_sort);
2001		if (list_empty(&page_alloc_sort))
2002			setup_page_sorting(&page_alloc_sort, default_page_sort);
2003
2004		if (kmem_page) {
2005			setup_page_sorting(&page_alloc_sort_input,
2006					   "page,order,migtype,gfp");
2007			setup_page_sorting(&page_caller_sort_input,
2008					   "callsite,order,migtype,gfp");
2009		}
2010		ret = __cmd_kmem(session);
2011	} else
2012		usage_with_options(kmem_usage, kmem_options);
2013
2014out_delete:
2015	perf_session__delete(session);
2016
2017	return ret;
2018}
2019