// SPDX-License-Identifier: GPL-2.0
#include "builtin.h"
#include "perf.h"

#include "util/dso.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/config.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/callchain.h"
#include "util/time-utils.h"
#include <linux/err.h>

#include <subcmd/pager.h>
#include <subcmd/parse-options.h>
#include "util/trace-event.h"
#include "util/data.h"
#include "util/cpumap.h"

#include "util/debug.h"
#include "util/string2.h"

#include <linux/kernel.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <inttypes.h>
#include <locale.h>
#include <regex.h>

#include <linux/ctype.h>

static int kmem_slab;
static int kmem_page;

static long kmem_page_size;
static enum {
	KMEM_SLAB,
	KMEM_PAGE,
} kmem_default = KMEM_SLAB; /* for backward compatibility */

struct alloc_stat;
typedef int (*sort_fn_t)(void *, void *);

static int alloc_flag;
static int caller_flag;

static int alloc_lines = -1;
static int caller_lines = -1;

static bool raw_ip;

struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;
	u64	last_alloc;
	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated, total_freed;
static unsigned long nr_allocs, nr_cross_allocs;

/* filters controlling the start and stop time of the analysis */
static struct perf_time_interval ptime;
static const char *time_str;

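/*
 * Slab allocations are tracked in two rbtrees: root_alloc_stat is keyed
 * by the returned pointer (so the matching free event can find the
 * allocation) and root_caller_stat is keyed by call site.  A repeated
 * key simply accumulates the hit and byte counters of the existing node.
 */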
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	data->last_alloc = bytes_alloc;

	return 0;
}

static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}

static int perf_evsel__process_alloc_event(struct evsel *evsel,
					   struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
		      call_site = perf_evsel__intval(evsel, sample, "call_site");
	int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
	    bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	nr_allocs++;
	return 0;
}

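/*
 * kmalloc_node/kmem_cache_alloc_node events carry the requested NUMA
 * node; count the allocation as "cross" when the CPU that performed it
 * sits on a different node than the one requested.
 */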
static int perf_evsel__process_alloc_node_event(struct evsel *evsel,
						struct perf_sample *sample)
{
	int ret = perf_evsel__process_alloc_event(evsel, sample);

	if (!ret) {
		int node1 = cpu__get_node(sample->cpu),
		    node2 = perf_evsel__intval(evsel, sample, "node");

		if (node1 != node2)
			nr_cross_allocs++;
	}

	return ret;
}

static int ptr_cmp(void *, void *);
static int slab_callsite_cmp(void *, void *);

static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);

		cmp = sort_fn(&key, data);
		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

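/*
 * A free that happens on a different CPU than the one that allocated
 * the object is counted as a ping-pong, both for the pointer entry and
 * for its callsite entry.
 */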
static int perf_evsel__process_free_event(struct evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	total_freed += s_alloc->last_alloc;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat,
					     slab_callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}

static u64 total_page_alloc_bytes;
static u64 total_page_free_bytes;
static u64 total_page_nomatch_bytes;
static u64 total_page_fail_bytes;
static unsigned long nr_page_allocs;
static unsigned long nr_page_frees;
static unsigned long nr_page_fails;
static unsigned long nr_page_nomatch;

static bool use_pfn;
static bool live_page;
static struct perf_session *kmem_session;

#define MAX_MIGRATE_TYPES 6
#define MAX_PAGE_ORDER 11

static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];

struct page_stat {
	struct rb_node	node;
	u64		page;
	u64		callsite;
	int		order;
	unsigned	gfp_flags;
	unsigned	migrate_type;
	u64		alloc_bytes;
	u64		free_bytes;
	int		nr_alloc;
	int		nr_free;
};

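/*
 * page_live_tree holds pages currently allocated, keyed by page/pfn;
 * page_alloc_tree accumulates per-page stats for the non-live view;
 * page_caller_tree groups the same stats by allocation callsite.  The
 * *_sorted trees are rebuilt from these according to the --sort keys.
 */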
static struct rb_root page_live_tree;
static struct rb_root page_alloc_tree;
static struct rb_root page_alloc_sorted;
static struct rb_root page_caller_tree;
static struct rb_root page_caller_sorted;

struct alloc_func {
	u64 start;
	u64 end;
	char *name;
};

static int nr_alloc_funcs;
static struct alloc_func *alloc_func_list;

static int funcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

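/*
 * The bsearch key passed from find_callsite() has start == end == ip,
 * so a match means the ip falls inside an allocation function's
 * [start, end) range.
 */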
static int callcmp(const void *a, const void *b)
{
	const struct alloc_func *fa = a;
	const struct alloc_func *fb = b;

	if (fb->start <= fa->start && fa->end < fb->end)
		return 0;

	if (fa->start > fb->start)
		return 1;
	else
		return -1;
}

static int build_alloc_func_list(void)
{
	int ret;
	struct map *kernel_map;
	struct symbol *sym;
	struct rb_node *node;
	struct alloc_func *func;
	struct machine *machine = &kmem_session->machines.host;
	regex_t alloc_func_regex;
	static const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";

	ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
	if (ret) {
		char err[BUFSIZ];

		regerror(ret, &alloc_func_regex, err, sizeof(err));
		pr_err("Invalid regex: %s\n%s", pattern, err);
		return -EINVAL;
	}

	kernel_map = machine__kernel_map(machine);
	if (map__load(kernel_map) < 0) {
		pr_err("cannot load kernel map\n");
		regfree(&alloc_func_regex);
		return -ENOENT;
	}

	map__for_each_symbol(kernel_map, sym, node) {
		if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
			continue;

		func = realloc(alloc_func_list,
			       (nr_alloc_funcs + 1) * sizeof(*func));
		if (func == NULL) {
			regfree(&alloc_func_regex);
			return -ENOMEM;
		}

		pr_debug("alloc func: %s\n", sym->name);
		func[nr_alloc_funcs].start = sym->start;
		func[nr_alloc_funcs].end = sym->end;
		func[nr_alloc_funcs].name = sym->name;

		alloc_func_list = func;
		nr_alloc_funcs++;
	}

	qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);

	regfree(&alloc_func_regex);
	return 0;
}

/*
 * Find the first non-allocation function in the callchain.
 * The known allocation functions are kept in 'alloc_func_list'.
 */
static u64 find_callsite(struct evsel *evsel, struct perf_sample *sample)
{
	struct addr_location al;
	struct machine *machine = &kmem_session->machines.host;
	struct callchain_cursor_node *node;

	if (alloc_func_list == NULL) {
		if (build_alloc_func_list() < 0)
			goto out;
	}

	al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);

	callchain_cursor_commit(&callchain_cursor);
	while (true) {
		struct alloc_func key, *caller;
		u64 addr;

		node = callchain_cursor_current(&callchain_cursor);
		if (node == NULL)
			break;

		key.start = key.end = node->ip;
		caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
				 sizeof(key), callcmp);
		if (!caller) {
			/* found */
			if (node->map)
				addr = map__unmap_ip(node->map, node->ip);
			else
				addr = node->ip;

			return addr;
		} else
			pr_debug3("skipping alloc function: %s\n", caller->name);

		callchain_cursor_advance(&callchain_cursor);
	}

out:
	pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
	return sample->ip;
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

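/*
 * Input sort keys determine how the page stat trees are keyed while
 * events are processed; cmd_kmem() sets them to "page,order,migtype,gfp"
 * and "callsite,order,migtype,gfp" respectively.
 */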
static LIST_HEAD(page_alloc_sort_input);
static LIST_HEAD(page_caller_sort_input);

static struct page_stat *
__page_stat__findnew_page(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_live_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;

	while (*node) {
		s64 cmp;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		cmp = data->page - pstat->page;
		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_live_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, false);
}

static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
{
	return __page_stat__findnew_page(pstat, true);
}

static struct page_stat *
__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_alloc_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_alloc_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->page = pstat->page;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_alloc_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, false);
}

static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
{
	return __page_stat__findnew_alloc(pstat, true);
}

static struct page_stat *
__page_stat__findnew_caller(struct page_stat *pstat, bool create)
{
	struct rb_node **node = &page_caller_tree.rb_node;
	struct rb_node *parent = NULL;
	struct page_stat *data;
	struct sort_dimension *sort;

	while (*node) {
		int cmp = 0;

		parent = *node;
		data = rb_entry(*node, struct page_stat, node);

		list_for_each_entry(sort, &page_caller_sort_input, list) {
			cmp = sort->cmp(pstat, data);
			if (cmp)
				break;
		}

		if (cmp < 0)
			node = &parent->rb_left;
		else if (cmp > 0)
			node = &parent->rb_right;
		else
			return data;
	}

	if (!create)
		return NULL;

	data = zalloc(sizeof(*data));
	if (data != NULL) {
		data->callsite = pstat->callsite;
		data->order = pstat->order;
		data->gfp_flags = pstat->gfp_flags;
		data->migrate_type = pstat->migrate_type;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &page_caller_tree);
	}

	return data;
}

static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, false);
}

static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
{
	return __page_stat__findnew_caller(pstat, true);
}

static bool valid_page(u64 pfn_or_page)
{
	if (use_pfn && pfn_or_page == -1UL)
		return false;
	if (!use_pfn && pfn_or_page == 0)
		return false;
	return true;
}

struct gfp_flag {
	unsigned int flags;
	char *compact_str;
	char *human_readable;
};

static struct gfp_flag *gfps;
static int nr_gfps;

static int gfpcmp(const void *a, const void *b)
{
	const struct gfp_flag *fa = a;
	const struct gfp_flag *fb = b;

	return fa->flags - fb->flags;
}

/* see include/trace/events/mmflags.h */
static const struct {
	const char *original;
	const char *compact;
} gfp_compact_table[] = {
	{ "GFP_TRANSHUGE",		"THP" },
	{ "GFP_TRANSHUGE_LIGHT",	"THL" },
	{ "GFP_HIGHUSER_MOVABLE",	"HUM" },
	{ "GFP_HIGHUSER",		"HU" },
	{ "GFP_USER",			"U" },
	{ "GFP_KERNEL_ACCOUNT",		"KAC" },
	{ "GFP_KERNEL",			"K" },
	{ "GFP_NOFS",			"NF" },
	{ "GFP_ATOMIC",			"A" },
	{ "GFP_NOIO",			"NI" },
	{ "GFP_NOWAIT",			"NW" },
	{ "GFP_DMA",			"D" },
	{ "__GFP_HIGHMEM",		"HM" },
	{ "GFP_DMA32",			"D32" },
	{ "__GFP_HIGH",			"H" },
	{ "__GFP_ATOMIC",		"_A" },
	{ "__GFP_IO",			"I" },
	{ "__GFP_FS",			"F" },
	{ "__GFP_NOWARN",		"NWR" },
	{ "__GFP_RETRY_MAYFAIL",	"R" },
	{ "__GFP_NOFAIL",		"NF" },
	{ "__GFP_NORETRY",		"NR" },
	{ "__GFP_COMP",			"C" },
	{ "__GFP_ZERO",			"Z" },
	{ "__GFP_NOMEMALLOC",		"NMA" },
	{ "__GFP_MEMALLOC",		"MA" },
	{ "__GFP_HARDWALL",		"HW" },
	{ "__GFP_THISNODE",		"TN" },
	{ "__GFP_RECLAIMABLE",		"RC" },
	{ "__GFP_MOVABLE",		"M" },
	{ "__GFP_ACCOUNT",		"AC" },
	{ "__GFP_WRITE",		"WR" },
	{ "__GFP_RECLAIM",		"R" },
	{ "__GFP_DIRECT_RECLAIM",	"DR" },
	{ "__GFP_KSWAPD_RECLAIM",	"KR" },
};

static size_t max_gfp_len;

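/*
 * Convert a rendered flag string like "GFP_KERNEL|__GFP_ZERO" into its
 * compact form ("K|Z") using gfp_compact_table; components without a
 * table entry are silently dropped.
 */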
static char *compact_gfp_flags(char *gfp_flags)
{
	char *orig_flags = strdup(gfp_flags);
	char *new_flags = NULL;
	char *str, *pos = NULL;
	size_t len = 0;

	if (orig_flags == NULL)
		return NULL;

	str = strtok_r(orig_flags, "|", &pos);
	while (str) {
		size_t i;
		char *new;
		const char *cpt;

		for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
			if (strcmp(gfp_compact_table[i].original, str))
				continue;

			cpt = gfp_compact_table[i].compact;
			new = realloc(new_flags, len + strlen(cpt) + 2);
			if (new == NULL) {
				free(new_flags);
				free(orig_flags);
				return NULL;
			}

			new_flags = new;

			if (!len) {
				strcpy(new_flags, cpt);
			} else {
				strcat(new_flags, "|");
				strcat(new_flags, cpt);
				len++;
			}

			len += strlen(cpt);
		}

		str = strtok_r(NULL, "|", &pos);
	}

	if (max_gfp_len < len)
		max_gfp_len = len;

	free(orig_flags);
	return new_flags;
}

static char *compact_gfp_string(unsigned long gfp_flags)
{
	struct gfp_flag key = {
		.flags = gfp_flags,
	};
	struct gfp_flag *gfp;

	gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
	if (gfp)
		return gfp->compact_str;

	return NULL;
}

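/*
 * Lazily build the gfps table: for each unseen gfp_flags value, render
 * the event once through libtraceevent to obtain the human-readable
 * flag string, then cache both it and its compact form, keeping the
 * table sorted for bsearch.
 */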
static int parse_gfp_flags(struct evsel *evsel, struct perf_sample *sample,
			   unsigned int gfp_flags)
{
	struct tep_record record = {
		.cpu = sample->cpu,
		.data = sample->raw_data,
		.size = sample->raw_size,
	};
	struct trace_seq seq;
	char *str, *pos = NULL;

	if (nr_gfps) {
		struct gfp_flag key = {
			.flags = gfp_flags,
		};

		if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
			return 0;
	}

	trace_seq_init(&seq);
	tep_print_event(evsel->tp_format->tep,
			&seq, &record, "%s", TEP_PRINT_INFO);

	str = strtok_r(seq.buffer, " ", &pos);
	while (str) {
		if (!strncmp(str, "gfp_flags=", 10)) {
			struct gfp_flag *new;

			new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
			if (new == NULL) {
				trace_seq_destroy(&seq);
				return -ENOMEM;
			}

			gfps = new;
			new += nr_gfps++;

			new->flags = gfp_flags;
			new->human_readable = strdup(str + 10);
			new->compact_str = compact_gfp_flags(str + 10);
			if (!new->human_readable || !new->compact_str) {
				trace_seq_destroy(&seq);
				return -ENOMEM;
			}

			qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
		}

		str = strtok_r(NULL, " ", &pos);
	}

	trace_seq_destroy(&seq);
	return 0;
}

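/*
 * One mm_page_alloc event covers 2^order pages, so account
 * kmem_page_size << order bytes against the page, the (optional)
 * cumulative alloc tree and the callsite.
 */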
static int perf_evsel__process_page_alloc_event(struct evsel *evsel,
						struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
	unsigned int migrate_type = perf_evsel__intval(evsel, sample,
						       "migratetype");
	u64 bytes = kmem_page_size << order;
	u64 callsite;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
		.gfp_flags = gfp_flags,
		.migrate_type = migrate_type,
	};

	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_allocs++;
	total_page_alloc_bytes += bytes;

	if (!valid_page(page)) {
		nr_page_fails++;
		total_page_fail_bytes += bytes;

		return 0;
	}

	if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
		return -1;

	callsite = find_callsite(evsel, sample);

	/*
	 * Record the page in the live tree so that the matching free
	 * event can recover its gfp flags and migrate type.
	 */
	this.page = page;
	pstat = page_stat__findnew_page(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;
	pstat->callsite = callsite;

	if (!live_page) {
		pstat = page_stat__findnew_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_alloc++;
		pstat->alloc_bytes += bytes;
		pstat->callsite = callsite;
	}

	this.callsite = callsite;
	pstat = page_stat__findnew_caller(&this);
	if (pstat == NULL)
		return -ENOMEM;

	pstat->nr_alloc++;
	pstat->alloc_bytes += bytes;

	order_stats[order][migrate_type]++;

	return 0;
}

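/*
 * The free tracepoint carries only the page and order; gfp flags,
 * migrate type and callsite are recovered from the live tree entry
 * created at allocation time.
 */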
static int perf_evsel__process_page_free_event(struct evsel *evsel,
					       struct perf_sample *sample)
{
	u64 page;
	unsigned int order = perf_evsel__intval(evsel, sample, "order");
	u64 bytes = kmem_page_size << order;
	struct page_stat *pstat;
	struct page_stat this = {
		.order = order,
	};

	if (use_pfn)
		page = perf_evsel__intval(evsel, sample, "pfn");
	else
		page = perf_evsel__intval(evsel, sample, "page");

	nr_page_frees++;
	total_page_free_bytes += bytes;

	this.page = page;
	pstat = page_stat__find_page(&this);
	if (pstat == NULL) {
		pr_debug2("missing alloc for freed page %"PRIx64" (order: %d)\n",
			  page, order);

		nr_page_nomatch++;
		total_page_nomatch_bytes += bytes;

		return 0;
	}

	this.gfp_flags = pstat->gfp_flags;
	this.migrate_type = pstat->migrate_type;
	this.callsite = pstat->callsite;

	rb_erase(&pstat->node, &page_live_tree);
	free(pstat);

	if (live_page) {
		order_stats[this.order][this.migrate_type]--;
	} else {
		pstat = page_stat__find_alloc(&this);
		if (pstat == NULL)
			return -ENOMEM;

		pstat->nr_free++;
		pstat->free_bytes += bytes;
	}

	pstat = page_stat__find_caller(&this);
	if (pstat == NULL)
		return -ENOENT;

	pstat->nr_free++;
	pstat->free_bytes += bytes;

	if (live_page) {
		pstat->nr_alloc--;
		pstat->alloc_bytes -= bytes;

		if (pstat->nr_alloc == 0) {
			rb_erase(&pstat->node, &page_caller_tree);
			free(pstat);
		}
	}

	return 0;
}

static bool perf_kmem__skip_sample(struct perf_sample *sample)
{
	/* skip sample based on time? */
	if (perf_time__skip_sample(&ptime, sample->time))
		return true;

	return false;
}

typedef int (*tracepoint_handler)(struct evsel *evsel,
				  struct perf_sample *sample);

static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	int err = 0;
	struct thread *thread = machine__findnew_thread(machine, sample->pid,
							sample->tid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	if (perf_kmem__skip_sample(sample)) {
		thread__put(thread);
		return 0;
	}

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);

	if (evsel->handler != NULL) {
		tracepoint_handler f = evsel->handler;
		err = f(evsel, sample);
	}

	thread__put(thread);

	return err;
}

static struct perf_tool perf_kmem = {
	.sample		= process_sample_event,
	.comm		= perf_event__process_comm,
	.mmap		= perf_event__process_mmap,
	.mmap2		= perf_event__process_mmap2,
	.namespaces	= perf_event__process_namespaces,
	.ordered_events	= true,
};

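/* internal fragmentation in percent: 100 * (1 - requested/allocated) */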
static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0.0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

static void __print_slab_result(struct rb_root *root,
				struct perf_session *session,
				int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine = &session->machines.host;

	printf("%.105s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.105s\n", graph_dotted_line);

	next = rb_first(root);

	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_symbol(machine, addr, &map);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.105s\n", graph_dotted_line);
}

static const char * const migrate_type_str[] = {
	"UNMOVABL",
	"RECLAIM",
	"MOVABLE",
	"RESERVED",
	"CMA/ISLT",
	"UNKNOWN",
};

static void __print_page_alloc_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_alloc_sorted);
	struct machine *machine = &session->machines.host;
	const char *format;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
	       use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
	       gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	if (use_pfn)
		format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
	else
		format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
		if (sym)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(format, (unsigned long long)data->page,
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ... | ... | ... | ... | ... | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void __print_page_caller_result(struct perf_session *session, int n_lines)
{
	struct rb_node *next = rb_first(&page_caller_sorted);
	struct machine *machine = &session->machines.host;
	int gfp_len = max(strlen("GFP flags"), max_gfp_len);

	printf("\n%.105s\n", graph_dotted_line);
	printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
	       live_page ? "Live" : "Total", gfp_len, "GFP flags");
	printf("%.105s\n", graph_dotted_line);

	while (next && n_lines--) {
		struct page_stat *data;
		struct symbol *sym;
		struct map *map;
		char buf[32];
		char *caller = buf;

		data = rb_entry(next, struct page_stat, node);
		sym = machine__find_kernel_symbol(machine, data->callsite, &map);
		if (sym)
			caller = sym->name;
		else
			scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);

		printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
		       (unsigned long long)data->alloc_bytes / 1024,
		       data->nr_alloc, data->order,
		       migrate_type_str[data->migrate_type],
		       gfp_len, compact_gfp_string(data->gfp_flags), caller);

		next = rb_next(next);
	}

	if (n_lines == -1) {
		printf(" ... | ... | ... | ... | %-*s | ...\n",
		       gfp_len, "...");
	}

	printf("%.105s\n", graph_dotted_line);
}

static void print_gfp_flags(void)
{
	int i;

	printf("#\n");
	printf("# GFP flags\n");
	printf("# ---------\n");
	for (i = 0; i < nr_gfps; i++) {
		printf("# %08x: %*s: %s\n", gfps[i].flags,
		       (int) max_gfp_len, gfps[i].compact_str,
		       gfps[i].human_readable);
	}
}

static void print_slab_summary(void)
{
	printf("\nSUMMARY (SLAB allocator)");
	printf("\n========================\n");
	printf("Total bytes requested: %'lu\n", total_requested);
	printf("Total bytes allocated: %'lu\n", total_allocated);
	printf("Total bytes freed: %'lu\n", total_freed);
	if (total_allocated > total_freed) {
		printf("Net total bytes allocated: %'lu\n",
		       total_allocated - total_freed);
	}
	printf("Total bytes wasted on internal fragmentation: %'lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
}

static void print_page_summary(void)
{
	int o, m;
	u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
	u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;

	printf("\nSUMMARY (page allocator)");
	printf("\n========================\n");
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
	       nr_page_allocs, total_page_alloc_bytes / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
	       nr_page_frees, total_page_free_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
	       nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
	       nr_page_allocs - nr_alloc_freed,
	       (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
	       nr_page_nomatch, total_page_nomatch_bytes / 1024);
	printf("\n");

	printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
	       nr_page_fails, total_page_fail_bytes / 1024);
	printf("\n");

	printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
	       "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
	printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
	       graph_dotted_line, graph_dotted_line, graph_dotted_line,
	       graph_dotted_line, graph_dotted_line);

	for (o = 0; o < MAX_PAGE_ORDER; o++) {
		printf("%5d", o);
		for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
			if (order_stats[o][m])
				printf(" %'12d", order_stats[o][m]);
			else
				printf(" %12c", '.');
		}
		printf("\n");
	}
}

static void print_slab_result(struct perf_session *session)
{
	if (caller_flag)
		__print_slab_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_slab_summary();
}

static void print_page_result(struct perf_session *session)
{
	if (caller_flag || alloc_flag)
		print_gfp_flags();
	if (caller_flag)
		__print_page_caller_result(session, caller_lines);
	if (alloc_flag)
		__print_page_alloc_result(session, alloc_lines);
	print_page_summary();
}

static void print_result(struct perf_session *session)
{
	if (kmem_slab)
		print_slab_result(session);
	if (kmem_page)
		print_page_result(session);
}

static LIST_HEAD(slab_caller_sort);
static LIST_HEAD(slab_alloc_sort);
static LIST_HEAD(page_caller_sort);
static LIST_HEAD(page_alloc_sort);

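/*
 * Results are sorted by re-inserting every node from the stat tree into
 * a fresh rbtree ordered by the user's sort keys; ties fall through to
 * the next key in the list.  Larger values sort first (cmp > 0 goes
 * left).
 */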
static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_slab_insert(root_sorted, data, sort_list);
	}
}

static void sort_page_insert(struct rb_root *root, struct page_stat *data,
			     struct list_head *sort_list)
{
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct page_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct page_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &parent->rb_left;
		else
			new = &parent->rb_right;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
			       struct list_head *sort_list)
{
	struct rb_node *node;
	struct page_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct page_stat, node);
		sort_page_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	if (kmem_slab) {
		__sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
				   &slab_alloc_sort);
		__sort_slab_result(&root_caller_stat, &root_caller_sorted,
				   &slab_caller_sort);
	}
	if (kmem_page) {
		if (live_page)
			__sort_page_result(&page_live_tree, &page_alloc_sorted,
					   &page_alloc_sort);
		else
			__sort_page_result(&page_alloc_tree, &page_alloc_sorted,
					   &page_alloc_sort);

		__sort_page_result(&page_caller_tree, &page_caller_sorted,
				   &page_caller_sort);
	}
}

static int __cmd_kmem(struct perf_session *session)
{
	int err = -EINVAL;
	struct evsel *evsel;
	const struct evsel_str_handler kmem_tracepoints[] = {
		/* slab allocator */
		{ "kmem:kmalloc",		perf_evsel__process_alloc_event, },
		{ "kmem:kmem_cache_alloc",	perf_evsel__process_alloc_event, },
		{ "kmem:kmalloc_node",		perf_evsel__process_alloc_node_event, },
		{ "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
		{ "kmem:kfree",			perf_evsel__process_free_event, },
		{ "kmem:kmem_cache_free",	perf_evsel__process_free_event, },
		/* page allocator */
		{ "kmem:mm_page_alloc",		perf_evsel__process_page_alloc_event, },
		{ "kmem:mm_page_free",		perf_evsel__process_page_free_event, },
	};

	if (!perf_session__has_traces(session, "kmem record"))
		goto out;

	if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
		pr_err("Initializing perf session tracepoint handlers failed\n");
		goto out;
	}

	evlist__for_each_entry(session->evlist, evsel) {
		if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
		    perf_evsel__field(evsel, "pfn")) {
			use_pfn = true;
			break;
		}
	}

	setup_pager();
	err = perf_session__process_events(session);
	if (err != 0) {
		pr_err("error during process events: %d\n", err);
		goto out;
	}
	sort_result();
	print_result(session);
out:
	return err;
}

/* slab sort keys */
static int ptr_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int slab_callsite_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= slab_callsite_cmp,
};

static int hit_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->hit < r->hit)
		return -1;
	else if (l->hit > r->hit)
		return 1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->bytes_alloc < r->bytes_alloc)
		return -1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return 1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(void *a, void *b)
{
	double x, y;
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return -1;
	else if (x > y)
		return 1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(void *a, void *b)
{
	struct alloc_stat *l = a;
	struct alloc_stat *r = b;

	if (l->pingpong < r->pingpong)
		return -1;
	else if (l->pingpong > r->pingpong)
		return 1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

/* page sort keys */
static int page_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->page < r->page)
		return -1;
	else if (l->page > r->page)
		return 1;
	return 0;
}

static struct sort_dimension page_sort_dimension = {
	.name	= "page",
	.cmp	= page_cmp,
};

static int page_callsite_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->callsite < r->callsite)
		return -1;
	else if (l->callsite > r->callsite)
		return 1;
	return 0;
}

static struct sort_dimension page_callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= page_callsite_cmp,
};

static int page_hit_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->nr_alloc < r->nr_alloc)
		return -1;
	else if (l->nr_alloc > r->nr_alloc)
		return 1;
	return 0;
}

static struct sort_dimension page_hit_sort_dimension = {
	.name	= "hit",
	.cmp	= page_hit_cmp,
};

static int page_bytes_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->alloc_bytes < r->alloc_bytes)
		return -1;
	else if (l->alloc_bytes > r->alloc_bytes)
		return 1;
	return 0;
}

static struct sort_dimension page_bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= page_bytes_cmp,
};

static int page_order_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	if (l->order < r->order)
		return -1;
	else if (l->order > r->order)
		return 1;
	return 0;
}

static struct sort_dimension page_order_sort_dimension = {
	.name	= "order",
	.cmp	= page_order_cmp,
};

static int migrate_type_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find free'd page */
	if (l->migrate_type == -1U)
		return 0;

	if (l->migrate_type < r->migrate_type)
		return -1;
	else if (l->migrate_type > r->migrate_type)
		return 1;
	return 0;
}

static struct sort_dimension migrate_type_sort_dimension = {
	.name	= "migtype",
	.cmp	= migrate_type_cmp,
};

static int gfp_flags_cmp(void *a, void *b)
{
	struct page_stat *l = a;
	struct page_stat *r = b;

	/* for internal use to find free'd page */
	if (l->gfp_flags == -1U)
		return 0;

	if (l->gfp_flags < r->gfp_flags)
		return -1;
	else if (l->gfp_flags > r->gfp_flags)
		return 1;
	return 0;
}

static struct sort_dimension gfp_flags_sort_dimension = {
	.name	= "gfp",
	.cmp	= gfp_flags_cmp,
};

static struct sort_dimension *slab_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

static struct sort_dimension *page_sorts[] = {
	&page_sort_dimension,
	&page_callsite_sort_dimension,
	&page_hit_sort_dimension,
	&page_bytes_sort_dimension,
	&page_order_sort_dimension,
	&migrate_type_sort_dimension,
	&gfp_flags_sort_dimension,
};

static int slab_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
		if (!strcmp(slab_sorts[i]->name, tok)) {
			sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int page_sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
		if (!strcmp(page_sorts[i]->name, tok)) {
			sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
			if (!sort) {
				pr_err("%s: memdup failed\n", __func__);
				return -1;
			}
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (slab_sort_dimension__add(tok, sort_list) < 0) {
			pr_err("Unknown slab --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int setup_page_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);
	char *pos = str;

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&pos, ",");
		if (!tok)
			break;
		if (page_sort_dimension__add(tok, sort_list) < 0) {
			pr_err("Unknown page --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

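/*
 * --sort applies to whichever mode (--page/--slab) and view
 * (--caller/--alloc) was named last on the command line, falling back
 * to the configured default mode.
 */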
static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (kmem_page > kmem_slab ||
	    (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
		if (caller_flag > alloc_flag)
			return setup_page_sorting(&page_caller_sort, arg);
		else
			return setup_page_sorting(&page_alloc_sort, arg);
	} else {
		if (caller_flag > alloc_flag)
			return setup_slab_sorting(&slab_caller_sort, arg);
		else
			return setup_slab_sorting(&slab_alloc_sort, arg);
	}

	return 0;
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_slab_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_slab = (kmem_page + 1);
	return 0;
}

static int parse_page_opt(const struct option *opt __maybe_unused,
			  const char *arg __maybe_unused,
			  int unset __maybe_unused)
{
	kmem_page = (kmem_slab + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

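/*
 * 'perf kmem record' is a thin wrapper: rebuild a 'perf record' command
 * line with the relevant kmem tracepoints, adding -g in page mode since
 * find_callsite() needs callchains.
 */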
static int __cmd_record(int argc, const char **argv)
{
	const char * const record_args[] = {
		"record", "-a", "-R", "-c", "1",
	};
	const char * const slab_events[] = {
		"-e", "kmem:kmalloc",
		"-e", "kmem:kmalloc_node",
		"-e", "kmem:kfree",
		"-e", "kmem:kmem_cache_alloc",
		"-e", "kmem:kmem_cache_alloc_node",
		"-e", "kmem:kmem_cache_free",
	};
	const char * const page_events[] = {
		"-e", "kmem:mm_page_alloc",
		"-e", "kmem:mm_page_free",
	};
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	if (kmem_slab)
		rec_argc += ARRAY_SIZE(slab_events);
	if (kmem_page)
		rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */

	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	if (kmem_slab) {
		for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
			rec_argv[i] = strdup(slab_events[j]);
	}
	if (kmem_page) {
		rec_argv[i++] = strdup("-g");

		for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
			rec_argv[i] = strdup(page_events[j]);
	}

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv);
}

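/* honour 'kmem.default = slab|page' from perfconfig */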
static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "kmem.default")) {
		if (!strcmp(value, "slab"))
			kmem_default = KMEM_SLAB;
		else if (!strcmp(value, "page"))
			kmem_default = KMEM_PAGE;
		else
			pr_err("invalid default value ('slab' or 'page' required): %s\n",
			       value);
		return 0;
	}

	return 0;
}

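/*
 * Typical usage (see tools/perf/Documentation/perf-kmem.txt):
 *   perf kmem record --slab sleep 1
 *   perf kmem stat --caller --sort frag,hit
 *   perf kmem record --page sleep 1 && perf kmem stat --page --live
 */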
int cmd_kmem(int argc, const char **argv)
{
	const char * const default_slab_sort = "frag,hit,bytes";
	const char * const default_page_sort = "bytes,hit";
	struct perf_data data = {
		.mode = PERF_DATA_MODE_READ,
	};
	const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file", "input file name"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show symbol address, etc)"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics", parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics", parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
		     "page, order, migtype, gfp", parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
	OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
			   parse_slab_opt),
	OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
			   parse_page_opt),
	OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
	OPT_STRING(0, "time", &time_str, "str",
		   "Time span of interest (start,stop)"),
	OPT_END()
	};
	const char *const kmem_subcommands[] = { "record", "stat", NULL };
	const char *kmem_usage[] = {
		NULL,
		NULL
	};
	struct perf_session *session;
	static const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
	int ret = perf_config(kmem_config, NULL);

	if (ret)
		return ret;

	argc = parse_options_subcommand(argc, argv, kmem_options,
					kmem_subcommands, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	if (kmem_slab == 0 && kmem_page == 0) {
		if (kmem_default == KMEM_SLAB)
			kmem_slab = 1;
		else
			kmem_page = 1;
	}

	if (!strncmp(argv[0], "rec", 3)) {
		symbol__init(NULL);
		return __cmd_record(argc, argv);
	}

	data.path = input_name;

	kmem_session = session = perf_session__new(&data, false, &perf_kmem);
	if (IS_ERR(session))
		return PTR_ERR(session);

	ret = -1;

	if (kmem_slab) {
		if (!perf_evlist__find_tracepoint_by_name(session->evlist,
							  "kmem:kmalloc")) {
			pr_err(errmsg, "slab", "slab");
			goto out_delete;
		}
	}

	if (kmem_page) {
		struct evsel *evsel;

		evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
							     "kmem:mm_page_alloc");
		if (evsel == NULL) {
			pr_err(errmsg, "page", "page");
			goto out_delete;
		}

		kmem_page_size = tep_get_page_size(evsel->tp_format->tep);
		symbol_conf.use_callchain = true;
	}

	symbol__init(&session->header.env);

	if (perf_time__parse_str(&ptime, time_str) != 0) {
		pr_err("Invalid time string\n");
		ret = -EINVAL;
		goto out_delete;
	}

	if (!strcmp(argv[0], "stat")) {
		setlocale(LC_ALL, "");

		if (cpu__setup_cpunode_map())
			goto out_delete;

		if (list_empty(&slab_caller_sort))
			setup_slab_sorting(&slab_caller_sort, default_slab_sort);
		if (list_empty(&slab_alloc_sort))
			setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
		if (list_empty(&page_caller_sort))
			setup_page_sorting(&page_caller_sort, default_page_sort);
		if (list_empty(&page_alloc_sort))
			setup_page_sorting(&page_alloc_sort, default_page_sort);

		if (kmem_page) {
			setup_page_sorting(&page_alloc_sort_input,
					   "page,order,migtype,gfp");
			setup_page_sorting(&page_caller_sort_input,
					   "callsite,order,migtype,gfp");
		}
		ret = __cmd_kmem(session);
	} else
		usage_with_options(kmem_usage, kmem_options);

out_delete:
	perf_session__delete(session);

	return ret;
}
1#include "builtin.h"
2#include "perf.h"
3
4#include "util/evlist.h"
5#include "util/evsel.h"
6#include "util/util.h"
7#include "util/config.h"
8#include "util/symbol.h"
9#include "util/thread.h"
10#include "util/header.h"
11#include "util/session.h"
12#include "util/tool.h"
13#include "util/callchain.h"
14#include "util/time-utils.h"
15
16#include <subcmd/parse-options.h>
17#include "util/trace-event.h"
18#include "util/data.h"
19#include "util/cpumap.h"
20
21#include "util/debug.h"
22
23#include <linux/rbtree.h>
24#include <linux/string.h>
25#include <locale.h>
26#include <regex.h>
27
28static int kmem_slab;
29static int kmem_page;
30
31static long kmem_page_size;
32static enum {
33 KMEM_SLAB,
34 KMEM_PAGE,
35} kmem_default = KMEM_SLAB; /* for backward compatibility */
36
37struct alloc_stat;
38typedef int (*sort_fn_t)(void *, void *);
39
40static int alloc_flag;
41static int caller_flag;
42
43static int alloc_lines = -1;
44static int caller_lines = -1;
45
46static bool raw_ip;
47
48struct alloc_stat {
49 u64 call_site;
50 u64 ptr;
51 u64 bytes_req;
52 u64 bytes_alloc;
53 u64 last_alloc;
54 u32 hit;
55 u32 pingpong;
56
57 short alloc_cpu;
58
59 struct rb_node node;
60};
61
62static struct rb_root root_alloc_stat;
63static struct rb_root root_alloc_sorted;
64static struct rb_root root_caller_stat;
65static struct rb_root root_caller_sorted;
66
67static unsigned long total_requested, total_allocated, total_freed;
68static unsigned long nr_allocs, nr_cross_allocs;
69
70/* filters for controlling start and stop of time of analysis */
71static struct perf_time_interval ptime;
72const char *time_str;
73
74static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
75 int bytes_req, int bytes_alloc, int cpu)
76{
77 struct rb_node **node = &root_alloc_stat.rb_node;
78 struct rb_node *parent = NULL;
79 struct alloc_stat *data = NULL;
80
81 while (*node) {
82 parent = *node;
83 data = rb_entry(*node, struct alloc_stat, node);
84
85 if (ptr > data->ptr)
86 node = &(*node)->rb_right;
87 else if (ptr < data->ptr)
88 node = &(*node)->rb_left;
89 else
90 break;
91 }
92
93 if (data && data->ptr == ptr) {
94 data->hit++;
95 data->bytes_req += bytes_req;
96 data->bytes_alloc += bytes_alloc;
97 } else {
98 data = malloc(sizeof(*data));
99 if (!data) {
100 pr_err("%s: malloc failed\n", __func__);
101 return -1;
102 }
103 data->ptr = ptr;
104 data->pingpong = 0;
105 data->hit = 1;
106 data->bytes_req = bytes_req;
107 data->bytes_alloc = bytes_alloc;
108
109 rb_link_node(&data->node, parent, node);
110 rb_insert_color(&data->node, &root_alloc_stat);
111 }
112 data->call_site = call_site;
113 data->alloc_cpu = cpu;
114 data->last_alloc = bytes_alloc;
115
116 return 0;
117}
118
119static int insert_caller_stat(unsigned long call_site,
120 int bytes_req, int bytes_alloc)
121{
122 struct rb_node **node = &root_caller_stat.rb_node;
123 struct rb_node *parent = NULL;
124 struct alloc_stat *data = NULL;
125
126 while (*node) {
127 parent = *node;
128 data = rb_entry(*node, struct alloc_stat, node);
129
130 if (call_site > data->call_site)
131 node = &(*node)->rb_right;
132 else if (call_site < data->call_site)
133 node = &(*node)->rb_left;
134 else
135 break;
136 }
137
138 if (data && data->call_site == call_site) {
139 data->hit++;
140 data->bytes_req += bytes_req;
141 data->bytes_alloc += bytes_alloc;
142 } else {
143 data = malloc(sizeof(*data));
144 if (!data) {
145 pr_err("%s: malloc failed\n", __func__);
146 return -1;
147 }
148 data->call_site = call_site;
149 data->pingpong = 0;
150 data->hit = 1;
151 data->bytes_req = bytes_req;
152 data->bytes_alloc = bytes_alloc;
153
154 rb_link_node(&data->node, parent, node);
155 rb_insert_color(&data->node, &root_caller_stat);
156 }
157
158 return 0;
159}
160
161static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
162 struct perf_sample *sample)
163{
164 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"),
165 call_site = perf_evsel__intval(evsel, sample, "call_site");
166 int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"),
167 bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc");
168
169 if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) ||
170 insert_caller_stat(call_site, bytes_req, bytes_alloc))
171 return -1;
172
173 total_requested += bytes_req;
174 total_allocated += bytes_alloc;
175
176 nr_allocs++;
177 return 0;
178}
179
180static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
181 struct perf_sample *sample)
182{
183 int ret = perf_evsel__process_alloc_event(evsel, sample);
184
185 if (!ret) {
186 int node1 = cpu__get_node(sample->cpu),
187 node2 = perf_evsel__intval(evsel, sample, "node");
188
189 if (node1 != node2)
190 nr_cross_allocs++;
191 }
192
193 return ret;
194}
195
196static int ptr_cmp(void *, void *);
197static int slab_callsite_cmp(void *, void *);
198
199static struct alloc_stat *search_alloc_stat(unsigned long ptr,
200 unsigned long call_site,
201 struct rb_root *root,
202 sort_fn_t sort_fn)
203{
204 struct rb_node *node = root->rb_node;
205 struct alloc_stat key = { .ptr = ptr, .call_site = call_site };
206
207 while (node) {
208 struct alloc_stat *data;
209 int cmp;
210
211 data = rb_entry(node, struct alloc_stat, node);
212
213 cmp = sort_fn(&key, data);
214 if (cmp < 0)
215 node = node->rb_left;
216 else if (cmp > 0)
217 node = node->rb_right;
218 else
219 return data;
220 }
221 return NULL;
222}
223
224static int perf_evsel__process_free_event(struct perf_evsel *evsel,
225 struct perf_sample *sample)
226{
227 unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr");
228 struct alloc_stat *s_alloc, *s_caller;
229
230 s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
231 if (!s_alloc)
232 return 0;
233
234 total_freed += s_alloc->last_alloc;
235
236 if ((short)sample->cpu != s_alloc->alloc_cpu) {
237 s_alloc->pingpong++;
238
239 s_caller = search_alloc_stat(0, s_alloc->call_site,
240 &root_caller_stat,
241 slab_callsite_cmp);
242 if (!s_caller)
243 return -1;
244 s_caller->pingpong++;
245 }
246 s_alloc->alloc_cpu = -1;
247
248 return 0;
249}
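/*
 * Editor's note: "pingpong" counts cross-CPU alloc/free pairs, e.g. an
 * object kmalloc()ed on CPU 0 but kfree()d on CPU 3. Both the object
 * and its call site get their pingpong counters bumped, and alloc_cpu
 * is reset to -1 so one object cannot be counted more than once.
 */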
250
251static u64 total_page_alloc_bytes;
252static u64 total_page_free_bytes;
253static u64 total_page_nomatch_bytes;
254static u64 total_page_fail_bytes;
255static unsigned long nr_page_allocs;
256static unsigned long nr_page_frees;
257static unsigned long nr_page_fails;
258static unsigned long nr_page_nomatch;
259
260static bool use_pfn;
261static bool live_page;
262static struct perf_session *kmem_session;
263
264#define MAX_MIGRATE_TYPES 6
265#define MAX_PAGE_ORDER 11
266
267static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
268
269struct page_stat {
270 struct rb_node node;
271 u64 page;
272 u64 callsite;
273 int order;
274 unsigned gfp_flags;
275 unsigned migrate_type;
276 u64 alloc_bytes;
277 u64 free_bytes;
278 int nr_alloc;
279 int nr_free;
280};
281
282static struct rb_root page_live_tree;
283static struct rb_root page_alloc_tree;
284static struct rb_root page_alloc_sorted;
285static struct rb_root page_caller_tree;
286static struct rb_root page_caller_sorted;
287
288struct alloc_func {
289 u64 start;
290 u64 end;
291 char *name;
292};
293
294static int nr_alloc_funcs;
295static struct alloc_func *alloc_func_list;
296
297static int funcmp(const void *a, const void *b)
298{
299 const struct alloc_func *fa = a;
300 const struct alloc_func *fb = b;
301
302 if (fa->start > fb->start)
303 return 1;
304 else
305 return -1;
306}
307
308static int callcmp(const void *a, const void *b)
309{
310 const struct alloc_func *fa = a;
311 const struct alloc_func *fb = b;
312
313 if (fb->start <= fa->start && fa->end < fb->end)
314 return 0;
315
316 if (fa->start > fb->start)
317 return 1;
318 else
319 return -1;
320}
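/*
 * Editor's note: callcmp() is deliberately asymmetric. find_callsite()
 * below builds a key with start == end == ip, so the range test above
 * makes bsearch() report a match whenever ip falls inside a function's
 * [start, end) interval. E.g. with __alloc_pages covering the (made-up)
 * range [0x1000, 0x1080), a key of 0x1010 compares equal to it.
 */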
321
322static int build_alloc_func_list(void)
323{
324 int ret;
325 struct map *kernel_map;
326 struct symbol *sym;
327 struct rb_node *node;
328 struct alloc_func *func;
329 struct machine *machine = &kmem_session->machines.host;
330 regex_t alloc_func_regex;
331 const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
332
333 ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
334 if (ret) {
335 char err[BUFSIZ];
336
337 regerror(ret, &alloc_func_regex, err, sizeof(err));
338 pr_err("Invalid regex: %s\n%s", pattern, err);
339 return -EINVAL;
340 }
341
342 kernel_map = machine__kernel_map(machine);
343 if (map__load(kernel_map) < 0) {
344 pr_err("cannot load kernel map\n");
345 regfree(&alloc_func_regex); return -ENOENT; /* don't leak the compiled regex */
346 }
347
348 map__for_each_symbol(kernel_map, sym, node) {
349 if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
350 continue;
351
352 func = realloc(alloc_func_list,
353 (nr_alloc_funcs + 1) * sizeof(*func));
354 if (func == NULL)
355 return -ENOMEM;
356
357 pr_debug("alloc func: %s\n", sym->name);
358 func[nr_alloc_funcs].start = sym->start;
359 func[nr_alloc_funcs].end = sym->end;
360 func[nr_alloc_funcs].name = sym->name;
361
362 alloc_func_list = func;
363 nr_alloc_funcs++;
364 }
365
366 qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
367
368 regfree(&alloc_func_regex);
369 return 0;
370}
371
372/*
373 * Find first non-memory allocation function from callchain.
374 * The allocation functions are in the 'alloc_func_list'.
375 */
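/*
 * Illustrative example (editor's addition; an example chain, not taken
 * from a real trace): for a callchain like
 *
 *	__alloc_pages -> alloc_pages_current -> pte_alloc_one -> ...
 *
 * the first two frames match the allocator regex and are skipped, so
 * the callsite reported for the page is pte_alloc_one.
 */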
376static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
377{
378 struct addr_location al;
379 struct machine *machine = &kmem_session->machines.host;
380 struct callchain_cursor_node *node;
381
382 if (alloc_func_list == NULL) {
383 if (build_alloc_func_list() < 0)
384 goto out;
385 }
386
387 al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
388 sample__resolve_callchain(sample, &callchain_cursor, NULL, evsel, &al, 16);
389
390 callchain_cursor_commit(&callchain_cursor);
391 while (true) {
392 struct alloc_func key, *caller;
393 u64 addr;
394
395 node = callchain_cursor_current(&callchain_cursor);
396 if (node == NULL)
397 break;
398
399 key.start = key.end = node->ip;
400 caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
401 sizeof(key), callcmp);
402 if (!caller) {
403 /* found */
404 if (node->map)
405 addr = map__unmap_ip(node->map, node->ip);
406 else
407 addr = node->ip;
408
409 return addr;
410 } else
411 pr_debug3("skipping alloc function: %s\n", caller->name);
412
413 callchain_cursor_advance(&callchain_cursor);
414 }
415
416out:
417 pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
418 return sample->ip;
419}
420
421struct sort_dimension {
422 const char name[20];
423 sort_fn_t cmp;
424 struct list_head list;
425};
426
427static LIST_HEAD(page_alloc_sort_input);
428static LIST_HEAD(page_caller_sort_input);
429
430static struct page_stat *
431__page_stat__findnew_page(struct page_stat *pstat, bool create)
432{
433 struct rb_node **node = &page_live_tree.rb_node;
434 struct rb_node *parent = NULL;
435 struct page_stat *data;
436
437 while (*node) {
438 s64 cmp;
439
440 parent = *node;
441 data = rb_entry(*node, struct page_stat, node);
442
443 cmp = data->page - pstat->page;
444 if (cmp < 0)
445 node = &parent->rb_left;
446 else if (cmp > 0)
447 node = &parent->rb_right;
448 else
449 return data;
450 }
451
452 if (!create)
453 return NULL;
454
455 data = zalloc(sizeof(*data));
456 if (data != NULL) {
457 data->page = pstat->page;
458 data->order = pstat->order;
459 data->gfp_flags = pstat->gfp_flags;
460 data->migrate_type = pstat->migrate_type;
461
462 rb_link_node(&data->node, parent, node);
463 rb_insert_color(&data->node, &page_live_tree);
464 }
465
466 return data;
467}
468
469static struct page_stat *page_stat__find_page(struct page_stat *pstat)
470{
471 return __page_stat__findnew_page(pstat, false);
472}
473
474static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
475{
476 return __page_stat__findnew_page(pstat, true);
477}
478
479static struct page_stat *
480__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
481{
482 struct rb_node **node = &page_alloc_tree.rb_node;
483 struct rb_node *parent = NULL;
484 struct page_stat *data;
485 struct sort_dimension *sort;
486
487 while (*node) {
488 int cmp = 0;
489
490 parent = *node;
491 data = rb_entry(*node, struct page_stat, node);
492
493 list_for_each_entry(sort, &page_alloc_sort_input, list) {
494 cmp = sort->cmp(pstat, data);
495 if (cmp)
496 break;
497 }
498
499 if (cmp < 0)
500 node = &parent->rb_left;
501 else if (cmp > 0)
502 node = &parent->rb_right;
503 else
504 return data;
505 }
506
507 if (!create)
508 return NULL;
509
510 data = zalloc(sizeof(*data));
511 if (data != NULL) {
512 data->page = pstat->page;
513 data->order = pstat->order;
514 data->gfp_flags = pstat->gfp_flags;
515 data->migrate_type = pstat->migrate_type;
516
517 rb_link_node(&data->node, parent, node);
518 rb_insert_color(&data->node, &page_alloc_tree);
519 }
520
521 return data;
522}
523
524static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
525{
526 return __page_stat__findnew_alloc(pstat, false);
527}
528
529static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
530{
531 return __page_stat__findnew_alloc(pstat, true);
532}
533
534static struct page_stat *
535__page_stat__findnew_caller(struct page_stat *pstat, bool create)
536{
537 struct rb_node **node = &page_caller_tree.rb_node;
538 struct rb_node *parent = NULL;
539 struct page_stat *data;
540 struct sort_dimension *sort;
541
542 while (*node) {
543 int cmp = 0;
544
545 parent = *node;
546 data = rb_entry(*node, struct page_stat, node);
547
548 list_for_each_entry(sort, &page_caller_sort_input, list) {
549 cmp = sort->cmp(pstat, data);
550 if (cmp)
551 break;
552 }
553
554 if (cmp < 0)
555 node = &parent->rb_left;
556 else if (cmp > 0)
557 node = &parent->rb_right;
558 else
559 return data;
560 }
561
562 if (!create)
563 return NULL;
564
565 data = zalloc(sizeof(*data));
566 if (data != NULL) {
567 data->callsite = pstat->callsite;
568 data->order = pstat->order;
569 data->gfp_flags = pstat->gfp_flags;
570 data->migrate_type = pstat->migrate_type;
571
572 rb_link_node(&data->node, parent, node);
573 rb_insert_color(&data->node, &page_caller_tree);
574 }
575
576 return data;
577}
578
579static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
580{
581 return __page_stat__findnew_caller(pstat, false);
582}
583
584static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
585{
586 return __page_stat__findnew_caller(pstat, true);
587}
588
589static bool valid_page(u64 pfn_or_page)
590{
591 if (use_pfn && pfn_or_page == -1UL)
592 return false;
593 if (!use_pfn && pfn_or_page == 0)
594 return false;
595 return true;
596}
597
598struct gfp_flag {
599 unsigned int flags;
600 char *compact_str;
601 char *human_readable;
602};
603
604static struct gfp_flag *gfps;
605static int nr_gfps;
606
607static int gfpcmp(const void *a, const void *b)
608{
609 const struct gfp_flag *fa = a;
610 const struct gfp_flag *fb = b;
611
612 return fa->flags - fb->flags;
613}
614
615/* see include/trace/events/mmflags.h */
616static const struct {
617 const char *original;
618 const char *compact;
619} gfp_compact_table[] = {
620 { "GFP_TRANSHUGE", "THP" },
621 { "GFP_TRANSHUGE_LIGHT", "THL" },
622 { "GFP_HIGHUSER_MOVABLE", "HUM" },
623 { "GFP_HIGHUSER", "HU" },
624 { "GFP_USER", "U" },
625 { "GFP_TEMPORARY", "TMP" },
626 { "GFP_KERNEL_ACCOUNT", "KAC" },
627 { "GFP_KERNEL", "K" },
628 { "GFP_NOFS", "NF" },
629 { "GFP_ATOMIC", "A" },
630 { "GFP_NOIO", "NI" },
631 { "GFP_NOWAIT", "NW" },
632 { "GFP_DMA", "D" },
633 { "__GFP_HIGHMEM", "HM" },
634 { "GFP_DMA32", "D32" },
635 { "__GFP_HIGH", "H" },
636 { "__GFP_ATOMIC", "_A" },
637 { "__GFP_IO", "I" },
638 { "__GFP_FS", "F" },
639 { "__GFP_COLD", "CO" },
640 { "__GFP_NOWARN", "NWR" },
641 { "__GFP_REPEAT", "R" },
642 { "__GFP_NOFAIL", "NF" },
643 { "__GFP_NORETRY", "NR" },
644 { "__GFP_COMP", "C" },
645 { "__GFP_ZERO", "Z" },
646 { "__GFP_NOMEMALLOC", "NMA" },
647 { "__GFP_MEMALLOC", "MA" },
648 { "__GFP_HARDWALL", "HW" },
649 { "__GFP_THISNODE", "TN" },
650 { "__GFP_RECLAIMABLE", "RC" },
651 { "__GFP_MOVABLE", "M" },
652 { "__GFP_ACCOUNT", "AC" },
653 { "__GFP_NOTRACK", "NT" },
654 { "__GFP_WRITE", "WR" },
655 { "__GFP_RECLAIM", "R" },
656 { "__GFP_DIRECT_RECLAIM", "DR" },
657 { "__GFP_KSWAPD_RECLAIM", "KR" },
658};
659
660static size_t max_gfp_len;
661
662static char *compact_gfp_flags(char *gfp_flags)
663{
664 char *orig_flags = strdup(gfp_flags);
665 char *new_flags = NULL;
666 char *str, *pos = NULL;
667 size_t len = 0;
668
669 if (orig_flags == NULL)
670 return NULL;
671
672 str = strtok_r(orig_flags, "|", &pos);
673 while (str) {
674 size_t i;
675 char *new;
676 const char *cpt;
677
678 for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
679 if (strcmp(gfp_compact_table[i].original, str))
680 continue;
681
682 cpt = gfp_compact_table[i].compact;
683 new = realloc(new_flags, len + strlen(cpt) + 2);
684 if (new == NULL) {
685 free(new_flags); free(orig_flags); /* don't leak the strdup()ed copy */
686 return NULL;
687 }
688
689 new_flags = new;
690
691 if (!len) {
692 strcpy(new_flags, cpt);
693 } else {
694 strcat(new_flags, "|");
695 strcat(new_flags, cpt);
696 len++;
697 }
698
699 len += strlen(cpt);
700 }
701
702 str = strtok_r(NULL, "|", &pos);
703 }
704
705 if (max_gfp_len < len)
706 max_gfp_len = len;
707
708 free(orig_flags);
709 return new_flags;
710}
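/*
 * Editor's note, worked example: "GFP_KERNEL_ACCOUNT|__GFP_ZERO" is
 * split on '|' and mapped through gfp_compact_table to "KAC|Z". Tokens
 * with no table entry are silently dropped from the compact form, and
 * max_gfp_len tracks the widest compact string seen so the report
 * columns can be sized to fit.
 */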
711
712static char *compact_gfp_string(unsigned long gfp_flags)
713{
714 struct gfp_flag key = {
715 .flags = gfp_flags,
716 };
717 struct gfp_flag *gfp;
718
719 gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
720 if (gfp)
721 return gfp->compact_str;
722
723 return NULL;
724}
725
726static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
727 unsigned int gfp_flags)
728{
729 struct pevent_record record = {
730 .cpu = sample->cpu,
731 .data = sample->raw_data,
732 .size = sample->raw_size,
733 };
734 struct trace_seq seq;
735 char *str, *pos = NULL;
736
737 if (nr_gfps) {
738 struct gfp_flag key = {
739 .flags = gfp_flags,
740 };
741
742 if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
743 return 0;
744 }
745
746 trace_seq_init(&seq);
747 pevent_event_info(&seq, evsel->tp_format, &record);
748
749 str = strtok_r(seq.buffer, " ", &pos);
750 while (str) {
751 if (!strncmp(str, "gfp_flags=", 10)) {
752 struct gfp_flag *new;
753
754 new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
755 if (new == NULL)
756 return -ENOMEM;
757
758 gfps = new;
759 new += nr_gfps++;
760
761 new->flags = gfp_flags;
762 new->human_readable = strdup(str + 10);
763 new->compact_str = compact_gfp_flags(str + 10);
764 if (!new->human_readable || !new->compact_str)
765 return -ENOMEM;
766
767 qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
768 }
769
770 str = strtok_r(NULL, " ", &pos);
771 }
772
773 trace_seq_destroy(&seq);
774 return 0;
775}
776
777static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
778 struct perf_sample *sample)
779{
780 u64 page;
781 unsigned int order = perf_evsel__intval(evsel, sample, "order");
782 unsigned int gfp_flags = perf_evsel__intval(evsel, sample, "gfp_flags");
783 unsigned int migrate_type = perf_evsel__intval(evsel, sample,
784 "migratetype");
785 u64 bytes = kmem_page_size << order;
786 u64 callsite;
787 struct page_stat *pstat;
788 struct page_stat this = {
789 .order = order,
790 .gfp_flags = gfp_flags,
791 .migrate_type = migrate_type,
792 };
793
794 if (use_pfn)
795 page = perf_evsel__intval(evsel, sample, "pfn");
796 else
797 page = perf_evsel__intval(evsel, sample, "page");
798
799 nr_page_allocs++;
800 total_page_alloc_bytes += bytes;
801
802 if (!valid_page(page)) {
803 nr_page_fails++;
804 total_page_fail_bytes += bytes;
805
806 return 0;
807 }
808
809 if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
810 return -1;
811
812 callsite = find_callsite(evsel, sample);
813
814 /*
815 * This is to find the current page (with correct gfp flags and
816 * migrate type) at free event.
817 */
818 this.page = page;
819 pstat = page_stat__findnew_page(&this);
820 if (pstat == NULL)
821 return -ENOMEM;
822
823 pstat->nr_alloc++;
824 pstat->alloc_bytes += bytes;
825 pstat->callsite = callsite;
826
827 if (!live_page) {
828 pstat = page_stat__findnew_alloc(&this);
829 if (pstat == NULL)
830 return -ENOMEM;
831
832 pstat->nr_alloc++;
833 pstat->alloc_bytes += bytes;
834 pstat->callsite = callsite;
835 }
836
837 this.callsite = callsite;
838 pstat = page_stat__findnew_caller(&this);
839 if (pstat == NULL)
840 return -ENOMEM;
841
842 pstat->nr_alloc++;
843 pstat->alloc_bytes += bytes;
844
845 order_stats[order][migrate_type]++;
846
847 return 0;
848}
849
850static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
851 struct perf_sample *sample)
852{
853 u64 page;
854 unsigned int order = perf_evsel__intval(evsel, sample, "order");
855 u64 bytes = kmem_page_size << order;
856 struct page_stat *pstat;
857 struct page_stat this = {
858 .order = order,
859 };
860
861 if (use_pfn)
862 page = perf_evsel__intval(evsel, sample, "pfn");
863 else
864 page = perf_evsel__intval(evsel, sample, "page");
865
866 nr_page_frees++;
867 total_page_free_bytes += bytes;
868
869 this.page = page;
870 pstat = page_stat__find_page(&this);
871 if (pstat == NULL) {
872 pr_debug2("free without matching alloc at page %"PRIx64" (order: %d)\n",
873 page, order);
874
875 nr_page_nomatch++;
876 total_page_nomatch_bytes += bytes;
877
878 return 0;
879 }
880
881 this.gfp_flags = pstat->gfp_flags;
882 this.migrate_type = pstat->migrate_type;
883 this.callsite = pstat->callsite;
884
885 rb_erase(&pstat->node, &page_live_tree);
886 free(pstat);
887
888 if (live_page) {
889 order_stats[this.order][this.migrate_type]--;
890 } else {
891 pstat = page_stat__find_alloc(&this);
892 if (pstat == NULL)
893 return -ENOMEM;
894
895 pstat->nr_free++;
896 pstat->free_bytes += bytes;
897 }
898
899 pstat = page_stat__find_caller(&this);
900 if (pstat == NULL)
901 return -ENOENT;
902
903 pstat->nr_free++;
904 pstat->free_bytes += bytes;
905
906 if (live_page) {
907 pstat->nr_alloc--;
908 pstat->alloc_bytes -= bytes;
909
910 if (pstat->nr_alloc == 0) {
911 rb_erase(&pstat->node, &page_caller_tree);
912 free(pstat);
913 }
914 }
915
916 return 0;
917}
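/*
 * Editor's note: a free event only carries the pfn/page and order, so
 * the gfp flags, migrate type and callsite are recovered from the
 * live-page tree populated at allocation time. With --live the caller
 * and alloc views track pages still outstanding (frees subtract);
 * otherwise they accumulate totals for the whole session.
 */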
918
919static bool perf_kmem__skip_sample(struct perf_sample *sample)
920{
921 /* drop samples that fall outside the requested --time window */
922 if (perf_time__skip_sample(&ptime, sample->time))
923 return true;
924
925 return false;
926}
927
928typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
929 struct perf_sample *sample);
930
931static int process_sample_event(struct perf_tool *tool __maybe_unused,
932 union perf_event *event,
933 struct perf_sample *sample,
934 struct perf_evsel *evsel,
935 struct machine *machine)
936{
937 int err = 0;
938 struct thread *thread = machine__findnew_thread(machine, sample->pid,
939 sample->tid);
940
941 if (thread == NULL) {
942 pr_debug("problem processing %d event, skipping it.\n",
943 event->header.type);
944 return -1;
945 }
946
947 if (perf_kmem__skip_sample(sample))
948 return 0;
949
950 dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
951
952 if (evsel->handler != NULL) {
953 tracepoint_handler f = evsel->handler;
954 err = f(evsel, sample);
955 }
956
957 thread__put(thread);
958
959 return err;
960}
961
962static struct perf_tool perf_kmem = {
963 .sample = process_sample_event,
964 .comm = perf_event__process_comm,
965 .mmap = perf_event__process_mmap,
966 .mmap2 = perf_event__process_mmap2,
967 .ordered_events = true,
968};
969
970static double fragmentation(unsigned long n_req, unsigned long n_alloc)
971{
972 if (n_alloc == 0)
973 return 0.0;
974 else
975 return 100.0 - (100.0 * n_req / n_alloc);
976}
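/*
 * Editor's note: internal fragmentation here is the share of allocated
 * bytes the caller never asked for:
 *
 *	frag = 100 - 100 * n_req / n_alloc
 *
 * e.g. 1000 bytes requested but 1024 allocated (kmalloc rounding up to
 * the next slab size) gives 100 - 100 * 1000 / 1024 = ~2.344%.
 */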
977
978static void __print_slab_result(struct rb_root *root,
979 struct perf_session *session,
980 int n_lines, int is_caller)
981{
982 struct rb_node *next;
983 struct machine *machine = &session->machines.host;
984
985 printf("%.105s\n", graph_dotted_line);
986 printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
987 printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
988 printf("%.105s\n", graph_dotted_line);
989
990 next = rb_first(root);
991
992 while (next && n_lines--) {
993 struct alloc_stat *data = rb_entry(next, struct alloc_stat,
994 node);
995 struct symbol *sym = NULL;
996 struct map *map;
997 char buf[BUFSIZ];
998 u64 addr;
999
1000 if (is_caller) {
1001 addr = data->call_site;
1002 if (!raw_ip)
1003 sym = machine__find_kernel_function(machine, addr, &map);
1004 } else
1005 addr = data->ptr;
1006
1007 if (sym != NULL)
1008 snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
1009 addr - map->unmap_ip(map, sym->start));
1010 else
1011 snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
1012 printf(" %-34s |", buf);
1013
1014 printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %9lu | %6.3f%%\n",
1015 (unsigned long long)data->bytes_alloc,
1016 (unsigned long)data->bytes_alloc / data->hit,
1017 (unsigned long long)data->bytes_req,
1018 (unsigned long)data->bytes_req / data->hit,
1019 (unsigned long)data->hit,
1020 (unsigned long)data->pingpong,
1021 fragmentation(data->bytes_req, data->bytes_alloc));
1022
1023 next = rb_next(next);
1024 }
1025
1026 if (n_lines == -1)
1027 printf(" ... | ... | ... | ... | ... | ... \n");
1028
1029 printf("%.105s\n", graph_dotted_line);
1030}
1031
1032static const char * const migrate_type_str[] = {
1033 "UNMOVABL",
1034 "RECLAIM",
1035 "MOVABLE",
1036 "RESERVED",
1037 "CMA/ISLT",
1038 "UNKNOWN",
1039};
1040
1041static void __print_page_alloc_result(struct perf_session *session, int n_lines)
1042{
1043 struct rb_node *next = rb_first(&page_alloc_sorted);
1044 struct machine *machine = &session->machines.host;
1045 const char *format;
1046 int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1047
1048 printf("\n%.105s\n", graph_dotted_line);
1049 printf(" %-16s | %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
1050 use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
1051 gfp_len, "GFP flags");
1052 printf("%.105s\n", graph_dotted_line);
1053
1054 if (use_pfn)
1055 format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1056 else
1057 format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
1058
1059 while (next && n_lines--) {
1060 struct page_stat *data;
1061 struct symbol *sym;
1062 struct map *map;
1063 char buf[32];
1064 char *caller = buf;
1065
1066 data = rb_entry(next, struct page_stat, node);
1067 sym = machine__find_kernel_function(machine, data->callsite, &map);
1068 if (sym && sym->name)
1069 caller = sym->name;
1070 else
1071 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1072
1073 printf(format, (unsigned long long)data->page,
1074 (unsigned long long)data->alloc_bytes / 1024,
1075 data->nr_alloc, data->order,
1076 migrate_type_str[data->migrate_type],
1077 gfp_len, compact_gfp_string(data->gfp_flags), caller);
1078
1079 next = rb_next(next);
1080 }
1081
1082 if (n_lines == -1) {
1083 printf(" ... | ... | ... | ... | ... | %-*s | ...\n",
1084 gfp_len, "...");
1085 }
1086
1087 printf("%.105s\n", graph_dotted_line);
1088}
1089
1090static void __print_page_caller_result(struct perf_session *session, int n_lines)
1091{
1092 struct rb_node *next = rb_first(&page_caller_sorted);
1093 struct machine *machine = &session->machines.host;
1094 int gfp_len = max(strlen("GFP flags"), max_gfp_len);
1095
1096 printf("\n%.105s\n", graph_dotted_line);
1097 printf(" %5s alloc (KB) | Hits | Order | Mig.type | %-*s | Callsite\n",
1098 live_page ? "Live" : "Total", gfp_len, "GFP flags");
1099 printf("%.105s\n", graph_dotted_line);
1100
1101 while (next && n_lines--) {
1102 struct page_stat *data;
1103 struct symbol *sym;
1104 struct map *map;
1105 char buf[32];
1106 char *caller = buf;
1107
1108 data = rb_entry(next, struct page_stat, node);
1109 sym = machine__find_kernel_function(machine, data->callsite, &map);
1110 if (sym && sym->name)
1111 caller = sym->name;
1112 else
1113 scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
1114
1115 printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
1116 (unsigned long long)data->alloc_bytes / 1024,
1117 data->nr_alloc, data->order,
1118 migrate_type_str[data->migrate_type],
1119 gfp_len, compact_gfp_string(data->gfp_flags), caller);
1120
1121 next = rb_next(next);
1122 }
1123
1124 if (n_lines == -1) {
1125 printf(" ... | ... | ... | ... | %-*s | ...\n",
1126 gfp_len, "...");
1127 }
1128
1129 printf("%.105s\n", graph_dotted_line);
1130}
1131
1132static void print_gfp_flags(void)
1133{
1134 int i;
1135
1136 printf("#\n");
1137 printf("# GFP flags\n");
1138 printf("# ---------\n");
1139 for (i = 0; i < nr_gfps; i++) {
1140 printf("# %08x: %*s: %s\n", gfps[i].flags,
1141 (int) max_gfp_len, gfps[i].compact_str,
1142 gfps[i].human_readable);
1143 }
1144}
1145
1146static void print_slab_summary(void)
1147{
1148 printf("\nSUMMARY (SLAB allocator)");
1149 printf("\n========================\n");
1150 printf("Total bytes requested: %'lu\n", total_requested);
1151 printf("Total bytes allocated: %'lu\n", total_allocated);
1152 printf("Total bytes freed:     %'lu\n", total_freed);
1153 if (total_allocated > total_freed) {
1154 printf("Net total bytes allocated: %'lu\n",
1155 total_allocated - total_freed);
1156 }
1157 printf("Total bytes wasted on internal fragmentation: %'lu\n",
1158 total_allocated - total_requested);
1159 printf("Internal fragmentation: %f%%\n",
1160 fragmentation(total_requested, total_allocated));
1161 printf("Cross CPU allocations: %'lu/%'lu\n", nr_cross_allocs, nr_allocs);
1162}
1163
1164static void print_page_summary(void)
1165{
1166 int o, m;
1167 u64 nr_alloc_freed = nr_page_frees - nr_page_nomatch;
1168 u64 total_alloc_freed_bytes = total_page_free_bytes - total_page_nomatch_bytes;
1169
1170 printf("\nSUMMARY (page allocator)");
1171 printf("\n========================\n");
1172 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation requests",
1173 nr_page_allocs, total_page_alloc_bytes / 1024);
1174 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free requests",
1175 nr_page_frees, total_page_free_bytes / 1024);
1176 printf("\n");
1177
1178 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
1179 nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
1180 printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
1181 nr_page_allocs - nr_alloc_freed,
1182 (total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
1183 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
1184 nr_page_nomatch, total_page_nomatch_bytes / 1024);
1185 printf("\n");
1186
1187 printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total allocation failures",
1188 nr_page_fails, total_page_fail_bytes / 1024);
1189 printf("\n");
1190
1191 printf("%5s %12s %12s %12s %12s %12s\n", "Order", "Unmovable",
1192 "Reclaimable", "Movable", "Reserved", "CMA/Isolated");
1193 printf("%.5s %.12s %.12s %.12s %.12s %.12s\n", graph_dotted_line,
1194 graph_dotted_line, graph_dotted_line, graph_dotted_line,
1195 graph_dotted_line, graph_dotted_line);
1196
1197 for (o = 0; o < MAX_PAGE_ORDER; o++) {
1198 printf("%5d", o);
1199 for (m = 0; m < MAX_MIGRATE_TYPES - 1; m++) {
1200 if (order_stats[o][m])
1201 printf(" %'12d", order_stats[o][m]);
1202 else
1203 printf(" %12c", '.');
1204 }
1205 printf("\n");
1206 }
1207}
1208
1209static void print_slab_result(struct perf_session *session)
1210{
1211 if (caller_flag)
1212 __print_slab_result(&root_caller_sorted, session, caller_lines, 1);
1213 if (alloc_flag)
1214 __print_slab_result(&root_alloc_sorted, session, alloc_lines, 0);
1215 print_slab_summary();
1216}
1217
1218static void print_page_result(struct perf_session *session)
1219{
1220 if (caller_flag || alloc_flag)
1221 print_gfp_flags();
1222 if (caller_flag)
1223 __print_page_caller_result(session, caller_lines);
1224 if (alloc_flag)
1225 __print_page_alloc_result(session, alloc_lines);
1226 print_page_summary();
1227}
1228
1229static void print_result(struct perf_session *session)
1230{
1231 if (kmem_slab)
1232 print_slab_result(session);
1233 if (kmem_page)
1234 print_page_result(session);
1235}
1236
1237static LIST_HEAD(slab_caller_sort);
1238static LIST_HEAD(slab_alloc_sort);
1239static LIST_HEAD(page_caller_sort);
1240static LIST_HEAD(page_alloc_sort);
1241
1242static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
1243 struct list_head *sort_list)
1244{
1245 struct rb_node **new = &(root->rb_node);
1246 struct rb_node *parent = NULL;
1247 struct sort_dimension *sort;
1248
1249 while (*new) {
1250 struct alloc_stat *this;
1251 int cmp = 0;
1252
1253 this = rb_entry(*new, struct alloc_stat, node);
1254 parent = *new;
1255
1256 list_for_each_entry(sort, sort_list, list) {
1257 cmp = sort->cmp(data, this);
1258 if (cmp)
1259 break;
1260 }
1261
1262 if (cmp > 0)
1263 new = &((*new)->rb_left);
1264 else
1265 new = &((*new)->rb_right);
1266 }
1267
1268 rb_link_node(&data->node, parent, new);
1269 rb_insert_color(&data->node, root);
1270}
1271
1272static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted,
1273 struct list_head *sort_list)
1274{
1275 struct rb_node *node;
1276 struct alloc_stat *data;
1277
1278 for (;;) {
1279 node = rb_first(root);
1280 if (!node)
1281 break;
1282
1283 rb_erase(node, root);
1284 data = rb_entry(node, struct alloc_stat, node);
1285 sort_slab_insert(root_sorted, data, sort_list);
1286 }
1287}
1288
1289static void sort_page_insert(struct rb_root *root, struct page_stat *data,
1290 struct list_head *sort_list)
1291{
1292 struct rb_node **new = &root->rb_node;
1293 struct rb_node *parent = NULL;
1294 struct sort_dimension *sort;
1295
1296 while (*new) {
1297 struct page_stat *this;
1298 int cmp = 0;
1299
1300 this = rb_entry(*new, struct page_stat, node);
1301 parent = *new;
1302
1303 list_for_each_entry(sort, sort_list, list) {
1304 cmp = sort->cmp(data, this);
1305 if (cmp)
1306 break;
1307 }
1308
1309 if (cmp > 0)
1310 new = &parent->rb_left;
1311 else
1312 new = &parent->rb_right;
1313 }
1314
1315 rb_link_node(&data->node, parent, new);
1316 rb_insert_color(&data->node, root);
1317}
1318
1319static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
1320 struct list_head *sort_list)
1321{
1322 struct rb_node *node;
1323 struct page_stat *data;
1324
1325 for (;;) {
1326 node = rb_first(root);
1327 if (!node)
1328 break;
1329
1330 rb_erase(node, root);
1331 data = rb_entry(node, struct page_stat, node);
1332 sort_page_insert(root_sorted, data, sort_list);
1333 }
1334}
1335
1336static void sort_result(void)
1337{
1338 if (kmem_slab) {
1339 __sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
1340 &slab_alloc_sort);
1341 __sort_slab_result(&root_caller_stat, &root_caller_sorted,
1342 &slab_caller_sort);
1343 }
1344 if (kmem_page) {
1345 if (live_page)
1346 __sort_page_result(&page_live_tree, &page_alloc_sorted,
1347 &page_alloc_sort);
1348 else
1349 __sort_page_result(&page_alloc_tree, &page_alloc_sorted,
1350 &page_alloc_sort);
1351
1352 __sort_page_result(&page_caller_tree, &page_caller_sorted,
1353 &page_caller_sort);
1354 }
1355}
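/*
 * Editor's note: stats are accumulated in trees keyed for lookup (by
 * pointer, call site or page) and only ordered for display here: each
 * node is erased from its stat tree and re-inserted into a second tree
 * sorted by the user's --sort keys, compared in the order given.
 */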
1356
1357static int __cmd_kmem(struct perf_session *session)
1358{
1359 int err = -EINVAL;
1360 struct perf_evsel *evsel;
1361 const struct perf_evsel_str_handler kmem_tracepoints[] = {
1362 /* slab allocator */
1363 { "kmem:kmalloc", perf_evsel__process_alloc_event, },
1364 { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, },
1365 { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, },
1366 { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, },
1367 { "kmem:kfree", perf_evsel__process_free_event, },
1368 { "kmem:kmem_cache_free", perf_evsel__process_free_event, },
1369 /* page allocator */
1370 { "kmem:mm_page_alloc", perf_evsel__process_page_alloc_event, },
1371 { "kmem:mm_page_free", perf_evsel__process_page_free_event, },
1372 };
1373
1374 if (!perf_session__has_traces(session, "kmem record"))
1375 goto out;
1376
1377 if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) {
1378 pr_err("Initializing perf session tracepoint handlers failed\n");
1379 goto out;
1380 }
1381
1382 evlist__for_each_entry(session->evlist, evsel) {
1383 if (!strcmp(perf_evsel__name(evsel), "kmem:mm_page_alloc") &&
1384 perf_evsel__field(evsel, "pfn")) {
1385 use_pfn = true;
1386 break;
1387 }
1388 }
1389
1390 setup_pager();
1391 err = perf_session__process_events(session);
1392 if (err != 0) {
1393 pr_err("error during process events: %d\n", err);
1394 goto out;
1395 }
1396 sort_result();
1397 print_result(session);
1398out:
1399 return err;
1400}
1401
1402/* slab sort keys */
1403static int ptr_cmp(void *a, void *b)
1404{
1405 struct alloc_stat *l = a;
1406 struct alloc_stat *r = b;
1407
1408 if (l->ptr < r->ptr)
1409 return -1;
1410 else if (l->ptr > r->ptr)
1411 return 1;
1412 return 0;
1413}
1414
1415static struct sort_dimension ptr_sort_dimension = {
1416 .name = "ptr",
1417 .cmp = ptr_cmp,
1418};
1419
1420static int slab_callsite_cmp(void *a, void *b)
1421{
1422 struct alloc_stat *l = a;
1423 struct alloc_stat *r = b;
1424
1425 if (l->call_site < r->call_site)
1426 return -1;
1427 else if (l->call_site > r->call_site)
1428 return 1;
1429 return 0;
1430}
1431
1432static struct sort_dimension callsite_sort_dimension = {
1433 .name = "callsite",
1434 .cmp = slab_callsite_cmp,
1435};
1436
1437static int hit_cmp(void *a, void *b)
1438{
1439 struct alloc_stat *l = a;
1440 struct alloc_stat *r = b;
1441
1442 if (l->hit < r->hit)
1443 return -1;
1444 else if (l->hit > r->hit)
1445 return 1;
1446 return 0;
1447}
1448
1449static struct sort_dimension hit_sort_dimension = {
1450 .name = "hit",
1451 .cmp = hit_cmp,
1452};
1453
1454static int bytes_cmp(void *a, void *b)
1455{
1456 struct alloc_stat *l = a;
1457 struct alloc_stat *r = b;
1458
1459 if (l->bytes_alloc < r->bytes_alloc)
1460 return -1;
1461 else if (l->bytes_alloc > r->bytes_alloc)
1462 return 1;
1463 return 0;
1464}
1465
1466static struct sort_dimension bytes_sort_dimension = {
1467 .name = "bytes",
1468 .cmp = bytes_cmp,
1469};
1470
1471static int frag_cmp(void *a, void *b)
1472{
1473 double x, y;
1474 struct alloc_stat *l = a;
1475 struct alloc_stat *r = b;
1476
1477 x = fragmentation(l->bytes_req, l->bytes_alloc);
1478 y = fragmentation(r->bytes_req, r->bytes_alloc);
1479
1480 if (x < y)
1481 return -1;
1482 else if (x > y)
1483 return 1;
1484 return 0;
1485}
1486
1487static struct sort_dimension frag_sort_dimension = {
1488 .name = "frag",
1489 .cmp = frag_cmp,
1490};
1491
1492static int pingpong_cmp(void *a, void *b)
1493{
1494 struct alloc_stat *l = a;
1495 struct alloc_stat *r = b;
1496
1497 if (l->pingpong < r->pingpong)
1498 return -1;
1499 else if (l->pingpong > r->pingpong)
1500 return 1;
1501 return 0;
1502}
1503
1504static struct sort_dimension pingpong_sort_dimension = {
1505 .name = "pingpong",
1506 .cmp = pingpong_cmp,
1507};
1508
1509/* page sort keys */
1510static int page_cmp(void *a, void *b)
1511{
1512 struct page_stat *l = a;
1513 struct page_stat *r = b;
1514
1515 if (l->page < r->page)
1516 return -1;
1517 else if (l->page > r->page)
1518 return 1;
1519 return 0;
1520}
1521
1522static struct sort_dimension page_sort_dimension = {
1523 .name = "page",
1524 .cmp = page_cmp,
1525};
1526
1527static int page_callsite_cmp(void *a, void *b)
1528{
1529 struct page_stat *l = a;
1530 struct page_stat *r = b;
1531
1532 if (l->callsite < r->callsite)
1533 return -1;
1534 else if (l->callsite > r->callsite)
1535 return 1;
1536 return 0;
1537}
1538
1539static struct sort_dimension page_callsite_sort_dimension = {
1540 .name = "callsite",
1541 .cmp = page_callsite_cmp,
1542};
1543
1544static int page_hit_cmp(void *a, void *b)
1545{
1546 struct page_stat *l = a;
1547 struct page_stat *r = b;
1548
1549 if (l->nr_alloc < r->nr_alloc)
1550 return -1;
1551 else if (l->nr_alloc > r->nr_alloc)
1552 return 1;
1553 return 0;
1554}
1555
1556static struct sort_dimension page_hit_sort_dimension = {
1557 .name = "hit",
1558 .cmp = page_hit_cmp,
1559};
1560
1561static int page_bytes_cmp(void *a, void *b)
1562{
1563 struct page_stat *l = a;
1564 struct page_stat *r = b;
1565
1566 if (l->alloc_bytes < r->alloc_bytes)
1567 return -1;
1568 else if (l->alloc_bytes > r->alloc_bytes)
1569 return 1;
1570 return 0;
1571}
1572
1573static struct sort_dimension page_bytes_sort_dimension = {
1574 .name = "bytes",
1575 .cmp = page_bytes_cmp,
1576};
1577
1578static int page_order_cmp(void *a, void *b)
1579{
1580 struct page_stat *l = a;
1581 struct page_stat *r = b;
1582
1583 if (l->order < r->order)
1584 return -1;
1585 else if (l->order > r->order)
1586 return 1;
1587 return 0;
1588}
1589
1590static struct sort_dimension page_order_sort_dimension = {
1591 .name = "order",
1592 .cmp = page_order_cmp,
1593};
1594
1595static int migrate_type_cmp(void *a, void *b)
1596{
1597 struct page_stat *l = a;
1598 struct page_stat *r = b;
1599
1600 /* a migrate_type of -1U acts as a wildcard when looking up a freed page */
1601 if (l->migrate_type == -1U)
1602 return 0;
1603
1604 if (l->migrate_type < r->migrate_type)
1605 return -1;
1606 else if (l->migrate_type > r->migrate_type)
1607 return 1;
1608 return 0;
1609}
1610
1611static struct sort_dimension migrate_type_sort_dimension = {
1612 .name = "migtype",
1613 .cmp = migrate_type_cmp,
1614};
1615
1616static int gfp_flags_cmp(void *a, void *b)
1617{
1618 struct page_stat *l = a;
1619 struct page_stat *r = b;
1620
1621 /* a gfp_flags value of -1U acts as a wildcard when looking up a freed page */
1622 if (l->gfp_flags == -1U)
1623 return 0;
1624
1625 if (l->gfp_flags < r->gfp_flags)
1626 return -1;
1627 else if (l->gfp_flags > r->gfp_flags)
1628 return 1;
1629 return 0;
1630}
1631
1632static struct sort_dimension gfp_flags_sort_dimension = {
1633 .name = "gfp",
1634 .cmp = gfp_flags_cmp,
1635};
1636
1637static struct sort_dimension *slab_sorts[] = {
1638 &ptr_sort_dimension,
1639 &callsite_sort_dimension,
1640 &hit_sort_dimension,
1641 &bytes_sort_dimension,
1642 &frag_sort_dimension,
1643 &pingpong_sort_dimension,
1644};
1645
1646static struct sort_dimension *page_sorts[] = {
1647 &page_sort_dimension,
1648 &page_callsite_sort_dimension,
1649 &page_hit_sort_dimension,
1650 &page_bytes_sort_dimension,
1651 &page_order_sort_dimension,
1652 &migrate_type_sort_dimension,
1653 &gfp_flags_sort_dimension,
1654};
1655
1656static int slab_sort_dimension__add(const char *tok, struct list_head *list)
1657{
1658 struct sort_dimension *sort;
1659 int i;
1660
1661 for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
1662 if (!strcmp(slab_sorts[i]->name, tok)) {
1663 sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
1664 if (!sort) {
1665 pr_err("%s: memdup failed\n", __func__);
1666 return -1;
1667 }
1668 list_add_tail(&sort->list, list);
1669 return 0;
1670 }
1671 }
1672
1673 return -1;
1674}
1675
1676static int page_sort_dimension__add(const char *tok, struct list_head *list)
1677{
1678 struct sort_dimension *sort;
1679 int i;
1680
1681 for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
1682 if (!strcmp(page_sorts[i]->name, tok)) {
1683 sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
1684 if (!sort) {
1685 pr_err("%s: memdup failed\n", __func__);
1686 return -1;
1687 }
1688 list_add_tail(&sort->list, list);
1689 return 0;
1690 }
1691 }
1692
1693 return -1;
1694}
1695
1696static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
1697{
1698 char *tok;
1699 char *str = strdup(arg);
1700 char *pos = str;
1701
1702 if (!str) {
1703 pr_err("%s: strdup failed\n", __func__);
1704 return -1;
1705 }
1706
1707 while (true) {
1708 tok = strsep(&pos, ",");
1709 if (!tok)
1710 break;
1711 if (slab_sort_dimension__add(tok, sort_list) < 0) {
1712 error("Unknown slab --sort key: '%s'", tok);
1713 free(str);
1714 return -1;
1715 }
1716 }
1717
1718 free(str);
1719 return 0;
1720}
1721
1722static int setup_page_sorting(struct list_head *sort_list, const char *arg)
1723{
1724 char *tok;
1725 char *str = strdup(arg);
1726 char *pos = str;
1727
1728 if (!str) {
1729 pr_err("%s: strdup failed\n", __func__);
1730 return -1;
1731 }
1732
1733 while (true) {
1734 tok = strsep(&pos, ",");
1735 if (!tok)
1736 break;
1737 if (page_sort_dimension__add(tok, sort_list) < 0) {
1738 error("Unknown page --sort key: '%s'", tok);
1739 free(str);
1740 return -1;
1741 }
1742 }
1743
1744 free(str);
1745 return 0;
1746}
1747
1748static int parse_sort_opt(const struct option *opt __maybe_unused,
1749 const char *arg, int unset __maybe_unused)
1750{
1751 if (!arg)
1752 return -1;
1753
1754 if (kmem_page > kmem_slab ||
1755 (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
1756 if (caller_flag > alloc_flag)
1757 return setup_page_sorting(&page_caller_sort, arg);
1758 else
1759 return setup_page_sorting(&page_alloc_sort, arg);
1760 } else {
1761 if (caller_flag > alloc_flag)
1762 return setup_slab_sorting(&slab_caller_sort, arg);
1763 else
1764 return setup_slab_sorting(&slab_alloc_sort, arg);
1765 }
1766
1767 return 0;
1768}
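/*
 * Illustrative usage (editor's addition): the analysis mode and the
 * most recent of --alloc/--caller decide which sort list -s fills:
 *
 *	# perf kmem stat --caller --sort frag,hit
 *	# perf kmem stat --page --alloc --sort bytes,hit
 */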
1769
1770static int parse_caller_opt(const struct option *opt __maybe_unused,
1771 const char *arg __maybe_unused,
1772 int unset __maybe_unused)
1773{
1774 caller_flag = (alloc_flag + 1);
1775 return 0;
1776}
1777
1778static int parse_alloc_opt(const struct option *opt __maybe_unused,
1779 const char *arg __maybe_unused,
1780 int unset __maybe_unused)
1781{
1782 alloc_flag = (caller_flag + 1);
1783 return 0;
1784}
1785
1786static int parse_slab_opt(const struct option *opt __maybe_unused,
1787 const char *arg __maybe_unused,
1788 int unset __maybe_unused)
1789{
1790 kmem_slab = (kmem_page + 1);
1791 return 0;
1792}
1793
1794static int parse_page_opt(const struct option *opt __maybe_unused,
1795 const char *arg __maybe_unused,
1796 int unset __maybe_unused)
1797{
1798 kmem_page = (kmem_slab + 1);
1799 return 0;
1800}
1801
1802static int parse_line_opt(const struct option *opt __maybe_unused,
1803 const char *arg, int unset __maybe_unused)
1804{
1805 int lines;
1806
1807 if (!arg)
1808 return -1;
1809
1810 lines = strtoul(arg, NULL, 10);
1811
1812 if (caller_flag > alloc_flag)
1813 caller_lines = lines;
1814 else
1815 alloc_lines = lines;
1816
1817 return 0;
1818}
1819
1820static int __cmd_record(int argc, const char **argv)
1821{
1822 const char * const record_args[] = {
1823 "record", "-a", "-R", "-c", "1",
1824 };
1825 const char * const slab_events[] = {
1826 "-e", "kmem:kmalloc",
1827 "-e", "kmem:kmalloc_node",
1828 "-e", "kmem:kfree",
1829 "-e", "kmem:kmem_cache_alloc",
1830 "-e", "kmem:kmem_cache_alloc_node",
1831 "-e", "kmem:kmem_cache_free",
1832 };
1833 const char * const page_events[] = {
1834 "-e", "kmem:mm_page_alloc",
1835 "-e", "kmem:mm_page_free",
1836 };
1837 unsigned int rec_argc, i, j;
1838 const char **rec_argv;
1839
1840 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1841 if (kmem_slab)
1842 rec_argc += ARRAY_SIZE(slab_events);
1843 if (kmem_page)
1844 rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
1845
1846 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1847
1848 if (rec_argv == NULL)
1849 return -ENOMEM;
1850
1851 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1852 rec_argv[i] = strdup(record_args[i]);
1853
1854 if (kmem_slab) {
1855 for (j = 0; j < ARRAY_SIZE(slab_events); j++, i++)
1856 rec_argv[i] = strdup(slab_events[j]);
1857 }
1858 if (kmem_page) {
1859 rec_argv[i++] = strdup("-g");
1860
1861 for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
1862 rec_argv[i] = strdup(page_events[j]);
1863 }
1864
1865 for (j = 1; j < (unsigned int)argc; j++, i++)
1866 rec_argv[i] = argv[j];
1867
1868 return cmd_record(i, rec_argv, NULL);
1869}
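/*
 * Editor's note: for the default slab mode the synthesized command is
 * equivalent to running:
 *
 *	# perf record -a -R -c 1 \
 *		-e kmem:kmalloc -e kmem:kmalloc_node -e kmem:kfree \
 *		-e kmem:kmem_cache_alloc -e kmem:kmem_cache_alloc_node \
 *		-e kmem:kmem_cache_free <user args>
 *
 * while --page adds -g for callchains plus the mm_page_alloc and
 * mm_page_free events.
 */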
1870
1871static int kmem_config(const char *var, const char *value, void *cb __maybe_unused)
1872{
1873 if (!strcmp(var, "kmem.default")) {
1874 if (!strcmp(value, "slab"))
1875 kmem_default = KMEM_SLAB;
1876 else if (!strcmp(value, "page"))
1877 kmem_default = KMEM_PAGE;
1878 else
1879 pr_err("invalid default value ('slab' or 'page' required): %s\n",
1880 value);
1881 return 0;
1882 }
1883
1884 return 0;
1885}
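/*
 * Editor's note: kmem.default picks the analysis mode when neither
 * --slab nor --page is given on the command line, e.g. in
 * ~/.perfconfig:
 *
 *	[kmem]
 *		default = page
 */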
1886
1887int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
1888{
1889 const char * const default_slab_sort = "frag,hit,bytes";
1890 const char * const default_page_sort = "bytes,hit";
1891 struct perf_data_file file = {
1892 .mode = PERF_DATA_MODE_READ,
1893 };
1894 const struct option kmem_options[] = {
1895 OPT_STRING('i', "input", &input_name, "file", "input file name"),
1896 OPT_INCR('v', "verbose", &verbose,
1897 "be more verbose (show symbol address, etc)"),
1898 OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
1899 "show per-callsite statistics", parse_caller_opt),
1900 OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
1901 "show per-allocation statistics", parse_alloc_opt),
1902 OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
1903 "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
1904 "page, order, migtype, gfp", parse_sort_opt),
1905 OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
1906 OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
1907 OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
1908 OPT_CALLBACK_NOOPT(0, "slab", NULL, NULL, "Analyze slab allocator",
1909 parse_slab_opt),
1910 OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
1911 parse_page_opt),
1912 OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
1913 OPT_STRING(0, "time", &time_str, "str",
1914 "Time span of interest (start,stop)"),
1915 OPT_END()
1916 };
1917 const char *const kmem_subcommands[] = { "record", "stat", NULL };
1918 const char *kmem_usage[] = {
1919 NULL,
1920 NULL
1921 };
1922 struct perf_session *session;
1923 int ret = -1;
1924 const char errmsg[] = "No %s allocation events found. Have you run 'perf kmem record --%s'?\n";
1925
1926 perf_config(kmem_config, NULL);
1927 argc = parse_options_subcommand(argc, argv, kmem_options,
1928 kmem_subcommands, kmem_usage, 0);
1929
1930 if (!argc)
1931 usage_with_options(kmem_usage, kmem_options);
1932
1933 if (kmem_slab == 0 && kmem_page == 0) {
1934 if (kmem_default == KMEM_SLAB)
1935 kmem_slab = 1;
1936 else
1937 kmem_page = 1;
1938 }
1939
1940 if (!strncmp(argv[0], "rec", 3)) {
1941 symbol__init(NULL);
1942 return __cmd_record(argc, argv);
1943 }
1944
1945 file.path = input_name;
1946
1947 kmem_session = session = perf_session__new(&file, false, &perf_kmem);
1948 if (session == NULL)
1949 return -1;
1950
1951 if (kmem_slab) {
1952 if (!perf_evlist__find_tracepoint_by_name(session->evlist,
1953 "kmem:kmalloc")) {
1954 pr_err(errmsg, "slab", "slab");
1955 goto out_delete;
1956 }
1957 }
1958
1959 if (kmem_page) {
1960 struct perf_evsel *evsel;
1961
1962 evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
1963 "kmem:mm_page_alloc");
1964 if (evsel == NULL) {
1965 pr_err(errmsg, "page", "page");
1966 goto out_delete;
1967 }
1968
1969 kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
1970 symbol_conf.use_callchain = true;
1971 }
1972
1973 symbol__init(&session->header.env);
1974
1975 if (perf_time__parse_str(&ptime, time_str) != 0) {
1976 pr_err("Invalid time string\n");
1977 ret = -EINVAL; goto out_delete; /* don't leak the session */
1978 }
1979
1980 if (!strcmp(argv[0], "stat")) {
1981 setlocale(LC_ALL, "");
1982
1983 if (cpu__setup_cpunode_map())
1984 goto out_delete;
1985
1986 if (list_empty(&slab_caller_sort))
1987 setup_slab_sorting(&slab_caller_sort, default_slab_sort);
1988 if (list_empty(&slab_alloc_sort))
1989 setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
1990 if (list_empty(&page_caller_sort))
1991 setup_page_sorting(&page_caller_sort, default_page_sort);
1992 if (list_empty(&page_alloc_sort))
1993 setup_page_sorting(&page_alloc_sort, default_page_sort);
1994
1995 if (kmem_page) {
1996 setup_page_sorting(&page_alloc_sort_input,
1997 "page,order,migtype,gfp");
1998 setup_page_sorting(&page_caller_sort_input,
1999 "callsite,order,migtype,gfp");
2000 }
2001 ret = __cmd_kmem(session);
2002 } else
2003 usage_with_options(kmem_usage, kmem_options);
2004
2005out_delete:
2006 perf_session__delete(session);
2007
2008 return ret;
2009}
2010