/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_MEM_EVENTS_H
#define __PERF_MEM_EVENTS_H

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/perf_event.h>
#include "stat.h"
#include "evsel.h"

struct perf_mem_event {
	bool		record;
	bool		supported;
	bool		ldlat;
	u32		aux_event;
	const char	*tag;
	const char	*name;
	const char	*event_name;
};

struct mem_info {
	struct addr_map_symbol	iaddr;
	struct addr_map_symbol	daddr;
	union perf_mem_data_src	data_src;
	refcount_t		refcnt;
};

enum {
	PERF_MEM_EVENTS__LOAD,
	PERF_MEM_EVENTS__STORE,
	PERF_MEM_EVENTS__LOAD_STORE,
	PERF_MEM_EVENTS__MAX,
};

extern unsigned int perf_mem_events__loads_ldlat;
extern struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX];

int perf_pmu__mem_events_parse(struct perf_pmu *pmu, const char *str);
int perf_pmu__mem_events_init(struct perf_pmu *pmu);

struct perf_mem_event *perf_pmu__mem_events_ptr(struct perf_pmu *pmu, int i);
struct perf_pmu *perf_mem_events_find_pmu(void);
int perf_pmu__mem_events_num_mem_pmus(struct perf_pmu *pmu);
bool is_mem_loads_aux_event(struct evsel *leader);

void perf_pmu__mem_events_list(struct perf_pmu *pmu);
int perf_mem_events__record_args(const char **rec_argv, int *argv_nr);
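
/*
 * Usage sketch (illustrative, not part of the upstream header; the local
 * variable names and the "load,store" event list are assumptions): a tool
 * typically locates the memory-events PMU, initializes its event table,
 * optionally parses a user-supplied event list, and then walks the
 * PERF_MEM_EVENTS__* slots:
 *
 *	struct perf_pmu *pmu = perf_mem_events_find_pmu();
 *
 *	if (!pmu || perf_pmu__mem_events_init(pmu))
 *		return -1;
 *	if (perf_pmu__mem_events_parse(pmu, "load,store"))
 *		return -1;
 *
 *	for (int i = 0; i < PERF_MEM_EVENTS__MAX; i++) {
 *		struct perf_mem_event *e = perf_pmu__mem_events_ptr(pmu, i);
 *
 *		if (e && e->record && e->supported)
 *			... emit the record arguments for this slot ...
 *	}
 */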

int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lck_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__blk_scnprintf(char *out, size_t sz, struct mem_info *mem_info);

int perf_script__meminfo_scnprintf(char *bf, size_t size, struct mem_info *mem_info);
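
/*
 * Usage sketch (illustrative, not from the upstream header; 'mi' is
 * assumed to be a struct mem_info resolved elsewhere): each of the
 * perf_mem__ scnprintf helpers above decodes one aspect of
 * mem_info::data_src into a caller-provided buffer, while
 * perf_script__meminfo_scnprintf() formats the combined string used by
 * perf script output:
 *
 *	char lvl[64], tlb[64];
 *
 *	perf_mem__lvl_scnprintf(lvl, sizeof(lvl), mi);
 *	perf_mem__tlb_scnprintf(tlb, sizeof(tlb), mi);
 *	printf("level: %s, tlb: %s\n", lvl, tlb);
 */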

struct c2c_stats {
	u32	nr_entries;

	u32	locks;      /* count of 'lock' transactions */
	u32	store;      /* count of all stores in trace */
	u32	st_uncache; /* stores to uncacheable address */
	u32	st_noadrs;  /* cacheable store with no address */
	u32	st_l1hit;   /* count of stores that hit L1D */
	u32	st_l1miss;  /* count of stores that miss L1D */
	u32	st_na;      /* count of stores where the memory level is not available */
	u32	load;       /* count of all loads in trace */
	u32	ld_excl;    /* exclusive loads, rmt/lcl DRAM - snp none/miss */
	u32	ld_shared;  /* shared loads, rmt/lcl DRAM - snp hit */
	u32	ld_uncache; /* loads to uncacheable address */
	u32	ld_io;      /* loads to io address */
	u32	ld_miss;    /* loads miss */
	u32	ld_noadrs;  /* cacheable load with no address */
	u32	ld_fbhit;   /* count of loads hitting Fill Buffer */
	u32	ld_l1hit;   /* count of loads that hit L1D */
	u32	ld_l2hit;   /* count of loads that hit L2D */
	u32	ld_llchit;  /* count of loads that hit LLC */
	u32	lcl_hitm;   /* count of loads with local HITM */
	u32	rmt_hitm;   /* count of loads with remote HITM */
	u32	tot_hitm;   /* count of loads with local and remote HITM */
	u32	lcl_peer;   /* count of loads with local peer cache */
	u32	rmt_peer;   /* count of loads with remote peer cache */
	u32	tot_peer;   /* count of loads with local and remote peer cache */
	u32	rmt_hit;    /* count of loads with remote hit clean */
	u32	lcl_dram;   /* count of loads miss to local DRAM */
	u32	rmt_dram;   /* count of loads miss to remote DRAM */
	u32	blk_data;   /* count of loads blocked by data */
	u32	blk_addr;   /* count of loads blocked by address conflict */
	u32	nomap;      /* count of load/stores with no phys addrs */
	u32	noparse;    /* count of unparsable data sources */
};

struct hist_entry;
int c2c_decode_stats(struct c2c_stats *stats, struct mem_info *mi);
void c2c_add_stats(struct c2c_stats *stats, struct c2c_stats *add);
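
/*
 * Usage sketch (illustrative, not part of the upstream header; 'mi' and
 * 'total' are assumed to exist in the caller, and the 0-on-success
 * return convention below is an assumption): c2c_decode_stats() classifies
 * a single sample's mem_info into the matching c2c_stats counters, and
 * c2c_add_stats() folds one set of counters into another:
 *
 *	struct c2c_stats entry = {0};
 *
 *	if (!c2c_decode_stats(&entry, mi))
 *		c2c_add_stats(&total, &entry);
 */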
99
100#endif /* __PERF_MEM_EVENTS_H */