1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Performance counter support for POWER8 processors.
4 *
5 * Copyright 2009 Paul Mackerras, IBM Corporation.
6 * Copyright 2013 Michael Ellerman, IBM Corporation.
7 */
8
9#define pr_fmt(fmt) "power8-pmu: " fmt
10
11#include "isa207-common.h"
12
13/*
14 * Some power8 event codes.
15 */
16#define EVENT(_name, _code) _name = _code,
17
18enum {
19#include "power8-events-list.h"
20};
21
22#undef EVENT
23
24/* MMCRA IFM bits - POWER8 */
25#define POWER8_MMCRA_IFM1 0x0000000040000000UL
26#define POWER8_MMCRA_IFM2 0x0000000080000000UL
27#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
28#define POWER8_MMCRA_BHRB_MASK 0x00000000C0000000UL
29
30/*
31 * Raw event encoding for PowerISA v2.07 (Power8):
32 *
33 * 60 56 52 48 44 40 36 32
34 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
35 * | | [ ] [ thresh_cmp ] [ thresh_ctl ]
36 * | | | |
37 * | | *- IFM (Linux) thresh start/stop OR FAB match -*
38 * | *- BHRB (Linux)
39 * *- EBB (Linux)
40 *
41 * 28 24 20 16 12 8 4 0
42 * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
43 * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
44 * | | | | |
45 * | | | | *- mark
46 * | | *- L1/L2/L3 cache_sel |
47 * | | |
48 * | *- sampling mode for marked events *- combine
49 * |
50 * *- thresh_sel
51 *
52 * Below uses IBM bit numbering.
53 *
54 * MMCR1[x:y] = unit (PMCxUNIT)
55 * MMCR1[x] = combine (PMCxCOMB)
56 *
57 * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
58 * # PM_MRK_FAB_RSP_MATCH
59 * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
60 * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
61 * # PM_MRK_FAB_RSP_MATCH_CYC
62 * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
63 * else
64 * MMCRA[48:55] = thresh_ctl (THRESH START/END)
65 *
66 * if thresh_sel:
67 * MMCRA[45:47] = thresh_sel
68 *
69 * if thresh_cmp:
70 * MMCRA[22:24] = thresh_cmp[0:2]
71 * MMCRA[25:31] = thresh_cmp[3:9]
72 *
73 * if unit == 6 or unit == 7
74 * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
75 * else if unit == 8 or unit == 9:
76 * if cache_sel[0] == 0: # L3 bank
77 * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
78 * else if cache_sel[0] == 1:
79 * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
80 * else if cache_sel[1]: # L1 event
81 * MMCR1[16] = cache_sel[2]
82 * MMCR1[17] = cache_sel[3]
83 *
84 * if mark:
85 * MMCRA[63] = 1 (SAMPLE_ENABLE)
86 * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
87 * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
88 *
89 * if EBB and BHRB:
90 * MMCRA[32:33] = IFM
91 *
92 */
93
/* PowerISA v2.07 format attribute structure */
95extern const struct attribute_group isa207_pmu_format_group;
96
97/* Table of alternatives, sorted by column 0 */
98static const unsigned int event_alternatives[][MAX_ALT] = {
99 { PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT },
100 { PM_BR_MRK_2PATH, PM_BR_MRK_2PATH_ALT },
101 { PM_L3_CO_MEPF, PM_L3_CO_MEPF_ALT },
102 { PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L2MISS_ALT },
103 { PM_CMPLU_STALL_ALT, PM_CMPLU_STALL },
104 { PM_BR_2PATH, PM_BR_2PATH_ALT },
105 { PM_INST_DISP, PM_INST_DISP_ALT },
106 { PM_RUN_CYC_ALT, PM_RUN_CYC },
107 { PM_MRK_FILT_MATCH, PM_MRK_FILT_MATCH_ALT },
108 { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
109 { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
110};
111
112static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
113{
114 int num_alt = 0;
115
116 num_alt = isa207_get_alternatives(event, alt,
117 ARRAY_SIZE(event_alternatives), flags,
118 event_alternatives);
119
120 return num_alt;
121}
122
123GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
124GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
125GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
126GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
127GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
128GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
129GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
130GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
131GENERIC_EVENT_ATTR(mem_access, MEM_ACCESS);
132
133CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
134CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
135
136CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
137CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
138CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
139CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
140CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
141
142CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
143CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
144CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
145CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
146CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
147
148CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
149CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
150CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
151CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
152
153static struct attribute *power8_events_attr[] = {
154 GENERIC_EVENT_PTR(PM_CYC),
155 GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
156 GENERIC_EVENT_PTR(PM_CMPLU_STALL),
157 GENERIC_EVENT_PTR(PM_INST_CMPL),
158 GENERIC_EVENT_PTR(PM_BRU_FIN),
159 GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
160 GENERIC_EVENT_PTR(PM_LD_REF_L1),
161 GENERIC_EVENT_PTR(PM_LD_MISS_L1),
162 GENERIC_EVENT_PTR(MEM_ACCESS),
163
164 CACHE_EVENT_PTR(PM_LD_MISS_L1),
165 CACHE_EVENT_PTR(PM_LD_REF_L1),
166 CACHE_EVENT_PTR(PM_L1_PREF),
167 CACHE_EVENT_PTR(PM_ST_MISS_L1),
168 CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
169 CACHE_EVENT_PTR(PM_INST_FROM_L1),
170 CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
171 CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
172 CACHE_EVENT_PTR(PM_DATA_FROM_L3),
173 CACHE_EVENT_PTR(PM_L3_PREF_ALL),
174 CACHE_EVENT_PTR(PM_L2_ST_MISS),
175 CACHE_EVENT_PTR(PM_L2_ST),
176
177 CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
178 CACHE_EVENT_PTR(PM_BRU_FIN),
179
180 CACHE_EVENT_PTR(PM_DTLB_MISS),
181 CACHE_EVENT_PTR(PM_ITLB_MISS),
182 NULL
183};
184
185static const struct attribute_group power8_pmu_events_group = {
186 .name = "events",
187 .attrs = power8_events_attr,
188};
189
190static struct attribute *power8_pmu_caps_attrs[] = {
191 NULL
192};
193
194static struct attribute_group power8_pmu_caps_group = {
195 .name = "caps",
196 .attrs = power8_pmu_caps_attrs,
197};
198
199static const struct attribute_group *power8_pmu_attr_groups[] = {
200 &isa207_pmu_format_group,
201 &power8_pmu_events_group,
202 &power8_pmu_caps_group,
203 NULL,
204};
205
206static int power8_generic_events[] = {
207 [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
208 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
209 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
210 [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
211 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
212 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
213 [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
214 [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
215};
216
217static u64 power8_bhrb_filter_map(u64 branch_sample_type)
218{
219 u64 pmu_bhrb_filter = 0;
220
221 /* BHRB and regular PMU events share the same privilege state
222 * filter configuration. BHRB is always recorded along with a
223 * regular PMU event. As the privilege state filter is handled
224 * in the basic PMC configuration of the accompanying regular
225 * PMU event, we ignore any separate BHRB specific request.
226 */
227
228 /* No branch filter requested */
229 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
230 return pmu_bhrb_filter;
231
232 /* Invalid branch filter options - HW does not support */
233 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
234 return -1;
235
236 if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
237 return -1;
238
239 if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
240 return -1;
241
242 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
243 pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
244 return pmu_bhrb_filter;
245 }
246
247 /* Every thing else is unsupported */
248 return -1;
249}
250
251static void power8_config_bhrb(u64 pmu_bhrb_filter)
252{
253 pmu_bhrb_filter &= POWER8_MMCRA_BHRB_MASK;
254
255 /* Enable BHRB filter in PMU */
256 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
257}
258
259#define C(x) PERF_COUNT_HW_CACHE_##x
260
261/*
262 * Table of generalized cache-related events.
263 * 0 means not supported, -1 means nonsensical, other values
264 * are event codes.
265 */
266static u64 power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
267 [ C(L1D) ] = {
268 [ C(OP_READ) ] = {
269 [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
270 [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
271 },
272 [ C(OP_WRITE) ] = {
273 [ C(RESULT_ACCESS) ] = 0,
274 [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
275 },
276 [ C(OP_PREFETCH) ] = {
277 [ C(RESULT_ACCESS) ] = PM_L1_PREF,
278 [ C(RESULT_MISS) ] = 0,
279 },
280 },
281 [ C(L1I) ] = {
282 [ C(OP_READ) ] = {
283 [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
284 [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
285 },
286 [ C(OP_WRITE) ] = {
287 [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
288 [ C(RESULT_MISS) ] = -1,
289 },
290 [ C(OP_PREFETCH) ] = {
291 [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
292 [ C(RESULT_MISS) ] = 0,
293 },
294 },
295 [ C(LL) ] = {
296 [ C(OP_READ) ] = {
297 [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
298 [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
299 },
300 [ C(OP_WRITE) ] = {
301 [ C(RESULT_ACCESS) ] = PM_L2_ST,
302 [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
303 },
304 [ C(OP_PREFETCH) ] = {
305 [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
306 [ C(RESULT_MISS) ] = 0,
307 },
308 },
309 [ C(DTLB) ] = {
310 [ C(OP_READ) ] = {
311 [ C(RESULT_ACCESS) ] = 0,
312 [ C(RESULT_MISS) ] = PM_DTLB_MISS,
313 },
314 [ C(OP_WRITE) ] = {
315 [ C(RESULT_ACCESS) ] = -1,
316 [ C(RESULT_MISS) ] = -1,
317 },
318 [ C(OP_PREFETCH) ] = {
319 [ C(RESULT_ACCESS) ] = -1,
320 [ C(RESULT_MISS) ] = -1,
321 },
322 },
323 [ C(ITLB) ] = {
324 [ C(OP_READ) ] = {
325 [ C(RESULT_ACCESS) ] = 0,
326 [ C(RESULT_MISS) ] = PM_ITLB_MISS,
327 },
328 [ C(OP_WRITE) ] = {
329 [ C(RESULT_ACCESS) ] = -1,
330 [ C(RESULT_MISS) ] = -1,
331 },
332 [ C(OP_PREFETCH) ] = {
333 [ C(RESULT_ACCESS) ] = -1,
334 [ C(RESULT_MISS) ] = -1,
335 },
336 },
337 [ C(BPU) ] = {
338 [ C(OP_READ) ] = {
339 [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
340 [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
341 },
342 [ C(OP_WRITE) ] = {
343 [ C(RESULT_ACCESS) ] = -1,
344 [ C(RESULT_MISS) ] = -1,
345 },
346 [ C(OP_PREFETCH) ] = {
347 [ C(RESULT_ACCESS) ] = -1,
348 [ C(RESULT_MISS) ] = -1,
349 },
350 },
351 [ C(NODE) ] = {
352 [ C(OP_READ) ] = {
353 [ C(RESULT_ACCESS) ] = -1,
354 [ C(RESULT_MISS) ] = -1,
355 },
356 [ C(OP_WRITE) ] = {
357 [ C(RESULT_ACCESS) ] = -1,
358 [ C(RESULT_MISS) ] = -1,
359 },
360 [ C(OP_PREFETCH) ] = {
361 [ C(RESULT_ACCESS) ] = -1,
362 [ C(RESULT_MISS) ] = -1,
363 },
364 },
365};
366
367#undef C
368
369static struct power_pmu power8_pmu = {
370 .name = "POWER8",
371 .n_counter = MAX_PMU_COUNTERS,
372 .max_alternatives = MAX_ALT + 1,
373 .add_fields = ISA207_ADD_FIELDS,
374 .test_adder = ISA207_TEST_ADDER,
375 .compute_mmcr = isa207_compute_mmcr,
376 .config_bhrb = power8_config_bhrb,
377 .bhrb_filter_map = power8_bhrb_filter_map,
378 .get_constraint = isa207_get_constraint,
379 .get_alternatives = power8_get_alternatives,
380 .get_mem_data_src = isa207_get_mem_data_src,
381 .get_mem_weight = isa207_get_mem_weight,
382 .disable_pmc = isa207_disable_pmc,
383 .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
384 .n_generic = ARRAY_SIZE(power8_generic_events),
385 .generic_events = power8_generic_events,
386 .cache_events = &power8_cache_events,
387 .attr_groups = power8_pmu_attr_groups,
388 .bhrb_nr = 32,
389};
390
391int __init init_power8_pmu(void)
392{
393 int rc;
394 unsigned int pvr = mfspr(SPRN_PVR);
395
396 if (PVR_VER(pvr) != PVR_POWER8E && PVR_VER(pvr) != PVR_POWER8NVL &&
397 PVR_VER(pvr) != PVR_POWER8)
398 return -ENODEV;
399
400 rc = register_power_pmu(&power8_pmu);
401 if (rc)
402 return rc;
403
404 /* Tell userspace that EBB is supported */
405 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
406
407 if (cpu_has_feature(CPU_FTR_PMAO_BUG))
408 pr_info("PMAO restore workaround active.\n");
409
410 return 0;
411}
1/*
2 * Performance counter support for POWER8 processors.
3 *
4 * Copyright 2009 Paul Mackerras, IBM Corporation.
5 * Copyright 2013 Michael Ellerman, IBM Corporation.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#define pr_fmt(fmt) "power8-pmu: " fmt
14
15#include "isa207-common.h"
16
17/*
18 * Some power8 event codes.
19 */
20#define EVENT(_name, _code) _name = _code,
21
22enum {
23#include "power8-events-list.h"
24};
25
26#undef EVENT
27
28/* MMCRA IFM bits - POWER8 */
29#define POWER8_MMCRA_IFM1 0x0000000040000000UL
30#define POWER8_MMCRA_IFM2 0x0000000080000000UL
31#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
32
/* PowerISA v2.07 format attribute structure */
34extern struct attribute_group isa207_pmu_format_group;
35
36/* Table of alternatives, sorted by column 0 */
37static const unsigned int event_alternatives[][MAX_ALT] = {
38 { PM_MRK_ST_CMPL, PM_MRK_ST_CMPL_ALT },
39 { PM_BR_MRK_2PATH, PM_BR_MRK_2PATH_ALT },
40 { PM_L3_CO_MEPF, PM_L3_CO_MEPF_ALT },
41 { PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L2MISS_ALT },
42 { PM_CMPLU_STALL_ALT, PM_CMPLU_STALL },
43 { PM_BR_2PATH, PM_BR_2PATH_ALT },
44 { PM_INST_DISP, PM_INST_DISP_ALT },
45 { PM_RUN_CYC_ALT, PM_RUN_CYC },
46 { PM_MRK_FILT_MATCH, PM_MRK_FILT_MATCH_ALT },
47 { PM_LD_MISS_L1, PM_LD_MISS_L1_ALT },
48 { PM_RUN_INST_CMPL_ALT, PM_RUN_INST_CMPL },
49};
50
51/*
52 * Scan the alternatives table for a match and return the
53 * index into the alternatives table if found, else -1.
54 */
55static int find_alternative(u64 event)
56{
57 int i, j;
58
59 for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
60 if (event < event_alternatives[i][0])
61 break;
62
63 for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
64 if (event == event_alternatives[i][j])
65 return i;
66 }
67
68 return -1;
69}
70
71static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
72{
73 int i, j, num_alt = 0;
74 u64 alt_event;
75
76 alt[num_alt++] = event;
77
78 i = find_alternative(event);
79 if (i >= 0) {
80 /* Filter out the original event, it's already in alt[0] */
81 for (j = 0; j < MAX_ALT; ++j) {
82 alt_event = event_alternatives[i][j];
83 if (alt_event && alt_event != event)
84 alt[num_alt++] = alt_event;
85 }
86 }
87
88 if (flags & PPMU_ONLY_COUNT_RUN) {
89 /*
90 * We're only counting in RUN state, so PM_CYC is equivalent to
91 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
92 */
93 j = num_alt;
94 for (i = 0; i < num_alt; ++i) {
95 switch (alt[i]) {
96 case PM_CYC:
97 alt[j++] = PM_RUN_CYC;
98 break;
99 case PM_RUN_CYC:
100 alt[j++] = PM_CYC;
101 break;
102 case PM_INST_CMPL:
103 alt[j++] = PM_RUN_INST_CMPL;
104 break;
105 case PM_RUN_INST_CMPL:
106 alt[j++] = PM_INST_CMPL;
107 break;
108 }
109 }
110 num_alt = j;
111 }
112
113 return num_alt;
114}
115
116GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);
117GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC);
118GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL);
119GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL);
120GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN);
121GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED_CMPL);
122GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
123GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1);
124
125CACHE_EVENT_ATTR(L1-dcache-load-misses, PM_LD_MISS_L1);
126CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);
127
128CACHE_EVENT_ATTR(L1-dcache-prefetches, PM_L1_PREF);
129CACHE_EVENT_ATTR(L1-dcache-store-misses, PM_ST_MISS_L1);
130CACHE_EVENT_ATTR(L1-icache-load-misses, PM_L1_ICACHE_MISS);
131CACHE_EVENT_ATTR(L1-icache-loads, PM_INST_FROM_L1);
132CACHE_EVENT_ATTR(L1-icache-prefetches, PM_IC_PREF_WRITE);
133
134CACHE_EVENT_ATTR(LLC-load-misses, PM_DATA_FROM_L3MISS);
135CACHE_EVENT_ATTR(LLC-loads, PM_DATA_FROM_L3);
136CACHE_EVENT_ATTR(LLC-prefetches, PM_L3_PREF_ALL);
137CACHE_EVENT_ATTR(LLC-store-misses, PM_L2_ST_MISS);
138CACHE_EVENT_ATTR(LLC-stores, PM_L2_ST);
139
140CACHE_EVENT_ATTR(branch-load-misses, PM_BR_MPRED_CMPL);
141CACHE_EVENT_ATTR(branch-loads, PM_BRU_FIN);
142CACHE_EVENT_ATTR(dTLB-load-misses, PM_DTLB_MISS);
143CACHE_EVENT_ATTR(iTLB-load-misses, PM_ITLB_MISS);
144
145static struct attribute *power8_events_attr[] = {
146 GENERIC_EVENT_PTR(PM_CYC),
147 GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
148 GENERIC_EVENT_PTR(PM_CMPLU_STALL),
149 GENERIC_EVENT_PTR(PM_INST_CMPL),
150 GENERIC_EVENT_PTR(PM_BRU_FIN),
151 GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
152 GENERIC_EVENT_PTR(PM_LD_REF_L1),
153 GENERIC_EVENT_PTR(PM_LD_MISS_L1),
154
155 CACHE_EVENT_PTR(PM_LD_MISS_L1),
156 CACHE_EVENT_PTR(PM_LD_REF_L1),
157 CACHE_EVENT_PTR(PM_L1_PREF),
158 CACHE_EVENT_PTR(PM_ST_MISS_L1),
159 CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
160 CACHE_EVENT_PTR(PM_INST_FROM_L1),
161 CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
162 CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
163 CACHE_EVENT_PTR(PM_DATA_FROM_L3),
164 CACHE_EVENT_PTR(PM_L3_PREF_ALL),
165 CACHE_EVENT_PTR(PM_L2_ST_MISS),
166 CACHE_EVENT_PTR(PM_L2_ST),
167
168 CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
169 CACHE_EVENT_PTR(PM_BRU_FIN),
170
171 CACHE_EVENT_PTR(PM_DTLB_MISS),
172 CACHE_EVENT_PTR(PM_ITLB_MISS),
173 NULL
174};
175
176static struct attribute_group power8_pmu_events_group = {
177 .name = "events",
178 .attrs = power8_events_attr,
179};
180
181static const struct attribute_group *power8_pmu_attr_groups[] = {
182 &isa207_pmu_format_group,
183 &power8_pmu_events_group,
184 NULL,
185};
186
187static int power8_generic_events[] = {
188 [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
189 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
190 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
191 [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
192 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
193 [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
194 [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
195 [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
196};
197
198static u64 power8_bhrb_filter_map(u64 branch_sample_type)
199{
200 u64 pmu_bhrb_filter = 0;
201
202 /* BHRB and regular PMU events share the same privilege state
203 * filter configuration. BHRB is always recorded along with a
204 * regular PMU event. As the privilege state filter is handled
205 * in the basic PMC configuration of the accompanying regular
206 * PMU event, we ignore any separate BHRB specific request.
207 */
208
209 /* No branch filter requested */
210 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
211 return pmu_bhrb_filter;
212
213 /* Invalid branch filter options - HW does not support */
214 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
215 return -1;
216
217 if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
218 return -1;
219
220 if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
221 return -1;
222
223 if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
224 pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
225 return pmu_bhrb_filter;
226 }
227
228 /* Every thing else is unsupported */
229 return -1;
230}
231
232static void power8_config_bhrb(u64 pmu_bhrb_filter)
233{
234 /* Enable BHRB filter in PMU */
235 mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
236}
237
238#define C(x) PERF_COUNT_HW_CACHE_##x
239
240/*
241 * Table of generalized cache-related events.
242 * 0 means not supported, -1 means nonsensical, other values
243 * are event codes.
244 */
245static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
246 [ C(L1D) ] = {
247 [ C(OP_READ) ] = {
248 [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
249 [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
250 },
251 [ C(OP_WRITE) ] = {
252 [ C(RESULT_ACCESS) ] = 0,
253 [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
254 },
255 [ C(OP_PREFETCH) ] = {
256 [ C(RESULT_ACCESS) ] = PM_L1_PREF,
257 [ C(RESULT_MISS) ] = 0,
258 },
259 },
260 [ C(L1I) ] = {
261 [ C(OP_READ) ] = {
262 [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
263 [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
264 },
265 [ C(OP_WRITE) ] = {
266 [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
267 [ C(RESULT_MISS) ] = -1,
268 },
269 [ C(OP_PREFETCH) ] = {
270 [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
271 [ C(RESULT_MISS) ] = 0,
272 },
273 },
274 [ C(LL) ] = {
275 [ C(OP_READ) ] = {
276 [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
277 [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
278 },
279 [ C(OP_WRITE) ] = {
280 [ C(RESULT_ACCESS) ] = PM_L2_ST,
281 [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
282 },
283 [ C(OP_PREFETCH) ] = {
284 [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
285 [ C(RESULT_MISS) ] = 0,
286 },
287 },
288 [ C(DTLB) ] = {
289 [ C(OP_READ) ] = {
290 [ C(RESULT_ACCESS) ] = 0,
291 [ C(RESULT_MISS) ] = PM_DTLB_MISS,
292 },
293 [ C(OP_WRITE) ] = {
294 [ C(RESULT_ACCESS) ] = -1,
295 [ C(RESULT_MISS) ] = -1,
296 },
297 [ C(OP_PREFETCH) ] = {
298 [ C(RESULT_ACCESS) ] = -1,
299 [ C(RESULT_MISS) ] = -1,
300 },
301 },
302 [ C(ITLB) ] = {
303 [ C(OP_READ) ] = {
304 [ C(RESULT_ACCESS) ] = 0,
305 [ C(RESULT_MISS) ] = PM_ITLB_MISS,
306 },
307 [ C(OP_WRITE) ] = {
308 [ C(RESULT_ACCESS) ] = -1,
309 [ C(RESULT_MISS) ] = -1,
310 },
311 [ C(OP_PREFETCH) ] = {
312 [ C(RESULT_ACCESS) ] = -1,
313 [ C(RESULT_MISS) ] = -1,
314 },
315 },
316 [ C(BPU) ] = {
317 [ C(OP_READ) ] = {
318 [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
319 [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
320 },
321 [ C(OP_WRITE) ] = {
322 [ C(RESULT_ACCESS) ] = -1,
323 [ C(RESULT_MISS) ] = -1,
324 },
325 [ C(OP_PREFETCH) ] = {
326 [ C(RESULT_ACCESS) ] = -1,
327 [ C(RESULT_MISS) ] = -1,
328 },
329 },
330 [ C(NODE) ] = {
331 [ C(OP_READ) ] = {
332 [ C(RESULT_ACCESS) ] = -1,
333 [ C(RESULT_MISS) ] = -1,
334 },
335 [ C(OP_WRITE) ] = {
336 [ C(RESULT_ACCESS) ] = -1,
337 [ C(RESULT_MISS) ] = -1,
338 },
339 [ C(OP_PREFETCH) ] = {
340 [ C(RESULT_ACCESS) ] = -1,
341 [ C(RESULT_MISS) ] = -1,
342 },
343 },
344};
345
346#undef C
347
348static struct power_pmu power8_pmu = {
349 .name = "POWER8",
350 .n_counter = MAX_PMU_COUNTERS,
351 .max_alternatives = MAX_ALT + 1,
352 .add_fields = ISA207_ADD_FIELDS,
353 .test_adder = ISA207_TEST_ADDER,
354 .compute_mmcr = isa207_compute_mmcr,
355 .config_bhrb = power8_config_bhrb,
356 .bhrb_filter_map = power8_bhrb_filter_map,
357 .get_constraint = isa207_get_constraint,
358 .get_alternatives = power8_get_alternatives,
359 .disable_pmc = isa207_disable_pmc,
360 .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
361 .n_generic = ARRAY_SIZE(power8_generic_events),
362 .generic_events = power8_generic_events,
363 .cache_events = &power8_cache_events,
364 .attr_groups = power8_pmu_attr_groups,
365 .bhrb_nr = 32,
366};
367
368static int __init init_power8_pmu(void)
369{
370 int rc;
371
372 if (!cur_cpu_spec->oprofile_cpu_type ||
373 strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
374 return -ENODEV;
375
376 rc = register_power_pmu(&power8_pmu);
377 if (rc)
378 return rc;
379
380 /* Tell userspace that EBB is supported */
381 cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;
382
383 if (cpu_has_feature(CPU_FTR_PMAO_BUG))
384 pr_info("PMAO restore workaround active.\n");
385
386 return 0;
387}
388early_initcall(init_power8_pmu);