1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2018 SiFive
4 * Copyright (C) 2018 Andes Technology Corporation
5 *
6 */
7
8#ifndef _ASM_RISCV_PERF_EVENT_H
9#define _ASM_RISCV_PERF_EVENT_H
10
11#include <linux/perf_event.h>
12#include <linux/ptrace.h>
13#include <linux/interrupt.h>
14
15#ifdef CONFIG_RISCV_BASE_PMU
16#define RISCV_BASE_COUNTERS 2
17
18/*
19 * The RISCV_MAX_COUNTERS parameter should be specified.
20 */
21
22#define RISCV_MAX_COUNTERS 2
23
24/*
25 * These are the indexes of bits in counteren register *minus* 1,
26 * except for cycle. It would be coherent if it can directly mapped
27 * to counteren bit definition, but there is a *time* register at
28 * counteren[1]. Per-cpu structure is scarce resource here.
29 *
30 * According to the spec, an implementation can support counter up to
31 * mhpmcounter31, but many high-end processors has at most 6 general
32 * PMCs, we give the definition to MHPMCOUNTER8 here.
33 */
34#define RISCV_PMU_CYCLE 0
35#define RISCV_PMU_INSTRET 1
36#define RISCV_PMU_MHPMCOUNTER3 2
37#define RISCV_PMU_MHPMCOUNTER4 3
38#define RISCV_PMU_MHPMCOUNTER5 4
39#define RISCV_PMU_MHPMCOUNTER6 5
40#define RISCV_PMU_MHPMCOUNTER7 6
41#define RISCV_PMU_MHPMCOUNTER8 7
42
43#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
44
/* Per-CPU bookkeeping for the events currently scheduled on this hart. */
struct cpu_hw_events {
	/* number of currently enabled events */
	int n_events;
	/* currently enabled events, indexed by counter slot */
	struct perf_event *events[RISCV_MAX_COUNTERS];
	/* vendor-defined PMU data */
	void *platform;
};
53
/*
 * Arch-level PMU description: event-mapping tables, counter geometry,
 * and the overflow interrupt handler. Filled in by the platform/vendor
 * PMU implementation.
 */
struct riscv_pmu {
	struct pmu *pmu;

	/* generic hw/cache events table */
	const int *hw_events;
	const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
		[PERF_COUNT_HW_CACHE_OP_MAX]
		[PERF_COUNT_HW_CACHE_RESULT_MAX];
	/* method used to map a perf config to a hw/cache event index */
	int (*map_hw_event)(u64 config);
	int (*map_cache_event)(u64 config);

	/* max generic hw events in map */
	int max_events;
	/* total number of counters: 2 (base) + x (general) */
	int num_counters;
	/* bit width of each counter */
	int counter_width;

	/* vendor-defined PMU features */
	void *platform;

	/* counter-overflow interrupt handler and its IRQ number */
	irqreturn_t (*handle_irq)(int irq_num, void *dev);
	int irq;
};
79
80#endif
81#ifdef CONFIG_PERF_EVENTS
82#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
83#endif
84
85#endif /* _ASM_RISCV_PERF_EVENT_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (C) 2018 SiFive
4 * Copyright (C) 2018 Andes Technology Corporation
5 *
6 */
7
8#ifndef _ASM_RISCV_PERF_EVENT_H
9#define _ASM_RISCV_PERF_EVENT_H
10
11#include <linux/perf_event.h>
/* Expose the raw pt_regs to BPF as the user-visible register struct. */
#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs

/*
 * Synthesize a minimal pt_regs snapshot for the current call site so
 * perf can attribute samples taken from non-exception context: pc from
 * the caller-supplied __ip, frame pointer (s0) and stack pointer from
 * the current frame.
 * NOTE(review): status is set to SR_PP — confirm this constant matches
 * the csr.h definition for this kernel version (vs. SR_SPP).
 */
#define perf_arch_fetch_caller_regs(regs, __ip) { \
	(regs)->epc = (__ip); \
	(regs)->s0 = (unsigned long) __builtin_frame_address(0); \
	(regs)->sp = current_stack_pointer; \
	(regs)->status = SR_PP; \
}
20#endif /* _ASM_RISCV_PERF_EVENT_H */