// SPDX-License-Identifier: GPL-2.0
/*
 * Local APIC based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens.
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <linux/kprobes.h>

#include <asm/nmi.h>
#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * This number is calculated from Intel's MSR_P4_CRU_ESCR5 register and its
 * offset from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now).
 */
#define NMI_MAX_COUNTER_BITS 66
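
/*
 * A quick check of that arithmetic, assuming the usual msr-index.h values
 * (MSR_P4_BSU_ESCR0 = 0x3a0, MSR_P4_CRU_ESCR5 = 0x3e2):
 *
 *	MSR_P4_CRU_ESCR5 - MSR_P4_BSU_ESCR0 = 0x3e2 - 0x3a0 = 0x42 = 66
 *
 * which is where the 66 above comes from.
 */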

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 *
 * Different performance counters / event selection registers may be reserved
 * by different subsystems; this reservation system just tries to coordinate
 * things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTR)
			return (msr - MSR_F15H_PERF_CTR) >> 1;
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 11:
			return msr - MSR_KNC_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
		fallthrough;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_PERFCTR0;
	}
	return 0;
}
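
/*
 * Worked example for the F15h branch above, assuming the conventional MSR
 * numbering (MSR_F15H_PERF_CTL = 0xc0010200, MSR_F15H_PERF_CTR = 0xc0010201):
 * the CTL/CTR registers are interleaved, so the offset is halved:
 *
 *	CTR0 = 0xc0010201: (0xc0010201 - 0xc0010201) >> 1 = bit 0
 *	CTR1 = 0xc0010203: (0xc0010203 - 0xc0010201) >> 1 = bit 1
 *
 * whereas the legacy K7 counters are contiguous and need no shift.
 */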

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTL)
			return (msr - MSR_F15H_PERF_CTL) >> 1;
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 11:
			return msr - MSR_KNC_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
		fallthrough;
	case X86_VENDOR_ZHAOXIN:
	case X86_VENDOR_CENTAUR:
		return msr - MSR_ARCH_PERFMON_EVENTSEL0;
	}
	return 0;
}

/* checks whether a counter bit is available (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	/* valid bit offsets are 0 .. NMI_MAX_COUNTER_BITS - 1 */
	BUG_ON(counter >= NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

/* returns 1 on success (or if the MSR is not managed here), 0 if already taken */
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter >= NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter >= NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

/* same return convention as reserve_perfctr_nmi() */
int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter >= NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter >= NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
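
/*
 * Illustrative sketch only, not part of the original file: how a subsystem
 * like oprofile might claim the architectural counter-0 pair before
 * programming it, backing out cleanly if either half is already owned.
 * The function name is made up for the example; the reserve_* helpers
 * above return 1 on success and 0 if the register is already reserved.
 */
#if 0
static int example_claim_arch_counter0(void)
{
	if (!reserve_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0))
		return -EBUSY;

	if (!reserve_evntsel_nmi(MSR_ARCH_PERFMON_EVENTSEL0)) {
		/* roll back the half we did manage to reserve */
		release_perfctr_nmi(MSR_ARCH_PERFMON_PERFCTR0);
		return -EBUSY;
	}

	/* ... program the event select and counter MSRs here ... */
	return 0;
}
#endif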