// SPDX-License-Identifier: GPL-2.0
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users.
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>

26/*
27 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
28 * offset from MSR_P4_BSU_ESCR0.
29 *
30 * It will be the max for all platforms (for now)
31 */
32#define NMI_MAX_COUNTER_BITS 66
33
34/*
35 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
36 * evtsel_nmi_owner tracks the ownership of the event selection
37 * - different performance counters/ event selection may be reserved for
38 * different subsystems this reservation system just tries to coordinate
39 * things a little
40 */
41static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
42static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
43
44/* converts an msr to an appropriate reservation bit */
45static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
46{
47 /* returns the bit offset of the performance counter register */
48 switch (boot_cpu_data.x86_vendor) {
49 case X86_VENDOR_HYGON:
50 case X86_VENDOR_AMD:
51 if (msr >= MSR_F15H_PERF_CTR)
52 return (msr - MSR_F15H_PERF_CTR) >> 1;
53 return msr - MSR_K7_PERFCTR0;
54 case X86_VENDOR_INTEL:
55 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
56 return msr - MSR_ARCH_PERFMON_PERFCTR0;
57
58 switch (boot_cpu_data.x86) {
59 case 6:
60 return msr - MSR_P6_PERFCTR0;
61 case 11:
62 return msr - MSR_KNC_PERFCTR0;
63 case 15:
64 return msr - MSR_P4_BPU_PERFCTR0;
65 }
66 break;
67 case X86_VENDOR_ZHAOXIN:
68 case X86_VENDOR_CENTAUR:
69 return msr - MSR_ARCH_PERFMON_PERFCTR0;
70 }
71 return 0;
72}
73
74/*
75 * converts an msr to an appropriate reservation bit
76 * returns the bit offset of the event selection register
77 */
78static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
79{
80 /* returns the bit offset of the event selection register */
81 switch (boot_cpu_data.x86_vendor) {
82 case X86_VENDOR_HYGON:
83 case X86_VENDOR_AMD:
84 if (msr >= MSR_F15H_PERF_CTL)
85 return (msr - MSR_F15H_PERF_CTL) >> 1;
86 return msr - MSR_K7_EVNTSEL0;
87 case X86_VENDOR_INTEL:
88 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
89 return msr - MSR_ARCH_PERFMON_EVENTSEL0;
90
91 switch (boot_cpu_data.x86) {
92 case 6:
93 return msr - MSR_P6_EVNTSEL0;
94 case 11:
95 return msr - MSR_KNC_EVNTSEL0;
96 case 15:
97 return msr - MSR_P4_BSU_ESCR0;
98 }
99 break;
100 case X86_VENDOR_ZHAOXIN:
101 case X86_VENDOR_CENTAUR:
102 return msr - MSR_ARCH_PERFMON_EVENTSEL0;
103 }
104 return 0;
105
106}
107
108int reserve_perfctr_nmi(unsigned int msr)
109{
110 unsigned int counter;
111
112 counter = nmi_perfctr_msr_to_bit(msr);
113 /* register not managed by the allocator? */
114 if (counter > NMI_MAX_COUNTER_BITS)
115 return 1;
116
117 if (!test_and_set_bit(counter, perfctr_nmi_owner))
118 return 1;
119 return 0;
120}
121EXPORT_SYMBOL(reserve_perfctr_nmi);
122
123void release_perfctr_nmi(unsigned int msr)
124{
125 unsigned int counter;
126
127 counter = nmi_perfctr_msr_to_bit(msr);
128 /* register not managed by the allocator? */
129 if (counter > NMI_MAX_COUNTER_BITS)
130 return;
131
132 clear_bit(counter, perfctr_nmi_owner);
133}
134EXPORT_SYMBOL(release_perfctr_nmi);
135
136int reserve_evntsel_nmi(unsigned int msr)
137{
138 unsigned int counter;
139
140 counter = nmi_evntsel_msr_to_bit(msr);
141 /* register not managed by the allocator? */
142 if (counter > NMI_MAX_COUNTER_BITS)
143 return 1;
144
145 if (!test_and_set_bit(counter, evntsel_nmi_owner))
146 return 1;
147 return 0;
148}
149EXPORT_SYMBOL(reserve_evntsel_nmi);
150
151void release_evntsel_nmi(unsigned int msr)
152{
153 unsigned int counter;
154
155 counter = nmi_evntsel_msr_to_bit(msr);
156 /* register not managed by the allocator? */
157 if (counter > NMI_MAX_COUNTER_BITS)
158 return;
159
160 clear_bit(counter, evntsel_nmi_owner);
161}
162EXPORT_SYMBOL(release_evntsel_nmi);
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>

25/*
26 * this number is calculated from Intel's MSR_P4_CRU_ESCR5 register and it's
27 * offset from MSR_P4_BSU_ESCR0.
28 *
29 * It will be the max for all platforms (for now)
30 */
31#define NMI_MAX_COUNTER_BITS 66
32
33/*
34 * perfctr_nmi_owner tracks the ownership of the perfctr registers:
35 * evtsel_nmi_owner tracks the ownership of the event selection
36 * - different performance counters/ event selection may be reserved for
37 * different subsystems this reservation system just tries to coordinate
38 * things a little
39 */
40static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
41static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);
42
43/* converts an msr to an appropriate reservation bit */
44static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
45{
46 /* returns the bit offset of the performance counter register */
47 switch (boot_cpu_data.x86_vendor) {
48 case X86_VENDOR_AMD:
49 if (msr >= MSR_F15H_PERF_CTR)
50 return (msr - MSR_F15H_PERF_CTR) >> 1;
51 return msr - MSR_K7_PERFCTR0;
52 case X86_VENDOR_INTEL:
53 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
54 return msr - MSR_ARCH_PERFMON_PERFCTR0;
55
56 switch (boot_cpu_data.x86) {
57 case 6:
58 return msr - MSR_P6_PERFCTR0;
59 case 11:
60 return msr - MSR_KNC_PERFCTR0;
61 case 15:
62 return msr - MSR_P4_BPU_PERFCTR0;
63 }
64 }
65 return 0;
66}
67
68/*
69 * converts an msr to an appropriate reservation bit
70 * returns the bit offset of the event selection register
71 */
72static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
73{
74 /* returns the bit offset of the event selection register */
75 switch (boot_cpu_data.x86_vendor) {
76 case X86_VENDOR_AMD:
77 if (msr >= MSR_F15H_PERF_CTL)
78 return (msr - MSR_F15H_PERF_CTL) >> 1;
79 return msr - MSR_K7_EVNTSEL0;
80 case X86_VENDOR_INTEL:
81 if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
82 return msr - MSR_ARCH_PERFMON_EVENTSEL0;
83
84 switch (boot_cpu_data.x86) {
85 case 6:
86 return msr - MSR_P6_EVNTSEL0;
87 case 11:
88 return msr - MSR_KNC_EVNTSEL0;
89 case 15:
90 return msr - MSR_P4_BSU_ESCR0;
91 }
92 }
93 return 0;
94
95}
96
97/* checks for a bit availability (hack for oprofile) */
98int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
99{
100 BUG_ON(counter > NMI_MAX_COUNTER_BITS);
101
102 return !test_bit(counter, perfctr_nmi_owner);
103}
104EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);
105
106int reserve_perfctr_nmi(unsigned int msr)
107{
108 unsigned int counter;
109
110 counter = nmi_perfctr_msr_to_bit(msr);
111 /* register not managed by the allocator? */
112 if (counter > NMI_MAX_COUNTER_BITS)
113 return 1;
114
115 if (!test_and_set_bit(counter, perfctr_nmi_owner))
116 return 1;
117 return 0;
118}
119EXPORT_SYMBOL(reserve_perfctr_nmi);
120
121void release_perfctr_nmi(unsigned int msr)
122{
123 unsigned int counter;
124
125 counter = nmi_perfctr_msr_to_bit(msr);
126 /* register not managed by the allocator? */
127 if (counter > NMI_MAX_COUNTER_BITS)
128 return;
129
130 clear_bit(counter, perfctr_nmi_owner);
131}
132EXPORT_SYMBOL(release_perfctr_nmi);
133
134int reserve_evntsel_nmi(unsigned int msr)
135{
136 unsigned int counter;
137
138 counter = nmi_evntsel_msr_to_bit(msr);
139 /* register not managed by the allocator? */
140 if (counter > NMI_MAX_COUNTER_BITS)
141 return 1;
142
143 if (!test_and_set_bit(counter, evntsel_nmi_owner))
144 return 1;
145 return 0;
146}
147EXPORT_SYMBOL(reserve_evntsel_nmi);
148
149void release_evntsel_nmi(unsigned int msr)
150{
151 unsigned int counter;
152
153 counter = nmi_evntsel_msr_to_bit(msr);
154 /* register not managed by the allocator? */
155 if (counter > NMI_MAX_COUNTER_BITS)
156 return;
157
158 clear_bit(counter, evntsel_nmi_owner);
159}
160EXPORT_SYMBOL(release_evntsel_nmi);