v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * local apic based NMI watchdog for various CPUs.
 *
 * This file also handles reservation of performance counters for coordination
 * with other users (like oprofile).
 *
 * Note that these events normally don't tick when the CPU idles. This means
 * the frequency varies with CPU load.
 *
 * Original code for K7/P6 written by Keith Owens
 *
 */

#include <linux/percpu.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/smp.h>
#include <asm/nmi.h>
#include <linux/kprobes.h>

#include <asm/apic.h>
#include <asm/perf_event.h>

/*
 * This number is derived from the offset of Intel's MSR_P4_CRU_ESCR5
 * register from MSR_P4_BSU_ESCR0.
 *
 * It will be the max for all platforms (for now)
 */
#define NMI_MAX_COUNTER_BITS 66
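
/*
 * Worked example, assuming the usual P4 MSR numbering
 * (MSR_P4_BSU_ESCR0 = 0x3a0, MSR_P4_CRU_ESCR5 = 0x3e1): the largest
 * event-select bit offset handed out below is 0x3e1 - 0x3a0 = 65,
 * so 66 reservation bits are enough.
 */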

/*
 * perfctr_nmi_owner tracks the ownership of the perfctr registers;
 * evntsel_nmi_owner tracks the ownership of the event selection registers.
 * Different performance counters / event selection registers may be reserved
 * by different subsystems; this reservation system just tries to coordinate
 * things a little.
 */
static DECLARE_BITMAP(perfctr_nmi_owner, NMI_MAX_COUNTER_BITS);
static DECLARE_BITMAP(evntsel_nmi_owner, NMI_MAX_COUNTER_BITS);

/* converts an msr to an appropriate reservation bit */
static inline unsigned int nmi_perfctr_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the performance counter register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
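		/*
		 * Fam 15h+ exposes its PERF_CTL/PERF_CTR MSRs interleaved in
		 * pairs, so halving the offset from the first counter MSR
		 * yields the counter index.
		 */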
		if (msr >= MSR_F15H_PERF_CTR)
			return (msr - MSR_F15H_PERF_CTR) >> 1;
		return msr - MSR_K7_PERFCTR0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_PERFCTR0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_PERFCTR0;
		case 11:
			return msr - MSR_KNC_PERFCTR0;
		case 15:
			return msr - MSR_P4_BPU_PERFCTR0;
		}
	}
	return 0;
}

/*
 * converts an msr to an appropriate reservation bit
 * returns the bit offset of the event selection register
 */
static inline unsigned int nmi_evntsel_msr_to_bit(unsigned int msr)
{
	/* returns the bit offset of the event selection register */
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
		if (msr >= MSR_F15H_PERF_CTL)
			return (msr - MSR_F15H_PERF_CTL) >> 1;
		return msr - MSR_K7_EVNTSEL0;
	case X86_VENDOR_INTEL:
		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
			return msr - MSR_ARCH_PERFMON_EVENTSEL0;

		switch (boot_cpu_data.x86) {
		case 6:
			return msr - MSR_P6_EVNTSEL0;
		case 11:
			return msr - MSR_KNC_EVNTSEL0;
		case 15:
			return msr - MSR_P4_BSU_ESCR0;
		}
	}
	return 0;
}

/* checks for bit availability (hack for oprofile) */
int avail_to_resrv_perfctr_nmi_bit(unsigned int counter)
{
	BUG_ON(counter > NMI_MAX_COUNTER_BITS);

	return !test_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(avail_to_resrv_perfctr_nmi_bit);

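/*
 * Reservation helpers. Note the return convention: reserve_*() return 1 when
 * the caller now owns the register (or when the MSR is not managed by this
 * allocator at all) and 0 when it is already reserved by someone else.
 */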
int reserve_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, perfctr_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_perfctr_nmi);

void release_perfctr_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_perfctr_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, perfctr_nmi_owner);
}
EXPORT_SYMBOL(release_perfctr_nmi);

int reserve_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return 1;

	if (!test_and_set_bit(counter, evntsel_nmi_owner))
		return 1;
	return 0;
}
EXPORT_SYMBOL(reserve_evntsel_nmi);

void release_evntsel_nmi(unsigned int msr)
{
	unsigned int counter;

	counter = nmi_evntsel_msr_to_bit(msr);
	/* register not managed by the allocator? */
	if (counter > NMI_MAX_COUNTER_BITS)
		return;

	clear_bit(counter, evntsel_nmi_owner);
}
EXPORT_SYMBOL(release_evntsel_nmi);
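
A minimal usage sketch of the reservation API above, for illustration only: a
hypothetical in-kernel caller that wants to drive counter 0 of a K7-style PMU
reserves both the event-select and counter MSRs before programming them, and
releases them afterwards. The example_* function names are invented for this
sketch; the MSR constants and reserve/release helpers are the ones used or
defined in the listing.

#include <linux/errno.h>

/* Hypothetical caller, not part of this file: grab the first K7 counter. */
static int example_grab_counter0(void)
{
	if (!reserve_evntsel_nmi(MSR_K7_EVNTSEL0))
		return -EBUSY;		/* event select already owned */

	if (!reserve_perfctr_nmi(MSR_K7_PERFCTR0)) {
		release_evntsel_nmi(MSR_K7_EVNTSEL0);
		return -EBUSY;		/* counter already owned */
	}

	/* ... program MSR_K7_EVNTSEL0 / MSR_K7_PERFCTR0 with wrmsrl() ... */
	return 0;
}

/* Hypothetical caller: give both registers back when done. */
static void example_drop_counter0(void)
{
	release_perfctr_nmi(MSR_K7_PERFCTR0);
	release_evntsel_nmi(MSR_K7_EVNTSEL0);
}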