Linux kernel MSR cross-CPU helper library (arch/x86/lib/msr-smp.c).

Copy 1 of 2: kernel v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/export.h>
  3#include <linux/preempt.h>
  4#include <linux/smp.h>
  5#include <linux/completion.h>
  6#include <asm/msr.h>
  7
  8static void __rdmsr_on_cpu(void *info)
  9{
 10	struct msr_info *rv = info;
 11	struct msr *reg;
 12	int this_cpu = raw_smp_processor_id();
 13
 14	if (rv->msrs)
 15		reg = per_cpu_ptr(rv->msrs, this_cpu);
 16	else
 17		reg = &rv->reg;
 18
 19	rdmsr(rv->msr_no, reg->l, reg->h);
 20}
 21
 22static void __wrmsr_on_cpu(void *info)
 23{
 24	struct msr_info *rv = info;
 25	struct msr *reg;
 26	int this_cpu = raw_smp_processor_id();
 27
 28	if (rv->msrs)
 29		reg = per_cpu_ptr(rv->msrs, this_cpu);
 30	else
 31		reg = &rv->reg;
 32
 33	wrmsr(rv->msr_no, reg->l, reg->h);
 34}
 35
 36int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 37{
 38	int err;
 39	struct msr_info rv;
 40
 41	memset(&rv, 0, sizeof(rv));
 42
 43	rv.msr_no = msr_no;
 44	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 45	*l = rv.reg.l;
 46	*h = rv.reg.h;
 47
 48	return err;
 49}
 50EXPORT_SYMBOL(rdmsr_on_cpu);
 51
 52int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
 53{
 54	int err;
 55	struct msr_info rv;
 56
 57	memset(&rv, 0, sizeof(rv));
 58
 59	rv.msr_no = msr_no;
 60	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
 61	*q = rv.reg.q;
 62
 63	return err;
 64}
 65EXPORT_SYMBOL(rdmsrl_on_cpu);
 66
 67int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 68{
 69	int err;
 70	struct msr_info rv;
 71
 72	memset(&rv, 0, sizeof(rv));
 73
 74	rv.msr_no = msr_no;
 75	rv.reg.l = l;
 76	rv.reg.h = h;
 77	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 78
 79	return err;
 80}
 81EXPORT_SYMBOL(wrmsr_on_cpu);
 82
 83int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 84{
 85	int err;
 86	struct msr_info rv;
 87
 88	memset(&rv, 0, sizeof(rv));
 89
 90	rv.msr_no = msr_no;
 91	rv.reg.q = q;
 92
 93	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
 94
 95	return err;
 96}
 97EXPORT_SYMBOL(wrmsrl_on_cpu);
 98
/*
 * Run @msr_func (one of the __{rd,wr}msr_on_cpu() callbacks) on every
 * CPU in @mask, transferring MSR @msr_no to/from the per-CPU @msrs
 * array.
 *
 * NOTE(review): smp_call_function_many() does not invoke the callback
 * on the calling CPU — the explicit local call below handles that case.
 */
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs	  = msrs;
	rv.msr_no = msr_no;

	/* Disable preemption so the current CPU cannot change under us. */
	this_cpu = get_cpu();

	/* The cross-call below skips the local CPU; run it here if needed. */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}
119
/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       per-CPU array receiving each CPU's MSR value
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
132
/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       per-CPU array supplying each CPU's value to write
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
146
/*
 * Bundles an msr_info with a completion so that the asynchronous
 * rdmsr_safe_on_cpu() caller can sleep until the remote read finishes.
 */
struct msr_info_completion {
	struct msr_info		msr;	/* request and result */
	struct completion	done;	/* signalled by the IPI callback */
};
151
/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	/* rdmsr_safe() returns an error instead of faulting on a bad MSR. */
	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
	/* Wake the rdmsr_safe_on_cpu() caller waiting on rv->done. */
	complete(&rv->done);
}
161
162static void __wrmsr_safe_on_cpu(void *info)
163{
164	struct msr_info *rv = info;
165
166	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
167}
168
/*
 * Safely read MSR @msr_no on @cpu, returning the halves through @l/@h.
 *
 * Returns the cross-call error if the IPI could not be sent, otherwise
 * the rdmsr_safe() result from the target CPU (0 on success).
 *
 * NOTE(review): uses an async cross-call plus a completion, so the
 * caller sleeps instead of spinning while the remote CPU does the read
 * — presumably why this differs from the synchronous variants; confirm
 * against kernel/smp.c semantics.
 */
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd;
	int err;

	/* csd is separate from rv, so the memset below does not touch it. */
	INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		/* Callback signals rv.done once the remote read is done. */
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	/* On failure these are the zeroed values from the memset above. */
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
192
193int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
194{
195	int err;
196	struct msr_info rv;
197
198	memset(&rv, 0, sizeof(rv));
199
200	rv.msr_no = msr_no;
201	rv.reg.l = l;
202	rv.reg.h = h;
203	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
204
205	return err ? err : rv.err;
206}
207EXPORT_SYMBOL(wrmsr_safe_on_cpu);
208
209int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
210{
211	int err;
212	struct msr_info rv;
213
214	memset(&rv, 0, sizeof(rv));
215
216	rv.msr_no = msr_no;
217	rv.reg.q = q;
218
219	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
220
221	return err ? err : rv.err;
222}
223EXPORT_SYMBOL(wrmsrl_safe_on_cpu);
224
225int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
226{
227	u32 low, high;
228	int err;
229
230	err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
231	*q = (u64)high << 32 | low;
232
233	return err;
234}
235EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
236
/*
 * These variants are significantly slower, but allows control over
 * the entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	/* rdmsr_safe_regs() reports failure via the return value. */
	rv->err = rdmsr_safe_regs(rv->regs);
}
247
248static void __wrmsr_safe_regs_on_cpu(void *info)
249{
250	struct msr_regs_info *rv = info;
251
252	rv->err = wrmsr_safe_regs(rv->regs);
253}
254
255int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
256{
257	int err;
258	struct msr_regs_info rv;
259
260	rv.regs   = regs;
261	rv.err    = -EIO;
262	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);
263
264	return err ? err : rv.err;
265}
266EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
267
268int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
269{
270	int err;
271	struct msr_regs_info rv;
272
273	rv.regs = regs;
274	rv.err  = -EIO;
275	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);
276
277	return err ? err : rv.err;
278}
279EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);
Copy 2 of 2: kernel v6.2 (content identical to the v6.8 copy above)
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/export.h>
  3#include <linux/preempt.h>
  4#include <linux/smp.h>
  5#include <linux/completion.h>
  6#include <asm/msr.h>
  7
/*
 * IPI callback: read MSR rv->msr_no on the CPU this function runs on.
 * The result is stored either in the caller-supplied per-CPU array
 * (rv->msrs, slot of the executing CPU) or in the embedded rv->reg
 * when no array was provided.
 */
static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

/*
 * IPI callback: write MSR rv->msr_no on the CPU this function runs on,
 * taking the value from the per-CPU array slot (rv->msrs) or from the
 * embedded rv->reg when no array was provided.
 */
static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}
 35
/*
 * Read MSR @msr_no on @cpu via a synchronous cross-call; halves are
 * returned through @l/@h.  Returns the smp_call_function_single()
 * error code (0 on success).
 */
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

/*
 * Read MSR @msr_no on @cpu via a synchronous cross-call; the full
 * 64-bit value is returned through @q.  Returns the
 * smp_call_function_single() error code (0 on success).
 */
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*q = rv.reg.q;

	return err;
}
EXPORT_SYMBOL(rdmsrl_on_cpu);

/*
 * Write the value @h:@l to MSR @msr_no on @cpu via a synchronous
 * cross-call.  Returns the smp_call_function_single() error code
 * (0 on success).
 */
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);

/*
 * Write the 64-bit value @q to MSR @msr_no on @cpu via a synchronous
 * cross-call.  Returns the smp_call_function_single() error code
 * (0 on success).
 */
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsrl_on_cpu);
 98
/*
 * Run @msr_func (one of the __{rd,wr}msr_on_cpu() callbacks) on every
 * CPU in @mask, transferring MSR @msr_no to/from the per-CPU @msrs
 * array.
 *
 * NOTE(review): smp_call_function_many() does not invoke the callback
 * on the calling CPU — the explicit local call below handles that case.
 */
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs	  = msrs;
	rv.msr_no = msr_no;

	/* Disable preemption so the current CPU cannot change under us. */
	this_cpu = get_cpu();

	/* The cross-call below skips the local CPU; run it here if needed. */
	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}

/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       per-CPU array receiving each CPU's MSR value
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       per-CPU array supplying each CPU's value to write
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);

/*
 * Bundles an msr_info with a completion so that the asynchronous
 * rdmsr_safe_on_cpu() caller can sleep until the remote read finishes.
 */
struct msr_info_completion {
	struct msr_info		msr;	/* request and result */
	struct completion	done;	/* signalled by the IPI callback */
};
151
/* These "safe" variants are slower and should be used when the target MSR
   may not actually exist. */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	/* rdmsr_safe() returns an error instead of faulting on a bad MSR. */
	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
	/* Wake the rdmsr_safe_on_cpu() caller waiting on rv->done. */
	complete(&rv->done);
}

/*
 * IPI callback for the "safe" write path: attempt the MSR write and
 * record the result in rv->err instead of faulting on a bad MSR.
 */
static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}
168
/*
 * Safely read MSR @msr_no on @cpu, returning the halves through @l/@h.
 *
 * Returns the cross-call error if the IPI could not be sent, otherwise
 * the rdmsr_safe() result from the target CPU (0 on success).
 *
 * NOTE(review): uses an async cross-call plus a completion, so the
 * caller sleeps instead of spinning while the remote CPU does the read
 * — presumably why this differs from the synchronous variants; confirm
 * against kernel/smp.c semantics.
 */
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd;
	int err;

	/* csd is separate from rv, so the memset below does not touch it. */
	INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		/* Callback signals rv.done once the remote read is done. */
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	/* On failure these are the zeroed values from the memset above. */
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

/*
 * Safely write the value @h:@l to MSR @msr_no on @cpu.  Returns the
 * cross-call error if the IPI failed, otherwise the wrmsr_safe()
 * result from the target CPU (0 on success).
 */
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);

/*
 * Safely write the 64-bit value @q to MSR @msr_no on @cpu.  Returns
 * the cross-call error if the IPI failed, otherwise the wrmsr_safe()
 * result from the target CPU (0 on success).
 */
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsrl_safe_on_cpu);

/*
 * Safely read MSR @msr_no on @cpu as a single 64-bit value through @q.
 * Returns the rdmsr_safe_on_cpu() result (0 on success).
 */
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	u32 low, high;
	int err;

	err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
	*q = (u64)high << 32 | low;

	return err;
}
EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
236
/*
 * These variants are significantly slower, but allows control over
 * the entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	/* rdmsr_safe_regs() reports failure via the return value. */
	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	/* wrmsr_safe_regs() reports failure via the return value. */
	rv->err = wrmsr_safe_regs(rv->regs);
}

/*
 * Safely read an MSR on @cpu with full control over the 32-bit GPR set
 * passed in @regs (modified in place with the result).  Returns the
 * cross-call error if the IPI failed, otherwise the callback's result
 * (0 on success).
 */
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	int err;
	struct msr_regs_info rv;

	rv.regs   = regs;
	rv.err    = -EIO;	/* overwritten by the callback */
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

/*
 * Safely write an MSR on @cpu with full control over the 32-bit GPR
 * set passed in @regs.  Returns the cross-call error if the IPI
 * failed, otherwise the callback's result (0 on success).
 */
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -EIO;		/* overwritten by the callback */
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);