// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <asm/msr.h>

static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;
	int this_cpu = raw_smp_processor_id();

	if (rv->msrs)
		reg = per_cpu_ptr(rv->msrs, this_cpu);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}

int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*q = rv.reg.q;

	return err;
}
EXPORT_SYMBOL(rdmsrl_on_cpu);

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);

int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsrl_on_cpu);

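/*
 * Run @msr_func on each CPU in @mask: directly on the local CPU when it is
 * part of the mask (preemption is held off via get_cpu()/put_cpu()), and
 * via IPI on all remote CPUs, since smp_call_function_many() does not run
 * the callback on the calling CPU itself.
 */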
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr *msrs,
			    void (*msr_func)(void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs   = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}

/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
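
/*
 * Illustrative usage sketch (not part of this file): read one MSR on every
 * online CPU into a per-CPU struct msr array. This assumes the
 * msrs_alloc()/msrs_free() helpers from arch/x86/lib/msr.c; MSR_IA32_TSC is
 * used purely as an example register.
 *
 *	struct msr *msrs = msrs_alloc();
 *	int cpu;
 *
 *	if (!msrs)
 *		return -ENOMEM;
 *
 *	rdmsr_on_cpus(cpu_online_mask, MSR_IA32_TSC, msrs);
 *	for_each_online_cpu(cpu)
 *		pr_info("cpu %d: %llx\n", cpu, per_cpu_ptr(msrs, cpu)->q);
 *
 *	msrs_free(msrs);
 */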

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	array of MSR values
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);

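/*
 * Bundle the msr_info argument with a completion so the "safe" read below
 * can be issued as an asynchronous cross-call: the caller sleeps in
 * wait_for_completion() until the remote handler is done, rather than
 * busy-waiting as smp_call_function_single(..., 1) would.
 */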
struct msr_info_completion {
	struct msr_info		msr;
	struct completion	done;
};

/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l,
				 &rv->msr.reg.h);
	complete(&rv->done);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd = {
		.func	= __rdmsr_safe_on_cpu,
		.info	= &rv,
	};
	int err;

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);
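
/*
 * Illustrative usage sketch (not part of this file): probe an MSR that may
 * not exist on the target CPU. A non-zero return means either that the
 * cross-call itself failed (e.g. the CPU is offline) or that the RDMSR
 * faulted; MSR_IA32_PLATFORM_ID is used purely as an example register.
 *
 *	u32 lo, hi;
 *	int err;
 *
 *	err = rdmsr_safe_on_cpu(1, MSR_IA32_PLATFORM_ID, &lo, &hi);
 *	if (err)
 *		pr_warn("MSR not readable on CPU 1: %d\n", err);
 *	else
 *		pr_info("CPU 1: %08x%08x\n", hi, lo);
 */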

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);

int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsrl_safe_on_cpu);

int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	u32 low, high;
	int err;

	err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
	*q = (u64)high << 32 | low;

	return err;
}
EXPORT_SYMBOL(rdmsrl_safe_on_cpu);

/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);
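
/*
 * Illustrative usage sketch (not part of this file), assuming the register
 * layout consumed by rdmsr_safe_regs()/wrmsr_safe_regs() in
 * arch/x86/lib/msr-reg.S: the MSR number goes in regs[1] (ECX) and the
 * result comes back in regs[2]:regs[0] (EDX:EAX). 0x10 (the TSC) is used
 * purely as an example MSR number.
 *
 *	u32 regs[8] = { 0 };
 *	int err;
 *
 *	regs[1] = 0x10;
 *	err = rdmsr_safe_regs_on_cpu(0, regs);
 *	if (!err)
 *		pr_info("value: %#llx\n", (u64)regs[2] << 32 | regs[0]);
 */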

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 *regs)
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);