// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <asm/msr.h>

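/*
 * IPI callbacks: these run on the target CPU and access the MSR there. When
 * a per-CPU result array is supplied (the *_on_cpus() paths), this CPU's slot
 * is used; otherwise the single struct msr embedded in msr_info is used (the
 * *_on_cpu() paths).
 */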
static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;

	if (rv->msrs)
		reg = this_cpu_ptr(rv->msrs);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;

	if (rv->msrs)
		reg = this_cpu_ptr(rv->msrs);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}

int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*q = rv.reg.q;

	return err;
}
EXPORT_SYMBOL(rdmsrl_on_cpu);
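
/*
 * Usage sketch (illustrative, not part of this file): reading the full
 * 64-bit value of an MSR on another CPU, here the TSC. Any error from
 * smp_call_function_single() (e.g. the CPU being offline) is returned
 * unchanged:
 *
 *	u64 tsc;
 *
 *	if (!rdmsrl_on_cpu(1, MSR_IA32_TSC, &tsc))
 *		pr_info("TSC on CPU1: %llu\n", tsc);
 */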

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);

int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsrl_on_cpu);

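/*
 * Helper for the *_on_cpus() variants: run msr_func() directly on this CPU
 * if it is part of @mask (an IPI cannot be sent to the local CPU), then IPI
 * the remaining CPUs in @mask and wait for them to finish. Preemption is
 * disabled via get_cpu()/put_cpu() so the local-CPU check stays valid.
 */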
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr __percpu *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}

/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	per-CPU array into which the MSR values are read
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:	which CPUs
 * @msr_no:	which MSR
 * @msrs:	per-CPU array supplying the MSR values to write
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
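
/*
 * Usage sketch (illustrative, not taken from a particular driver): reading
 * one MSR on every online CPU into a per-CPU buffer. msrs_alloc()/msrs_free()
 * are the helpers from arch/x86/lib/msr.c, and MSR_IA32_THERM_STATUS is just
 * an example register:
 *
 *	struct msr __percpu *msrs = msrs_alloc();
 *
 *	if (msrs) {
 *		rdmsr_on_cpus(cpu_online_mask, MSR_IA32_THERM_STATUS, msrs);
 *		// per_cpu_ptr(msrs, cpu)->q now holds that CPU's value
 *		msrs_free(msrs);
 *	}
 */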

struct msr_info_completion {
	struct msr_info		msr;
	struct completion	done;
};

/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
	complete(&rv->done);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

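/*
 * rdmsr_safe_on_cpu() uses the asynchronous single-CPU call together with a
 * completion: the caller sleeps in wait_for_completion() until the target CPU
 * has run __rdmsr_safe_on_cpu(), rather than spinning as the synchronous
 * smp_call_function_single(..., 1) would. The rdmsr_safe() result is then
 * reported separately from any error in delivering the call itself.
 */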
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd;
	int err;

	INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);

int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsrl_safe_on_cpu);

int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	u32 low, high;
	int err;

	err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
	*q = (u64)high << 32 | low;

	return err;
}
EXPORT_SYMBOL(rdmsrl_safe_on_cpu);
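
/*
 * Usage sketch (illustrative): probing an MSR that may not be implemented on
 * the target CPU. MSR_AMD64_NB_CFG is just an example register; a #GP from
 * the access is reported as an error return instead of an unhandled fault:
 *
 *	u64 val;
 *
 *	if (!rdmsrl_safe_on_cpu(cpu, MSR_AMD64_NB_CFG, &val))
 *		pr_info("NB_CFG on CPU%u: 0x%llx\n", cpu, val);
 */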

/*
 * These variants are significantly slower, but allow control over
 * the entire 32-bit GPR set.
 */
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);