// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/irq_work.h>
#include <linux/jump_label.h>
#include <linux/kvm_para.h>
#include <linux/reboot.h>
#include <linux/static_call.h>
#include <asm/paravirt.h>

static int has_steal_clock;
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
DEFINE_STATIC_KEY_FALSE(virt_spin_lock_key);

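/*
 * Default target for the pv_steal_clock static call below: report no
 * steal time until pv_time_init() detects hypervisor support and
 * patches in paravt_steal_clock().
 */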
static u64 native_steal_clock(int cpu)
{
	return 0;
}

DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);

static bool steal_acc = true;

static int __init parse_no_stealacc(char *arg)
{
	steal_acc = false;
	return 0;
}
early_param("no-steal-acc", parse_no_stealacc);

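/*
 * Read the steal counter published by the hypervisor. The version
 * field works like a seqcount: the updater makes it odd while the
 * record is being written, so the read is retried until the same
 * even version is observed before and after reading ->steal.
 */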
static u64 paravt_steal_clock(int cpu)
{
	int version;
	u64 steal;
	struct kvm_steal_time *src;

	src = &per_cpu(steal_time, cpu);
	do {

		version = src->version;
		virt_rmb(); /* Make sure that the version is read before the steal */
		steal = src->steal;
		virt_rmb(); /* Make sure that the steal is read before the next version */

	} while ((version & 1) || (version != src->version));

	return steal;
}

#ifdef CONFIG_SMP
static struct smp_ops native_ops;

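/*
 * Send a single IPI via hypercall. Pending actions accumulate in the
 * target's per-CPU message word; only the sender that sets the first
 * bit issues the hypercall, later senders piggyback on the interrupt
 * already in flight. ACTION_BOOT_CPU keeps using the native path
 * (presumably because the target CPU has not set up the PV handler yet).
 */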
static void pv_send_ipi_single(int cpu, unsigned int action)
{
	int min, old;
	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);

	if (unlikely(action == ACTION_BOOT_CPU)) {
		native_ops.send_ipi_single(cpu, action);
		return;
	}

	old = atomic_fetch_or(BIT(action), &info->message);
	if (old)
		return;

	min = cpu_logical_map(cpu);
	kvm_hypercall3(KVM_HCALL_FUNC_IPI, 1, 0, min);
}

#define KVM_IPI_CLUSTER_SIZE	(2 * BITS_PER_LONG)

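/*
 * The IPI hypercall carries a 128-bit CPU bitmap (two longs) plus the
 * logical id of the lowest CPU in it, so one call covers at most
 * KVM_IPI_CLUSTER_SIZE consecutive CPUs. The mask is therefore sent
 * as a series of such windows: when the next CPU no longer fits in
 * the current window, the accumulated bitmap is flushed and a new
 * window begins.
 */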
static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	int i, cpu, min = 0, max = 0, old;
	__uint128_t bitmap = 0;
	irq_cpustat_t *info;

	if (cpumask_empty(mask))
		return;

	if (unlikely(action == ACTION_BOOT_CPU)) {
		native_ops.send_ipi_mask(mask, action);
		return;
	}

	action = BIT(action);
	for_each_cpu(i, mask) {
		info = &per_cpu(irq_stat, i);
		old = atomic_fetch_or(action, &info->message);
		if (old)
			continue;

		cpu = cpu_logical_map(i);
		if (!bitmap) {
			min = max = cpu;
		} else if (cpu < min && cpu > (max - KVM_IPI_CLUSTER_SIZE)) {
			/* cpu < min, and bitmap still enough */
			bitmap <<= min - cpu;
			min = cpu;
		} else if (cpu > min && cpu < (min + KVM_IPI_CLUSTER_SIZE)) {
			/* cpu > min, and bitmap still enough */
			max = cpu > max ? cpu : max;
		} else {
			/*
			 * Adding this cpu would push the bitmap past
			 * KVM_IPI_CLUSTER_SIZE bits, so flush the current
			 * window with a hypercall and start a new one here.
			 */
			kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
				       (unsigned long)(bitmap >> BITS_PER_LONG), min);
			min = max = cpu;
			bitmap = 0;
		}
		__set_bit(cpu - min, (unsigned long *)&bitmap);
	}

	if (bitmap)
		kvm_hypercall3(KVM_HCALL_FUNC_IPI, (unsigned long)bitmap,
			       (unsigned long)(bitmap >> BITS_PER_LONG), min);
}

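/*
 * SWI0 handler for PV IPIs: acknowledge the software interrupt,
 * atomically fetch and clear the pending action bits, then dispatch
 * each requested action exactly once.
 */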
static irqreturn_t pv_ipi_interrupt(int irq, void *dev)
{
	u32 action;
	irq_cpustat_t *info;

	/* Clear SWI interrupt */
	clear_csr_estat(1 << INT_SWI0);
	info = this_cpu_ptr(&irq_stat);
	action = atomic_xchg(&info->message, 0);

	if (action & SMP_RESCHEDULE) {
		scheduler_ipi();
		info->ipi_irqs[IPI_RESCHEDULE]++;
	}

	if (action & SMP_CALL_FUNCTION) {
		generic_smp_call_function_interrupt();
		info->ipi_irqs[IPI_CALL_FUNCTION]++;
	}

	if (action & SMP_IRQ_WORK) {
		irq_work_run();
		info->ipi_irqs[IPI_IRQ_WORK]++;
	}

	if (action & SMP_CLEAR_VECTOR) {
		complete_irq_moving();
		info->ipi_irqs[IPI_CLEAR_VECTOR]++;
	}

	return IRQ_HANDLED;
}

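/*
 * Switch this CPU to the PV IPI path: keep the native init so that
 * ACTION_BOOT_CPU still works, then claim SWI0, the vector through
 * which the hypervisor delivers KVM_HCALL_FUNC_IPI requests.
 */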
static void pv_init_ipi(void)
{
	int r, swi;

	/* Init native ipi irq for ACTION_BOOT_CPU */
	native_ops.init_ipi();
	swi = get_percpu_irq(INT_SWI0);
	if (swi < 0)
		panic("SWI0 IRQ mapping failed\n");
	irq_set_percpu_devid(swi);
	r = request_percpu_irq(swi, pv_ipi_interrupt, "SWI0-IPI", &irq_stat);
	if (r < 0)
		panic("SWI0 IRQ request failed\n");
}
#endif

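/*
 * Detect KVM by comparing the CPUCFG hypervisor signature against
 * KVM_SIGNATURE; the result is cached so CPUCFG is only read once.
 */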
bool kvm_para_available(void)
{
	int config;
	static int hypervisor_type;

	if (!cpu_has_hypervisor)
		return false;

	if (!hypervisor_type) {
		config = read_cpucfg(CPUCFG_KVM_SIG);
		if (!memcmp(&config, KVM_SIGNATURE, 4))
			hypervisor_type = HYPERVISOR_KVM;
	}

	return hypervisor_type == HYPERVISOR_KVM;
}

unsigned int kvm_arch_para_features(void)
{
	static unsigned int feature;

	if (!kvm_para_available())
		return 0;

	if (!feature)
		feature = read_cpucfg(CPUCFG_KVM_FEATURE);

	return feature;
}

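/*
 * If the hypervisor advertises KVM_FEATURE_IPI, reroute the SMP IPI
 * operations to the PV implementations above, keeping a copy of the
 * native ops for the ACTION_BOOT_CPU fallback.
 */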
int __init pv_ipi_init(void)
{
	if (!kvm_para_has_feature(KVM_FEATURE_IPI))
		return 0;

#ifdef CONFIG_SMP
	native_ops = mp_ops;
	mp_ops.init_ipi = pv_init_ipi;
	mp_ops.send_ipi_single = pv_send_ipi_single;
	mp_ops.send_ipi_mask = pv_send_ipi_mask;
#endif

	return 0;
}

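/*
 * Register this CPU's steal_time record with the hypervisor by
 * passing its physical address, tagged with KVM_STEAL_PHYS_VALID,
 * through the NOTIFY hypercall. The record must not cross a page
 * boundary, presumably so the host can reach it through a single
 * guest-physical page mapping.
 */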
static int pv_enable_steal_time(void)
{
	int cpu = smp_processor_id();
	unsigned long addr;
	struct kvm_steal_time *st;

	if (!has_steal_clock)
		return -EPERM;

	st = &per_cpu(steal_time, cpu);
	addr = per_cpu_ptr_to_phys(st);

	/* The whole structure kvm_steal_time should be in one page */
	if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) {
		pr_warn("Illegal PV steal time addr %lx\n", addr);
		return -EFAULT;
	}

	addr |= KVM_STEAL_PHYS_VALID;
	kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), addr);

	return 0;
}

static void pv_disable_steal_time(void)
{
	if (has_steal_clock)
		kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, BIT(KVM_FEATURE_STEAL_TIME), 0);
}

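/*
 * Hotplug hooks: a CPU registers its steal_time record when it comes
 * online and unregisters it on the way down. Interrupts are disabled
 * around the hypercalls so the calling CPU cannot change mid-update.
 */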
#ifdef CONFIG_SMP
static int pv_time_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_enable_steal_time();
	local_irq_restore(flags);

	return 0;
}

static int pv_time_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_disable_steal_time();
	local_irq_restore(flags);

	return 0;
}
#endif

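/*
 * Before reboot, disable steal-time updates on every CPU so the
 * hypervisor stops writing into guest memory that is about to be
 * reused by the next kernel.
 */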
static void pv_cpu_reboot(void *unused)
{
	pv_disable_steal_time();
}

static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, void *unused)
{
	on_each_cpu(pv_cpu_reboot, NULL, 1);
	return NOTIFY_DONE;
}

static struct notifier_block pv_reboot_nb = {
	.notifier_call = pv_reboot_notify,
};

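/*
 * Steal-time setup: probe KVM_FEATURE_STEAL_TIME, register the boot
 * CPU's record, install the reboot notifier and hotplug hooks, then
 * point pv_steal_clock at paravt_steal_clock and flip the static keys
 * that make the scheduler account steal time.
 */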
int __init pv_time_init(void)
{
	int r;

	if (!kvm_para_has_feature(KVM_FEATURE_STEAL_TIME))
		return 0;

	has_steal_clock = 1;
	r = pv_enable_steal_time();
	if (r < 0) {
		has_steal_clock = 0;
		return 0;
	}
	register_reboot_notifier(&pv_reboot_nb);

#ifdef CONFIG_SMP
	r = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
				      "loongarch/pv_time:online",
				      pv_time_cpu_online, pv_time_cpu_down_prepare);
	if (r < 0) {
		has_steal_clock = 0;
		pr_err("Failed to install cpu hotplug callbacks\n");
		return r;
	}
#endif

	static_call_update(pv_steal_clock, paravt_steal_clock);

	static_key_slow_inc(&paravirt_steal_enabled);
#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
	if (steal_acc)
		static_key_slow_inc(&paravirt_steal_rq_enabled);
#endif

	pr_info("Using paravirt steal-time\n");

	return 0;
}

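/*
 * Under any hypervisor, enable virt_spin_lock_key; as on other ports,
 * this is expected to make the qspinlock code fall back to a simple
 * test-and-set lock, which degrades more gracefully when vCPUs are
 * preempted.
 */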
int __init pv_spinlock_init(void)
{
	if (!cpu_has_hypervisor)
		return 0;

	static_branch_enable(&virt_spin_lock_key);

	return 0;
}