// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/preempt.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/wait.h>

#include <asm/cputype.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

#include <kvm/arm_psci.h>

/*
 * This is an implementation of the Power State Coordination Interface
 * as described in ARM document number ARM DEN 0022A.
 */

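/*
 * Mask covering the MPIDR affinity fields at and above the given level;
 * the fields below that level are cleared.
 */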
#define AFFINITY_MASK(level)	~((0x1UL << ((level) * MPIDR_LEVEL_BITS)) - 1)

static u32 smccc_get_function(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 0);
}

static unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 1);
}

static unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 2);
}

static unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
{
	return vcpu_get_reg(vcpu, 3);
}

static void smccc_set_retval(struct kvm_vcpu *vcpu,
			     unsigned long a0,
			     unsigned long a1,
			     unsigned long a2,
			     unsigned long a3)
{
	vcpu_set_reg(vcpu, 0, a0);
	vcpu_set_reg(vcpu, 1, a1);
	vcpu_set_reg(vcpu, 2, a2);
	vcpu_set_reg(vcpu, 3, a3);
}

static unsigned long psci_affinity_mask(unsigned long affinity_level)
{
	if (affinity_level <= 3)
		return MPIDR_HWID_BITMASK & AFFINITY_MASK(affinity_level);

	return 0;
}

static unsigned long kvm_psci_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	/*
	 * NOTE: For simplicity, we treat VCPU suspend emulation the same
	 * as WFI (Wait-for-interrupt) emulation.
	 *
	 * This means that for KVM the wakeup events are interrupts, which
	 * is consistent with the intended use of StateID as described in
	 * section 5.4.1 of the PSCI v0.2 specification (ARM DEN 0022A).
	 *
	 * Further, we also treat a power-down request the same as a
	 * stand-by request, as per section 5.4.2 clause 3 of the PSCI v0.2
	 * specification (ARM DEN 0022A). This means all suspend states
	 * for KVM will preserve the register state.
	 */
	kvm_vcpu_block(vcpu);
	kvm_clear_request(KVM_REQ_UNHALT, vcpu);

	return PSCI_RET_SUCCESS;
}

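/*
 * PSCI CPU_OFF: mark the calling VCPU as powered off and request that it
 * goes to sleep. It will not run again until a CPU_ON call targets it.
 */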
static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

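/*
 * PSCI CPU_ON: power on the VCPU identified by the MPIDR in arg1, making it
 * reset and resume at the entry point given in arg2 with arg3 in r0/x0.
 */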
static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
{
	struct vcpu_reset_state *reset_state;
	struct kvm *kvm = source_vcpu->kvm;
	struct kvm_vcpu *vcpu = NULL;
	unsigned long cpu_id;

	cpu_id = smccc_get_arg1(source_vcpu) & MPIDR_HWID_BITMASK;
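	/* A 32-bit caller can only pass a 32-bit MPIDR value. */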
	if (vcpu_mode_is_32bit(source_vcpu))
		cpu_id &= ~((u32) 0);

	vcpu = kvm_mpidr_to_vcpu(kvm, cpu_id);

	/*
	 * Make sure the caller requested a valid CPU and that the CPU is
	 * turned off.
	 */
	if (!vcpu)
		return PSCI_RET_INVALID_PARAMS;
	if (!vcpu->arch.power_off) {
		if (kvm_psci_version(source_vcpu, kvm) != KVM_ARM_PSCI_0_1)
			return PSCI_RET_ALREADY_ON;
		else
			return PSCI_RET_INVALID_PARAMS;
	}

	reset_state = &vcpu->arch.reset_state;

	reset_state->pc = smccc_get_arg2(source_vcpu);

	/* Propagate caller endianness */
	reset_state->be = kvm_vcpu_is_be(source_vcpu);

	/*
	 * NOTE: We always update r0 (or x0) because for PSCI v0.1
	 * the general purpose registers are undefined upon CPU_ON.
	 */
	reset_state->r0 = smccc_get_arg3(source_vcpu);

	WRITE_ONCE(reset_state->reset, true);
	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);

	/*
	 * Make sure the reset request is observed if the change to
	 * power_off is observed.
	 */
	smp_wmb();

	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);

	return PSCI_RET_SUCCESS;
}

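/*
 * PSCI AFFINITY_INFO: report whether any VCPU whose MPIDR matches the
 * requested affinity group is currently powered on.
 */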
static unsigned long kvm_psci_vcpu_affinity_info(struct kvm_vcpu *vcpu)
{
	int i, matching_cpus = 0;
	unsigned long mpidr;
	unsigned long target_affinity;
	unsigned long target_affinity_mask;
	unsigned long lowest_affinity_level;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tmp;

	target_affinity = smccc_get_arg1(vcpu);
	lowest_affinity_level = smccc_get_arg2(vcpu);

	/* Determine target affinity mask */
	target_affinity_mask = psci_affinity_mask(lowest_affinity_level);
	if (!target_affinity_mask)
		return PSCI_RET_INVALID_PARAMS;

	/* Ignore other bits of target affinity */
	target_affinity &= target_affinity_mask;

	/*
	 * If one or more VCPUs matching the target affinity are running,
	 * report ON, otherwise report OFF.
	 */
	kvm_for_each_vcpu(i, tmp, kvm) {
		mpidr = kvm_vcpu_get_mpidr_aff(tmp);
		if ((mpidr & target_affinity_mask) == target_affinity) {
			matching_cpus++;
			if (!tmp->arch.power_off)
				return PSCI_0_2_AFFINITY_LEVEL_ON;
		}
	}

	if (!matching_cpus)
		return PSCI_RET_INVALID_PARAMS;

	return PSCI_0_2_AFFINITY_LEVEL_OFF;
}

static void kvm_prepare_system_event(struct kvm_vcpu *vcpu, u32 type)
{
	int i;
	struct kvm_vcpu *tmp;

	/*
	 * The KVM ABI specifies that a system event exit may call KVM_RUN
	 * again and may perform shutdown/reboot at a later time than when
	 * the actual request was made. Since we are implementing PSCI, and
	 * a caller of the PSCI reboot and shutdown functions expects the
	 * system to shut down or reboot immediately, let's make sure that
	 * VCPUs are not run after this call is handled and before the VCPUs
	 * have been re-initialized.
	 */
	kvm_for_each_vcpu(i, tmp, vcpu->kvm)
		tmp->arch.power_off = true;
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
	vcpu->run->system_event.type = type;
	vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}

static void kvm_psci_system_off(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_SHUTDOWN);
}

static void kvm_psci_system_reset(struct kvm_vcpu *vcpu)
{
	kvm_prepare_system_event(vcpu, KVM_SYSTEM_EVENT_RESET);
}

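/* Handle a PSCI v0.2 function call from the guest. */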
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		/*
		 * Bits[31:16] = Major Version = 0
		 * Bits[15:0] = Minor Version = 2
		 */
		val = KVM_ARM_PSCI_0_2;
		break;
	case PSCI_0_2_FN_CPU_SUSPEND:
	case PSCI_0_2_FN64_CPU_SUSPEND:
		val = kvm_psci_vcpu_suspend(vcpu);
		break;
	case PSCI_0_2_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case PSCI_0_2_FN_CPU_ON:
	case PSCI_0_2_FN64_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	case PSCI_0_2_FN_AFFINITY_INFO:
	case PSCI_0_2_FN64_AFFINITY_INFO:
		val = kvm_psci_vcpu_affinity_info(vcpu);
		break;
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		/*
		 * The Trusted OS is either MP (and hence does not require
		 * migration) or not present.
		 */
		val = PSCI_0_2_TOS_MP;
		break;
	case PSCI_0_2_FN_SYSTEM_OFF:
		kvm_psci_system_off(vcpu);
		/*
		 * We shouldn't be going back to the guest VCPU after
		 * receiving a SYSTEM_OFF request.
		 *
		 * If user space accidentally/deliberately resumes the
		 * guest VCPU after a SYSTEM_OFF request, then the guest
		 * VCPU should see an internal failure from the PSCI return
		 * value. To achieve this, we preload r0 (or x0) with the
		 * PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	case PSCI_0_2_FN_SYSTEM_RESET:
		kvm_psci_system_reset(vcpu);
		/*
		 * Same reason as SYSTEM_OFF for preloading r0 (or x0)
		 * with the PSCI return value INTERNAL_FAILURE.
		 */
		val = PSCI_RET_INTERNAL_FAILURE;
		ret = 0;
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}

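/*
 * Handle a PSCI v1.0 call: PSCI_VERSION and PSCI_FEATURES are implemented
 * here, everything else falls back to the v0.2 handler.
 */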
static int kvm_psci_1_0_call(struct kvm_vcpu *vcpu)
{
	u32 psci_fn = smccc_get_function(vcpu);
	u32 feature;
	unsigned long val;
	int ret = 1;

	switch (psci_fn) {
	case PSCI_0_2_FN_PSCI_VERSION:
		val = KVM_ARM_PSCI_1_0;
		break;
	case PSCI_1_0_FN_PSCI_FEATURES:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case PSCI_0_2_FN_PSCI_VERSION:
		case PSCI_0_2_FN_CPU_SUSPEND:
		case PSCI_0_2_FN64_CPU_SUSPEND:
		case PSCI_0_2_FN_CPU_OFF:
		case PSCI_0_2_FN_CPU_ON:
		case PSCI_0_2_FN64_CPU_ON:
		case PSCI_0_2_FN_AFFINITY_INFO:
		case PSCI_0_2_FN64_AFFINITY_INFO:
		case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
		case PSCI_0_2_FN_SYSTEM_OFF:
		case PSCI_0_2_FN_SYSTEM_RESET:
		case PSCI_1_0_FN_PSCI_FEATURES:
		case ARM_SMCCC_VERSION_FUNC_ID:
			val = 0;
			break;
		default:
			val = PSCI_RET_NOT_SUPPORTED;
			break;
		}
		break;
	default:
		return kvm_psci_0_2_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return ret;
}

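/* Handle the legacy, KVM-specific PSCI v0.1 functions (CPU_OFF and CPU_ON only). */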
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	u32 psci_fn = smccc_get_function(vcpu);
	unsigned long val;

	switch (psci_fn) {
	case KVM_PSCI_FN_CPU_OFF:
		kvm_psci_vcpu_off(vcpu);
		val = PSCI_RET_SUCCESS;
		break;
	case KVM_PSCI_FN_CPU_ON:
		mutex_lock(&kvm->lock);
		val = kvm_psci_vcpu_on(vcpu);
		mutex_unlock(&kvm->lock);
		break;
	default:
		val = PSCI_RET_NOT_SUPPORTED;
		break;
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

/**
 * kvm_psci_call - handle PSCI call if r0 value is in range
 * @vcpu: Pointer to the VCPU struct
 *
 * Handle PSCI calls from guests through traps from HVC instructions.
 * The calling convention is similar to SMC calls to the secure world
 * where the function number is placed in r0.
 *
 * This function returns: > 0 (success), 0 (success but exit to user
 * space), and < 0 (errors)
 *
 * Errors:
 * -EINVAL: Unrecognized PSCI function
 */
static int kvm_psci_call(struct kvm_vcpu *vcpu)
{
	switch (kvm_psci_version(vcpu, vcpu->kvm)) {
	case KVM_ARM_PSCI_1_0:
		return kvm_psci_1_0_call(vcpu);
	case KVM_ARM_PSCI_0_2:
		return kvm_psci_0_2_call(vcpu);
	case KVM_ARM_PSCI_0_1:
		return kvm_psci_0_1_call(vcpu);
	default:
		return -EINVAL;
	}
}

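/*
 * Dispatch a guest HVC call: handle the Arm SMCCC 1.1 version and feature
 * queries here (including the Spectre workaround discovery calls) and fall
 * back to PSCI emulation for everything else.
 */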
int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);
	u32 val = SMCCC_RET_NOT_SUPPORTED;
	u32 feature;

	switch (func_id) {
	case ARM_SMCCC_VERSION_FUNC_ID:
		val = ARM_SMCCC_VERSION_1_1;
		break;
	case ARM_SMCCC_ARCH_FEATURES_FUNC_ID:
		feature = smccc_get_arg1(vcpu);
		switch (feature) {
		case ARM_SMCCC_ARCH_WORKAROUND_1:
			switch (kvm_arm_harden_branch_predictor()) {
			case KVM_BP_HARDEN_UNKNOWN:
				break;
			case KVM_BP_HARDEN_WA_NEEDED:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_BP_HARDEN_NOT_REQUIRED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		case ARM_SMCCC_ARCH_WORKAROUND_2:
			switch (kvm_arm_have_ssbd()) {
			case KVM_SSBD_FORCE_DISABLE:
			case KVM_SSBD_UNKNOWN:
				break;
			case KVM_SSBD_KERNEL:
				val = SMCCC_RET_SUCCESS;
				break;
			case KVM_SSBD_FORCE_ENABLE:
			case KVM_SSBD_MITIGATED:
				val = SMCCC_RET_NOT_REQUIRED;
				break;
			}
			break;
		}
		break;
	default:
		return kvm_psci_call(vcpu);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}

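/*
 * Firmware pseudo-registers exposed to user space via KVM_GET/SET_ONE_REG:
 * the PSCI version and the two SMCCC workaround discovery registers.
 */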
int kvm_arm_get_fw_num_regs(struct kvm_vcpu *vcpu)
{
	return 3;		/* PSCI version and two workaround registers */
}

int kvm_arm_copy_fw_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	if (put_user(KVM_REG_ARM_PSCI_VERSION, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1, uindices++))
		return -EFAULT;

	if (put_user(KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2, uindices++))
		return -EFAULT;

	return 0;
}

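/* The workaround level is reported in the bottom four bits of the register. */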
#define KVM_REG_FEATURE_LEVEL_WIDTH	4
#define KVM_REG_FEATURE_LEVEL_MASK	(BIT(KVM_REG_FEATURE_LEVEL_WIDTH) - 1)

/*
 * Convert the workaround level into an easy-to-compare number, where higher
 * values mean better protection.
 */
static int get_kernel_wa_level(u64 regid)
{
	switch (regid) {
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		switch (kvm_arm_harden_branch_predictor()) {
		case KVM_BP_HARDEN_UNKNOWN:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
		case KVM_BP_HARDEN_WA_NEEDED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL;
		case KVM_BP_HARDEN_NOT_REQUIRED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED;
		}
		return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		switch (kvm_arm_have_ssbd()) {
		case KVM_SSBD_FORCE_DISABLE:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
		case KVM_SSBD_KERNEL:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
		case KVM_SSBD_FORCE_ENABLE:
		case KVM_SSBD_MITIGATED:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
		case KVM_SSBD_UNKNOWN:
		default:
			return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
		}
	}

	return -EINVAL;
}

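/* Read a firmware pseudo-register on behalf of user space. */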
int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
		val = kvm_psci_version(vcpu, vcpu->kvm);
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
		break;
	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;

		if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
		    kvm_arm_get_vcpu_workaround_2_flag(vcpu))
			val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
		break;
	default:
		return -ENOENT;
	}

	if (copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

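/*
 * Write a firmware pseudo-register on behalf of user space, e.g. when
 * restoring a migrated guest. Downgrading a workaround level below what the
 * host provides is allowed; claiming more than the host provides is not.
 */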
int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(long)reg->addr;
	u64 val;
	int wa_level;

	if (copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_ARM_PSCI_VERSION:
	{
		bool wants_02;

		wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features);

		switch (val) {
		case KVM_ARM_PSCI_0_1:
			if (wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		case KVM_ARM_PSCI_0_2:
		case KVM_ARM_PSCI_1_0:
			if (!wants_02)
				return -EINVAL;
			vcpu->kvm->arch.psci_version = val;
			return 0;
		}
		break;
	}

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
		if (val & ~KVM_REG_FEATURE_LEVEL_MASK)
			return -EINVAL;

		if (get_kernel_wa_level(reg->id) < val)
			return -EINVAL;

		return 0;

	case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
		if (val & ~(KVM_REG_FEATURE_LEVEL_MASK |
			    KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
			return -EINVAL;

		wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;

		if (get_kernel_wa_level(reg->id) < wa_level)
			return -EINVAL;

		/* The enabled bit must not be set unless the level is AVAIL. */
		if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
		    wa_level != val)
			return -EINVAL;

		/* Are we finished or do we need to check the enable bit? */
		if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
			return 0;

		/*
		 * If this kernel supports the workaround to be switched on
		 * or off, make sure it matches the requested setting.
		 */
		switch (wa_level) {
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu,
			    val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
			break;
		case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
			kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
			break;
		}

		return 0;
	default:
		return -ENOENT;
	}

	return -EINVAL;
}