1// SPDX-License-Identifier: GPL-2.0
2// Copyright (C) 2017 Arm Ltd.
3#define pr_fmt(fmt) "sdei: " fmt
4
5#include <acpi/ghes.h>
6#include <linux/acpi.h>
7#include <linux/arm_sdei.h>
8#include <linux/arm-smccc.h>
9#include <linux/atomic.h>
10#include <linux/bitops.h>
11#include <linux/compiler.h>
12#include <linux/cpuhotplug.h>
13#include <linux/cpu.h>
14#include <linux/cpu_pm.h>
15#include <linux/errno.h>
16#include <linux/hardirq.h>
17#include <linux/kernel.h>
18#include <linux/kprobes.h>
19#include <linux/kvm_host.h>
20#include <linux/list.h>
21#include <linux/mutex.h>
22#include <linux/notifier.h>
23#include <linux/of.h>
24#include <linux/of_platform.h>
25#include <linux/percpu.h>
26#include <linux/platform_device.h>
27#include <linux/pm.h>
28#include <linux/ptrace.h>
29#include <linux/preempt.h>
30#include <linux/reboot.h>
31#include <linux/slab.h>
32#include <linux/smp.h>
33#include <linux/spinlock.h>
34#include <linux/uaccess.h>
35
36/*
37 * The conduit call (SMC or HVC) used to reach the SDEI firmware.
38 */
39static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
40 unsigned long arg0, unsigned long arg1,
41 unsigned long arg2, unsigned long arg3,
42 unsigned long arg4, struct arm_smccc_res *res);
43
44/* entry point from firmware to arch asm code */
45static unsigned long sdei_entry_point;
46
47struct sdei_event {
48 /* These three are protected by the sdei_list_lock */
49 struct list_head list;
50 bool reregister;
51 bool reenable;
52
53 u32 event_num;
54 u8 type;
55 u8 priority;
56
57 /* This pointer is handed to firmware as the event argument. */
58 union {
59 /* Shared events */
60 struct sdei_registered_event *registered;
61
62 /* CPU private events */
63 struct sdei_registered_event __percpu *private_registered;
64 };
65};
66
67/* Take this mutex for any API call or modification; take it before sdei_list_lock. */
68static DEFINE_MUTEX(sdei_events_lock);
69
70/* and then hold this when modifying the list */
71static DEFINE_SPINLOCK(sdei_list_lock);
72static LIST_HEAD(sdei_list);
73
74/* Private events are registered/enabled via IPI passing one of these */
75struct sdei_crosscall_args {
76 struct sdei_event *event;
77 atomic_t errors;
78 int first_error;
79};
80
81#define CROSSCALL_INIT(arg, event) (arg.event = event, \
82 arg.first_error = 0, \
83 atomic_set(&arg.errors, 0))
84
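/*
 * Run @fn on every online CPU (including this one) and wait. @fn reports
 * its result through sdei_cross_call_return(); the first error wins.
 */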
85static inline int sdei_do_cross_call(void *fn, struct sdei_event * event)
86{
87 struct sdei_crosscall_args arg;
88
89 CROSSCALL_INIT(arg, event);
90 on_each_cpu(fn, &arg, true);
91
92 return arg.first_error;
93}
94
95static inline void
96sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
97{
98 if (err && (atomic_inc_return(&arg->errors) == 1))
99 arg->first_error = err;
100}
101
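/* Map an SDEI firmware error code onto the nearest Linux errno. */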
102static int sdei_to_linux_errno(unsigned long sdei_err)
103{
104 switch (sdei_err) {
105 case SDEI_NOT_SUPPORTED:
106 return -EOPNOTSUPP;
107 case SDEI_INVALID_PARAMETERS:
108 return -EINVAL;
109 case SDEI_DENIED:
110 return -EPERM;
111 case SDEI_PENDING:
112 return -EINPROGRESS;
113 case SDEI_OUT_OF_RESOURCE:
114 return -ENOMEM;
115 }
116
117 /* Not an error value ... */
118 return sdei_err;
119}
120
121/*
122 * If x0 is any of these values, the call failed; use sdei_to_linux_errno()
123 * to translate.
124 */
125static int sdei_is_err(struct arm_smccc_res *res)
126{
127 switch (res->a0) {
128 case SDEI_NOT_SUPPORTED:
129 case SDEI_INVALID_PARAMETERS:
130 case SDEI_DENIED:
131 case SDEI_PENDING:
132 case SDEI_OUT_OF_RESOURCE:
133 return true;
134 }
135
136 return false;
137}
138
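/*
 * Make an SDEI call through the registered conduit and translate a failing
 * x0 value into a Linux errno. The raw x0 value is passed back through
 * @result when the caller asks for it. Calls made after the interface has
 * been marked broken fail with -EIO.
 */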
139static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
140 unsigned long arg1, unsigned long arg2,
141 unsigned long arg3, unsigned long arg4,
142 u64 *result)
143{
144 int err = 0;
145 struct arm_smccc_res res;
146
147 if (sdei_firmware_call) {
148 sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
149 &res);
150 if (sdei_is_err(&res))
151 err = sdei_to_linux_errno(res.a0);
152 } else {
153 /*
154 * !sdei_firmware_call means we failed to probe or called
155 * sdei_mark_interface_broken(). -EIO is not an error returned
156 * by sdei_to_linux_errno() and is used to suppress messages
157 * from this driver.
158 */
159 err = -EIO;
160 res.a0 = SDEI_NOT_SUPPORTED;
161 }
162
163 if (result)
164 *result = res.a0;
165
166 return err;
167}
168NOKPROBE_SYMBOL(invoke_sdei_fn);
169
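/* Look up an event by number on sdei_list. Returns NULL if it isn't known. */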
170static struct sdei_event *sdei_event_find(u32 event_num)
171{
172 struct sdei_event *e, *found = NULL;
173
174 lockdep_assert_held(&sdei_events_lock);
175
176 spin_lock(&sdei_list_lock);
177 list_for_each_entry(e, &sdei_list, list) {
178 if (e->event_num == event_num) {
179 found = e;
180 break;
181 }
182 }
183 spin_unlock(&sdei_list_lock);
184
185 return found;
186}
187
188int sdei_api_event_context(u32 query, u64 *result)
189{
190 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
191 result);
192}
193NOKPROBE_SYMBOL(sdei_api_event_context);
194
195static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
196{
197 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
198 0, 0, result);
199}
200
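/*
 * Allocate a new event and query firmware for its priority and type.
 * Shared events get a single registered_event; CPU-private events get one
 * per possible CPU. The new event is added to sdei_list.
 */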
201static struct sdei_event *sdei_event_create(u32 event_num,
202 sdei_event_callback *cb,
203 void *cb_arg)
204{
205 int err;
206 u64 result;
207 struct sdei_event *event;
208 struct sdei_registered_event *reg;
209
210 lockdep_assert_held(&sdei_events_lock);
211
212 event = kzalloc(sizeof(*event), GFP_KERNEL);
213 if (!event)
214 return ERR_PTR(-ENOMEM);
215
216 INIT_LIST_HEAD(&event->list);
217 event->event_num = event_num;
218
219 err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
220 &result);
221 if (err) {
222 kfree(event);
223 return ERR_PTR(err);
224 }
225 event->priority = result;
226
227 err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
228 &result);
229 if (err) {
230 kfree(event);
231 return ERR_PTR(err);
232 }
233 event->type = result;
234
235 if (event->type == SDEI_EVENT_TYPE_SHARED) {
236 reg = kzalloc(sizeof(*reg), GFP_KERNEL);
237 if (!reg) {
238 kfree(event);
239 return ERR_PTR(-ENOMEM);
240 }
241
242 reg->event_num = event_num;
243 reg->priority = event->priority;
244
245 reg->callback = cb;
246 reg->callback_arg = cb_arg;
247 event->registered = reg;
248 } else {
249 int cpu;
250 struct sdei_registered_event __percpu *regs;
251
252 regs = alloc_percpu(struct sdei_registered_event);
253 if (!regs) {
254 kfree(event);
255 return ERR_PTR(-ENOMEM);
256 }
257
258 for_each_possible_cpu(cpu) {
259 reg = per_cpu_ptr(regs, cpu);
260
261 reg->event_num = event->event_num;
262 reg->priority = event->priority;
263 reg->callback = cb;
264 reg->callback_arg = cb_arg;
265 }
266
267 event->private_registered = regs;
268 }
269
270 spin_lock(&sdei_list_lock);
271 list_add(&event->list, &sdei_list);
272 spin_unlock(&sdei_list_lock);
273
274 return event;
275}
276
277static void sdei_event_destroy_llocked(struct sdei_event *event)
278{
279 lockdep_assert_held(&sdei_events_lock);
280 lockdep_assert_held(&sdei_list_lock);
281
282 list_del(&event->list);
283
284 if (event->type == SDEI_EVENT_TYPE_SHARED)
285 kfree(event->registered);
286 else
287 free_percpu(event->private_registered);
288
289 kfree(event);
290}
291
292static void sdei_event_destroy(struct sdei_event *event)
293{
294 spin_lock(&sdei_list_lock);
295 sdei_event_destroy_llocked(event);
296 spin_unlock(&sdei_list_lock);
297}
298
299static int sdei_api_get_version(u64 *version)
300{
301 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
302}
303
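/*
 * Mask SDEI event delivery on the calling CPU. PE_MASK only affects the
 * local CPU, so this must run with preemption disabled.
 */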
304int sdei_mask_local_cpu(void)
305{
306 int err;
307
308 WARN_ON_ONCE(preemptible());
309
310 err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
311 if (err && err != -EIO) {
312 pr_warn_once("failed to mask CPU[%u]: %d\n",
313 smp_processor_id(), err);
314 return err;
315 }
316
317 return 0;
318}
319
320static void _ipi_mask_cpu(void *ignored)
321{
322 sdei_mask_local_cpu();
323}
324
325int sdei_unmask_local_cpu(void)
326{
327 int err;
328
329 WARN_ON_ONCE(preemptible());
330
331 err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
332 if (err && err != -EIO) {
333 pr_warn_once("failed to unmask CPU[%u]: %d\n",
334 smp_processor_id(), err);
335 return err;
336 }
337
338 return 0;
339}
340
341static void _ipi_unmask_cpu(void *ignored)
342{
343 sdei_unmask_local_cpu();
344}
345
346static void _ipi_private_reset(void *ignored)
347{
348 int err;
349
350 err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
351 NULL);
352 if (err && err != -EIO)
353 pr_warn_once("failed to reset CPU[%u]: %d\n",
354 smp_processor_id(), err);
355}
356
357static int sdei_api_shared_reset(void)
358{
359 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
360 NULL);
361}
362
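/*
 * Give up on the firmware interface: mask every CPU and clear the conduit
 * pointer so that later calls fail quietly with -EIO.
 */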
363static void sdei_mark_interface_broken(void)
364{
365 pr_err("disabling SDEI firmware interface\n");
366 on_each_cpu(&_ipi_mask_cpu, NULL, true);
367 sdei_firmware_call = NULL;
368}
369
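/* Reset the private event state on each CPU, then the shared state. */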
370static int sdei_platform_reset(void)
371{
372 int err;
373
374 on_each_cpu(&_ipi_private_reset, NULL, true);
375 err = sdei_api_shared_reset();
376 if (err) {
377 pr_err("Failed to reset platform: %d\n", err);
378 sdei_mark_interface_broken();
379 }
380
381 return err;
382}
383
384static int sdei_api_event_enable(u32 event_num)
385{
386 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
387 0, NULL);
388}
389
390/* Called directly by the hotplug callbacks */
391static void _local_event_enable(void *data)
392{
393 int err;
394 struct sdei_crosscall_args *arg = data;
395
396 WARN_ON_ONCE(preemptible());
397
398 err = sdei_api_event_enable(arg->event->event_num);
399
400 sdei_cross_call_return(arg, err);
401}
402
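/*
 * Enable a registered event: one firmware call for a shared event, a
 * cross-call for a private event. On success the reenable flag is set so
 * the hotplug and hibernate paths can restore the state later.
 */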
403int sdei_event_enable(u32 event_num)
404{
405 int err = -EINVAL;
406 struct sdei_event *event;
407
408 mutex_lock(&sdei_events_lock);
409 event = sdei_event_find(event_num);
410 if (!event) {
411 mutex_unlock(&sdei_events_lock);
412 return -ENOENT;
413 }
414
415
416 cpus_read_lock();
417 if (event->type == SDEI_EVENT_TYPE_SHARED)
418 err = sdei_api_event_enable(event->event_num);
419 else
420 err = sdei_do_cross_call(_local_event_enable, event);
421
422 if (!err) {
423 spin_lock(&sdei_list_lock);
424 event->reenable = true;
425 spin_unlock(&sdei_list_lock);
426 }
427 cpus_read_unlock();
428 mutex_unlock(&sdei_events_lock);
429
430 return err;
431}
432
433static int sdei_api_event_disable(u32 event_num)
434{
435 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
436 0, 0, NULL);
437}
438
439static void _ipi_event_disable(void *data)
440{
441 int err;
442 struct sdei_crosscall_args *arg = data;
443
444 err = sdei_api_event_disable(arg->event->event_num);
445
446 sdei_cross_call_return(arg, err);
447}
448
449int sdei_event_disable(u32 event_num)
450{
451 int err = -EINVAL;
452 struct sdei_event *event;
453
454 mutex_lock(&sdei_events_lock);
455 event = sdei_event_find(event_num);
456 if (!event) {
457 mutex_unlock(&sdei_events_lock);
458 return -ENOENT;
459 }
460
461 spin_lock(&sdei_list_lock);
462 event->reenable = false;
463 spin_unlock(&sdei_list_lock);
464
465 if (event->type == SDEI_EVENT_TYPE_SHARED)
466 err = sdei_api_event_disable(event->event_num);
467 else
468 err = sdei_do_cross_call(_ipi_event_disable, event);
469 mutex_unlock(&sdei_events_lock);
470
471 return err;
472}
473
474static int sdei_api_event_unregister(u32 event_num)
475{
476 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
477 0, 0, 0, NULL);
478}
479
480/* Called directly by the hotplug callbacks */
481static void _local_event_unregister(void *data)
482{
483 int err;
484 struct sdei_crosscall_args *arg = data;
485
486 WARN_ON_ONCE(preemptible());
487
488 err = sdei_api_event_unregister(arg->event->event_num);
489
490 sdei_cross_call_return(arg, err);
491}
492
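/* Unregister with firmware: one call for shared events, per-CPU otherwise. */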
493static int _sdei_event_unregister(struct sdei_event *event)
494{
495 lockdep_assert_held(&sdei_events_lock);
496
497 if (event->type == SDEI_EVENT_TYPE_SHARED)
498 return sdei_api_event_unregister(event->event_num);
499
500 return sdei_do_cross_call(_local_event_unregister, event);
501}
502
503int sdei_event_unregister(u32 event_num)
504{
505 int err;
506 struct sdei_event *event;
507
508 WARN_ON(in_nmi());
509
510 mutex_lock(&sdei_events_lock);
511 event = sdei_event_find(event_num);
512 do {
513 if (!event) {
514 pr_warn("Event %u not registered\n", event_num);
515 err = -ENOENT;
516 break;
517 }
518
519 spin_lock(&sdei_list_lock);
520 event->reregister = false;
521 event->reenable = false;
522 spin_unlock(&sdei_list_lock);
523
524 err = _sdei_event_unregister(event);
525 if (err)
526 break;
527
528 sdei_event_destroy(event);
529 } while (0);
530 mutex_unlock(&sdei_events_lock);
531
532 return err;
533}
534
535/*
536 * Unregister events, but don't destroy them as they are re-registered by
537 * sdei_reregister_shared().
538 */
539static int sdei_unregister_shared(void)
540{
541 int err = 0;
542 struct sdei_event *event;
543
544 mutex_lock(&sdei_events_lock);
545 spin_lock(&sdei_list_lock);
546 list_for_each_entry(event, &sdei_list, list) {
547 if (event->type != SDEI_EVENT_TYPE_SHARED)
548 continue;
549
550 err = _sdei_event_unregister(event);
551 if (err)
552 break;
553 }
554 spin_unlock(&sdei_list_lock);
555 mutex_unlock(&sdei_events_lock);
556
557 return err;
558}
559
560static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
561 void *arg, u64 flags, u64 affinity)
562{
563 return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
564 (unsigned long)entry_point, (unsigned long)arg,
565 flags, affinity, NULL);
566}
567
568/* Called directly by the hotplug callbacks */
569static void _local_event_register(void *data)
570{
571 int err;
572 struct sdei_registered_event *reg;
573 struct sdei_crosscall_args *arg = data;
574
575 WARN_ON(preemptible());
576
577 reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
578 err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
579 reg, 0, 0);
580
581 sdei_cross_call_return(arg, err);
582}
583
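/*
 * Register with firmware. Shared events are routed to any CPU (RM_ANY);
 * private events are registered on each CPU, and are rolled back if any
 * CPU fails to register.
 */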
584static int _sdei_event_register(struct sdei_event *event)
585{
586 int err;
587
588 lockdep_assert_held(&sdei_events_lock);
589
590 if (event->type == SDEI_EVENT_TYPE_SHARED)
591 return sdei_api_event_register(event->event_num,
592 sdei_entry_point,
593 event->registered,
594 SDEI_EVENT_REGISTER_RM_ANY, 0);
595
596 err = sdei_do_cross_call(_local_event_register, event);
597 if (err)
598 sdei_do_cross_call(_local_event_unregister, event);
599
600 return err;
601}
602
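/*
 * Illustrative caller sketch (the callback and event number below are
 * hypothetical, not part of this driver):
 *
 *	static int my_cb(u32 event_num, struct pt_regs *regs, void *arg)
 *	{
 *		return 0;
 *	}
 *
 *	err = sdei_event_register(event_num, my_cb, my_arg);
 *	if (!err)
 *		err = sdei_event_enable(event_num);
 */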
603int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
604{
605 int err;
606 struct sdei_event *event;
607
608 WARN_ON(in_nmi());
609
610 mutex_lock(&sdei_events_lock);
611 do {
612 if (sdei_event_find(event_num)) {
613 pr_warn("Event %u already registered\n", event_num);
614 err = -EBUSY;
615 break;
616 }
617
618 event = sdei_event_create(event_num, cb, arg);
619 if (IS_ERR(event)) {
620 err = PTR_ERR(event);
621 pr_warn("Failed to create event %u: %d\n", event_num,
622 err);
623 break;
624 }
625
626 cpus_read_lock();
627 err = _sdei_event_register(event);
628 if (err) {
629 sdei_event_destroy(event);
630 pr_warn("Failed to register event %u: %d\n", event_num,
631 err);
632 } else {
633 spin_lock(&sdei_list_lock);
634 event->reregister = true;
635 spin_unlock(&sdei_list_lock);
636 }
637 cpus_read_unlock();
638 } while (0);
639 mutex_unlock(&sdei_events_lock);
640
641 return err;
642}
643
644static int sdei_reregister_event_llocked(struct sdei_event *event)
645{
646 int err;
647
648 lockdep_assert_held(&sdei_events_lock);
649 lockdep_assert_held(&sdei_list_lock);
650
651 err = _sdei_event_register(event);
652 if (err) {
653 pr_err("Failed to re-register event %u\n", event->event_num);
654 sdei_event_destroy_llocked(event);
655 return err;
656 }
657
658 if (event->reenable) {
659 if (event->type == SDEI_EVENT_TYPE_SHARED)
660 err = sdei_api_event_enable(event->event_num);
661 else
662 err = sdei_do_cross_call(_local_event_enable, event);
663 }
664
665 if (err)
666 pr_err("Failed to re-enable event %u\n", event->event_num);
667
668 return err;
669}
670
671static int sdei_reregister_shared(void)
672{
673 int err = 0;
674 struct sdei_event *event;
675
676 mutex_lock(&sdei_events_lock);
677 spin_lock(&sdei_list_lock);
678 list_for_each_entry(event, &sdei_list, list) {
679 if (event->type != SDEI_EVENT_TYPE_SHARED)
680 continue;
681
682 if (event->reregister) {
683 err = sdei_reregister_event_llocked(event);
684 if (err)
685 break;
686 }
687 }
688 spin_unlock(&sdei_list_lock);
689 mutex_unlock(&sdei_events_lock);
690
691 return err;
692}
693
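/* CPU is going offline: unregister this CPU's private events and mask it. */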
694static int sdei_cpuhp_down(unsigned int cpu)
695{
696 struct sdei_event *event;
697 struct sdei_crosscall_args arg;
698
699 /* un-register private events */
700 spin_lock(&sdei_list_lock);
701 list_for_each_entry(event, &sdei_list, list) {
702 if (event->type == SDEI_EVENT_TYPE_SHARED)
703 continue;
704
705 CROSSCALL_INIT(arg, event);
706 /* call the cross-call function locally... */
707 _local_event_unregister(&arg);
708 if (arg.first_error)
709 pr_err("Failed to unregister event %u: %d\n",
710 event->event_num, arg.first_error);
711 }
712 spin_unlock(&sdei_list_lock);
713
714 return sdei_mask_local_cpu();
715}
716
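/* CPU is coming online: re-register/re-enable private events, then unmask. */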
717static int sdei_cpuhp_up(unsigned int cpu)
718{
719 struct sdei_event *event;
720 struct sdei_crosscall_args arg;
721
722 /* re-register/enable private events */
723 spin_lock(&sdei_list_lock);
724 list_for_each_entry(event, &sdei_list, list) {
725 if (event->type == SDEI_EVENT_TYPE_SHARED)
726 continue;
727
728 if (event->reregister) {
729 CROSSCALL_INIT(arg, event);
730 /* call the cross-call function locally... */
731 _local_event_register(&arg);
732 if (arg.first_error)
733 pr_err("Failed to re-register event %u: %d\n",
734 event->event_num, arg.first_error);
735 }
736
737 if (event->reenable) {
738 CROSSCALL_INIT(arg, event);
739 _local_event_enable(&arg);
740 if (arg.first_error)
741 pr_err("Failed to re-enable event %u: %d\n",
742 event->event_num, arg.first_error);
743 }
744 }
745 spin_unlock(&sdei_list_lock);
746
747 return sdei_unmask_local_cpu();
748}
749
750/* When entering idle, mask/unmask events for this cpu */
751static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
752 void *data)
753{
754 int rv;
755
756 switch (action) {
757 case CPU_PM_ENTER:
758 rv = sdei_mask_local_cpu();
759 break;
760 case CPU_PM_EXIT:
761 case CPU_PM_ENTER_FAILED:
762 rv = sdei_unmask_local_cpu();
763 break;
764 default:
765 return NOTIFY_DONE;
766 }
767
768 if (rv)
769 return notifier_from_errno(rv);
770
771 return NOTIFY_OK;
772}
773
774static struct notifier_block sdei_pm_nb = {
775 .notifier_call = sdei_pm_notifier,
776};
777
778static int sdei_device_suspend(struct device *dev)
779{
780 on_each_cpu(_ipi_mask_cpu, NULL, true);
781
782 return 0;
783}
784
785static int sdei_device_resume(struct device *dev)
786{
787 on_each_cpu(_ipi_unmask_cpu, NULL, true);
788
789 return 0;
790}
791
792/*
793 * We need all events to be reregistered when we resume from hibernate.
794 *
795 * The sequence is freeze -> thaw, then reboot, then freeze -> restore. We
796 * unregister events during freeze, then re-register and re-enable them
797 * during thaw and restore.
798 */
799static int sdei_device_freeze(struct device *dev)
800{
801 int err;
802
803 /* unregister private events */
804 cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
805
806 err = sdei_unregister_shared();
807 if (err)
808 return err;
809
810 return 0;
811}
812
813static int sdei_device_thaw(struct device *dev)
814{
815 int err;
816
817 /* re-register shared events */
818 err = sdei_reregister_shared();
819 if (err) {
820 pr_warn("Failed to re-register shared events...\n");
821 sdei_mark_interface_broken();
822 return err;
823 }
824
825 err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
826 &sdei_cpuhp_up, &sdei_cpuhp_down);
827 if (err)
828 pr_warn("Failed to re-register CPU hotplug notifier...\n");
829
830 return err;
831}
832
833static int sdei_device_restore(struct device *dev)
834{
835 int err;
836
837 err = sdei_platform_reset();
838 if (err)
839 return err;
840
841 return sdei_device_thaw(dev);
842}
843
844static const struct dev_pm_ops sdei_pm_ops = {
845 .suspend = sdei_device_suspend,
846 .resume = sdei_device_resume,
847 .freeze = sdei_device_freeze,
848 .thaw = sdei_device_thaw,
849 .restore = sdei_device_restore,
850};
851
852/*
853 * Mask all CPUs and unregister all events on panic, reboot or kexec.
854 */
855static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
856 void *data)
857{
858 /*
859 * We are going to reset the interface; after this there is no point
860 * doing work when we take CPUs offline.
861 */
862 cpuhp_remove_state(CPUHP_AP_ARM_SDEI_STARTING);
863
864 sdei_platform_reset();
865
866 return NOTIFY_OK;
867}
868
869static struct notifier_block sdei_reboot_nb = {
870 .notifier_call = sdei_reboot_notifier,
871};
872
873static void sdei_smccc_smc(unsigned long function_id,
874 unsigned long arg0, unsigned long arg1,
875 unsigned long arg2, unsigned long arg3,
876 unsigned long arg4, struct arm_smccc_res *res)
877{
878 arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
879}
880NOKPROBE_SYMBOL(sdei_smccc_smc);
881
882static void sdei_smccc_hvc(unsigned long function_id,
883 unsigned long arg0, unsigned long arg1,
884 unsigned long arg2, unsigned long arg3,
885 unsigned long arg4, struct arm_smccc_res *res)
886{
887 arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
888}
889NOKPROBE_SYMBOL(sdei_smccc_hvc);
890
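/*
 * Register the SDEI notification for a GHES entry: pick the normal or
 * critical callback based on the priority firmware reports, then register
 * and enable the event.
 */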
891int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
892 sdei_event_callback *critical_cb)
893{
894 int err;
895 u64 result;
896 u32 event_num;
897 sdei_event_callback *cb;
898
899 if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
900 return -EOPNOTSUPP;
901
902 event_num = ghes->generic->notify.vector;
903 if (event_num == 0) {
904 /*
905 * Event 0 is reserved by the specification for
906 * SDEI_EVENT_SIGNAL.
907 */
908 return -EINVAL;
909 }
910
911 err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
912 &result);
913 if (err)
914 return err;
915
916 if (result == SDEI_EVENT_PRIORITY_CRITICAL)
917 cb = critical_cb;
918 else
919 cb = normal_cb;
920
921 err = sdei_event_register(event_num, cb, ghes);
922 if (!err)
923 err = sdei_event_enable(event_num);
924
925 return err;
926}
927
928int sdei_unregister_ghes(struct ghes *ghes)
929{
930 int i;
931 int err;
932 u32 event_num = ghes->generic->notify.vector;
933
934 might_sleep();
935
936 if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
937 return -EOPNOTSUPP;
938
939 /*
940 * The event may be running on another CPU. Disable it
941 * to stop new events, then try to unregister a few times.
942 */
943 err = sdei_event_disable(event_num);
944 if (err)
945 return err;
946
947 for (i = 0; i < 3; i++) {
948 err = sdei_event_unregister(event_num);
949 if (err != -EINPROGRESS)
950 break;
951
952 schedule();
953 }
954
955 return err;
956}
957
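/*
 * Pick SMC or HVC from the DT "method" property, or from the ACPI PSCI
 * conduit, and install the matching firmware-call helper.
 */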
958static int sdei_get_conduit(struct platform_device *pdev)
959{
960 const char *method;
961 struct device_node *np = pdev->dev.of_node;
962
963 sdei_firmware_call = NULL;
964 if (np) {
965 if (of_property_read_string(np, "method", &method)) {
966 pr_warn("missing \"method\" property\n");
967 return SMCCC_CONDUIT_NONE;
968 }
969
970 if (!strcmp("hvc", method)) {
971 sdei_firmware_call = &sdei_smccc_hvc;
972 return SMCCC_CONDUIT_HVC;
973 } else if (!strcmp("smc", method)) {
974 sdei_firmware_call = &sdei_smccc_smc;
975 return SMCCC_CONDUIT_SMC;
976 }
977
978 pr_warn("invalid \"method\" property: %s\n", method);
979 } else if (IS_ENABLED(CONFIG_ACPI) && !acpi_disabled) {
980 if (acpi_psci_use_hvc()) {
981 sdei_firmware_call = &sdei_smccc_hvc;
982 return SMCCC_CONDUIT_HVC;
983 } else {
984 sdei_firmware_call = &sdei_smccc_smc;
985 return SMCCC_CONDUIT_SMC;
986 }
987 }
988
989 return SMCCC_CONDUIT_NONE;
990}
991
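/*
 * Check the firmware implements SDEI v1.x, reset it, fetch the arch entry
 * point, then register the CPU PM, reboot and hotplug notifiers that keep
 * event state in sync.
 */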
992static int sdei_probe(struct platform_device *pdev)
993{
994 int err;
995 u64 ver = 0;
996 int conduit;
997
998 conduit = sdei_get_conduit(pdev);
999 if (!sdei_firmware_call)
1000 return 0;
1001
1002 err = sdei_api_get_version(&ver);
1003 if (err == -EOPNOTSUPP)
1004 pr_err("advertised but not implemented in platform firmware\n");
1005 if (err) {
1006 pr_err("Failed to get SDEI version: %d\n", err);
1007 sdei_mark_interface_broken();
1008 return err;
1009 }
1010
1011 pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
1012 (int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
1013 (int)SDEI_VERSION_VENDOR(ver));
1014
1015 if (SDEI_VERSION_MAJOR(ver) != 1) {
1016 pr_warn("Conflicting SDEI version detected.\n");
1017 sdei_mark_interface_broken();
1018 return -EINVAL;
1019 }
1020
1021 err = sdei_platform_reset();
1022 if (err)
1023 return err;
1024
1025 sdei_entry_point = sdei_arch_get_entry_point(conduit);
1026 if (!sdei_entry_point) {
1027 /* Not supported due to hardware or boot configuration */
1028 sdei_mark_interface_broken();
1029 return 0;
1030 }
1031
1032 err = cpu_pm_register_notifier(&sdei_pm_nb);
1033 if (err) {
1034 pr_warn("Failed to register CPU PM notifier...\n");
1035 goto error;
1036 }
1037
1038 err = register_reboot_notifier(&sdei_reboot_nb);
1039 if (err) {
1040 pr_warn("Failed to register reboot notifier...\n");
1041 goto remove_cpupm;
1042 }
1043
1044 err = cpuhp_setup_state(CPUHP_AP_ARM_SDEI_STARTING, "SDEI",
1045 &sdei_cpuhp_up, &sdei_cpuhp_down);
1046 if (err) {
1047 pr_warn("Failed to register CPU hotplug notifier...\n");
1048 goto remove_reboot;
1049 }
1050
1051 return 0;
1052
1053remove_reboot:
1054 unregister_reboot_notifier(&sdei_reboot_nb);
1055
1056remove_cpupm:
1057 cpu_pm_unregister_notifier(&sdei_pm_nb);
1058
1059error:
1060 sdei_mark_interface_broken();
1061 return err;
1062}
1063
1064static const struct of_device_id sdei_of_match[] = {
1065 { .compatible = "arm,sdei-1.0" },
1066 {}
1067};
1068
1069static struct platform_driver sdei_driver = {
1070 .driver = {
1071 .name = "sdei",
1072 .pm = &sdei_pm_ops,
1073 .of_match_table = sdei_of_match,
1074 },
1075 .probe = sdei_probe,
1076};
1077
1078static bool __init sdei_present_acpi(void)
1079{
1080 acpi_status status;
1081 struct acpi_table_header *sdei_table_header;
1082
1083 if (acpi_disabled)
1084 return false;
1085
1086 status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
1087 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
1088 const char *msg = acpi_format_exception(status);
1089
1090 pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
1091 }
1092 if (ACPI_FAILURE(status))
1093 return false;
1094
1095 acpi_put_table(sdei_table_header);
1096
1097 return true;
1098}
1099
1100static int __init sdei_init(void)
1101{
1102 int ret = platform_driver_register(&sdei_driver);
1103
1104 if (!ret && sdei_present_acpi()) {
1105 struct platform_device *pdev;
1106
1107 pdev = platform_device_register_simple(sdei_driver.driver.name,
1108 0, NULL, 0);
1109 if (IS_ERR(pdev))
1110 pr_info("Failed to register ACPI:SDEI platform device %ld\n",
1111 PTR_ERR(pdev));
1112 }
1113
1114 return ret;
1115}
1116
1117/*
1118 * On an ACPI system SDEI needs to be ready before HEST:GHES tries to register
1119 * its events. ACPI is initialised from a subsys_initcall(); GHES is initialised
1120 * by device_initcall(). We want to be called in the middle.
1121 */
1122subsys_initcall_sync(sdei_init);
1123
1124int sdei_event_handler(struct pt_regs *regs,
1125 struct sdei_registered_event *arg)
1126{
1127 int err;
1128 mm_segment_t orig_addr_limit;
1129 u32 event_num = arg->event_num;
1130
1131 /*
1132 * Save and restore 'fs'.
1133 * The architecture's entry code saves/restores 'fs' when taking an
1134 * exception from the kernel. This ensures addr_limit isn't inherited
1135 * if you interrupted something that allowed the uaccess routines to
1136 * access kernel memory.
1137 * Do the same here because this doesn't come via the same entry code.
1138 */
1139 orig_addr_limit = force_uaccess_begin();
1140
1141 err = arg->callback(event_num, regs, arg->callback_arg);
1142 if (err)
1143 pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
1144 event_num, smp_processor_id(), err);
1145
1146 force_uaccess_end(orig_addr_limit);
1147
1148 return err;
1149}
1150NOKPROBE_SYMBOL(sdei_event_handler);