Loading...
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright 2016 IBM Corporation.
4 */
5#include <linux/types.h>
6#include <linux/kernel.h>
7#include <linux/irq.h>
8#include <linux/smp.h>
9#include <linux/interrupt.h>
10#include <linux/irqdomain.h>
11#include <linux/cpu.h>
12#include <linux/of.h>
13
14#include <asm/smp.h>
15#include <asm/irq.h>
16#include <asm/errno.h>
17#include <asm/xics.h>
18#include <asm/io.h>
19#include <asm/opal.h>
20#include <asm/kvm_ppc.h>
21
/*
 * Prepare this CPU for being taken down: make sure no IPI is left
 * pending for our hardware thread in the OPAL interrupt controller.
 */
static void icp_opal_teardown_cpu(void)
{
	/* Writing 0xff to the MFRR clears any pending IPI */
	opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
}
29
static void icp_opal_flush_ipi(void)
{
	/*
	 * We take the ipi irq and never return, so we need to EOI the
	 * IPI, but want to leave our priority 0.
	 *
	 * The EOI word is the CPPR (0x00) in the top byte ORed with the
	 * IPI vector; a positive return from opal_int_eoi() indicates
	 * more interrupts are pending, so force a replay.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging idle loop instead?
	 * Or creating some task to be scheduled?
	 */
	if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
		force_external_irq_replay();
}
43
44static unsigned int icp_opal_get_xirr(void)
45{
46 unsigned int kvm_xirr;
47 __be32 hw_xirr;
48 int64_t rc;
49
50 /* Handle an interrupt latched by KVM first */
51 kvm_xirr = kvmppc_get_xics_latch();
52 if (kvm_xirr)
53 return kvm_xirr;
54
55 /* Then ask OPAL */
56 rc = opal_int_get_xirr(&hw_xirr, false);
57 if (rc < 0)
58 return 0;
59 return be32_to_cpu(hw_xirr);
60}
61
62static unsigned int icp_opal_get_irq(void)
63{
64 unsigned int xirr;
65 unsigned int vec;
66 unsigned int irq;
67
68 xirr = icp_opal_get_xirr();
69 vec = xirr & 0x00ffffff;
70 if (vec == XICS_IRQ_SPURIOUS)
71 return 0;
72
73 irq = irq_find_mapping(xics_host, vec);
74 if (likely(irq)) {
75 xics_push_cppr(vec);
76 return irq;
77 }
78
79 /* We don't have a linux mapping, so have rtas mask it. */
80 xics_mask_unknown_vec(vec);
81
82 /* We might learn about it later, so EOI it */
83 if (opal_int_eoi(xirr) > 0)
84 force_external_irq_replay();
85
86 return 0;
87}
88
89static void icp_opal_set_cpu_priority(unsigned char cppr)
90{
91 /*
92 * Here be dragons. The caller has asked to allow only IPI's and not
93 * external interrupts. But OPAL XIVE doesn't support that. So instead
94 * of allowing no interrupts allow all. That's still not right, but
95 * currently the only caller who does this is xics_migrate_irqs_away()
96 * and it works in that case.
97 */
98 if (cppr >= DEFAULT_PRIORITY)
99 cppr = LOWEST_PRIORITY;
100
101 xics_set_base_cppr(cppr);
102 opal_int_set_cppr(cppr);
103 iosync();
104}
105
static void icp_opal_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	/* Order prior accesses before signalling end-of-interrupt */
	iosync();

	/*
	 * EOI tells us whether there are more interrupts to fetch.
	 *
	 * Some HW implementations might not be able to send us another
	 * external interrupt in that case, so we force a replay.
	 */
	if (opal_int_eoi((xics_pop_cppr() << 24) | hw_irq) > 0)
		force_external_irq_replay();
}
123
124#ifdef CONFIG_SMP
125
126static void icp_opal_cause_ipi(int cpu)
127{
128 int hw_cpu = get_hard_smp_processor_id(cpu);
129
130 kvmppc_set_host_ipi(cpu);
131 opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
132}
133
/* IPI interrupt handler: acknowledge the IPI, then demultiplex it. */
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* Ack: clear the KVM host-IPI flag and reset our MFRR to 0xff */
	kvmppc_clear_host_ipi(cpu);
	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);

	/* Dispatch to the individual IPI message handlers */
	return smp_ipi_demux();
}
143
/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_opal_flush_interrupt(void)
{
	unsigned int xirr;
	unsigned int vec;

	/*
	 * Drain every pending interrupt: a positive return from
	 * opal_int_eoi() means more are pending, so keep looping.
	 */
	do {
		xirr = icp_opal_get_xirr();
		vec = xirr & 0x00ffffff;
		if (vec == XICS_IRQ_SPURIOUS)
			break;
		if (vec == XICS_IPI) {
			/* Clear pending IPI */
			int cpu = smp_processor_id();
			kvmppc_clear_host_ipi(cpu);
			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
		} else {
			/* No device interrupt should target an offline CPU */
			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
			       "disabling\n", vec);
			xics_mask_unknown_vec(vec);
		}

		/* EOI the interrupt */
	} while (opal_int_eoi(xirr) > 0);
}
172
173#endif /* CONFIG_SMP */
174
/* ICP backend callbacks implemented via OPAL firmware calls */
static const struct icp_ops icp_opal_ops = {
	.get_irq = icp_opal_get_irq,
	.eoi = icp_opal_eoi,
	.set_priority = icp_opal_set_cpu_priority,
	.teardown_cpu = icp_opal_teardown_cpu,
	.flush_ipi = icp_opal_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_opal_ipi_action,
	.cause_ipi = icp_opal_cause_ipi,
#endif
};
186
187int __init icp_opal_init(void)
188{
189 struct device_node *np;
190
191 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
192 if (!np)
193 return -ENODEV;
194
195 icp_ops = &icp_opal_ops;
196
197 printk("XICS: Using OPAL ICP fallbacks\n");
198
199 of_node_put(np);
200 return 0;
201}
202
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright 2016 IBM Corporation.
4 */
5#include <linux/types.h>
6#include <linux/kernel.h>
7#include <linux/irq.h>
8#include <linux/smp.h>
9#include <linux/interrupt.h>
10#include <linux/cpu.h>
11#include <linux/of.h>
12
13#include <asm/smp.h>
14#include <asm/irq.h>
15#include <asm/errno.h>
16#include <asm/xics.h>
17#include <asm/io.h>
18#include <asm/opal.h>
19#include <asm/kvm_ppc.h>
20
/* Prepare this CPU for offlining: clear any pending IPI in OPAL. */
static void icp_opal_teardown_cpu(void)
{
	int hw_cpu = hard_smp_processor_id();

	/* Clear any pending IPI (MFRR of 0xff means no IPI pending) */
	opal_int_set_mfrr(hw_cpu, 0xff);
}
28
static void icp_opal_flush_ipi(void)
{
	/*
	 * We take the ipi irq and never return, so we need to EOI the
	 * IPI, but want to leave our priority 0.
	 *
	 * The EOI word is the CPPR (0x00) in the top byte ORed with the
	 * IPI vector; a positive return from opal_int_eoi() indicates
	 * more interrupts are pending, so force a replay.
	 *
	 * Should we check all the other interrupts too?
	 * Should we be flagging idle loop instead?
	 * Or creating some task to be scheduled?
	 */
	if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
		force_external_irq_replay();
}
42
/*
 * Fetch the current XIRR (CPPR in the top byte, vector in the low 24
 * bits) for this CPU, preferring an interrupt latched by KVM over the
 * OPAL firmware value. Returns 0 if the OPAL call fails.
 */
static unsigned int icp_opal_get_xirr(void)
{
	unsigned int kvm_xirr;
	__be32 hw_xirr;
	int64_t rc;

	/* Handle an interrupt latched by KVM first */
	kvm_xirr = kvmppc_get_xics_latch();
	if (kvm_xirr)
		return kvm_xirr;

	/* Then ask OPAL */
	rc = opal_int_get_xirr(&hw_xirr, false);
	if (rc < 0)
		return 0;
	return be32_to_cpu(hw_xirr);
}
60
/*
 * Fetch the next pending interrupt and translate its hardware vector
 * to a Linux irq number. Returns 0 when nothing is pending or the
 * vector has no Linux mapping.
 */
static unsigned int icp_opal_get_irq(void)
{
	unsigned int xirr;
	unsigned int vec;
	unsigned int irq;

	xirr = icp_opal_get_xirr();
	vec = xirr & 0x00ffffff;
	if (vec == XICS_IRQ_SPURIOUS)
		return 0;

	irq = irq_find_mapping(xics_host, vec);
	if (likely(irq)) {
		/* Record the in-service priority before handling */
		xics_push_cppr(vec);
		return irq;
	}

	/* We don't have a linux mapping, so have rtas mask it. */
	xics_mask_unknown_vec(vec);

	/* We might learn about it later, so EOI it */
	if (opal_int_eoi(xirr) > 0)
		force_external_irq_replay();

	return 0;
}
87
/* Set this CPU's interrupt priority (CPPR) in both the XICS state
 * and the OPAL firmware. */
static void icp_opal_set_cpu_priority(unsigned char cppr)
{
	/*
	 * Here be dragons. The caller has asked to allow only IPI's and not
	 * external interrupts. But OPAL XIVE doesn't support that. So instead
	 * of allowing no interrupts allow all. That's still not right, but
	 * currently the only caller who does this is xics_migrate_irqs_away()
	 * and it works in that case.
	 */
	if (cppr >= DEFAULT_PRIORITY)
		cppr = LOWEST_PRIORITY;

	xics_set_base_cppr(cppr);
	opal_int_set_cppr(cppr);
	iosync();
}
104
/* End-of-interrupt handler: pop the saved CPPR and EOI via OPAL. */
static void icp_opal_eoi(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int64_t rc;

	/* Order prior accesses before signalling end-of-interrupt */
	iosync();
	rc = opal_int_eoi((xics_pop_cppr() << 24) | hw_irq);

	/*
	 * EOI tells us whether there are more interrupts to fetch.
	 *
	 * Some HW implementations might not be able to send us another
	 * external interrupt in that case, so we force a replay.
	 */
	if (rc > 0)
		force_external_irq_replay();
}
122
123#ifdef CONFIG_SMP
124
/* Send an IPI to @cpu by raising its MFRR to the IPI priority. */
static void icp_opal_cause_ipi(int cpu)
{
	int hw_cpu = get_hard_smp_processor_id(cpu);

	/* Flag first so the KVM exit path can recognize a host IPI */
	kvmppc_set_host_ipi(cpu);
	opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}
132
/* IPI interrupt handler: acknowledge the IPI, then demultiplex it. */
static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* Ack: clear the KVM host-IPI flag and reset our MFRR to 0xff */
	kvmppc_clear_host_ipi(cpu);
	opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);

	/* Dispatch to the individual IPI message handlers */
	return smp_ipi_demux();
}
142
/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_opal_flush_interrupt(void)
{
	unsigned int xirr;
	unsigned int vec;

	/*
	 * Drain every pending interrupt: a positive return from
	 * opal_int_eoi() means more are pending, so keep looping.
	 */
	do {
		xirr = icp_opal_get_xirr();
		vec = xirr & 0x00ffffff;
		if (vec == XICS_IRQ_SPURIOUS)
			break;
		if (vec == XICS_IPI) {
			/* Clear pending IPI */
			int cpu = smp_processor_id();
			kvmppc_clear_host_ipi(cpu);
			opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
		} else {
			/* No device interrupt should target an offline CPU */
			pr_err("XICS: hw interrupt 0x%x to offline cpu, "
			       "disabling\n", vec);
			xics_mask_unknown_vec(vec);
		}

		/* EOI the interrupt */
	} while (opal_int_eoi(xirr) > 0);
}
171
172#endif /* CONFIG_SMP */
173
/* ICP backend callbacks implemented via OPAL firmware calls */
static const struct icp_ops icp_opal_ops = {
	.get_irq = icp_opal_get_irq,
	.eoi = icp_opal_eoi,
	.set_priority = icp_opal_set_cpu_priority,
	.teardown_cpu = icp_opal_teardown_cpu,
	.flush_ipi = icp_opal_flush_ipi,
#ifdef CONFIG_SMP
	.ipi_action = icp_opal_ipi_action,
	.cause_ipi = icp_opal_cause_ipi,
#endif
};
185
186int icp_opal_init(void)
187{
188 struct device_node *np;
189
190 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
191 if (!np)
192 return -ENODEV;
193
194 icp_ops = &icp_opal_ops;
195
196 printk("XICS: Using OPAL ICP fallbacks\n");
197
198 return 0;
199}
200