/*
 * irqchip.c: Common API for in kernel interrupt controllers
 * Copyright (c) 2007, Intel Corporation.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (c) 2013, Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * This file is derived from virt/kvm/irq_comm.c.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <trace/events/kvm.h>
#include "irq.h"

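/*
 * Returns true if an ack notifier is registered for the GSI that the given
 * (irqchip, pin) pair is currently routed to.  Both the routing table and
 * the notifier list are walked under rcu_read_lock().
 */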
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi;

        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi) {
                                rcu_read_unlock();
                                return true;
                        }

        rcu_read_unlock();

        return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

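/*
 * Invoked when the guest acknowledges an interrupt on (irqchip, pin):
 * look up the GSI that the pin is routed to and call the irq_acked()
 * callback of every ack notifier registered for that GSI.
 */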
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi;

        trace_kvm_ack_irq(irqchip, pin);

        rcu_read_lock();
        gsi = rcu_dereference(kvm->irq_routing)->chip[irqchip][pin];
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi)
                                kian->irq_acked(kian);
        rcu_read_unlock();
}

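/*
 * Add an ack notifier to the per-VM list.  Insertion is serialized by
 * kvm->irq_lock; readers walk the list under RCU.
 *
 * Minimal usage sketch (hypothetical caller; my_ack_fn is not part of
 * this file):
 *
 *	kian->gsi = gsi;
 *	kian->irq_acked = my_ack_fn;
 *	kvm_register_irq_ack_notifier(kvm, kian);
 */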
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
#ifdef __KVM_HAVE_IOAPIC
        kvm_vcpu_request_scan_ioapic(kvm);
#endif
}

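/*
 * Remove an ack notifier registered with kvm_register_irq_ack_notifier().
 * synchronize_rcu() guarantees that no RCU reader still holds a reference
 * to the notifier once this returns, so the caller may free it.
 */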
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_rcu();
#ifdef __KVM_HAVE_IOAPIC
        kvm_vcpu_request_scan_ioapic(kvm);
#endif
}

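/*
 * Inject an MSI described by userspace (the KVM_SIGNAL_MSI ioctl).  The
 * message is delivered through kvm_set_msi() as a userspace-sourced,
 * edge-style injection; no routing table entry is consumed, the route is
 * built on the stack from the ioctl payload.
 */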
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
        struct kvm_kernel_irq_routing_entry route;

        if (!irqchip_in_kernel(kvm) || msi->flags != 0)
                return -EINVAL;

        route.msi.address_lo = msi->address_lo;
        route.msi.address_hi = msi->address_hi;
        route.msi.data = msi->data;

        return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}

/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
{
        struct kvm_kernel_irq_routing_entry *e, irq_set[KVM_NR_IRQCHIPS];
        int ret = -1, i = 0;
        struct kvm_irq_routing_table *irq_rt;

        trace_kvm_set_irq(irq, level, irq_source_id);

        /* Not possible to detect if the guest uses the PIC or the
         * IOAPIC.  So set the bit in both.  The guest will ignore
         * writes to the unused one.
         */
        rcu_read_lock();
        irq_rt = rcu_dereference(kvm->irq_routing);
        if (irq < irq_rt->nr_rt_entries)
                hlist_for_each_entry(e, &irq_rt->map[irq], link)
                        irq_set[i++] = *e;
        rcu_read_unlock();

        while (i--) {
                int r;
                r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
                                   line_status);
                if (r < 0)
                        continue;

                ret = r + ((ret < 0) ? 0 : ret);
        }

        return ret;
}

void kvm_free_irq_routing(struct kvm *kvm)
{
        /*
         * Called only during vm destruction.  Nobody can use the pointer
         * at this stage.
         */
        kfree(kvm->irq_routing);
}

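/*
 * Translate one userspace routing entry and link it into the new table,
 * rejecting a GSI that is already bound to the same irqchip or mixed
 * with an MSI route.
 */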
static int setup_routing_entry(struct kvm_irq_routing_table *rt,
                               struct kvm_kernel_irq_routing_entry *e,
                               const struct kvm_irq_routing_entry *ue)
{
        int r = -EINVAL;
        struct kvm_kernel_irq_routing_entry *ei;

        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and MSI.
         */
        hlist_for_each_entry(ei, &rt->map[ue->gsi], link)
                if (ei->type == KVM_IRQ_ROUTING_MSI ||
                    ue->type == KVM_IRQ_ROUTING_MSI ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return r;

        e->gsi = ue->gsi;
        e->type = ue->type;
        r = kvm_set_routing_entry(rt, e, ue);
        if (r)
                goto out;

        hlist_add_head(&e->link, &rt->map[e->gsi]);
        r = 0;
out:
        return r;
}

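/*
 * Replace the VM's irq routing table.  A new table large enough for the
 * highest requested GSI is built from the userspace entries, published
 * under kvm->irq_lock, and the old table is freed only after
 * synchronize_rcu() guarantees no reader can still see it.
 */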
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *ue,
                        unsigned nr,
                        unsigned flags)
{
        struct kvm_irq_routing_table *new, *old;
        u32 i, j, nr_rt_entries = 0;
        int r;

        for (i = 0; i < nr; ++i) {
                if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
                        return -EINVAL;
                nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
        }

        nr_rt_entries += 1;

        new = kzalloc(sizeof(*new) + (nr_rt_entries * sizeof(struct hlist_head))
                      + (nr * sizeof(struct kvm_kernel_irq_routing_entry)),
                      GFP_KERNEL);

        if (!new)
                return -ENOMEM;

        new->rt_entries = (void *)&new->map[nr_rt_entries];

        new->nr_rt_entries = nr_rt_entries;
        for (i = 0; i < KVM_NR_IRQCHIPS; i++)
                for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++)
                        new->chip[i][j] = -1;

        for (i = 0; i < nr; ++i) {
                r = -EINVAL;
                if (ue->flags)
                        goto out;
                r = setup_routing_entry(new, &new->rt_entries[i], ue);
                if (r)
                        goto out;
                ++ue;
        }

        mutex_lock(&kvm->irq_lock);
        old = kvm->irq_routing;
        kvm_irq_routing_update(kvm, new);
        mutex_unlock(&kvm->irq_lock);

        synchronize_rcu();

        new = old;
        r = 0;

out:
        kfree(new);
        return r;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * irqchip.c: Common API for in kernel interrupt controllers
 * Copyright (c) 2007, Intel Corporation.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (c) 2013, Alexander Graf <agraf@suse.de>
 *
 * This file is derived from virt/kvm/irq_comm.c.
 *
 * Authors:
 *   Yaozu (Eddie) Dong <Eddie.dong@intel.com>
 *   Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/srcu.h>
#include <linux/export.h>
#include <linux/nospec.h>	/* array_index_nospec(), used in setup_routing_entry() */
#include <trace/events/kvm.h>
#include "irq.h"

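/*
 * Copy all routing entries registered for @gsi into @entries and return
 * how many were copied.  Callers must either hold kvm->irq_lock or be in
 * an SRCU read-side section on kvm->irq_srcu, which is exactly what the
 * srcu_dereference_check() below asserts.
 */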
int kvm_irq_map_gsi(struct kvm *kvm,
                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
{
        struct kvm_irq_routing_table *irq_rt;
        struct kvm_kernel_irq_routing_entry *e;
        int n = 0;

        irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
                                        lockdep_is_held(&kvm->irq_lock));
        if (irq_rt && gsi < irq_rt->nr_rt_entries) {
                hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
                        entries[n] = *e;
                        ++n;
                }
        }

        return n;
}

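/*
 * Translate an (irqchip, pin) pair into the GSI it is routed to, or -1 if
 * the pin is not mapped.  Must be called inside an SRCU read-side section
 * on kvm->irq_srcu.
 */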
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_routing_table *irq_rt;

        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        return irq_rt->chip[irqchip][pin];
}

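/*
 * Inject an MSI described by userspace (the KVM_SIGNAL_MSI ioctl).  The
 * flags and device ID are forwarded so that an irqchip that needs a
 * device ID for MSI translation (flags & KVM_MSI_VALID_DEVID) can use it.
 */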
int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
{
        struct kvm_kernel_irq_routing_entry route;

        if (!irqchip_in_kernel(kvm) || (msi->flags & ~KVM_MSI_VALID_DEVID))
                return -EINVAL;

        route.msi.address_lo = msi->address_lo;
        route.msi.address_hi = msi->address_hi;
        route.msi.data = msi->data;
        route.msi.flags = msi->flags;
        route.msi.devid = msi->devid;

        return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
}

/*
 * Return value:
 *  < 0   Interrupt was ignored (masked or not delivered for other reasons)
 *  = 0   Interrupt was coalesced (previous irq is still pending)
 *  > 0   Number of CPUs interrupt was delivered to
 */
int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
                bool line_status)
{
        struct kvm_kernel_irq_routing_entry irq_set[KVM_NR_IRQCHIPS];
        int ret = -1, i, idx;

        trace_kvm_set_irq(irq, level, irq_source_id);

        /* Not possible to detect if the guest uses the PIC or the
         * IOAPIC.  So set the bit in both.  The guest will ignore
         * writes to the unused one.
         */
        idx = srcu_read_lock(&kvm->irq_srcu);
        i = kvm_irq_map_gsi(kvm, irq_set, irq);
        srcu_read_unlock(&kvm->irq_srcu, idx);

        while (i--) {
                int r;
                r = irq_set[i].set(&irq_set[i], kvm, irq_source_id, level,
                                   line_status);
                if (r < 0)
                        continue;

                ret = r + ((ret < 0) ? 0 : ret);
        }

        return ret;
}

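/*
 * Free a routing table along with every per-GSI entry hanging off its
 * hash lists.  Safe to call with a NULL table.
 */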
static void free_irq_routing_table(struct kvm_irq_routing_table *rt)
{
        int i;

        if (!rt)
                return;

        for (i = 0; i < rt->nr_rt_entries; ++i) {
                struct kvm_kernel_irq_routing_entry *e;
                struct hlist_node *n;

                hlist_for_each_entry_safe(e, n, &rt->map[i], link) {
                        hlist_del(&e->link);
                        kfree(e);
                }
        }

        kfree(rt);
}

void kvm_free_irq_routing(struct kvm *kvm)
{
        /*
         * Called only during vm destruction.  Nobody can use the pointer
         * at this stage.
         */
        struct kvm_irq_routing_table *rt = rcu_access_pointer(kvm->irq_routing);

        free_irq_routing_table(rt);
}

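/*
 * Translate one userspace routing entry and link it into the new table.
 * A GSI may fan out to several irqchip pins, but may not be bound to the
 * same irqchip twice or mixed with a non-irqchip (e.g. MSI) route.  For
 * irqchip routes the reverse (chip, pin) -> GSI mapping is recorded too.
 */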
static int setup_routing_entry(struct kvm *kvm,
                               struct kvm_irq_routing_table *rt,
                               struct kvm_kernel_irq_routing_entry *e,
                               const struct kvm_irq_routing_entry *ue)
{
        struct kvm_kernel_irq_routing_entry *ei;
        int r;
        u32 gsi = array_index_nospec(ue->gsi, KVM_MAX_IRQ_ROUTES);

        /*
         * Do not allow GSI to be mapped to the same irqchip more than once.
         * Allow only one to one mapping between GSI and non-irqchip routing.
         */
        hlist_for_each_entry(ei, &rt->map[gsi], link)
                if (ei->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->type != KVM_IRQ_ROUTING_IRQCHIP ||
                    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
                        return -EINVAL;

        e->gsi = gsi;
        e->type = ue->type;
        r = kvm_set_routing_entry(kvm, e, ue);
        if (r)
                return r;
        if (e->type == KVM_IRQ_ROUTING_IRQCHIP)
                rt->chip[e->irqchip.irqchip][e->irqchip.pin] = e->gsi;

        hlist_add_head(&e->link, &rt->map[e->gsi]);

        return 0;
}

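/*
 * Default arch hooks: architectures that care about routing updates, or
 * that want to forbid user-defined routing altogether, override these
 * weak definitions.
 */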
void __attribute__((weak)) kvm_arch_irq_routing_update(struct kvm *kvm)
{
}

bool __weak kvm_arch_can_set_irq_routing(struct kvm *kvm)
{
        return true;
}

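/*
 * Replace the VM's irq routing table with one built from the @nr entries
 * supplied by userspace.  The new table is published with
 * rcu_assign_pointer() under kvm->irq_lock; the old one is torn down only
 * after synchronize_srcu_expedited() ensures no kvm->irq_srcu reader can
 * still reference it.  On failure the partially built table is freed and
 * the old routing stays in place.
 */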
int kvm_set_irq_routing(struct kvm *kvm,
                        const struct kvm_irq_routing_entry *ue,
                        unsigned nr,
                        unsigned flags)
{
        struct kvm_irq_routing_table *new, *old;
        struct kvm_kernel_irq_routing_entry *e;
        u32 i, j, nr_rt_entries = 0;
        int r;

        for (i = 0; i < nr; ++i) {
                if (ue[i].gsi >= KVM_MAX_IRQ_ROUTES)
                        return -EINVAL;
                nr_rt_entries = max(nr_rt_entries, ue[i].gsi);
        }

        nr_rt_entries += 1;

        new = kzalloc(struct_size(new, map, nr_rt_entries), GFP_KERNEL_ACCOUNT);
        if (!new)
                return -ENOMEM;

        new->nr_rt_entries = nr_rt_entries;
        for (i = 0; i < KVM_NR_IRQCHIPS; i++)
                for (j = 0; j < KVM_IRQCHIP_NUM_PINS; j++)
                        new->chip[i][j] = -1;

        for (i = 0; i < nr; ++i) {
                r = -ENOMEM;
                e = kzalloc(sizeof(*e), GFP_KERNEL_ACCOUNT);
                if (!e)
                        goto out;

                r = -EINVAL;
                switch (ue->type) {
                case KVM_IRQ_ROUTING_MSI:
                        if (ue->flags & ~KVM_MSI_VALID_DEVID)
                                goto free_entry;
                        break;
                default:
                        if (ue->flags)
                                goto free_entry;
                        break;
                }
                r = setup_routing_entry(kvm, new, e, ue);
                if (r)
                        goto free_entry;
                ++ue;
        }

        mutex_lock(&kvm->irq_lock);
        old = rcu_dereference_protected(kvm->irq_routing, 1);
        rcu_assign_pointer(kvm->irq_routing, new);
        kvm_irq_routing_update(kvm);
        kvm_arch_irq_routing_update(kvm);
        mutex_unlock(&kvm->irq_lock);

        kvm_arch_post_irq_routing_update(kvm);

        synchronize_srcu_expedited(&kvm->irq_srcu);

        new = old;
        r = 0;
        goto out;

free_entry:
        kfree(e);
out:
        free_irq_routing_table(new);

        return r;
}