/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Allen M. Kay <allen.m.kay@intel.com>
 * Author: Weidong Han <weidong.han@intel.com>
 * Author: Ben-Ami Yassour <benami@il.ibm.com>
 */

#include <linux/list.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/intel-iommu.h>

static bool allow_unsafe_assigned_interrupts;
module_param_named(allow_unsafe_assigned_interrupts,
		   allow_unsafe_assigned_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_assigned_interrupts,
 "Enable device assignment on platforms without interrupt remapping support.");

static int kvm_iommu_unmap_memslots(struct kvm *kvm);
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages);

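/*
 * kvm_pin_pages - pin a contiguous range of guest pages in host memory
 *
 * Takes a reference on each page covering @size bytes starting at @gfn via
 * gfn_to_pfn_memslot(), and returns the pfn of the first page (or an error
 * pfn if that first lookup fails). The references are dropped page by page
 * in kvm_unpin_pages() once the range is unmapped.
 */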
static pfn_t kvm_pin_pages(struct kvm *kvm, struct kvm_memory_slot *slot,
			   gfn_t gfn, unsigned long size)
{
	gfn_t end_gfn;
	pfn_t pfn;

	pfn     = gfn_to_pfn_memslot(kvm, slot, gfn);
	end_gfn = gfn + (size >> PAGE_SHIFT);
	gfn    += 1;

	if (is_error_pfn(pfn))
		return pfn;

	while (gfn < end_gfn)
		gfn_to_pfn_memslot(kvm, slot, gfn++);

	return pfn;
}

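/*
 * kvm_iommu_map_pages - map one memslot into the VM's IOMMU domain
 *
 * Walks the slot gfn by gfn, choosing the largest host page size that both
 * fits inside the slot and is gfn-aligned, so a hugepage can be mapped with
 * a single iommu_map() call. For example, a 2MiB host page whose gfn is
 * only 4KiB-aligned is split down to 4KiB mappings by the alignment loop.
 * Pages are pinned up front because the teardown path unmaps and unpins in
 * PAGE_SIZE steps.
 */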
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	gfn_t gfn, end_gfn;
	pfn_t pfn;
	int r = 0;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int flags;

	/* Check if an IOMMU exists and is in use */
	if (!domain)
		return 0;

	gfn     = slot->base_gfn;
	end_gfn = gfn + slot->npages;

	flags = IOMMU_READ | IOMMU_WRITE;
	if (kvm->arch.iommu_flags & KVM_IOMMU_CACHE_COHERENCY)
		flags |= IOMMU_CACHE;

	while (gfn < end_gfn) {
		unsigned long page_size;

		/* Check if already mapped */
		if (iommu_iova_to_phys(domain, gfn_to_gpa(gfn))) {
			gfn += 1;
			continue;
		}

		/* Get the page size we could use to map */
		page_size = kvm_host_page_size(kvm, gfn);

		/* Make sure the page_size does not exceed the memslot */
		while ((gfn + (page_size >> PAGE_SHIFT)) > end_gfn)
			page_size >>= 1;

		/* Make sure gfn is aligned to the page size we want to map */
		while ((gfn << PAGE_SHIFT) & (page_size - 1))
			page_size >>= 1;

		/*
		 * Pin all pages we are about to map in memory. This is
		 * important because we unmap and unpin in 4KiB steps later.
		 */
		pfn = kvm_pin_pages(kvm, slot, gfn, page_size);
		if (is_error_pfn(pfn)) {
			gfn += 1;
			continue;
		}

		/* Map into IO address space */
		r = iommu_map(domain, gfn_to_gpa(gfn), pfn_to_hpa(pfn),
			      page_size, flags);
		if (r) {
			printk(KERN_ERR "kvm_iommu_map_address: "
			       "iommu failed to map pfn=%llx\n", pfn);
			goto unmap_pages;
		}

		gfn += page_size >> PAGE_SHIFT;
	}

	return 0;

unmap_pages:
	kvm_iommu_put_pages(kvm, slot->base_gfn, gfn);
	return r;
}

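/*
 * kvm_iommu_map_memslots - map every memslot under SRCU protection
 *
 * Stops at the first slot that fails to map; the caller is expected to
 * unwind with kvm_iommu_unmap_memslots() on error.
 */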
static int kvm_iommu_map_memslots(struct kvm *kvm)
{
	int idx, r = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots) {
		r = kvm_iommu_map_pages(kvm, memslot);
		if (r)
			break;
	}
	srcu_read_unlock(&kvm->srcu, idx);

	return r;
}

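/*
 * kvm_assign_device - attach an assigned PCI device to the IOMMU domain
 *
 * Attaches the device behind @assigned_dev to kvm->arch.iommu_domain and
 * marks it with PCI_DEV_FLAGS_ASSIGNED. If attaching this device is what
 * first makes the domain cache-coherent, all memslots are remapped so the
 * IOMMU_CACHE flag takes effect. Returns 0 when no IOMMU domain is in use,
 * or a negative errno on failure.
 */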
int kvm_assign_device(struct kvm *kvm,
		      struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct pci_dev *pdev = NULL;
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	int r, last_flags;

	/* Check if an IOMMU exists and is in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	r = iommu_attach_device(domain, &pdev->dev);
	if (r) {
		printk(KERN_ERR "assign device %x:%x:%x.%x failed\n",
			pci_domain_nr(pdev->bus),
			pdev->bus->number,
			PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn));
		return r;
	}

	last_flags = kvm->arch.iommu_flags;
	if (iommu_domain_has_cap(kvm->arch.iommu_domain,
				 IOMMU_CAP_CACHE_COHERENCY))
		kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

	/* Check if we need to update the IOMMU page table for guest memory */
	if ((last_flags ^ kvm->arch.iommu_flags) ==
			KVM_IOMMU_CACHE_COHERENCY) {
		kvm_iommu_unmap_memslots(kvm);
		r = kvm_iommu_map_memslots(kvm);
		if (r)
			goto out_unmap;
	}

	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;

	printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}

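/*
 * kvm_deassign_device - detach an assigned PCI device from the IOMMU domain
 *
 * Reverses kvm_assign_device(): detaches the device and clears
 * PCI_DEV_FLAGS_ASSIGNED. Memslot mappings are left in place for any
 * remaining assigned devices.
 */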
int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	struct pci_dev *pdev = NULL;

	/* Check if an IOMMU exists and is in use */
	if (!domain)
		return 0;

	pdev = assigned_dev->dev;
	if (pdev == NULL)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);

	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;

	printk(KERN_DEBUG "deassign device %x:%x:%x.%x\n",
		assigned_dev->host_segnr,
		assigned_dev->host_busnr,
		PCI_SLOT(assigned_dev->host_devfn),
		PCI_FUNC(assigned_dev->host_devfn));

	return 0;
}

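/*
 * kvm_iommu_map_guest - allocate the VM's IOMMU domain and map guest memory
 *
 * Refuses with -EPERM when the IOMMU cannot remap interrupts, unless the
 * allow_unsafe_assigned_interrupts module parameter overrides the check.
 * A sketch of the expected call order from a hypothetical caller (the real
 * ioctl plumbing lives in the device-assignment code, not here):
 *
 *	r = kvm_iommu_map_guest(kvm);
 *	if (!r)
 *		r = kvm_assign_device(kvm, assigned_dev);
 */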
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_present(&pci_bus_type)) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&kvm->slots_lock);

	kvm->arch.iommu_domain = iommu_domain_alloc(&pci_bus_type);
	if (!kvm->arch.iommu_domain) {
		r = -ENOMEM;
		goto out_unlock;
	}

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
				  IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support,"
		       " disallowing device assignment."
		       " Re-enable with \"allow_unsafe_assigned_interrupts=1\""
		       " module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		r = -EPERM;
		goto out_unlock;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		kvm_iommu_unmap_memslots(kvm);

out_unlock:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

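/* Drop the page references taken by kvm_pin_pages(), one page at a time. */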
static void kvm_unpin_pages(struct kvm *kvm, pfn_t pfn, unsigned long npages)
{
	unsigned long i;

	for (i = 0; i < npages; ++i)
		kvm_release_pfn_clean(pfn + i);
}

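/*
 * kvm_iommu_put_pages - unmap and unpin a range of guest pages
 *
 * iommu_unmap() may tear down a mapping larger than PAGE_SIZE if the range
 * was mapped as a hugepage; the returned size says how many 4KiB pages the
 * unmap actually covered, and that many pins are released.
 */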
static void kvm_iommu_put_pages(struct kvm *kvm,
				gfn_t base_gfn, unsigned long npages)
{
	struct iommu_domain *domain;
	gfn_t end_gfn, gfn;
	pfn_t pfn;
	u64 phys;

	domain  = kvm->arch.iommu_domain;
	end_gfn = base_gfn + npages;
	gfn     = base_gfn;

	/* Check if an IOMMU exists and is in use */
	if (!domain)
		return;

	while (gfn < end_gfn) {
		unsigned long unmap_pages;
		size_t size;

		/* Get physical address */
		phys = iommu_iova_to_phys(domain, gfn_to_gpa(gfn));
		pfn  = phys >> PAGE_SHIFT;

		/* Unmap address from IO address space */
		size = iommu_unmap(domain, gfn_to_gpa(gfn), PAGE_SIZE);
		unmap_pages = 1ULL << get_order(size);

		/* Unpin all pages we just unmapped so no memory is leaked */
		kvm_unpin_pages(kvm, pfn, unmap_pages);

		gfn += unmap_pages;
	}
}

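/* Unmap a whole memslot from the IOMMU domain and unpin its pages. */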
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvm_iommu_put_pages(kvm, slot->base_gfn, slot->npages);
}

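/* Unmap every memslot under SRCU protection; always returns 0. */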
static int kvm_iommu_unmap_memslots(struct kvm *kvm)
{
	int idx;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);

	kvm_for_each_memslot(memslot, slots)
		kvm_iommu_unmap_pages(kvm, memslot);

	srcu_read_unlock(&kvm->srcu, idx);

	return 0;
}

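/*
 * kvm_iommu_unmap_guest - tear down the VM's IOMMU domain
 *
 * Clears kvm->arch.iommu_domain under slots_lock before freeing the domain,
 * so that concurrent mappers see the domain as gone.
 */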
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* Check if an IOMMU exists and is in use */
	if (!domain)
		return 0;

	mutex_lock(&kvm->slots_lock);
	kvm_iommu_unmap_memslots(kvm);
	kvm->arch.iommu_domain = NULL;
	mutex_unlock(&kvm->slots_lock);

	iommu_domain_free(domain);
	return 0;
}