// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/vfio.h>
#include <linux/cdx/cdx_bus.h>

#include "private.h"

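/*
 * Called when userspace opens the device: build a vfio_cdx_region
 * descriptor for each CDX resource, reset the device into a known state,
 * and record whether bus mastering can be controlled (BME_SUPPORT) by
 * attempting to clear it.
 */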
static int vfio_cdx_open_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	int count = cdx_dev->res_count;
	int i, ret;

	vdev->regions = kcalloc(count, sizeof(struct vfio_cdx_region),
				GFP_KERNEL_ACCOUNT);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < count; i++) {
		struct resource *res = &cdx_dev->res[i];

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].type = res->flags;
		/*
		 * Only regions addressed with PAGE granularity may be
		 * MMAP'ed securely.
		 */
		if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
		    !(vdev->regions[i].size & ~PAGE_MASK))
			vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;
		vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
		if (!(cdx_dev->res[i].flags & IORESOURCE_READONLY))
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_WRITE;
	}
	ret = cdx_dev_reset(core_vdev->dev);
	if (ret) {
		kfree(vdev->regions);
		vdev->regions = NULL;
		return ret;
	}
	ret = cdx_clear_master(cdx_dev);
	if (ret)
		vdev->flags &= ~BME_SUPPORT;
	else
		vdev->flags |= BME_SUPPORT;

	return 0;
}

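/*
 * Undo open_device: free the region table, quiesce the device with a
 * reset, and release any IRQs still wired up by the user.
 */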
static void vfio_cdx_close_device(struct vfio_device *core_vdev)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);

	kfree(vdev->regions);
	cdx_dev_reset(core_vdev->dev);
	vfio_cdx_irqs_cleanup(vdev);
}

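/*
 * VFIO_DEVICE_FEATURE_BUS_MASTER handler: sets or clears bus mastering
 * on the user's behalf, provided open_device determined the device
 * supports it.
 */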
static int vfio_cdx_bm_ctrl(struct vfio_device *core_vdev, u32 flags,
			    void __user *arg, size_t argsz)
{
	size_t minsz =
		offsetofend(struct vfio_device_feature_bus_master, op);
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	struct vfio_device_feature_bus_master ops;
	int ret;

	if (!(vdev->flags & BME_SUPPORT))
		return -ENOTTY;

	ret = vfio_check_feature(flags, argsz, VFIO_DEVICE_FEATURE_SET,
				 sizeof(ops));
	if (ret != 1)
		return ret;

	if (copy_from_user(&ops, arg, minsz))
		return -EFAULT;

	switch (ops.op) {
	case VFIO_DEVICE_FEATURE_CLEAR_MASTER:
		return cdx_clear_master(cdx_dev);
	case VFIO_DEVICE_FEATURE_SET_MASTER:
		return cdx_set_master(cdx_dev);
	default:
		return -EINVAL;
	}
}

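/* Dispatch VFIO_DEVICE_FEATURE requests to the per-feature handlers. */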
static int vfio_cdx_ioctl_feature(struct vfio_device *device, u32 flags,
				  void __user *arg, size_t argsz)
{
	switch (flags & VFIO_DEVICE_FEATURE_MASK) {
	case VFIO_DEVICE_FEATURE_BUS_MASTER:
		return vfio_cdx_bm_ctrl(device, flags, arg, argsz);
	default:
		return -ENOTTY;
	}
}

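/*
 * VFIO_DEVICE_GET_INFO: one region per CDX resource, and a single IRQ
 * index when the device has MSI vectors.
 */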
static int vfio_cdx_ioctl_get_info(struct vfio_cdx_device *vdev,
				   struct vfio_device_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_device_info, num_irqs);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_device_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	info.flags = VFIO_DEVICE_FLAGS_CDX;
	info.flags |= VFIO_DEVICE_FLAGS_RESET;

	info.num_regions = cdx_dev->res_count;
	info.num_irqs = cdx_dev->num_msi ? 1 : 0;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

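/*
 * VFIO_DEVICE_GET_REGION_INFO: report the mmap() offset encoding the
 * region index, along with the region's size and access flags.
 */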
static int vfio_cdx_ioctl_get_region_info(struct vfio_cdx_device *vdev,
					  struct vfio_region_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_region_info, offset);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_region_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= cdx_dev->res_count)
		return -EINVAL;

	/* map offset to the physical address */
	info.offset = vfio_cdx_index_to_offset(info.index);
	info.size = vdev->regions[info.index].size;
	info.flags = vdev->regions[info.index].flags;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

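/*
 * VFIO_DEVICE_GET_IRQ_INFO: IRQ index 0 carries all of the device's MSI
 * vectors and supports eventfd signaling.
 */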
static int vfio_cdx_ioctl_get_irq_info(struct vfio_cdx_device *vdev,
				       struct vfio_irq_info __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_info, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_info info;

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	if (info.index >= 1)
		return -EINVAL;

	if (!cdx_dev->num_msi)
		return -EINVAL;

	info.flags = VFIO_IRQ_INFO_EVENTFD | VFIO_IRQ_INFO_NORESIZE;
	info.count = cdx_dev->num_msi;

	return copy_to_user(arg, &info, minsz) ? -EFAULT : 0;
}

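/*
 * VFIO_DEVICE_SET_IRQS: validate the header against the device's MSI
 * count, copy in any trailing payload (such as eventfds), and hand the
 * request to the IRQ backend.
 */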
static int vfio_cdx_ioctl_set_irqs(struct vfio_cdx_device *vdev,
				   struct vfio_irq_set __user *arg)
{
	unsigned long minsz = offsetofend(struct vfio_irq_set, count);
	struct cdx_device *cdx_dev = to_cdx_device(vdev->vdev.dev);
	struct vfio_irq_set hdr;
	size_t data_size = 0;
	u8 *data = NULL;
	int ret = 0;

	if (copy_from_user(&hdr, arg, minsz))
		return -EFAULT;

	ret = vfio_set_irqs_validate_and_prepare(&hdr, cdx_dev->num_msi,
						 1, &data_size);
	if (ret)
		return ret;

	if (data_size) {
		data = memdup_user(arg->data, data_size);
		if (IS_ERR(data))
			return PTR_ERR(data);
	}

	ret = vfio_cdx_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
				      hdr.start, hdr.count, data);
	kfree(data);

	return ret;
}

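/* Top-level ioctl dispatcher for the vfio-cdx device file descriptor. */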
static long vfio_cdx_ioctl(struct vfio_device *core_vdev,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	void __user *uarg = (void __user *)arg;

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
		return vfio_cdx_ioctl_get_info(vdev, uarg);
	case VFIO_DEVICE_GET_REGION_INFO:
		return vfio_cdx_ioctl_get_region_info(vdev, uarg);
	case VFIO_DEVICE_GET_IRQ_INFO:
		return vfio_cdx_ioctl_get_irq_info(vdev, uarg);
	case VFIO_DEVICE_SET_IRQS:
		return vfio_cdx_ioctl_set_irqs(vdev, uarg);
	case VFIO_DEVICE_RESET:
		return cdx_dev_reset(core_vdev->dev);
	default:
		return -ENOTTY;
	}
}

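/*
 * Remap a region's MMIO into the user's VMA. The low bits of vm_pgoff
 * hold the page offset within the region; reject mappings that would
 * run past the region's end.
 */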
static int vfio_cdx_mmap_mmio(struct vfio_cdx_region region,
			      struct vm_area_struct *vma)
{
	u64 size = vma->vm_end - vma->vm_start;
	u64 pgoff, base;

	pgoff = vma->vm_pgoff &
		((1U << (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	base = pgoff << PAGE_SHIFT;

	if (base + size > region.size)
		return -EINVAL;

	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;
	vma->vm_page_prot = pgprot_device(vma->vm_page_prot);

	return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				  size, vma->vm_page_prot);
}

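/*
 * Validate an mmap() request against the region table: the region index
 * lives in the high bits of the file offset, and the requested
 * protections must not exceed the region's READ/WRITE flags.
 *
 * A hypothetical userspace sketch (not part of this driver; device_fd
 * and the region index 2 are illustrative): pass the offset reported by
 * VFIO_DEVICE_GET_REGION_INFO straight to mmap():
 *
 *	struct vfio_region_info info = { .argsz = sizeof(info), .index = 2 };
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info);
 *	p = mmap(NULL, info.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		 device_fd, info.offset);
 */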
static int vfio_cdx_mmap(struct vfio_device *core_vdev,
			 struct vm_area_struct *vma)
{
	struct vfio_cdx_device *vdev =
		container_of(core_vdev, struct vfio_cdx_device, vdev);
	struct cdx_device *cdx_dev = to_cdx_device(core_vdev->dev);
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_CDX_OFFSET_SHIFT - PAGE_SHIFT);

	if (index >= cdx_dev->res_count)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ) &&
	    (vma->vm_flags & VM_READ))
		return -EPERM;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE) &&
	    (vma->vm_flags & VM_WRITE))
		return -EPERM;

	return vfio_cdx_mmap_mmio(vdev->regions[index], vma);
}

static const struct vfio_device_ops vfio_cdx_ops = {
	.name		= "vfio-cdx",
	.open_device	= vfio_cdx_open_device,
	.close_device	= vfio_cdx_close_device,
	.ioctl		= vfio_cdx_ioctl,
	.device_feature	= vfio_cdx_ioctl_feature,
	.mmap		= vfio_cdx_mmap,
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
};

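/*
 * Driver bind: allocate the vfio_cdx_device wrapper around the CDX
 * device and register it with the VFIO core.
 */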
static int vfio_cdx_probe(struct cdx_device *cdx_dev)
{
	struct vfio_cdx_device *vdev;
	struct device *dev = &cdx_dev->dev;
	int ret;

	vdev = vfio_alloc_device(vfio_cdx_device, vdev, dev,
				 &vfio_cdx_ops);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	ret = vfio_register_group_dev(&vdev->vdev);
	if (ret)
		goto out_uninit;

	dev_set_drvdata(dev, vdev);
	return 0;

out_uninit:
	vfio_put_device(&vdev->vdev);
	return ret;
}

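/* Driver unbind: unregister from VFIO and drop the driver's reference. */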
static int vfio_cdx_remove(struct cdx_device *cdx_dev)
{
	struct device *dev = &cdx_dev->dev;
	struct vfio_cdx_device *vdev = dev_get_drvdata(dev);

	vfio_unregister_group_dev(&vdev->vdev);
	vfio_put_device(&vdev->vdev);

	return 0;
}

static const struct cdx_device_id vfio_cdx_table[] = {
	{ CDX_DEVICE_DRIVER_OVERRIDE(CDX_ANY_ID, CDX_ANY_ID,
				     CDX_ID_F_VFIO_DRIVER_OVERRIDE) }, /* match all by default */
	{}
};

MODULE_DEVICE_TABLE(cdx, vfio_cdx_table);

static struct cdx_driver vfio_cdx_driver = {
	.probe		= vfio_cdx_probe,
	.remove		= vfio_cdx_remove,
	.match_id_table	= vfio_cdx_table,
	.driver	= {
		.name = "vfio-cdx",
	},
	.driver_managed_dma = true,
};

module_driver(vfio_cdx_driver, cdx_driver_register, cdx_driver_unregister);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VFIO for CDX devices - User Level meta-driver");
MODULE_IMPORT_NS("CDX_BUS");