// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018-2020 Intel Corporation.
 * Copyright (C) 2020 Red Hat, Inc.
 *
 * Author: Tiwei Bie <tiwei.bie@intel.com>
 *         Jason Wang <jasowang@redhat.com>
 *
 * Thanks to Michael S. Tsirkin for the valuable comments and
 * suggestions, and to Cunming Liang and Zhihong Wang for all
 * their support.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/uuid.h>
#include <linux/vdpa.h>
#include <linux/nospec.h>
#include <linux/vhost.h>

#include "vhost.h"

enum {
	VHOST_VDPA_BACKEND_FEATURES =
	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
	(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
};

#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)

#define VHOST_VDPA_IOTLB_BUCKETS 16

struct vhost_vdpa_as {
	struct hlist_node hash_link;
	struct vhost_iotlb iotlb;
	u32 id;
};

struct vhost_vdpa {
	struct vhost_dev vdev;
	struct iommu_domain *domain;
	struct vhost_virtqueue *vqs;
	struct completion completion;
	struct vdpa_device *vdpa;
	struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
	struct device dev;
	struct cdev cdev;
	atomic_t opened;
	u32 nvqs;
	int virtio_id;
	int minor;
	struct eventfd_ctx *config_ctx;
	int in_batch;
	struct vdpa_iova_range range;
	u32 batch_asid;
	bool suspended;
};

static DEFINE_IDA(vhost_vdpa_ida);

static dev_t vhost_vdpa_major;

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid);

static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
{
	struct vhost_vdpa_as *as = container_of(iotlb, struct
						vhost_vdpa_as, iotlb);
	return as->id;
}

static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	hlist_for_each_entry(as, head, hash_link)
		if (as->id == asid)
			return as;

	return NULL;
}

static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return NULL;

	return &as->iotlb;
}

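/*
 * Allocate a new address space with the given ASID and add it to the
 * hash table. Returns NULL if the ASID is already in use or is beyond
 * the number of address spaces the parent device advertises.
 */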
static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
{
	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
	struct vhost_vdpa_as *as;

	if (asid_to_as(v, asid))
		return NULL;

	if (asid >= v->vdpa->nas)
		return NULL;

	as = kmalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	vhost_iotlb_init(&as->iotlb, 0, 0);
	as->id = asid;
	hlist_add_head(&as->hash_link, head);

	return as;
}

static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
						      u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (as)
		return as;

	return vhost_vdpa_alloc_as(v, asid);
}

static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->reset_map)
		ops->reset_map(vdpa, asid);
}

static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
{
	struct vhost_vdpa_as *as = asid_to_as(v, asid);

	if (!as)
		return -EINVAL;

	hlist_del(&as->hash_link);
	vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
	/*
	 * Devices with a vendor-specific IOMMU may need to restore the
	 * iotlb to its initial or default state, which cannot be done
	 * by the whole-range unmap call above. Give them a chance to
	 * clean up or reset the map to the desired state.
	 */
	vhost_vdpa_reset_map(v, asid);
	kfree(as);

	return 0;
}

static void handle_vq_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
	const struct vdpa_config_ops *ops = v->vdpa->config;

	ops->kick_vq(v->vdpa, vq - v->vqs);
}

static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
{
	struct vhost_virtqueue *vq = private;
	struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;

	if (call_ctx)
		eventfd_signal(call_ctx);

	return IRQ_HANDLED;
}

static irqreturn_t vhost_vdpa_config_cb(void *private)
{
	struct vhost_vdpa *v = private;
	struct eventfd_ctx *config_ctx = v->config_ctx;

	if (config_ctx)
		eventfd_signal(config_ctx);

	return IRQ_HANDLED;
}

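/*
 * Try to register the vq's call eventfd as an irq bypass producer so the
 * device interrupt can be delivered directly to the consumer (e.g. a KVM
 * guest). Failure is not fatal: interrupts simply keep going through the
 * eventfd path, so it is only logged.
 */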
static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];
	const struct vdpa_config_ops *ops = v->vdpa->config;
	struct vdpa_device *vdpa = v->vdpa;
	int ret, irq;

	if (!ops->get_vq_irq)
		return;

	irq = ops->get_vq_irq(vdpa, qid);
	if (irq < 0)
		return;

	if (!vq->call_ctx.ctx)
		return;

	vq->call_ctx.producer.irq = irq;
	ret = irq_bypass_register_producer(&vq->call_ctx.producer);
	if (unlikely(ret))
		dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
			 qid, vq->call_ctx.producer.token, ret);
}

static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
{
	struct vhost_virtqueue *vq = &v->vqs[qid];

	irq_bypass_unregister_producer(&vq->call_ctx.producer);
}

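/*
 * Reset the device. If userspace has not negotiated
 * VHOST_BACKEND_F_IOTLB_PERSIST, also ask the parent driver to clean up
 * its mappings (VDPA_RESET_F_CLEAN_MAP), preserving the historical
 * reset-clears-the-IOTLB behaviour.
 */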
static int _compat_vdpa_reset(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	u32 flags = 0;

	v->suspended = false;

	if (v->vdev.vqs) {
		flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
						    VHOST_BACKEND_F_IOTLB_PERSIST) ?
			 VDPA_RESET_F_CLEAN_MAP : 0;
	}

	return vdpa_reset(vdpa, flags);
}

static int vhost_vdpa_reset(struct vhost_vdpa *v)
{
	v->in_batch = 0;
	return _compat_vdpa_reset(v);
}

static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->bind_mm)
		return 0;

	return ops->bind_mm(vdpa, v->vdev.mm);
}

static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!vdpa->use_va || !ops->unbind_mm)
		return;

	ops->unbind_mm(vdpa);
}

static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 device_id;

	device_id = ops->get_device_id(vdpa);

	if (copy_to_user(argp, &device_id, sizeof(device_id)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status;

	status = ops->get_status(vdpa);

	if (copy_to_user(statusp, &status, sizeof(status)))
		return -EFAULT;

	return 0;
}

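/*
 * Handle a status write from userspace. Status bits may only be added,
 * never removed, except by writing 0 which performs a full reset. irq
 * bypass producers are torn down before DRIVER_OK is cleared and set up
 * again once DRIVER_OK becomes set, so interrupt bypass is only active
 * while the device is running.
 */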
static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u8 status, status_old;
	u32 nvqs = v->nvqs;
	int ret;
	u16 i;

	if (copy_from_user(&status, statusp, sizeof(status)))
		return -EFAULT;

	status_old = ops->get_status(vdpa);

	/*
	 * Userspace shouldn't remove status bits unless resetting
	 * the status to 0.
	 */
	if (status != 0 && (status_old & ~status) != 0)
		return -EINVAL;

	if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_unsetup_vq_irq(v, i);

	if (status == 0) {
		ret = _compat_vdpa_reset(v);
		if (ret)
			return ret;
	} else
		vdpa_set_status(vdpa, status);

	if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
		for (i = 0; i < nvqs; i++)
			vhost_vdpa_setup_vq_irq(v, i);

	return 0;
}

static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
				      struct vhost_vdpa_config *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	size_t size = vdpa->config->get_config_size(vdpa);

	if (c->len == 0 || c->off > size)
		return -EINVAL;

	if (c->len > size - c->off)
		return -E2BIG;

	return 0;
}

static long vhost_vdpa_get_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;
	buf = kvzalloc(config.len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	vdpa_get_config(vdpa, config.off, buf, config.len);

	if (copy_to_user(c->buf, buf, config.len)) {
		kvfree(buf);
		return -EFAULT;
	}

	kvfree(buf);
	return 0;
}

static long vhost_vdpa_set_config(struct vhost_vdpa *v,
				  struct vhost_vdpa_config __user *c)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct vhost_vdpa_config config;
	unsigned long size = offsetof(struct vhost_vdpa_config, buf);
	u8 *buf;

	if (copy_from_user(&config, c, size))
		return -EFAULT;
	if (vhost_vdpa_config_validate(v, &config))
		return -EINVAL;

	buf = vmemdup_user(c->buf, config.len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	vdpa_set_config(vdpa, config.off, buf, config.len);

	kvfree(buf);
	return 0;
}

static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->suspend;
}

static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->resume;
}

static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return ops->get_vq_desc_group;
}

static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u64 features;

	features = ops->get_device_features(vdpa);

	if (copy_to_user(featurep, &features, sizeof(features)))
		return -EFAULT;

	return 0;
}

static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (!ops->get_backend_features)
		return 0;
	else
		return ops->get_backend_features(vdpa);
}

static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
	       vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_dev *d = &v->vdev;
	u64 actual_features;
	u64 features;
	int i;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	/* let the vqs know what has been configured */
	actual_features = ops->get_driver_features(vdpa);
	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->acked_features = actual_features;
		mutex_unlock(&vq->mutex);
	}

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 size;

	size = ops->get_config_size(vdpa);

	if (copy_to_user(argp, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
		return -EFAULT;

	return 0;
}

/* After a successful return of this ioctl the device must not process more
 * virtqueue descriptors. The device can answer reads or writes of config
 * fields as if it were not suspended. In particular, writing to "queue_enable"
 * with a value of 1 will not make the device start processing buffers.
 */
static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	if (!ops->suspend)
		return -EOPNOTSUPP;

	ret = ops->suspend(vdpa);
	if (!ret)
		v->suspended = true;

	return ret;
}

/* After a successful return of this ioctl the device resumes processing
 * virtqueue descriptors. The device becomes fully operational the same way it
 * was before it was suspended.
 */
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	if (!ops->resume)
		return -EOPNOTSUPP;

	ret = ops->resume(vdpa);
	if (!ret)
		v->suspended = false;

	return ret;
}

static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_VDPA_GET_VRING_GROUP:
		if (!ops->get_vq_group)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_GET_VRING_DESC_GROUP:
		if (!vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_desc_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_SET_GROUP_ASID:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		if (s.num >= vdpa->nas)
			return -EINVAL;
		if (!ops->set_group_asid)
			return -EOPNOTSUPP;
		return ops->set_group_asid(vdpa, idx, s.num);
	case VHOST_VDPA_GET_VRING_SIZE:
		if (!ops->get_vq_size)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_size(vdpa, idx);
		if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq->last_avail_idx = vq_state.packed.last_avail_idx |
					     (vq_state.packed.last_avail_counter << 15);
			vq->last_used_idx = vq_state.packed.last_used_idx |
					    (vq_state.packed.last_used_counter << 15);
		} else {
			vq->last_avail_idx = vq_state.split.avail_index;
		}
		break;
	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			if (ops->get_status(vdpa) &
			    VIRTIO_CONFIG_S_DRIVER_OK)
				vhost_vdpa_unsetup_vq_irq(v, idx);
			vq->call_ctx.producer.token = NULL;
		}
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
			return -EINVAL;

		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
			return -EINVAL;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
		} else {
			vq_state.split.avail_index = vq->last_avail_idx;
		}
		r = ops->set_vq_state(vdpa, idx, &vq_state);
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
			cb.trigger = vq->call_ctx.ctx;
			vq->call_ctx.producer.token = vq->call_ctx.ctx;
			if (ops->get_status(vdpa) &
			    VIRTIO_CONFIG_S_DRIVER_OK)
				vhost_vdpa_setup_vq_irq(v, idx);
		} else {
			cb.callback = NULL;
			cb.private = NULL;
			cb.trigger = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
				 BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
				 BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
				 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
		     !vhost_vdpa_can_suspend(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
		     !vhost_vdpa_can_resume(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		    !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
			return -EINVAL;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		     !vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
		     !vhost_vdpa_has_persistent_map(v))
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_VDPA_GET_GROUP_NUM:
		if (copy_to_user(argp, &v->vdpa->ngroups,
				 sizeof(v->vdpa->ngroups)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_AS_NUM:
		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
			r = -EFAULT;
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (vhost_vdpa_can_suspend(v))
			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
		if (vhost_vdpa_can_resume(v))
			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
		if (vhost_vdpa_has_desc_group(v))
			features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
		if (vhost_vdpa_has_persistent_map(v))
			features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
		features |= vhost_vdpa_get_backend_features(v);
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG_SIZE:
		r = vhost_vdpa_get_config_size(v, argp);
		break;
	case VHOST_VDPA_GET_VQS_COUNT:
		r = vhost_vdpa_get_vqs_count(v, argp);
		break;
	case VHOST_VDPA_SUSPEND:
		r = vhost_vdpa_suspend(v);
		break;
	case VHOST_VDPA_RESUME:
		r = vhost_vdpa_resume(v);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	if (r)
		goto out;

	switch (cmd) {
	case VHOST_SET_OWNER:
		r = vhost_vdpa_bind_mm(v);
		if (r)
			vhost_dev_reset_owner(d, NULL);
		break;
	}
out:
	mutex_unlock(&d->mutex);
	return r;
}

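/*
 * Tear down one mapping through whichever DMA path the parent device
 * uses: its own dma_unmap op, or the platform IOMMU when the device
 * implements neither dma_map nor set_map.
 */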
static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
				     struct vhost_iotlb_map *map, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->dma_map) {
		ops->dma_unmap(vdpa, asid, map->start, map->size);
	} else if (ops->set_map == NULL) {
		iommu_unmap(v->domain, map->start, map->size);
	}
}

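/*
 * Unmap a range of physically mapped (pinned) memory: mark writable pages
 * dirty, unpin them, subtract them from the mm's pinned page accounting
 * and drop the iotlb entries.
 */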
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);

	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

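/*
 * Add one mapping to the iotlb and program it into the device through one
 * of three paths: the device's own dma_map op, a device-wide set_map
 * (deferred while a batch is in flight), or the platform IOMMU domain.
 * On failure the iotlb entry is rolled back.
 */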
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);
	int r = 0;

	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, asid, iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm),
			      GFP_KERNEL_ACCOUNT);
	}
	if (r) {
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);

	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);

	if (ops->set_map) {
		if (!v->in_batch)
			ops->set_map(vdpa, asid, iotlb);
	}
}

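/*
 * Map a userspace virtual address range for a device that uses virtual
 * addresses (use_va). The range is walked VMA by VMA; only shared
 * file-backed VMAs are mapped, with a reference held on the backing file
 * so the translation stays valid.
 */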
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
			!(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}

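/*
 * Map a userspace range by physical address: pin the pages with
 * pin_user_pages() (within the RLIMIT_MEMLOCK budget), coalesce runs of
 * physically contiguous pages and map each run with a single
 * vhost_vdpa_map() call. On failure all pages pinned so far are unpinned.
 */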
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map a contiguous chunk of memory */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iotlb, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vdpa_map(), hence
			 * the corresponding unpinning will be handled by
			 * vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, iotlb, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb *iotlb,
					   struct vhost_iotlb_msg *msg)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

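/*
 * Entry point for VHOST_IOTLB_MSG_V2 messages. Looks up (or, for updates
 * and batch begins, creates) the target address space, then dispatches
 * the update/invalidate/batch operation. For set_map devices, updates
 * made inside a batch are pushed to the device only at BATCH_END.
 */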
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_iotlb *iotlb = NULL;
	struct vhost_vdpa_as *as = NULL;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	if (msg->type == VHOST_IOTLB_UPDATE ||
	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
		as = vhost_vdpa_find_alloc_as(v, asid);
		if (!as) {
			dev_err(&v->dev, "can't find and alloc asid %d\n",
				asid);
			r = -EINVAL;
			goto unlock;
		}
		iotlb = &as->iotlb;
	} else
		iotlb = asid_to_iotlb(v, asid);

	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
		if (v->in_batch && v->batch_asid != asid) {
			dev_info(&v->dev, "batch id %d asid %d\n",
				 v->batch_asid, asid);
		}
		if (!iotlb)
			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
		r = -EINVAL;
		goto unlock;
	}

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->batch_asid = asid;
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, asid, iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

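/*
 * Allocate and attach an IOMMU paging domain for devices that rely on the
 * platform IOMMU for DMA translation (i.e. implement neither set_map nor
 * dma_map). Requires a cache-coherent IOMMU.
 */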
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
		dev_warn_once(&v->dev,
			      "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
		return -ENOTSUPP;
	}

	v->domain = iommu_paging_domain_alloc(dma_dev);
	if (IS_ERR(v->domain)) {
		ret = PTR_ERR(v->domain);
		v->domain = NULL;
		return ret;
	}

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	v->domain = NULL;
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
	struct vhost_vdpa_as *as;
	u32 asid;

	for (asid = 0; asid < v->vdpa->nas; asid++) {
		as = asid_to_as(v, asid);
		if (as)
			vhost_vdpa_remove_as(v, asid);
	}

	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	v->vdev.vqs = NULL;
}

static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r, opened;
	u32 i, nvqs;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
	if (r)
		goto err;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
		vqs[i]->call_ctx.ctx = NULL;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_alloc_domain;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_alloc_domain:
	vhost_vdpa_cleanup(v);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	u32 i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_clean_irq(v);
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_unbind_mm(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_cleanup(v);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	return vmf_insert_pfn(vma, vmf->address & PAGE_MASK, PFN_DOWN(notify.addr));
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support the doorbell which sits on the page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_free(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

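/*
 * Bus probe: allocate the vhost_vdpa instance, reserve a minor, and
 * expose the device to userspace as a /dev/vhost-vdpa-N character device.
 */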
static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int i, r;

	/* We can't support platform IOMMU devices with more than one
	 * group or address space.
	 */
	if (!ops->set_map && !ops->dma_map &&
	    (vdpa->ngroups > 1 || vdpa->nas > 1))
		return -EOPNOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_alloc_max(&vhost_vdpa_ida, VHOST_VDPA_DEV_MAX - 1,
			      GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
		INIT_HLIST_HEAD(&v->as[i]);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2018-2020 Intel Corporation.
4 * Copyright (C) 2020 Red Hat, Inc.
5 *
6 * Author: Tiwei Bie <tiwei.bie@intel.com>
7 * Jason Wang <jasowang@redhat.com>
8 *
9 * Thanks Michael S. Tsirkin for the valuable comments and
10 * suggestions. And thanks to Cunming Liang and Zhihong Wang for all
11 * their supports.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/cdev.h>
17#include <linux/device.h>
18#include <linux/mm.h>
19#include <linux/slab.h>
20#include <linux/iommu.h>
21#include <linux/uuid.h>
22#include <linux/vdpa.h>
23#include <linux/nospec.h>
24#include <linux/vhost.h>
25
26#include "vhost.h"
27
28enum {
29 VHOST_VDPA_BACKEND_FEATURES =
30 (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
31 (1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
32 (1ULL << VHOST_BACKEND_F_IOTLB_ASID),
33};
34
35#define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
36
37#define VHOST_VDPA_IOTLB_BUCKETS 16
38
39struct vhost_vdpa_as {
40 struct hlist_node hash_link;
41 struct vhost_iotlb iotlb;
42 u32 id;
43};
44
45struct vhost_vdpa {
46 struct vhost_dev vdev;
47 struct iommu_domain *domain;
48 struct vhost_virtqueue *vqs;
49 struct completion completion;
50 struct vdpa_device *vdpa;
51 struct hlist_head as[VHOST_VDPA_IOTLB_BUCKETS];
52 struct device dev;
53 struct cdev cdev;
54 atomic_t opened;
55 u32 nvqs;
56 int virtio_id;
57 int minor;
58 struct eventfd_ctx *config_ctx;
59 int in_batch;
60 struct vdpa_iova_range range;
61 u32 batch_asid;
62 bool suspended;
63};
64
65static DEFINE_IDA(vhost_vdpa_ida);
66
67static dev_t vhost_vdpa_major;
68
69static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
70 struct vhost_iotlb *iotlb, u64 start,
71 u64 last, u32 asid);
72
73static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
74{
75 struct vhost_vdpa_as *as = container_of(iotlb, struct
76 vhost_vdpa_as, iotlb);
77 return as->id;
78}
79
80static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
81{
82 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
83 struct vhost_vdpa_as *as;
84
85 hlist_for_each_entry(as, head, hash_link)
86 if (as->id == asid)
87 return as;
88
89 return NULL;
90}
91
92static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
93{
94 struct vhost_vdpa_as *as = asid_to_as(v, asid);
95
96 if (!as)
97 return NULL;
98
99 return &as->iotlb;
100}
101
102static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
103{
104 struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
105 struct vhost_vdpa_as *as;
106
107 if (asid_to_as(v, asid))
108 return NULL;
109
110 if (asid >= v->vdpa->nas)
111 return NULL;
112
113 as = kmalloc(sizeof(*as), GFP_KERNEL);
114 if (!as)
115 return NULL;
116
117 vhost_iotlb_init(&as->iotlb, 0, 0);
118 as->id = asid;
119 hlist_add_head(&as->hash_link, head);
120
121 return as;
122}
123
124static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
125 u32 asid)
126{
127 struct vhost_vdpa_as *as = asid_to_as(v, asid);
128
129 if (as)
130 return as;
131
132 return vhost_vdpa_alloc_as(v, asid);
133}
134
135static void vhost_vdpa_reset_map(struct vhost_vdpa *v, u32 asid)
136{
137 struct vdpa_device *vdpa = v->vdpa;
138 const struct vdpa_config_ops *ops = vdpa->config;
139
140 if (ops->reset_map)
141 ops->reset_map(vdpa, asid);
142}
143
144static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
145{
146 struct vhost_vdpa_as *as = asid_to_as(v, asid);
147
148 if (!as)
149 return -EINVAL;
150
151 hlist_del(&as->hash_link);
152 vhost_vdpa_iotlb_unmap(v, &as->iotlb, 0ULL, 0ULL - 1, asid);
153 /*
154 * Devices with vendor specific IOMMU may need to restore
155 * iotlb to the initial or default state, which cannot be
156 * cleaned up in the all range unmap call above. Give them
157 * a chance to clean up or reset the map to the desired
158 * state.
159 */
160 vhost_vdpa_reset_map(v, asid);
161 kfree(as);
162
163 return 0;
164}
165
166static void handle_vq_kick(struct vhost_work *work)
167{
168 struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
169 poll.work);
170 struct vhost_vdpa *v = container_of(vq->dev, struct vhost_vdpa, vdev);
171 const struct vdpa_config_ops *ops = v->vdpa->config;
172
173 ops->kick_vq(v->vdpa, vq - v->vqs);
174}
175
176static irqreturn_t vhost_vdpa_virtqueue_cb(void *private)
177{
178 struct vhost_virtqueue *vq = private;
179 struct eventfd_ctx *call_ctx = vq->call_ctx.ctx;
180
181 if (call_ctx)
182 eventfd_signal(call_ctx);
183
184 return IRQ_HANDLED;
185}
186
187static irqreturn_t vhost_vdpa_config_cb(void *private)
188{
189 struct vhost_vdpa *v = private;
190 struct eventfd_ctx *config_ctx = v->config_ctx;
191
192 if (config_ctx)
193 eventfd_signal(config_ctx);
194
195 return IRQ_HANDLED;
196}
197
198static void vhost_vdpa_setup_vq_irq(struct vhost_vdpa *v, u16 qid)
199{
200 struct vhost_virtqueue *vq = &v->vqs[qid];
201 const struct vdpa_config_ops *ops = v->vdpa->config;
202 struct vdpa_device *vdpa = v->vdpa;
203 int ret, irq;
204
205 if (!ops->get_vq_irq)
206 return;
207
208 irq = ops->get_vq_irq(vdpa, qid);
209 if (irq < 0)
210 return;
211
212 irq_bypass_unregister_producer(&vq->call_ctx.producer);
213 if (!vq->call_ctx.ctx)
214 return;
215
216 vq->call_ctx.producer.token = vq->call_ctx.ctx;
217 vq->call_ctx.producer.irq = irq;
218 ret = irq_bypass_register_producer(&vq->call_ctx.producer);
219 if (unlikely(ret))
220 dev_info(&v->dev, "vq %u, irq bypass producer (token %p) registration fails, ret = %d\n",
221 qid, vq->call_ctx.producer.token, ret);
222}
223
224static void vhost_vdpa_unsetup_vq_irq(struct vhost_vdpa *v, u16 qid)
225{
226 struct vhost_virtqueue *vq = &v->vqs[qid];
227
228 irq_bypass_unregister_producer(&vq->call_ctx.producer);
229}
230
231static int _compat_vdpa_reset(struct vhost_vdpa *v)
232{
233 struct vdpa_device *vdpa = v->vdpa;
234 u32 flags = 0;
235
236 v->suspended = false;
237
238 if (v->vdev.vqs) {
239 flags |= !vhost_backend_has_feature(v->vdev.vqs[0],
240 VHOST_BACKEND_F_IOTLB_PERSIST) ?
241 VDPA_RESET_F_CLEAN_MAP : 0;
242 }
243
244 return vdpa_reset(vdpa, flags);
245}
246
247static int vhost_vdpa_reset(struct vhost_vdpa *v)
248{
249 v->in_batch = 0;
250 return _compat_vdpa_reset(v);
251}
252
253static long vhost_vdpa_bind_mm(struct vhost_vdpa *v)
254{
255 struct vdpa_device *vdpa = v->vdpa;
256 const struct vdpa_config_ops *ops = vdpa->config;
257
258 if (!vdpa->use_va || !ops->bind_mm)
259 return 0;
260
261 return ops->bind_mm(vdpa, v->vdev.mm);
262}
263
264static void vhost_vdpa_unbind_mm(struct vhost_vdpa *v)
265{
266 struct vdpa_device *vdpa = v->vdpa;
267 const struct vdpa_config_ops *ops = vdpa->config;
268
269 if (!vdpa->use_va || !ops->unbind_mm)
270 return;
271
272 ops->unbind_mm(vdpa);
273}
274
275static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
276{
277 struct vdpa_device *vdpa = v->vdpa;
278 const struct vdpa_config_ops *ops = vdpa->config;
279 u32 device_id;
280
281 device_id = ops->get_device_id(vdpa);
282
283 if (copy_to_user(argp, &device_id, sizeof(device_id)))
284 return -EFAULT;
285
286 return 0;
287}
288
289static long vhost_vdpa_get_status(struct vhost_vdpa *v, u8 __user *statusp)
290{
291 struct vdpa_device *vdpa = v->vdpa;
292 const struct vdpa_config_ops *ops = vdpa->config;
293 u8 status;
294
295 status = ops->get_status(vdpa);
296
297 if (copy_to_user(statusp, &status, sizeof(status)))
298 return -EFAULT;
299
300 return 0;
301}
302
303static long vhost_vdpa_set_status(struct vhost_vdpa *v, u8 __user *statusp)
304{
305 struct vdpa_device *vdpa = v->vdpa;
306 const struct vdpa_config_ops *ops = vdpa->config;
307 u8 status, status_old;
308 u32 nvqs = v->nvqs;
309 int ret;
310 u16 i;
311
312 if (copy_from_user(&status, statusp, sizeof(status)))
313 return -EFAULT;
314
315 status_old = ops->get_status(vdpa);
316
317 /*
318 * Userspace shouldn't remove status bits unless reset the
319 * status to 0.
320 */
321 if (status != 0 && (status_old & ~status) != 0)
322 return -EINVAL;
323
324 if ((status_old & VIRTIO_CONFIG_S_DRIVER_OK) && !(status & VIRTIO_CONFIG_S_DRIVER_OK))
325 for (i = 0; i < nvqs; i++)
326 vhost_vdpa_unsetup_vq_irq(v, i);
327
328 if (status == 0) {
329 ret = _compat_vdpa_reset(v);
330 if (ret)
331 return ret;
332 } else
333 vdpa_set_status(vdpa, status);
334
335 if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && !(status_old & VIRTIO_CONFIG_S_DRIVER_OK))
336 for (i = 0; i < nvqs; i++)
337 vhost_vdpa_setup_vq_irq(v, i);
338
339 return 0;
340}
341
342static int vhost_vdpa_config_validate(struct vhost_vdpa *v,
343 struct vhost_vdpa_config *c)
344{
345 struct vdpa_device *vdpa = v->vdpa;
346 size_t size = vdpa->config->get_config_size(vdpa);
347
348 if (c->len == 0 || c->off > size)
349 return -EINVAL;
350
351 if (c->len > size - c->off)
352 return -E2BIG;
353
354 return 0;
355}
356
357static long vhost_vdpa_get_config(struct vhost_vdpa *v,
358 struct vhost_vdpa_config __user *c)
359{
360 struct vdpa_device *vdpa = v->vdpa;
361 struct vhost_vdpa_config config;
362 unsigned long size = offsetof(struct vhost_vdpa_config, buf);
363 u8 *buf;
364
365 if (copy_from_user(&config, c, size))
366 return -EFAULT;
367 if (vhost_vdpa_config_validate(v, &config))
368 return -EINVAL;
369 buf = kvzalloc(config.len, GFP_KERNEL);
370 if (!buf)
371 return -ENOMEM;
372
373 vdpa_get_config(vdpa, config.off, buf, config.len);
374
375 if (copy_to_user(c->buf, buf, config.len)) {
376 kvfree(buf);
377 return -EFAULT;
378 }
379
380 kvfree(buf);
381 return 0;
382}
383
384static long vhost_vdpa_set_config(struct vhost_vdpa *v,
385 struct vhost_vdpa_config __user *c)
386{
387 struct vdpa_device *vdpa = v->vdpa;
388 struct vhost_vdpa_config config;
389 unsigned long size = offsetof(struct vhost_vdpa_config, buf);
390 u8 *buf;
391
392 if (copy_from_user(&config, c, size))
393 return -EFAULT;
394 if (vhost_vdpa_config_validate(v, &config))
395 return -EINVAL;
396
397 buf = vmemdup_user(c->buf, config.len);
398 if (IS_ERR(buf))
399 return PTR_ERR(buf);
400
401 vdpa_set_config(vdpa, config.off, buf, config.len);
402
403 kvfree(buf);
404 return 0;
405}
406
407static bool vhost_vdpa_can_suspend(const struct vhost_vdpa *v)
408{
409 struct vdpa_device *vdpa = v->vdpa;
410 const struct vdpa_config_ops *ops = vdpa->config;
411
412 return ops->suspend;
413}
414
415static bool vhost_vdpa_can_resume(const struct vhost_vdpa *v)
416{
417 struct vdpa_device *vdpa = v->vdpa;
418 const struct vdpa_config_ops *ops = vdpa->config;
419
420 return ops->resume;
421}
422
423static bool vhost_vdpa_has_desc_group(const struct vhost_vdpa *v)
424{
425 struct vdpa_device *vdpa = v->vdpa;
426 const struct vdpa_config_ops *ops = vdpa->config;
427
428 return ops->get_vq_desc_group;
429}
430
431static long vhost_vdpa_get_features(struct vhost_vdpa *v, u64 __user *featurep)
432{
433 struct vdpa_device *vdpa = v->vdpa;
434 const struct vdpa_config_ops *ops = vdpa->config;
435 u64 features;
436
437 features = ops->get_device_features(vdpa);
438
439 if (copy_to_user(featurep, &features, sizeof(features)))
440 return -EFAULT;
441
442 return 0;
443}
444
445static u64 vhost_vdpa_get_backend_features(const struct vhost_vdpa *v)
446{
447 struct vdpa_device *vdpa = v->vdpa;
448 const struct vdpa_config_ops *ops = vdpa->config;
449
450 if (!ops->get_backend_features)
451 return 0;
452 else
453 return ops->get_backend_features(vdpa);
454}
455
static bool vhost_vdpa_has_persistent_map(const struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	return (!ops->set_map && !ops->dma_map) || ops->reset_map ||
	       vhost_vdpa_get_backend_features(v) & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
}

static long vhost_vdpa_set_features(struct vhost_vdpa *v, u64 __user *featurep)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_dev *d = &v->vdev;
	u64 actual_features;
	u64 features;
	int i;

	/*
	 * It's not allowed to change the features after they have
	 * been negotiated.
	 */
	if (ops->get_status(vdpa) & VIRTIO_CONFIG_S_FEATURES_OK)
		return -EBUSY;

	if (copy_from_user(&features, featurep, sizeof(features)))
		return -EFAULT;

	if (vdpa_set_features(vdpa, features))
		return -EINVAL;

	/* let the vqs know what has been configured */
	actual_features = ops->get_driver_features(vdpa);
	for (i = 0; i < d->nvqs; ++i) {
		struct vhost_virtqueue *vq = d->vqs[i];

		mutex_lock(&vq->mutex);
		vq->acked_features = actual_features;
		mutex_unlock(&vq->mutex);
	}

	return 0;
}

static long vhost_vdpa_get_vring_num(struct vhost_vdpa *v, u16 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u16 num;

	num = ops->get_vq_num_max(vdpa);

	if (copy_to_user(argp, &num, sizeof(num)))
		return -EFAULT;

	return 0;
}

static void vhost_vdpa_config_put(struct vhost_vdpa *v)
{
	if (v->config_ctx) {
		eventfd_ctx_put(v->config_ctx);
		v->config_ctx = NULL;
	}
}

static long vhost_vdpa_set_config_call(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_callback cb;
	int fd;
	struct eventfd_ctx *ctx;

	cb.callback = vhost_vdpa_config_cb;
	cb.private = v;
	if (copy_from_user(&fd, argp, sizeof(fd)))
		return -EFAULT;

	ctx = fd == VHOST_FILE_UNBIND ? NULL : eventfd_ctx_fdget(fd);
	swap(ctx, v->config_ctx);

	if (!IS_ERR_OR_NULL(ctx))
		eventfd_ctx_put(ctx);

	if (IS_ERR(v->config_ctx)) {
		long ret = PTR_ERR(v->config_ctx);

		v->config_ctx = NULL;
		return ret;
	}

	v->vdpa->config->set_config_cb(v->vdpa, &cb);

	return 0;
}

static long vhost_vdpa_get_iova_range(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vhost_vdpa_iova_range range = {
		.first = v->range.first,
		.last = v->range.last,
	};

	if (copy_to_user(argp, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}

static long vhost_vdpa_get_config_size(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 size;

	size = ops->get_config_size(vdpa);

	if (copy_to_user(argp, &size, sizeof(size)))
		return -EFAULT;

	return 0;
}

static long vhost_vdpa_get_vqs_count(struct vhost_vdpa *v, u32 __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (copy_to_user(argp, &vdpa->nvqs, sizeof(vdpa->nvqs)))
		return -EFAULT;

	return 0;
}

/* After a successful return of this ioctl the device must not process more
 * virtqueue descriptors. The device can answer to reads or writes of config
 * fields as if it were not suspended. In particular, writing to "queue_enable"
 * with a value of 1 will not make the device start processing buffers.
 */
static long vhost_vdpa_suspend(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	if (!ops->suspend)
		return -EOPNOTSUPP;

	ret = ops->suspend(vdpa);
	if (!ret)
		v->suspended = true;

	return ret;
}

/* After a successful return of this ioctl the device resumes processing
 * virtqueue descriptors. The device becomes fully operational the same way it
 * was before it was suspended.
 */
static long vhost_vdpa_resume(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	int ret;

	if (!(ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK))
		return 0;

	if (!ops->resume)
		return -EOPNOTSUPP;

	ret = ops->resume(vdpa);
	if (!ret)
		v->suspended = false;

	return ret;
}

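/*
 * Virtqueue ioctls are handled in two steps: the vhost-vdpa specific
 * commands (and VHOST_GET_VRING_BASE, which must fetch the current state
 * from the device) are served directly, everything else is delegated to
 * the generic vhost_vring_ioctl() and its result is then propagated to
 * the device through the config ops.
 */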
static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
				   void __user *argp)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_vq_state vq_state;
	struct vdpa_callback cb;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	u32 idx;
	long r;

	r = get_user(idx, (u32 __user *)argp);
	if (r < 0)
		return r;

	if (idx >= v->nvqs)
		return -ENOBUFS;

	idx = array_index_nospec(idx, v->nvqs);
	vq = &v->vqs[idx];

	switch (cmd) {
	case VHOST_VDPA_SET_VRING_ENABLE:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		ops->set_vq_ready(vdpa, idx, s.num);
		return 0;
	case VHOST_VDPA_GET_VRING_GROUP:
		if (!ops->get_vq_group)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_GET_VRING_DESC_GROUP:
		if (!vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_desc_group(vdpa, idx);
		if (s.num >= vdpa->ngroups)
			return -EIO;
		else if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_VDPA_SET_GROUP_ASID:
		if (copy_from_user(&s, argp, sizeof(s)))
			return -EFAULT;
		if (s.num >= vdpa->nas)
			return -EINVAL;
		if (!ops->set_group_asid)
			return -EOPNOTSUPP;
		return ops->set_group_asid(vdpa, idx, s.num);
	case VHOST_VDPA_GET_VRING_SIZE:
		if (!ops->get_vq_size)
			return -EOPNOTSUPP;
		s.index = idx;
		s.num = ops->get_vq_size(vdpa, idx);
		if (copy_to_user(argp, &s, sizeof(s)))
			return -EFAULT;
		return 0;
	case VHOST_GET_VRING_BASE:
		r = ops->get_vq_state(v->vdpa, idx, &vq_state);
		if (r)
			return r;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq->last_avail_idx = vq_state.packed.last_avail_idx |
					     (vq_state.packed.last_avail_counter << 15);
			vq->last_used_idx = vq_state.packed.last_used_idx |
					    (vq_state.packed.last_used_counter << 15);
		} else {
			vq->last_avail_idx = vq_state.split.avail_index;
		}
		break;
	}

	r = vhost_vring_ioctl(&v->vdev, cmd, argp);
	if (r)
		return r;

	switch (cmd) {
	case VHOST_SET_VRING_ADDR:
		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
			return -EINVAL;

		if (ops->set_vq_address(vdpa, idx,
					(u64)(uintptr_t)vq->desc,
					(u64)(uintptr_t)vq->avail,
					(u64)(uintptr_t)vq->used))
			r = -EINVAL;
		break;

	case VHOST_SET_VRING_BASE:
		if ((ops->get_status(vdpa) & VIRTIO_CONFIG_S_DRIVER_OK) && !v->suspended)
			return -EINVAL;

		if (vhost_has_feature(vq, VIRTIO_F_RING_PACKED)) {
			vq_state.packed.last_avail_idx = vq->last_avail_idx & 0x7fff;
			vq_state.packed.last_avail_counter = !!(vq->last_avail_idx & 0x8000);
			vq_state.packed.last_used_idx = vq->last_used_idx & 0x7fff;
			vq_state.packed.last_used_counter = !!(vq->last_used_idx & 0x8000);
		} else {
			vq_state.split.avail_index = vq->last_avail_idx;
		}
		r = ops->set_vq_state(vdpa, idx, &vq_state);
		break;

	case VHOST_SET_VRING_CALL:
		if (vq->call_ctx.ctx) {
			cb.callback = vhost_vdpa_virtqueue_cb;
			cb.private = vq;
			cb.trigger = vq->call_ctx.ctx;
		} else {
			cb.callback = NULL;
			cb.private = NULL;
			cb.trigger = NULL;
		}
		ops->set_vq_cb(vdpa, idx, &cb);
		vhost_vdpa_setup_vq_irq(v, idx);
		break;

	case VHOST_SET_VRING_NUM:
		ops->set_vq_num(vdpa, idx, vq->num);
		break;
	}

	return r;
}

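/*
 * Main ioctl dispatcher for the vhost-vdpa char device. Backend feature
 * negotiation is handled first, without taking the vhost mutex. An
 * illustrative userspace sequence (error handling omitted) might look
 * like:
 *
 *	u64 features;
 *
 *	ioctl(fd, VHOST_GET_BACKEND_FEATURES, &features);
 *	features &= ~BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
 *	ioctl(fd, VHOST_SET_BACKEND_FEATURES, &features);
 */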
static long vhost_vdpa_unlocked_ioctl(struct file *filep,
				      unsigned int cmd, unsigned long arg)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	u64 features;
	long r = 0;

	if (cmd == VHOST_SET_BACKEND_FEATURES) {
		if (copy_from_user(&features, featurep, sizeof(features)))
			return -EFAULT;
		if (features & ~(VHOST_VDPA_BACKEND_FEATURES |
				 BIT_ULL(VHOST_BACKEND_F_DESC_ASID) |
				 BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST) |
				 BIT_ULL(VHOST_BACKEND_F_SUSPEND) |
				 BIT_ULL(VHOST_BACKEND_F_RESUME) |
				 BIT_ULL(VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK)))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_SUSPEND)) &&
		    !vhost_vdpa_can_suspend(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_RESUME)) &&
		    !vhost_vdpa_can_resume(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		    !(features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)))
			return -EINVAL;
		if ((features & BIT_ULL(VHOST_BACKEND_F_DESC_ASID)) &&
		    !vhost_vdpa_has_desc_group(v))
			return -EOPNOTSUPP;
		if ((features & BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST)) &&
		    !vhost_vdpa_has_persistent_map(v))
			return -EOPNOTSUPP;
		vhost_set_backend_features(&v->vdev, features);
		return 0;
	}

	mutex_lock(&d->mutex);

	switch (cmd) {
	case VHOST_VDPA_GET_DEVICE_ID:
		r = vhost_vdpa_get_device_id(v, argp);
		break;
	case VHOST_VDPA_GET_STATUS:
		r = vhost_vdpa_get_status(v, argp);
		break;
	case VHOST_VDPA_SET_STATUS:
		r = vhost_vdpa_set_status(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG:
		r = vhost_vdpa_get_config(v, argp);
		break;
	case VHOST_VDPA_SET_CONFIG:
		r = vhost_vdpa_set_config(v, argp);
		break;
	case VHOST_GET_FEATURES:
		r = vhost_vdpa_get_features(v, argp);
		break;
	case VHOST_SET_FEATURES:
		r = vhost_vdpa_set_features(v, argp);
		break;
	case VHOST_VDPA_GET_VRING_NUM:
		r = vhost_vdpa_get_vring_num(v, argp);
		break;
	case VHOST_VDPA_GET_GROUP_NUM:
		if (copy_to_user(argp, &v->vdpa->ngroups,
				 sizeof(v->vdpa->ngroups)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_AS_NUM:
		if (copy_to_user(argp, &v->vdpa->nas, sizeof(v->vdpa->nas)))
			r = -EFAULT;
		break;
	case VHOST_SET_LOG_BASE:
	case VHOST_SET_LOG_FD:
		r = -ENOIOCTLCMD;
		break;
	case VHOST_VDPA_SET_CONFIG_CALL:
		r = vhost_vdpa_set_config_call(v, argp);
		break;
	case VHOST_GET_BACKEND_FEATURES:
		features = VHOST_VDPA_BACKEND_FEATURES;
		if (vhost_vdpa_can_suspend(v))
			features |= BIT_ULL(VHOST_BACKEND_F_SUSPEND);
		if (vhost_vdpa_can_resume(v))
			features |= BIT_ULL(VHOST_BACKEND_F_RESUME);
		if (vhost_vdpa_has_desc_group(v))
			features |= BIT_ULL(VHOST_BACKEND_F_DESC_ASID);
		if (vhost_vdpa_has_persistent_map(v))
			features |= BIT_ULL(VHOST_BACKEND_F_IOTLB_PERSIST);
		features |= vhost_vdpa_get_backend_features(v);
		if (copy_to_user(featurep, &features, sizeof(features)))
			r = -EFAULT;
		break;
	case VHOST_VDPA_GET_IOVA_RANGE:
		r = vhost_vdpa_get_iova_range(v, argp);
		break;
	case VHOST_VDPA_GET_CONFIG_SIZE:
		r = vhost_vdpa_get_config_size(v, argp);
		break;
	case VHOST_VDPA_GET_VQS_COUNT:
		r = vhost_vdpa_get_vqs_count(v, argp);
		break;
	case VHOST_VDPA_SUSPEND:
		r = vhost_vdpa_suspend(v);
		break;
	case VHOST_VDPA_RESUME:
		r = vhost_vdpa_resume(v);
		break;
	default:
		r = vhost_dev_ioctl(&v->vdev, cmd, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vdpa_vring_ioctl(v, cmd, argp);
		break;
	}

	if (r)
		goto out;

	switch (cmd) {
	case VHOST_SET_OWNER:
		r = vhost_vdpa_bind_mm(v);
		if (r)
			vhost_dev_reset_owner(d, NULL);
		break;
	}
out:
	mutex_unlock(&d->mutex);
	return r;
}

static void vhost_vdpa_general_unmap(struct vhost_vdpa *v,
				     struct vhost_iotlb_map *map, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->dma_map)
		ops->dma_unmap(vdpa, asid, map->start, map->size);
	else if (!ops->set_map)
		iommu_unmap(v->domain, map->start, map->size);
}

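/*
 * Tear down [start, last] of an address space backed by pinned user pages:
 * mark writable pages dirty, unpin them, fix up the pinned_vm accounting
 * and drop the translation from both the device/IOMMU and the IOTLB.
 */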
static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_dev *dev = &v->vdev;
	struct vhost_iotlb_map *map;
	struct page *page;
	unsigned long pfn, pinned;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		pinned = PFN_DOWN(map->size);
		for (pfn = PFN_DOWN(map->addr);
		     pinned > 0; pfn++, pinned--) {
			page = pfn_to_page(pfn);
			if (map->perm & VHOST_ACCESS_WO)
				set_page_dirty_lock(page);
			unpin_user_page(page);
		}
		atomic64_sub(PFN_DOWN(map->size), &dev->mm->pinned_vm);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

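/*
 * VA mappings hold a reference on the backing file (struct vdpa_map_file)
 * instead of pinning pages; drop it together with the translation.
 */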
static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
				u64 start, u64 last, u32 asid)
{
	struct vhost_iotlb_map *map;
	struct vdpa_map_file *map_file;

	while ((map = vhost_iotlb_itree_first(iotlb, start, last)) != NULL) {
		map_file = (struct vdpa_map_file *)map->opaque;
		fput(map_file->file);
		kfree(map_file);
		vhost_vdpa_general_unmap(v, map, asid);
		vhost_iotlb_map_free(iotlb, map);
	}
}

static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
				   struct vhost_iotlb *iotlb, u64 start,
				   u64 last, u32 asid)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (vdpa->use_va)
		return vhost_vdpa_va_unmap(v, iotlb, start, last, asid);

	return vhost_vdpa_pa_unmap(v, iotlb, start, last, asid);
}

static int perm_to_iommu_flags(u32 perm)
{
	int flags = 0;

	switch (perm) {
	case VHOST_ACCESS_WO:
		flags |= IOMMU_WRITE;
		break;
	case VHOST_ACCESS_RO:
		flags |= IOMMU_READ;
		break;
	case VHOST_ACCESS_RW:
		flags |= (IOMMU_WRITE | IOMMU_READ);
		break;
	default:
		WARN(1, "invalid vhost IOTLB permission\n");
		break;
	}

	return flags | IOMMU_CACHE;
}

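/*
 * Insert a translation into the IOTLB and program it into the device:
 * via dma_map() when the device translates DMA itself, via set_map()
 * (deferred to the end of the batch while batching), or into the platform
 * IOMMU domain otherwise.
 */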
static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
{
	struct vhost_dev *dev = &v->vdev;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);
	int r = 0;

	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
				      pa, perm, opaque);
	if (r)
		return r;

	if (ops->dma_map) {
		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
	} else if (ops->set_map) {
		if (!v->in_batch)
			r = ops->set_map(vdpa, asid, iotlb);
	} else {
		r = iommu_map(v->domain, iova, pa, size,
			      perm_to_iommu_flags(perm),
			      GFP_KERNEL_ACCOUNT);
	}
	if (r) {
		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
		return r;
	}

	if (!vdpa->use_va)
		atomic64_add(PFN_DOWN(size), &dev->mm->pinned_vm);

	return 0;
}

static void vhost_vdpa_unmap(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	u32 asid = iotlb_to_asid(iotlb);

	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1, asid);

	if (ops->set_map && !v->in_batch)
		ops->set_map(vdpa, asid, iotlb);
}

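/*
 * Map a userspace VA range by walking the VMAs backing it. Only shared
 * file mappings without VM_IO/VM_PFNMAP are mapped; anything else in the
 * range is skipped.
 */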
static int vhost_vdpa_va_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	u64 offset, map_size, map_iova = iova;
	struct vdpa_map_file *map_file;
	struct vm_area_struct *vma;
	int ret = 0;

	mmap_read_lock(dev->mm);

	while (size) {
		vma = find_vma(dev->mm, uaddr);
		if (!vma) {
			ret = -EINVAL;
			break;
		}
		map_size = min(size, vma->vm_end - uaddr);
		if (!(vma->vm_file && (vma->vm_flags & VM_SHARED) &&
		      !(vma->vm_flags & (VM_IO | VM_PFNMAP))))
			goto next;

		map_file = kzalloc(sizeof(*map_file), GFP_KERNEL);
		if (!map_file) {
			ret = -ENOMEM;
			break;
		}
		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
		map_file->offset = offset;
		map_file->file = get_file(vma->vm_file);
		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
				     perm, map_file);
		if (ret) {
			fput(map_file->file);
			kfree(map_file);
			break;
		}
next:
		size -= map_size;
		uaddr += map_size;
		map_iova += map_size;
	}
	if (ret)
		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);

	mmap_read_unlock(dev->mm);

	return ret;
}

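/*
 * Map a userspace VA range by physical address: pin the pages in
 * page_list-sized batches and coalesce physically contiguous runs so that
 * each vhost_vdpa_map() call covers the largest possible chunk.
 */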
static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
			     struct vhost_iotlb *iotlb,
			     u64 iova, u64 size, u64 uaddr, u32 perm)
{
	struct vhost_dev *dev = &v->vdev;
	struct page **page_list;
	unsigned long list_size = PAGE_SIZE / sizeof(struct page *);
	unsigned int gup_flags = FOLL_LONGTERM;
	unsigned long npages, cur_base, map_pfn, last_pfn = 0;
	unsigned long lock_limit, sz2pin, nchunks, i;
	u64 start = iova;
	long pinned;
	int ret = 0;

	/* Limit the use of memory for bookkeeping */
	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	if (perm & VHOST_ACCESS_WO)
		gup_flags |= FOLL_WRITE;

	npages = PFN_UP(size + (iova & ~PAGE_MASK));
	if (!npages) {
		ret = -EINVAL;
		goto free;
	}

	mmap_read_lock(dev->mm);

	lock_limit = PFN_DOWN(rlimit(RLIMIT_MEMLOCK));
	if (npages + atomic64_read(&dev->mm->pinned_vm) > lock_limit) {
		ret = -ENOMEM;
		goto unlock;
	}

	cur_base = uaddr & PAGE_MASK;
	iova &= PAGE_MASK;
	nchunks = 0;

	while (npages) {
		sz2pin = min_t(unsigned long, npages, list_size);
		pinned = pin_user_pages(cur_base, sz2pin,
					gup_flags, page_list);
		if (sz2pin != pinned) {
			if (pinned < 0) {
				ret = pinned;
			} else {
				unpin_user_pages(page_list, pinned);
				ret = -ENOMEM;
			}
			goto out;
		}
		nchunks++;

		if (!last_pfn)
			map_pfn = page_to_pfn(page_list[0]);

		for (i = 0; i < pinned; i++) {
			unsigned long this_pfn = page_to_pfn(page_list[i]);
			u64 csize;

			if (last_pfn && (this_pfn != last_pfn + 1)) {
				/* Map the contiguous chunk pinned so far */
				csize = PFN_PHYS(last_pfn - map_pfn + 1);
				ret = vhost_vdpa_map(v, iotlb, iova, csize,
						     PFN_PHYS(map_pfn),
						     perm, NULL);
				if (ret) {
					/*
					 * Unpin the pages that are left unmapped
					 * from this point on in the current
					 * page_list. The remaining outstanding
					 * ones which may stride across several
					 * chunks will be covered in the common
					 * error path subsequently.
					 */
					unpin_user_pages(&page_list[i],
							 pinned - i);
					goto out;
				}

				map_pfn = this_pfn;
				iova += csize;
				nchunks = 0;
			}

			last_pfn = this_pfn;
		}

		cur_base += PFN_PHYS(pinned);
		npages -= pinned;
	}

	/* Map the remaining chunk */
	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
			     PFN_PHYS(map_pfn), perm, NULL);
out:
	if (ret) {
		if (nchunks) {
			unsigned long pfn;

			/*
			 * Unpin the outstanding pages which are yet to be
			 * mapped but haven't due to vhost_vdpa_map() or
			 * pin_user_pages() failure.
			 *
			 * Mapped pages are accounted in vhost_vdpa_map(),
			 * hence the corresponding unpinning will be handled
			 * by vhost_vdpa_unmap().
			 */
			WARN_ON(!last_pfn);
			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
				unpin_user_page(pfn_to_page(pfn));
		}
		vhost_vdpa_unmap(v, iotlb, start, size);
	}
unlock:
	mmap_read_unlock(dev->mm);
free:
	free_page((unsigned long)page_list);
	return ret;
}

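/*
 * Validate an IOTLB update against the device IOVA range, reject overlaps
 * with existing mappings, then map by VA or PA depending on vdpa->use_va.
 */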
static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
					   struct vhost_iotlb *iotlb,
					   struct vhost_iotlb_msg *msg)
{
	struct vdpa_device *vdpa = v->vdpa;

	if (msg->iova < v->range.first || !msg->size ||
	    msg->iova > U64_MAX - msg->size + 1 ||
	    msg->iova + msg->size - 1 > v->range.last)
		return -EINVAL;

	if (vhost_iotlb_itree_first(iotlb, msg->iova,
				    msg->iova + msg->size - 1))
		return -EEXIST;

	if (vdpa->use_va)
		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
					 msg->uaddr, msg->perm);

	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
				 msg->perm);
}

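/*
 * Entry point for IOTLB messages written through the char device. UPDATE
 * and BATCH_BEGIN create the target address space on demand; while a batch
 * is open only its ASID is accepted, and for devices using set_map() the
 * device mapping is committed once at BATCH_END.
 */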
static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
					struct vhost_iotlb_msg *msg)
{
	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_iotlb *iotlb = NULL;
	struct vhost_vdpa_as *as = NULL;
	int r = 0;

	mutex_lock(&dev->mutex);

	r = vhost_dev_check_owner(dev);
	if (r)
		goto unlock;

	if (msg->type == VHOST_IOTLB_UPDATE ||
	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
		as = vhost_vdpa_find_alloc_as(v, asid);
		if (!as) {
			dev_err(&v->dev, "can't find or alloc asid %u\n",
				asid);
			r = -EINVAL;
			goto unlock;
		}
		iotlb = &as->iotlb;
	} else {
		iotlb = asid_to_iotlb(v, asid);
	}

	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
		if (v->in_batch && v->batch_asid != asid) {
			dev_info(&v->dev, "batch id %u asid %u\n",
				 v->batch_asid, asid);
		}
		if (!iotlb)
			dev_err(&v->dev, "no iotlb for asid %u\n", asid);
		r = -EINVAL;
		goto unlock;
	}

	switch (msg->type) {
	case VHOST_IOTLB_UPDATE:
		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
		break;
	case VHOST_IOTLB_INVALIDATE:
		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
		break;
	case VHOST_IOTLB_BATCH_BEGIN:
		v->batch_asid = asid;
		v->in_batch = true;
		break;
	case VHOST_IOTLB_BATCH_END:
		if (v->in_batch && ops->set_map)
			ops->set_map(vdpa, asid, iotlb);
		v->in_batch = false;
		break;
	default:
		r = -EINVAL;
		break;
	}
unlock:
	mutex_unlock(&dev->mutex);

	return r;
}

static ssize_t vhost_vdpa_chr_write_iter(struct kiocb *iocb,
					 struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct vhost_vdpa *v = file->private_data;
	struct vhost_dev *dev = &v->vdev;

	return vhost_chr_write_iter(dev, from);
}

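/*
 * Devices that translate DMA themselves (set_map/dma_map) need no IOMMU
 * domain. Otherwise allocate a platform IOMMU domain and attach the DMA
 * device to it; cache-coherent DMA is required for this path.
 */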
static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);
	const struct bus_type *bus;
	int ret;

	/* Device wants to do DMA by itself */
	if (ops->set_map || ops->dma_map)
		return 0;

	bus = dma_dev->bus;
	if (!bus)
		return -EFAULT;

	if (!device_iommu_capable(dma_dev, IOMMU_CAP_CACHE_COHERENCY)) {
		dev_warn_once(&v->dev,
			      "Failed to allocate domain, device is not IOMMU cache coherent capable\n");
		return -ENOTSUPP;
	}

	v->domain = iommu_domain_alloc(bus);
	if (!v->domain)
		return -EIO;

	ret = iommu_attach_device(v->domain, dma_dev);
	if (ret)
		goto err_attach;

	return 0;

err_attach:
	iommu_domain_free(v->domain);
	v->domain = NULL;
	return ret;
}

static void vhost_vdpa_free_domain(struct vhost_vdpa *v)
{
	struct vdpa_device *vdpa = v->vdpa;
	struct device *dma_dev = vdpa_get_dma_dev(vdpa);

	if (v->domain) {
		iommu_detach_device(v->domain, dma_dev);
		iommu_domain_free(v->domain);
	}

	v->domain = NULL;
}

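/*
 * IOVA range preference: use what the device reports, fall back to the
 * IOMMU aperture, and default to the whole 64-bit space otherwise.
 */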
static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
{
	struct vdpa_iova_range *range = &v->range;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;

	if (ops->get_iova_range) {
		*range = ops->get_iova_range(vdpa);
	} else if (v->domain && v->domain->geometry.force_aperture) {
		range->first = v->domain->geometry.aperture_start;
		range->last = v->domain->geometry.aperture_end;
	} else {
		range->first = 0;
		range->last = ULLONG_MAX;
	}
}

static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
{
	struct vhost_vdpa_as *as;
	u32 asid;

	for (asid = 0; asid < v->vdpa->nas; asid++) {
		as = asid_to_as(v, asid);
		if (as)
			vhost_vdpa_remove_as(v, asid);
	}

	vhost_vdpa_free_domain(v);
	vhost_dev_cleanup(&v->vdev);
	kfree(v->vdev.vqs);
	v->vdev.vqs = NULL;
}

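/*
 * Exclusive open: a single userspace fd may hold the device at a time.
 * Opening resets the device and (re)initializes the vhost virtqueues.
 */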
static int vhost_vdpa_open(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v;
	struct vhost_dev *dev;
	struct vhost_virtqueue **vqs;
	int r, opened;
	u32 i, nvqs;

	v = container_of(inode->i_cdev, struct vhost_vdpa, cdev);

	opened = atomic_cmpxchg(&v->opened, 0, 1);
	if (opened)
		return -EBUSY;

	nvqs = v->nvqs;
	r = vhost_vdpa_reset(v);
	if (r)
		goto err;

	vqs = kmalloc_array(nvqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		r = -ENOMEM;
		goto err;
	}

	dev = &v->vdev;
	for (i = 0; i < nvqs; i++) {
		vqs[i] = &v->vqs[i];
		vqs[i]->handle_kick = handle_vq_kick;
	}
	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
		       vhost_vdpa_process_iotlb_msg);

	r = vhost_vdpa_alloc_domain(v);
	if (r)
		goto err_alloc_domain;

	vhost_vdpa_set_iova_range(v);

	filep->private_data = v;

	return 0;

err_alloc_domain:
	vhost_vdpa_cleanup(v);
err:
	atomic_dec(&v->opened);
	return r;
}

static void vhost_vdpa_clean_irq(struct vhost_vdpa *v)
{
	u32 i;

	for (i = 0; i < v->nvqs; i++)
		vhost_vdpa_unsetup_vq_irq(v, i);
}

static int vhost_vdpa_release(struct inode *inode, struct file *filep)
{
	struct vhost_vdpa *v = filep->private_data;
	struct vhost_dev *d = &v->vdev;

	mutex_lock(&d->mutex);
	filep->private_data = NULL;
	vhost_vdpa_clean_irq(v);
	vhost_vdpa_reset(v);
	vhost_dev_stop(&v->vdev);
	vhost_vdpa_unbind_mm(v);
	vhost_vdpa_config_put(v);
	vhost_vdpa_cleanup(v);
	mutex_unlock(&d->mutex);

	atomic_dec(&v->opened);
	complete(&v->completion);

	return 0;
}

#ifdef CONFIG_MMU
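/*
 * Lazily map the doorbell page of the virtqueue selected by vm_pgoff into
 * userspace when it is first touched.
 */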
static vm_fault_t vhost_vdpa_fault(struct vm_fault *vmf)
{
	struct vhost_vdpa *v = vmf->vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	struct vm_area_struct *vma = vmf->vma;
	u16 index = vma->vm_pgoff;

	notify = ops->get_vq_notification(vdpa, index);

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	if (remap_pfn_range(vma, vmf->address & PAGE_MASK,
			    PFN_DOWN(notify.addr), PAGE_SIZE,
			    vma->vm_page_prot))
		return VM_FAULT_SIGBUS;

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct vhost_vdpa_vm_ops = {
	.fault = vhost_vdpa_fault,
};

static int vhost_vdpa_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct vhost_vdpa *v = vma->vm_file->private_data;
	struct vdpa_device *vdpa = v->vdpa;
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vdpa_notification_area notify;
	unsigned long index = vma->vm_pgoff;

	if (vma->vm_end - vma->vm_start != PAGE_SIZE)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (vma->vm_flags & VM_READ)
		return -EINVAL;
	if (index > 65535)
		return -EINVAL;
	if (!ops->get_vq_notification)
		return -ENOTSUPP;

	/* To be safe and easily modelled by userspace, we only
	 * support a doorbell that sits on a page boundary and
	 * does not share the page with other registers.
	 */
	notify = ops->get_vq_notification(vdpa, index);
	if (notify.addr & (PAGE_SIZE - 1))
		return -EINVAL;
	if (vma->vm_end - vma->vm_start != notify.size)
		return -ENOTSUPP;

	vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_ops = &vhost_vdpa_vm_ops;
	return 0;
}
#endif /* CONFIG_MMU */

static const struct file_operations vhost_vdpa_fops = {
	.owner		= THIS_MODULE,
	.open		= vhost_vdpa_open,
	.release	= vhost_vdpa_release,
	.write_iter	= vhost_vdpa_chr_write_iter,
	.unlocked_ioctl	= vhost_vdpa_unlocked_ioctl,
#ifdef CONFIG_MMU
	.mmap		= vhost_vdpa_mmap,
#endif /* CONFIG_MMU */
	.compat_ioctl	= compat_ptr_ioctl,
};

static void vhost_vdpa_release_dev(struct device *device)
{
	struct vhost_vdpa *v =
	       container_of(device, struct vhost_vdpa, dev);

	ida_simple_remove(&vhost_vdpa_ida, v->minor);
	kfree(v->vqs);
	kfree(v);
}

static int vhost_vdpa_probe(struct vdpa_device *vdpa)
{
	const struct vdpa_config_ops *ops = vdpa->config;
	struct vhost_vdpa *v;
	int minor;
	int i, r;

	/* We can't support platform IOMMU devices with more than one
	 * group or address space.
	 */
	if (!ops->set_map && !ops->dma_map &&
	    (vdpa->ngroups > 1 || vdpa->nas > 1))
		return -EOPNOTSUPP;

	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!v)
		return -ENOMEM;

	minor = ida_simple_get(&vhost_vdpa_ida, 0,
			       VHOST_VDPA_DEV_MAX, GFP_KERNEL);
	if (minor < 0) {
		kfree(v);
		return minor;
	}

	atomic_set(&v->opened, 0);
	v->minor = minor;
	v->vdpa = vdpa;
	v->nvqs = vdpa->nvqs;
	v->virtio_id = ops->get_device_id(vdpa);

	device_initialize(&v->dev);
	v->dev.release = vhost_vdpa_release_dev;
	v->dev.parent = &vdpa->dev;
	v->dev.devt = MKDEV(MAJOR(vhost_vdpa_major), minor);
	v->vqs = kmalloc_array(v->nvqs, sizeof(struct vhost_virtqueue),
			       GFP_KERNEL);
	if (!v->vqs) {
		r = -ENOMEM;
		goto err;
	}

	r = dev_set_name(&v->dev, "vhost-vdpa-%u", minor);
	if (r)
		goto err;

	cdev_init(&v->cdev, &vhost_vdpa_fops);
	v->cdev.owner = THIS_MODULE;

	r = cdev_device_add(&v->cdev, &v->dev);
	if (r)
		goto err;

	init_completion(&v->completion);
	vdpa_set_drvdata(vdpa, v);

	for (i = 0; i < VHOST_VDPA_IOTLB_BUCKETS; i++)
		INIT_HLIST_HEAD(&v->as[i]);

	return 0;

err:
	put_device(&v->dev);
	return r;
}

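/*
 * Unregister the char device first, then wait for an already-open fd to
 * be released (signalled through v->completion) before dropping the last
 * reference.
 */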
static void vhost_vdpa_remove(struct vdpa_device *vdpa)
{
	struct vhost_vdpa *v = vdpa_get_drvdata(vdpa);
	int opened;

	cdev_device_del(&v->cdev, &v->dev);

	do {
		opened = atomic_cmpxchg(&v->opened, 0, 1);
		if (!opened)
			break;
		wait_for_completion(&v->completion);
	} while (1);

	put_device(&v->dev);
}

static struct vdpa_driver vhost_vdpa_driver = {
	.driver = {
		.name	= "vhost_vdpa",
	},
	.probe	= vhost_vdpa_probe,
	.remove	= vhost_vdpa_remove,
};

static int __init vhost_vdpa_init(void)
{
	int r;

	r = alloc_chrdev_region(&vhost_vdpa_major, 0, VHOST_VDPA_DEV_MAX,
				"vhost-vdpa");
	if (r)
		goto err_alloc_chrdev;

	r = vdpa_register_driver(&vhost_vdpa_driver);
	if (r)
		goto err_vdpa_register_driver;

	return 0;

err_vdpa_register_driver:
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
err_alloc_chrdev:
	return r;
}
module_init(vhost_vdpa_init);

static void __exit vhost_vdpa_exit(void)
{
	vdpa_unregister_driver(&vhost_vdpa_driver);
	unregister_chrdev_region(vhost_vdpa_major, VHOST_VDPA_DEV_MAX);
}
module_exit(vhost_vdpa_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("vDPA-based vhost backend for virtio");