// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * Is it in a batchable area, i.e. is (addr, len) fully contained
	 * in (zone->addr, zone->size)?
	 */
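	/*
	 * Illustrative example (hypothetical values): with zone.addr ==
	 * 0xd0000000 and zone.size == 0x1000, a write of len == 8 to
	 * addr == 0xd0000ff8 is in range (it ends exactly at the zone
	 * boundary), while the same write to addr == 0xd0000ff9 is not.
	 */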
	if (len < 0)
		return 0;
	/* reject a range whose end wraps past the top of the address space */
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	/*
	 * last is the index of the entry to fill.  Verify userspace hasn't
	 * set last to be out of range, and that there is room in the ring.
	 * Leave one entry free in the ring so that userspace can
	 * differentiate between an empty ring and a full ring.
	 */
	insert = READ_ONCE(ring->last);
	if (insert >= KVM_COALESCED_MMIO_MAX ||
	    (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy the data into the first free entry of the ring */
	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;
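	/*
	 * Publish the entry before advancing ring->last: the write barrier
	 * below ensures that once userspace observes the new ring->last,
	 * the entry's contents are visible as well.
	 */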
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
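
/*
 * A minimal sketch of the consumer side, assuming userspace has mapped the
 * ring page from the vCPU fd (handle_write() is a hypothetical helper that
 * replays one deferred access).  The barrier ensures the entry has been
 * consumed before its slot is recycled:
 *
 *	while (ring->first != ring->last) {
 *		struct kvm_coalesced_mmio *ent =
 *			&ring->coalesced_mmio[ring->first];
 *		handle_write(ent->phys_addr, ent->data, ent->len, ent->pio);
 *		smp_wmb();
 *		ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *	}
 */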

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	kvm->coalesced_mmio_ring = page_address(page);
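	/*
	 * This page is shared with userspace: it backs the page that
	 * userspace mmap()s from the vCPU fd at
	 * KVM_COALESCED_MMIO_PAGE_OFFSET * PAGE_SIZE (on architectures
	 * that define that offset).
	 */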

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

	return 0;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
		      GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}
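
/*
 * Illustrative userspace usage of the VM ioctl that lands here (the fd and
 * zone values are hypothetical):
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xd0000000,
 *		.size = 0x1000,
 *		.pio  = 0,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 */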

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;
	int r;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			r = kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				&dev->dev);
			/*
			 * On failure, unregister destroys all devices on the
			 * bus, including the target device.  There's no need
			 * to restart the walk as there aren't any zones left.
			 */
			if (r)
				break;
		}
	}

	mutex_unlock(&kvm->slots_lock);

	/*
	 * Ignore the result of kvm_io_bus_unregister_dev(): from userspace's
	 * perspective, the coalesced MMIO zone is most definitely
	 * unregistered.
	 */
	return 0;
}