// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * Is the access in a batchable area, i.e. is (addr, len) fully
	 * contained in (zone->addr, zone->size)?
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned int avail;

	/*
	 * Are we able to batch it?  "last" is the first free entry; check
	 * that we don't run into the first used entry.  One entry is always
	 * left unused so that a full ring can be told apart from an empty
	 * one.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

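/*
 * Write handler for a registered zone on the KVM_MMIO_BUS/KVM_PIO_BUS:
 * instead of exiting to userspace, log the access into the ring shared with
 * userspace.  Returning -EOPNOTSUPP (access out of range, or ring full) makes
 * the caller fall back to an ordinary MMIO/PIO exit.
 */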
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

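	/*
	 * The ring page is mapped writable into userspace, so ring->last
	 * cannot be trusted: snapshot it once and reject out-of-range
	 * values before using it as an index into the ring.
	 */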
	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* Copy the data into the first free entry of the ring. */
	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;
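	/*
	 * Order the entry contents before the ring->last update so that
	 * userspace never sees the updated index before the entry it
	 * covers is fully written.
	 */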
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

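/*
 * Allocate the single page backing the coalesced MMIO ring.  Userspace maps
 * this page (at KVM_COALESCED_MMIO_PAGE_OFFSET) and consumes entries by
 * advancing ring->first.
 */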
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

	return 0;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

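/* Handler for the KVM_REGISTER_COALESCED_MMIO ioctl: add a batchable zone. */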
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
		      GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

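/*
 * Handler for the KVM_UNREGISTER_COALESCED_MMIO ioctl: remove every zone of
 * the same type (pio vs. mmio) that fully contains the requested range.
 */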
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;
	int r;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			r = kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);

			/*
			 * On failure, unregister destroys all devices on the
			 * bus _except_ the target device, i.e. coalesced_zones
			 * has been modified.  No need to restart the walk as
			 * there aren't any zones left.
			 */
			if (r)
				break;
			kvm_iodevice_destructor(&dev->dev);
		}
	}

	mutex_unlock(&kvm->slots_lock);

	/*
	 * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's
	 * perspective, the coalesced MMIO is most definitely unregistered.
	 */
	return 0;
}
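
/*
 * Example (userspace side): a minimal sketch of how the ring produced above
 * is typically drained, assuming the ring page has already been mapped, e.g.
 * at KVM_COALESCED_MMIO_PAGE_OFFSET pages into the vcpu mmap.  The
 * drain_coalesced_ring() and handle_one() names are hypothetical; only the
 * first/last indices and the "one slot always unused" convention come from
 * the code above.  Userspace advances ring->first; the kernel writes
 * ring->last and the entries themselves.
 *
 *	#include <linux/kvm.h>
 *
 *	static void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring,
 *					 void (*handle_one)(struct kvm_coalesced_mmio *))
 *	{
 *		while (ring->first != ring->last) {
 *			struct kvm_coalesced_mmio *ent =
 *				&ring->coalesced_mmio[ring->first];
 *
 *			handle_one(ent);	// replay ent->phys_addr/ent->data/ent->len
 *
 *			// Publish the freed slot only after the entry has been
 *			// consumed, mirroring the producer's smp_wmb().
 *			__sync_synchronize();
 *			ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
 *		}
 *	}
 */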