// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * Is the access in a batchable area, i.e. is (addr, len) fully
	 * contained in (zone->addr, zone->size)?
	 */
	if (len < 0)
		return 0;
	/* Reject wrap-around of addr + len. */
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}
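
/*
 * Worked example of the range check above: for a zone registered at
 * addr 0x1000 with size 0x100, a 4-byte write to 0x10fc is in range
 * (0x10fc + 4 == 0x1100 == zone end), while a 4-byte write to 0x10fd
 * is not (0x10fd + 4 > 0x1100) and takes the normal MMIO exit path
 * instead of being batched.
 */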

static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	/* Punt the write back to the normal (non-batched) MMIO path. */
	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	/*
	 * last is the index of the entry to fill.  Verify userspace hasn't
	 * set last to be out of range, and that there is room in the ring.
	 * Leave one entry free in the ring so that userspace can
	 * differentiate between an empty ring and a full ring.
	 */
	insert = READ_ONCE(ring->last);
	if (insert >= KVM_COALESCED_MMIO_MAX ||
	    (insert + 1) % KVM_COALESCED_MMIO_MAX == READ_ONCE(ring->first)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* Copy the data into the first free entry of the ring. */
	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;

	/* Ensure the entry is visible to userspace before advancing last. */
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;

	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
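
/*
 * Example of the full-ring check: with first == 3 and last == 2, the slot
 * at index 2 is the only free entry, so (2 + 1) % KVM_COALESCED_MMIO_MAX
 * == 3 == first and the write is rejected.  The ring therefore holds at
 * most KVM_COALESCED_MMIO_MAX - 1 entries; the sacrificed slot is what
 * lets userspace tell an empty ring (first == last) from a full one.
 */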

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};
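
/*
 * Note there is no .read callback: only writes can be coalesced, because
 * a read needs its result before the vCPU can make forward progress and
 * so cannot be deferred until userspace flushes the ring.
 */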

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;

	page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!page)
		return -ENOMEM;

	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

	return 0;
}
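
/*
 * The page allocated above is the ring userspace consumes.  A minimal
 * sketch of how a VMM typically reaches it (not part of this file): the
 * ring lives at a page offset within the vCPU mmap region, where the
 * offset is advertised by the KVM_CAP_COALESCED_MMIO extension check:
 *
 *	len  = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	run  = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		    vcpu_fd, 0);
 *	off  = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
 *	ring = (struct kvm_coalesced_mmio_ring *)((char *)run
 *						   + off * page_size);
 */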

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
		      GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}
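
/*
 * Illustrative userspace usage (a sketch, not part of this file): a VMM
 * registers a zone with the KVM_REGISTER_COALESCED_MMIO vm ioctl, e.g. to
 * batch writes to a 4KiB device-control region:
 *
 *	struct kvm_coalesced_mmio_zone zone = {
 *		.addr = 0xf0000000,	// hypothetical guest-physical base
 *		.size = 0x1000,
 *		.pio  = 0,
 *	};
 *	ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
 *
 * Passing the same zone to KVM_UNREGISTER_COALESCED_MMIO removes it again.
 */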

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;
	int r;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list) {
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			r = kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
			/*
			 * On failure, unregister destroys all devices on the
			 * bus, including the target device.  There's no need
			 * to restart the walk as there aren't any zones left.
			 */
			if (r)
				break;
		}
	}

	mutex_unlock(&kvm->slots_lock);

	/*
	 * Ignore the result of kvm_io_bus_unregister_dev(); from userspace's
	 * perspective, the coalesced MMIO is most definitely unregistered.
	 */
	return 0;
}