/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * Is it in a batchable area?
	 * (addr, len) must be fully contained in (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/*
	 * Are we able to batch it?  last is the first free entry; check
	 * that we do not run into the first used entry.  There is always
	 * one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy the data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}

// SPDX-License-Identifier: GPL-2.0
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 * Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include <kvm/iodev.h>

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/*
	 * Is it in a batchable area?
	 * (addr, len) must be fully contained in (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}

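/*
 * The ring is a single page shared with userspace: the kernel produces
 * entries at ring->last, userspace consumes them at ring->first, and one
 * slot is always left unused so that first == last unambiguously means
 * "empty".  Because the page is writable by userspace, the caller passes
 * in a snapshot of ring->last that is read once and validated before any
 * entry is written.
 */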
static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev, u32 last)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/*
	 * Are we able to batch it?  last is the first free entry; check
	 * that we do not run into the first used entry.  There is always
	 * one unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}

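/*
 * Write handler for the coalesced zone on the MMIO (or PIO) bus.
 * Returning -EOPNOTSUPP means the access was not claimed here, so the
 * caller falls back to the usual exit-to-userspace emulation path; on
 * success the write is only queued in the ring and takes effect when
 * userspace drains it.
 */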
static int coalesced_mmio_write(struct kvm_vcpu *vcpu,
				struct kvm_io_device *this, gpa_t addr,
				int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	__u32 insert;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	insert = READ_ONCE(ring->last);
	if (!coalesced_mmio_has_room(dev, insert) ||
	    insert >= KVM_COALESCED_MMIO_MAX) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy the data into the first free entry of the ring */

	ring->coalesced_mmio[insert].phys_addr = addr;
	ring->coalesced_mmio[insert].len = len;
	memcpy(ring->coalesced_mmio[insert].data, val, len);
	ring->coalesced_mmio[insert].pio = dev->zone.pio;
	smp_wmb();
	ring->last = (insert + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

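/*
 * Called at VM creation time to allocate the shared ring page.  Userspace
 * reaches it through the vcpu mmap area, at page offset
 * KVM_COALESCED_MMIO_PAGE_OFFSET (also reported by checking
 * KVM_CAP_COALESCED_MMIO).
 */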
int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev),
		      GFP_KERNEL_ACCOUNT);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm,
				      zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS,
				      zone->addr, zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}

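/*
 * A registered zone is torn down when the range passed in by userspace is
 * entirely contained in it (passing back the exact zone that was
 * registered therefore always matches).
 */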
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	if (zone->pio != 1 && zone->pio != 0)
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (zone->pio == dev->zone.pio &&
		    coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm,
				zone->pio ? KVM_PIO_BUS : KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
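
For reference, a minimal userspace sketch of how this ring is meant to be consumed, using only the standard KVM uAPI (struct kvm_coalesced_mmio_zone, KVM_REGISTER_COALESCED_MMIO, KVM_COALESCED_MMIO_PAGE_OFFSET). The vm_fd and run arguments and the handle_write() callback are placeholders rather than part of any real tree, and memory barriers are omitted for brevity:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Ask KVM to start batching writes to one guest-physical range (MMIO). */
static int register_coalesced_zone(int vm_fd, __u64 addr, __u32 size)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = addr,
		.size = size,
		.pio  = 0,
	};

	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

/*
 * Drain the ring after KVM_RUN returns.  The ring page lives inside the
 * vcpu mmap area (sized by KVM_GET_VCPU_MMAP_SIZE) at page offset
 * KVM_COALESCED_MMIO_PAGE_OFFSET.  handle_write() stands in for the
 * device-emulation callback.
 */
static void drain_coalesced_ring(struct kvm_run *run, long page_size,
				 void (*handle_write)(__u64 addr,
						      const void *data,
						      __u32 len))
{
	struct kvm_coalesced_mmio_ring *ring = (void *)((char *)run +
			KVM_COALESCED_MMIO_PAGE_OFFSET * page_size);
	/* Mirrors the kernel's KVM_COALESCED_MMIO_MAX for this page size. */
	__u32 max = (page_size - sizeof(*ring)) /
		    sizeof(struct kvm_coalesced_mmio);

	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *ent = &ring->coalesced_mmio[ring->first];

		handle_write(ent->phys_addr, ent->data, ent->len);
		ring->first = (ring->first + 1) % max;
	}
}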