v3.15: virt/kvm/coalesced_mmio.c
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	/* Is it in a batchable area?
	 * (addr,len) must be fully contained in
	 * (zone->addr, zone->size).
	 */
	if (len < 0)
		return 0;
	if (addr + len < addr)
		return 0;
	if (addr < dev->zone.addr)
		return 0;
	if (addr + len > dev->zone.addr + dev->zone.size)
		return 0;
	return 1;
}
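
The guards above are simple enough to exercise outside the kernel. A minimal user-space sketch of the same test, with uint64_t standing in for gpa_t and purely hypothetical zone and access values, shows what the addr + len < addr wraparound guard rejects:

#include <assert.h>
#include <stdint.h>

typedef uint64_t gpa_t;

static int in_range(gpa_t zone_addr, gpa_t zone_size, gpa_t addr, int len)
{
	if (len < 0)
		return 0;
	if (addr + len < addr)		/* sum wrapped past 2^64 */
		return 0;
	if (addr < zone_addr)
		return 0;
	if (addr + len > zone_addr + zone_size)
		return 0;
	return 1;
}

int main(void)
{
	/* a 4-byte store inside a 128 KiB zone at 0xa0000 is batchable */
	assert(in_range(0xa0000, 0x20000, 0xb0000, 4));
	/* an access whose end wraps past the top of the address space is not */
	assert(!in_range(0xa0000, 0x20000, UINT64_MAX - 1, 4));
	return 0;
}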

static int coalesced_mmio_has_room(struct kvm_coalesced_mmio_dev *dev)
{
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;

	/* Are we able to batch it? */

	/* ring->last is the first free entry; check that we don't
	 * collide with the first used entry. There is always one
	 * unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail == 0) {
		/* full */
		return 0;
	}

	return 1;
}
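
Because avail is unsigned, the subtraction wraps: an empty ring (first == last) reports KVM_COALESCED_MMIO_MAX - 1 free slots, and used + avail always sums to KVM_COALESCED_MMIO_MAX - 1. That is the "one unused entry" invariant, and it is what lets first == last unambiguously mean empty rather than full. A user-space sketch of the arithmetic (the real KVM_COALESCED_MMIO_MAX is derived from PAGE_SIZE; 64 here is illustrative):

#include <assert.h>

#define RING_MAX 64	/* stand-in for KVM_COALESCED_MMIO_MAX */

static unsigned ring_avail(unsigned first, unsigned last)
{
	return (first - last - 1) % RING_MAX;
}

int main(void)
{
	assert(ring_avail(0, 0) == RING_MAX - 1);	/* empty ring */
	assert(ring_avail(5, 4) == 0);			/* full: writer must bail */
	assert(ring_avail(0, RING_MAX - 1) == 0);	/* full across the wrap */
	return 0;
}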

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->kvm->ring_lock);

	if (!coalesced_mmio_has_room(dev)) {
		spin_unlock(&dev->kvm->ring_lock);
		return -EOPNOTSUPP;
	}

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->kvm->ring_lock);
	return 0;
}
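
The producer above only ever advances ring->last, with smp_wmb() publishing the entry before the new index; draining entries and advancing ring->first is user space's job. A hedged sketch of the consumer loop, modeled on what a VMM such as QEMU does when it flushes the ring; handle_mmio_write is a hypothetical stand-in for the VMM's own MMIO replay callback:

#include <unistd.h>
#include <linux/kvm.h>

/* hypothetical VMM callback that replays one buffered MMIO store */
extern void handle_mmio_write(__u64 addr, const void *data, __u32 len);

void flush_coalesced_mmio(struct kvm_coalesced_mmio_ring *ring)
{
	/* mirrors the kernel's KVM_COALESCED_MMIO_MAX, which depends on PAGE_SIZE */
	unsigned max = (sysconf(_SC_PAGESIZE) - sizeof(*ring)) /
		       sizeof(struct kvm_coalesced_mmio);

	/* only this consumer writes ring->first; only the kernel writes ring->last */
	while (ring->first != ring->last) {
		struct kvm_coalesced_mmio *ent =
			&ring->coalesced_mmio[ring->first];

		handle_mmio_write(ent->phys_addr, ent->data, ent->len);

		/* finish reading the entry before handing the slot back */
		__sync_synchronize();

		ring->first = (ring->first + 1) % max;
	}
}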

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	list_del(&dev->list);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;

	ret = 0;
	kvm->coalesced_mmio_ring = page_address(page);

	/*
	 * We're using this spinlock to sync access to the coalesced ring.
	 * The list doesn't need its own lock since device registration and
	 * unregistration should only happen when kvm->slots_lock is held.
	 */
	spin_lock_init(&kvm->ring_lock);
	INIT_LIST_HEAD(&kvm->coalesced_zones);

out_err:
	return ret;
}
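
The zeroed page allocated here is the same one user space maps to read the ring. A hedged sketch of the discovery and mapping steps, modeled on how QEMU locates the ring, assuming kvm_fd (the /dev/kvm fd) and vcpu_fd (from KVM_CREATE_VCPU) are set up elsewhere; KVM_CHECK_EXTENSION on KVM_CAP_COALESCED_MMIO returns the ring's page offset inside the per-vcpu mmap region, or 0 if the capability is absent:

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

struct kvm_coalesced_mmio_ring *map_coalesced_ring(int kvm_fd, int vcpu_fd)
{
	int pgoff = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
	long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	void *run;

	if (pgoff <= 0 || size <= 0)
		return NULL;	/* capability absent or query failed */

	/* the ring page sits inside the per-vcpu mmap region */
	run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu_fd, 0);
	if (run == MAP_FAILED)
		return NULL;

	return (struct kvm_coalesced_mmio_ring *)
		((char *)run + (long)pgoff * sysconf(_SC_PAGESIZE));
}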

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	int ret;
	struct kvm_coalesced_mmio_dev *dev;

	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	dev->zone = *zone;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, zone->addr,
				      zone->size, &dev->dev);
	if (ret < 0)
		goto out_free_dev;
	list_add_tail(&dev->list, &kvm->coalesced_zones);
	mutex_unlock(&kvm->slots_lock);

	return 0;

out_free_dev:
	mutex_unlock(&kvm->slots_lock);
	kfree(dev);

	return ret;
}
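
From user space, each zone is set up with the KVM_REGISTER_COALESCED_MMIO vm ioctl, which lands in the handler above, and torn down again with KVM_UNREGISTER_COALESCED_MMIO. A short usage sketch with a hypothetical 128 KiB VGA-window zone; vm_fd is assumed to be a KVM_CREATE_VM fd:

#include <sys/ioctl.h>
#include <linux/kvm.h>

int coalesce_framebuffer(int vm_fd)
{
	struct kvm_coalesced_mmio_zone zone = {
		.addr = 0xa0000,	/* hypothetical guest-physical base */
		.size = 0x20000,
	};

	return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
	/* KVM_UNREGISTER_COALESCED_MMIO with the same zone undoes this */
}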

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev, *tmp;

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(dev, tmp, &kvm->coalesced_zones, list)
		if (coalesced_mmio_in_range(dev, zone->addr, zone->size)) {
			kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &dev->dev);
			kvm_iodevice_destructor(&dev->dev);
		}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
v3.1: virt/kvm/coalesced_mmio.c
/*
 * KVM coalesced MMIO
 *
 * Copyright (c) 2008 Bull S.A.S.
 * Copyright 2009 Red Hat, Inc. and/or its affiliates.
 *
 *  Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 */

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static inline struct kvm_coalesced_mmio_dev *to_mmio(struct kvm_io_device *dev)
{
	return container_of(dev, struct kvm_coalesced_mmio_dev, dev);
}

static int coalesced_mmio_in_range(struct kvm_coalesced_mmio_dev *dev,
				   gpa_t addr, int len)
{
	struct kvm_coalesced_mmio_zone *zone;
	struct kvm_coalesced_mmio_ring *ring;
	unsigned avail;
	int i;

	/* Are we able to batch it? */

	/* ring->last is the first free entry; check that we don't
	 * collide with the first used entry. There is always one
	 * unused entry in the buffer.
	 */
	ring = dev->kvm->coalesced_mmio_ring;
	avail = (ring->first - ring->last - 1) % KVM_COALESCED_MMIO_MAX;
	if (avail < KVM_MAX_VCPUS) {
		/* full */
		return 0;
	}

	/* Is it in a batchable area? */

	for (i = 0; i < dev->nb_zones; i++) {
		zone = &dev->zone[i];

		/* (addr,len) must be fully contained in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= addr &&
		    addr + len <= zone->addr + zone->size)
			return 1;
	}
	return 0;
}
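
In this older version the space check and the zone lookup live in one function, and the whole check runs before dev->lock is taken in coalesced_mmio_write() below. Demanding KVM_MAX_VCPUS free entries instead of one appears to be headroom for that race: every vcpu that has already passed the unlocked check can still complete one write under the lock without overflowing the ring. The v3.15 code above drops this slack by re-checking for room (coalesced_mmio_has_room) only after the lock is held.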

static int coalesced_mmio_write(struct kvm_io_device *this,
				gpa_t addr, int len, const void *val)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);
	struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;
	if (!coalesced_mmio_in_range(dev, addr, len))
		return -EOPNOTSUPP;

	spin_lock(&dev->lock);

	/* copy data into the first free entry of the ring */

	ring->coalesced_mmio[ring->last].phys_addr = addr;
	ring->coalesced_mmio[ring->last].len = len;
	memcpy(ring->coalesced_mmio[ring->last].data, val, len);
	smp_wmb();
	ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
	spin_unlock(&dev->lock);
	return 0;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
	struct kvm_coalesced_mmio_dev *dev = to_mmio(this);

	kfree(dev);
}

static const struct kvm_io_device_ops coalesced_mmio_ops = {
	.write      = coalesced_mmio_write,
	.destructor = coalesced_mmio_destructor,
};

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
	struct kvm_coalesced_mmio_dev *dev;
	struct page *page;
	int ret;

	ret = -ENOMEM;
	page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!page)
		goto out_err;
	kvm->coalesced_mmio_ring = page_address(page);

	ret = -ENOMEM;
	dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
	if (!dev)
		goto out_free_page;
	spin_lock_init(&dev->lock);
	kvm_iodevice_init(&dev->dev, &coalesced_mmio_ops);
	dev->kvm = kvm;
	kvm->coalesced_mmio_dev = dev;

	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, &dev->dev);
	mutex_unlock(&kvm->slots_lock);
	if (ret < 0)
		goto out_free_dev;

	return ret;

out_free_dev:
	kvm->coalesced_mmio_dev = NULL;
	kfree(dev);
out_free_page:
	kvm->coalesced_mmio_ring = NULL;
	__free_page(page);
out_err:
	return ret;
}

void kvm_coalesced_mmio_free(struct kvm *kvm)
{
	if (kvm->coalesced_mmio_ring)
		free_page((unsigned long)kvm->coalesced_mmio_ring);
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
					 struct kvm_coalesced_mmio_zone *zone)
{
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);
	if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
		mutex_unlock(&kvm->slots_lock);
		return -ENOBUFS;
	}

	dev->zone[dev->nb_zones] = *zone;
	dev->nb_zones++;

	mutex_unlock(&kvm->slots_lock);
	return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
					   struct kvm_coalesced_mmio_zone *zone)
{
	int i;
	struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
	struct kvm_coalesced_mmio_zone *z;

	if (dev == NULL)
		return -ENXIO;

	mutex_lock(&kvm->slots_lock);

	i = dev->nb_zones;
	while (i) {
		z = &dev->zone[i - 1];

		/* unregister all zones included in
		 * (zone->addr, zone->size)
		 */

		if (zone->addr <= z->addr &&
		    z->addr + z->size <= zone->addr + zone->size) {
			dev->nb_zones--;
			*z = dev->zone[dev->nb_zones];
		}
		i--;
	}

	mutex_unlock(&kvm->slots_lock);

	return 0;
}
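
The removal above uses the classic swap-with-last idiom for unordered arrays: a matching zone is overwritten by the current last element, so no tail shifting is needed. Because the loop walks the array from the top down, the element swapped into slot i - 1 was already examined and is known not to match, so nothing is skipped. A tiny stand-alone illustration of the idiom (hypothetical values):

#include <assert.h>

static void remove_at(int *arr, int *n, int idx)
{
	(*n)--;
	arr[idx] = arr[*n];	/* last element takes the victim's slot */
}

int main(void)
{
	int zones[4] = { 10, 20, 30, 40 };
	int n = 4;

	remove_at(zones, &n, 1);	/* drop 20; 40 moves into its place */
	assert(n == 3);
	assert(zones[0] == 10 && zones[1] == 40 && zones[2] == 30);
	return 0;
}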